| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import pytest
from pants.backend.core.tasks.list_goals import ListGoals
from pants.backend.core.tasks.task import Task
from pants.goal.goal import Goal
from pants.goal.task_registrar import TaskRegistrar
from pants_test.tasks.test_base import ConsoleTaskTest
class ListGoalsTest(ConsoleTaskTest):
_INSTALLED_HEADER = 'Installed goals:'
_UNDOCUMENTED_HEADER = 'Undocumented goals:'
_LIST_GOALS_NAME = 'goals'
_LIST_GOALS_DESC = 'List all documented goals.'
_LLAMA_NAME = 'llama'
_LLAMA_DESC = 'With such handsome fiber, no wonder everyone loves Llamas.'
_ALPACA_NAME = 'alpaca'
@classmethod
def task_type(cls):
return ListGoals
class LlamaTask(Task):
pass
class AlpacaTask(Task):
pass
def test_list_goals(self):
Goal.clear()
self.assert_console_output(self._INSTALLED_HEADER)
TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\
.install().with_description(self._LIST_GOALS_DESC)
self.assert_console_output(
self._INSTALLED_HEADER,
' %s: %s' % (self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
)
TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\
.install().with_description(self._LLAMA_DESC)
self.assert_console_output(
self._INSTALLED_HEADER,
' %s: %s' % (self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
' %s: %s' % (self._LLAMA_NAME, self._LLAMA_DESC),
)
TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\
.install()
self.assert_console_output(
self._INSTALLED_HEADER,
' %s: %s' % (self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
' %s: %s' % (self._LLAMA_NAME, self._LLAMA_DESC),
)
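    # AlpacaTask was installed without a description, so the default listing
    # above omits it; test_list_goals_all below verifies it shows up under
    # the undocumented header instead.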
def test_list_goals_all(self):
Goal.clear()
TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\
.install().with_description(self._LIST_GOALS_DESC)
TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\
.install().with_description(self._LLAMA_DESC)
TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\
.install()
self.assert_console_output(
self._INSTALLED_HEADER,
' %s: %s' % (self._LIST_GOALS_NAME, self._LIST_GOALS_DESC),
' %s: %s' % (self._LLAMA_NAME, self._LLAMA_DESC),
'',
self._UNDOCUMENTED_HEADER,
' %s' % self._ALPACA_NAME,
args=['--test-all'],
)
# TODO(John Sirois): Re-enable when fixing up ListGoals `--graph` in
# https://github.com/pantsbuild/pants/issues/918
@pytest.mark.xfail
def test_list_goals_graph(self):
Goal.clear()
TaskRegistrar(name=self._LIST_GOALS_NAME, action=ListGoals)\
.install().with_description(self._LIST_GOALS_DESC)
TaskRegistrar(name=self._LLAMA_NAME, action=ListGoalsTest.LlamaTask)\
.install().with_description(self._LLAMA_DESC)
TaskRegistrar(name=self._ALPACA_NAME, action=ListGoalsTest.AlpacaTask, dependencies=[self._LLAMA_NAME])\
.install()
self.assert_console_output(
'digraph G {\n rankdir=LR;\n graph [compound=true];',
' subgraph cluster_goals {\n node [style=filled];\n color = blue;\n label = "goals";',
' goals_goals [label="goals"];',
' }',
' subgraph cluster_llama {\n node [style=filled];\n color = blue;\n label = "llama";',
' llama_llama [label="llama"];',
' }',
' subgraph cluster_alpaca {\n node [style=filled];\n color = blue;\n label = "alpaca";',
' alpaca_alpaca [label="alpaca"];',
' }',
' alpaca_alpaca -> llama_llama [ltail=cluster_alpaca lhead=cluster_llama];',
'}',
args=['--test-graph'],
)
| tejal29/pants | tests/python/pants_test/tasks/test_list_goals.py | Python | apache-2.0 | 4,020 |
"""Unit tests for the Jira manual test duration collector."""
from .base import JiraTestCase
class JiraManualTestDurationTest(JiraTestCase):
"""Unit tests for the Jira manual test duration collector."""
METRIC_TYPE = "manual_test_duration"
async def test_duration(self):
"""Test that the duration is returned."""
test_cases_json = dict(
issues=[self.issue(key="1", field=10), self.issue(key="2", field=15), self.issue(key="3", field=None)]
)
response = await self.get_response(test_cases_json)
self.assert_measurement(
response,
value="25",
entities=[self.entity(key="1", duration="10.0"), self.entity(key="2", duration="15.0")],
)
| ICTU/quality-time | components/collector/tests/source_collectors/jira/test_manual_test_duration.py | Python | apache-2.0 | 745 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pandas as pd
import tempfile
import os
from bigdl.chronos.forecaster.prophet_forecaster import ProphetForecaster
from unittest import TestCase
import pytest
def create_data():
seq_len = 400
data = pd.DataFrame(pd.date_range('20130101', periods=seq_len), columns=['ds'])
data.insert(1, 'y', np.random.rand(seq_len))
horizon = np.random.randint(2, 50)
validation_data = pd.DataFrame(pd.date_range('20140426', periods=horizon), columns=['ds'])
validation_data.insert(1, 'y', np.random.rand(horizon))
return data, validation_data
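# Note: Prophet requires input frames with exactly these two columns:
# 'ds' (the datestamp) and 'y' (the value to forecast), which is why
# create_data() shapes both frames this way.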
class TestChronosModelProphetForecaster(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_prophet_forecaster_fit_eval_pred(self):
data, validation_data = create_data()
for valid_data in [None, validation_data]:
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
train_loss = forecaster.fit(data, valid_data)
test_pred = forecaster.predict(validation_data.shape[0])
assert test_pred.shape[0] == validation_data.shape[0]
test_mse = forecaster.evaluate(validation_data)
def test_prophet_forecaster_save_restore(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
train_loss = forecaster.fit(data, validation_data)
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "json")
test_pred_save = forecaster.predict(validation_data.shape[0])
forecaster.save(ckpt_name)
forecaster.restore(ckpt_name)
test_pred_restore = forecaster.predict(validation_data.shape[0])
assert (test_pred_save['yhat'] == test_pred_restore['yhat']).all()
def test_prophet_forecaster_runtime_error(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
with pytest.raises(Exception,
match="You must call fit or restore first before calling predict!"):
forecaster.predict(horizon=validation_data.shape[0])
with pytest.raises(Exception,
match="You must call fit or restore first before calling save!"):
model_file = "tmp.json"
forecaster.save(model_file)
def test_prophet_forecaster_shape_error(self):
data, validation_data = create_data()
forecaster = ProphetForecaster(changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric="mse",
)
with pytest.raises(AssertionError):
forecaster.fit(data[['ds']], validation_data)
with pytest.raises(AssertionError):
forecaster.fit(data, validation_data[['ds']])
| intel-analytics/BigDL | python/chronos/test/bigdl/chronos/forecaster/test_prophet_forecaster.py | Python | apache-2.0 | 4,818 |
#!/usr/bin/env python
# Source code adapted from the Python CGI tutorial by Nick Zarczynski:
# https://pointlessprogramming.wordpress.com/2011/02/13/python-cgi-tutorial-2/
import cgi
form = cgi.FieldStorage()
val1 = form.getvalue('first')
val2 = form.getvalue('last')
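# Illustrative request: GET .../test_urlencode.py?first=Ada&last=Lovelace
# yields val1='Ada', val2='Lovelace'. FieldStorage.getvalue() returns None
# for a missing key, so omitted parameters would render as "None" below.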
print "Content-type: text/html"
print
print "<html><head><title>Test URL Encoding</title></head><body> Hello my name is %s %s </body></html>" % (val1, val2) | davischau/CMPUT410Lab3 | server/test_urlencode.py | Python | apache-2.0 | 404 |
# Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from random import randint, uniform, choice
from string import ascii_lowercase
from thingsboard_gateway.connectors.odbc.odbc_uplink_converter import OdbcUplinkConverter
class OdbcUplinkConverterTests(unittest.TestCase):
def setUp(self):
self.converter = OdbcUplinkConverter()
self.db_data = {"boolValue": True,
"intValue": randint(0, 256),
"floatValue": uniform(-3.1415926535, 3.1415926535),
"stringValue": "".join(choice(ascii_lowercase) for _ in range(8))}
def test_glob_matching(self):
converted_data = self.converter.convert("*", self.db_data)
self.assertDictEqual(converted_data, self.db_data)
def test_data_subset(self):
config = ["floatValue", "boolValue"]
converted_data = self.converter.convert(config, self.db_data)
expected_data = {}
for key in config:
expected_data[key] = self.db_data[key]
self.assertDictEqual(converted_data, expected_data)
def test_alias(self):
config = [{"column": "stringValue", "name": "valueOfString"}]
converted_data = self.converter.convert(config, self.db_data)
self.assertDictEqual(converted_data, {config[0]["name"]: self.db_data[config[0]["column"]]})
def test_name_expression(self):
attr_name = "someAttribute"
config = [{"nameExpression": "key", "value": "intValue"}]
self.db_data["key"] = attr_name
converted_data = self.converter.convert(config, self.db_data)
self.assertDictEqual(converted_data, {attr_name: self.db_data[config[0]["value"]]})
def test_value_config(self):
config = [{"name": "someValue", "value": "stringValue + str(intValue)"}]
converted_data = self.converter.convert(config, self.db_data)
self.assertDictEqual(converted_data, {config[0]["name"]: self.db_data["stringValue"] + str(self.db_data["intValue"])})
def test_one_valid_one_invalid_configs(self):
config = ["unkownColumnValue", "stringValue"]
converted_data = self.converter.convert(config, self.db_data)
self.assertDictEqual(converted_data, {config[1]: self.db_data[config[1]]})
if __name__ == '__main__':
unittest.main()
| thingsboard/thingsboard-gateway | tests/converters/test_odbc_uplink_converter.py | Python | apache-2.0 | 2,890 |
from mpl_toolkits.basemap import Basemap
import BaseDomsHandler
import ResultsStorage
import numpy as np
import string
from cStringIO import StringIO
from multiprocessing import Process, Manager
import matplotlib
matplotlib.use('Agg')  # select a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
PARAMETER_TO_FIELD = {
"sst": "sea_water_temperature",
"sss": "sea_water_salinity"
}
PARAMETER_TO_UNITS = {
"sst": "($^\circ$ C)",
"sss": "(g/L)"
}
def __square(minLon, maxLon, minLat, maxLat):
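    # Pad the shorter span so the region is square; e.g. __square(0, 10, 0, 4)
    # widens the 4-degree latitude span by (10 - 4) / 2 = 3 on each side and
    # returns (0, 10, -3, 7).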
if maxLat - minLat > maxLon - minLon:
a = ((maxLat - minLat) - (maxLon - minLon)) / 2.0
minLon -= a
maxLon += a
elif maxLon - minLon > maxLat - minLat:
a = ((maxLon - minLon) - (maxLat - minLat)) / 2.0
minLat -= a
maxLat += a
return minLon, maxLon, minLat, maxLat
def render(d, lats, lons, z, primary, secondary, parameter):
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.set_title(string.upper("%s vs. %s" % (primary, secondary)))
# ax.set_ylabel('Latitude')
# ax.set_xlabel('Longitude')
minLatA = np.min(lats)
maxLatA = np.max(lats)
minLonA = np.min(lons)
maxLonA = np.max(lons)
minLat = minLatA - (abs(maxLatA - minLatA) * 0.1)
maxLat = maxLatA + (abs(maxLatA - minLatA) * 0.1)
minLon = minLonA - (abs(maxLonA - minLonA) * 0.1)
maxLon = maxLonA + (abs(maxLonA - minLonA) * 0.1)
minLon, maxLon, minLat, maxLat = __square(minLon, maxLon, minLat, maxLat)
# m = Basemap(projection='mill', llcrnrlon=-180,llcrnrlat=-80,urcrnrlon=180,urcrnrlat=80,resolution='l')
m = Basemap(projection='mill', llcrnrlon=minLon, llcrnrlat=minLat, urcrnrlon=maxLon, urcrnrlat=maxLat,
resolution='l')
m.drawparallels(np.arange(minLat, maxLat, (maxLat - minLat) / 5.0), labels=[1, 0, 0, 0], fontsize=10)
m.drawmeridians(np.arange(minLon, maxLon, (maxLon - minLon) / 5.0), labels=[0, 0, 0, 1], fontsize=10)
m.drawcoastlines()
m.drawmapboundary(fill_color='#99ffff')
m.fillcontinents(color='#cc9966', lake_color='#99ffff')
#lats, lons = np.meshgrid(lats, lons)
masked_array = np.ma.array(z, mask=np.isnan(z))
z = masked_array
values = np.zeros(len(z))
for i in range(0, len(z)):
values[i] = ((z[i] - np.min(z)) / (np.max(z) - np.min(z)) * 20.0) + 10
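    # The loop above rescales each z value linearly into [10, 30]; these are
    # passed to scatter() as marker sizes, so bigger differences draw bigger
    # points.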
x, y = m(lons, lats)
im1 = m.scatter(x, y, values)
im1.set_array(z)
cb = m.colorbar(im1)
units = PARAMETER_TO_UNITS[parameter] if parameter in PARAMETER_TO_UNITS else PARAMETER_TO_UNITS["sst"]
cb.set_label("Difference %s" % units)
sio = StringIO()
plt.savefig(sio, format='png')
plot = sio.getvalue()
if d is not None:
d['plot'] = plot
return plot
class DomsMapPlotQueryResults(BaseDomsHandler.DomsQueryResults):
def __init__(self, lats, lons, z, parameter, primary, secondary, args=None, bounds=None, count=None, details=None, computeOptions=None, executionId=None, plot=None):
BaseDomsHandler.DomsQueryResults.__init__(self, results={"lats": lats, "lons": lons, "values": z}, args=args, details=details, bounds=bounds, count=count, computeOptions=computeOptions, executionId=executionId)
self.__lats = lats
self.__lons = lons
self.__z = np.array(z)
self.__parameter = parameter
self.__primary = primary
self.__secondary = secondary
self.__plot = plot
def toImage(self):
return self.__plot
def renderAsync(x, y, z, primary, secondary, parameter):
manager = Manager()
d = manager.dict()
p = Process(target=render, args=(d, x, y, z, primary, secondary, parameter))
p.start()
p.join()
return d['plot']
def createMapPlot(id, parameter):
with ResultsStorage.ResultsRetrieval() as storage:
params, stats, data = storage.retrieveResults(id)
primary = params["primary"]
secondary = params["matchup"][0]
lats = []
lons = []
z = []
field = PARAMETER_TO_FIELD[parameter] if parameter in PARAMETER_TO_FIELD else PARAMETER_TO_FIELD["sst"]
for entry in data:
for match in entry["matches"]:
if match["source"] == secondary:
if field in entry and field in match:
a = entry[field]
b = match[field]
z.append((a - b))
z.append((a - b))
else:
z.append(1.0)
z.append(1.0)
lats.append(entry["y"])
lons.append(entry["x"])
lats.append(match["y"])
lons.append(match["x"])
plot = renderAsync(lats, lons, z, primary, secondary, parameter)
r = DomsMapPlotQueryResults(lats=lats, lons=lons, z=z, parameter=parameter, primary=primary, secondary=secondary,
args=params,
details=stats, bounds=None, count=None, computeOptions=None, executionId=id, plot=plot)
return r
| dataplumber/nexus | analysis/webservice/algorithms/doms/mapplot.py | Python | apache-2.0 | 5,004 |
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from neon import NervanaObject
import numpy as np
class Cost(NervanaObject):
"""
Base class for the cost functions
"""
def __call__(self, y, t):
"""
Applies the cost function
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
OpTree: Returns the cost
"""
return self.func(y, t)
def bprop(self, y, t):
"""
Computes the derivative of the cost function
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
OpTree: Returns the derivative of the cost function
"""
return self.funcgrad(y, t)
class Metric(Cost):
"""
Base class for Metric
Meant for non-smooth costs that we just want to check on validation.
"""
def __call__(self, y, t):
"""
To implement in derived classes
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
float: Returns the metric
"""
raise NotImplementedError()
def bprop(self, y, t):
"""
Not relevant for Metric
"""
pass
class CrossEntropyBinary(Cost):
"""
Applies the binary cross entropy function
Note:
bprop assumes that shortcut is used to calculate derivative
"""
def __init__(self, scale=1):
"""
Initialize the binary cross entropy function
Args:
scale (float): amount by which to scale the backpropagated error
"""
self.scale = scale
def __call__(self, y, t):
"""
Applies the binary cross entropy cost function
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
OpTree: Returns the binary cross entropy cost
"""
a = - self.be.safelog(y) * t
b = - self.be.safelog(1 - y) * (1 - t)
return self.be.sum(a + b, axis=0)
def bprop(self, y, t):
"""
Computes the shortcut derivative of the binary cross entropy cost function
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
OpTree: Returns the (mean) shortcut derivative of the binary entropy
cost function ``(y - t) / y.shape[1]``
"""
return self.scale * (y - t)
class CrossEntropyMulti(Cost):
"""
Applies the multiclass cross entropy function
Note:
bprop assumes that shortcut is used to calculate derivative
"""
def __init__(self, scale=1, usebits=False):
"""
Initialize the multiclass cross entropy function
Args:
scale (float): amount by which to scale the backpropagated error
usebits (boolean): whether to display costs in bits or nats (default)
"""
self.scale = scale
self.logscale = np.float(1. / np.log(2.0) if usebits else 1.)
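        # log2(x) = ln(x) / ln(2), so scaling natural-log costs by 1/ln(2)
        # reports the cross entropy in bits rather than nats.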
def __call__(self, y, t):
"""
Applies the multiclass cross entropy cost function
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
OpTree: Returns the multiclass cross entropy cost
"""
return (self.be.sum(-t * self.logscale * self.be.safelog(y), axis=0))
def bprop(self, y, t):
"""
Computes the shortcut derivative of the multiclass cross entropy cost
function
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
OpTree: Returns the (mean) shortcut derivative of the multiclass
entropy cost function ``(y - t) / y.shape[1]``
"""
return self.scale * (y - t)
class SumSquared(Cost):
"""
Applies the squared error cost function
"""
def __init__(self):
"""
Initialize the squared error cost functions
"""
self.func = lambda y, t: self.be.sum(
self.be.square(y - t), axis=0) / 2.
self.funcgrad = lambda y, t: (y - t)
class MeanSquared(Cost):
"""
Applies the mean squared error cost function
"""
def __init__(self):
"""
Initialize the squared error cost functions
"""
self.func = lambda y, t: self.be.mean(
self.be.square(y - t), axis=0) / 2.
self.funcgrad = lambda y, t: (y - t)/y.shape[0]
class LogLoss(Metric):
"""
Compute logloss
"""
def __init__(self):
self.correctProbs = self.be.iobuf(1)
self.metric_names = ['LogLoss']
def __call__(self, y, t, calcrange=slice(0, None)):
"""
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
numpy array : Returns the log loss metric in numpy array,
[LogLoss]
"""
self.correctProbs[:] = self.be.sum(y * t, axis=0)
self.correctProbs[:] = -self.be.safelog(self.correctProbs)
return np.array(self.correctProbs.get()[:, calcrange].mean())
class TopKMisclassification(Metric):
"""
Compute logloss, top1, and topk misclassification error metric
"""
def __init__(self, k):
self.correctProbs = self.be.iobuf(1)
self.top1 = self.be.iobuf(1)
self.topk = self.be.iobuf(1)
self.k = k
self.metric_names = ['LogLoss', 'Top1Misclass', 'Top' + str(k) + 'Misclass']
def __call__(self, y, t, calcrange=slice(0, None)):
"""
Compute the misclassification error metric
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
numpy ary : Returns the metrics in numpy array,
[LogLoss, Top 1 misclass, Top k misclass]
"""
be = self.be
self.correctProbs[:] = be.sum(y * t, axis=0)
nSlots = self.k - be.sum((y > self.correctProbs), axis=0)
nEq = be.sum(y == self.correctProbs, axis=0)
self.topk[:] = 1. - (nSlots > 0) * ((nEq <= nSlots) * (1 - nSlots / nEq) + nSlots / nEq)
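        # nSlots = top-k slots left after entries strictly above the correct
        # probability; nEq = entries tied with it (including itself). No free
        # slot means a miss; if every tie fits it is a hit; otherwise the hit
        # is credited fractionally as nSlots / nEq.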
self.top1[:] = 1. - (be.max(y, axis=0) == self.correctProbs) / nEq
self.correctProbs[:] = -be.safelog(self.correctProbs)
return np.array((self.correctProbs.get()[:, calcrange].mean(),
self.top1.get()[:, calcrange].mean(),
self.topk.get()[:, calcrange].mean()))
class Misclassification(Metric):
"""
Compute the misclassification error metric
"""
def __init__(self):
self.preds = self.be.iobuf(1)
self.hyps = self.be.iobuf(1)
self.outputs = self.preds # Contains per record metric
self.metric_names = ['Top1Misclass']
def __call__(self, y, t, calcrange=slice(0, None)):
"""
Compute the misclassification error metric
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
float: Returns the metric
"""
# convert back from onehot and compare
self.preds[:] = self.be.argmax(y, axis=0)
self.hyps[:] = self.be.argmax(t, axis=0)
self.outputs[:] = self.be.not_equal(self.preds, self.hyps)
return self.outputs.get()[:, calcrange].mean()
class Accuracy(Metric):
"""
Compute the accuracy metric
"""
def __init__(self):
self.preds = self.be.iobuf(1)
self.hyps = self.be.iobuf(1)
self.outputs = self.preds # Contains per record metric
self.metric_names = ['Accuracy']
def __call__(self, y, t, calcrange=slice(0, None)):
"""
Compute the accuracy metric
Args:
y (Tensor or OpTree): Output of previous layer or model
t (Tensor or OpTree): True targets corresponding to y
Returns:
float: Returns the metric
"""
# convert back from onehot and compare
self.preds[:] = self.be.argmax(y, axis=0)
self.hyps[:] = self.be.argmax(t, axis=0)
self.outputs[:] = self.be.equal(self.preds, self.hyps)
return self.outputs.get()[:, calcrange].mean()
class PrecisionRecall(Metric):
"""
Compute precision and recall metrics
Arguments:
num_classes (int): Number of different output classes.
binarize (bool, optional): If True will attempt to convert the model
outputs to a one-hot encoding (in place).
Defaults to False.
epsilon (float, optional): Smoothing to apply to avoid divsion by zero.
Defaults to 1e-6.
"""
def __init__(self, num_classes, binarize=False, epsilon=1e-6):
self.outputs = self.be.empty((num_classes, 2))
self.token_stats = self.be.empty((num_classes, 3))
self.metric_names = ['Precision', 'Recall']
if binarize:
self.bin_buf = self.be.iobuf(1, dtype=np.int32)
else:
self.bin_buf = None
self.eps = epsilon
def __call__(self, y, t):
"""
Compute the precision and recall of a multi-class classification model
Args:
y (Tensor or OpTree): Output of previous layer or model (we assume
already binarized, or you need to ensure
binarize is True during construction).
t (Tensor or OpTree): True targets corresponding to y (we assume
already binarized)
Returns:
ndarray: Returns the class averaged precision (item 0) and recall (item
1) values. Per-class statistics remain in self.outputs.
"""
if self.bin_buf is not None:
self.be.argmax(y, axis=0, out=self.bin_buf)
y[:] = self.be.onehot(self.bin_buf, axis=0)
# True positives
self.token_stats[:, 0] = self.be.sum(y * t, axis=1)
# Prediction
self.token_stats[:, 1] = self.be.sum(y, axis=1)
# Targets
self.token_stats[:, 2] = self.be.sum(t, axis=1)
# Precision
self.outputs[:, 0] = self.token_stats[:, 0] / (self.token_stats[:, 1] +
self.eps)
# Recall
self.outputs[:, 1] = self.token_stats[:, 0] / (self.token_stats[:, 2] +
self.eps)
return self.outputs.get().mean(axis=0)
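        # outputs has shape (num_classes, 2); the mean over axis 0
        # macro-averages precision and recall across classes.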
| Bam4d/neon | neon/transforms/cost.py | Python | apache-2.0 | 11,917 |
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nsx_common as common
from cloudify import exceptions as cfy_exc
def get_tag(client_session, name):
return common.nsx_search(
client_session, 'body/securityTags/securityTag',
name, 'securityTag'
)
def add_tag(client_session, name, description):
    security_tag = {
        'securityTag': {
            'name': name
        }
    }
    if description:
        security_tag['securityTag']['description'] = description
    result_raw = client_session.create(
        'securityTag',
        request_body_dict=security_tag
)
common.check_raw_result(result_raw)
return result_raw['objectId']
def delete_tag(client_session, resource_id):
result = client_session.delete(
'securityTagID',
uri_parameters={'tagId': resource_id}
)
common.check_raw_result(result)
def tag_vm_to_resource_id(tag_id, vm_id):
"""Generate resource_id from tag_id/vm_id"""
if not vm_id or not tag_id:
raise cfy_exc.NonRecoverableError(
"Please recheck tag_id/vm_id"
)
return "%s|%s" % (tag_id, vm_id)
def add_tag_vm(client_session, tag_id, vm_id):
resource_id = tag_vm_to_resource_id(tag_id, vm_id)
result_raw = client_session.update(
'securityTagVM',
uri_parameters={
'tagId': tag_id,
'vmMoid': vm_id
}
)
common.check_raw_result(result_raw)
return resource_id
def delete_tag_vm(client_session, resource_id):
ids = resource_id.split("|")
if len(ids) != 2:
raise cfy_exc.NonRecoverableError(
'Unexpected error retrieving resource ID'
)
# get list of attached
attached_vms_raw = common.nsx_read(
client_session, 'body',
'securityTagVMsList', uri_parameters={'tagId': ids[0]}
)
if not attached_vms_raw:
return
attached_vms = common.nsx_struct_get_list(
attached_vms_raw, 'basicinfolist/basicinfo'
)
# delete only attached
for vm in attached_vms:
if vm.get('objectId') == ids[1]:
result_raw = client_session.delete(
'securityTagVM',
uri_parameters={
'tagId': ids[0],
'vmMoid': ids[1]
}
)
common.check_raw_result(result_raw)
break
| cloudify-cosmo/cloudify-nsx-plugin | cloudify_nsx/library/nsx_security_tag.py | Python | apache-2.0 | 2,966 |
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import netrc
from optparse import SUPPRESS_HELP
import os
import pickle
import re
import shutil
import socket
import subprocess
import sys
import time
import urlparse
import xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
from git_command import GIT, git_require
from git_refs import R_HEADS, HEAD
from main import WrapperModule
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
from project import SyncBuffer
from progress import Progress
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
pass
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumeable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
SSH Connections
---------------
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
Compatibility
~~~~~~~~~~~~~
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
try:
self.jobs = self.manifest.default.sync_j
except ManifestParseError:
self.jobs = 1
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help="continue sync even if a project fails to sync")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='be more quiet')
p.add_option('-j', '--jobs',
dest='jobs', action='store', type='int',
help="projects to fetch simultaneously (default %d)" % self.jobs)
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--no-clone-bundle',
dest='no_clone_bundle', action='store_true',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--no-tags',
dest='no_tags', action='store_true',
help="don't fetch tags")
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from a known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
default=True,
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _FetchHelper(self, opt, project, lock, fetched, pm, sem, err_event):
"""Main function of the fetch threads when jobs are > 1.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
pm: Progress instance. We will call pm.update() (with our
lock held).
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
err_event: We'll set this event in the case of an error (after printing
out info about the error).
"""
# We'll set to true once we've locked the lock.
did_lock = False
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we call sem.release().
# - We always make sure we unlock the lock if we locked it.
try:
try:
start = time.time()
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
clone_bundle=not opt.no_clone_bundle,
no_tags=opt.no_tags)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
print('error: Cannot fetch %s' % project.name, file=sys.stderr)
if opt.force_broken:
print('warn: --force-broken, continuing to sync',
file=sys.stderr)
else:
raise _FetchError()
fetched.add(project.gitdir)
pm.update()
except _FetchError:
err_event.set()
except:
err_event.set()
raise
finally:
if did_lock:
lock.release()
sem.release()
def _Fetch(self, projects, opt):
fetched = set()
pm = Progress('Fetching projects', len(projects))
if self.jobs == 1:
for project in projects:
pm.update()
if project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
clone_bundle=not opt.no_clone_bundle,
no_tags=opt.no_tags):
fetched.add(project.gitdir)
else:
print('error: Cannot fetch %s' % project.name, file=sys.stderr)
if opt.force_broken:
print('warn: --force-broken, continuing to sync', file=sys.stderr)
else:
sys.exit(1)
else:
threads = set()
lock = _threading.Lock()
sem = _threading.Semaphore(self.jobs)
err_event = _threading.Event()
for project in projects:
# Check for any errors before starting any new threads.
# ...we'll let existing threads finish, though.
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target = self._FetchHelper,
args = (opt,
project,
lock,
fetched,
pm,
sem,
err_event))
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
sys.exit(1)
pm.end()
self._fetch_times.Save()
self._GCProjects(projects)
return fetched
def _GCProjects(self, projects):
has_dash_c = git_require((1, 7, 2))
if multiprocessing and has_dash_c:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for project in projects:
project.bare_git.gc('--auto')
return
config = {'pack.threads': cpu_count / jobs if cpu_count > jobs else 1}
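    # Split the available cores across concurrent gc jobs: each job gets
    # cpu_count / jobs pack threads, falling back to a single thread when
    # there are at least as many jobs as cores.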
threads = set()
sem = _threading.Semaphore(jobs)
err_event = _threading.Event()
def GC(project):
try:
try:
project.bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except:
err_event.set()
raise
finally:
sem.release()
for project in projects:
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target=GC, args=(project,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
if err_event.isSet():
print('\nerror: Exited sync due to gc errors', file=sys.stderr)
sys.exit(1)
def UpdateProjectList(self):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.manifest.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
fd = open(file_path, 'r')
try:
old_project_paths = fd.read().split('\n')
finally:
fd.close()
for path in old_project_paths:
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
if os.path.exists(self.manifest.topdir + '/' + path):
project = Project(
manifest = self.manifest,
name = path,
remote = RemoteSpec('origin'),
gitdir = os.path.join(self.manifest.topdir,
path, '.git'),
worktree = os.path.join(self.manifest.topdir, path),
relpath = path,
revisionExpr = 'HEAD',
revisionId = None,
groups = None)
if project.IsDirty():
print('error: Cannot remove project "%s": uncommitted changes '
      'are present' % project.relpath, file=sys.stderr)
print(' commit changes, then run sync again',
file=sys.stderr)
return -1
else:
print('Deleting obsolete path %s' % project.worktree,
file=sys.stderr)
shutil.rmtree(project.worktree)
# Try deleting parent subdirs if they are empty
project_dir = os.path.dirname(project.worktree)
while project_dir != self.manifest.topdir:
try:
os.rmdir(project_dir)
except OSError:
break
project_dir = os.path.dirname(project_dir)
new_project_paths.sort()
fd = open(file_path, 'w')
try:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
finally:
fd.close()
return 0
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) / 3)
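      # Cap parallelism by the soft file-descriptor limit: five fds are held
      # in reserve and roughly three are budgeted per fetch worker
      # (presumably for the ssh tunnel and pack streams).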
if opt.network_only and opt.detach_head:
print('error: cannot combine -n and -d', file=sys.stderr)
sys.exit(1)
if opt.network_only and opt.local_only:
print('error: cannot combine -n and -l', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_sync:
print('error: cannot combine -m and -s', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_tag:
print('error: cannot combine -m and -t', file=sys.stderr)
sys.exit(1)
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
print('error: -u and -p may only be combined with -s or -t',
file=sys.stderr)
sys.exit(1)
if None in [opt.manifest_server_username, opt.manifest_server_password]:
print('error: both -u and -p must be given', file=sys.stderr)
sys.exit(1)
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
if opt.smart_sync or opt.smart_tag:
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
      'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not '@' in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
print('.netrc file does not exist or could not be opened',
file=sys.stderr)
else:
try:
parse_result = urlparse.urlparse(manifest_server)
if parse_result.hostname:
username, _account, password = \
info.authenticators(parse_result.hostname)
except TypeError:
# TypeError is raised when the given hostname is not present
# in the .netrc file.
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
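          # e.g. 'http://example.com/manifest' (illustrative URL) becomes
          # 'http://user:pass@example.com/manifest'; only the first '://'
          # is rewritten.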
try:
server = xmlrpclib.Server(manifest_server)
if opt.smart_sync:
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
env = os.environ.copy()
if (env.has_key('TARGET_PRODUCT') and
env.has_key('TARGET_BUILD_VARIANT')):
target = '%s-%s' % (env['TARGET_PRODUCT'],
env['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = "smart_sync_override.xml"
manifest_path = os.path.join(self.manifest.manifestProject.worktree,
manifest_name)
try:
f = open(manifest_path, 'w')
try:
f.write(manifest_str)
finally:
f.close()
except IOError:
print('error: cannot write manifest to %s' % manifest_path,
file=sys.stderr)
sys.exit(1)
self.manifest.Override(manifest_name)
else:
print('error: %s' % manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpclib.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpclib.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
rp = self.manifest.repoProject
rp.PreSync()
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.local_only:
mp.Sync_NetworkHalf(quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
no_tags=opt.no_tags)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
mp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
self.manifest._Unload()
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt)
_PostRepoFetch(rp, opt.no_repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self.manifest._Unload()
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
      # Stop fetching repos that are permanently missing: if the set of
      # missing repos is unchanged since the last fetch, break out.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt))
if self.manifest.IsMirror:
# bail out now, we have no working tree
return
if self.UpdateProjectList():
sys.exit(1)
syncbuf = SyncBuffer(mp.config,
detach_head = opt.detach_head)
pm = Progress('Syncing work tree', len(all_projects))
for project in all_projects:
pm.update()
if project.worktree:
project.Sync_LocalHalf(syncbuf)
pm.end()
print(file=sys.stderr)
if not syncbuf.Finish():
sys.exit(1)
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = WrapperModule()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects.values():
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if no_repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print('warning: GnuPG was not available during last "repo init"\n'
'warning: Cannot automatically authenticate repo.',
file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
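    # 'git describe' output such as 'v1.2-5-gdeadbeef' (tag, commits since
    # tag, abbreviated hash) means HEAD is not exactly at a signed tag, so
    # there is nothing to verify against.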
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir.encode()
env['GNUPGHOME'] = gpg_dir.encode()
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
print(file=sys.stderr)
print(out, file=sys.stderr)
print(err, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repopickle_fetchtimes')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
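    # Exponential moving average: with alpha = 0.5 the stored fetch time is
    # half the newest sample plus half the history (e.g. old=10s, t=20s
    # stores 15s).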
self._times[name] = (a*t) + ((1-a) * old)
def _Load(self):
if self._times is None:
try:
f = open(self._path)
except IOError:
self._times = {}
return self._times
try:
try:
self._times = pickle.load(f)
except IOError:
try:
os.remove(self._path)
except OSError:
pass
self._times = {}
finally:
f.close()
return self._times
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
f = open(self._path, 'wb')
try:
pickle.dump(self._times, f)
except (IOError, OSError, pickle.PickleError):
try:
os.remove(self._path)
except OSError:
pass
finally:
f.close()
| linux-knight/repo | subcmds/sync.py | Python | apache-2.0 | 26,550 |
#coding:utf-8
'''
New Integration Test for zstack cloudformation.
Create a Basic vm with eip.
Covered resources: VM, VIP, EIP
@author: Lei Liu
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_stack as resource_stack_ops
import zstackwoodpecker.operations.resource_operations as res_ops
def test():
test_util.test_dsc("Test Resource template Apis")
cond = res_ops.gen_query_conditions('status', '=', 'Ready')
cond = res_ops.gen_query_conditions('state', '=', 'Enabled', cond)
cond = res_ops.gen_query_conditions('system', '=', 'false', cond)
image_queried = res_ops.query_resource(res_ops.IMAGE, cond)
cond = res_ops.gen_query_conditions("category", '=', "Public")
l3_pub_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)
cond = res_ops.gen_query_conditions("category", '=', "Private")
cond = res_ops.gen_query_conditions('networkServices.networkServiceType', '=', 'EIP', cond)
l3_pri_queried = res_ops.query_resource(res_ops.L3_NETWORK, cond)
cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
cond = res_ops.gen_query_conditions('type', '=', 'UserVm', cond)
instance_offering_queried = res_ops.query_resource(res_ops.INSTANCE_OFFERING, cond)
resource_stack_option = test_util.ResourceStackOption()
resource_stack_option.set_name("Create_STACK")
resource_stack_option.set_rollback("true")
templateContent = '''
{
"ZStackTemplateFormatVersion": "2018-06-18",
"Description": "Just create a VM with eip",
"Parameters": {
"InstanceOfferingUuid": {
"Type": "String",
"Label": "vm instance offering"
},
"ImageUuid":{
"Type": "String"
},
"PrivateNetworkUuid":{
"Type": "String"
},
"PublicNetworkUuid":{
"Type": "String"
}
},
"Resources": {
"VmInstance": {
"Type": "ZStack::Resource::VmInstance",
"Properties": {
"name": {"Fn::Join":["-",[{"Ref":"ZStack::StackName"},"VM"]]},
"instanceOfferingUuid": {"Ref":"InstanceOfferingUuid"},
"imageUuid":{"Ref":"ImageUuid"},
"l3NetworkUuids":[{"Ref":"PrivateNetworkUuid"}]
}
},
"VIP": {
"Type": "ZStack::Resource::Vip",
"Properties": {
"name": {"Fn::Join":["-",[{"Ref":"ZStack::StackName"},"VIP"]]},
"l3NetworkUuid":{"Ref":"PublicNetworkUuid"}
}
},
"EIP":{
"Type": "ZStack::Resource::Eip",
"Properties": {
"name": {"Fn::Join":["-",[{"Ref":"ZStack::StackName"},"EIP"]]},
"vipUuid":{"Fn::GetAtt":["VIP","uuid"]},
"vmNicUuid":{"Fn::GetAtt":[{"Fn::Select":[0,{"Fn::GetAtt":["VmInstance","vmNics"]}]},"uuid"]}
}
}
},
"Outputs": {
"VmInstance": {
"Value": {
"Ref": "VmInstance"
}
}
}
}
'''
#1.create resource stack
test_util.test_logger('{"PrivateNetworkUuid":"%s","PublicNetworkUuid":"%s","ImageUuid":"%s","InstanceOfferingUuid":"%s"}' % (l3_pri_queried[0].uuid, l3_pub_queried[0].uuid, image_queried[0].uuid, instance_offering_queried[0].uuid))
parameter = '{"PrivateNetworkUuid":"%s","PublicNetworkUuid":"%s","ImageUuid":"%s","InstanceOfferingUuid":"%s"}' % (l3_pri_queried[0].uuid, l3_pub_queried[0].uuid, image_queried[0].uuid, instance_offering_queried[0].uuid)
resource_stack_option.set_templateContent(templateContent)
resource_stack_option.set_parameters(parameter)
preview_resource_stack = resource_stack_ops.preview_resource_stack(resource_stack_option)
resource_stack = resource_stack_ops.create_resource_stack(resource_stack_option)
#2.query resource stack
cond = res_ops.gen_query_conditions('uuid', '=', resource_stack.uuid)
resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond)
cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-VM')
vm_queried = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-VIP')
vip_queried = res_ops.query_resource(res_ops.VIP, cond)
cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-EIP')
eip_queried = res_ops.query_resource(res_ops.EIP, cond)
if len(resource_stack_queried) == 0:
test_util.test_fail("Fail to query resource stack")
if resource_stack_queried[0].status == 'Created':
if len(vm_queried) == 0 or len(vip_queried) == 0 or len(eip_queried) == 0:
test_util.test_fail("Fail to create all resource when resource stack status is Created")
elif len(vm_queried) != 0 or len(vip_queried) != 0 or len(eip_queried) != 0:
test_util.test_fail("Fail to delete all resource when resource stack status is Rollbacked or Deleted")
#3.get resource from resource stack
resource = resource_stack_ops.get_resource_from_resource_stack(resource_stack.uuid)
cond = res_ops.gen_query_conditions('name', '=', "vrouter")
vrouter_provider = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, cond)
cond = res_ops.gen_query_conditions('name', '=', "virtualrouter")
virtualrouter_provider = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, cond)
networkServiceProviderUuid = map(lambda x: x.networkServiceProviderUuid, l3_pri_queried[0].networkServices)
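    # If the private L3 network is served by the vrouter/virtualrouter
    # provider, creating the stack also brings up a virtual-router appliance
    # VM, so the stack is expected to own one extra resource (4 instead of 3).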
if vrouter_provider[0].uuid in networkServiceProviderUuid or virtualrouter_provider[0].uuid in networkServiceProviderUuid:
resource_num = 4
else:
resource_num = 3
if resource == None or len(resource) != resource_num:
test_util.test_fail("Fail to get resource from resource_stack")
#4.query event from resource stack
cond = res_ops.gen_query_conditions('stackUuid', '=', resource_stack.uuid)
event = res_ops.query_event_from_resource_stack(cond)
if event == None or len(event) != 6:
test_util.test_fail("Fail to get event from resource_stack")
#5.delete resource stack
resource_stack_ops.delete_resource_stack(resource_stack.uuid)
cond = res_ops.gen_query_conditions('uuid', '=', resource_stack.uuid)
resource_stack_queried = res_ops.query_resource(res_ops.RESOURCE_STACK, cond)
cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-VM')
vm_queried = res_ops.query_resource(res_ops.VM_INSTANCE, cond)
cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-VIP')
vip_queried = res_ops.query_resource(res_ops.VIP, cond)
cond = res_ops.gen_query_conditions('name', '=', 'Create_STACK-EIP')
eip_queried = res_ops.query_resource(res_ops.EIP, cond)
if len(resource_stack_queried) != 0 :
test_util.test_fail("Fail to delete resource stack")
elif len(vm_queried) != 0 or len(vip_queried) != 0 or len(eip_queried) != 0:
test_util.test_fail("Fail to delete resource when resource stack is deleted")
test_util.test_pass('Create Resource Stack Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
print "Ignore cleanup"
| zstackio/zstack-woodpecker | integrationtest/vm/zstackformation/test_create_eip.py | Python | apache-2.0 | 7,061 |
# Copyright 2015 Open vStorage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package contains test related stuff
"""
| tcpcloud/openvstorage | ovs/extensions/db/arakoon/tests/__init__.py | Python | apache-2.0 | 630 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import helpers
import tsqa.test_cases
import tsqa.utils
import tsqa.endpoint
class TestRedirection(helpers.EnvironmentCase, tsqa.test_cases.HTTPBinCase):
@classmethod
def setUpEnv(cls, env):
cls.configs['records.config']['CONFIG'].update({
'proxy.config.http.redirection_enabled': 1,
'proxy.config.http.number_of_redirections': 10
})
cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}'.format(cls.http_endpoint.address[1]))
def test_redirection(self):
server_ports = self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']
        # By default, Requests follows Location redirects;
        # disable that behaviour with the allow_redirects parameter.
r = requests.get('http://127.0.0.1:{0}/redirect/9'.format(server_ports), allow_redirects=False)
self.assertEqual(r.status_code, 200)
r = requests.get('http://127.0.0.1:{0}/redirect/10'.format(server_ports), allow_redirects=False)
self.assertEqual(r.status_code, 302)
| chenglongwei/trafficserver | ci/tsqa/tests/test_redirection.py | Python | apache-2.0 | 1,880 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import uuid
from datetime import date, datetime
from typing import Any, NamedTuple
from unittest import mock
import jinja2
import pendulum
import pytest
from airflow.decorators import task as task_decorator
from airflow.exceptions import AirflowException
from airflow.lineage.entities import File
from airflow.models import DAG
from airflow.models.baseoperator import (
BaseOperator,
BaseOperatorMeta,
MappedOperator,
chain,
cross_downstream,
)
from airflow.utils.context import Context
from airflow.utils.edgemodifier import Label
from airflow.utils.task_group import TaskGroup
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.weight_rule import WeightRule
from tests.models import DEFAULT_DATE
from tests.test_utils.config import conf_vars
from tests.test_utils.mock_operators import DeprecatedOperator, MockOperator
class ClassWithCustomAttributes:
"""Class for testing purpose: allows to create objects with custom attributes in one single statement."""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f"{ClassWithCustomAttributes.__name__}({str(self.__dict__)})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
# Objects with circular references (for testing purposes)
object1 = ClassWithCustomAttributes(attr="{{ foo }}_1", template_fields=["ref"])
object2 = ClassWithCustomAttributes(attr="{{ foo }}_2", ref=object1, template_fields=["ref"])
setattr(object1, 'ref', object2)
# Essentially similar to airflow.models.baseoperator.BaseOperator
class DummyClass(metaclass=BaseOperatorMeta):
def __init__(self, test_param, params=None, default_args=None):
self.test_param = test_param
def set_xcomargs_dependencies(self):
...
class DummySubClass(DummyClass):
def __init__(self, test_sub_param, **kwargs):
super().__init__(**kwargs)
self.test_sub_param = test_sub_param
class MockNamedTuple(NamedTuple):
var1: str
var2: str
class TestBaseOperator:
def test_apply(self):
dummy = DummyClass(test_param=True)
assert dummy.test_param
with pytest.raises(AirflowException, match='Argument.*test_param.*required'):
DummySubClass(test_sub_param=True)
def test_default_args(self):
default_args = {'test_param': True}
dummy_class = DummyClass(default_args=default_args)
assert dummy_class.test_param
default_args = {'test_param': True, 'test_sub_param': True}
dummy_subclass = DummySubClass(default_args=default_args)
assert dummy_class.test_param
assert dummy_subclass.test_sub_param
default_args = {'test_param': True}
dummy_subclass = DummySubClass(default_args=default_args, test_sub_param=True)
assert dummy_class.test_param
assert dummy_subclass.test_sub_param
with pytest.raises(AirflowException, match='Argument.*test_sub_param.*required'):
DummySubClass(default_args=default_args)
def test_incorrect_default_args(self):
default_args = {'test_param': True, 'extra_param': True}
dummy_class = DummyClass(default_args=default_args)
assert dummy_class.test_param
default_args = {'random_params': True}
with pytest.raises(AirflowException, match='Argument.*test_param.*required'):
DummyClass(default_args=default_args)
def test_incorrect_priority_weight(self):
error_msg = "`priority_weight` for task 'test_op' only accepts integers, received '<class 'str'>'."
with pytest.raises(AirflowException, match=error_msg):
BaseOperator(task_id="test_op", priority_weight="2")
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
msg = r'Invalid arguments were passed to BaseOperator \(task_id: test_illegal_args\)'
with conf_vars({('operators', 'allow_illegal_arguments'): 'True'}):
with pytest.warns(PendingDeprecationWarning, match=msg):
BaseOperator(
task_id='test_illegal_args',
illegal_argument_1234='hello?',
)
def test_illegal_args_forbidden(self):
"""
Tests that operators raise exceptions on illegal arguments when
illegal arguments are not allowed.
"""
msg = r'Invalid arguments were passed to BaseOperator \(task_id: test_illegal_args\)'
with pytest.raises(AirflowException, match=msg):
BaseOperator(
task_id='test_illegal_args',
illegal_argument_1234='hello?',
)
@pytest.mark.parametrize(
("content", "context", "expected_output"),
[
("{{ foo }}", {"foo": "bar"}, "bar"),
(["{{ foo }}_1", "{{ foo }}_2"], {"foo": "bar"}, ["bar_1", "bar_2"]),
(("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, ("bar_1", "bar_2")),
(
{"key1": "{{ foo }}_1", "key2": "{{ foo }}_2"},
{"foo": "bar"},
{"key1": "bar_1", "key2": "bar_2"},
),
(
{"key_{{ foo }}_1": 1, "key_2": "{{ foo }}_2"},
{"foo": "bar"},
{"key_{{ foo }}_1": 1, "key_2": "bar_2"},
),
(date(2018, 12, 6), {"foo": "bar"}, date(2018, 12, 6)),
(datetime(2018, 12, 6, 10, 55), {"foo": "bar"}, datetime(2018, 12, 6, 10, 55)),
(MockNamedTuple("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, MockNamedTuple("bar_1", "bar_2")),
({"{{ foo }}_1", "{{ foo }}_2"}, {"foo": "bar"}, {"bar_1", "bar_2"}),
(None, {}, None),
([], {}, []),
({}, {}, {}),
(
# check nested fields can be templated
ClassWithCustomAttributes(att1="{{ foo }}_1", att2="{{ foo }}_2", template_fields=["att1"]),
{"foo": "bar"},
ClassWithCustomAttributes(att1="bar_1", att2="{{ foo }}_2", template_fields=["att1"]),
),
(
# check deep nested fields can be templated
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ foo }}_1", att2="{{ foo }}_2", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ foo }}_3", att4="{{ foo }}_4", template_fields=["att3"]
),
template_fields=["nested1"],
),
{"foo": "bar"},
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="bar_1", att2="{{ foo }}_2", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ foo }}_3", att4="{{ foo }}_4", template_fields=["att3"]
),
template_fields=["nested1"],
),
),
(
# check null value on nested template field
ClassWithCustomAttributes(att1=None, template_fields=["att1"]),
{},
ClassWithCustomAttributes(att1=None, template_fields=["att1"]),
),
(
# check there is no RecursionError on circular references
object1,
{"foo": "bar"},
object1,
),
# By default, Jinja2 drops one (single) trailing newline
("{{ foo }}\n\n", {"foo": "bar"}, "bar\n"),
],
)
def test_render_template(self, content, context, expected_output):
"""Test render_template given various input types."""
task = BaseOperator(task_id="op1")
result = task.render_template(content, context)
assert result == expected_output
@pytest.mark.parametrize(
("content", "context", "expected_output"),
[
("{{ foo }}", {"foo": "bar"}, "bar"),
("{{ foo }}", {"foo": ["bar1", "bar2"]}, ["bar1", "bar2"]),
(["{{ foo }}", "{{ foo | length}}"], {"foo": ["bar1", "bar2"]}, [['bar1', 'bar2'], 2]),
(("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, ("bar_1", "bar_2")),
("{{ ds }}", {"ds": date(2018, 12, 6)}, date(2018, 12, 6)),
(datetime(2018, 12, 6, 10, 55), {"foo": "bar"}, datetime(2018, 12, 6, 10, 55)),
("{{ ds }}", {"ds": datetime(2018, 12, 6, 10, 55)}, datetime(2018, 12, 6, 10, 55)),
(MockNamedTuple("{{ foo }}_1", "{{ foo }}_2"), {"foo": "bar"}, MockNamedTuple("bar_1", "bar_2")),
(
("{{ foo }}", "{{ foo.isoformat() }}"),
{"foo": datetime(2018, 12, 6, 10, 55)},
(datetime(2018, 12, 6, 10, 55), '2018-12-06T10:55:00'),
),
(None, {}, None),
([], {}, []),
({}, {}, {}),
],
)
def test_render_template_with_native_envs(self, content, context, expected_output):
"""Test render_template given various input types with Native Python types"""
with DAG("test-dag", start_date=DEFAULT_DATE, render_template_as_native_obj=True):
task = BaseOperator(task_id="op1")
result = task.render_template(content, context)
assert result == expected_output
def test_render_template_fields(self):
"""Verify if operator attributes are correctly templated."""
task = MockOperator(task_id="op1", arg1="{{ foo }}", arg2="{{ bar }}")
# Assert nothing is templated yet
assert task.arg1 == "{{ foo }}"
assert task.arg2 == "{{ bar }}"
# Trigger templating and verify if attributes are templated correctly
task.render_template_fields(context={"foo": "footemplated", "bar": "bartemplated"})
assert task.arg1 == "footemplated"
assert task.arg2 == "bartemplated"
@pytest.mark.parametrize(("content",), [(object(),), (uuid.uuid4(),)])
def test_render_template_fields_no_change(self, content):
"""Tests if non-templatable types remain unchanged."""
task = BaseOperator(task_id="op1")
result = task.render_template(content, {"foo": "bar"})
assert content is result
def test_nested_template_fields_declared_must_exist(self):
"""Test render_template when a nested template field is missing."""
task = BaseOperator(task_id="op1")
with pytest.raises(AttributeError) as ctx:
task.render_template(ClassWithCustomAttributes(template_fields=["missing_field"]), {})
assert "'ClassWithCustomAttributes' object has no attribute 'missing_field'" == str(ctx.value)
def test_jinja_invalid_expression_is_just_propagated(self):
"""Test render_template propagates Jinja invalid expression errors."""
task = BaseOperator(task_id="op1")
with pytest.raises(jinja2.exceptions.TemplateSyntaxError):
task.render_template("{{ invalid expression }}", {})
@mock.patch("airflow.templates.SandboxedEnvironment", autospec=True)
def test_jinja_env_creation(self, mock_jinja_env):
"""Verify if a Jinja environment is created only once when templating."""
task = MockOperator(task_id="op1", arg1="{{ foo }}", arg2="{{ bar }}")
task.render_template_fields(context={"foo": "whatever", "bar": "whatever"})
assert mock_jinja_env.call_count == 1
def test_default_resources(self):
task = BaseOperator(task_id="default-resources")
assert task.resources is None
def test_custom_resources(self):
task = BaseOperator(task_id="custom-resources", resources={"cpus": 1, "ram": 1024})
assert task.resources.cpus.qty == 1
assert task.resources.ram.qty == 1024
def test_default_email_on_actions(self):
test_task = BaseOperator(task_id='test_default_email_on_actions')
assert test_task.email_on_retry is True
assert test_task.email_on_failure is True
def test_email_on_actions(self):
test_task = BaseOperator(
task_id='test_default_email_on_actions', email_on_retry=False, email_on_failure=True
)
assert test_task.email_on_retry is False
assert test_task.email_on_failure is True
def test_cross_downstream(self):
"""Test if all dependencies between tasks are all set correctly."""
dag = DAG(dag_id="test_dag", start_date=datetime.now())
start_tasks = [BaseOperator(task_id=f"t{i}", dag=dag) for i in range(1, 4)]
end_tasks = [BaseOperator(task_id=f"t{i}", dag=dag) for i in range(4, 7)]
cross_downstream(from_tasks=start_tasks, to_tasks=end_tasks)
for start_task in start_tasks:
assert set(start_task.get_direct_relatives(upstream=False)) == set(end_tasks)
# Begin test for `XComArgs`
xstart_tasks = [
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(1, 4)
]
xend_tasks = [
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(4, 7)
]
cross_downstream(from_tasks=xstart_tasks, to_tasks=xend_tasks)
for xstart_task in xstart_tasks:
assert set(xstart_task.operator.get_direct_relatives(upstream=False)) == {
xend_task.operator for xend_task in xend_tasks
}
def test_chain(self):
dag = DAG(dag_id='test_chain', start_date=datetime.now())
# Begin test for classic operators with `EdgeModifiers`
[label1, label2] = [Label(label=f"label{i}") for i in range(1, 3)]
[op1, op2, op3, op4, op5, op6] = [BaseOperator(task_id=f't{i}', dag=dag) for i in range(1, 7)]
chain(op1, [label1, label2], [op2, op3], [op4, op5], op6)
assert {op2, op3} == set(op1.get_direct_relatives(upstream=False))
assert [op4] == op2.get_direct_relatives(upstream=False)
assert [op5] == op3.get_direct_relatives(upstream=False)
assert {op4, op5} == set(op6.get_direct_relatives(upstream=True))
assert {"label": "label1"} == dag.get_edge_info(
upstream_task_id=op1.task_id, downstream_task_id=op2.task_id
)
assert {"label": "label2"} == dag.get_edge_info(
upstream_task_id=op1.task_id, downstream_task_id=op3.task_id
)
# Begin test for `XComArgs` with `EdgeModifiers`
[xlabel1, xlabel2] = [Label(label=f"xcomarg_label{i}") for i in range(1, 3)]
[xop1, xop2, xop3, xop4, xop5, xop6] = [
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(1, 7)
]
chain(xop1, [xlabel1, xlabel2], [xop2, xop3], [xop4, xop5], xop6)
assert {xop2.operator, xop3.operator} == set(xop1.operator.get_direct_relatives(upstream=False))
assert [xop4.operator] == xop2.operator.get_direct_relatives(upstream=False)
assert [xop5.operator] == xop3.operator.get_direct_relatives(upstream=False)
assert {xop4.operator, xop5.operator} == set(xop6.operator.get_direct_relatives(upstream=True))
assert {"label": "xcomarg_label1"} == dag.get_edge_info(
upstream_task_id=xop1.operator.task_id, downstream_task_id=xop2.operator.task_id
)
assert {"label": "xcomarg_label2"} == dag.get_edge_info(
upstream_task_id=xop1.operator.task_id, downstream_task_id=xop3.operator.task_id
)
# Begin test for `TaskGroups`
[tg1, tg2] = [TaskGroup(group_id=f"tg{i}", dag=dag) for i in range(1, 3)]
[op1, op2] = [BaseOperator(task_id=f'task{i}', dag=dag) for i in range(1, 3)]
[tgop1, tgop2] = [
BaseOperator(task_id=f'task_group_task{i}', task_group=tg1, dag=dag) for i in range(1, 3)
]
[tgop3, tgop4] = [
BaseOperator(task_id=f'task_group_task{i}', task_group=tg2, dag=dag) for i in range(1, 3)
]
chain(op1, tg1, tg2, op2)
assert {tgop1, tgop2} == set(op1.get_direct_relatives(upstream=False))
assert {tgop3, tgop4} == set(tgop1.get_direct_relatives(upstream=False))
assert {tgop3, tgop4} == set(tgop2.get_direct_relatives(upstream=False))
assert [op2] == tgop3.get_direct_relatives(upstream=False)
assert [op2] == tgop4.get_direct_relatives(upstream=False)
def test_chain_not_support_type(self):
dag = DAG(dag_id='test_chain', start_date=datetime.now())
[op1, op2] = [BaseOperator(task_id=f't{i}', dag=dag) for i in range(1, 3)]
with pytest.raises(TypeError):
chain([op1, op2], 1)
# Begin test for `XComArgs`
[xop1, xop2] = [
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(1, 3)
]
with pytest.raises(TypeError):
chain([xop1, xop2], 1)
# Begin test for `EdgeModifiers`
with pytest.raises(TypeError):
chain([Label("labe1"), Label("label2")], 1)
# Begin test for `TaskGroups`
[tg1, tg2] = [TaskGroup(group_id=f"tg{i}", dag=dag) for i in range(1, 3)]
with pytest.raises(TypeError):
chain([tg1, tg2], 1)
def test_chain_different_length_iterable(self):
dag = DAG(dag_id='test_chain', start_date=datetime.now())
[label1, label2] = [Label(label=f"label{i}") for i in range(1, 3)]
[op1, op2, op3, op4, op5] = [BaseOperator(task_id=f't{i}', dag=dag) for i in range(1, 6)]
with pytest.raises(AirflowException):
chain([op1, op2], [op3, op4, op5])
with pytest.raises(AirflowException):
chain([op1, op2, op3], [label1, label2])
# Begin test for `XComArgs` with `EdgeModifiers`
[label3, label4] = [Label(label=f"xcomarg_label{i}") for i in range(1, 3)]
[xop1, xop2, xop3, xop4, xop5] = [
task_decorator(task_id=f"xcomarg_task{i}", python_callable=lambda: None, dag=dag)()
for i in range(1, 6)
]
with pytest.raises(AirflowException):
chain([xop1, xop2], [xop3, xop4, xop5])
with pytest.raises(AirflowException):
chain([xop1, xop2, xop3], [label1, label2])
# Begin test for `TaskGroups`
[tg1, tg2, tg3, tg4, tg5] = [TaskGroup(group_id=f"tg{i}", dag=dag) for i in range(1, 6)]
with pytest.raises(AirflowException):
chain([tg1, tg2], [tg3, tg4, tg5])
def test_lineage_composition(self):
"""
Test composition with lineage
"""
inlet = File(url="in")
outlet = File(url="out")
dag = DAG("test-dag", start_date=DEFAULT_DATE)
task1 = BaseOperator(task_id="op1", dag=dag)
task2 = BaseOperator(task_id="op2", dag=dag)
# mock
task1.supports_lineage = True
# note: operator precedence still applies
inlet > task1 | (task2 > outlet)
assert task1.get_inlet_defs() == [inlet]
assert task2.get_inlet_defs() == [task1.task_id]
assert task2.get_outlet_defs() == [outlet]
fail = ClassWithCustomAttributes()
with pytest.raises(TypeError):
fail > task1
with pytest.raises(TypeError):
task1 > fail
with pytest.raises(TypeError):
fail | task1
with pytest.raises(TypeError):
task1 | fail
task3 = BaseOperator(task_id="op3", dag=dag)
extra = File(url="extra")
[inlet, extra] > task3
assert task3.get_inlet_defs() == [inlet, extra]
task1.supports_lineage = False
with pytest.raises(ValueError):
task1 | task3
assert task2.supports_lineage is False
task2 | task3
assert len(task3.get_inlet_defs()) == 3
task4 = BaseOperator(task_id="op4", dag=dag)
task4 > [inlet, outlet, extra]
assert task4.get_outlet_defs() == [inlet, outlet, extra]
def test_warnings_are_properly_propagated(self):
with pytest.warns(DeprecationWarning) as warnings:
DeprecatedOperator(task_id="test")
assert len(warnings) == 1
warning = warnings[0]
# Here we check that the trace points to the place
# where the deprecated class was used
assert warning.filename == __file__
def test_pre_execute_hook(self):
hook = mock.MagicMock()
op = BaseOperator(task_id="test_task", pre_execute=hook)
op_copy = op.prepare_for_execution()
op_copy.pre_execute({})
assert hook.called
def test_post_execute_hook(self):
hook = mock.MagicMock()
op = BaseOperator(task_id="test_task", post_execute=hook)
op_copy = op.prepare_for_execution()
op_copy.post_execute({})
assert hook.called
def test_task_naive_datetime(self):
naive_datetime = DEFAULT_DATE.replace(tzinfo=None)
op_no_dag = BaseOperator(
task_id='test_task_naive_datetime', start_date=naive_datetime, end_date=naive_datetime
)
assert op_no_dag.start_date.tzinfo
assert op_no_dag.end_date.tzinfo
def test_setattr_performs_no_custom_action_at_execute_time(self):
op = MockOperator(task_id="test_task")
op_copy = op.prepare_for_execution()
with mock.patch("airflow.models.baseoperator.BaseOperator.set_xcomargs_dependencies") as method_mock:
op_copy.execute({})
assert method_mock.call_count == 0
def test_upstream_is_set_when_template_field_is_xcomarg(self):
with DAG("xcomargs_test", default_args={"start_date": datetime.today()}):
op1 = BaseOperator(task_id="op1")
op2 = MockOperator(task_id="op2", arg1=op1.output)
assert op1 in op2.upstream_list
assert op2 in op1.downstream_list
def test_set_xcomargs_dependencies_works_recursively(self):
with DAG("xcomargs_test", default_args={"start_date": datetime.today()}):
op1 = BaseOperator(task_id="op1")
op2 = BaseOperator(task_id="op2")
op3 = MockOperator(task_id="op3", arg1=[op1.output, op2.output])
op4 = MockOperator(task_id="op4", arg1={"op1": op1.output, "op2": op2.output})
assert op1 in op3.upstream_list
assert op2 in op3.upstream_list
assert op1 in op4.upstream_list
assert op2 in op4.upstream_list
def test_set_xcomargs_dependencies_works_when_set_after_init(self):
with DAG(dag_id='xcomargs_test', default_args={"start_date": datetime.today()}):
op1 = BaseOperator(task_id="op1")
op2 = MockOperator(task_id="op2")
op2.arg1 = op1.output # value is set after init
assert op1 in op2.upstream_list
def test_set_xcomargs_dependencies_error_when_outside_dag(self):
with pytest.raises(AirflowException):
op1 = BaseOperator(task_id="op1")
MockOperator(task_id="op2", arg1=op1.output)
def test_invalid_trigger_rule(self):
with pytest.raises(
AirflowException,
match=(
f"The trigger_rule must be one of {TriggerRule.all_triggers()},"
"'.op1'; received 'some_rule'."
),
):
BaseOperator(task_id="op1", trigger_rule="some_rule")
@pytest.mark.parametrize(("rule"), [("dummy"), (TriggerRule.DUMMY)])
def test_replace_dummy_trigger_rule(self, rule):
with pytest.warns(
DeprecationWarning, match="dummy Trigger Rule is deprecated. Please use `TriggerRule.ALWAYS`."
):
op1 = BaseOperator(task_id="op1", trigger_rule=rule)
assert op1.trigger_rule == TriggerRule.ALWAYS
def test_weight_rule_default(self):
op = BaseOperator(task_id="test_task")
assert WeightRule.DOWNSTREAM == op.weight_rule
def test_weight_rule_override(self):
op = BaseOperator(task_id="test_task", weight_rule="upstream")
assert WeightRule.UPSTREAM == op.weight_rule
def test_init_subclass_args():
class InitSubclassOp(BaseOperator):
_class_arg: Any
def __init_subclass__(cls, class_arg=None, **kwargs) -> None:
cls._class_arg = class_arg
super().__init_subclass__()
def execute(self, context: Context):
self.context_arg = context
class_arg = "foo"
context = {"key": "value"}
class ConcreteSubclassOp(InitSubclassOp, class_arg=class_arg):
pass
task = ConcreteSubclassOp(task_id="op1")
task_copy = task.prepare_for_execution()
task_copy.execute(context)
assert task_copy._class_arg == class_arg
assert task_copy.context_arg == context
def test_operator_retries_invalid(dag_maker):
with pytest.raises(AirflowException) as ctx:
with dag_maker():
BaseOperator(
task_id='test_illegal_args',
retries='foo',
)
assert str(ctx.value) == "'retries' type must be int, not str"
@pytest.mark.parametrize(
("retries", "expected"),
[
pytest.param(None, [], id="None"),
pytest.param(5, [], id="5"),
pytest.param(
"1",
[
(
"airflow.models.baseoperator.BaseOperator",
logging.WARNING,
"Implicitly converting 'retries' for task test_dag.test_illegal_args from '1' to int",
),
],
id="str",
),
],
)
def test_operator_retries(caplog, dag_maker, retries, expected):
with caplog.at_level(logging.WARNING):
with dag_maker():
BaseOperator(
task_id='test_illegal_args',
retries=retries,
)
assert caplog.record_tuples == expected
def test_task_mapping_with_dag():
with DAG("test-dag", start_date=DEFAULT_DATE) as dag:
task1 = BaseOperator(task_id="op1")
literal = ['a', 'b', 'c']
mapped = MockOperator(task_id='task_2').map(arg2=literal)
finish = MockOperator(task_id="finish")
task1 >> mapped >> finish
assert task1.downstream_list == [mapped]
assert mapped in dag.tasks
# At parse time there should only be three tasks!
assert len(dag.tasks) == 3
assert finish.upstream_list == [mapped]
assert mapped.downstream_list == [finish]
def test_task_mapping_without_dag_context():
with DAG("test-dag", start_date=DEFAULT_DATE) as dag:
task1 = BaseOperator(task_id="op1")
literal = ['a', 'b', 'c']
mapped = MockOperator(task_id='task_2').map(arg2=literal)
task1 >> mapped
assert isinstance(mapped, MappedOperator)
assert mapped in dag.tasks
assert task1.downstream_list == [mapped]
assert mapped in dag.tasks
# At parse time there should only be two tasks!
assert len(dag.tasks) == 2
def test_task_mapping_default_args():
default_args = {'start_date': DEFAULT_DATE.now(), 'owner': 'test'}
with DAG("test-dag", start_date=DEFAULT_DATE, default_args=default_args):
task1 = BaseOperator(task_id="op1")
literal = ['a', 'b', 'c']
mapped = MockOperator(task_id='task_2').map(arg2=literal)
task1 >> mapped
assert mapped.partial_kwargs['owner'] == 'test'
assert mapped.start_date == pendulum.instance(default_args['start_date'])
def test_map_unknown_arg_raises():
with pytest.raises(TypeError, match=r"argument 'file'"):
BaseOperator(task_id='a').map(file=[1, 2, {'a': 'b'}])
def test_partial_on_instance() -> None:
"""`.partial` on an instance should fail -- it's only designed to be called on classes"""
with pytest.raises(TypeError):
MockOperator(
task_id='a',
).partial()
def test_partial_on_class() -> None:
# Test that we accept args for superclasses too
op = MockOperator.partial(task_id='a', arg1="a", trigger_rule=TriggerRule.ONE_FAILED)
assert op.partial_kwargs == {'arg1': 'a', 'trigger_rule': TriggerRule.ONE_FAILED}
def test_partial_on_class_invalid_ctor_args() -> None:
"""Test that when we pass invalid args to partial().
I.e. if an arg is not known on the class or any of its parent classes we error at parse time
"""
with pytest.raises(TypeError, match=r"arguments 'foo', 'bar'"):
MockOperator.partial(task_id='a', foo='bar', bar=2)
| mistercrunch/airflow | tests/models/test_baseoperator.py | Python | apache-2.0 | 29,590 |
import os
import re
SCHEDULER_NAME = os.environ.get("MEMSQL_SCHEDULER_NAME", "memsql")
if SCHEDULER_NAME == "memsql":
DEFAULT_DATA_ROOT_PATH = "memsql_scheduler"
else:
DEFAULT_DATA_ROOT_PATH = "memsql_scheduler_%s" % re.sub("\W", "", SCHEDULER_NAME)
__version__ = "0.0.1"
| memsql/memsql-mesos | memsql_framework/__init__.py | Python | apache-2.0 | 282 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import pytest
from pants.backend.core.targets.resources import Resources
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.exceptions import TargetDefinitionException
from pants_test.base_test import BaseTest
class JvmTargetTest(BaseTest):
def test_traversable_dependency_specs(self):
self.make_target(':resource_target', Resources)
target = self.make_target(':foo', JvmTarget, resources=[':resource_target'])
self.assertSequenceEqual([], list(target.traversable_specs))
self.assertSequenceEqual([':resource_target'], list(target.traversable_dependency_specs))
| jtrobec/pants | tests/python/pants_test/backend/jvm/targets/test_jvm_target.py | Python | apache-2.0 | 908 |
"""
Cloning resources.
"""
from marshmallow import (
Schema,
fields,
post_load,
pre_dump,
)
class EdgeSchema(Schema):
"""
An edge between UUID node ids.
"""
fromId = fields.UUID(
attribute="from_id",
required=True,
)
toId = fields.UUID(
attribute="to_id",
required=True,
)
class SubstitutionSchema(Schema):
"""
    A substitution from one UUID to another.
    This schema is currently identical to an Edge, but is kept distinct in order
to support non-UUID substitutions if ever needed.
"""
fromId = fields.UUID(
attribute="from_id",
required=True,
)
toId = fields.UUID(
attribute="to_id",
required=True,
)
class DAGSchema(Schema):
"""
Represents a DAG.
Nodes should be overridden with a non-raw schema.
"""
nodes = fields.Nested(
fields.Raw,
required=True,
attribute="nodes_map",
)
edges = fields.List(
fields.Nested(EdgeSchema),
required=True,
)
substitutions = fields.List(
fields.Nested(SubstitutionSchema),
missing=[],
required=False,
)
@pre_dump
def unflatten(self, obj, **kwargs):
"""
Translate substitutions dictionary into objects.
"""
obj.substitutions = [
dict(from_id=key, to_id=value)
for key, value in getattr(obj, "substitutions", {}).items()
]
return obj
class NewCloneSchema(Schema):
commit = fields.Boolean(
missing=True,
required=False,
)
substitutions = fields.List(
fields.Nested(SubstitutionSchema),
missing=[],
required=False,
)
@post_load
def flatten(self, obj, **kwargs):
"""
Translate substitutions into a dictionary.
"""
obj["substitutions"] = {
item["from_id"]: item["to_id"]
for item in obj["substitutions"]
}
return obj
class DAGCloningController:
def __init__(self, store):
self.store = store
def explain(self, **kwargs):
"""
Return a DAG "explaining" what may be cloned.
"""
return self.store.explain(**kwargs)
def clone(self, substitutions, commit=True, **kwargs):
"""
Clone a DAG, optionally skipping the commit.
"""
return self.store.clone(substitutions, **kwargs)
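# ---------------------------------------------------------------------------
# Illustrative sketch (commented out, and assuming marshmallow 3 ``load``
# semantics) of how NewCloneSchema flattens posted substitution edges into
# the dict consumed by clone(); the UUID strings are placeholder values.
#
#   data = NewCloneSchema().load({
#       "substitutions": [
#           {"fromId": "00000000-0000-0000-0000-000000000001",
#            "toId": "00000000-0000-0000-0000-000000000002"},
#       ],
#   })
#   # data["commit"] is True and data["substitutions"] maps
#   # UUID('...0001') -> UUID('...0002')
# ---------------------------------------------------------------------------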
| globality-corp/microcosm-flask | microcosm_flask/cloning.py | Python | apache-2.0 | 2,468 |
# Unit tests for each of the 3 main modules within berrl.
# pipehtml and piperealtime tests still need to be written!
import os
#testing pipegeohash
os.chdir('pipegeohash_test')
execfile('test_pipegeohash.py')
os.chdir('..')
# testing pipegeojson
os.chdir('pipegeojson_test')
execfile('test_pipegeojson.py')
os.chdir('..')
| murphy214/berrl | tests/build_test.py | Python | apache-2.0 | 323 |
import numpy as np
from . import Measurement
class PixelAverage(Measurement):
"""
This is an incredibly basic example of a feature-extraction measurement.
Returns
-------
np.array
Contains a single element equal to the average of all pixel values in the image.
"""
def compute(self, image):
return [np.average(image)]
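# Minimal usage sketch (commented out; assumes the Measurement base class
# needs no constructor arguments). Any 2-D array-like works as the image;
# the random array below is purely illustrative.
#
#   image = np.random.rand(64, 64)
#   print(PixelAverage().compute(image))    # -> [~0.5]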
| widoptimization-willett/feature-extraction | feature_extraction/measurements/pixelaverage.py | Python | apache-2.0 | 332 |
# Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import sys
import os
import ctypes
from numpy import ndarray
from .base import Series, DataFrame
import numpy as np
from .base import _LIB, XLearnHandle
from .base import _check_call, c_str
from .data import DMatrix
class XLearn(object):
"""XLearn is the core interface used by python API."""
def __init__(self, handle):
"""Initalizes a new XLearn
Parameters
----------
handle : XLearnHandle
'XLearn' handle of C API.
"""
assert isinstance(handle, XLearnHandle)
self.handle = handle
def __del__(self):
_check_call(_LIB.XLearnHandleFree(ctypes.byref(self.handle)))
def _set_Param(self, param):
"""Set hyper-parameter for xlearn handle
Parameters
----------
param : dict
xlearn hyper-parameters
"""
for (key, value) in param.items():
if key == 'task':
_check_call(_LIB.XLearnSetStr(ctypes.byref(self.handle),
c_str(key), c_str(value)))
elif key == 'metric':
_check_call(_LIB.XLearnSetStr(ctypes.byref(self.handle),
c_str(key), c_str(value)))
elif key == 'opt':
_check_call(_LIB.XLearnSetStr(ctypes.byref(self.handle),
c_str(key), c_str(value)))
elif key == 'log':
_check_call(_LIB.XLearnSetStr(ctypes.byref(self.handle),
c_str(key), c_str(value)))
elif key == 'lr':
_check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),
c_str(key), ctypes.c_float(value)))
elif key == 'k':
_check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),
c_str(key), ctypes.c_uint(value)))
elif key == 'lambda':
_check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),
c_str(key), ctypes.c_float(value)))
elif key == 'init':
_check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),
c_str(key), ctypes.c_float(value)))
elif key == 'epoch':
_check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),
c_str(key), ctypes.c_uint(value)))
elif key == 'fold':
_check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),
c_str(key), ctypes.c_uint(value)))
elif key == 'alpha':
_check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),
c_str(key), ctypes.c_float(value)))
elif key == 'beta':
_check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),
c_str(key), ctypes.c_float(value)))
elif key == 'lambda_1':
_check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),
c_str(key), ctypes.c_float(value)))
elif key == 'lambda_2':
_check_call(_LIB.XLearnSetFloat(ctypes.byref(self.handle),
c_str(key), ctypes.c_float(value)))
elif key == 'nthread':
_check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),
c_str(key), ctypes.c_uint(value)))
elif key == 'block_size':
_check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),
c_str(key), ctypes.c_uint(value)))
elif key == 'stop_window':
_check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),
c_str(key), ctypes.c_uint(value)))
elif key == 'seed':
_check_call(_LIB.XLearnSetInt(ctypes.byref(self.handle),
c_str(key), ctypes.c_uint(value)))
else:
raise Exception("Invalid key!", key)
def show(self):
"""Show model information
"""
_check_call(_LIB.XLearnShow(ctypes.byref(self.handle)))
def setTrain(self, train_path):
"""Set file path of training data.
Parameters
----------
        train_path : str or DMatrix
            the path of training data, or an xLearn DMatrix
"""
if isinstance(train_path, str):
_check_call(_LIB.XLearnSetTrain(ctypes.byref(self.handle), c_str(train_path)))
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle), c_str("from_file"), ctypes.c_bool(True)))
elif isinstance(train_path, DMatrix):
key = "train"
_check_call(_LIB.XLearnSetDMatrix(ctypes.byref(self.handle), c_str(key), ctypes.byref(train_path.handle)))
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle), c_str("from_file"), ctypes.c_bool(False)))
else:
raise Exception("Invalid train.Can be test file path or xLearn DMatrix", type(train_path))
def setTest(self, test_path):
"""Set file path of test data.
Parameters
----------
        test_path : str or DMatrix
            the path of test data, or an xLearn DMatrix.
"""
if isinstance(test_path, str):
_check_call(_LIB.XLearnSetTest(ctypes.byref(self.handle), c_str(test_path)))
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle), c_str("from_file"), ctypes.c_bool(True)))
elif isinstance(test_path, DMatrix):
key = "test"
_check_call(_LIB.XLearnSetDMatrix(ctypes.byref(self.handle), c_str(key), ctypes.byref(test_path.handle)))
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle), c_str("from_file"), ctypes.c_bool(False)))
else:
raise Exception("Invalid test.Can be test file path or xLearn DMatrix", type(test_path))
def setPreModel(self, pre_model_path):
""" Set file path of pre-trained model.
Parameters
----------
pre_model_path : str
the path of pre-trained model.
"""
_check_call(_LIB.XLearnSetPreModel(ctypes.byref(self.handle), c_str(pre_model_path)))
def setValidate(self, val_path):
"""Set file path of validation data.
Parameters
----------
        val_path : str or DMatrix
            the path of validation data, or an xLearn DMatrix.
"""
if isinstance(val_path, str):
_check_call(_LIB.XLearnSetValidate(ctypes.byref(self.handle), c_str(val_path)))
elif isinstance(val_path, DMatrix):
key = "validate"
_check_call(_LIB.XLearnSetDMatrix(ctypes.byref(self.handle), c_str(key), ctypes.byref(val_path.handle)))
else:
raise Exception("Invalid validation.Can be test file path or xLearn DMatrix", type(val_path))
def setTXTModel(self, model_path):
"""Set the path of TXT model file.
Parameters
----------
model_path : str
the path of the TXT model file.
"""
_check_call(_LIB.XLearnSetTXTModel(ctypes.byref(self.handle), c_str(model_path)))
def setQuiet(self):
"""Set xlearn to quiet model"""
key = 'quiet'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(True)))
def setOnDisk(self):
"""Set xlearn to use on-disk training"""
key = 'on_disk'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(True)))
def setNoBin(self):
"""Do not generate bin file"""
key = 'bin_out'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(False)))
def disableNorm(self):
"""Disable instance-wise normalization"""
key = 'norm'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(False)))
def disableLockFree(self):
"""Disable lock free training"""
key = 'lock_free'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(False)))
def disableEarlyStop(self):
"""Disable early-stopping"""
key = 'early_stop'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(False)))
def setSign(self):
"""Convert output to 0 and 1"""
key = 'sign'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(True)))
def setSigmoid(self):
"""Convert output by using sigmoid"""
key = 'sigmoid'
_check_call(_LIB.XLearnSetBool(ctypes.byref(self.handle),
c_str(key), ctypes.c_bool(True)))
def fit(self, param, model_path):
"""Check hyper-parameters, train model, and dump model.
Parameters
----------
param : dict
hyper-parameter used by xlearn.
model_path : str
path of model checkpoint.
"""
self._set_Param(param)
_check_call(_LIB.XLearnFit(ctypes.byref(self.handle), c_str(model_path)))
def cv(self, param):
""" Do cross-validation
Parameters
----------
param : dict
hyper-parameter used by xlearn
"""
self._set_Param(param)
_check_call(_LIB.XLearnCV(ctypes.byref(self.handle)))
def predict(self, model_path, out_path=None):
"""Predict output
Parameters
----------
        model_path : str. Path of the model checkpoint.
        out_path : str, default None. If an output path is given, the result is saved
            to a local file and no numpy array is returned.
"""
if out_path is None:
length = ctypes.c_uint64()
preds = ctypes.POINTER(ctypes.c_float)()
_check_call(_LIB.XLearnPredictForMat(ctypes.byref(self.handle),
c_str(model_path),
ctypes.byref(length),
ctypes.byref(preds)))
res = np.zeros(length.value, dtype=np.float32)
ctypes.memmove(res.ctypes.data, preds, length.value * res.strides[0])
return res
else:
_check_call(_LIB.XLearnPredictForFile(ctypes.byref(self.handle),
c_str(model_path),
c_str(out_path)))
def create_linear():
"""
Create a linear model.
"""
model_type = 'linear'
handle = XLearnHandle()
_check_call(_LIB.XLearnCreate(c_str(model_type), ctypes.byref(handle)))
return XLearn(handle)
def create_fm():
"""
Create a factorization machine.
"""
model_type = 'fm'
handle = XLearnHandle()
_check_call(_LIB.XLearnCreate(c_str(model_type), ctypes.byref(handle)))
return XLearn(handle)
def create_ffm():
"""
Create a field-aware factorization machine.
"""
model_type = 'ffm'
handle = XLearnHandle()
_check_call(_LIB.XLearnCreate(c_str(model_type), ctypes.byref(handle)))
return XLearn(handle)
def hello():
"""
Say hello to user
"""
_check_call(_LIB.XLearnHello())
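# ---------------------------------------------------------------------------
# Illustrative usage sketch (commented out so that importing this module has
# no side effects). It shows how the handle API above is typically chained;
# the data and model file names are placeholder assumptions.
#
#   import xlearn as xl
#
#   ffm_model = xl.create_ffm()            # or create_fm() / create_linear()
#   ffm_model.setTrain("train.ffm")        # a file path or an xLearn DMatrix
#   ffm_model.setValidate("valid.ffm")
#   param = {'task': 'binary', 'lr': 0.2, 'lambda': 0.002, 'metric': 'acc'}
#   ffm_model.fit(param, "model.out")      # train and write a checkpoint
#   ffm_model.setTest("test.ffm")
#   preds = ffm_model.predict("model.out") # numpy array of predictions
# ---------------------------------------------------------------------------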
| aksnzhy/xLearn | python-package/xlearn/xlearn.py | Python | apache-2.0 | 12,443 |
import os.path
import queue
import requests
import threading
import time
from . import linkextraction
class MockStorage(object):
def store(self, url, html):
pass
class FileStorage(object):
def __init__(self, folder):
self.folder = folder
def store(self, url, html):
filename = url.replace('_', '__').replace('/', '_')
with open(os.path.join(self.folder, filename), 'w+') as f:
f.write(html)
class Crawler(threading.Thread):
IDLE_WAIT_TIME = 2
def __init__(self, storage, frontier, frontier_empty_callback=None):
super().__init__()
self.is_stopped = False
self.storage = storage
self.frontier = frontier
if frontier_empty_callback is None:
self.frontier_empty_callback = self.stop
else:
self.frontier_empty_callback = frontier_empty_callback
def stop(self):
self.is_stopped = True
def wait_idle(self):
time.sleep(self.IDLE_WAIT_TIME)
def run(self):
while not self.is_stopped:
self.crawl_next_url()
def crawl_next_url(self):
try:
url = self.frontier.next_valid_url()
response = requests.get(url)
new_urls = linkextraction.extract_links(response.text)
self.frontier.add_urls(new_urls)
self.storage.store(url, response.text)
except requests.exceptions.RequestException:
self.frontier.punish_url(url)
except queue.Empty:
self.frontier_empty_callback()
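# ---------------------------------------------------------------------------
# Illustrative wiring sketch (commented out). The frontier object is a
# hypothetical stand-in: this module only assumes the interface used above,
# i.e. next_valid_url(), add_urls(), and punish_url().
#
#   frontier = SomeFrontier(seed_urls=["http://example.com/"])
#   crawler = Crawler(FileStorage("/tmp/pages"), frontier)
#   crawler.start()    # thread loop calls crawl_next_url() until stopped
#   ...
#   crawler.stop()     # sets is_stopped so run() exits
#   crawler.join()
# ---------------------------------------------------------------------------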
| aufziehvogel/dungbeetle | dungbeetle/crawler.py | Python | apache-2.0 | 1,548 |
"""
This module contains all request handlers associated
with the entry of a user into the application.
For example, there are functions for registration requests,
requests to enter the system, and requests to log out.
"""
__author__ = 'Thodoris Sotiropoulos'
import json
from flask import session, request, redirect, url_for, Response
from mvc.model.user_model import User
from mvc.model.application_model import delete_data
from main import app
from mvc.controller.graphfile import graphfile
@app.route('/login', methods=['GET', 'POST'])
def login():
"""
    Request handler for the login operation. It takes two parameters:
    the first is a username and the second is a password. It then searches the
    database for a user with the given credentials. If the user exists, the main
    page of the application is returned; otherwise, the login form page is
    returned with a message that wrong credentials were given.
    :return: Page to forward to, according to whether the user exists.
"""
username = request.form['username']
password = request.form['password']
user = User(username, password)
message = user.check_credentials()
if message is None:
session.pop('warningMessage', None)
session['login'] = True
session['showimage'] = False
session['user'] = username
return redirect(url_for('mainpage'))
else:
session['warningMessage'] = message
return redirect(url_for('index'))
@app.route('/logout', methods=['GET', 'POST'])
def logout():
"""
    Request for the user to log out of the system. Removes all session data associated with the user.
:return: Page with the login form.
"""
graphfile.pop(session['user'], None)
delete_data()
session['user'] = None
session['showimage'] = False
session['login'] = False
return redirect(url_for('index'))
@app.route('/_check_user')
def check_username():
"""
    A request handler function that takes a username (or email) as a parameter
    and checks whether a user with the given value already exists.
    If such a user exists, a message is returned accordingly.
    It is implemented via AJAX: when a user types a username into the
    corresponding field of the registration form, the client sends a request
    to this function to check whether another user already has the same
    username, so that the user who wants to register can be warned that the
    username is already taken.
    :return: Message to the user, saying whether the value is accepted or not.
"""
input_value = request.args.get('value', None, type=str)
input_type = request.args.get('inputType', None, type=str)
if input_type == 'username':
user = User(username=input_value)
else:
user = User(email=input_value)
if not user.user_exists(input_type):
return Response(json.dumps('Your ' + input_type + ' is accepted!'))
else:
return Response(json.dumps(input_type + ' already exists!'))
@app.route('/_new_account', methods=['GET', 'POST'])
def new_account():
""" A new user is added to the system. """
username = request.args.get('username', None, type=str)
password = request.args.get('password', None, type=str)
first_name = request.args.get('name', None, type=str)
last_name = request.args.get('lastName', None, type=str)
email = request.args.get('email', None, type=str)
user = User(username, password, first_name, last_name, email)
user.add_user()
return Response(json.dumps('Your registration completed!'))
| theosotr/netxanal | mvc/view/registration.py | Python | apache-2.0 | 3,541 |
import re
import os
import sys
import json
import time
import doctest
import textwrap
import subprocess
from nbformat import v3, v4
from datetime import datetime, date
import argparse
# Hacky solution to avoid picking up ParaView's python packages (e.g. pygments)
# that would otherwise cause the notebooks to not be generated
try:
os.environ['PYTHONPATH'] = os.environ['ROOTSYS'] + "/lib"
except KeyError:
print("Error: ROOTSYS was not set. Please source thisbdm.sh.")
exit(1)
# List of types that will be considered when looking for a C++ function. If a macro returns a
# type not included in the list, the regular expression will not match it, and thus the function
# will not be properly defined. Any other type returned by a function must therefore be added to
# this list for the script to work correctly.
gTypesList = ["inline int", "void", "int",
"string", "bool", "double", "float", "char"]
def Indent(string, spaces=2):
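    # Note: despite the name (and the unused `spaces` parameter), this helper
    # only strips leading blank lines; it does not add indentation.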
new_string = ''
lines = string.splitlines()
skip = True
for line in lines:
if line == "" and skip:
continue
new_string += line + "\n"
skip = False
return new_string
def Unindent(string, spaces=2):
"""
Returns string with each line unindented by 2 spaces. If line isn't indented, it stays the same.
>>> Unindent(" foobar")
'foobar\\n'
>>> Unindent("foobar")
'foobar\\n'
>>> Unindent('''foobar
... foobar
... foobar''')
'foobar\\nfoobar\\nfoobar\\n'
"""
newstring = ''
lines = string.splitlines()
for line in lines:
if line.startswith(spaces*' '):
newstring += (line[spaces:] + "\n")
else:
newstring += (line + "\n")
return newstring
def ReadHeader(text):
"""
Extracts the description from the header, and removes the copyright notice
"""
lines = text.splitlines()
# Skip copyright notice
lines = lines[13:]
newTitle = ""
visualize = False
description = ''
for i, line in enumerate(lines):
if line.startswith("// \\title "):
newTitle = line[9:]
elif line.startswith("// \\visualize"):
visualize = True
elif line.startswith("//"):
if line == "//" or not line.startswith("// --"):
description += ('# ' + line[3:] + '\n')
else:
break
newtext = ''
for line in lines[i:]:
newtext += (line + "\n")
description = description.replace("\\f$", "$")
description = description.replace("\\f[", "$$")
description = description.replace("\\f]", "$$")
return newtext, description, newTitle, visualize
def ExtractMainFunction(text):
"""
Extracts the contents of the Simulate(argc, argv) function.
"""
functionContentRe = re.compile(
r'{((\n|.)*)}', flags=re.DOTALL | re.MULTILINE)
match = functionContentRe.search(text)
text = match.group()
text = text[1:-1] # remove "{" and "}"
new_text = ''
for line in text.splitlines():
if "argc, argv" in line:
new_text += line.replace("argc, argv",
'"{}"'.format(tutName)) + "\n"
continue
if "return 0;" in line:
new_text += '\n'
continue
new_text += line + "\n"
return new_text
def Comments(text):
"""
Converts comments delimited by // and on a new line into a markdown cell.
>>> Comments('''// This is a
... // multiline comment
... void function(){}''')
'# <markdowncell>\\n# This is a\\n# multiline comment\\n# <codecell>\\nvoid function(){}\\n'
>>> Comments('''void function(){
... int variable = 5 // Comment not in cell
... // Comment also not in cell
... }''')
'void function(){\\n int variable = 5 // Comment not in cell\\n // Comment also not in cell\\n}\\n'
"""
text = text.splitlines()
newtext = ''
inComment = False
for line in text:
if line.startswith("//") and not inComment: # True if first line of comment
inComment = True
newtext += "# <markdowncell>\n"
# Don't use .capitalize() if line starts with hash, ie it is a header
if line[2:].lstrip().startswith("#"):
newtext += ("# " + line[2:]+"\n")
else:
newtext += ("# " + line[2:].lstrip().capitalize()+"\n")
# True if first line after comment
elif inComment and not line.startswith("//"):
inComment = False
newtext += "# <codecell>\n"
newtext += (line+"\n")
# True if in the middle of a comment block
elif inComment and line.startswith("//"):
newtext += ("# " + line[2:] + "\n")
else:
newtext += (line+"\n")
return newtext
def split(text):
"""
Splits the text string into main, helpers, and rest. main is the main function,
i.e. the function tha thas the same name as the macro file. Helpers is a list of
strings, each a helper function, i.e. any other function that is not the main function.
Finally, rest is a string containing any top-level code outside of any function.
Comments immediately prior to a helper cell are converted into markdown cell,
added to the helper, and removed from rest.
Intended for C++ files only.
>>> split('''void tutorial(){
... content of tutorial
... }''')
('void tutorial(){\\n content of tutorial\\n}', [], '')
>>> split('''void tutorial(){
... content of tutorial
... }
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n A helper function is created: \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '')
>>> split('''#include <header.h>
... using namespace NAMESPACE
... void tutorial(){
... content of tutorial
... }
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n A helper function is created: \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '#include <header.h>\\nusing namespace NAMESPACE')
>>> split('''void tutorial(){
... content of tutorial
... }
... // This is a multiline
... // description of the
... // helper function
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n This is a multiline\\n description of the\\n helper function\\n \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '')
"""
functionReString = "("
for cpptype in gTypesList:
functionReString += ("^%s|") % cpptype
functionReString = functionReString[:-1] + \
r")\s?\*?&?\s?[\w:]*?\s?\([^\)]*\)\s*\{.*?^\}"
functionRe = re.compile(functionReString, flags=re.DOTALL | re.MULTILINE)
#functionre = re.compile(r'(^void|^int|^Int_t|^TF1|^string|^bool|^double|^float|^char|^TCanvas|^TTree|^TString|^TSeqCollection|^Double_t|^TFile|^Long64_t|^Bool_t)\s?\*?\s?[\w:]*?\s?\([^\)]*\)\s*\{.*?^\}', flags = re.DOTALL | re.MULTILINE)
functionMatches = functionRe.finditer(text)
helpers = []
main = ""
for matchString in [match.group() for match in functionMatches]:
if findFunctionName(matchString) == "Simulate": # the main simulation function
main = matchString
else:
helpers.append(matchString)
# Create rest by replacing the main and helper functions with blank strings
rest = text.replace(main, "")
for helper in helpers:
rest = rest.replace(helper, "")
newHelpers = []
lines = text.splitlines()
for helper in helpers: # For each helper function
# Look through the lines until the
for i, line in enumerate(lines):
# first line of the helper is found
if line.startswith(helper[:helper.find("\n")]):
j = 1
commentList = []
# Add comment lines immediately prior to list
while lines[i-j].startswith("//"):
commentList.append(lines[i-j])
j += 1
if commentList: # Convert list to string
commentList.reverse()
helperDescription = ''
for comment in commentList:
if comment in ("//", "// "):
helperDescription += "\n\n" # Two newlines to create hard break in Markdown
else:
helperDescription += (comment[2:] + "\n")
rest = rest.replace(comment, "")
break
else: # If no comments are found create generic description
helperDescription = "A helper function is created:"
break
if findFunctionName(helper) != "main": # remove void main function
newHelpers.append("\n# <markdowncell>\n " +
helperDescription + " \n# <codecell>\n%%cpp -d\n" + helper)
headers = ''
for line in rest.splitlines():
if line.startswith("#include"):
headers += line + "\n"
rest = rest.replace(line, "")
# remove newlines and empty comments at the end of string
rest = rest.rstrip("\n /")
return main, newHelpers, headers, rest
def findFunctionName(text):
"""
Takes a string representation of a C++ function as an input,
finds and returns the name of the function
>>> findFunctionName('void functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void functionName (arguments = values){}')
'functionName'
>>> findFunctionName('void *functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void* functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void * functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void class::functionName(arguments = values){}')
'class::functionName'
"""
functionNameReString = "(?<="
for cpptype in gTypesList:
functionNameReString += ("(?<=%s)|") % cpptype
functionNameReString = functionNameReString[:-
1] + r")\s?\*?\s?[^\s]*?(?=\s?\()"
functionNameRe = re.compile(
functionNameReString, flags=re.DOTALL | re.MULTILINE)
#functionnamere = re.compile(r'(?<=(?<=int)|(?<=void)|(?<=TF1)|(?<=Int_t)|(?<=string)|(?<=double)|(?<=Double_t)|(?<=float)|(?<=char)|(?<=TString)|(?<=bool)|(?<=TSeqCollection)|(?<=TCanvas)|(?<=TTree)|(?<=TFile)|(?<=Long64_t)|(?<=Bool_t))\s?\*?\s?[^\s]*?(?=\s?\()', flags = re.DOTALL | re.MULTILINE)
match = functionNameRe.search(text)
functionname = match.group().strip(" *\n")
return functionname
def processmain(text):
argumentsCell = ''
if text:
argumentsre = re.compile(
r'(?<=\().*?(?=\))', flags=re.DOTALL | re.MULTILINE)
arguments = argumentsre.search(text)
if len(arguments.group()) > 3:
# argumentsCell = "# <markdowncell> \n Arguments are defined. \n# <codecell>\n"
# , flags = re.DOTALL) #| re.MULTILINE)
individualArgumentre = re.compile(r'[^/\n,]*?=[^/\n,]*')
argumentList = individualArgumentre.findall(arguments.group())
for argument in argumentList:
argumentsCell += argument.strip("\n ") + ";\n"
# argumentsCell += "# <codecell>\n"
return text, argumentsCell
def changeMarkdown(code):
code = code.replace("~~~", "```")
code = code.replace("{.cpp}", "cpp")
code = code.replace("{.bash}", "bash")
return code
def RemoveIncludeGuardsAndNamespace(text):
lines = text.splitlines()
new_text = ''
for line in lines:
if line.startswith("#ifndef DEMO_") or line.startswith("#define DEMO_") or line.startswith("#endif // DEMO_"):
continue
elif line.startswith("namespace") or line.startswith("} // namespace"):
continue
else:
new_text += line + "\n"
return new_text
# Creates the macro function declaration, such that `root -x function_name.C`
# can be used
def CreateMainFunction(content):
signature = "void {}()".format(tutName) + " {\n"
return signature + Indent(content) + "\n}\n"
# -------------------------------------
# ------------ Main Program------------
# -------------------------------------
def mainfunction(text, visualize):
"""
    Main function. Calls all other functions. Also, it adds a cell that draws the result. The working text is
    then converted to a version 3 Jupyter notebook, subsequently upgraded to version 4. Then, metadata
    associated with the language the macro is written in is attached to the notebook. Finally, the
    notebook is executed and converted to HTML.
"""
# Modify text from macros to suit a notebook
main, helpers, headers, rest = split(text)
main_macro = CreateMainFunction(Indent(ExtractMainFunction(main)))
main, argumentsCell = processmain(main)
# Remove function, Unindent, and convert comments to Markdown cells
main = Comments(Unindent(ExtractMainFunction(main)))
rest = RemoveIncludeGuardsAndNamespace(rest)
# Command for loading rootlogon.C
libloading_macro = '%jsroot on\ngROOT->LoadMacro("${BDMSYS}/etc/rootlogon.C");\n\n'
c_macro = headers + rest + main_macro
with open(outPathNameMacro, 'w') as fout:
fout.write(c_macro)
if argumentsCell:
main = argumentsCell + main
if visualize:
visComment = "# <markdowncell>\n Let's visualize the output!"
main += '\n%s\n# <codecell>\nVisualizeInNotebook();\n' % visComment
# Convert top level code comments to Markdown cells
rest = Comments(rest)
# Construct text by starting with top level code, then the helper functions, and finally the main function.
# Also add cells for headerfile, or keepfunction
text = "# <codecell>\n" + rest
for helper in helpers:
text += helper
text += ("\n# <codecell>\n" + main)
# Change to standard Markdown
newDescription = changeMarkdown(description)
# Horizontal title line
hline = '<hr style="border-top-width: 4px; border-top-color: #34609b;">'
# Add the title and header of the notebook
text = "# <markdowncell> \n# # %s\n%s\n%s# \n# \n# <codecell>\n%s\n# <codecell>\n%s\n# <codecell>\n%s" % (
tutTitle, hline, newDescription, libloading_macro, headers, text)
# Create a notebook from the working text
nbook = v3.reads_py(text)
nbook = v4.upgrade(nbook) # Upgrade v3 to v4
# Load notebook string into json format, essentially creating a dictionary
json_data = json.loads(v4.writes(nbook))
# add the corresponding metadata
json_data['metadata'] = {
"kernelspec": {
"display_name": "ROOT C++",
"language": "c++",
"name": "root"
},
"language_info": {
"codemirror_mode": "text/x-c++src",
"file_extension": ".C",
"mimetype": " text/x-c++src",
"name": "c++"
}
}
# write the json file with the metadata
with open(outPathName, 'w') as fout:
json.dump(json_data, fout, indent=1, sort_keys=True)
timeout = 60
execute = "--execute"
if args.skip:
execute = ""
    # Call command that executes the notebook and creates a new notebook with the output
nbconvert_cmd = "jupyter nbconvert --to=html --ExecutePreprocessor.timeout=%d %s %s" % (timeout, execute, outPathName)
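    # The list form below avoids shell quoting issues; nbconvert_cmd is kept
    # only for the error message printed on failure.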
    nbconvert_args = ["jupyter", "nbconvert", "--to=html",
                      "--ExecutePreprocessor.timeout=%d" % timeout,
                      execute, outPathName]
    # Drop the empty placeholder when --skip disables execution.
    r = subprocess.call([a for a in nbconvert_args if a])
if r != 0:
sys.stderr.write(
"NOTEBOOK_CONVERSION_ERROR: nbconvert failed for notebook %s with return code %s\n" % (outname, r))
sys.stderr.write("FAILED COMMAND: %s\n" % nbconvert_cmd)
exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--tutpath', type=str)
parser.add_argument('--outdir', type=str)
parser.add_argument('--skip', action='store_true')
args = parser.parse_args()
tutPathName = args.tutpath
tutPath = os.path.dirname(tutPathName)
tutFileName = os.path.basename(tutPathName)
    tutName, extension = tutFileName.rsplit(".", 1)  # tolerate extra dots in the name
tutTitle = re.sub(r"([A-Z\d])", r" \1", tutName).title()
tutTitle = tutTitle.replace("_", " ")
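    # The two lines above turn e.g. "myDemo2" into "My Demo 2": a space is
    # inserted before every capital letter and digit, the result is
    # title-cased, and underscores become spaces.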
outname = tutName + ".ipynb"
outnameMacro = tutName + ".C"
outnameconverted = tutName + ".html"
# Extract output directory
if args.outdir:
outdir = args.outdir
else:
outdir = tutPath
outPathName = os.path.join(outdir, outname)
outPathNameMacro = os.path.join(outdir, outnameMacro)
date = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
# Open the file to be converted
with open(tutPathName) as fin:
text = fin.read()
text, description, newTitle, visualize = ReadHeader(text)
if newTitle != "":
tutTitle = newTitle
starttime = time.time()
mainfunction(text, visualize)
| BioDynaMo/biodynamo | util/demo_to_notebook.py | Python | apache-2.0 | 15,802 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
from neutronclient.common import exceptions as neutron_client_exc
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import conductor
from nova import exception
from nova.network import base_api
from nova.network import model as network_model
from nova.network import neutronv2
from nova.network.neutronv2 import constants
from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
neutron_opts = [
cfg.StrOpt('neutron_url',
default='http://127.0.0.1:9696',
help='URL for connecting to neutron'),
cfg.IntOpt('neutron_url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds'),
cfg.StrOpt('neutron_admin_username',
help='Username for connecting to neutron in admin context'),
cfg.StrOpt('neutron_admin_password',
help='Password for connecting to neutron in admin context',
secret=True),
cfg.StrOpt('neutron_admin_tenant_id',
help='Tenant id for connecting to neutron in admin context'),
cfg.StrOpt('neutron_admin_tenant_name',
help='Tenant name for connecting to neutron in admin context. '
'This option is mutually exclusive with '
'neutron_admin_tenant_id. Note that with Keystone V3 '
'tenant names are only unique within a domain.'),
cfg.StrOpt('neutron_region_name',
help='Region name for connecting to neutron in admin context'),
cfg.StrOpt('neutron_admin_auth_url',
default='http://localhost:5000/v2.0',
help='Authorization URL for connecting to neutron in admin '
'context'),
cfg.BoolOpt('neutron_api_insecure',
default=False,
help='If set, ignore any SSL validation issues'),
cfg.StrOpt('neutron_auth_strategy',
default='keystone',
help='Authorization strategy for connecting to '
'neutron in admin context'),
# TODO(berrange) temporary hack until Neutron can pass over the
# name of the OVS bridge it is configured with
cfg.StrOpt('neutron_ovs_bridge',
default='br-int',
help='Name of Integration Bridge used by Open vSwitch'),
cfg.IntOpt('neutron_extension_sync_interval',
default=600,
help='Number of seconds before querying neutron for'
' extensions'),
cfg.StrOpt('neutron_ca_certificates_file',
help='Location of CA certificates file to use for '
'neutron client requests.'),
]
CONF = cfg.CONF
CONF.register_opts(neutron_opts)
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('flat_injected', 'nova.network.manager')
LOG = logging.getLogger(__name__)
class API(base_api.NetworkAPI):
"""API for interacting with the neutron 2.x API."""
def __init__(self):
super(API, self).__init__()
self.last_neutron_extension_sync = None
self.extensions = {}
self.conductor_api = conductor.API()
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures."""
def _get_available_networks(self, context, project_id,
net_ids=None, neutron=None):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
        If net_ids is specified, only networks with the requested IDs are searched.
"""
if not neutron:
neutron = neutronv2.get_client(context)
if net_ids:
            # If the user has asked to attach the instance only to specific
            # networks, add just those to **search_opts. This search will
            # also include 'shared' networks.
search_opts = {'id': net_ids}
nets = neutron.list_networks(**search_opts).get('networks', [])
else:
# (1) Retrieve non-public network list owned by the tenant.
search_opts = {'tenant_id': project_id, 'shared': False}
nets = neutron.list_networks(**search_opts).get('networks', [])
# (2) Retrieve public network list.
search_opts = {'shared': True}
nets += neutron.list_networks(**search_opts).get('networks', [])
_ensure_requested_network_ordering(
lambda x: x['id'],
nets,
net_ids)
if not context.is_admin:
for net in nets:
# Perform this check here rather than in validate_networks to
# ensure the check is performed every time
# allocate_for_instance is invoked
if net.get('router:external'):
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['id'])
return nets
def _create_port(self, port_client, instance, network_id, port_req_body,
fixed_ip=None, security_group_ids=None,
available_macs=None, dhcp_opts=None):
"""Attempts to create a port for the instance on the given network.
:param port_client: The client to use to create the port.
:param instance: Create the port for the given instance.
:param network_id: Create the port on the given network.
:param port_req_body: Pre-populated port request. Should have the
device_id, device_owner, and any required neutron extension values.
:param fixed_ip: Optional fixed IP to use from the given network.
:param security_group_ids: Optional list of security group IDs to
apply to the port.
:param available_macs: Optional set of available MAC addresses to use.
:param dhcp_opts: Optional DHCP options.
:returns: ID of the created port.
:raises PortLimitExceeded: If neutron fails with an OverQuota error.
"""
try:
if fixed_ip:
port_req_body['port']['fixed_ips'] = [{'ip_address': fixed_ip}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance['project_id']
if security_group_ids:
port_req_body['port']['security_groups'] = security_group_ids
if available_macs is not None:
if not available_macs:
raise exception.PortNotFree(
instance=instance['display_name'])
mac_address = available_macs.pop()
port_req_body['port']['mac_address'] = mac_address
if dhcp_opts is not None:
port_req_body['port']['extra_dhcp_opts'] = dhcp_opts
port_id = port_client.create_port(port_req_body)['port']['id']
LOG.debug('Successfully created port: %s', port_id,
instance=instance)
return port_id
except neutron_client_exc.NeutronClientException as e:
# NOTE(mriedem): OverQuota in neutron is a 409
if e.status_code == 409:
LOG.warning(_('Neutron error: quota exceeded'))
raise exception.PortLimitExceeded()
with excutils.save_and_reraise_exception():
LOG.exception(_('Neutron error creating port on network %s'),
network_id, instance=instance)
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocate network resources for the instance.
:param requested_networks: optional value containing
network_id, fixed_ip, and port_id
:param security_groups: security groups to allocate for instance
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
:param dhcp_options: None or a set of key/value pairs that should
determine the DHCP BOOTP response, eg. for PXE booting an instance
configured with the baremetal hypervisor. It is expected that these
are already formatted for the neutron v2 api.
See nova/virt/driver.py:dhcp_options_for_instance for an example.
"""
hypervisor_macs = kwargs.get('macs', None)
available_macs = None
if hypervisor_macs is not None:
# Make a copy we can mutate: records macs that have not been used
# to create a port on a network. If we find a mac with a
# pre-allocated port we also remove it from this set.
available_macs = set(hypervisor_macs)
neutron = neutronv2.get_client(context)
LOG.debug('allocate_for_instance() for %s',
instance['display_name'])
if not instance['project_id']:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
reason=msg % instance['display_name'])
requested_networks = kwargs.get('requested_networks')
dhcp_opts = kwargs.get('dhcp_options', None)
ports = {}
fixed_ips = {}
net_ids = []
if requested_networks:
for network_id, fixed_ip, port_id in requested_networks:
if port_id:
port = neutron.show_port(port_id)['port']
if port.get('device_id'):
raise exception.PortInUse(port_id=port_id)
if hypervisor_macs is not None:
if port['mac_address'] not in hypervisor_macs:
raise exception.PortNotUsable(port_id=port_id,
instance=instance['display_name'])
else:
# Don't try to use this MAC if we need to create a
# port on the fly later. Identical MACs may be
# configured by users into multiple ports so we
# discard rather than popping.
available_macs.discard(port['mac_address'])
network_id = port['network_id']
ports[network_id] = port
elif fixed_ip and network_id:
fixed_ips[network_id] = fixed_ip
if network_id:
net_ids.append(network_id)
nets = self._get_available_networks(context, instance['project_id'],
net_ids)
if not nets:
LOG.warn(_("No network configured!"), instance=instance)
return network_model.NetworkInfo([])
security_groups = kwargs.get('security_groups', [])
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
search_opts = {'tenant_id': instance['project_id']}
user_security_groups = neutron.list_security_groups(
**search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
if name_match:
raise exception.NoUniqueMatch(
_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific.") %
security_group)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names the security group the same as
# another's security groups uuid, the name takes priority.
if not name_match and not uuid_match:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
elif name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
touched_port_ids = []
created_port_ids = []
ports_in_requested_order = []
for network in nets:
# If security groups are requested on an instance then the
            # network must have a subnet associated with it. Some plugins
# implement the port-security extension which requires
# 'port_security_enabled' to be True for security groups.
# That is why True is returned if 'port_security_enabled'
# is not found.
if (security_groups and not (
network['subnets']
and network.get('port_security_enabled', True))):
raise exception.SecurityGroupCannotBeApplied()
network_id = network['id']
zone = 'compute:%s' % instance['availability_zone']
port_req_body = {'port': {'device_id': instance['uuid'],
'device_owner': zone}}
try:
port = ports.get(network_id)
self._populate_neutron_extension_values(context, instance,
port_req_body)
# Requires admin creds to set port bindings
port_client = (neutron if not
self._has_port_binding_extension(context) else
neutronv2.get_client(context, admin=True))
if port:
port_client.update_port(port['id'], port_req_body)
touched_port_ids.append(port['id'])
ports_in_requested_order.append(port['id'])
else:
created_port = self._create_port(
port_client, instance, network_id,
port_req_body, fixed_ips.get(network_id),
security_group_ids, available_macs, dhcp_opts)
created_port_ids.append(created_port)
ports_in_requested_order.append(created_port)
except Exception:
with excutils.save_and_reraise_exception():
for port_id in touched_port_ids:
try:
port_req_body = {'port': {'device_id': ''}}
# Requires admin creds to set port bindings
if self._has_port_binding_extension(context):
port_req_body['port']['binding:host_id'] = None
port_client = neutronv2.get_client(
context, admin=True)
else:
port_client = neutron
port_client.update_port(port_id, port_req_body)
except Exception:
msg = _("Failed to update port %s")
LOG.exception(msg, port_id)
for port_id in created_port_ids:
try:
neutron.delete_port(port_id)
except Exception:
msg = _("Failed to delete port %s")
LOG.exception(msg, port_id)
nw_info = self.get_instance_nw_info(context, instance, networks=nets,
port_ids=ports_in_requested_order)
# NOTE(danms): Only return info about ports we created in this run.
# In the initial allocation case, this will be everything we created,
# and in later runs will only be what was created that time. Thus,
# this only affects the attach case, not the original use for this
# method.
return network_model.NetworkInfo([port for port in nw_info
if port['id'] in created_port_ids +
touched_port_ids])
def _refresh_neutron_extensions_cache(self, context):
"""Refresh the neutron extensions cache when necessary."""
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync)
>= CONF.neutron_extension_sync_interval)):
neutron = neutronv2.get_client(context)
extensions_list = neutron.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = dict((ext['name'], ext)
for ext in extensions_list)
def _has_port_binding_extension(self, context, refresh_cache=False):
if refresh_cache:
self._refresh_neutron_extensions_cache(context)
return constants.PORTBINDING_EXT in self.extensions
def _populate_neutron_extension_values(self, context, instance,
port_req_body):
"""Populate neutron extension values for the instance.
If the extension contains nvp-qos then get the rxtx_factor.
"""
self._refresh_neutron_extensions_cache(context)
if 'nvp-qos' in self.extensions:
flavor = flavors.extract_flavor(instance)
rxtx_factor = flavor.get('rxtx_factor')
port_req_body['port']['rxtx_factor'] = rxtx_factor
if self._has_port_binding_extension(context):
port_req_body['port']['binding:host_id'] = instance.get('host')
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug('deallocate_for_instance() for %s',
instance['display_name'])
search_opts = {'device_id': instance['uuid']}
neutron = neutronv2.get_client(context)
data = neutron.list_ports(**search_opts)
ports = [port['id'] for port in data.get('ports', [])]
requested_networks = kwargs.get('requested_networks') or {}
ports_to_skip = [port_id for nets, fips, port_id in requested_networks]
ports = set(ports) - set(ports_to_skip)
for port in ports:
try:
neutron.delete_port(port)
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
LOG.warning(_("Port %s does not exist"), port)
else:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete neutron port %s"),
port)
# NOTE(arosen): This clears out the network_cache only if the instance
# hasn't already been deleted. This is needed when an instance fails to
# launch and is rescheduled onto another compute node. If the instance
# has already been deleted this call does nothing.
base_api.update_instance_cache_with_nw_info(self, context, instance,
network_model.NetworkInfo([]))
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None):
"""Allocate a port for the instance."""
return self.allocate_for_instance(context, instance,
requested_networks=[(network_id, requested_ip, port_id)])
def deallocate_port_for_instance(self, context, instance, port_id):
"""Remove a specified port from the instance.
Return network information for the instance
"""
try:
neutronv2.get_client(context).delete_port(port_id)
except Exception:
LOG.exception(_("Failed to delete neutron port %s") %
port_id)
return self.get_instance_nw_info(context, instance)
def list_ports(self, context, **search_opts):
"""List ports for the client based on search options."""
return neutronv2.get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
"""Return the port for the client given the port id."""
return neutronv2.get_client(context).show_port(port_id)
def get_instance_nw_info(self, context, instance, networks=None,
port_ids=None, use_slave=False):
"""Return network information for specified instance
and update cache.
"""
# NOTE(geekinutah): It would be nice if use_slave had us call
# special APIs that pummeled slaves instead of
# the master. For now we just ignore this arg.
result = self._get_instance_nw_info(context, instance, networks,
port_ids)
base_api.update_instance_cache_with_nw_info(self, context, instance,
result, update_cells=False)
return result
def _get_instance_nw_info(self, context, instance, networks=None,
port_ids=None):
# keep this caching-free version of the get_instance_nw_info method
# because it is used by the caching logic itself.
LOG.debug('get_instance_nw_info() for %s', instance['display_name'])
nw_info = self._build_network_info_model(context, instance, networks,
port_ids)
return network_model.NetworkInfo.hydrate(nw_info)
def _gather_port_ids_and_networks(self, context, instance, networks=None,
port_ids=None):
"""Return an instance's complete list of port_ids and networks."""
if ((networks is None and port_ids is not None) or
(port_ids is None and networks is not None)):
message = ("This method needs to be called with either "
"networks=None and port_ids=None or port_ids and "
" networks as not none.")
raise exception.NovaException(message=message)
ifaces = compute_utils.get_nw_info_for_instance(instance)
# This code path is only done when refreshing the network_cache
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
if networks is None:
networks = self._get_available_networks(context,
instance['project_id'],
net_ids)
# an interface was added/removed from instance.
else:
            # Since networks does not contain the networks already attached
            # to the instance, take their values from the cache and append
            # them.
networks = networks + [
{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces]
# Include existing interfaces so they are not removed from the db.
port_ids = [iface['id'] for iface in ifaces] + port_ids
return networks, port_ids
@base_api.refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Add a fixed ip to the instance from specified network."""
search_opts = {'network_id': network_id}
data = neutronv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
if not ipam_subnets:
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'network_id': network_id}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
for subnet in ipam_subnets:
fixed_ips = p['fixed_ips']
fixed_ips.append({'subnet_id': subnet['id']})
port_req_body = {'port': {'fixed_ips': fixed_ips}}
try:
neutronv2.get_client(context).update_port(p['id'],
port_req_body)
return self._get_instance_nw_info(context, instance)
except Exception as ex:
msg = _("Unable to update port %(portid)s on subnet "
"%(subnet_id)s with failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'],
'subnet_id': subnet['id'],
'exception': ex})
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Remove a fixed ip from the instance."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
neutronv2.get_client(context).update_port(p['id'],
port_req_body)
except Exception as ex:
msg = _("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex})
return self._get_instance_nw_info(context, instance)
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance['uuid'], ip=address)
def validate_networks(self, context, requested_networks, num_instances):
"""Validate that the tenant can use the requested networks.
        Return the number of instances that can be successfully allocated
with the requested network configuration.
"""
LOG.debug('validate_networks() for %s',
requested_networks)
neutron = neutronv2.get_client(context)
ports_needed_per_instance = 0
if not requested_networks:
nets = self._get_available_networks(context, context.project_id,
neutron=neutron)
if len(nets) > 1:
# Attaching to more than one network by default doesn't
# make sense, as the order will be arbitrary and the guest OS
# won't know which to configure
msg = _("Multiple possible networks found, use a Network "
"ID to be more specific.")
raise exception.NetworkAmbiguous(msg)
else:
ports_needed_per_instance = 1
else:
instance_on_net_ids = []
net_ids_requested = []
for (net_id, _i, port_id) in requested_networks:
if port_id:
try:
port = neutron.show_port(port_id).get('port')
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
port = None
else:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to access port %s"),
port_id)
if not port:
raise exception.PortNotFound(port_id=port_id)
if port.get('device_id', None):
raise exception.PortInUse(port_id=port_id)
if not port.get('fixed_ips'):
raise exception.PortRequiresFixedIP(port_id=port_id)
net_id = port['network_id']
else:
ports_needed_per_instance += 1
net_ids_requested.append(net_id)
if net_id in instance_on_net_ids:
raise exception.NetworkDuplicated(network_id=net_id)
instance_on_net_ids.append(net_id)
# Now check to see if all requested networks exist
if net_ids_requested:
nets = self._get_available_networks(
context, context.project_id, net_ids_requested,
neutron=neutron)
for net in nets:
if not net.get('subnets'):
raise exception.NetworkRequiresSubnet(
network_uuid=net['id'])
if len(nets) != len(net_ids_requested):
requested_netid_set = set(net_ids_requested)
returned_netid_set = set([net['id'] for net in nets])
lostid_set = requested_netid_set - returned_netid_set
                    id_str = ', '.join(lostid_set)
raise exception.NetworkNotFound(network_id=id_str)
# Note(PhilD): Ideally Nova would create all required ports as part of
# network validation, but port creation requires some details
# from the hypervisor. So we just check the quota and return
# how many of the requested number of instances can be created
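        # Worked example: with a port quota of 10, 4 ports already in use and
        # 2 ports needed per instance, a request for 5 instances returns
        # (10 - 4) // 2 = 3.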
if ports_needed_per_instance:
ports = neutron.list_ports(tenant_id=context.project_id)['ports']
quotas = neutron.show_quota(tenant_id=context.project_id)['quota']
if quotas.get('port') == -1:
# Unlimited Port Quota
return num_instances
else:
free_ports = quotas.get('port') - len(ports)
ports_needed = ports_needed_per_instance * num_instances
if free_ports >= ports_needed:
return num_instances
else:
return free_ports // ports_needed_per_instance
return num_instances
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given ip address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def get_instance_uuids_by_ip_filter(self, context, filters):
"""Return a list of dicts in the form of
[{'instance_uuid': uuid}] that matched the ip filter.
"""
# filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
ip = filters.get('ip')
        # we remove the ^, $ and \ characters from the ip filter
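        # e.g. the filter '^10\.0\.0\.1$' is reduced to the plain address
        # '10.0.0.1' before querying neutron.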
if ip[0] == '^':
ip = ip[1:]
if ip[-1] == '$':
ip = ip[:-1]
ip = ip.replace('\\.', '.')
return self._get_instance_uuids_by_ip(context, ip)
def _get_port_id_by_fixed_address(self, client,
instance, address):
"""Return port_id from a fixed address."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone}
data = client.list_ports(**search_opts)
ports = data['ports']
port_id = None
for p in ports:
for ip in p['fixed_ips']:
if ip['ip_address'] == address:
port_id = p['id']
break
if not port_id:
raise exception.FixedIpNotFoundForAddress(address=address)
return port_id
@base_api.refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associate a floating ip with a fixed ip."""
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = neutronv2.get_client(context)
port_id = self._get_port_id_by_fixed_address(client, instance,
fixed_address)
fip = self._get_floating_ip_by_address(client, floating_address)
param = {'port_id': port_id,
'fixed_ip_address': fixed_address}
client.update_floatingip(fip['id'], {'floatingip': param})
if fip['port_id']:
port = client.show_port(fip['port_id'])['port']
orig_instance_uuid = port['device_id']
msg_dict = dict(address=floating_address,
instance_id=orig_instance_uuid)
LOG.info(_('re-assign floating IP %(address)s from '
'instance %(instance_id)s') % msg_dict)
orig_instance = self.db.instance_get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
base_api.update_instance_cache_with_nw_info(self, context,
orig_instance)
def get_all(self, context):
"""Get all networks for client."""
client = neutronv2.get_client(context)
networks = client.list_networks().get('networks')
for network in networks:
network['label'] = network['name']
return networks
def get(self, context, network_uuid):
"""Get specific network for client."""
client = neutronv2.get_client(context)
network = client.show_network(network_uuid).get('network') or {}
network['label'] = network['name']
return network
def delete(self, context, network_uuid):
"""Delete a network for client."""
raise NotImplementedError()
def disassociate(self, context, network_uuid):
"""Disassociate a network for client."""
raise NotImplementedError()
def associate(self, context, network_uuid, host=base_api.SENTINEL,
project=base_api.SENTINEL):
"""Associate a network for client."""
raise NotImplementedError()
def get_fixed_ip(self, context, id):
"""Get a fixed ip from the id."""
raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
"""Return instance uuids given an address."""
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, client, port_id):
if not port_id:
return {}
port = client.show_port(port_id)['port']
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return dict([(i['id'], i) for i in pools])
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return dict([(p['id'], p) for p in ports])
def get_floating_ip(self, context, id):
"""Return floating ip object given the floating ip id."""
client = neutronv2.get_client(context)
try:
fip = client.show_floatingip(id)['floatingip']
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
raise exception.FloatingIpNotFound(id=id)
else:
with excutils.save_and_reraise_exception():
LOG.exception(_('Unable to access floating IP %s'), id)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {constants.NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
"""Return floating ip pools."""
client = neutronv2.get_client(context)
pools = self._get_floating_ip_pools(client)
return [{'name': n['name'] or n['id']} for n in pools]
def _format_floating_ip_model(self, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
result = {'id': fip['id'],
'address': fip['floating_ip_address'],
'pool': pool['name'] or pool['id'],
'project_id': fip['tenant_id'],
# In Neutron v2, an exact fixed_ip_id does not exist.
'fixed_ip_id': fip['port_id'],
}
# In Neutron v2 API fixed_ip_address and instance uuid
# (= device_id) are known here, so pass it as a result.
result['fixed_ip'] = {'address': fip['fixed_ip_address']}
if fip['port_id']:
instance_uuid = port_dict[fip['port_id']]['device_id']
result['instance'] = {'uuid': instance_uuid}
else:
result['instance'] = None
return result
def get_floating_ip_by_address(self, context, address):
"""Return a floating ip given an address."""
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def get_floating_ips_by_project(self, context):
client = neutronv2.get_client(context)
project_id = context.project_id
fips = client.list_floatingips(tenant_id=project_id)['floatingips']
pool_dict = self._setup_pools_dict(client)
port_dict = self._setup_ports_dict(client, project_id)
return [self._format_floating_ip_model(fip, pool_dict, port_dict)
for fip in fips]
def get_floating_ips(self, context, all_tenants=False):
client = neutronv2.get_client(context)
project_id = context.project_id
if all_tenants:
fips = client.list_floatingips()['floatingips']
port_dict = self._setup_ports_dict(client)
else:
fips = client.list_floatingips(
tenant_id=project_id)['floatingips']
port_dict = self._setup_ports_dict(client, project_id)
pool_dict = self._setup_pools_dict(client)
return [self._format_floating_ip_model(fip, pool_dict, port_dict)
for fip in fips]
def get_floating_ips_by_fixed_address(self, context, fixed_address):
raise NotImplementedError()
def get_instance_id_by_floating_address(self, context, address):
"""Return the instance id a floating ip's fixed ip is allocated to."""
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
port = client.show_port(fip['port_id'])['port']
return port['device_id']
def get_vifs_by_instance(self, context, instance):
raise NotImplementedError()
def get_vif_by_mac_address(self, context, mac_address):
raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
def allocate_floating_ip(self, context, pool=None):
"""Add a floating ip to a project from a pool."""
client = neutronv2.get_client(context)
pool = pool or CONF.default_floating_pool
pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
# TODO(amotoki): handle exception during create_floatingip()
        # At this point it is ensured that a network for the pool exists;
        # a quota error may still be returned.
param = {'floatingip': {'floating_network_id': pool_id}}
try:
fip = client.create_floatingip(param)
except (neutron_client_exc.IpAddressGenerationFailureClient,
neutron_client_exc.ExternalIpAddressExhaustedClient) as e:
raise exception.NoMoreFloatingIps(unicode(e))
return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
"""Get floatingip from floating ip address."""
if not address:
raise exception.FloatingIpNotFoundForAddress(address=address)
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
"""Get floatingips from fixed ip and port."""
try:
data = client.list_floatingips(fixed_ip_address=fixed_ip,
port_id=port)
# If a neutron plugin does not implement the L3 API a 404 from
# list_floatingips will be raised.
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
return []
with excutils.save_and_reraise_exception():
LOG.exception(_('Unable to access floating IP %(fixed_ip)s '
'for port %(port_id)s'),
{'fixed_ip': fixed_ip, 'port_id': port})
return data['floatingips']
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Remove a floating ip with the given address from a project."""
# Note(amotoki): We cannot handle a case where multiple pools
# have overlapping IP address range. In this case we cannot use
# 'address' as a unique key.
# This is a limitation of the current nova.
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
client.delete_floatingip(fip['id'])
@base_api.refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociate a floating ip from the instance."""
# Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
        # NOTE(wenjianhn): just pass so that migrating an instance does not
        # raise for now.
pass
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
if not self._has_port_binding_extension(context, refresh_cache=True):
return
neutron = neutronv2.get_client(context, admin=True)
search_opts = {'device_id': instance['uuid'],
'tenant_id': instance['project_id']}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
port_req_body = {'port': {'binding:host_id':
migration['dest_compute']}}
try:
neutron.update_port(p['id'], port_req_body)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Unable to update host of port %s")
LOG.exception(msg, p['id'])
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force add a network to the project."""
raise NotImplementedError()
def _nw_info_get_ips(self, client, port):
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
return network_IPs
def _nw_info_get_subnets(self, context, port, network_IPs):
subnets = self._get_subnets_from_port(context, port)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
return subnets
def _nw_info_build_network(self, port, networks, subnets):
network_name = None
for net in networks:
if port['network_id'] == net['id']:
network_name = net['name']
tenant_id = net['tenant_id']
break
else:
tenant_id = port['tenant_id']
LOG.warning(_("Network %(id)s not matched with the tenants "
"network! The ports tenant %(tenant_id)s will be "
"used."),
{'id': port['network_id'], 'tenant_id': tenant_id})
bridge = None
ovs_interfaceid = None
# Network model metadata
should_create_bridge = None
vif_type = port.get('binding:vif_type')
# TODO(berrange) Neutron should pass the bridge name
# in another binding metadata field
if vif_type == network_model.VIF_TYPE_OVS:
bridge = CONF.neutron_ovs_bridge
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = "brq" + port['network_id']
should_create_bridge = True
if bridge is not None:
bridge = bridge[:network_model.NIC_NAME_LEN]
network = network_model.Network(
id=port['network_id'],
bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=tenant_id
)
network['subnets'] = subnets
port_profile = port.get('binding:profile')
if port_profile:
physical_network = port_profile.get('physical_network')
if physical_network:
network['physical_network'] = physical_network
if should_create_bridge is not None:
network['should_create_bridge'] = should_create_bridge
return network, ovs_interfaceid
def _build_network_info_model(self, context, instance, networks=None,
port_ids=None):
"""Return list of ordered VIFs attached to instance.
:param context - request context.
:param instance - instance we are returning network info for.
:param networks - List of networks being attached to an instance.
If value is None this value will be populated
from the existing cached value.
:param port_ids - List of port_ids that are being attached to an
instance in order of attachment. If value is None
this value will be populated from the existing
cached value.
"""
search_opts = {'tenant_id': instance['project_id'],
'device_id': instance['uuid'], }
client = neutronv2.get_client(context, admin=True)
data = client.list_ports(**search_opts)
current_neutron_ports = data.get('ports', [])
networks, port_ids = self._gather_port_ids_and_networks(
context, instance, networks, port_ids)
nw_info = network_model.NetworkInfo()
current_neutron_port_map = {}
for current_neutron_port in current_neutron_ports:
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
vif_active = False
if (current_neutron_port['admin_state_up'] is False
or current_neutron_port['status'] == 'ACTIVE'):
vif_active = True
network_IPs = self._nw_info_get_ips(client,
current_neutron_port)
subnets = self._nw_info_get_subnets(context,
current_neutron_port,
network_IPs)
devname = "tap" + current_neutron_port['id']
devname = devname[:network_model.NIC_NAME_LEN]
network, ovs_interfaceid = (
self._nw_info_build_network(current_neutron_port,
networks, subnets))
nw_info.append(network_model.VIF(
id=current_neutron_port['id'],
address=current_neutron_port['mac_address'],
network=network,
type=current_neutron_port.get('binding:vif_type'),
details=current_neutron_port.get('binding:vif_details'),
ovs_interfaceid=ovs_interfaceid,
devname=devname,
active=vif_active))
return nw_info
def _get_subnets_from_port(self, context, port):
"""Return the subnets for a given port."""
fixed_ips = port['fixed_ips']
# No fixed_ips for the port means there is no subnet associated
# with the network the port is created on.
# Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, returned subnets may contain subnets which are not
# related to the port. To avoid this, the method returns here.
if not fixed_ips:
return []
search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
data = neutronv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
subnets = []
for subnet in ipam_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway_ip'],
type='gateway'),
}
# attempt to populate DHCP server field
search_opts = {'network_id': subnet['network_id'],
'device_owner': 'network:dhcp'}
data = neutronv2.get_client(context).list_ports(**search_opts)
dhcp_ports = data.get('ports', [])
for p in dhcp_ports:
for ip_pair in p['fixed_ips']:
if ip_pair['subnet_id'] == subnet['id']:
subnet_dict['dhcp_server'] = ip_pair['ip_address']
break
subnet_object = network_model.Subnet(**subnet_dict)
for dns in subnet.get('dns_nameservers', []):
subnet_object.add_dns(
network_model.IP(address=dns, type='dns'))
# TODO(gongysh) get the routes for this subnet
subnets.append(subnet_object)
return subnets
def get_dns_domains(self, context):
"""Return a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
raise NotImplementedError()
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
raise NotImplementedError()
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
raise NotImplementedError()
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
raise NotImplementedError()
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
raise NotImplementedError()
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
raise NotImplementedError()
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
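    # For example, with accessor=lambda net: net['id'],
    # unordered=[{'id': 'b'}, {'id': 'a'}] and preferred=['a', 'b'], the
    # list is sorted in place to [{'id': 'a'}, {'id': 'b'}].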
| afrolov1/nova | nova/network/neutronv2/api.py | Python | apache-2.0 | 57,638 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_non_resource_attributes import V1NonResourceAttributes
class TestV1NonResourceAttributes(unittest.TestCase):
""" V1NonResourceAttributes unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1NonResourceAttributes(self):
"""
Test V1NonResourceAttributes
"""
model = kubernetes.client.models.v1_non_resource_attributes.V1NonResourceAttributes()
if __name__ == '__main__':
unittest.main()
| skuda/client-python | kubernetes/test/test_v1_non_resource_attributes.py | Python | apache-2.0 | 925 |
#!/usr/bin/python
# Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""oldterm helps you interact with an OLD application via the command line (terminal).
Usage:
1. Setup an OLD web service and begin serving it.
$ cd old
$ paster setup-app development.ini
$ paster serve development.ini
2. Enter a Python prompt and import oldterm:
$ python
>>> import oldterm
>>> old = oldterm.OLD('127.0.0.1', '5000')
>>> old.login('username', 'password')
True
>>> forms = old.get('forms')
"""
import requests
import simplejson as json
class OLD(object):
"""Create an OLD instance to connect to a live OLD application."""
def __init__(self, host, port):
self.host = host
self.port = port
self.baseurl = 'http://%s:%s' % (host, port)
self.session = requests.Session()
self.session.headers.update({'Content-Type': 'application/json'})
def login(self, username, password):
payload = json.dumps({'username': username, 'password': password})
response = self.session.post('%s/login/authenticate' % self.baseurl, data=payload)
return response.json().get('authenticated', False)
def get(self, path, params=None):
response = self.session.get('%s/%s' % (self.baseurl, path), params=params)
return self.return_response(response)
    # data defaults to None so the body is JSON-encoded exactly once; the
    # previous json.dumps({}) default was itself re-encoded to '"{}"'.
    def post(self, path, data=None):
        response = self.session.post('%s/%s' % (self.baseurl, path), data=json.dumps(data or {}))
        return self.return_response(response)
    def put(self, path, data=None):
        response = self.session.put('%s/%s' % (self.baseurl, path), data=json.dumps(data or {}))
        return self.return_response(response)
    def delete(self, path, data=None):
        response = self.session.delete('%s/%s' % (self.baseurl, path), data=json.dumps(data or {}))
        return self.return_response(response)
def search(self, path, data):
response = self.session.request('SEARCH', '%s/%s' % (self.baseurl, path), data=json.dumps(data))
return self.return_response(response)
def return_response(self, response):
try:
return response.json()
except Exception:
return response
form_create_params = {
'transcription': u'',
'phonetic_transcription': u'',
'narrow_phonetic_transcription': u'',
'morpheme_break': u'',
'grammaticality': u'',
'morpheme_gloss': u'',
'translations': [],
'comments': u'',
'speaker_comments': u'',
'elicitation_method': u'',
'tags': [],
'syntactic_category': u'',
'speaker': u'',
'elicitor': u'',
'verifier': u'',
'source': u'',
'status': u'tested',
'date_elicited': u'', # mm/dd/yyyy
'syntax': u'',
'semantics': u''
}
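# A minimal sketch of creating a form with the params above (server address,
# credentials and field values are hypothetical; translation dicts are
# assumed to take 'transcription' and 'grammaticality' keys):
# >>> old = OLD('127.0.0.1', '5000')
# >>> old.login('username', 'password')
# >>> params = dict(form_create_params)
# >>> params['transcription'] = u'example sentence'
# >>> params['translations'] = [{'transcription': u'a translation',
# ...                            'grammaticality': u''}]
# >>> old.post('forms', params)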
def printform(form):
"""Print an OLD form to the terminal"""
tmp = [('id', form['id'])]
if form.get('narrow_phonetic_transcription', None): tmp.append(('NP', form['narrow_phonetic_transcription']))
if form.get('phonetic_transcription', None): tmp.append(('BP', form['phonetic_transcription']))
tmp.append(('TR', '%s%s' % (form['grammaticality'], form['transcription'])))
if form.get('morpheme_break', None): tmp.append(('MB', form['morpheme_break']))
if form.get('morpheme_gloss', None): tmp.append(('MG', form['morpheme_gloss']))
tmp.append(('TL', ', '.join([u'\u2018%s\u2019' % tl['transcription'] for tl in form['translations']])))
if form.get('syntactic_category_string', None): tmp.append(('SCS', form['syntactic_category_string']))
if form.get('break_gloss_category', None): tmp.append(('BGC', form['break_gloss_category']))
if form.get('syntactic_category', None): tmp.append(('SC', form['syntactic_category']['name']))
print u'\n'.join([u'%-5s%s' % (u'%s:' % t[0], t[1]) for t in tmp])
class NTKOLD(OLD):
"""Subclass of the OLD that provides some Nata (NTK)-specific attributes.
"""
orthography = [u'mb', u'nd', u'ng', u't', u'ch', u'h', u'k', u'm', u'n', u'ny',
u"ng'", u'r', u'bh', u's', u'sh', u'gh', u'w', u'y', u'i', u'i\u0301', u'u',
u'u\u0301', u'e', u'e\u0301', u'o', u'o\u0301', u'e\u0323', u'e\u0323\u0301',
u'o\u0323', u'o\u0323\u0301', u'a', u'a\u0301']
C = [u'mb', u'nd', u'ng', u't', u'ch', u'h', u'k', u'm', u'n', u'ny',
u"ng'", u'r', u'bh', u's', u'sh', u'gh', u'w', u'y']
C = u'(%s)' % u'|'.join(C)
V = [u'i', u'i\u0301', u'u', u'u\u0301', u'e', u'e\u0301', u'o', u'o\u0301',
u'e\u0323', u'e\u0323\u0301', u'o\u0323', u'o\u0323\u0301', u'a', u'a\u0301']
V = u'(%s)' % u'|'.join(V)
H = [u'i\u0301', u'u\u0301', u'e\u0301', u'o\u0301',
u'e\u0323\u0301', u'o\u0323\u0301', u'a\u0301']
H = u'(%s)' % u'|'.join(H)
L = u'(%s)' % u'|'.join([u'i', u'u', u'e', u'o', u'e\u0323', u'o\u0323'])
CV = u'%s%s' % (C, V)
CVV = u'%s%s' % (CV, V)
CH = u'%s%s' % (C, H)
CL = u'%s%s' % (C, L)
# Find all forms that are CVV morphemes
CVV_m = '^%s$' % CVV
# Find all forms containing words that are CL.CL.CH
CLCLCH_w = '(^| )%s%s%s($| )' % (CL, CL, CH)
# Find all forms containing morphemes of the form GREEK LETTER BETA followed by "u"
# i.e., PASS or C14
bu_m = u'-\u03b2u(-| |$)'
# Find all forms containing morphemes of the form GREEK LETTER BETA followed by "u"
# i.e., PASS or C14
PASS_m = u'-PASS(-| |$)'
# Find all /Bu/ 'PASS' forms
bu_PASS_m = ur'-\u03b2u\|PASS\|[^-]+(-| |$)'
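    # A hedged usage sketch (the exact OLD search payload shape is assumed,
    # not verified here): find all forms whose morpheme_break matches the
    # CVV-morpheme pattern defined above.
    # >>> ntk = NTKOLD('127.0.0.1', '5000')
    # >>> ntk.login('username', 'password')
    # >>> query = {'query': {'filter':
    # ...     ['Form', 'morpheme_break', 'regex', NTKOLD.CVV_m]}}
    # >>> matches = ntk.search('forms', query)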
| jrwdunham/old | oldterm.py | Python | apache-2.0 | 6,105 |
import os
import glob
import sys
import re
if __name__ == '__main__':
script, subCommand, workingDir, jdkDir, outputDir, optionsPath, classPathsPath, sourcesPath, logPath, listFilePath, faninPath = sys.argv
def read_list_file():
if os.path.exists(listFilePath):
with open(listFilePath, "rt") as listFile:
outputList = [path.rstrip() for path in listFile.readlines()]
return outputList
else:
return []
def unlink_old_outputs(outputList):
for path in outputList:
if os.path.exists(path):
os.unlink(path)
def generate_list_file():
with open(sourcesPath, "rt") as sourcesFile:
sourcesList = sourcesFile.readlines()
with open(listFilePath, "wt") as listFile:
for sourcePath in sourcesList:
if os.path.isabs(sourcePath):
# TODO: handle this better
print("error: cannot handle outputs for absolute source paths:\n %s", sourcePath)
exit(1)
else:
# strip off ".java" and normalize slashes
basePath = os.path.normpath(os.path.join(outputDir, sourcePath)[0:-5])
listFile.write("%s.class\n" % basePath)
implicitOutputs = glob.glob("%s$*.class" % basePath)
for implicitOutput in implicitOutputs:
listFile.write("%s\n" % implicitOutput)
def create_class_path_options_file(fileName):
with open(classPathsPath, "rt") as classPathsFile:
classPathList = classPathsFile.readlines()
with open(fileName, "wt") as file:
file.write("-classpath \".")
for classPath in classPathList:
file.write(os.pathsep)
file.write(classPath.replace("\\", "/"))
file.write("\"")
file.write(" -implicit:none")
def java_compile(outputList):
if os.path.exists(listFilePath):
os.unlink(listFilePath)
unlink_old_outputs(outputList)
cpOptionsPath = classPathsPath + ".t"
create_class_path_options_file(cpOptionsPath)
cmd = "javac \"@%s\" \"@%s\" \"@%s\" -d \"%s\" > \"%s\" 2>&1" % (optionsPath, cpOptionsPath, sourcesPath, outputDir, logPath)
exitcode = os.system(cmd)
os.unlink(cpOptionsPath)
with open(logPath, "rt") as logFile:
logContents = logFile.read()
if re.search("warning:|error:", logContents, re.MULTILINE):
print("%s" % logContents)
if exitcode:
sys.exit(exitcode)
generate_list_file()
def escape_path(path):
return path.replace(" ", "\\ ").replace("$", "\\$")
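    # e.g. escape_path("out/My Class$1.class") -> "out/My\ Class\$1.class",
    # matching make-style escaping in the generated depfile.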
def all_paths_exist(pathList):
for path in pathList:
if not os.path.exists(path):
return False
return True
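    # generate_fanin_file (below) writes a single sentinel file plus a
    # make-style depfile listing every .class output, so downstream build
    # steps can depend on one fixed path instead of a dynamically sized
    # output list.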
def generate_fanin_file(outputList):
if os.path.exists(faninPath):
os.unlink(faninPath)
# do not unlink the faninDeps file, so that upon error it remains dirty
# if any implicit-output was deleted, redo the build command
if not all_paths_exist(outputList):
            java_compile(outputList)
faninDepsPath = faninPath + ".d"
with open(faninDepsPath, "wt") as faninDepsFile:
faninDepsFile.write("%s: \\\n" % escape_path(faninPath))
for path in outputList:
faninDepsFile.write("%s \\\n" % escape_path(path))
with open(faninPath, "wt") as faninFile:
faninFile.write("1")
os.chdir(workingDir)
if not os.path.exists(outputDir):
os.makedirs(outputDir)
oldPathEnv = os.environ['PATH']
os.environ['PATH'] = "%s%sbin%s%s" % (jdkDir, os.sep, os.pathsep, oldPathEnv)
    os.environ['JAVA_HOME'] = os.path.join(jdkDir, "jre")
outputList = read_list_file()
if subCommand == "compile":
java_compile(outputList)
elif subCommand == "fanin":
generate_fanin_file(outputList)
else:
print("error: unknown subCommand")
sys.exit(1)
sys.exit(0)
| fifoforlifo/pynja | packages/pynja/scripts/javac-invoke.py | Python | apache-2.0 | 4,162 |
# Copyright 2017-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Motor, an asynchronous driver for MongoDB and Tornado."""
from __future__ import unicode_literals
import copy
import sys
import unittest
from pymongo import InsertOne, IndexModel
from tornado import gen
from pymongo.errors import InvalidOperation
from tornado.testing import gen_test
from test import SkipTest
from test.test_environment import env
from test.tornado_tests import MotorTest
from test.utils import TestListener, session_ids
class MotorSessionTest(MotorTest):
@classmethod
def setUpClass(cls):
super(MotorSessionTest, cls).setUpClass()
if not env.sessions_enabled:
raise SkipTest("Sessions not supported")
@gen.coroutine
def _test_ops(self, client, *ops):
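        # Run each (coroutine, args, kwargs) triple twice: first with an
        # explicit session, asserting every command carries that session's
        # lsid, then without one, asserting an implicit session is used and
        # returned to the pool afterwards.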
listener = client.event_listeners()[0][0]
for f, args, kw in ops:
# Simulate "async with" on all Pythons.
s = yield client.start_session()
try:
listener.results.clear()
# In case "f" modifies its inputs.
args2 = copy.copy(args)
kw2 = copy.copy(kw)
kw2['session'] = s
yield f(*args2, **kw2)
for event in listener.results['started']:
self.assertTrue(
'lsid' in event.command,
"%s sent no lsid with %s" % (
f.__name__, event.command_name))
self.assertEqual(
s.session_id,
event.command['lsid'],
"%s sent wrong lsid with %s" % (
f.__name__, event.command_name))
self.assertFalse(s.has_ended)
finally:
yield s.end_session()
with self.assertRaises(InvalidOperation) as ctx:
yield f(*args2, **kw2)
self.assertIn("ended session", str(ctx.exception))
# No explicit session.
for f, args, kw in ops:
listener.results.clear()
yield f(*args, **kw)
self.assertGreaterEqual(len(listener.results['started']), 1)
lsids = []
for event in listener.results['started']:
self.assertTrue(
'lsid' in event.command,
"%s sent no lsid with %s" % (
f.__name__, event.command_name))
lsids.append(event.command['lsid'])
if 'PyPy' not in sys.version:
# Server session was returned to pool. Ignore interpreters with
# non-deterministic GC.
for lsid in lsids:
self.assertIn(
lsid, session_ids(client),
"%s did not return implicit session to pool" % (
f.__name__,))
@gen_test
def test_database(self):
listener = TestListener()
client = self.motor_client(event_listeners=[listener])
db = client.pymongo_test
ops = [
(db.command, ['ping'], {}),
(db.drop_collection, ['collection'], {}),
(db.create_collection, ['collection'], {}),
(db.list_collection_names, [], {}),
]
yield self._test_ops(client, *ops)
@gen_test(timeout=30)
def test_collection(self):
listener = TestListener()
client = self.motor_client(event_listeners=[listener])
yield client.drop_database('motor_test')
coll = client.motor_test.test_collection
@gen.coroutine
def list_indexes(session=None):
yield coll.list_indexes(session=session).to_list(length=None)
@gen.coroutine
def aggregate(session=None):
yield coll.aggregate([], session=session).to_list(length=None)
# Test some collection methods - the rest are in test_cursor.
yield self._test_ops(
client,
(coll.drop, [], {}),
(coll.bulk_write, [[InsertOne({})]], {}),
(coll.insert_one, [{}], {}),
(coll.insert_many, [[{}, {}]], {}),
(coll.replace_one, [{}, {}], {}),
(coll.update_one, [{}, {'$set': {'a': 1}}], {}),
(coll.update_many, [{}, {'$set': {'a': 1}}], {}),
(coll.delete_one, [{}], {}),
(coll.delete_many, [{}], {}),
(coll.find_one_and_replace, [{}, {}], {}),
(coll.find_one_and_update, [{}, {'$set': {'a': 1}}], {}),
(coll.find_one_and_delete, [{}, {}], {}),
(coll.rename, ['collection2'], {}),
# Drop collection2 between tests of "rename", above.
(client.motor_test.drop_collection, ['collection2'], {}),
(coll.distinct, ['a'], {}),
(coll.find_one, [], {}),
(coll.count_documents, [{}], {}),
(coll.create_indexes, [[IndexModel('a')]], {}),
(coll.create_index, ['a'], {}),
(coll.drop_index, ['a_1'], {}),
(coll.drop_indexes, [], {}),
(list_indexes, [], {}),
(coll.index_information, [], {}),
(coll.options, [], {}),
(aggregate, [], {}))
@gen_test
def test_cursor(self):
listener = TestListener()
client = self.motor_client(event_listeners=[listener])
yield self.make_test_data()
coll = client.motor_test.test_collection
s = yield client.start_session()
# Simulate "async with" on all Pythons.
try:
listener.results.clear()
cursor = coll.find(session=s)
yield cursor.to_list(length=None)
self.assertEqual(len(listener.results['started']), 2)
for event in listener.results['started']:
self.assertTrue(
'lsid' in event.command,
"find sent no lsid with %s" % (event.command_name,))
self.assertEqual(
s.session_id,
event.command['lsid'],
"find sent wrong lsid with %s" % (event.command_name,))
finally:
yield s.end_session()
with self.assertRaises(InvalidOperation) as ctx:
yield coll.find(session=s).to_list(length=None)
self.assertIn("ended session", str(ctx.exception))
# No explicit session.
listener.results.clear()
cursor = coll.find()
yield cursor.to_list(length=None)
self.assertEqual(len(listener.results['started']), 2)
event0 = listener.first_command_started()
self.assertTrue(
'lsid' in event0.command,
"find sent no lsid with %s" % (event0.command_name,))
lsid = event0.command['lsid']
for event in listener.results['started'][1:]:
self.assertTrue(
'lsid' in event.command,
"find sent no lsid with %s" % (event.command_name,))
self.assertEqual(
lsid,
event.command['lsid'],
"find sent wrong lsid with %s" % (event.command_name,))
@gen_test
def test_options(self):
s = yield self.cx.start_session()
self.assertTrue(s.options.causal_consistency)
s = yield self.cx.start_session(False)
self.assertFalse(s.options.causal_consistency)
s = yield self.cx.start_session(causal_consistency=True)
self.assertTrue(s.options.causal_consistency)
s = yield self.cx.start_session(causal_consistency=False)
self.assertFalse(s.options.causal_consistency)
if __name__ == '__main__':
unittest.main()
| wujuguang/motor | test/tornado_tests/test_motor_session.py | Python | apache-2.0 | 8,232 |
"""
sphinx.roles
~~~~~~~~~~~~
Handlers for additional ReST roles.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type
from docutils import nodes, utils
from docutils.nodes import Element, Node, TextElement, system_message
from sphinx import addnodes
from sphinx.locale import _
from sphinx.util import ws_re
from sphinx.util.docutils import ReferenceRole, SphinxRole
from sphinx.util.typing import RoleFunction
if TYPE_CHECKING:
from sphinx.application import Sphinx
from sphinx.environment import BuildEnvironment
generic_docroles = {
'command': addnodes.literal_strong,
'dfn': nodes.emphasis,
'kbd': nodes.literal,
'mailheader': addnodes.literal_emphasis,
'makevar': addnodes.literal_strong,
'manpage': addnodes.manpage,
'mimetype': addnodes.literal_emphasis,
'newsgroup': addnodes.literal_emphasis,
'program': addnodes.literal_strong, # XXX should be an x-ref
'regexp': nodes.literal,
}
# -- generic cross-reference role ----------------------------------------------
class XRefRole(ReferenceRole):
"""
A generic cross-referencing role. To create a callable that can be used as
a role function, create an instance of this class.
The general features of this role are:
* Automatic creation of a reference and a content node.
* Optional separation of title and target with `title <target>`.
* The implementation is a class rather than a function to make
customization easier.
Customization can be done in two ways:
* Supplying constructor parameters:
* `fix_parens` to normalize parentheses (strip from target, and add to
title if configured)
* `lowercase` to lowercase the target
* `nodeclass` and `innernodeclass` select the node classes for
the reference and the content node
* Subclassing and overwriting `process_link()` and/or `result_nodes()`.
"""
nodeclass: Type[Element] = addnodes.pending_xref
innernodeclass: Type[TextElement] = nodes.literal
def __init__(self, fix_parens: bool = False, lowercase: bool = False,
nodeclass: Type[Element] = None, innernodeclass: Type[TextElement] = None,
warn_dangling: bool = False) -> None:
self.fix_parens = fix_parens
self.lowercase = lowercase
self.warn_dangling = warn_dangling
if nodeclass is not None:
self.nodeclass = nodeclass
if innernodeclass is not None:
self.innernodeclass = innernodeclass
super().__init__()
def update_title_and_target(self, title: str, target: str) -> Tuple[str, str]:
if not self.has_explicit_title:
if title.endswith('()'):
# remove parentheses
title = title[:-2]
if self.config.add_function_parentheses:
# add them back to all occurrences if configured
title += '()'
# remove parentheses from the target too
if target.endswith('()'):
target = target[:-2]
return title, target
def run(self) -> Tuple[List[Node], List[system_message]]:
if ':' not in self.name:
self.refdomain, self.reftype = '', self.name
self.classes = ['xref', self.reftype]
else:
self.refdomain, self.reftype = self.name.split(':', 1)
self.classes = ['xref', self.refdomain, '%s-%s' % (self.refdomain, self.reftype)]
if self.disabled:
return self.create_non_xref_node()
else:
return self.create_xref_node()
def create_non_xref_node(self) -> Tuple[List[Node], List[system_message]]:
text = utils.unescape(self.text[1:])
if self.fix_parens:
self.has_explicit_title = False # treat as implicit
text, target = self.update_title_and_target(text, "")
node = self.innernodeclass(self.rawtext, text, classes=self.classes)
return self.result_nodes(self.inliner.document, self.env, node, is_ref=False)
def create_xref_node(self) -> Tuple[List[Node], List[system_message]]:
target = self.target
title = self.title
if self.lowercase:
target = target.lower()
if self.fix_parens:
title, target = self.update_title_and_target(title, target)
# create the reference node
options = {'refdoc': self.env.docname,
'refdomain': self.refdomain,
'reftype': self.reftype,
'refexplicit': self.has_explicit_title,
'refwarn': self.warn_dangling}
refnode = self.nodeclass(self.rawtext, **options)
self.set_source_info(refnode)
# determine the target and title for the class
title, target = self.process_link(self.env, refnode, self.has_explicit_title,
title, target)
refnode['reftarget'] = target
refnode += self.innernodeclass(self.rawtext, title, classes=self.classes)
return self.result_nodes(self.inliner.document, self.env, refnode, is_ref=True)
# methods that can be overwritten
def process_link(self, env: "BuildEnvironment", refnode: Element, has_explicit_title: bool,
title: str, target: str) -> Tuple[str, str]:
"""Called after parsing title and target text, and creating the
reference node (given in *refnode*). This method can alter the
reference node and must return a new (or the same) ``(title, target)``
tuple.
"""
return title, ws_re.sub(' ', target)
def result_nodes(self, document: nodes.document, env: "BuildEnvironment", node: Element,
is_ref: bool) -> Tuple[List[Node], List[system_message]]:
"""Called before returning the finished nodes. *node* is the reference
node if one was created (*is_ref* is then true), else the content node.
This method can add other nodes and must return a ``(nodes, messages)``
tuple (the usual return value of a role function).
"""
return [node], []
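# A minimal sketch of the subclassing hook described above (the class name
# is illustrative):
#
#     class LowercasingXRefRole(XRefRole):
#         def process_link(self, env, refnode, has_explicit_title,
#                          title, target):
#             # normalize targets before resolution
#             return title, ws_re.sub(' ', target.lower())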
class AnyXRefRole(XRefRole):
def process_link(self, env: "BuildEnvironment", refnode: Element, has_explicit_title: bool,
title: str, target: str) -> Tuple[str, str]:
result = super().process_link(env, refnode, has_explicit_title, title, target)
# add all possible context info (i.e. std:program, py:module etc.)
refnode.attributes.update(env.ref_context)
return result
class PEP(ReferenceRole):
def run(self) -> Tuple[List[Node], List[system_message]]:
target_id = 'index-%s' % self.env.new_serialno('index')
entries = [('single', _('Python Enhancement Proposals; PEP %s') % self.target,
target_id, '', None)]
index = addnodes.index(entries=entries)
target = nodes.target('', '', ids=[target_id])
self.inliner.document.note_explicit_target(target)
try:
refuri = self.build_uri()
reference = nodes.reference('', '', internal=False, refuri=refuri, classes=['pep'])
if self.has_explicit_title:
reference += nodes.strong(self.title, self.title)
else:
title = "PEP " + self.title
reference += nodes.strong(title, title)
except ValueError:
msg = self.inliner.reporter.error('invalid PEP number %s' % self.target,
line=self.lineno)
prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
return [prb], [msg]
return [index, target, reference], []
def build_uri(self) -> str:
base_url = self.inliner.document.settings.pep_base_url
ret = self.target.split('#', 1)
if len(ret) == 2:
return base_url + 'pep-%04d#%s' % (int(ret[0]), ret[1])
else:
return base_url + 'pep-%04d' % int(ret[0])
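# e.g. with a pep_base_url of 'https://www.python.org/dev/peps/'
# (illustrative), :pep:`8` builds .../pep-0008 and :pep:`8#imports`
# builds .../pep-0008#imports.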
class RFC(ReferenceRole):
def run(self) -> Tuple[List[Node], List[system_message]]:
target_id = 'index-%s' % self.env.new_serialno('index')
entries = [('single', 'RFC; RFC %s' % self.target, target_id, '', None)]
index = addnodes.index(entries=entries)
target = nodes.target('', '', ids=[target_id])
self.inliner.document.note_explicit_target(target)
try:
refuri = self.build_uri()
reference = nodes.reference('', '', internal=False, refuri=refuri, classes=['rfc'])
if self.has_explicit_title:
reference += nodes.strong(self.title, self.title)
else:
title = "RFC " + self.title
reference += nodes.strong(title, title)
except ValueError:
msg = self.inliner.reporter.error('invalid RFC number %s' % self.target,
line=self.lineno)
prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
return [prb], [msg]
return [index, target, reference], []
def build_uri(self) -> str:
base_url = self.inliner.document.settings.rfc_base_url
ret = self.target.split('#', 1)
if len(ret) == 2:
return base_url + self.inliner.rfc_url % int(ret[0]) + '#' + ret[1]
else:
return base_url + self.inliner.rfc_url % int(ret[0])
_amp_re = re.compile(r'(?<!&)&(?![&\s])')
class GUILabel(SphinxRole):
amp_re = re.compile(r'(?<!&)&(?![&\s])')
def run(self) -> Tuple[List[Node], List[system_message]]:
node = nodes.inline(rawtext=self.rawtext, classes=[self.name])
spans = self.amp_re.split(self.text)
node += nodes.Text(spans.pop(0))
for span in spans:
span = span.replace('&&', '&')
letter = nodes.Text(span[0])
accelerator = nodes.inline('', '', letter, classes=['accelerator'])
node += accelerator
node += nodes.Text(span[1:])
return [node], []
class MenuSelection(GUILabel):
BULLET_CHARACTER = '\N{TRIANGULAR BULLET}'
def run(self) -> Tuple[List[Node], List[system_message]]:
self.text = self.text.replace('-->', self.BULLET_CHARACTER)
return super().run()
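# e.g. :menuselection:`&File --> Open` renders "File ‣ Open", with "F"
# wrapped as an accelerator via the inherited GUILabel handling.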
_litvar_re = re.compile('{([^}]+)}')
parens_re = re.compile(r'(\\*{|\\*})')
class EmphasizedLiteral(SphinxRole):
parens_re = re.compile(r'(\\\\|\\{|\\}|{|})')
def run(self) -> Tuple[List[Node], List[system_message]]:
children = self.parse(self.text)
node = nodes.literal(self.rawtext, '', *children,
role=self.name.lower(), classes=[self.name])
return [node], []
def parse(self, text: str) -> List[Node]:
result: List[Node] = []
stack = ['']
for part in self.parens_re.split(text):
if part == '\\\\': # escaped backslash
stack[-1] += '\\'
elif part == '{':
if len(stack) >= 2 and stack[-2] == "{": # nested
stack[-1] += "{"
else:
# start emphasis
stack.append('{')
stack.append('')
elif part == '}':
if len(stack) == 3 and stack[1] == "{" and len(stack[2]) > 0:
# emphasized word found
if stack[0]:
result.append(nodes.Text(stack[0], stack[0]))
result.append(nodes.emphasis(stack[2], stack[2]))
stack = ['']
else:
# emphasized word not found; the rparen is not a special symbol
stack.append('}')
stack = [''.join(stack)]
elif part == '\\{': # escaped left-brace
stack[-1] += '{'
elif part == '\\}': # escaped right-brace
stack[-1] += '}'
else: # others (containing escaped braces)
stack[-1] += part
if ''.join(stack):
# remaining is treated as Text
text = ''.join(stack)
result.append(nodes.Text(text, text))
return result
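# e.g. parsing ':samp:`print 1+{variable}`' emphasizes "variable" as a
# placeholder inside the literal, while escaped braces ("\{", "\}") and
# "\\" are emitted as literal characters.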
_abbr_re = re.compile(r'\((.*)\)$', re.S)
class Abbreviation(SphinxRole):
abbr_re = re.compile(r'\((.*)\)$', re.S)
def run(self) -> Tuple[List[Node], List[system_message]]:
options = self.options.copy()
matched = self.abbr_re.search(self.text)
if matched:
text = self.text[:matched.start()].strip()
options['explanation'] = matched.group(1)
else:
text = self.text
return [nodes.abbreviation(self.rawtext, text, **options)], []
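# e.g. :abbr:`LIFO (last-in, first-out)` renders "LIFO" with
# "last-in, first-out" stored as the explanation.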
specific_docroles: Dict[str, RoleFunction] = {
# links to download references
'download': XRefRole(nodeclass=addnodes.download_reference),
# links to anything
'any': AnyXRefRole(warn_dangling=True),
'pep': PEP(),
'rfc': RFC(),
'guilabel': GUILabel(),
'menuselection': MenuSelection(),
'file': EmphasizedLiteral(),
'samp': EmphasizedLiteral(),
'abbr': Abbreviation(),
}
def setup(app: "Sphinx") -> Dict[str, Any]:
from docutils.parsers.rst import roles
for rolename, nodeclass in generic_docroles.items():
generic = roles.GenericRole(rolename, nodeclass)
role = roles.CustomRole(rolename, generic, {'classes': [rolename]})
roles.register_local_role(rolename, role)
for rolename, func in specific_docroles.items():
roles.register_local_role(rolename, func)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/sphinx/roles.py | Python | apache-2.0 | 13,846 |
import unittest
from mock import patch, Mock, call
from the_ark import jcr_helpers
import requests.exceptions
TEST_URL = "http://www.test.com"
TEST_PATH = "/content/path"
JCR_NON_PAGE = {
"jcr:primaryType": "nt:unstructured",
"jcr:lastModifiedBy": "admin",
"jcr:lastModified": "Thu Dec 08 2016 00:19:17 GMT+0000"
}
JCR_GREATGRANDCHILD = {
"jcr:primaryType": "cq:Page",
"jcr:createdBy": "admin",
"jcr:created": "Thu Jan 18 2018 00:17:21 GMT+0000",
"jcr:content": {
"jcr:primaryType": "cq:PageContent",
"jcr:createdBy": "admin",
"jcr:title": "Great Grandchild 1",
"jcr:versionHistory": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"cq:template": "/greatgrandchild",
"jcr:lastModifiedBy": "admin",
"jcr:predecessors": [
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
],
"jcr:created": "Thu Jan 18 2018 00:17:21 GMT+0000",
"cq:lastModified": "Wed Jan 11 2017 16:15:23 GMT+0000",
"jcr:baseVersion": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"jcr:lastModified": "Wed Jan 11 2017 16:15:23 GMT+0000",
"jcr:uuid": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"sling:resourceType": "/greatgrandchild",
"cq:lastModifiedBy": "admin"
}
}
JCR_GRANDCHILD_1 = {
"jcr:primaryType": "cq:Page",
"jcr:createdBy": "admin",
"jcr:created": "Thu Jan 18 2018 00:17:21 GMT+0000",
"jcr:content": {
"jcr:primaryType": "cq:PageContent",
"jcr:createdBy": "admin",
"jcr:title": "Grandchild 1",
"jcr:versionHistory": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"cq:template": "/grandchild1",
"jcr:lastModifiedBy": "admin",
"jcr:predecessors": [
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
],
"jcr:created": "Thu Jan 18 2018 00:17:21 GMT+0000",
"cq:lastModified": "Wed Jan 11 2017 16:15:23 GMT+0000",
"jcr:baseVersion": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"jcr:lastModified": "Wed Jan 11 2017 16:15:23 GMT+0000",
"jcr:uuid": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"sling:resourceType": "/grandchild1",
"cq:lastModifiedBy": "admin"
},
"great_grandchild1": JCR_GREATGRANDCHILD
}
JCR_GRANDCHILD_2 = {
"jcr:primaryType": "cq:Page",
"jcr:createdBy": "admin",
"jcr:created": "Thu Jan 18 2018 00:17:21 GMT+0000",
"jcr:content": {
"jcr:primaryType": "cq:PageContent",
"jcr:createdBy": "admin",
"jcr:title": "Grandchild 2",
"jcr:versionHistory": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"cq:template": "/grandchild2",
"jcr:lastModifiedBy": "admin",
"jcr:predecessors": [
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
],
"jcr:created": "Thu Jan 18 2018 00:17:21 GMT+0000",
"cq:lastModified": "Wed Jan 11 2017 16:15:23 GMT+0000",
"jcr:baseVersion": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"jcr:lastModified": "Wed Jan 11 2017 16:15:23 GMT+0000",
"jcr:uuid": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"sling:resourceType": "/grandchild2",
"cq:lastModifiedBy": "admin"
},
"nonpage3": JCR_NON_PAGE,
}
JCR_CHILD_1 = {
"jcr:primaryType": "cq:Page",
"jcr:createdBy": "admin",
"jcr:created": "Mon Feb 19 2018 00:17:26 GMT+0000",
"jcr:content": {
"jcr:primaryType": "cq:PageContent",
"jcr:createdBy": "admin",
"jcr:title": "Child 1",
"jcr:versionHistory": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"cq:template": "/child",
"jcr:lastModifiedBy": "admin",
"jcr:predecessors": [
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
],
"jcr:created": "Fri Dec 09 2016 18:34:21 GMT+0000",
"cq:lastModified": "Mon Feb 06 2017 17:33:11 GMT+0000",
"jcr:baseVersion": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"jcr:uuid": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"sling:resourceType": "/child",
"cq:lastModifiedBy": "admin"
}
}
JCR_CHILD_2 = {
"jcr:primaryType": "cq:Page",
"jcr:createdBy": "admin",
"jcr:created": "Thu Jan 18 2018 00:17:21 GMT+0000",
"jcr:content": {
"jcr:primaryType": "cq:PageContent",
"jcr:createdBy": "admin",
"jcr:title": "Child 2",
"jcr:versionHistory": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"cq:template": "/child2",
"jcr:lastModifiedBy": "admin",
"jcr:predecessors": [
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
],
"jcr:created": "Thu Jan 18 2018 00:17:21 GMT+0000",
"cq:lastModified": "Wed Nov 08 2017 18:22:25 GMT+0000",
"jcr:description": "testing",
"jcr:baseVersion": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"jcr:lastModified": "Thu Jan 12 2017 18:40:21 GMT+0000",
"jcr:uuid": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"sling:resourceType": "/child",
"cq:lastModifiedBy": "admin"
},
"nonpage1": JCR_NON_PAGE,
"nonpage2": JCR_NON_PAGE,
"grandchild1": JCR_GRANDCHILD_1,
"grandchild2": JCR_GRANDCHILD_2
}
class UtilsTestCase(unittest.TestCase):
def setUp(self):
self.jcr_content_infinity_dict = {
"jcr:primaryType": "cq:Page",
"jcr:createdBy": "admin",
"jcr:created": "Thu Jan 18 2018 00:17:21 GMT+0000",
"jcr:content": {
"jcr:primaryType": "cq:PageContent",
"jcr:createdBy": "admin",
"jcr:title": "Root",
"jcr:versionHistory": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"cq:template": "/root",
"jcr:lastModifiedBy": "admin",
"jcr:predecessors": [
"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
],
"jcr:created": "Thu Jan 18 2018 00:17:21 GMT+0000",
"cq:lastModified": "Mon Apr 24 2017 20:44:33 GMT+0000",
"jcr:baseVersion": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"jcr:lastModified": "Mon Apr 24 2017 20:44:33 GMT+0000",
"jcr:uuid": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
"sling:resourceType": "/root",
"cq:designPath": "/etc/designs/test",
"cq:lastModifiedBy": "admin"
},
"nonpage": JCR_NON_PAGE,
"child1": JCR_CHILD_1,
"child2": JCR_CHILD_2
}
self.jcr_content_infinity_list = [
"{}.2.json".format(TEST_PATH),
"{}.1.json".format(TEST_PATH),
"{}.0.json"
]
self.page_hierarchy_with_jcr_content = {
'{}.html'.format(TEST_PATH): {
'children': ['{}/child1.html'.format(TEST_PATH), '{}/child2.html'.format(TEST_PATH)],
'template': self.jcr_content_infinity_dict["jcr:content"]["cq:template"],
'depth': 0,
'parent': False,
'jcr_content': self.jcr_content_infinity_dict["jcr:content"],
'url': '{}{}.html'.format(TEST_URL, TEST_PATH)
},
'{}/child1.html'.format(TEST_PATH): {
'children': [],
'template': JCR_CHILD_1["jcr:content"]["cq:template"],
'depth': 1,
'parent': '{}.html'.format(TEST_PATH),
'jcr_content': JCR_CHILD_1["jcr:content"],
'url': '{}{}/child1.html'.format(TEST_URL, TEST_PATH)
},
'{}/child2.html'.format(TEST_PATH): {
'children': ['{}/child2/grandchild1.html'.format(TEST_PATH),
'{}/child2/grandchild2.html'.format(TEST_PATH)],
'template': JCR_CHILD_2["jcr:content"]["cq:template"],
'depth': 1,
'parent': '{}.html'.format(TEST_PATH),
'jcr_content': JCR_CHILD_2["jcr:content"],
'url': '{}{}/child2.html'.format(TEST_URL, TEST_PATH)
},
'{}/child2/grandchild1.html'.format(TEST_PATH): {
'children': ['{}/child2/grandchild1/great_grandchild1.html'.format(TEST_PATH)],
'template': JCR_GRANDCHILD_1["jcr:content"]["cq:template"],
'depth': 2,
'parent': '{}/child2.html'.format(TEST_PATH),
'jcr_content': JCR_GRANDCHILD_1["jcr:content"],
'url': '{}{}/child2/grandchild1.html'.format(TEST_URL, TEST_PATH)
},
'{}/child2/grandchild1/great_grandchild1.html'.format(TEST_PATH): {
'children': [],
'template': JCR_GREATGRANDCHILD["jcr:content"]["cq:template"],
'depth': 3,
'parent': '{}/child2/grandchild1.html'.format(TEST_PATH),
'jcr_content': JCR_GREATGRANDCHILD["jcr:content"],
'url': '{}{}/child2/grandchild1/great_grandchild1.html'.format(TEST_URL, TEST_PATH)
},
'{}/child2/grandchild2.html'.format(TEST_PATH): {
'children': [],
'template': JCR_GRANDCHILD_2["jcr:content"]["cq:template"],
'depth': 2,
'parent': '{}/child2.html'.format(TEST_PATH),
'jcr_content': JCR_GRANDCHILD_2["jcr:content"],
'url': '{}{}/child2/grandchild2.html'.format(TEST_URL, TEST_PATH)
}
}
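        # Expected default output of get_page_hierarchy(): the same tree as
        # above with the "jcr_content" key stripped from every entry.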
self.page_hierarchy = {
k: {x: v[x] for x in v if x != "jcr_content"} for k, v in self.page_hierarchy_with_jcr_content.iteritems()
}
@patch('requests.get')
def test_get_jcr_content_default_depth(self, requests_get):
mock_response = Mock()
mock_response.json.return_value = JCR_CHILD_1
requests_get.return_value = mock_response
resp = jcr_helpers.get_jcr_content(root_url=TEST_URL, root_path=TEST_PATH)
requests_get.assert_called_once_with("{}{}.0.json".format(TEST_URL, TEST_PATH))
self.assertEqual(resp, JCR_CHILD_1)
@patch('requests.get')
def test_get_jcr_content_provided_depth(self, requests_get):
mock_response = Mock()
mock_response.json.return_value = self.jcr_content_infinity_dict
requests_get.return_value = mock_response
test_url = "{}/".format(TEST_URL)
test_path = TEST_PATH[1:]
resp = jcr_helpers.get_jcr_content(root_url=test_url, root_path=test_path, depth=100)
requests_get.assert_called_once_with("{}{}.100.json".format(test_url[:-1], "/{}".format(test_path)))
self.assertEqual(resp, self.jcr_content_infinity_dict)
@patch('requests.get')
def test_get_jcr_content_infinity(self, requests_get):
mock_response = Mock()
mock_response.json.return_value = self.jcr_content_infinity_dict
requests_get.return_value = mock_response
resp = jcr_helpers.get_jcr_content(root_url=TEST_URL, root_path=TEST_PATH, infinity=True)
requests_get.assert_called_once_with("{}{}.infinity.json".format(TEST_URL, TEST_PATH))
self.assertEqual(resp, self.jcr_content_infinity_dict)
@patch('requests.get')
def test_get_jcr_content_missing_scheme(self, requests_get):
mock_response = Mock()
mock_response.json.return_value = JCR_CHILD_1
requests_get.return_value = mock_response
test_url = TEST_URL[7:]
test_path = "{}.html".format(TEST_PATH)
resp = jcr_helpers.get_jcr_content(root_url=test_url, root_path=test_path)
requests_get.assert_called_once_with("http://{}{}.0.json".format(test_url, test_path[:-5]))
self.assertEqual(resp, JCR_CHILD_1)
@patch('requests.get')
def test_get_jcr_content_request_exception(self, requests_get):
requests_get.side_effect = requests.exceptions.ConnectionError()
self.assertRaises(
jcr_helpers.JCRHelperException, jcr_helpers.get_jcr_content,
root_url=TEST_URL, root_path=TEST_PATH, infinity=True)
@patch('requests.get')
def test_get_jcr_content_unexpected_jcr_content_response(self, requests_get):
mock_response = Mock()
mock_response.json.side_effect = ValueError()
requests_get.return_value = mock_response
self.assertRaises(
jcr_helpers.JCRHelperException, jcr_helpers.get_jcr_content,
root_url=TEST_URL, root_path=TEST_PATH, infinity=True)
@patch('requests.get')
def test_get_jcr_content_unexpected_error(self, requests_get):
requests_get.return_value = {}
self.assertRaises(
jcr_helpers.JCRHelperException, jcr_helpers.get_jcr_content,
root_url="", root_path=TEST_PATH, infinity=True)
@patch('the_ark.jcr_helpers.get_jcr_content')
def test_get_page_hierarchy_non_paginated(self, get_jcr_content):
get_jcr_content.return_value = self.jcr_content_infinity_dict
resp = jcr_helpers.get_page_hierarchy(TEST_URL, TEST_PATH)
get_jcr_content.assert_called_once_with(root_url=TEST_URL, root_path=TEST_PATH, infinity=True)
self.assertEqual(resp, self.page_hierarchy)
@patch('the_ark.jcr_helpers.get_jcr_content')
def test_get_page_hierarchy_paginated(self, get_jcr_content):
get_jcr_content.side_effect = [self.jcr_content_infinity_list, self.jcr_content_infinity_dict]
resp = jcr_helpers.get_page_hierarchy(TEST_URL, TEST_PATH)
get_jcr_content.assert_has_calls([
call(root_url=TEST_URL, root_path=TEST_PATH, infinity=True),
call(root_url=TEST_URL, root_path=TEST_PATH, depth=self.jcr_content_infinity_list[0].split('.')[1])
])
self.assertEqual(resp, self.page_hierarchy)
@patch('the_ark.jcr_helpers.get_jcr_content')
def test_get_page_hierarchy_missing_scheme(self, get_jcr_content):
get_jcr_content.return_value = self.jcr_content_infinity_dict
test_url = TEST_URL[7:]
test_path = "{}.html".format(TEST_PATH)
resp = jcr_helpers.get_page_hierarchy(root_url=test_url, root_path=test_path)
get_jcr_content.assert_called_once_with(root_url=TEST_URL, root_path=TEST_PATH, infinity=True)
self.assertEqual(resp, self.page_hierarchy)
@patch('the_ark.jcr_helpers.get_jcr_content')
def test_get_page_hierarchy_with_jcr_content(self, get_jcr_content):
get_jcr_content.return_value = self.jcr_content_infinity_dict
resp = jcr_helpers.get_page_hierarchy(TEST_URL, TEST_PATH, include_jcr_content=True)
get_jcr_content.assert_called_once_with(root_url=TEST_URL, root_path=TEST_PATH, infinity=True)
self.assertEqual(resp, self.page_hierarchy_with_jcr_content)
@patch('the_ark.jcr_helpers.get_jcr_content')
def test_get_page_hierarchy_unexpected_jcr_content(self, get_jcr_content):
get_jcr_content.return_value = {}
self.assertRaises(
jcr_helpers.JCRHelperException, jcr_helpers.get_page_hierarchy,
root_url=TEST_URL, root_path=TEST_PATH)
def test_jcrhelperexception_to_string(self):
jcr_exc = jcr_helpers.JCRHelperException(msg="error message")
self.assertIn("error message", str(jcr_exc))
def test_jcrhelperexception_with_stacktrace(self):
jcr_exc = jcr_helpers.JCRHelperException(msg="error message", stacktrace="test")
self.assertIn("test", str(jcr_exc))
def test_exception_with_details(self):
details = {"test": "testing"}
jcr_exc = jcr_helpers.JCRHelperException(msg="error message", details=details)
self.assertIn("test: testing", str(jcr_exc))
self.assertIn("Exception Details", str(jcr_exc))
| meltmedia/the-ark | tests/test_jcr_helpers.py | Python | apache-2.0 | 15,587 |
from troposphere import (
Parameter,
Ref,
Equals,
Output,
Tags,
GetAtt,
Base64,
Join,
cloudwatch as cw,
ec2,
elasticloadbalancing as elb,
autoscaling as asg
)
from utils.cfn import get_recent_ami
from utils.constants import (
ALLOW_ALL_CIDR,
EC2_INSTANCE_TYPES,
HTTP,
HTTPS,
POSTGRESQL,
SSH,
VPC_CIDR
)
from majorkirby import StackNode, MKUnresolvableInputError
class Application(StackNode):
INPUTS = {
'Tags': ['global:Tags'],
'Region': ['global:Region'],
'StackType': ['global:StackType'],
'StackColor': ['global:StackColor'],
'KeyName': ['global:KeyName'],
'AvailabilityZones': ['global:AvailabilityZones',
'VPC:AvailabilityZones'],
'RDSPassword': ['global:RDSPassword', 'DataPlane:RDSPassword'],
'AppServerInstanceType': ['global:AppServerInstanceType'],
'AppServerAMI': ['global:AppServerAMI'],
'AppServerInstanceProfile': ['global:AppServerInstanceProfile'],
'AppServerAutoScalingDesired': ['global:AppServerAutoScalingDesired'],
'AppServerAutoScalingMin': ['global:AppServerAutoScalingMin'],
'AppServerAutoScalingMax': ['global:AppServerAutoScalingMax'],
'SSLCertificateARN': ['global:SSLCertificateARN'],
'PublicSubnets': ['global:PublicSubnets', 'VPC:PublicSubnets'],
'PrivateSubnets': ['global:PrivateSubnets', 'VPC:PrivateSubnets'],
'VpcId': ['global:VpcId', 'VPC:VpcId'],
'GlobalNotificationsARN': ['global:GlobalNotificationsARN'],
}
DEFAULTS = {
'Tags': {},
'Region': 'us-east-1',
'StackType': 'Staging',
'StackColor': 'Green',
'KeyName': 'rf-stg',
'AppServerInstanceType': 't2.micro',
'AppServerInstanceProfile': 'AppServerInstanceProfile',
'AppServerAutoScalingDesired': '1',
'AppServerAutoScalingMin': '1',
'AppServerAutoScalingMax': '1',
}
ATTRIBUTES = {
'StackType': 'StackType',
'StackColor': 'StackColor',
}
def set_up_stack(self):
super(Application, self).set_up_stack()
tags = self.get_input('Tags').copy()
tags.update({'StackType': 'Application'})
self.default_tags = tags
self.region = self.get_input('Region')
self.add_description('Application server stack for Raster Foundry')
# Parameters
self.color = self.add_parameter(Parameter(
'StackColor', Type='String',
Description='Stack color', AllowedValues=['Blue', 'Green']
), 'StackColor')
self.keyname = self.add_parameter(Parameter(
'KeyName', Type='String',
Description='Name of an existing EC2 key pair'
), 'KeyName')
self.availability_zones = self.add_parameter(Parameter(
'AvailabilityZones', Type='CommaDelimitedList',
Description='Comma delimited list of availability zones'
), 'AvailabilityZones')
self.rds_password = self.add_parameter(Parameter(
'RDSPassword', Type='String', NoEcho=True,
Description='Database password',
), 'RDSPassword')
self.app_server_instance_type = self.add_parameter(Parameter(
'AppServerInstanceType', Type='String', Default='t2.micro',
Description='Application server EC2 instance type',
AllowedValues=EC2_INSTANCE_TYPES,
ConstraintDescription='must be a valid EC2 instance type.'
), 'AppServerInstanceType')
self.app_server_ami = self.add_parameter(Parameter(
'AppServerAMI', Type='String',
Default=self.get_recent_app_server_ami(),
Description='Application server AMI'
), 'AppServerAMI')
self.app_server_instance_profile = self.add_parameter(Parameter(
'AppServerInstanceProfile', Type='String',
Default='AppServerInstanceProfile',
Description='Application server instance profile'
), 'AppServerInstanceProfile')
self.app_server_auto_scaling_desired = self.add_parameter(Parameter(
'AppServerAutoScalingDesired', Type='String', Default='1',
Description='Application server AutoScalingGroup desired'
), 'AppServerAutoScalingDesired')
self.app_server_auto_scaling_min = self.add_parameter(Parameter(
'AppServerAutoScalingMin', Type='String', Default='1',
Description='Application server AutoScalingGroup minimum'
), 'AppServerAutoScalingMin')
self.app_server_auto_scaling_max = self.add_parameter(Parameter(
'AppServerAutoScalingMax', Type='String', Default='1',
Description='Application server AutoScalingGroup maximum'
), 'AppServerAutoScalingMax')
self.ssl_certificate_arn = self.add_parameter(Parameter(
'SSLCertificateARN', Type='String',
Description='ARN for a SSL certificate stored in IAM'
), 'SSLCertificateARN')
self.public_subnets = self.add_parameter(Parameter(
'PublicSubnets', Type='CommaDelimitedList',
Description='A list of public subnets'
), 'PublicSubnets')
self.private_subnets = self.add_parameter(Parameter(
'PrivateSubnets', Type='CommaDelimitedList',
Description='A list of private subnets'
), 'PrivateSubnets')
self.vpc_id = self.add_parameter(Parameter(
'VpcId', Type='String',
Description='VPC ID'
), 'VpcId')
self.notification_topic_arn = self.add_parameter(Parameter(
'GlobalNotificationsARN', Type='String',
Description='ARN for an SNS topic to broadcast notifications'
), 'GlobalNotificationsARN')
app_server_lb_security_group, \
app_server_security_group = self.create_security_groups()
app_server_lb = self.create_load_balancer(app_server_lb_security_group)
self.create_auto_scaling_resources(app_server_security_group,
app_server_lb)
self.create_cloud_watch_resources(app_server_lb)
self.add_output(Output('AppServerLoadBalancerEndpoint',
Value=GetAtt(app_server_lb, 'DNSName')))
self.add_output(Output('AppServerLoadBalancerHostedZoneNameID',
Value=GetAtt(app_server_lb,
'CanonicalHostedZoneNameID')))
def get_recent_app_server_ami(self):
try:
app_server_ami_id = self.get_input('AppServerAMI')
except MKUnresolvableInputError:
filters = {'name': 'rf-app-*',
'architecture': 'x86_64',
'block-device-mapping.volume-type': 'gp2',
'root-device-type': 'ebs',
'virtualization-type': 'hvm'}
app_server_ami_id = get_recent_ami(self.aws_profile, filters,
region=self.region)
return app_server_ami_id
def create_security_groups(self):
app_server_lb_security_group_name = 'sgAppServerLoadBalancer'
app_server_lb_security_group = self.add_resource(ec2.SecurityGroup(
app_server_lb_security_group_name,
GroupDescription='Enables access to application servers via a '
'load balancer',
VpcId=Ref(self.vpc_id),
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p,
ToPort=p
)
for p in [HTTP, HTTPS]
],
SecurityGroupEgress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [HTTP]
],
Tags=self.get_tags(Name=app_server_lb_security_group_name)
))
app_server_security_group_name = 'sgAppServer'
app_server_security_group = self.add_resource(ec2.SecurityGroup(
app_server_security_group_name,
DependsOn='sgAppServerLoadBalancer',
GroupDescription='Enables access to application servers',
VpcId=Ref(self.vpc_id),
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [SSH, HTTP]
] + [
ec2.SecurityGroupRule(
IpProtocol='tcp', SourceSecurityGroupId=Ref(sg),
FromPort=HTTP, ToPort=HTTP
)
for sg in [app_server_lb_security_group]
],
SecurityGroupEgress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [POSTGRESQL]
] + [
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p,
ToPort=p
)
for p in [HTTP, HTTPS]
],
Tags=self.get_tags(Name=app_server_security_group_name)
))
return app_server_lb_security_group, app_server_security_group
def create_load_balancer(self, app_server_lb_security_group):
app_server_lb_name = 'elbAppServer'
return self.add_resource(elb.LoadBalancer(
app_server_lb_name,
ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
Enabled=True,
Timeout=300,
),
CrossZone=True,
SecurityGroups=[Ref(app_server_lb_security_group)],
Listeners=[
elb.Listener(
LoadBalancerPort='80',
InstancePort='80',
Protocol='HTTP',
),
elb.Listener(
LoadBalancerPort='443',
InstancePort='80',
Protocol='HTTPS',
SSLCertificateId=Ref(self.ssl_certificate_arn)
)
],
HealthCheck=elb.HealthCheck(
Target='HTTP:80/health-check/',
HealthyThreshold='3',
UnhealthyThreshold='2',
Interval='30',
Timeout='5',
),
Subnets=Ref(self.public_subnets),
Tags=self.get_tags(Name=app_server_lb_name)
))
def create_auto_scaling_resources(self, app_server_security_group,
app_server_lb):
self.add_condition('BlueCondition', Equals('Blue', Ref(self.color)))
self.add_condition('GreenCondition', Equals('Green', Ref(self.color)))
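        # Exactly one of the Blue/Green pairs below is materialized per
        # stack; the mutually exclusive StackColor conditions support
        # blue-green deploys by flipping the parameter.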
blue_app_server_launch_config = self.add_resource(
asg.LaunchConfiguration(
'lcAppServerBlue',
Condition='BlueCondition',
ImageId=Ref(self.app_server_ami),
IamInstanceProfile=Ref(self.app_server_instance_profile),
InstanceType=Ref(self.app_server_instance_type),
KeyName=Ref(self.keyname),
SecurityGroups=[Ref(app_server_security_group)],
UserData=Base64(
Join('', self.get_cloud_config()))
))
self.add_resource(
asg.AutoScalingGroup(
'asgAppServerBlue',
AvailabilityZones=Ref(self.availability_zones),
Condition='BlueCondition',
Cooldown=300,
DesiredCapacity=Ref(self.app_server_auto_scaling_desired),
HealthCheckGracePeriod=600,
HealthCheckType='ELB',
LaunchConfigurationName=Ref(blue_app_server_launch_config),
LoadBalancerNames=[Ref(app_server_lb)],
MaxSize=Ref(self.app_server_auto_scaling_max),
MinSize=Ref(self.app_server_auto_scaling_min),
NotificationConfigurations=[
asg.NotificationConfigurations(
TopicARN=Ref(self.notification_topic_arn),
NotificationTypes=[
asg.EC2_INSTANCE_LAUNCH,
asg.EC2_INSTANCE_LAUNCH_ERROR,
asg.EC2_INSTANCE_TERMINATE,
asg.EC2_INSTANCE_TERMINATE_ERROR
]
)
],
VPCZoneIdentifier=Ref(self.private_subnets),
Tags=[asg.Tag('Name', 'AppServer', True)])
)
green_app_server_launch_config = self.add_resource(
asg.LaunchConfiguration(
'lcAppServerGreen',
Condition='GreenCondition',
ImageId=Ref(self.app_server_ami),
IamInstanceProfile=Ref(self.app_server_instance_profile),
InstanceType=Ref(self.app_server_instance_type),
KeyName=Ref(self.keyname),
SecurityGroups=[Ref(app_server_security_group)],
UserData=Base64(
Join('', self.get_cloud_config()))
))
self.add_resource(
asg.AutoScalingGroup(
'asgAppServerGreen',
AvailabilityZones=Ref(self.availability_zones),
Condition='GreenCondition',
Cooldown=300,
DesiredCapacity=Ref(self.app_server_auto_scaling_desired),
HealthCheckGracePeriod=600,
HealthCheckType='ELB',
LaunchConfigurationName=Ref(green_app_server_launch_config),
LoadBalancerNames=[Ref(app_server_lb)],
MaxSize=Ref(self.app_server_auto_scaling_max),
MinSize=Ref(self.app_server_auto_scaling_min),
NotificationConfigurations=[
asg.NotificationConfigurations(
TopicARN=Ref(self.notification_topic_arn),
NotificationTypes=[
asg.EC2_INSTANCE_LAUNCH,
asg.EC2_INSTANCE_LAUNCH_ERROR,
asg.EC2_INSTANCE_TERMINATE,
asg.EC2_INSTANCE_TERMINATE_ERROR
]
)
],
VPCZoneIdentifier=Ref(self.private_subnets),
Tags=[asg.Tag('Name', 'AppServer', True)])
)
def get_cloud_config(self):
return ['#cloud-config\n',
'\n',
'write_files:\n',
' - path: /etc/default/rf-app\n',
' permissions: 0644\n',
' content: PACKER_RUNNING=\n',
' - path: /etc/rf.d/env/RF_STACK_COLOR\n',
' permissions: 0750\n',
' owner: root:rf\n',
' content: ', Ref(self.color), '\n',
' - path: /etc/rf.d/env/RF_DB_PASSWORD\n',
' permissions: 0750\n',
' owner: root:rf\n',
' content: ', Ref(self.rds_password)]
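    # Joined, the fragments above form a cloud-config document that blanks
    # PACKER_RUNNING in /etc/default/rf-app and writes the stack color and
    # RDS password (resolved by CloudFormation at launch) into /etc/rf.d/env/.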
def create_cloud_watch_resources(self, app_server_lb):
self.add_resource(cw.Alarm(
'alarmAppServerBackend4XX',
AlarmDescription='Application server backend 4XXs',
AlarmActions=[Ref(self.notification_topic_arn)],
Statistic='Sum',
Period=300,
Threshold='20',
EvaluationPeriods=1,
ComparisonOperator='GreaterThanThreshold',
MetricName='HTTPCode_Backend_4XX',
Namespace='AWS/ELB',
Dimensions=[
cw.MetricDimension(
'metricLoadBalancerName',
Name='LoadBalancerName',
Value=Ref(app_server_lb)
)
],
))
self.add_resource(cw.Alarm(
'alarmAppServerBackend5XX',
AlarmDescription='Application server backend 5XXs',
AlarmActions=[Ref(self.notification_topic_arn)],
Statistic='Sum',
Period=60,
Threshold='0',
EvaluationPeriods=1,
ComparisonOperator='GreaterThanThreshold',
MetricName='HTTPCode_Backend_5XX',
Namespace='AWS/ELB',
Dimensions=[
cw.MetricDimension(
'metricLoadBalancerName',
Name='LoadBalancerName',
Value=Ref(app_server_lb)
)
],
))
def get_tags(self, **kwargs):
"""Helper method to return Troposphere tags + default tags
Args:
**kwargs: arbitrary keyword arguments to be used as tags
"""
kwargs.update(self.default_tags)
return Tags(**kwargs)
| kdeloach/raster-foundry | deployment/cfn/application.py | Python | apache-2.0 | 17,090 |
"""
page 132
"""
from sklearn.naive_bayes import GaussianNB
data_table = [
[1, 1, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 1],
[0, 0, 0, 0],
[1, 0, 1, 0],
[0, 1, 0, 1]
]
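# The first two columns serve as features; columns 2 and 3 are treated
# below as two independent binary labels (y1 and y2).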
X = [x[0:2] for x in data_table]
print(X)
y1 = [x[2] for x in data_table]
print(y1)
clf1 = GaussianNB().fit(X, y1)
p1 = [[1, 0]]
print(clf1.predict(p1))
y2 = [x[3] for x in data_table]
print(y2)
clf2 = GaussianNB().fit(X, y2)
p2 = [[1, 0]]
print(clf2.predict(p2))
| hhj0325/pystock | com/hhj/baihuabigdata/demo4.py | Python | apache-2.0 | 523 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides GposDiffFinder, which finds differences in ttxn feature output.
GposDiffFinder takes in two paths, to font binaries from which ttxn output is
made. It provides methods that compare the OpenType feature contents of these
files: `find_kerning_diffs`, `find_mark_class_diffs`, and
`find_positioning_diffs`.
Unlike ShapeDiffFinder, the methods don't have a `stats` argument and can't
accumulate a report between method calls (yet?). They simply report the
differences via a returned string.
"""
from collections import defaultdict
import re
import subprocess
import tempfile
class GposDiffFinder:
"""Provides methods to report diffs in GPOS content between ttxn outputs."""
def __init__(self, file_a, file_b, error_bound, output_lines=6):
ttxn_file_a = tempfile.NamedTemporaryFile()
ttxn_file_b = tempfile.NamedTemporaryFile()
subprocess.call(['ttxn', '-q', '-t', 'GPOS', '-o', ttxn_file_a.name,
'-f', file_a])
subprocess.call(['ttxn', '-q', '-t', 'GPOS', '-o', ttxn_file_b.name,
'-f', file_b])
self.text_a = ttxn_file_a.read()
self.text_b = ttxn_file_b.read()
self.err = error_bound
self.out_lines = output_lines
def find_kerning_diffs(self):
"""Report differences in kerning rules."""
classes_a, classes_b = {}, {}
rx = re.compile(r'(@[\w\d_.]+) = \[([\s\w\d_.]+)\];')
self._parse_kerning_classes(rx, self.text_a, classes_a)
self._parse_kerning_classes(rx, self.text_b, classes_b)
unmatched = defaultdict(list)
mismatched = defaultdict(list)
        rx = re.compile(r'pos \[?([\w\d@_.]+)\]? \[?([\w\d@_.]+)\]? (-?\d+);')
self._parse_kerning(rx, '-', self.text_a, classes_a, unmatched)
self._parse_kerning(rx, '+', self.text_b, classes_b, unmatched)
self._organize_kerning_diffs(unmatched, mismatched)
unmatched = [(k, v) for k, v in unmatched.iteritems() if v]
res = ['%d differences in kerning pairs' % len(unmatched)]
# (('+', 'a', 'b'), [-20, 10])
# Sort order:
# 1. Reverse absolute value of kerning
# 2. Left-side glyph name
# 3. Right-side glyph name
unmatched.sort(key=lambda t:(-max(abs(v) for v in t[1]),
t[0][1],
t[0][2]))
for (sign, left, right), vals in unmatched[:self.out_lines]:
res.append('%s pos %s %s %s' % (sign, left, right, vals))
res.append('')
mismatched = [(k, v) for k, v in mismatched.iteritems() if any(v)]
res.append('%d differences in kerning values' % len(mismatched))
# (('V', 'A'), ([-4], [-17]))
# Sort order:
# 1. Reverse absolute difference between before and after kern values
# 2. Left-side glyph name
# 3. Right-side glyph name
mismatched.sort(key=lambda t:(-sum(abs(v1-v2) for v1, v2 in
zip(t[1][0], t[1][1])),
t[0][0],
t[0][1]))
for (left, right), (vals1, vals2) in mismatched[:self.out_lines]:
if sum(abs(v1 - v2) for v1, v2 in zip(vals1, vals2)) > self.err:
res.append('pos %s %s: %s vs %s' % (left, right, vals1, vals2))
res.append('')
return '\n'.join(res)
def find_mark_class_diffs(self):
"""Report differences in mark class definitions."""
unmatched = {}
mismatched = {}
        rx = re.compile(r'mark \[([\w\d\s@_.]+)\] <anchor (-?\d+) (-?\d+)> '
                        r'(@[\w\d_.]+);')
self._parse_anchor_info(rx, '-', self.text_a, unmatched, mismatched)
self._parse_anchor_info(rx, '+', self.text_b, unmatched, mismatched)
res = ['%d differences in mark class definitions' % len(unmatched)]
unmatched = unmatched.items()
# (('+', 'uni0325', '@uni0323_6'), (0, -30))
# Sort order:
# 1. Glyph class
# 2. Mark class
unmatched.sort(key=lambda t: (t[0][1], t[0][2]))
for (sign, member, mark_class), (x, y) in unmatched[:self.out_lines]:
res.append('%s mark [%s] <anchor %d %d> %s;' %
(sign, member, x, y, mark_class))
res.append('')
res.append('%d differences in mark class values' % len(mismatched))
mismatched = mismatched.items()
# (('uni0300', '@uni0300_23'), ((0, 527), (300, 527)))
# Sort order:
# 1. Reverse absolute difference between position before and after
# 2. Glyph class
# 3. Mark class
mismatched.sort(key=lambda t:(-(abs(t[1][0][0] - t[1][1][0])
+ abs(t[1][0][1] - t[1][1][1])),
t[0][0],
t[0][1]))
for (member, cls), ((x1, y1), (x2, y2)) in mismatched[:self.out_lines]:
if abs(x1 - x2) > self.err or abs(y1 - y2) > self.err:
res.append('%s %s <%d %d> vs <%d %d>' %
(member, cls, x1, y1, x2, y2))
res.append('')
return '\n'.join(res)
def find_positioning_diffs(self, mark_type='base'):
"""Report differences in positioning rules."""
unmatched = {}
mismatched = {}
        rx = re.compile(r'pos %s \[([\w\d\s@_.]+)\]\s+<anchor (-?\d+) (-?\d+)> '
                        r'mark (@?[\w\d_.]+);' % mark_type)
self._parse_anchor_info(rx, '-', self.text_a, unmatched, mismatched)
self._parse_anchor_info(rx, '+', self.text_b, unmatched, mismatched)
res = ['%d differences in mark-to-%s positioning rule coverage' %
(len(unmatched), mark_type)]
unmatched = unmatched.items()
# Sort order: same as 'mark class definitions'
unmatched.sort(key=lambda t: (t[0][1], t[0][2]))
for (sign, member, mark_class), (x, y) in unmatched[:self.out_lines]:
res.append('%s pos %s [%s] <anchor %d %d> mark %s;' %
(sign, mark_type, member, x, y, mark_class))
res.append('')
res.append('%d differences in mark-to-%s positioning rule values' %
(len(mismatched), mark_type))
mismatched = mismatched.items()
# Sort order: same as 'mark class values'
mismatched.sort(key=lambda t:(-(abs(t[1][0][0] - t[1][1][0])
+ abs(t[1][0][1] - t[1][1][1])),
t[0][0],
t[0][1]))
for (member, cls), ((x1, y1), (x2, y2)) in mismatched[:self.out_lines]:
if abs(x1 - x2) > self.err or abs(y1 - y2) > self.err:
res.append('%s %s <%d %d> vs <%d %d>' %
(member, cls, x1, y1, x2, y2))
res.append('')
return '\n'.join(res)
def _parse_kerning_classes(self, rx, text, classes):
"""Parse kerning class definitions."""
for definition in rx.findall(text):
name, members = definition
classes[name] = members.split()
def _parse_kerning(self, rx, sign, text, classes, unmatched):
"""Parse kerning rules."""
for rule in rx.findall(text):
left, right, val = rule
val = int(val)
if left in classes:
left = classes[left]
else:
left = [left]
if right in classes:
right = classes[right]
else:
right = [right]
for left_glyph in left:
for right_glyph in right:
key = sign, left_glyph, right_glyph
key_match = (self._reverse_sign(sign), left_glyph,
right_glyph)
if val in unmatched[key_match]:
unmatched[key_match].remove(val)
else:
unmatched[key].append(val)
def _organize_kerning_diffs(self, unmatched, mismatched):
"""Move mismatched kerning rules into a separate dictionary."""
keys = unmatched.keys()
for key in keys:
if key not in unmatched: # already matched and removed
continue
sign, left, right = key
key_match = self._reverse_sign(sign), left, right
if (key_match in unmatched and
unmatched[key] and unmatched[key_match]):
if sign == '+':
key, key_match = key_match, key
mismatched[left, right] = (
unmatched.pop(key), unmatched.pop(key_match))
def _parse_anchor_info(self, rx, sign, text, unmatched, mismatched):
"""Parse unmatched and mismatched mark classes."""
for members, x, y, mark_class in rx.findall(text):
# hack to get around unexpected class naming differences (ttxn bug?)
mark_class = '_'.join(mark_class.split('_', 2)[:2])
for member in members.split():
val = int(x), int(y)
key_match = self._reverse_sign(sign), member, mark_class
if key_match in unmatched:
if unmatched[key_match] != val:
mismatched[member, mark_class] = (
unmatched[key_match], val)
del unmatched[key_match]
else:
unmatched[sign, member, mark_class] = val
def _reverse_sign(self, sign):
"""Return the reverse of a sign contained in a string."""
if sign == '-':
return '+'
elif sign == '+':
return '-'
else:
raise ValueError('Bad sign "%s".' % sign)
| dougfelt/nototools | nototools/gpos_diff.py | Python | apache-2.0 | 10,512 |
"""
WSGI config for navel project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "navel.settings")
application = get_wsgi_application()
| flychensc/orange | navel/wsgi.py | Python | apache-2.0 | 388 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import psycopg2
from psycopg2 import pool
from config import LoggerLoader
from database.postgres_db import PostgresDataBase
__author__ = 'leandroloi'
__license__ = "GPL"
__version__ = "0.0.1"
__maintainer__ = "Leandro Loi"
__email__ = "leandroloi at gmail dot com"
logger = LoggerLoader(__name__).get_logger()
class DatabaseLoader(object):
"""Singleton"""
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_instance'):
cls._instance = super(DatabaseLoader, cls).__new__(cls, *args, **kwargs)
cls._instance.__initialized = False
return cls._instance
def __init__(self, settings=None):
if not self.__initialized:
connection = self.config(settings)
self.db = PostgresDataBase(connection)
self.__initialized = True
def get_database(self):
return self.db
def config(self, settings):
"""Called by the app on startup to setup bindings to the DB
:param settings: Database settings, like database,port, user, password
"""
try:
conn = psycopg2.pool.SimpleConnectionPool(1, 10, database=settings.get('database'), user=settings.get('user'),
password=settings.get('password'), host=settings.get('host'),
port=settings.get('port'))
return conn
except Exception, e:
logger.error('The system is having problem to connect. Exception {exception}'.format(exception=e))
raise e
def initialize_database(settings):
"""
:type settings: dict
:return:
:raise e:
"""
try:
return DatabaseLoader(settings).get_database()
except Exception, e:
raise e
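# Illustrative usage (connection values hypothetical):
#
#   settings = {'database': 'mydb', 'user': 'postgres', 'password': 'secret',
#               'host': 'localhost', 'port': 5432}
#   db = initialize_database(settings)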
| leandroloi/bovespa-eod | database/__init__.py | Python | apache-2.0 | 1,860 |
import asyncio
import logging
from contextlib import ContextDecorator
from functools import wraps
from unittest.mock import patch, _patch
from typing import List, Dict, Tuple, TypeVar, Type, NamedTuple, Optional, Set
from lightbus import (
RpcTransport,
EventTransport,
SchemaTransport,
ResultTransport,
RpcMessage,
ResultMessage,
EventMessage,
)
from lightbus.client.commands import SendEventCommand, CallRpcCommand
from lightbus.config import Config
from lightbus.path import BusPath
from lightbus.client import BusClient
from lightbus.transports.registry import TransportRegistry
from lightbus.utilities.internal_queue import InternalQueue
_registry: Dict[str, List] = {}
logger = logging.getLogger(__name__)
class MockResult:
"""Utility for mocking bus calls
This is the context variable provided when using the BusMocker utility.
Examples:
# bus_mock will be a MockResult
with bus_mocker as bus_mock:
# ... do stuff with the bus ...
bus_mock.assertEventFired()
"""
def __init__(
self, mocker_context: "BusQueueMockerContext", mock_responses: dict, mock_events: set
):
self.mocker_context = mocker_context
self.mock_responses = mock_responses
self.mock_events = mock_events
def assertEventFired(self, full_event_name, *, times=None):
event_names_fired = self.eventNamesFired
if times is None or times > 0:
assert ( # nosec
full_event_name in event_names_fired
), f"Event {full_event_name} was never fired. Fired events were: {', '.join(event_names_fired)}"
if times is not None:
total_times_fired = len([v for v in event_names_fired if v == full_event_name])
assert total_times_fired == times, ( # nosec
f"Event fired the incorrect number of times. "
f"Expected {times}, actual {total_times_fired}"
)
assert_events_fired = assertEventFired
def assertEventNotFired(self, full_event_name):
assert (
full_event_name not in self.eventNamesFired
), f"Event {full_event_name} was unexpectedly fired"
assert_event_not_fired = assertEventNotFired
def getEventMessages(self, full_event_name=None) -> List[EventMessage]:
commands = self.mocker_context.event.to_transport.commands.get_all(SendEventCommand)
if full_event_name is None:
return [c.message for c in commands]
else:
return [c.message for c in commands if c.message.canonical_name == full_event_name]
get_event_messages = getEventMessages
def mockEventFiring(self, full_event_name: str):
self.mock_events.add(full_event_name)
mock_event_firing = mockEventFiring
def assertRpcCalled(self, full_rpc_name, *, times=None):
rpc_names_called = self.rpcNamesCalled
if times is None or times > 0:
assert (
full_rpc_name in rpc_names_called
), f"RPC {full_rpc_name} was never called. Called RPCs were: {set(rpc_names_called)}"
if times is not None:
total_times_called = len([v for v in rpc_names_called if v == full_rpc_name])
assert total_times_called == times, ( # nosec
f"RPC {full_rpc_name} called the incorrect number of times. "
f"Expected {times}, actual {total_times_called}"
)
assert_rpc_called = assertRpcCalled
def assertRpcNotCalled(self, full_rpc_name):
assert (
full_rpc_name not in self.rpcNamesCalled
), f"Event {full_rpc_name} was unexpectedly fired"
assert_rpc_not_called = assertRpcNotCalled
def getRpcMessages(self, full_rpc_name=None) -> List[RpcMessage]:
commands = self.mocker_context.rpc_result.to_transport.commands.get_all(CallRpcCommand)
if full_rpc_name is None:
return [c.message for c in commands]
else:
return [c.message for c in commands if c.message.canonical_name == full_rpc_name]
get_rpc_messages = getRpcMessages
def mockRpcCall(self, full_rpc_name, result=None, **rpc_result_message_kwargs):
self.mock_responses[full_rpc_name] = dict(result=result, **rpc_result_message_kwargs)
mock_rpc_call = mockRpcCall
@property
def eventNamesFired(self) -> List[str]:
return [c.canonical_name for c in self.getEventMessages()]
event_names_fired = eventNamesFired
@property
def rpcNamesCalled(self) -> List[str]:
return [c.canonical_name for c in self.getRpcMessages()]
rpc_names_called = rpcNamesCalled
def __repr__(self):
return f"<MockResult: events: {len(self.getEventMessages())}, rpcs: {len(self.getRpcMessages())}>"
class BusMocker(ContextDecorator):
def __init__(self, bus: BusPath, require_mocking=True):
self.bus = bus
self.old_transport_registry = None
self.require_mocking = require_mocking
self.stack: List[BusQueueMockerContext] = []
def __call__(self, func):
# Overriding ContextDecorator.__call__ to pass the mock
# result to the function. This is exactly the same as the parent
# implementation except we pass mock_result into func().
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm() as mock_result:
return func(*args, mock_result, **kwds)
return inner
def __enter__(self):
"""Start of a context where all the bus' transports have been replaced with mocks"""
# Mutable structures which we will use this to get the mocked data into
# the transports
# RPC
mock_responses = {}
# Events
mock_events = set()
# Create our transport classes, into which we inject our mutable structures
TestRpcTransport = make_test_rpc_transport()
TestResultTransport = make_test_result_transport(mock_responses)
TestEventTransport = make_test_event_transport(mock_events)
TestSchemaTransport = make_test_schema_transport()
new_registry = TransportRegistry()
new_registry.set_schema_transport(
TestSchemaTransport, TestSchemaTransport.Config(), self.bus.client.config
)
self.old_transport_registry = self.bus.client.transport_registry
for api_name, entry in self.old_transport_registry._registry.items():
new_registry.set_rpc_transport(
api_name, TestRpcTransport, TestRpcTransport.Config(), self.bus.client.config
)
new_registry.set_result_transport(
api_name,
TestResultTransport,
TestResultTransport.Config(require_mocking=self.require_mocking),
self.bus.client.config,
)
new_registry.set_event_transport(
api_name,
TestEventTransport,
TestEventTransport.Config(require_mocking=self.require_mocking),
self.bus.client.config,
)
        # The docks are only available on the bus client during testing
self.bus.client.event_dock.transport_registry = new_registry
self.bus.client.rpc_result_dock.transport_registry = new_registry
queue_mocker = BusQueueMockerContext(client=self.bus.client)
bus_with_mocked_queues = queue_mocker.__enter__()
self.stack.append(queue_mocker)
return MockResult(
bus_with_mocked_queues, mock_responses=mock_responses, mock_events=mock_events
)
def __exit__(self, exc_type, exc, exc_tb):
"""Restores the bus back to its original state"""
bus_with_mocked_queues = self.stack.pop()
bus_with_mocked_queues.__exit__(exc_type, exc, exc_tb)
        # The docks are only available on the bus client during testing
bus_with_mocked_queues.client.event_dock.transport_registry = self.old_transport_registry
bus_with_mocked_queues.client.rpc_result_dock.transport_registry = (
self.old_transport_registry
)
bus_mocker = BusMocker
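# A minimal usage sketch (the bus path and event name are hypothetical; the
# decorator form injects the MockResult as an extra positional argument, as
# implemented in BusMocker.__call__ above):
#
#     @bus_mocker(bus)
#     def test_checkout(mock_result):
#         mock_result.mockEventFiring("shop.checkout")
#         bus.shop.checkout.fire(order_id=123)
#         assert "shop.checkout" in mock_result.eventNamesFired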
T = TypeVar("T")
def make_test_rpc_transport():
class TestRpcTransport(RpcTransport):
def __init__(self):
self.rpcs: List[Tuple[RpcMessage, dict]] = []
async def call_rpc(self, rpc_message, options: dict):
pass
async def consume_rpcs(self, apis):
raise NotImplementedError("Not yet supported by mocks")
return TestRpcTransport
def make_test_result_transport(mock_responses: Dict[str, dict]):
class TestResultTransport(ResultTransport):
_mock_responses: Dict[str, dict] = mock_responses
def __init__(self, require_mocking=True):
super().__init__()
self.mock_responses = mock_responses
self.require_mocking = require_mocking
@classmethod
def from_config(cls: Type[T], config: "Config", require_mocking: bool = True) -> T:
return cls(require_mocking=require_mocking)
async def get_return_path(self, rpc_message):
return "test://"
async def send_result(self, rpc_message, result_message, return_path):
raise NotImplementedError("Not yet supported by mocks")
async def receive_result(self, rpc_message, return_path, options):
if self.require_mocking:
assert rpc_message.canonical_name in self.mock_responses, (
f"RPC {rpc_message.canonical_name} unexpectedly called. "
f"Perhaps you need to use mockRpcCall() to ensure the mocker expects this call."
)
if rpc_message.canonical_name in self.mock_responses:
kwargs = self.mock_responses[rpc_message.canonical_name].copy()
kwargs.setdefault("api_name", rpc_message.api_name)
kwargs.setdefault("procedure_name", rpc_message.procedure_name)
kwargs.setdefault("rpc_message_id", rpc_message.id)
return ResultMessage(**kwargs)
else:
return ResultMessage(
result=None,
rpc_message_id="1",
api_name=rpc_message.api_name,
procedure_name=rpc_message.procedure_name,
)
return TestResultTransport
def make_test_event_transport(mock_events: set):
class TestEventTransport(EventTransport):
_mock_events: set = mock_events
def __init__(self, require_mocking=True):
super().__init__()
self.events = []
self.mock_events = mock_events
self.require_mocking = require_mocking
@classmethod
def from_config(cls: Type[T], config: "Config", require_mocking: bool = True) -> T:
return cls(require_mocking=require_mocking)
async def send_event(self, event_message, options):
if self.require_mocking:
assert event_message.canonical_name in self.mock_events, (
f"Event {event_message.canonical_name} unexpectedly fired. "
f"Perhaps you need to use mockEventFiring() to ensure the mocker expects this call."
)
async def consume(self, listen_for: List[Tuple[str, str]], listener_name: str, **kwargs):
"""Consume RPC events for the given API"""
raise NotImplementedError("Not yet supported by mocks")
return TestEventTransport
def make_test_schema_transport():
class TestSchemaTransport(SchemaTransport):
def __init__(self):
self.schemas = {}
async def store(self, api_name: str, schema: Dict, ttl_seconds: int):
self.schemas[api_name] = schema
async def ping(self, api_name: str, schema: Dict, ttl_seconds: int):
pass
async def load(self) -> Dict[str, Dict]:
return self.schemas
return TestSchemaTransport
# Command mocking
# These tools are not part of the public API, but are used for internal testing,
# and to power the above BusMocker (which is part of the public API)
class CommandList(list):
    def types(self):
        return [type(i) for i in self]
def get_all(self, type_: Type[T]) -> List[T]:
commands = []
for i in self:
if type(i) == type_:
commands.append(i)
return commands
def get(self, type_: Type[T], multiple_ok=False) -> T:
commands = self.get_all(type_)
if not commands:
raise ValueError(f"No command found of type {type_}")
elif len(commands) > 1 and not multiple_ok:
raise ValueError(f"Multiple ({len(commands)}) commands found of type {type_}")
else:
return commands[0]
def has(self, type_: Type[T]):
return any(type(i) == type_ for i in self)
    def count(self, type_: Type[T]):
        return len(self.get_all(type_))
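# Example use of CommandList (the command classes are illustrative): given
# cl = CommandList([SendEventCommand(...), CallRpcCommand(...)]),
# cl.get(CallRpcCommand) returns the single matching command,
# cl.get_all(SendEventCommand) returns every match, and
# cl.has(...) / cl.count(...) report presence and cardinality.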
Command = TypeVar("Command", bound=NamedTuple)
class QueueMockContext:
def __init__(self, queue: InternalQueue):
self.queue = queue
self.put_items: List[Tuple[Command, asyncio.Event]] = []
self.got_items: List[Tuple[Command, asyncio.Event]] = []
self._patched_put_nowait: Optional[_patch] = None
self._patched_get_nowait: Optional[_patch] = None
self._blackhole: Set[Type[Command]] = set()
def __enter__(self) -> "QueueMockContext":
self.put_items = []
self.got_items = []
self._orig_put_nowait = self.queue.put_nowait
self._orig_get_nowait = self.queue.get_nowait
self._patched_put_nowait = patch.object(self.queue, "put_nowait", wraps=self._put_nowait)
self._patched_get_nowait = patch.object(self.queue, "get_nowait", wraps=self._get_nowait)
self._patched_put_nowait.start()
self._patched_get_nowait.start()
return self
def blackhole(self, *command_types):
self._blackhole = self._blackhole.union(set(command_types))
def __exit__(self, exc_type, exc_val, exc_tb):
self._patched_put_nowait.stop()
self._patched_get_nowait.stop()
def _put_nowait(self, item, *args, **kwargs):
self.put_items.append(item)
try:
command, event = item
except ValueError:
# The item being added isn't a command
pass
else:
            # Don't enqueue blackholed commands
if type(command) in self._blackhole:
return
return self._orig_put_nowait(item, *args, **kwargs)
def _get_nowait(self, *args, **kwargs):
item = self._orig_get_nowait(*args, **kwargs)
self.got_items.append(item)
return item
@property
def put_commands(self) -> CommandList:
return CommandList([i[0] for i in self.put_items])
@property
def got_commands(self) -> CommandList:
return CommandList([i[0] for i in self.got_items])
@property
def items(self):
# Just a little shortcut for the common use case
return self.put_items
@property
def commands(self):
# Just a little shortcut for the common use case
return self.put_commands
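# Minimal sketch of QueueMockContext in isolation (the command class is
# illustrative); items put onto the wrapped queue are recorded for inspection:
#
#     with QueueMockContext(queue) as mocked:
#         queue.put_nowait((SomeCommand(), asyncio.Event()))
#         assert mocked.put_commands.has(SomeCommand)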
class BusQueueMockerContext:
class Queues(NamedTuple):
to_transport: QueueMockContext
from_transport: QueueMockContext
def __init__(self, client: BusClient):
self.client = client
self.event: Optional[BusQueueMockerContext.Queues] = None
self.rpc_result: Optional[BusQueueMockerContext.Queues] = None
self.errors: Optional[QueueMockContext] = None
def __enter__(self):
self.event = BusQueueMockerContext.Queues(
to_transport=QueueMockContext(self.client.event_client.producer.queue).__enter__(),
from_transport=QueueMockContext(self.client.event_client.consumer.queue).__enter__(),
)
self.rpc_result = BusQueueMockerContext.Queues(
to_transport=QueueMockContext(self.client.rpc_result_client.producer.queue).__enter__(),
from_transport=QueueMockContext(
self.client.rpc_result_client.consumer.queue
).__enter__(),
)
self.errors = QueueMockContext(self.client.error_queue).__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.event.to_transport.__exit__(exc_type, exc_val, exc_tb)
self.event.from_transport.__exit__(exc_type, exc_val, exc_tb)
self.rpc_result.to_transport.__exit__(exc_type, exc_val, exc_tb)
self.rpc_result.from_transport.__exit__(exc_type, exc_val, exc_tb)
| adamcharnock/lightbus | lightbus/utilities/testing.py | Python | apache-2.0 | 16,695 |
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import zlib
import future.utils
from . import Alias
if future.utils.PY2:
import dumbdbm
else:
import dbm.dumb as dumbdbm
class CompressedJsonDbm(object):
""" Quick-and-dirty interface to a DBM file
"""
def __init__(self, filename, flag='r', dbm=dumbdbm):
self.dbm = dbm
if hasattr(dbm, 'open'):
self.db = self.dbm.open(filename, flag)
else:
self.db = self.dbm(filename, flag)
def __getattr__(self, item):
return getattr(self.db, item)
def __dir__(self):
return list(self.__dict__.keys()) + dir(self.db)
def __len__(self):
return len(self.db)
def __getitem__(self, key):
gzvalue = self.db[key]
return json.loads(zlib.decompress(gzvalue).decode())
def __setitem__(self, key, value):
        gzvalue = zlib.compress(json.dumps(value).encode())
self.db[key] = gzvalue
__contains__ = Alias('db.__contains__')
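# A minimal usage sketch (the path is illustrative); values round-trip
# transparently through JSON serialisation and zlib compression:
#
#     db = CompressedJsonDbm('/tmp/cache', flag='c')
#     db['key'] = {'numbers': [1, 2, 3]}
#     assert db['key'] == {'numbers': [1, 2, 3]}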
class ReadOnlyDumb(dumbdbm._Database):
""" A read-only subclass of dumbdbm
All possible operations that could result in a disk write have been turned into no-ops or raise
exceptions
"""
def _commit(self):
# Does nothing!
pass
def __setitem__(self, key, value):
raise NotImplementedError('This is a read-only database')
def __delitem__(self, key):
raise NotImplementedError('This is a read-only database')
def _addkey(self, *args):
assert False, 'Should never be here - this is a read-only database'
| Autodesk/molecular-design-toolkit | moldesign/utils/databases.py | Python | apache-2.0 | 2,281 |
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Handlers for editing proofs
import cgi
import logging
import StringIO
import urllib
import webapp2
from webapp2_extras import json
import verify
import common
import read
import textutils
import users
import babygit.appengine
import babygit.repo
import babygit.stage
s = babygit.appengine.AEStore()
# Retrieves a prefix of a proof file, up to the named theorem. This logic is
# likely to move into the client, but for now it's a fairly straightforward
# adaptation of the older logic.
class UptoHandler(webapp2.RequestHandler):
def __init__(self, request, response):
self.initialize(request, response)
self.store = s
self.repo = babygit.repo.Repo(s)
def get(self, arg):
o = self.response.out
asplit = arg.rsplit('/', 1)
if len(asplit) < 2:
o.write('error: expected proof_file.gh/thmname')
return
url = '/' + asplit[0]
thmname = asplit[1]
urlctx = read.UrlCtx(url)
text = urlctx.resolve(url)
if text is None:
o.write('error: didn\'t find url: ' + url)
return
lines = text.readlines()
digest = textutils.split_gh_file(lines)
digestd = dict(digest)
if digestd.has_key(thmname):
start, end = digestd[thmname]
lines = lines[:start]
self.response.headers['Content-Type'] = 'text/plain; charset=UTF-8'
o.write(''.join(lines))
class EditHandler(users.AuthenticatedHandler):
def __init__(self, request, response):
self.initialize(request, response)
self.store = s
self.repo = babygit.repo.Repo(s)
def get(self, arg):
o = self.response.out
useAce = self.request.get("ace")
asplit = arg.rsplit('/', 1)
if len(asplit) < 2:
o.write('expected proof_file.gh/thmname')
return
url = '/' + asplit[0]
thmname = asplit[1]
urlctx = read.UrlCtx(url)
text = urlctx.resolve(url)
if text is None:
o.write('Well, didn\'t find url: ' + url + '. ')
ghiFile = '/' + asplit[0] + 'i'
o.write('Unfortunately, the axioms (statements without proofs) are not set up ')
o.write('properly and end up here. ')
o.write('Maybe try the ghi file: <a href="' + ghiFile + '">' + ghiFile + '</a>')
return
lines = text.readlines()
digest = textutils.split_gh_file(lines)
digestd = dict(digest)
logging.debug(`(thmname, json.encode(thmname), urllib.quote(arg))`)
if self.userobj:
auth = 'Logged in as ' + cgi.escape(self.userobj.identity)
if self.has_write_perm:
auth += ', ok to save.'
else:
auth += ', but no save permissions.'
elif users.bypass_local_auth(self.request):
auth = 'Not logged in, but local dev server.'
else:
auth = 'Not logged in, save won\'t work.'
o.write("""<head>
<title>Edit</title>
<link rel=stylesheet href="/static/editor.css" type="text/css">
<link rel=stylesheet href="/static/prover.css" type="text/css">
<link rel=stylesheet href="/static/common.css" type="text/css">
<style type="text/css">
""")
if useAce: o.write(""" .ace_marker-layer .gh_error {
position: absolute;
z-index: 4;
background-color: #fcc;
}
</style>
<style type="text/css" media="screen">
#canvas {
position: relative;
width: 600px;
height: 400px;
}
""")
o.write("""</style>
</head>
<body class="stack-mode">
""")
o.write('<div class="header">')
o.write(' <a href="/"><img src="/static/logo.png" style="position:absolute;"/></a>')
o.write(' <span id="header-boxes">');
o.write(""" <span class="header-box dictionary-entry" onclick="GH.Panel.setPanelNum(3)">dictionary</span> """);
o.write(' </span>');
o.write('</div>')
o.write('<span id="authetication">%s</span>' % auth);
o.write("""
<script type="text/javascript"
src="//cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
</script>
<script src="/js/verify.js" type="text/javascript"></script>
<script src="/js/sandbox.js" type="text/javascript"></script>
<script src="/js/inputlayer.js" type="text/javascript"></script>
<script src="/js/edit.js" type="text/javascript"></script>
<script src="/js/direct.js" type="text/javascript"></script>
<script src="/js/proofstep.js" type="text/javascript"></script>
<script src="/js/proofsegment.js" type="text/javascript"></script>
<script src="/js/prover/prover.js" type="text/javascript"></script>
<script src="/js/sexpression.js" type="text/javascript"></script>
<script src="/js/prover/archiveSearcher.js" type="text/javascript"></script>
<script src="/js/prover/buttonController.js" type="text/javascript"></script>
<script src="/js/prover/equalizer.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/evaluator.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/add.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/and.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/apply.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/constant.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/div.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/divides.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/divide.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/elementOf.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/equality.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/exponent.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/factorial.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/fibonacci.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/greaterThan.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/greaterThanEqual.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/halfminus.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/ifn.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/intersection.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/interval.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/lessThan.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/lessThanEqual.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/minus.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/modulo.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/multiply.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/negative.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/product.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/properSubset.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/prime.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/setEquality.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/subset.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/substitution.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/successor.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/sum.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/triangle.js" type="text/javascript"></script>
<script src="/js/prover/evaluator/union.js" type="text/javascript"></script>
<script src="/js/prover/theorem/attachDecorator.js" type="text/javascript"></script>
<script src="/js/prover/theorem/baseTheorem.js" type="text/javascript"></script>
<script src="/js/prover/theorem/commuteDecorator.js" type="text/javascript"></script>
<script src="/js/prover/theorem/inferDecorator.js" type="text/javascript"></script>
<script src="/js/prover/theorem/deduceDecorator.js" type="text/javascript"></script>
<script src="/js/prover/theorem/theoremFactory.js" type="text/javascript"></script>
<script src="/js/prover/theorem/theoremWriter.js" type="text/javascript"></script>
<script src="/js/prover/remover.js" type="text/javascript"></script>
<script src="/js/prover/replacer.js" type="text/javascript"></script>
<script src="/js/prover/multiReplacer.js" type="text/javascript"></script>
<script src="/js/prover/conditionalReplacer.js" type="text/javascript"></script>
<script src="/js/prover/existGeneralizer.js" type="text/javascript"></script>
<script src="/js/prover/instantiator.js" type="text/javascript"></script>
<script src="/js/prover/repositioner.js" type="text/javascript"></script>
<script src="/js/prover/symbolTree.js" type="text/javascript"></script>
<script src="/js/prover/numUtil.js" type="text/javascript"></script>
<script src="/js/prover/setUtil.js" type="text/javascript"></script>
<script src="/js/prover/tupleUtil.js" type="text/javascript"></script>
<script src="/js/prover/operatorUtil.js" type="text/javascript"></script>
<script src="/js/prover/variableGenerator.js" type="text/javascript"></script>
<script src="/js/panel.js" type="text/javascript"></script>
<script src="/js/typeset.js" type="text/javascript"></script>
<div id="editor-body">
<div id="dictionary">
<div style="overflow:hidden" "border-right: 1px solid #bbb">
<div class="section-title">Dictionary</div>
<button id="inferences">Inference</button>
<button id="deductions">Deduction</button>
<button id="unified">Unified</button>
<span class="section-close" onclick="GH.Panel.setPanelNum(2)">X</span>
<span style="float: right">
<label for="filter">filter: </label><input type="text" id="filter" class="minor-input"/>
</span>
</div>
<div id="panel-container" style="display:block;float:left">
<table id="panel" border="1" style="border:1px solid;">
</table>
</div>
</div>
<div id="editor-section">
<span class="section-title">Editor</span>
<label for="number">before: </label><input type="text" id="number" value="%s" class="minor-input"/>
""" % thmname)
o.write("""<span class="section-close" onclick="GH.Panel.setPanelNum(1)">X</span>""")
o.write("""
<span id="saving"></span>
<input type="button" id="save" onclick="log(mainpanel); GH.save(window.mainpanel.getValue(), url)" name="save" value="save"/>
<input type="button" id="exp-adder" onclick="window.direct.prover.openExpAdder()" name="numberAdder" value="Add"/>
<br/>
""")
if useAce: o.write("""<div id="canvas"></div>
<script src="//d1n0x3qji82z53.cloudfront.net/src-min-noconflict/ace.js" type="text/javascript" charset="utf-8"></script>
<script>
var editor = ace.edit("canvas");
</script>
""")
else:
o.write('<textarea id="canvas" cols="60" rows="20" width="640" height="480" tabindex="0"></textarea><br/>\n')
o.write("""
<a href="#" id="autounify" style="display:none">autounify</a><br/>
</div>
<div id="right-panel">
<div class="thmtitle">
<span id="thmname"></span>
<span class="edit-entry" onclick="GH.Panel.setPanelNum(2)">edit</span>
</div>
<div id="stack">...</div>
<div id="suggest"></div>
</div>
<div id="output" style="clear:left;"></div>
<script type="text/javascript">
name = %s;
GH.Direct.replace_thmname(name);
url = %s;
// TODO: better handling of raw urls (ideally draw from a specific commit)
uc = new GH.XhrUrlCtx('/', '/git' + url);
v = new GH.VerifyCtx(uc, run);
""" % (json.encode(thmname), json.encode(url)))
if url[-4:] != '.ghi':
o.write("""
v.set_suppress_errors(true);
inputArgs = run(uc, '/proofs_upto/%s', v);
v.set_suppress_errors(false);
""" % (urllib.quote(arg)))
else:
o.write("""
inputArgs = interfaceRun(uc, '%s', v);
""" % (url))
if useAce:
o.write('window.mainpanel = new GH.AceEdit(editor);\n')
else:
o.write("window.mainpanel = new GH.TextareaEdit(document.getElementById('canvas'));\n")
o.write("""window.direct = new GH.Direct(window.mainpanel, document.getElementById('stack'), document.getElementById('suggest'), inputArgs);
window.direct.vg = v;
var number = document.getElementById('number');
number.onchange = function() {
var uc = new GH.XhrUrlCtx('/', '/git' + url);
var v = new GH.VerifyCtx(uc, run);
v.set_suppress_errors(true);
run(uc, '/proofs_upto' + url + '/' + number.value, v);
v.set_suppress_errors(false);
window.direct.vg = v;
window.direct.update();
};
var panel = new GH.Panel(window.direct.vg);
""")
if digestd.has_key(thmname):
start, end = digestd[thmname]
thmbody = lines[start:end]
thmbody = [l.rstrip() for l in thmbody]
result = json.encode(thmbody)
o.write('window.mainpanel.setLines(%s);\n' % result)
o.write('</script>\n')
def skip_blank_lines(index, lines):
while index < len(lines) and lines[index].rstrip() == '':
index += 1
return index
class ErrorHandler:
def __init__(self):
self.first_error = None
self.fatal_error = False
def error_handler(self, label, msg):
if self.first_error is None:
self.first_error = (label, msg)
if label == '':
# Errors that happen inside theorem contexts can be recovered. This roughly
# corresponds to being able to continue (-c in the command line).
self.fatal_error = True
return True
class SaveHandler(users.AuthenticatedHandler):
def __init__(self, request, response):
self.initialize(request, response)
self.store = s
self.repo = babygit.repo.Repo(s)
def post(self):
if not self.has_write_perm:
return common.error_403(self)
name = self.request.get('name')
content = self.request.get('content')
number = self.request.get('number')
url = self.request.get('url')
logging.debug(`url`)
# TODO: validate the name a bit more (no / or space)
# Edit the content into the theorem text. This is probably best done
# client-side, but for now we'll stick with the way it worked before.
logging.debug(`(name, content, number)`)
urlctx = read.UrlCtx(url)
text = urlctx.resolve(url)
if text is None:
lines = []
else:
lines = text.readlines()
digest = textutils.split_gh_file(lines)
digestd = dict(digest)
# Insert before number, or at end of file if not found
if len(digest) == 0:
insertpt = len(lines)
elif digestd.has_key(number):
insertpt = digestd[number][0]
else:
insertpt = skip_blank_lines(digest[-1][1][1], lines)
if digestd.has_key(name):
start, end = digestd[name]
end = skip_blank_lines(end, lines)
lines[start:end] = []
if insertpt >= end:
insertpt -= end - start
contentlines = content.split('\n')
while len(contentlines) > 0 and contentlines[-1].rstrip() == '':
contentlines.pop()
if len(contentlines) > 0:
if insertpt > 0 and lines[insertpt - 1].rstrip() != '':
contentlines.insert(0, '\n')
contentlines.append('')
lines[insertpt:insertpt] = contentlines
lines = [line.rstrip() for line in lines]
newcontent = '\n'.join(lines)
if isinstance(newcontent, unicode): newcontent = newcontent.encode('utf-8')
o = self.response.out
# Verify
pipe = StringIO.StringIO(newcontent)
urlctx = read.UrlCtx(url, pipe)
error_handler = ErrorHandler()
ctx = verify.VerifyCtx(urlctx, verify.run, error_handler.error_handler)
tmpout = StringIO.StringIO()
ctx.run(urlctx, '-', ctx, tmpout)
logging.debug(`tmpout.getvalue()`)
if error_handler.fatal_error:
# TODO: plumb actual error message
o.write(json.encode(['error', error_handler.first_error[1]]))
return
# Now save the new text
git_path = str(url)
if git_path.startswith('/'): git_path = git_path[1:]
babygit.stage.checkout(self.repo)
tree = babygit.stage.save(self.repo, git_path, newcontent)
babygit.stage.add(self.repo, tree)
author = self.identity
msg = 'Commit from web thm editor: save ' + name + '\n'
commitsha = babygit.stage.commit(self.repo, author, msg)
o.write(json.encode(['ok', 'successfully saved ' + name]))
| raphlinus/ghilbert | app/edit.py | Python | apache-2.0 | 17,458 |
"""
* The <code>ThreadedTest</code> is a test decorator that
* runs a test in a separate thread.
*
* @author <b>Mike Clark</b>
* @author Clarkware Consulting, Inc.
**************************************
* Ported to Python by Grig Gheorghiu *
**************************************
"""
import time
from threading import currentThread
from ThreadInGroup import ThreadInGroup
from Test import Test
from ThreadBarrier import ThreadBarrier
class ThreadedTest(Test):
def __init__(self, test, thread_group=None, thread_barrier=None):
"""
Constructs a <code>ThreadedTest</code> to decorate the
specified test using the specified thread group and
thread barrier.
@param test Test to decorate.
@param group Thread group.
@param barrier Thread barrier.
"""
#self.test = test_class(test_name)
self.test = test
self.group = thread_group
self.barrier = thread_barrier
if self.barrier is None:
self.barrier = ThreadBarrier(1)
def countTestCases(self):
"""
Returns the number of test cases in this threaded test.
@return Number of test cases.
"""
return self.test.countTestCases()
def run(self, result):
"""
Runs this test.
@param result Test result.
"""
test_runner = TestRunner(result, self.test, self.barrier)
t = ThreadInGroup(group=self.group, target=test_runner)
#print "ThreadedTest thread starting at:", time.time()
t.start()
#return t
#t.join()
def __str__(self):
return "ThreadedTest: " + str(self.test)
class TestRunner:
def __init__(self, result, test, barrier):
self.result = result
self.test = test
self.barrier = barrier
def __call__(self):
self.test.run(self.result)
self.barrier.onCompletion(currentThread())
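# Minimal usage sketch (the test case class is illustrative); the decorated
# test runs in its own thread and reports into the shared result:
#
#     from unittest import TestResult
#     result = TestResult()
#     ThreadedTest(MyTestCase('test_something')).run(result)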
| nmondal/pyunitperf | ThreadedTest.py | Python | apache-2.0 | 1,719 |
# Nothing in this file should need to be edited.
# Use package.json to adjust metadata about this package.
# Use MANIFEST.in to include package-specific data files.
import os
import json
from setuptools import setup, find_packages
info = json.load(open("./package.json"))
def generate_namespaces(package):
i = package.count(".")
while i:
yield package.rsplit(".", i)[0]
i -= 1
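# For example, generate_namespaces("armstrong.core.arm_layout") yields
# "armstrong" and then "armstrong.core".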
NAMESPACE_PACKAGES = list(generate_namespaces(info['name']))
if os.path.exists("MANIFEST"):
os.unlink("MANIFEST")
setup_kwargs = {
"author": "Texas Tribune & The Center for Investigative Reporting",
"author_email": "[email protected]",
"url": "http://github.com/armstrong/%s/" % info["name"],
"packages": find_packages(),
"namespace_packages": NAMESPACE_PACKAGES,
"include_package_data": True,
"classifiers": [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
}
setup_kwargs.update(info)
setup(**setup_kwargs)
| armstrong/armstrong.core.arm_layout | setup.py | Python | apache-2.0 | 1,334 |
from django.conf import settings
from django.contrib.auth.models import User
from uuidfield import UUIDField
from django.db import models
# Base API User Class
class APIUser(models.Model):
user = models.OneToOneField(User)
id = UUIDField(auto=True, primary_key=True)
host = models.URLField(null=False, default=settings.HOST)
# TODO: change this to false for production
enabled = models.BooleanField(default=False)
type = models.CharField(max_length=32, default="Author")
def __setattr__(self, attr, value):
if attr == 'type':
if value not in ["Author", "Node"]:
raise ValueError('User type must be "Author" or "Node"')
super(APIUser, self).__setattr__(attr, value)
class Meta:
abstract = True
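# A minimal sketch of a concrete subclass (the extra field is illustrative);
# APIUser is abstract, so each concrete user kind gets its own table:
#
# class Author(APIUser):
#     github_url = models.URLField(blank=True, null=True)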
| CMPUT404/socialdistribution | api/models/user.py | Python | apache-2.0 | 780 |
# Example of calling REST API from Python to manage APIC-EM users/roles using APIC-EM APIs.
# * THIS SAMPLE APPLICATION AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY
# * OF ANY KIND BY CISCO, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED
# * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR
# * PURPOSE, NONINFRINGEMENT, SATISFACTORY QUALITY OR ARISING FROM A COURSE OF
# * DEALING, LAW, USAGE, OR TRADE PRACTICE. CISCO TAKES NO RESPONSIBILITY
# * REGARDING ITS USAGE IN AN APPLICATION, AND IT IS PRESENTED ONLY AS AN
# * EXAMPLE. THE SAMPLE CODE HAS NOT BEEN THOROUGHLY TESTED AND IS PROVIDED AS AN
# * EXAMPLE ONLY, THEREFORE CISCO DOES NOT GUARANTEE OR MAKE ANY REPRESENTATIONS
# * REGARDING ITS RELIABILITY, SERVICEABILITY, OR FUNCTION. IN NO EVENT DOES
# * CISCO WARRANT THAT THE SOFTWARE IS ERROR FREE OR THAT CUSTOMER WILL BE ABLE
# * TO OPERATE THE SOFTWARE WITHOUT PROBLEMS OR INTERRUPTIONS. NOR DOES CISCO
# * WARRANT THAT THE SOFTWARE OR ANY EQUIPMENT ON WHICH THE SOFTWARE IS USED WILL
# * BE FREE OF VULNERABILITY TO INTRUSION OR ATTACK. THIS SAMPLE APPLICATION IS
# * NOT SUPPORTED BY CISCO IN ANY MANNER. CISCO DOES NOT ASSUME ANY LIABILITY
# * ARISING FROM THE USE OF THE APPLICATION. FURTHERMORE, IN NO EVENT SHALL CISCO
# * OR ITS SUPPLIERS BE LIABLE FOR ANY INCIDENTAL OR CONSEQUENTIAL DAMAGES, LOST
# * PROFITS, OR LOST DATA, OR ANY OTHER INDIRECT DAMAGES EVEN IF CISCO OR ITS
# * SUPPLIERS HAVE BEEN INFORMED OF THE POSSIBILITY THEREOF.-->
# import requests library
import requests
#import json library
import json
# Disable warnings
requests.packages.urllib3.disable_warnings()
controller='198.18.129.100'
#creates and returns a service ticket.
def getTicket():
print("\nCreating ticket")
# put the ip address or dns of your apic-em controller in this url
url = "https://" + controller + "/api/v1/ticket"
#the username and password to access the APIC-EM Controller
payload = {"username":"admin","password":"C1sco12345"}
#Content type must be included in the header
header = {"content-type": "application/json"}
#Performs a POST on the specified url to get the service ticket
response= requests.post(url,data=json.dumps(payload), headers=header, verify=False)
print(response.text)
#convert response to json format
r_json=response.json()
#parse the json to get the service ticket
ticket = r_json["response"]["serviceTicket"]
return ticket
#Get and display the APIC-EM Users
def getUsers(ticket):
print("\nGetting list of existing users")
# URL for user REST API call to get list of APIC-EM users.
url = "https://" + controller + "/api/v1/user"
#Content type as well as the ticket must be included in the header
header = {"content-type": "application/json", "X-Auth-Token":ticket}
# this statement performs a GET on the specified host url
response = requests.get(url, headers=header, verify=False)
# json.dumps serializes the json into a string and allows us to
# print the response in a 'pretty' format with indentation etc.
print ("Users = ")
print (json.dumps(response.json(), indent=4, separators=(',', ': ')))
#Adds a APIC-EM User
def addUser(ticket):
print("\nAdding new user")
    # URL for user REST API call to create a new APIC-EM user.
url = "https://" + controller + "/api/v1/user"
#Content type as well as the ticket must be included in the header
header = {"content-type": "application/json", "X-Auth-Token":ticket}
username="brett"
#Data for new user
payload={"password":"Brett123!","username":username,"authorization":[{"scope":"ALL","role":"ROLE_OBSERVER"}]}
    # this statement performs a POST on the specified user url
response = requests.post(url, data=json.dumps(payload), headers=header, verify=False)
print ("Response after post: " + response.text)
return (username)
#Delete the user that corresponds to the passed in username parameter
def deleteUser(username, ticket):
print("\nRemoving user: " + username)
# URL for a specified user REST API call.
url = "https://" + controller + "/api/v1/user/" + username
#Content type as well as the ticket must be included in the header
header = {"content-type": "application/json", "X-Auth-Token":ticket}
# this statement performs a Delete on the specified user url
response = requests.delete(url, headers=header, verify=False)
print (response.text)
#Show the User that corresponds to the passed in username parameter
def showUser(username, ticket):
print("\nDisplaying user: " + username)
# URL for user REST API call to get APIC-EM user with corresponding name.
url = "https://" + controller + "/api/v1/user/" + username
#Content type as well as the ticket must be included in the header
header = {"content-type": "application/json", "X-Auth-Token":ticket}
# this statement performs a GET on the specified user url
response = requests.get(url, headers=header, verify=False)
# json.dumps serializes the json into a string and allows us to
# print the response in a 'pretty' format with indentation etc.
print ("User found = ")
print (json.dumps(response.json(), indent=4, separators=(',', ': ')))
theTicket=getTicket()
getUsers(theTicket)
name=addUser(theTicket)
showUser(name,theTicket)
getUsers(theTicket)
deleteUser(name,theTicket)
getUsers(theTicket) | CiscoDevNet/coding-skills-sample-code | coding102-REST-python-dcloud/manage-users.py | Python | apache-2.0 | 5,294 |
""" Cisco_IOS_XR_common_acl_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class AclUsageAppIdEnumEnum(Enum):
"""
AclUsageAppIdEnumEnum
Acl usage app id enum
.. data:: pfilter = 1
General Usage Statistics
.. data:: bgp = 2
Usage staistics related to BGP Traffic
.. data:: ospf = 3
Usage staistics related to OSPF Traffic
"""
pfilter = 1
bgp = 2
ospf = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_common_acl_datatypes as meta
return meta._meta_table['AclUsageAppIdEnumEnum']
| 111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_common_acl_datatypes.py | Python | apache-2.0 | 920 |
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.conductor.tasks import heat_tasks
from magnum.tests import base
import mock
from taskflow import engines
from taskflow.patterns import linear_flow
class HeatTasksTests(base.TestCase):
def setUp(self):
super(HeatTasksTests, self).setUp()
self.heat_client = mock.MagicMock(name='heat_client')
def _get_create_stack_flow(self, heat_client):
flow = linear_flow.Flow("create stack flow")
flow.add(
heat_tasks.CreateStack(
os_client=heat_client,
requires=('stack_name', 'parameters', 'template', 'files'),
provides='new_stack',
),
)
return flow
def _get_update_stack_flow(self, heat_client):
flow = linear_flow.Flow("update stack flow")
flow.add(
heat_tasks.UpdateStack(
os_client=heat_client,
requires=('stack_id', 'parameters', 'template', 'files'),
),
)
return flow
def _get_delete_stack_flow(self, heat_client):
flow = linear_flow.Flow("delete stack flow")
flow.add(
heat_tasks.DeleteStack(
os_client=heat_client,
                requires=('stack_id',),
),
)
return flow
def test_create_stack(self):
heat_client = mock.MagicMock(name='heat_client')
stack_id = 'stack_id'
stack_name = 'stack_name'
stack = {
'stack': {
'id': stack_id
}
}
heat_client.stacks.create.return_value = stack
flow_store = {
'stack_name': stack_name,
'parameters': 'parameters',
'template': 'template',
'files': 'files'
}
flow = self._get_create_stack_flow(heat_client)
result = engines.run(flow, store=flow_store)
heat_client.stacks.create.assert_called_once_with(**flow_store)
self.assertEqual(stack_id, result['new_stack']['stack']['id'])
def test_create_stack_with_error(self):
heat_client = mock.MagicMock(name='heat_client')
heat_client.stacks.create.side_effect = ValueError
stack_name = 'stack_name'
flow_store = {
'stack_name': stack_name,
'parameters': 'parameters',
'template': 'template',
'files': 'files'
}
flow = self._get_create_stack_flow(heat_client)
self.assertRaises(ValueError, engines.run, flow, store=flow_store)
def test_update_stack(self):
heat_client = mock.MagicMock(name='heat_client')
stack_id = 'stack_id'
flow_store = {
'stack_id': stack_id,
'parameters': 'parameters',
'template': 'template',
'files': 'files'
}
flow = self._get_update_stack_flow(heat_client)
expected_params = dict(flow_store)
del expected_params['stack_id']
engines.run(flow, store=flow_store)
heat_client.stacks.update.assert_called_once_with(stack_id,
**expected_params)
def test_update_stack_with_error(self):
heat_client = mock.MagicMock(name='heat_client')
heat_client.stacks.update.side_effect = ValueError
stack_id = 'stack_id'
flow_store = {
'stack_id': stack_id,
'parameters': 'parameters',
'template': 'template',
'files': 'files'
}
flow = self._get_update_stack_flow(heat_client)
self.assertRaises(ValueError, engines.run, flow, store=flow_store)
def test_delete_stack(self):
heat_client = mock.MagicMock(name='heat_client')
stack_id = 'stack_id'
flow_store = {'stack_id': stack_id}
flow = self._get_delete_stack_flow(heat_client)
engines.run(flow, store=flow_store)
heat_client.stacks.delete.assert_called_once_with(stack_id)
def test_delete_stack_with_error(self):
heat_client = mock.MagicMock(name='heat_client')
heat_client.stacks.delete.side_effect = ValueError
stack_id = 'stack_id'
flow_store = {'stack_id': stack_id}
flow = self._get_delete_stack_flow(heat_client)
self.assertRaises(ValueError, engines.run, flow, store=flow_store)
| dimtruck/magnum | magnum/tests/unit/conductor/tasks/test_heat_tasks.py | Python | apache-2.0 | 4,943 |
import re, sys, json, time, xbmc
import control
import bookmarks
class player(xbmc.Player):
def __init__(self):
xbmc.Player.__init__(self)
self.totalTime = 0
self.loadingTime = 0
self.currentTime = 0
self.title = ""
self.year = 2015
self.offset = '0'
self.dbid = 0
def run(self, meta):
# if control.window.getProperty('PseudoTVRunning') == 'True':
# return control.player.play(url, control.item(path=url))
self.getVideoInfo(meta)
if meta["thumb"] is None:
meta["thumb"] = "DefaultVideo.png"
item = control.item(path=meta["url"], iconImage=meta["thumb"], thumbnailImage=meta["thumb"])
item.setInfo(type='Video', infoLabels={"Title": self.title, "Plot": meta["plot"], "Genre": meta["genre"]})
item.setProperty('Video', 'true')
# item.setProperty('IsPlayable', 'true')
item.setProperty("ListItem.IsResumable", "true")
item.setProperty("ListItem.EndTime", meta["endTime"])
item.setProperty("totaltime", meta["endTime"])
control.player.play(meta["url"], item)
for i in range(0, 240):
if self.isPlayingVideo(): break
xbmc.sleep(1000)
while self.isPlayingVideo():
try:
self.totalTime = self.getTotalTime()
except Exception, e:
print str(e)
pass
try:
self.currentTime = self.getTime()
except Exception, e:
print str(e)
pass
xbmc.sleep(1000)
time.sleep(5)
def getVideoInfo(self, meta):
try:
self.loadingTime = time.time()
self.totalTime = meta["endTime"]
self.currentTime = 0
self.title = meta["title"]
self.year = meta["year"]
self.dbid = meta["dbid"]
except Exception, e:
print str(e)
pass
try:
# if control.setting('resume_playback') == 'true':
self.offset = bookmarks.getBookmark(self.title, meta["id"])
if self.offset == '0': raise Exception()
            minutes, seconds = divmod(float(self.offset), 60)
hours, minutes = divmod(minutes, 60)
yes = control.yesnoDialog(
'%s %02d:%02d:%02d' % (control.lang(30461).encode('utf-8'), hours, minutes, seconds), '', '',
self.title, control.lang(30463).encode('utf-8'), control.lang(30462).encode('utf-8'))
if yes:
self.offset = '0'
except Exception, e:
print str(e)
pass
def setWatchedStatus(self):
return
# if self.content == 'episode':
# try:
# control.jsonrpc(
# '{"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": {"episodeid" : %s, "playcount" : 1 }, "id": 1 }' % str(
# self.DBID))
# if not self.folderPath.startswith('plugin://'): control.refresh()
# except:
# pass
# try:
# from metahandler import metahandlers
# metaget = metahandlers.MetaData(preparezip=False)
# metaget.get_meta('tvshow', self.tvshowtitle, imdb_id=self.imdb)
# metaget.get_episode_meta(self.tvshowtitle, self.imdb, self.season, self.episode)
# metaget.change_watched(self.content, '', self.imdb, season=self.season, episode=self.episode, year='',
# watched=7)
# except:
# pass
def onPlayBackStarted(self):
for i in range(0, 200):
if control.condVisibility('Window.IsActive(busydialog)') == 1:
control.idle()
else:
break
control.sleep(100)
if control.setting('playback_info') == 'true':
elapsedTime = '%s %s %s' % (control.lang(30464).encode('utf-8'), int((time.time() - self.loadingTime)),
control.lang(30465).encode('utf-8'))
control.infoDialog(elapsedTime, heading=self.title)
try:
if self.offset == '0':
raise Exception()
self.seekTime(float(self.offset))
except Exception, e:
print str(e)
pass
def onPlayBackStopped(self):
try:
bookmarks.deleteBookmark(self.title, self.dbid)
ok = int(self.currentTime) > 180 and (self.currentTime / self.totalTime) <= .92
if ok:
print "adding bookmark: %s : %s" % (self.currentTime, self.dbid)
bookmarks.addBookmark(self.currentTime, self.title, self.dbid)
except Exception, e:
print str(e)
pass
try:
ok = self.currentTime / self.totalTime >= .9
if ok: self.setWatchedStatus()
except Exception, e:
print str(e)
pass
def onPlayBackEnded(self):
self.onPlayBackStopped()
| camalot/plugin.video.microsoftvirtualacademy | resources/lib/player.py | Python | apache-2.0 | 5,130 |
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
#(r'^tinymcewrapper/', include('tinymcewrapper.urls')),
(r'^admin/', include(admin.site.urls)),
)
urlpatterns = urlpatterns + patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
) if settings.DEBUG else urlpatterns
| callowayproject/django-tinymcewrapper | example/urls.py | Python | apache-2.0 | 456 |
# -*- coding: utf-8 -*-
# Import the basic Django ORM models library
from django.db import models
from model_utils.models import TimeStampedModel
from taggit.managers import TaggableManager
from autoslug.fields import AutoSlugField
from users.models import User
class Category(TimeStampedModel):
title = models.CharField(max_length=255)
slug = AutoSlugField(populate_from='title', unique=True)
description = models.TextField(max_length=1024, blank=True, null=True)
thumbnail_url = models.URLField(blank=True, null=True)
def __unicode__(self):
return self.title
@property
def post_count(self):
return self.post_set.filter(published=True).count()
# Post model
class Post(TimeStampedModel):
category = models.ForeignKey(Category)
author = models.ForeignKey(User)
title = models.CharField(max_length=255)
slug = AutoSlugField(populate_from='title', unique=True)
content = models.TextField()
summary = models.CharField(max_length=1024, blank=True, null=True)
tags = TaggableManager(blank=True)
thumbnail_url = models.URLField(blank=True, null=True, help_text='Size: 848x307')
view_count = models.PositiveIntegerField(default=0)
like_count = models.PositiveIntegerField(default=0)
published = models.BooleanField(default=False)
class Meta:
ordering = ['-created']
def __unicode__(self):
return self.title
@property
def comment_count(self):
return self.comment_set.filter(approved=True).count()
def approved_comments(self):
return self.comment_set.filter(approved=True, parent__isnull=True)
class Comment(TimeStampedModel):
post = models.ForeignKey(Post)
content = models.CharField(max_length=1024)
user = models.ForeignKey(User, blank=True, null=True)
user_name = models.CharField(max_length=255, blank=True, null=True)
user_email = models.EmailField(max_length=255, blank=True, null=True)
user_website = models.URLField(max_length=255, blank=True, null=True)
parent = models.ForeignKey('self', blank=True, null=True)
approved = models.BooleanField(default=False)
def __unicode__(self):
return self.content
def approved_replies(self):
return self.comment_set.filter(approved=True)
@property
def json(self):
user_id = None
if self.user:
user_id = self.user.id
parent_id = None
if self.parent:
parent_id = self.parent.id
return {
'post_id': self.post.id,
'content': self.content,
'user_id': user_id,
'parent_id': parent_id,
'user_name': self.user_name,
'user_email': self.user_email,
'user_website': self.user_website,
'approved': self.approved,
}
| zhiwehu/zhiwehu | zhiwehu/post/models.py | Python | apache-2.0 | 2,831 |
"""Reslowe process model."""
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.core.validators import RegexValidator
from django.db import models
from .base import BaseModel
class Process(BaseModel):
"""Postgres model for storing processes."""
class Meta(BaseModel.Meta):
"""Process Meta options."""
permissions = (
("view_process", "Can view process"),
("share_process", "Can share process"),
("owner_process", "Is owner of the process"),
)
#: raw persistence
PERSISTENCE_RAW = 'RAW'
#: cached persistence
PERSISTENCE_CACHED = 'CAC'
#: temp persistence
PERSISTENCE_TEMP = 'TMP'
PERSISTENCE_CHOICES = (
(PERSISTENCE_RAW, 'Raw'),
(PERSISTENCE_CACHED, 'Cached'),
(PERSISTENCE_TEMP, 'Temp'),
)
SCHEDULING_CLASS_INTERACTIVE = 'IN'
SCHEDULING_CLASS_BATCH = 'BA'
SCHEDULING_CLASS_CHOICES = (
(SCHEDULING_CLASS_INTERACTIVE, "Interactive"),
(SCHEDULING_CLASS_BATCH, "Batch"),
)
#: data type
type = models.CharField(max_length=100, validators=[
RegexValidator(
regex=r'^data:[a-z0-9:]+:$',
message='Type may be alphanumerics separated by colon',
code='invalid_type'
)
])
#: category
category = models.CharField(max_length=200, default='Other:', validators=[
RegexValidator(
regex=r'^([a-zA-Z0-9]+[:\-])*[a-zA-Z0-9]+:$',
message='Category may be alphanumerics separated by colon',
code='invalid_category'
)
])
persistence = models.CharField(max_length=3, choices=PERSISTENCE_CHOICES, default=PERSISTENCE_RAW)
"""
Persistence of :class:`~resolwe.flow.models.Data` objects created
with this process. It can be one of the following:
- :attr:`PERSISTENCE_RAW`
- :attr:`PERSISTENCE_CACHED`
- :attr:`PERSISTENCE_TEMP`
.. note::
If persistence is set to ``PERSISTENCE_CACHED`` or
``PERSISTENCE_TEMP``, the process must be idempotent.
"""
#: designates whether this process should be treated as active
is_active = models.BooleanField('active', default=True)
#: detailed description
description = models.TextField(default='')
#: template for name of Data object created with Process
data_name = models.CharField(max_length=200, null=True, blank=True)
input_schema = JSONField(blank=True, default=list)
"""
process input schema (describes input parameters, form layout **"Inputs"** for :attr:`Data.input`)
Handling:
- schema defined by: *dev*
- default by: *user*
- changable by: *none*
"""
output_schema = JSONField(blank=True, default=list)
"""
process output schema (describes output JSON, form layout **"Results"** for :attr:`Data.output`)
Handling:
- schema defined by: *dev*
- default by: *dev*
- changable by: *dev*
Implicitly defined fields (by
:func:`resolwe.flow.management.commands.register` or
``resolwe.flow.executors.run.BaseFlowExecutor.run`` or its
derivatives):
- ``progress`` of type ``basic:float`` (from 0.0 to 1.0)
- ``proc`` of type ``basic:group`` containing:
- ``stdout`` of type ``basic:text``
- ``rc`` of type ``basic:integer``
- ``task`` of type ``basic:string`` (celery task id)
- ``worker`` of type ``basic:string`` (celery worker hostname)
- ``runtime`` of type ``basic:string`` (runtime instance hostname)
- ``pid`` of type ``basic:integer`` (process ID)
"""
entity_type = models.CharField(max_length=100, null=True, blank=True)
"""
Automatically add :class:`~resolwe.flow.models.Data` object created
with this process to an :class:`~resolwe.flow.models.Entity` object
representing a data-flow. If all input ``Data`` objects belong to
the same entity, add newly created ``Data`` object to it, otherwise
create a new one.
"""
entity_descriptor_schema = models.CharField(max_length=100, null=True, blank=True)
"""
Slug of the descriptor schema assigned to the Entity created with
:attr:`~resolwe.flow.models.Process.entity_type`.
"""
entity_input = models.CharField(max_length=100, null=True, blank=True)
"""
Limit the entity selection in
:attr:`~resolwe.flow.models.Process.entity_type` to a single input.
"""
run = JSONField(default=dict)
"""
process command and environment description for internal use
Handling:
- schema defined by: *dev*
- default by: *dev*
- changable by: *dev*
"""
requirements = JSONField(default=dict)
"""
process requirements
"""
scheduling_class = models.CharField(max_length=2, choices=SCHEDULING_CLASS_CHOICES,
default=SCHEDULING_CLASS_BATCH)
"""
process scheduling class
"""
def get_resource_limits(self):
"""Get the core count and memory usage limits for this process.
:return: A dictionary with the resource limits, containing the
following keys:
- ``memory``: Memory usage limit, in MB. Defaults to 4096 if
not otherwise specified in the resource requirements.
- ``cores``: Core count limit. Defaults to 1.
:rtype: dict
"""
# Get limit defaults and overrides.
limit_defaults = getattr(settings, 'FLOW_PROCESS_RESOURCE_DEFAULTS', {})
limit_overrides = getattr(settings, 'FLOW_PROCESS_RESOURCE_OVERRIDES', {})
limits = {}
resources = self.requirements.get('resources', {}) # pylint: disable=no-member
limits['cores'] = int(resources.get('cores', 1))
max_cores = getattr(settings, 'FLOW_PROCESS_MAX_CORES', None)
if max_cores:
limits['cores'] = min(limits['cores'], max_cores)
memory = limit_overrides.get('memory', {}).get(self.slug, None)
if memory is None:
memory = int(resources.get(
'memory',
# If no memory resource is configured, check settings.
limit_defaults.get('memory', 4096)
))
limits['memory'] = memory
return limits
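    # Example settings shapes consumed by get_resource_limits() above (the
    # values and the process slug are illustrative):
    #
    #     FLOW_PROCESS_RESOURCE_DEFAULTS = {'memory': 4096}
    #     FLOW_PROCESS_RESOURCE_OVERRIDES = {'memory': {'alignment-bwa-mem': 16384}}
    #     FLOW_PROCESS_MAX_CORES = 8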
| jberci/resolwe | resolwe/flow/models/process.py | Python | apache-2.0 | 6,293 |
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import docparsers
from .callsigs import *
from .classes import *
from .databases import *
from .utils import *
| tkzeng/molecular-design-toolkit | moldesign/utils/__init__.py | Python | apache-2.0 | 695 |
import gunicorn
from bottle import request, response, route
import bottle
from sys import argv
@route('/')
def index():
return 'Hello!'
@route('/oauth/authorise/basic', method=['GET','POST'])
def oauth_authorise_basic():
    return 'Hello!'
@route('/oauth/access/token', method=['GET','POST'])
def oauth_access_token():
return 'Hello!'
@bottle.route('/oauth/redirect')
def oauth_redirect():
return 'Hello!'
bottle.run(server='gunicorn', host='0.0.0.0', port=argv[1]) | cmlh/Heroku-py | example_gunicorn.py | Python | apache-2.0 | 480 |
"""add ondelete cascade to organisation_committee
Revision ID: 15cc05702df3
Revises: 29978548fcb6
Create Date: 2015-02-10 09:58:45.663899
"""
# revision identifiers, used by Alembic.
revision = '15cc05702df3'
down_revision = '29978548fcb6'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_constraint("organisation_committee_committee_id_fkey", 'organisation_committee')
op.create_foreign_key('organisation_committee_committee_id_fkey', 'organisation_committee', 'committee', ['committee_id'], ['id'], ondelete='CASCADE')
op.drop_constraint("organisation_committee_organisation_id_fkey", 'organisation_committee')
op.create_foreign_key('organisation_committee_organisation_id_fkey', 'organisation_committee', 'organisation', ['organisation_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
| Code4SA/pmg-cms-2 | migrations/versions/15cc05702df3_add_ondelete_cascade_to_organisation_.py | Python | apache-2.0 | 884 |
# -*- coding: utf-8 -*-
"""
Created on Feb 20, 2014
@author: Aaron Ponti
"""
import re
import random
import math
from MicroscopyCompositeDatasetConfig import MicroscopyCompositeDatasetConfig
from ch.systemsx.cisd.openbis.dss.etl.dto.api.impl import MaximumIntensityProjectionGenerationAlgorithm
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ChannelColor
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ImageIdentifier
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ImageMetadata
from ch.systemsx.cisd.openbis.dss.etl.dto.api import OriginalDataStorageFormat
from ch.systemsx.cisd.openbis.dss.etl.dto.api import ChannelColorRGB
from ch.systemsx.cisd.openbis.dss.etl.dto.api import Channel
import xml.etree.ElementTree as ET
from GlobalSettings import GlobalSettings
from java.io import BufferedReader
from java.io import File
from java.io import FileReader
from java.util import HashMap
from com.sun.rowset.internal import Row
import string
# Letters array
LETTERS = list(string.ascii_uppercase)
class VisitronNDCompositeDatasetConfig(MicroscopyCompositeDatasetConfig):
"""Image data configuration class for Visitron ND experiments."""
_DEBUG = False
# List of metadata attributes obtained either from the settings XML
# file generated by the Annotation Tool or returned by
# BioFormatsProcessor.getMetadata(asXML=False)
# (for all series in the file, sorted by series).
_allSeriesMetadata = None
# Number of the series to register (for a multi-series dataset).
_seriesNum = 0
# Series indices (since they might not always start from zero and
# grow monotonically.
_seriesIndices = []
# Logger
_logger = None
# Metadata folder
_metadataFolder = ""
# Maintain a metadata array
_metadata = []
# Regular expression patterns
_pattern = re.compile(r'^(?P<basename>.*?)' + # Series basename: group 1
'(_w(?P<channel>\d.*?)' + # Channel number (optional)
'(?P<channelname>.*?))?' + # Channel name (optional)
'(conf(?P<wavelength>\d.*?))?' + # Wavelength
'(_s(?P<series>\d.*?))?' + # Series number (optional)
'(_t(?P<timepoint>\d.*?))?' + # Time index (optional)
'(\.tif{1,2}|\.stk)$', # File extension
re.IGNORECASE|re.UNICODE)
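    # Example (illustrative filename): 'exp1_w2GFPconf488_s3_t10.tif' parses
    # as basename='exp1', channel='2', channelname='GFP', wavelength='488',
    # series='3', timepoint='10'.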
def __init__(self, allSeriesMetadata, seriesIndices, logger, seriesNum=0):
"""Constructor.
@param allSeriesMetadata: list of metadata attributes generated either
by the Annotation Tool and parsed from the
settings XML file, or from BioFormatsProcessor
and returned via:
BioFormatsProcessor.getMetadataXML(asXML=False)
@param seriesIndices: list of known series indices (do not
necessarily need to start at 0 and increase
monotonically by one; could be [22, 30, 32]
@param seriesNum: Int Number of the series to register. All
other series in the file will be ignored.
seriesNum MUST BE CONTAINED in seriesIndices.
@param logger: logger object
"""
# Store the logger
self._logger = logger
# Inform
if self._DEBUG:
self._logger.info("Initializing VISITRONNDCOMPOSITEDATASETCONFIG for series number " + str(seriesNum))
# Store the series metadata
self._allSeriesMetadata = allSeriesMetadata
# Store the seriesIndices
if type(seriesIndices) == str:
seriesIndices = seriesIndices.split(",")
self._seriesIndices = map(int, seriesIndices)
# Store the series number: make sure that it belongs to seriesIndices
self._seriesNum = int(seriesNum)
try:
self._seriesIndices.index(self._seriesNum)
        except ValueError:
raise(Exception("seriesNum (" + str(self._seriesNum) + ") MUST be contained " +
"in seriesIndices " + str(self._seriesIndices) + "!"))
# This is microscopy data
self.setMicroscopyData(True)
# Store raw data in original form
self.setOriginalDataStorageFormat(OriginalDataStorageFormat.UNCHANGED)
# Set the image library
self.setImageLibrary("BioFormats")
# Disable thumbnail generation by ImageMagick
self.setUseImageMagicToGenerateThumbnails(False)
# Specify resolution of image representations explicitly
resolutions = GlobalSettings.ImageResolutions
if not resolutions:
self._logger.info("Skipping thumbnails generation.")
self.setGenerateThumbnails(False)
else:
self._logger.info("Creating thumbnails at resolutions: " + str(resolutions))
self.setGenerateImageRepresentationsUsingImageResolutions(resolutions)
self.setGenerateThumbnails(True)
# Set the recognized extensions
self.setRecognizedImageExtensions(["tif", "tiff", "stk"])
# Set the dataset type
self.setDataSetType("MICROSCOPY_IMG")
# Create representative image (MIP) for the first series only
if self._seriesIndices.index(self._seriesNum) == 0:
self.setImageGenerationAlgorithm(
MaximumIntensityProjectionGenerationAlgorithm(
"MICROSCOPY_IMG_THUMBNAIL", 256, 256, "thumbnail.png"))
def createChannel(self, channelCode):
"""Create a channel from the channelCode with the name as read from
the file via the MetadataReader and the color (RGB) as read.
@param channelCode Code of the channel as generated by extractImagesMetadata().
"""
# Get the indices of series and channel from the channel code
(seriesIndx, channelIndx) = self._getSeriesAndChannelNumbers(channelCode)
# Get the channel name
name = self._getChannelName(seriesIndx, channelIndx)
# Get the channel color (RGB)
colorRGB = self._getChannelColor(seriesIndx, channelIndx)
if self._DEBUG:
self._logger.info("VISITRONNDCOMPOSITEDATASETCONFIG::createChannel(): " +
"channel (s = " + str(seriesIndx) + ", c = " +
str(channelIndx) + ") has code " + channelCode +
", color (" + str(colorRGB) + " and name " + name)
# Return the channel with given name and color (the code is set to
# be the same as the channel name).
return Channel(channelCode, name, colorRGB)
def extractImagesMetadata(self, imagePath, imageIdentifiers):
"""Overrides extractImageMetadata method making sure to store
both series and channel indices in the channel code to be reused
later to extract color information and other metadata.
The channel code is in the form SERIES-(\d+)_CHANNEL-(\d+).
Only metadata for the relevant series number is returned!
@param imagePath Full path to the file to process
@param imageIdentifiers Array of ImageIdentifier's
@see constructor.
"""
# Info
self._logger.info("Processing file " + str(imagePath) +
" with identifiers " + str(imageIdentifiers))
# Extract the relevant information from the file name - the image
# identifiers in this case do not carry any useful information.
m = self._pattern.match(imagePath)
if m is None:
err = "VISITRONNDCOMPOSITEDATASETCONFIG::extractImageMetadata(): " + \
"unexpected file name " + str(imagePath)
self._logger.error(err)
raise Exception(err)
# Get the extracted info
fileinfo = m.groupdict()
# Get and store the base name
basename = fileinfo['basename']
# Extract the series number
series = self._seriesNumFromFileName(imagePath)
if series == -1:
raise Exception("Could not find any series containing file " + imagePath + "!")
if self._DEBUG:
self._logger.info("Found file " + imagePath + " in series " + str(series))
# Make sure to process only the relevant series
if series != self._seriesNum:
return []
# Get current metadata
currentMetaData = self._allSeriesMetadata[series]
# Extract the channel number
# The channel number in the file name is 1-based
if fileinfo["channel"] is not None:
channelNumberFromFile = int(fileinfo['channel']) - 1
if self._DEBUG:
self._logger.info("Found channel number " + str(channelNumberFromFile) + " in file name.")
else:
self._logger.info("Channel number not found: fall back to image identifiers.")
channelNumberFromFile = -1
if fileinfo["channelname"] is not None:
channelName = fileinfo['channelname']
if self._DEBUG:
self._logger.info("Found channel name " + channelName + " in file name.")
        else:
            if channelNumberFromFile != -1:
                keyName = "channelName" + str(channelNumberFromFile)
                if keyName in currentMetaData:
                    channelName = currentMetaData[keyName]
                    self._logger.info("Channel name from metadata: " + channelName)
                else:
                    self._logger.info("Channel name not found: falling back to ''.")
                    channelName = ""
            else:
                self._logger.info("Channel name not found: falling back to ''.")
                channelName = ""
# Extract the wavelength
wavelengthFromFile = fileinfo['wavelength']
# Extract the timepoint
# The timepoint number in the file (if defined) is 1-based
if fileinfo["timepoint"] is not None:
timepointFromFile = int(fileinfo['timepoint']) - 1
if self._DEBUG:
self._logger.info("Found timepoint " + str(timepointFromFile) + " in file name.")
else:
timepointFromFile = -1
# Inform
if self._DEBUG:
self._logger.info("Parsing of file " + str(imagePath) + " gives: " + \
"basename = " + str(basename) + "; " + \
"channelNumber = " + str(channelNumberFromFile) + "; " + \
"channelName = " + channelName + "; " + \
"wavelength = " + str(wavelengthFromFile) + "; " + \
"seriesNum = " + str(series) + "; " + \
"timepoint = " + str(timepointFromFile))
# Initialize array of metadata entries
metaData = []
        # Now process the file identifiers for this file
# Iterate over all image identifiers
for id in imageIdentifiers:
# Extract the relevant info from the image identifier
plane = id.focalPlaneIndex
# Fallback
if channelNumberFromFile == -1:
channelNumber = int(id.colorChannelIndex)
else:
channelNumber = channelNumberFromFile
if timepointFromFile == -1:
timepoint = id.timeSeriesIndex
else:
timepoint = timepointFromFile
if self._DEBUG:
self._logger.info("Image identifiers for image " + str(imagePath) +
": " + str(id) + " map to " +
"channel = " + str(id.colorChannelIndex) +
"; plane = " + str(id.focalPlaneIndex) +
"; series = " + str(id.seriesIndex) +
"; timepoint = " + str(id.timeSeriesIndex))
self._logger.info("Geometry after integrating image identifiers: " +
"channel = " + str(channelNumber) +
"; plane = " + str(plane) +
"; series = " + str(series) +
"; timepoint = " + str(timepoint))
# Build the channel code
channelCode = "SERIES-" + str(series) + "_CHANNEL-" + str(channelNumber)
if self._DEBUG:
self._logger.info("Adding image to channel with channel code " + channelCode)
# Attempt to work around a geometry-parsing issue in imageIdentifiers
expectedNumPlanes = int(currentMetaData["sizeZ"])
expectedNumTimepoints = int(currentMetaData["sizeT"])
if (timepoint > (expectedNumTimepoints - 1) and expectedNumPlanes > 1) or \
(plane > (expectedNumPlanes - 1) and expectedNumTimepoints > 1):
self._logger.info("Swapping Z and T")
timepoint, plane = plane, timepoint
# Update the ImageIdentifier
id = ImageIdentifier(series, timepoint, plane, channelNumber)
# Initialize a new ImageMetadata object
            imageMetadata = ImageMetadata()
# Fill in all information
imageMetadata.imageIdentifier = id
imageMetadata.seriesNumber = series
imageMetadata.timepoint = timepoint
imageMetadata.depth = plane
imageMetadata.channelCode = channelCode
imageMetadata.tileNumber = 1 # + self._seriesNum
imageMetadata.well = "IGNORED"
# Append metadata for current image
metaData.append(imageMetadata)
# Now return the image metadata object in an array
return metaData
def _getChannelName(self, seriesIndx, channelIndx):
"""Returns the channel name (from the parsed metadata) for
a given channel in a given series."
"""
self._logger.info("Retrieving channel name for " + \
"series " + str(seriesIndx) + " and " + \
"channel " + str(channelIndx))
# Get the metadata for the requested series
metadata = self._allSeriesMetadata[seriesIndx]
# Try extracting the name for the given series and channel
try:
key = "channelName" + str(channelIndx)
name = metadata[key]
except KeyError:
err = "VISITRONNDCOMPOSITEDATASETCONFIG::getChannelName(): " + \
"Could not create channel name for channel " + str(channelIndx) + \
" and series " + str(seriesIndx) + "for key = " + \
key + " from metadata = " + \
str(metadata)
self._logger.error(err)
raise(Exception(err))
# In case no name was found, assign default name
if name == "":
name = "No name"
self._logger.info("The channel name is " + name)
return name
def _getChannelColor(self, seriesIndx, channelIndx):
"""Returns the channel color (from the parsed metadata) for
a given channel in a given series."
"""
# Get the position in the seriesIndices list
indx = self._seriesIndices.index(int(seriesIndx))
# Get the metadata for the requested series
metadata = self._allSeriesMetadata[indx]
# Get the metadata
try:
key = "channelColor" + str(channelIndx)
color = metadata[key]
except:
color = None
if color is not None:
# The color is already in the 0 .. 255 range
color = color.split(",")
R = int(float(color[0]))
G = int(float(color[1]))
B = int(float(color[2]))
else:
if channelIndx == 0:
R = 255
G = 0
B = 0
elif channelIndx == 1:
R = 0
G = 255
B = 0
elif channelIndx == 2:
R = 0
G = 0
B = 255
else:
R = random.randint(0, 255)
G = random.randint(0, 255)
B = random.randint(0, 255)
# Work around an issue if all color components are 0
if R == G == B == 0:
R = 255
G = 255
B = 255
self._logger.info("Color changed from (0, 0, 0) to (255, 255, 255)")
# Create the ChannelColorRGB object
colorRGB = ChannelColorRGB(R, G, B)
# Return it
return colorRGB
def _getSeriesAndChannelNumbers(self, channelCode):
"""Extract series and channel number from channel code in
the form SERIES-(\d+)_CHANNEL-(\d+) to a tuple
(seriesIndx, channelIndx).
@param channelCode Code of the channel as generated by extractImagesMetadata().
"""
p = re.compile("SERIES-(\d+)_CHANNEL-(\d+)")
m = p.match(channelCode)
if m is None or len(m.groups()) != 2:
err = "YOUSCOPEEXPERMENTCOMPOSITEDATASETCONFIG::_getSeriesAndChannelNumbers(): " + \
"Could not extract series and channel number!"
self._logger.error(err)
raise Exception(err)
# Now assign the indices
seriesIndx = int(m.group(1))
channelIndx = int(m.group(2))
if self._DEBUG:
self._logger.info("Current channel code " + channelCode + \
" corresponds to series = " + str(seriesIndx) + \
" and channel = " + str(channelIndx))
# Return them
return seriesIndx, channelIndx
def _seriesNumFromFileName(self, fileName):
"""
        Return the index of the series containing the given file name.
"""
self._logger.info(fileName)
for i in range(len(self._allSeriesMetadata)):
metadata = self._allSeriesMetadata[i]
fileNamesFromMetadata = metadata['filenames'].split(';')
for name in fileNamesFromMetadata:
if name.lower().endswith(fileName.lower()):
return i
return -1
| aarpon/obit_microscopy_core_technology | core-plugins/microscopy/2/dss/drop-boxes/MicroscopyDropbox/VisitronNDCompositeDatasetConfig.py | Python | apache-2.0 | 18,425 |
#
# sonos.py
# Who doesn't love some music?
#
__author__ = 'Chris Swanson'
import soco
import ConfigParser
import time
class Sonos:
def __init__(self):
self.import_conf()
self.sonos = soco.SoCo(self.ipaddr)
def import_conf(self):
config = ConfigParser.ConfigParser()
config.read('config.ini')
# Sonos IP Address
self.ipaddr = config.get('sonos','ipaddr')
# Starting Volume
self.vol_start = int(config.get('sonos','vol_start'))
# Ending Volume
self.vol_end = int(config.get('sonos','vol_end'))
# Rate the audio should adjust at (in seconds)
self.rate = int(config.get('sonos','rate'))
# Audio url for files that Sonos should play for the alarm.
self.audio_url = config.get('audio','url')
def _iter_vol(self):
for vol in xrange(self.vol_start, self.vol_end+1):
self.sonos.volume = vol
print('Volume:\t\t%s' % vol)
			time.sleep(self.rate)  # honor the configured ramp rate
def play_audio(self):
if self.audio_url:
self.sonos.play_uri(self.audio_url)
print('Playing:\t%s' % self.audio_url)
self._iter_vol()
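# Minimal usage sketch (assumes a config.ini next to this file with the
# [sonos] and [audio] sections read by import_conf() above):
#
#   alarm = Sonos()
#   alarm.play_audio()  # plays audio_url, then ramps volume from vol_start to vol_end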
| ChrisSwanson/PygerAlarm | alarms/sonos.py | Python | apache-2.0 | 1,193 |
"""Config flow to configure Heos."""
import asyncio
from pyheos import Heos
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_NAME
from .const import DATA_DISCOVERED_HOSTS, DOMAIN
def format_title(host: str) -> str:
"""Format the title for config entries."""
return "Controller ({})".format(host)
@config_entries.HANDLERS.register(DOMAIN)
class HeosFlowHandler(config_entries.ConfigFlow):
"""Define a flow for HEOS."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered Heos device."""
# Store discovered host
friendly_name = "{} ({})".format(
discovery_info[CONF_NAME], discovery_info[CONF_HOST]
)
self.hass.data.setdefault(DATA_DISCOVERED_HOSTS, {})
self.hass.data[DATA_DISCOVERED_HOSTS][friendly_name] = discovery_info[CONF_HOST]
# Abort if other flows in progress or an entry already exists
if self._async_in_progress() or self._async_current_entries():
return self.async_abort(reason="already_setup")
# Show selection form
return self.async_show_form(step_id="user")
async def async_step_import(self, user_input=None):
"""Occurs when an entry is setup through config."""
host = user_input[CONF_HOST]
return self.async_create_entry(title=format_title(host), data={CONF_HOST: host})
async def async_step_user(self, user_input=None):
"""Obtain host and validate connection."""
self.hass.data.setdefault(DATA_DISCOVERED_HOSTS, {})
# Only a single entry is needed for all devices
if self._async_current_entries():
return self.async_abort(reason="already_setup")
# Try connecting to host if provided
errors = {}
host = None
if user_input is not None:
host = user_input[CONF_HOST]
# Map host from friendly name if in discovered hosts
host = self.hass.data[DATA_DISCOVERED_HOSTS].get(host, host)
heos = Heos(host)
try:
await heos.connect()
self.hass.data.pop(DATA_DISCOVERED_HOSTS)
return await self.async_step_import({CONF_HOST: host})
except (asyncio.TimeoutError, ConnectionError):
errors[CONF_HOST] = "connection_failure"
finally:
await heos.disconnect()
# Return form
host_type = (
str
if not self.hass.data[DATA_DISCOVERED_HOSTS]
else vol.In(list(self.hass.data[DATA_DISCOVERED_HOSTS]))
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({vol.Required(CONF_HOST, default=host): host_type}),
errors=errors,
)
| fbradyirl/home-assistant | homeassistant/components/heos/config_flow.py | Python | apache-2.0 | 2,907 |
from pony.orm import *
from datetime import datetime
from model.group import Group
from model.contact import Contact
from pymysql.converters import encoders, decoders, convert_mysql_timestamp
class ORMFixture:
db = Database()
    class ORMGroup(db.Entity):
        _table_ = 'group_list'
        id = PrimaryKey(int, column='group_id')
        name = Optional(str, column='group_name')
        header = Optional(str, column='group_header')
        footer = Optional(str, column='group_footer')
        contacts = Set(lambda: ORMFixture.ORMContact, table="address_in_groups", column="id", reverse="groups", lazy=True)
    class ORMContact(db.Entity):
        _table_ = 'addressbook'
        id = PrimaryKey(int, column='id')
        firstname = Optional(str, column='firstname')
        lastname = Optional(str, column='lastname')
        deprecated = Optional(datetime, column='deprecated')
        groups = Set(lambda: ORMFixture.ORMGroup, table="address_in_groups", column="group_id", reverse="contacts", lazy=True)
def __init__(self, host, name, user, password):
conv = encoders
conv.update(decoders)
conv[datetime] = convert_mysql_timestamp
#self.db.bind('mysql', host=host, database=name, user=user, password=password, conv = decoders)
self.db.bind('mysql', host=host, database=name, user=user, password=password, conv=conv)
self.db.generate_mapping()
sql_debug(True)
    # Build Group model objects from ORMGroup entities
def convert_groups_to_model(self, groups):
def convert(group):
return Group(id=str(group.id), name = group.name, header = group.header, footer = group.footer)
return list(map(convert, groups))
    @db_session  # required marker: the function runs inside a database session
def get_group_list(self):
return self.convert_groups_to_model(list(select(g for g in ORMFixture.ORMGroup)))
def convert_contacts_to_model(self, contacts):
def convert(contact):
return Contact(id=str(contact.id), firstname = contact.firstname, lastname = contact.lastname)
return list(map(convert, contacts))
    @db_session  # required marker: the function runs inside a database session
def get_contact_list(self):
return self.convert_contacts_to_model(list(select(c for c in ORMFixture.ORMContact if c.deprecated is None)))
#return list(select(c for c in ORMFixture.ORMContact if c.deprecated is None))
    @db_session  # required marker: the function runs inside a database session
def get_contacts_in_group(self, group):
        orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(orm_group.contacts)
#return orm_group.contacts
    @db_session  # required marker: the function runs inside a database session
def get_contacts_not_in_group(self, group):
        orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(
select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups))
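# Minimal usage sketch (the connection parameters below are placeholders):
#
#   orm = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
#   groups = orm.get_group_list()
#   contacts = orm.get_contacts_in_group(groups[0])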
| vspitsyn/python_taining | fixture/orm.py | Python | apache-2.0 | 3,388 |
# vim:tw=50
"""Tuples
You have already seen one kind of sequence: the
string. Strings are a sequence of one-character
strings - they're strings all the way down. They
are also **immutable**: once you have defined one,
it can never change.
Another immutable sequence type in Python is the
**tuple**. You define a tuple by separating values
by commas, thus:
10, 20, 30 # This is a 3-element tuple.
They are usually set apart with parentheses, e.g.,
|(10, 20, 30)|, though these are not always
required (the empty tuple |()|, however, does
require parentheses). It's usually best to just
use them.
Tuples, as is true of every other Python sequence,
support **indexing**, accessing a single element
with the |[]| notation:
    print(my_tuple[10]) # Get the element at index 10.
Exercises
- Create a one-element tuple and print it out,
e.g., |a = 4,| (the trailing comma is required).
- Try comparing two tuples to each other using
standard comparison operators, like |<| or |>=|.
How does the comparison work?
"""
# A basic tuple.
a = 1, 3, 'hey', 2
print(a)
# Usually you see them with parentheses:
b = (1, 3, 'hey', 2)
print(b)
print("b has", len(b), "elements")
# Indexing is easy:
print("first element", b[0])
print("third element", b[2])
# Even from the right side (the 'back'):
print("last element", b[-1])
print("penultimate", b[-2])
# Parentheses are always required for the empty
# tuple:
print("empty", ())
# And single-element tuples have to have a comma:
print("singleton", (5,)) # A tuple
print("not a tuple", (5)) # A number
# They are immutable, though: you can't change
# them. The next line raises a TypeError:
b[1] = 'new value' # oops
| shiblon/pytour | 3/tutorials/tuples.py | Python | apache-2.0 | 1,633 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import socket
import time
from urllib.error import HTTPError, URLError
import jenkins
from jenkins import JenkinsException
from requests import Request
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.jenkins.hooks.jenkins import JenkinsHook
from airflow.utils.decorators import apply_defaults
def jenkins_request_with_headers(jenkins_server, req):
"""
    We need the headers in addition to the response body
    in order to read the queue location from them
This function uses jenkins_request method from python-jenkins library
with just the return call changed
:param jenkins_server: The server to query
:param req: The request to execute
:return: Dict containing the response body (key body)
and the headers coming along (headers)
"""
try:
response = jenkins_server.jenkins_request(req)
response_body = response.content
response_headers = response.headers
if response_body is None:
raise jenkins.EmptyResponseException(
"Error communicating with server[%s]: "
"empty response" % jenkins_server.server)
return {'body': response_body.decode('utf-8'), 'headers': response_headers}
except HTTPError as e:
        # Jenkins's funky authentication means it's nigh impossible to distinguish errors.
if e.code in [401, 403, 500]:
raise JenkinsException(
'Error in request. Possibly authentication failed [%s]: %s' % (e.code, e.msg)
)
elif e.code == 404:
raise jenkins.NotFoundException('Requested item could not be found')
else:
raise
except socket.timeout as e:
raise jenkins.TimeoutException('Error in request: %s' % e)
except URLError as e:
raise JenkinsException('Error in request: %s' % e.reason)
class JenkinsJobTriggerOperator(BaseOperator):
"""
    Trigger a Jenkins Job and monitor its execution.
    This operator depends on the python-jenkins library,
    version >= 0.4.15, to communicate with the Jenkins server.
You'll also need to configure a Jenkins connection in the connections screen.
:param jenkins_connection_id: The jenkins connection to use for this job
:type jenkins_connection_id: str
:param job_name: The name of the job to trigger
:type job_name: str
:param parameters: The parameters block to provide to jenkins. (templated)
:type parameters: str
:param sleep_time: How long will the operator sleep between each status
request for the job (min 1, default 10)
:type sleep_time: int
    :param max_try_before_job_appears: The maximum number of requests to make
        while waiting for the job to appear on the Jenkins server (default 10)
:type max_try_before_job_appears: int
"""
template_fields = ('parameters',)
template_ext = ('.json',)
ui_color = '#f9ec86'
@apply_defaults
def __init__(self,
jenkins_connection_id,
job_name,
parameters="",
sleep_time=10,
max_try_before_job_appears=10,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.job_name = job_name
self.parameters = parameters
if sleep_time < 1:
sleep_time = 1
self.sleep_time = sleep_time
self.jenkins_connection_id = jenkins_connection_id
self.max_try_before_job_appears = max_try_before_job_appears
def build_job(self, jenkins_server):
"""
This function makes an API call to Jenkins to trigger a build for 'job_name'
        It returns a dict with 2 keys: body and headers.
        headers also contains a dict-like object which can be queried to get
        the location to poll in the queue.
:param jenkins_server: The jenkins server where the job should be triggered
:return: Dict containing the response body (key body)
and the headers coming along (headers)
"""
        # Warning: if the parameters are too long, the URL can exceed
        # the maximum allowed size
if self.parameters and isinstance(self.parameters, str):
import ast
self.parameters = ast.literal_eval(self.parameters)
if not self.parameters:
# We need a None to call the non parametrized jenkins api end point
self.parameters = None
request = Request(
method='POST',
url=jenkins_server.build_job_url(self.job_name, self.parameters, None))
return jenkins_request_with_headers(jenkins_server, request)
def poll_job_in_queue(self, location, jenkins_server):
"""
        This method polls the jenkins queue until the job is executed.
        When we trigger a job through an API call,
        the job is first put in the queue without having a build number assigned.
        Thus we have to wait for the job to exit the queue to know its build number.
        To do so, we have to add /api/json (or /api/xml) to the location
        returned by the build_job call and poll this file.
        When an 'executable' block appears in the json, it means the job execution started
and the field 'number' then contains the build number.
:param location: Location to poll, returned in the header of the build_job call
:param jenkins_server: The jenkins server to poll
:return: The build_number corresponding to the triggered job
"""
try_count = 0
location = location + '/api/json'
# TODO Use get_queue_info instead
# once it will be available in python-jenkins (v > 0.4.15)
self.log.info('Polling jenkins queue at the url %s', location)
while try_count < self.max_try_before_job_appears:
location_answer = jenkins_request_with_headers(
jenkins_server, Request(method='POST', url=location))
if location_answer is not None:
json_response = json.loads(location_answer['body'])
if 'executable' in json_response:
build_number = json_response['executable']['number']
self.log.info('Job executed on Jenkins side with the build number %s',
build_number)
return build_number
try_count += 1
time.sleep(self.sleep_time)
raise AirflowException("The job hasn't been executed after polling "
f"the queue {self.max_try_before_job_appears} times")
def get_hook(self):
"""
Instantiate jenkins hook
"""
return JenkinsHook(self.jenkins_connection_id)
def execute(self, context):
if not self.jenkins_connection_id:
            self.log.error(
                'Please specify the jenkins connection id to use. '
                'You must create a Jenkins connection before'
                ' being able to use this operator')
            raise AirflowException('The jenkins_connection_id parameter is missing, '
                                   'impossible to trigger the job')
if not self.job_name:
self.log.error("Please specify the job name to use in the job_name parameter")
            raise AirflowException('The job_name parameter is missing, '
                                   'impossible to trigger the job')
self.log.info(
'Triggering the job %s on the jenkins : %s with the parameters : %s',
self.job_name, self.jenkins_connection_id, self.parameters)
jenkins_server = self.get_hook().get_jenkins_server()
jenkins_response = self.build_job(jenkins_server)
build_number = self.poll_job_in_queue(
jenkins_response['headers']['Location'], jenkins_server)
time.sleep(self.sleep_time)
keep_polling_job = True
build_info = None
# pylint: disable=too-many-nested-blocks
while keep_polling_job:
try:
build_info = jenkins_server.get_build_info(name=self.job_name,
number=build_number)
if build_info['result'] is not None:
keep_polling_job = False
# Check if job had errors.
if build_info['result'] != 'SUCCESS':
raise AirflowException(
'Jenkins job failed, final state : %s.'
'Find more information on job url : %s'
% (build_info['result'], build_info['url']))
else:
self.log.info('Waiting for job to complete : %s , build %s',
self.job_name, build_number)
time.sleep(self.sleep_time)
except jenkins.NotFoundException as err:
# pylint: disable=no-member
raise AirflowException(
'Jenkins job status check failed. Final error was: '
f'{err.resp.status}')
except jenkins.JenkinsException as err:
raise AirflowException(
                    f'Jenkins call failed with error: {err}. If you have parameters, '
                    'double check them; Jenkins sends back '
                    'this exception for unknown parameters. '
                    'You can also check logs for more details on this exception '
                    '(jenkins_url/log/rss)')
if build_info:
# If we can we return the url of the job
# for later use (like retrieving an artifact)
return build_info['url']
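# Example usage in a DAG (a sketch only; the dag object, connection id and
# job name below are hypothetical):
#
#   trigger = JenkinsJobTriggerOperator(
#       task_id="trigger_jenkins_build",
#       jenkins_connection_id="jenkins_default",
#       job_name="my-job",
#       parameters={"param1": "value1"},
#       dag=dag,
#   )
#
# The build URL returned by execute() is pushed to XCom, so a downstream
# task can use it, e.g. to fetch build artifacts.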
| wooga/airflow | airflow/providers/jenkins/operators/jenkins_job_trigger.py | Python | apache-2.0 | 10,560 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'sortdialog.ui'
#
# Created: Sat Apr 18 15:24:45 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_SortDialog(object):
def setupUi(self, SortDialog):
SortDialog.setObjectName("SortDialog")
SortDialog.resize(317, 276)
self.gridLayout_4 = QtGui.QGridLayout(SortDialog)
self.gridLayout_4.setObjectName("gridLayout_4")
self.primaryGroupBox = QtGui.QGroupBox(SortDialog)
self.primaryGroupBox.setObjectName("primaryGroupBox")
self.gridLayout = QtGui.QGridLayout(self.primaryGroupBox)
self.gridLayout.setObjectName("gridLayout")
self.label = QtGui.QLabel(self.primaryGroupBox)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.primaryColumnCombo = QtGui.QComboBox(self.primaryGroupBox)
self.primaryColumnCombo.setObjectName("primaryColumnCombo")
self.primaryColumnCombo.addItem("")
self.gridLayout.addWidget(self.primaryColumnCombo, 0, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 0, 2, 1, 1)
self.label_2 = QtGui.QLabel(self.primaryGroupBox)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.primaryOrderCombo = QtGui.QComboBox(self.primaryGroupBox)
self.primaryOrderCombo.setObjectName("primaryOrderCombo")
self.primaryOrderCombo.addItem("")
self.primaryOrderCombo.addItem("")
self.gridLayout.addWidget(self.primaryOrderCombo, 1, 1, 1, 2)
self.gridLayout_4.addWidget(self.primaryGroupBox, 0, 0, 1, 1)
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.okButton = QtGui.QPushButton(SortDialog)
self.okButton.setDefault(True)
self.okButton.setObjectName("okButton")
self.verticalLayout.addWidget(self.okButton)
self.cancelButton = QtGui.QPushButton(SortDialog)
self.cancelButton.setObjectName("cancelButton")
self.verticalLayout.addWidget(self.cancelButton)
spacerItem1 = QtGui.QSpacerItem(20, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem1)
self.moreButton = QtGui.QPushButton(SortDialog)
self.moreButton.setCheckable(True)
self.moreButton.setObjectName("moreButton")
self.verticalLayout.addWidget(self.moreButton)
self.gridLayout_4.addLayout(self.verticalLayout, 0, 1, 2, 1)
spacerItem2 = QtGui.QSpacerItem(20, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_4.addItem(spacerItem2, 1, 0, 1, 1)
self.secondaryGroupBox = QtGui.QGroupBox(SortDialog)
self.secondaryGroupBox.setObjectName("secondaryGroupBox")
self.gridLayout_2 = QtGui.QGridLayout(self.secondaryGroupBox)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_3 = QtGui.QLabel(self.secondaryGroupBox)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, 0, 0, 1, 1)
self.secondaryColumnCombo = QtGui.QComboBox(self.secondaryGroupBox)
self.secondaryColumnCombo.setObjectName("secondaryColumnCombo")
self.secondaryColumnCombo.addItem("")
self.gridLayout_2.addWidget(self.secondaryColumnCombo, 0, 1, 1, 1)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem3, 0, 2, 1, 1)
self.label_4 = QtGui.QLabel(self.secondaryGroupBox)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, 1, 0, 1, 1)
self.secondaryOrderCombo = QtGui.QComboBox(self.secondaryGroupBox)
self.secondaryOrderCombo.setObjectName("secondaryOrderCombo")
self.secondaryOrderCombo.addItem("")
self.secondaryOrderCombo.addItem("")
self.gridLayout_2.addWidget(self.secondaryOrderCombo, 1, 1, 1, 2)
self.gridLayout_4.addWidget(self.secondaryGroupBox, 2, 0, 1, 1)
self.tertiaryGroupBox = QtGui.QGroupBox(SortDialog)
self.tertiaryGroupBox.setObjectName("tertiaryGroupBox")
self.gridLayout_3 = QtGui.QGridLayout(self.tertiaryGroupBox)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_5 = QtGui.QLabel(self.tertiaryGroupBox)
self.label_5.setObjectName("label_5")
self.gridLayout_3.addWidget(self.label_5, 0, 0, 1, 1)
self.tertiaryColumnCombo = QtGui.QComboBox(self.tertiaryGroupBox)
self.tertiaryColumnCombo.setObjectName("tertiaryColumnCombo")
self.tertiaryColumnCombo.addItem("")
self.gridLayout_3.addWidget(self.tertiaryColumnCombo, 0, 1, 1, 1)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem4, 0, 2, 1, 1)
self.label_6 = QtGui.QLabel(self.tertiaryGroupBox)
self.label_6.setObjectName("label_6")
self.gridLayout_3.addWidget(self.label_6, 1, 0, 1, 1)
self.tertiaryOrderCombo = QtGui.QComboBox(self.tertiaryGroupBox)
self.tertiaryOrderCombo.setObjectName("tertiaryOrderCombo")
self.tertiaryOrderCombo.addItem("")
self.tertiaryOrderCombo.addItem("")
self.gridLayout_3.addWidget(self.tertiaryOrderCombo, 1, 1, 1, 2)
self.gridLayout_4.addWidget(self.tertiaryGroupBox, 3, 0, 1, 1)
self.retranslateUi(SortDialog)
QtCore.QObject.connect(self.okButton, QtCore.SIGNAL("clicked()"), SortDialog.accept)
QtCore.QObject.connect(self.cancelButton, QtCore.SIGNAL("clicked()"), SortDialog.reject)
QtCore.QObject.connect(self.moreButton, QtCore.SIGNAL("toggled(bool)"), self.secondaryGroupBox.setVisible)
QtCore.QObject.connect(self.moreButton, QtCore.SIGNAL("toggled(bool)"), self.tertiaryGroupBox.setVisible)
QtCore.QMetaObject.connectSlotsByName(SortDialog)
SortDialog.setTabOrder(self.primaryColumnCombo, self.primaryOrderCombo)
SortDialog.setTabOrder(self.primaryOrderCombo, self.secondaryColumnCombo)
SortDialog.setTabOrder(self.secondaryColumnCombo, self.secondaryOrderCombo)
SortDialog.setTabOrder(self.secondaryOrderCombo, self.tertiaryColumnCombo)
SortDialog.setTabOrder(self.tertiaryColumnCombo, self.tertiaryOrderCombo)
SortDialog.setTabOrder(self.tertiaryOrderCombo, self.okButton)
SortDialog.setTabOrder(self.okButton, self.cancelButton)
SortDialog.setTabOrder(self.cancelButton, self.moreButton)
def retranslateUi(self, SortDialog):
SortDialog.setWindowTitle(QtGui.QApplication.translate("SortDialog", "Sort", None, QtGui.QApplication.UnicodeUTF8))
self.primaryGroupBox.setTitle(QtGui.QApplication.translate("SortDialog", "&Primary Key", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("SortDialog", "Column:", None, QtGui.QApplication.UnicodeUTF8))
self.primaryColumnCombo.setItemText(0, QtGui.QApplication.translate("SortDialog", "None", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("SortDialog", "Order:", None, QtGui.QApplication.UnicodeUTF8))
self.primaryOrderCombo.setItemText(0, QtGui.QApplication.translate("SortDialog", "Ascending", None, QtGui.QApplication.UnicodeUTF8))
self.primaryOrderCombo.setItemText(1, QtGui.QApplication.translate("SortDialog", "Descending", None, QtGui.QApplication.UnicodeUTF8))
self.okButton.setText(QtGui.QApplication.translate("SortDialog", "OK", None, QtGui.QApplication.UnicodeUTF8))
self.cancelButton.setText(QtGui.QApplication.translate("SortDialog", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
        self.moreButton.setText(QtGui.QApplication.translate("SortDialog", "&Advanced >>>", None, QtGui.QApplication.UnicodeUTF8))
self.secondaryGroupBox.setTitle(QtGui.QApplication.translate("SortDialog", "&Secondary Key", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("SortDialog", "Column:", None, QtGui.QApplication.UnicodeUTF8))
self.secondaryColumnCombo.setItemText(0, QtGui.QApplication.translate("SortDialog", "None", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("SortDialog", "Order:", None, QtGui.QApplication.UnicodeUTF8))
self.secondaryOrderCombo.setItemText(0, QtGui.QApplication.translate("SortDialog", "Ascending", None, QtGui.QApplication.UnicodeUTF8))
self.secondaryOrderCombo.setItemText(1, QtGui.QApplication.translate("SortDialog", "Descending", None, QtGui.QApplication.UnicodeUTF8))
self.tertiaryGroupBox.setTitle(QtGui.QApplication.translate("SortDialog", "&Tertiary Key", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("SortDialog", "Column:", None, QtGui.QApplication.UnicodeUTF8))
self.tertiaryColumnCombo.setItemText(0, QtGui.QApplication.translate("SortDialog", "None", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("SortDialog", "Order:", None, QtGui.QApplication.UnicodeUTF8))
self.tertiaryOrderCombo.setItemText(0, QtGui.QApplication.translate("SortDialog", "Ascending", None, QtGui.QApplication.UnicodeUTF8))
self.tertiaryOrderCombo.setItemText(1, QtGui.QApplication.translate("SortDialog", "Descending", None, QtGui.QApplication.UnicodeUTF8))
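# Typical usage of a pyside-uic generated class (a sketch):
#
#   import sys
#   from PySide import QtGui
#   app = QtGui.QApplication(sys.argv)
#   dialog = QtGui.QDialog()
#   ui = Ui_SortDialog()
#   ui.setupUi(dialog)
#   dialog.show()
#   sys.exit(app.exec_())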
| piexona/pyside-examples | sortdialog.py | Python | apache-2.0 | 9,805 |
import os
import jinja2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
from flask import Flask
app = Flask(__name__)
app.config['DEBUG'] = True
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/')
def hello():
template = JINJA_ENVIRONMENT.get_template('templates/index.html')
return template.render()
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, nothing at this URL.', 404
| rchlchang/byte1 | main.py | Python | apache-2.0 | 682 |
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Runoff import CNum
class TestCNum(VariableUnitTest):
def test_elementwise_CNum(self):
z = self.z
np.testing.assert_array_almost_equal(
np.load(self.basepath + "/CNum.npy"),
CNum.CNum(z.NYrs, z.DaysMonth, z.Temp, z.Prec, z.InitSnow_0, z.AntMoist_0, z.CN, z.NRur, z.NUrb,
z.Grow_0), decimal=7)
def test_CNum(self):
z = self.z
np.testing.assert_array_almost_equal(
CNum.CNum(z.NYrs, z.DaysMonth, z.Temp, z.Prec, z.InitSnow_0, z.AntMoist_0, z.CN, z.NRur, z.NUrb, z.Grow_0),
CNum.CNum_f(z.NYrs, z.DaysMonth, z.Temp, z.Prec, z.InitSnow_0, z.AntMoist_0, z.CN, z.NRur, z.NUrb,
z.Grow_0), decimal=7)
| WikiWatershed/gwlf-e | test/unittests/test_CNum.py | Python | apache-2.0 | 821 |
#
# This file is execfile()d with the current directory set to its containing dir
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
try:
import sphinx_rtd_theme
except ImportError:
sphinx_rtd_theme = None
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
import nacl # flake8: noqa
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "PyNaCl"
copyright = "2013, Donald Stufft and Individual Contributors"
# The version info for the project you"re documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "".join(nacl.__version__.split(".")[:2])
# The full version, including alpha/beta/rc tags.
release = nacl.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ""
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = "%B %d, %Y"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents
# default_role = None
# If true, "()" will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx_rtd_theme:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not "", a "Last updated on:" timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "PyNaCldoc"
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"http://docs.python.org/": None}
# Retry requests in the linkcheck builder so that we're resilient against
# transient network errors.
linkcheck_retries = 10
| pyca/pynacl | docs/conf.py | Python | apache-2.0 | 6,159 |
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from biggraphite import test_utils as bg_test_utils # noqa
bg_test_utils.prepare_graphite_imports() # noqa
import unittest
from carbon import conf as carbon_conf
from carbon import exceptions as carbon_exceptions
from biggraphite.plugins import carbon as bg_carbon
_TEST_METRIC = "mytestmetric"
class TestCarbonDatabase(bg_test_utils.TestCaseWithFakeAccessor):
def setUp(self):
super(TestCarbonDatabase, self).setUp()
self.fake_drivers()
settings = carbon_conf.Settings()
settings["BG_CONTACT_POINTS"] = "host1,host2"
settings["BG_KEYSPACE"] = self.KEYSPACE
settings["STORAGE_DIR"] = self.tempdir
self._plugin = bg_carbon.BigGraphiteDatabase(settings)
self._plugin.create(
_TEST_METRIC,
retentions=[(1, 60)],
xfilesfactor=0.5,
aggregation_method="sum",
)
def test_empty_settings(self):
self.assertRaises(carbon_exceptions.CarbonConfigException,
bg_carbon.BigGraphiteDatabase, carbon_conf.Settings())
def test_get_fs_path(self):
path = self._plugin.getFilesystemPath(_TEST_METRIC)
self.assertTrue(path.startswith("//biggraphite/"))
self.assertIn(_TEST_METRIC, path)
def test_create_get(self):
other_metric = _TEST_METRIC + "-other"
self._plugin.create(
other_metric,
retentions=[(1, 60)],
xfilesfactor=0.5,
aggregation_method="avg",
)
self.assertTrue(self._plugin.exists(other_metric))
self.assertEqual("avg", self._plugin.getMetadata(other_metric, "aggregationMethod"))
def test_nosuchmetric(self):
other_metric = _TEST_METRIC + "-nosuchmetric"
self.assertRaises(
ValueError,
self._plugin.setMetadata, other_metric, "aggregationMethod", "avg")
self.assertRaises(
ValueError,
self._plugin.getMetadata, other_metric, "aggregationMethod")
def test_set(self):
# Setting the same value should work
self._plugin.setMetadata(_TEST_METRIC, "aggregationMethod", "sum")
# Setting a different value should fail
self.assertRaises(
ValueError,
self._plugin.setMetadata, _TEST_METRIC, "aggregationMethod", "avg")
def test_write(self):
metric = bg_test_utils.make_metric(_TEST_METRIC)
points = [(1, 42)]
self.accessor.create_metric(metric)
# Writing twice (the first write is sync and the next one isn't)
self._plugin.write(metric.name, points)
self._plugin.write(metric.name, points)
actual_points = self.accessor.fetch_points(metric, 1, 2, stage=metric.retention[0])
self.assertEqual(points, list(actual_points))
if __name__ == "__main__":
unittest.main()
| unbrice/biggraphite | tests/test_carbon.py | Python | apache-2.0 | 3,483 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Selection import *
from Alphas.RsiAlphaModel import RsiAlphaModel
from Alphas.EmaCrossAlphaModel import EmaCrossAlphaModel
from Portfolio.EqualWeightingPortfolioConstructionModel import EqualWeightingPortfolioConstructionModel
from Execution.ImmediateExecutionModel import ImmediateExecutionModel
from Risk.NullRiskManagementModel import NullRiskManagementModel
from datetime import timedelta
import numpy as np
### <summary>
### Showcases how to use the CompositeAlphaModel to combine multiple alpha models.
### </summary>
class CompositeAlphaModelFrameworkAlgorithm(QCAlgorithmFramework):
    '''Showcases how to use the CompositeAlphaModel to combine multiple alpha models.'''
def Initialize(self):
self.SetStartDate(2013,10,7) #Set Start Date
self.SetEndDate(2013,10,11) #Set End Date
self.SetCash(100000) #Set Strategy Cash
# even though we're using a framework algorithm, we can still add our securities
        # using the AddEquity/Forex/Crypto/etc. methods and then pass them into a manual
# universe selection model using Securities.Keys
self.AddEquity("SPY")
self.AddEquity("IBM")
self.AddEquity("BAC")
self.AddEquity("AIG")
# define a manual universe of all the securities we manually registered
self.SetUniverseSelection(ManualUniverseSelectionModel(self.Securities.Keys))
# define alpha model as a composite of the rsi and ema cross models
self.SetAlpha(CompositeAlphaModel(RsiAlphaModel(), EmaCrossAlphaModel()))
# default models for the rest
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
self.SetExecution(ImmediateExecutionModel())
self.SetRiskManagement(NullRiskManagementModel()) | AnshulYADAV007/Lean | Algorithm.Python/CompositeAlphaModelFrameworkAlgorithm.py | Python | apache-2.0 | 2,828 |
import re
import logging
from cattle import utils
from cattle.lock import lock
from cattle.utils import JsonObject
log = logging.getLogger("agent")
class BaseHandler(object):
def __init__(self):
pass
def events(self):
ret = []
for i in utils.events_from_methods(self):
ret.append(".".join([self._get_handler_category(None), i]))
return ret
def supports(self, req):
method = self._get_method_for(req)
if method is None:
return False
return self._check_supports(req)
def execute(self, req):
method = self._get_method_for(req)
if method is None:
return None
else:
return method(req=req, **req.data.__dict__)
def _get_method_for(self, req):
prefix = ''
category = self._get_handler_category(req)
if len(category) > 0:
prefix = category + '.'
if len(req.name) <= len(prefix):
return None
name = req.name[len(prefix):].replace('.', '_')
idx = name.find(';')
if idx != -1:
name = name[0:idx]
        return getattr(self, name, None)
def _reply(self, req, response_data):
if req is None:
return None
resp = utils.reply(req)
resp.data = JsonObject(response_data)
return resp
def _do(self, req=None, check=None, result=None, lock_obj=None,
action=None, post_check=True):
if check():
return self._reply(req, result())
with lock(lock_obj):
if check():
return self._reply(req, result())
action()
data = result()
if post_check and not check():
raise Exception("Operation failed")
return self._reply(req, data)
def _get_response_data(self, req, obj):
resource_type = req.get("resourceType")
type = obj.get("type")
if type is not None:
inner_name = re.sub("([A-Z])", r'_\1', type)
method_name = "_get_{0}_data".format(inner_name).lower()
method = None
try:
method = getattr(self, method_name)
except AttributeError:
pass
if method is not None:
return {resource_type: method(obj)}
return {}
def _check_supports(self, req):
raise Exception("Not implemented")
def _get_handler_category(self, req):
return ''
class KindBasedMixin(object):
CHECK_PATHS = [
["imageStoragePoolMap", "storagePool", "kind"],
["volumeStoragePoolMap", "storagePool", "kind"],
["snapshotStoragePoolMap", "storagePool", "kind"],
["instanceHostMap", "host", "kind"],
["instanceForceStop", "kind"],
["instanceInspect", "kind"],
["instancePull", "kind"]
]
def __init__(self, kind=None):
super(KindBasedMixin, self).__init__()
self._kind = kind
def _check_supports(self, req):
for check in KindBasedMixin.CHECK_PATHS:
val = req.data
try:
for part in check:
val = val[part]
if val == self._kind:
return True
except KeyError:
pass
return False
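# Sketch of a concrete handler (hypothetical, for illustration). BaseHandler
# dispatches an event such as "storage.volume.activate" to a method named
# volume_activate, while KindBasedMixin gates supports() on the kind found
# in the request data:
#
#   class SampleStorageHandler(KindBasedMixin, BaseHandler):
#       def __init__(self):
#           KindBasedMixin.__init__(self, kind='sample')
#
#       def _get_handler_category(self, req):
#           return 'storage'
#
#       def volume_activate(self, req=None, **kw):
#           return self._reply(req, {})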
| wlan0/python-agent | cattle/agent/handler.py | Python | apache-2.0 | 3,407 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_settings")
from django.core.management import execute_from_command_line
is_testing = 'test' in sys.argv
if is_testing:
import coverage
cov = coverage.coverage(source=['easy_timezones'], omit=['*tests.py'])
cov.erase()
cov.start()
execute_from_command_line(sys.argv)
if is_testing:
cov.stop()
cov.save()
cov.report()
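# Run the suite with coverage enabled, e.g.:
#   python manage.py test
# A console coverage report for the easy_timezones package is printed once
# the tests finish.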
| Miserlou/django-easy-timezones | manage.py | Python | apache-2.0 | 526 |
"""Adding metric warning_text
Revision ID: 19a814813610
Revises: ca69c70ec99b
Create Date: 2017-09-15 15:09:40.495345
"""
# revision identifiers, used by Alembic.
revision = '19a814813610'
down_revision = 'ca69c70ec99b'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('metrics', sa.Column('warning_text', sa.Text(), nullable=True))
op.add_column('sql_metrics', sa.Column('warning_text', sa.Text(), nullable=True))
def downgrade():
with op.batch_alter_table('sql_metrics') as batch_op_sql_metrics:
batch_op_sql_metrics.drop_column('warning_text')
with op.batch_alter_table('metrics') as batch_op_metrics:
batch_op_metrics.drop_column('warning_text')
| alanmcruickshank/superset-dev | superset/migrations/versions/19a814813610_adding_metric_warning_text.py | Python | apache-2.0 | 717 |
import tensorflow as tf
import numpy as np
from . import base_model
class Qnetwork(base_model.BaseModel):
"""
Args:
name (string): label for model namespace
path (string): path to save/load model
        input_shape (tuple): shape of a single input state to the network.
        output_shape (int): number of output nodes for network.
        filter_sizes (tuple of ints): number of units in each of the two hidden layers. Defaults to (512,512).
learning_rate (float): network's willingness to change current weights given new example
regularization (float): strength of weights regularization term in loss function
discount_factor (float): factor by which future reward after next action is taken are discounted
tau (float): Hyperparameter used in updating target network (if used)
Some notable values:
tau = 1.e-3 -> used in original paper
tau = 0.5 -> average DDQN
tau = 1.0 -> copy online -> target
    A Q-network class which is responsible for holding and updating the weights and biases used in predicting Q-values for a given state. This Q-network will consist of
the following layers:
1) Input- a DraftState state s (an array of bool) representing the current state reshaped into an [n_batch, *input_shape] tensor.
2) Two layers of relu-activated hidden fc layers with dropout
3) Output- linearly activated estimations for Q-values Q(s,a) for each of the output_shape actions a available.
"""
@property
def name(self):
return self._name
@property
def discount_factor(self):
return self._discount_factor
def __init__(self, name, path, input_shape, output_shape, filter_sizes=(512,512), learning_rate=1.e-5, regularization_coeff=1.e-4, discount_factor=0.9, tau=1.0):
super().__init__(name=name, path=path)
self._input_shape = input_shape
self._output_shape = output_shape
self._filter_sizes = filter_sizes
self._learning_rate = learning_rate
self._regularization_coeff = regularization_coeff
self._discount_factor = discount_factor
self._n_hidden_layers = len(filter_sizes)
self._n_layers = self._n_hidden_layers + 2
self._tau = tau
self.online_name = "online"
self.target_name = "target"
# Build base Q-network model
self.online_ops = self.build_model(name = self.online_name)
# If using a target network for DDQN network, add related ops to model
        if self.target_name:
self.target_ops = self.build_model(name = self.target_name)
self.target_ops["target_init"] = self.create_target_initialization_ops(self.target_name, self.online_name)
self.target_ops["target_update"] = self.create_target_update_ops(self.target_name, self.online_name, tau=self._tau)
with self._graph.as_default():
self.online_ops["init"] = tf.global_variables_initializer()
self.init_saver()
def init_saver(self):
with self._graph.as_default():
self.saver = tf.train.Saver()
def save(self, path):
self.saver.save(self.sess, save_path=path)
def load(self, path):
self.saver.restore(self.sess, save_path=path)
def build_model(self, name):
ops_dict = {}
with self._graph.as_default():
with tf.variable_scope(name):
ops_dict["learning_rate"] = tf.Variable(self._learning_rate, trainable=False, name="learning_rate")
# Incoming state matrices are of size input_size = (nChampions, nPos+2)
# 'None' here means the input tensor will flex with the number of training
# examples (aka batch size).
ops_dict["input"] = tf.placeholder(tf.float32, (None,)+self._input_shape, name="inputs")
ops_dict["dropout_keep_prob"] = tf.placeholder_with_default(1.0,shape=())
# Fully connected (FC) layers:
fc0 = tf.layers.dense(
ops_dict["input"],
self._filter_sizes[0],
activation=tf.nn.relu,
bias_initializer=tf.constant_initializer(0.1),
name="fc_0")
dropout0 = tf.nn.dropout(fc0, ops_dict["dropout_keep_prob"])
fc1 = tf.layers.dense(
dropout0,
self._filter_sizes[1],
activation=tf.nn.relu,
bias_initializer=tf.constant_initializer(0.1),
name="fc_1")
dropout1 = tf.nn.dropout(fc1, ops_dict["dropout_keep_prob"])
# FC output layer
ops_dict["outQ"] = tf.layers.dense(
dropout1,
self._output_shape,
activation=None,
bias_initializer=tf.constant_initializer(0.1),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=self._regularization_coeff),
name="q_vals")
# Placeholder for valid actions filter
ops_dict["valid_actions"] = tf.placeholder(tf.bool, shape=ops_dict["outQ"].shape, name="valid_actions")
# Filtered Q-values
ops_dict["valid_outQ"] = tf.where(ops_dict["valid_actions"], ops_dict["outQ"], tf.scalar_mul(-np.inf,tf.ones_like(ops_dict["outQ"])), name="valid_q_vals")
# Max Q value amongst valid actions
ops_dict["max_Q"] = tf.reduce_max(ops_dict["valid_outQ"], axis=1, name="max_Q")
# Predicted optimal action amongst valid actions
ops_dict["prediction"] = tf.argmax(ops_dict["valid_outQ"], axis=1, name="prediction")
# Loss function and optimization:
# The inputs self.target and self.actions are indexed by training example. If
# s[i] = starting state for ith training example (recall that input state s is described by a vector so this will be a matrix)
# a*[i] = action taken from state s[i] during this training sample
# Q*(s[i],a*[i]) = the actual value observed from taking action a*[i] from state s[i]
# outQ[i,-] = estimated values for all actions from state s[i]
# Then we can write the inputs as
# self.target[i] = Q*(s[i],a*[i])
# self.actions[i] = a*[i]
ops_dict["target"] = tf.placeholder(tf.float32, shape=[None], name="target_Q")
ops_dict["actions"] = tf.placeholder(tf.int32, shape=[None], name="submitted_action")
# Since the Qnet outputs a vector Q(s,-) of predicted values for every possible action that can be taken from state s,
# we need to connect each target value with the appropriate predicted Q(s,a*) = Qout[i,a*[i]].
# The main idea is to get indices into the outQ tensor based on the input actions and gather the resulting Q values.
# TensorFlow has no direct way to index like outQ[i, actions[i]], so we must manually form the list of
# [i, actions[i]] index pairs for outQ.
# n_batch = outQ.shape[0] = actions.shape[0]
# n_actions = outQ.shape[1]
ind = tf.stack([tf.range(tf.shape(ops_dict["actions"])[0]),ops_dict["actions"]],axis=1)
# and then "gather" them.
estimatedQ = tf.gather_nd(ops_dict["outQ"], ind)
# Special notes: this is more efficient than indexing into the flattened version of outQ (which I have seen before)
# because the gather operation is applied to outQ directly. Apparently this propagates the gradient more efficiently
# under specific sparsity conditions (which tf.Variables like outQ satisfy)
# Simple sum-of-squares loss (error) function. Note that biases do not
# need to be regularized since they are (generally) not subject to overfitting.
ops_dict["loss"] = tf.reduce_mean(0.5*tf.square(ops_dict["target"]-estimatedQ), name="loss")
ops_dict["trainer"] = tf.train.AdamOptimizer(learning_rate = ops_dict["learning_rate"])
ops_dict["update"] = ops_dict["trainer"].minimize(ops_dict["loss"], name="update")
return ops_dict
def create_target_update_ops(self, target_scope, online_scope, tau=1e-3, name="target_update"):
"""
Adds operations to the graph which are used to update the target network after a training batch is sent
through the online network.
This function should be executed only once before training begins. The resulting operations should
be run within a tf.Session() once per training batch.
In double-Q network learning, the online (primary) network is updated using traditional backpropagation techniques
with target values produced by the target-Q network.
To improve stability, the target-Q is updated using a linear combination of its current weights
with the current weights of the online network:
Q_target = tau*Q_online + (1-tau)*Q_target
Typical tau values are small (tau ~ 1e-3). For more, see https://arxiv.org/abs/1509.06461 and https://arxiv.org/pdf/1509.02971.pdf.
Args:
target_scope (str): name of scope that target network occupies
online_scope (str): name of scope that online network occupies
tau (float32): Hyperparameter for combining target-Q and online-Q networks
name (str): name of operation which updates the target network when run within a session
Returns: Tensorflow operation which updates the target network when run.
"""
with self._graph.as_default():
target_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=target_scope)
online_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=online_scope)
ops = [target_params[i].assign(tf.add(tf.multiply(tau,online_params[i]),tf.multiply(1.-tau,target_params[i]))) for i in range(len(target_params))]
return tf.group(*ops,name=name)
def create_target_initialization_ops(self, target_scope, online_scope):
"""
This adds operations to the graph in order to initialize the target Q network to the same values as the
online network.
This function should be executed only once just after the online network has been initialized.
Args:
target_scope (str): name of scope that target network occupies
online_scope (str): name of scope that online network occupies
Returns:
Tensorflow operation (named "target_init") which initializes the target network when run.
"""
return self.create_target_update_ops(target_scope, online_scope, tau=1.0, name="target_init")
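# A minimal usage sketch (shapes and paths are hypothetical; the session
# attribute is an assumption based on how save()/load() use self.sess above):
#
#   net = Qnetwork(name="qnet", path="/tmp/qnet",
#                  input_shape=(200, 7), output_shape=200)
#   net.sess.run(net.online_ops["init"])           # initialize all variables
#   net.sess.run(net.target_ops["target_init"])    # copy online -> target
#   # ... after each training batch:
#   net.sess.run(net.target_ops["target_update"])  # soft update with tau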
| lightd22/smartDraft | src/models/qNetwork.py | Python | apache-2.0 | 10,997 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import fixtures as fx
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy
import testtools
from nova.compute import rpcapi as compute_rpcapi
from nova import conductor
from nova import context
from nova.db.sqlalchemy import api as session
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import test
from nova.tests import fixtures
from nova.tests.unit import conf_fixture
from nova.tests.unit import policy_fixture
from nova import utils
CONF = cfg.CONF
class TestConfFixture(testtools.TestCase):
"""Test the Conf fixtures in Nova.
This is a basic test that this fixture works like we expect.
Expectations:
1. before using the fixture, a default value (api_paste_config)
comes through untouched.
2. before using the fixture, a known default value that we
override is correct.
3. after using the fixture a known value that we override is the
new value.
4. after using the fixture we can set a default value to something
random, and it will be reset once we are done.
There are 2 copies of this test so that you can verify they do the
right thing with:
tox -e py27 test_fixtures -- --concurrency=1
since, regardless of run order, their initial asserts would be
impacted if the reset behavior isn't working correctly.
"""
def _test_override(self):
self.assertEqual('api-paste.ini', CONF.wsgi.api_paste_config)
self.assertFalse(CONF.fake_network)
self.useFixture(conf_fixture.ConfFixture())
CONF.set_default('api_paste_config', 'foo', group='wsgi')
self.assertTrue(CONF.fake_network)
def test_override1(self):
self._test_override()
def test_override2(self):
self._test_override()
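# A sketch of the pattern these tests verify, for use in other tests
# (class and option names here are illustrative):
#
#   class MyTest(testtools.TestCase):
#       def test_with_override(self):
#           self.useFixture(conf_fixture.ConfFixture())
#           CONF.set_default('api_paste_config', 'foo', group='wsgi')
#           ...  # defaults are restored automatically at fixture cleanup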
class TestOutputStream(testtools.TestCase):
"""Ensure Output Stream capture works as expected.
This has the added benefit of providing a code example of how you
can manipulate the output stream in your own tests.
"""
def test_output(self):
self.useFixture(fx.EnvironmentVariable('OS_STDOUT_CAPTURE', '1'))
self.useFixture(fx.EnvironmentVariable('OS_STDERR_CAPTURE', '1'))
out = self.useFixture(fixtures.OutputStreamCapture())
sys.stdout.write("foo")
sys.stderr.write("bar")
self.assertEqual("foo", out.stdout)
self.assertEqual("bar", out.stderr)
# TODO(sdague): nuke the out and err buffers so it doesn't
# make it to testr
class TestLogging(testtools.TestCase):
def test_default_logging(self):
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should be a null handler as well at DEBUG
self.assertEqual(2, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertNotIn("at debug", stdlog.logger.output)
# broken debug messages should still explode, even though we
# aren't logging them in the regular handler
self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
# and, ensure that one of the terrible log messages isn't
# output at info
warn_log = logging.getLogger('migrate.versioning.api')
warn_log.info("warn_log at info, should be skipped")
warn_log.error("warn_log at error")
self.assertIn("warn_log at error", stdlog.logger.output)
self.assertNotIn("warn_log at info", stdlog.logger.output)
def test_debug_logging(self):
self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should no longer be a null handler
self.assertEqual(1, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertIn("at debug", stdlog.logger.output)
class TestTimeout(testtools.TestCase):
"""Tests for our timeout fixture.
Testing the actual timeout mechanism is beyond the scope of this
test, because it's a pretty clear pass through to fixtures'
timeout fixture, which is tested in their tree.
"""
def test_scaling(self):
# a bad scaling factor
self.assertRaises(ValueError, fixtures.Timeout, 1, 0.5)
# various things that should work.
timeout = fixtures.Timeout(10)
self.assertEqual(10, timeout.test_timeout)
timeout = fixtures.Timeout("10")
self.assertEqual(10, timeout.test_timeout)
timeout = fixtures.Timeout("10", 2)
self.assertEqual(20, timeout.test_timeout)
class TestOSAPIFixture(testtools.TestCase):
@mock.patch('nova.objects.Service.get_by_host_and_binary')
@mock.patch('nova.objects.Service.create')
def test_responds_to_version(self, mock_service_create, mock_get):
"""Ensure the OSAPI server responds to calls sensibly."""
self.useFixture(fixtures.OutputStreamCapture())
self.useFixture(fixtures.StandardLogging())
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.RPCFixture('nova.test'))
api = self.useFixture(fixtures.OSAPIFixture()).api
# request the API root, which provides us the versions of the API
resp = api.api_request('/', strip_version=True)
self.assertEqual(200, resp.status_code, resp.content)
# request a bad root url, should be a 404
#
# NOTE(sdague): this currently fails, as it falls into the 300
# dispatcher instead. This is a bug. The test case is left in
# here, commented out until we can address it.
#
# resp = api.api_request('/foo', strip_version=True)
# self.assertEqual(resp.status_code, 400, resp.content)
# request a known bad url, and we should get a 404
resp = api.api_request('/foo')
self.assertEqual(404, resp.status_code, resp.content)
class TestDatabaseFixture(testtools.TestCase):
def test_fixture_reset(self):
# because this sets up reasonable db connection strings
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database())
engine = session.get_engine()
conn = engine.connect()
result = conn.execute("select * from instance_types")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
# insert a 6th instance type, column 5 below is an int id
# which has a constraint on it, so if new standard instance
# types are added you have to bump it.
conn.execute("insert into instance_types VALUES "
"(NULL, NULL, NULL, 't1.test', 6, 4096, 2, 0, NULL, '87'"
", 1.0, 40, 0, 0, 1, 0)")
result = conn.execute("select * from instance_types")
rows = result.fetchall()
self.assertEqual(1, len(rows), "Rows %s" % rows)
# reset by invoking the fixture again
#
# NOTE(sdague): it's important to reestablish the db
# connection because otherwise we have a reference to the old
# in mem db.
self.useFixture(fixtures.Database())
conn = engine.connect()
result = conn.execute("select * from instance_types")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
def test_api_fixture_reset(self):
# This sets up reasonable db connection strings
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database(database='api'))
engine = session.get_api_engine()
conn = engine.connect()
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
uuid = uuidutils.generate_uuid()
conn.execute("insert into cell_mappings (uuid, name) VALUES "
"('%s', 'fake-cell')" % (uuid,))
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(1, len(rows), "Rows %s" % rows)
# reset by invoking the fixture again
#
# NOTE(sdague): it's important to reestablish the db
# connection because otherwise we have a reference to the old
# in mem db.
self.useFixture(fixtures.Database(database='api'))
conn = engine.connect()
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
def test_fixture_cleanup(self):
# because this sets up reasonable db connection strings
self.useFixture(conf_fixture.ConfFixture())
fix = fixtures.Database()
self.useFixture(fix)
# manually do the cleanup that addCleanup will do
fix.cleanup()
# ensure the db contains nothing
engine = session.get_engine()
conn = engine.connect()
schema = "".join(line for line in conn.connection.iterdump())
self.assertEqual(schema, "BEGIN TRANSACTION;COMMIT;")
def test_api_fixture_cleanup(self):
# This sets up reasonable db connection strings
self.useFixture(conf_fixture.ConfFixture())
fix = fixtures.Database(database='api')
self.useFixture(fix)
# No data inserted by migrations so we need to add a row
engine = session.get_api_engine()
conn = engine.connect()
uuid = uuidutils.generate_uuid()
conn.execute("insert into cell_mappings (uuid, name) VALUES "
"('%s', 'fake-cell')" % (uuid,))
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(1, len(rows), "Rows %s" % rows)
# Manually do the cleanup that addCleanup will do
fix.cleanup()
# Ensure the db contains nothing
engine = session.get_api_engine()
conn = engine.connect()
schema = "".join(line for line in conn.connection.iterdump())
self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema)
class TestDatabaseAtVersionFixture(testtools.TestCase):
def test_fixture_schema_version(self):
self.useFixture(conf_fixture.ConfFixture())
# In/after 317 aggregates did have uuid
self.useFixture(fixtures.DatabaseAtVersion(318))
engine = session.get_engine()
engine.connect()
meta = sqlalchemy.MetaData(engine)
aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
self.assertTrue(hasattr(aggregate.c, 'uuid'))
# Before 317, aggregates had no uuid
self.useFixture(fixtures.DatabaseAtVersion(316))
engine = session.get_engine()
engine.connect()
meta = sqlalchemy.MetaData(engine)
aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
self.assertFalse(hasattr(aggregate.c, 'uuid'))
engine.dispose()
def test_fixture_after_database_fixture(self):
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database())
self.useFixture(fixtures.DatabaseAtVersion(318))
class TestDefaultFlavorsFixture(testtools.TestCase):
@mock.patch("nova.objects.flavor.Flavor._send_notification")
def test_flavors(self, mock_send_notification):
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database())
self.useFixture(fixtures.Database(database='api'))
engine = session.get_api_engine()
conn = engine.connect()
result = conn.execute("select * from flavors")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
self.useFixture(fixtures.DefaultFlavorsFixture())
result = conn.execute("select * from flavors")
rows = result.fetchall()
self.assertEqual(6, len(rows), "Rows %s" % rows)
class TestIndirectionAPIFixture(testtools.TestCase):
def test_indirection_api(self):
# Should initially be None
self.assertIsNone(obj_base.NovaObject.indirection_api)
# make sure the fixture correctly sets the value
fix = fixtures.IndirectionAPIFixture('foo')
self.useFixture(fix)
self.assertEqual('foo', obj_base.NovaObject.indirection_api)
# manually do the cleanup that addCleanup will do
fix.cleanup()
# ensure the initial value is restored
self.assertIsNone(obj_base.NovaObject.indirection_api)
class TestSpawnIsSynchronousFixture(testtools.TestCase):
def test_spawn_patch(self):
orig_spawn = utils.spawn_n
fix = fixtures.SpawnIsSynchronousFixture()
self.useFixture(fix)
self.assertNotEqual(orig_spawn, utils.spawn_n)
def test_spawn_passes_through(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
tester = mock.MagicMock()
utils.spawn_n(tester.function, 'foo', bar='bar')
tester.function.assert_called_once_with('foo', bar='bar')
def test_spawn_return_has_wait(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn(lambda x: '%s' % x, 'foo')
foo = gt.wait()
self.assertEqual('foo', foo)
def test_spawn_n_return_has_wait(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn_n(lambda x: '%s' % x, 'foo')
foo = gt.wait()
self.assertEqual('foo', foo)
def test_spawn_has_link(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn(mock.MagicMock)
passed_arg = 'test'
call_count = []
def fake(thread, param):
self.assertEqual(gt, thread)
self.assertEqual(passed_arg, param)
call_count.append(1)
gt.link(fake, passed_arg)
self.assertEqual(1, len(call_count))
def test_spawn_n_has_link(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn_n(mock.MagicMock)
passed_arg = 'test'
call_count = []
def fake(thread, param):
self.assertEqual(gt, thread)
self.assertEqual(passed_arg, param)
call_count.append(1)
gt.link(fake, passed_arg)
self.assertEqual(1, len(call_count))
class TestBannedDBSchemaOperations(testtools.TestCase):
def test_column(self):
column = sqlalchemy.Column()
with fixtures.BannedDBSchemaOperations(['Column']):
self.assertRaises(exception.DBNotAllowed,
column.drop)
self.assertRaises(exception.DBNotAllowed,
column.alter)
def test_table(self):
table = sqlalchemy.Table()
with fixtures.BannedDBSchemaOperations(['Table']):
self.assertRaises(exception.DBNotAllowed,
table.drop)
self.assertRaises(exception.DBNotAllowed,
table.alter)
class TestAllServicesCurrentFixture(testtools.TestCase):
@mock.patch('nova.objects.Service._db_service_get_minimum_version')
def test_services_current(self, mock_db):
mock_db.return_value = {'nova-compute': 123}
self.assertEqual(123, service_obj.Service.get_minimum_version(
None, 'nova-compute'))
mock_db.assert_called_once_with(None, ['nova-compute'],
use_slave=False)
mock_db.reset_mock()
compute_rpcapi.LAST_VERSION = 123
self.useFixture(fixtures.AllServicesCurrent())
self.assertIsNone(compute_rpcapi.LAST_VERSION)
self.assertEqual(service_obj.SERVICE_VERSION,
service_obj.Service.get_minimum_version(
None, 'nova-compute'))
self.assertFalse(mock_db.called)
class TestNoopConductorFixture(testtools.TestCase):
@mock.patch('nova.conductor.api.ComputeTaskAPI.resize_instance')
def test_task_api_not_called(self, mock_resize):
self.useFixture(fixtures.NoopConductorFixture())
conductor.ComputeTaskAPI().resize_instance()
self.assertFalse(mock_resize.called)
@mock.patch('nova.conductor.api.API.wait_until_ready')
def test_api_not_called(self, mock_wait):
self.useFixture(fixtures.NoopConductorFixture())
conductor.API().wait_until_ready()
self.assertFalse(mock_wait.called)
class TestSingleCellSimpleFixture(testtools.TestCase):
def test_single_cell(self):
self.useFixture(fixtures.SingleCellSimple())
cml = objects.CellMappingList.get_all(None)
self.assertEqual(1, len(cml))
def test_target_cell(self):
self.useFixture(fixtures.SingleCellSimple())
with context.target_cell(mock.sentinel.context, None) as c:
self.assertIs(mock.sentinel.context, c)
class TestPlacementFixture(testtools.TestCase):
def setUp(self):
super(TestPlacementFixture, self).setUp()
# We need ConfFixture since PlacementPolicyFixture reads from config.
self.useFixture(conf_fixture.ConfFixture())
# We need PlacementPolicyFixture because placement-api checks policy.
self.useFixture(policy_fixture.PlacementPolicyFixture())
# Database is needed to start placement API
self.useFixture(fixtures.Database(database='placement'))
def test_responds_to_version(self):
"""Ensure the Placement server responds to calls sensibly."""
placement_fixture = self.useFixture(fixtures.PlacementFixture())
# request the API root, which provides us the versions of the API
resp = placement_fixture._fake_get(None, '/')
self.assertEqual(200, resp.status_code)
# request a known bad url, and we should get a 404
resp = placement_fixture._fake_get(None, '/foo')
self.assertEqual(404, resp.status_code)
# unsets the token so we fake missing it
placement_fixture.token = None
resp = placement_fixture._fake_get(None, '/foo')
self.assertEqual(401, resp.status_code)
class TestWarningsFixture(test.TestCase):
def test_invalid_uuid_errors(self):
"""Creating an oslo.versionedobject with an invalid UUID value for a
UUIDField should raise an exception.
"""
valid_migration_kwargs = {
"created_at": timeutils.utcnow().replace(microsecond=0),
"updated_at": None,
"deleted_at": None,
"deleted": False,
"id": 123,
"uuid": uuids.migration,
"source_compute": "compute-source",
"dest_compute": "compute-dest",
"source_node": "node-source",
"dest_node": "node-dest",
"dest_host": "host-dest",
"old_instance_type_id": 42,
"new_instance_type_id": 84,
"instance_uuid": "fake-uuid",
"status": "migrating",
"migration_type": "resize",
"hidden": False,
"memory_total": 123456,
"memory_processed": 12345,
"memory_remaining": 111111,
"disk_total": 234567,
"disk_processed": 23456,
"disk_remaining": 211111,
}
# this shall not throw FutureWarning
objects.migration.Migration(**valid_migration_kwargs)
invalid_migration_kwargs = copy.deepcopy(valid_migration_kwargs)
invalid_migration_kwargs["uuid"] = "fake_id"
self.assertRaises(FutureWarning, objects.migration.Migration,
**invalid_migration_kwargs)
| gooddata/openstack-nova | nova/tests/unit/test_fixtures.py | Python | apache-2.0 | 20,898 |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.ports \
import tables as project_tables
LOG = logging.getLogger(__name__)
def get_device_owner(port):
if port['device_owner'] == 'network:router_gateway':
return _('External Gateway')
elif port['device_owner'] == 'network:router_interface':
return _('Internal Interface')
else:
return ' '
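# For illustration (the returned values are lazy translation proxies, shown
# here evaluated):
#
#   >>> get_device_owner({'device_owner': 'network:router_gateway'})
#   'External Gateway'
#   >>> get_device_owner({'device_owner': 'compute:nova'})
#   ' '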
class AddInterface(tables.LinkAction):
name = "create"
verbose_name = _("Add Interface")
url = "horizon:project:routers:addinterface"
classes = ("ajax-modal", "btn-create")
policy_rules = (("network", "add_router_interface"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def get_link_url(self, datum=None):
router_id = self.table.kwargs['router_id']
return reverse(self.url, args=(router_id,))
class RemoveInterface(tables.DeleteAction):
data_type_singular = _("Interface")
data_type_plural = _("Interfaces")
failure_url = 'horizon:project:routers:detail'
policy_rules = (("network", "remove_router_interface"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def delete(self, request, obj_id):
try:
router_id = self.table.kwargs['router_id']
port = api.neutron.port_get(request, obj_id)
if port['device_owner'] == 'network:router_gateway':
api.neutron.router_remove_gateway(request, router_id)
else:
api.neutron.router_remove_interface(request,
router_id,
port_id=obj_id)
except Exception:
msg = _('Failed to delete interface %s') % obj_id
LOG.info(msg)
router_id = self.table.kwargs['router_id']
redirect = reverse(self.failure_url,
args=[router_id])
exceptions.handle(request, msg, redirect=redirect)
def allowed(self, request, datum=None):
if datum and datum['device_owner'] == 'network:router_gateway':
return False
return True
class PortsTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:networks:ports:detail")
fixed_ips = tables.Column(project_tables.get_fixed_ips,
verbose_name=_("Fixed IPs"))
status = tables.Column("status", verbose_name=_("Status"))
device_owner = tables.Column(get_device_owner,
verbose_name=_("Type"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"))
def get_object_display(self, port):
return port.id
class Meta:
name = "interfaces"
verbose_name = _("Interfaces")
table_actions = (AddInterface, RemoveInterface)
row_actions = (RemoveInterface, )
| spandanb/horizon | openstack_dashboard/dashboards/project/routers/ports/tables.py | Python | apache-2.0 | 4,088 |
from oslo_utils import uuidutils
def rand_name(name=''):
rand_data = uuidutils.generate_uuid()[:8]
if name:
return '%s-%s' % (name, rand_data)
else:
return rand_data
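# Example output (the 8-char hex suffix varies per call):
#
#   >>> rand_name('volume')
#   'volume-3fa85f64'
#   >>> rand_name()
#   '16fd2706'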
| esikachev/my-dev-client | my_dev/tests/utils.py | Python | apache-2.0 | 196 |
from botor import Botor
from botor.aws.iam import get_role_managed_policies, get_role_inline_policies, get_role_instance_profiles
from bozor.aws.iam import _get_name_from_structure, modify, _conn_from_args
def _get_base(role, **conn):
"""
Determine whether the boto get_role call needs to be made or if we already have all that data
in the role object.
:param role: dict containing (at the very least) role_name and/or arn.
:param conn: dict containing enough information to make a connection to the desired account.
:return: Camelized dict describing the role, containing all base_fields.
"""
base_fields = frozenset(['Arn', 'AssumeRolePolicyDocument', 'Path', 'RoleId', 'RoleName', 'CreateDate'])
needs_base = False
for field in base_fields:
if field not in role:
needs_base = True
break
if needs_base:
role_name = _get_name_from_structure(role, 'RoleName')
role = Botor.go('iam.client.get_role', RoleName=role_name, **conn)
role = role['Role']
# cast CreateDate from a datetime to something JSON serializable.
role.update(dict(CreateDate=str(role['CreateDate'])))
return role
def get_role(role, output='camelized', **conn):
"""
Orchestrates all the calls required to fully build out an IAM Role in the following format:
{
"Arn": ...,
"AssumeRolePolicyDocument": ...,
"CreateDate": ..., # str
"InlinePolicies": ...,
"InstanceProfiles": ...,
"ManagedPolicies": ...,
"Path": ...,
"RoleId": ...,
"RoleName": ...,
}
:param role: dict containing (at the very least) role_name and/or arn.
:param output: Determines whether keys should be returned camelized or underscored.
:param conn: dict containing enough information to make a connection to the desired account.
Must at least have 'assume_role' key.
:return: dict containing a fully built out role.
"""
role = modify(role, 'camelized')
_conn_from_args(role, conn)
role = _get_base(role, **conn)
role.update(
{
'managed_policies': get_role_managed_policies(role, **conn),
'inline_policies': get_role_inline_policies(role, **conn),
'instance_profiles': get_role_instance_profiles(role, **conn)
}
)
return modify(role, format=output) | monkeysecurity/bozor | bozor/aws/iam/role.py | Python | apache-2.0 | 2,398 |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
version interface
"""
from six.moves import urllib
from novaclient import base
from novaclient import client
from novaclient import exceptions as exc
class Version(base.Resource):
"""
Compute REST API information
"""
def __repr__(self):
return "<Version>"
class VersionManager(base.ManagerWithFind):
resource_class = Version
def _is_session_client(self):
return isinstance(self.api.client, client.SessionClient)
def _get_current(self):
"""Returns info about current version."""
# TODO(sdague): we've now got to make up to 3 HTTP requests to
# determine what version we are running, due to differences in
# deployments and versions. We really need to cache the
# results of this per endpoint and keep the results of it for
# some reasonable TTL (like 24 hours) to reduce our round trip
# traffic.
if self._is_session_client():
try:
# Assume that the value of get_endpoint() is something
# we can get the version of. This is a 404 for Nova <
# Mitaka if the service catalog contains project_id.
#
# TODO(sdague): add microversion for when this will
# change
url = "%s" % self.api.client.get_endpoint()
return self._get(url, "version")
except exc.NotFound:
# If that's a 404, we can instead try hacking together
# an endpoint root url by chopping off the last 2 /s.
# This is kind of gross, but we've had this baked in
# so long people got used to this hard coding.
#
# NOTE(sdague): many service providers don't really
# implement GET / in the expected way, if we do a GET
# /v2 that's actually a 300 redirect to
# /v2/... because of how paste works. So adding the
# end slash is really important.
url = "%s/" % url.rsplit("/", 1)[0]
return self._get(url, "version")
else:
# NOTE(andreykurilin): HTTPClient doesn't have the ability to send a GET
# request without a token in the URL, so `self._get` doesn't work.
all_versions = self.list()
url = self.client.management_url.rsplit("/", 1)[0]
for version in all_versions:
for link in version.links:
if link["href"].rstrip('/') == url:
version.append_request_ids(all_versions.request_ids)
return version
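# For illustration (hypothetical endpoint): chopping off the last path
# segment turns a project-scoped endpoint into one whose root serves
# version info:
#
#   >>> url = "http://nova.example.com:8774/v2.1/abc123"
#   >>> "%s/" % url.rsplit("/", 1)[0]
#   'http://nova.example.com:8774/v2.1/'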
def get_current(self):
try:
return self._get_current()
except exc.Unauthorized:
# NOTE(sdague): RAX's repose configuration blocks access to the
# versioned endpoint, which is definitely non-compliant behavior.
# However, there is no defcore test for this yet. Remove this code
# block once we land things in defcore.
return None
def list(self):
"""List all versions."""
version_url = None
if self._is_session_client():
# NOTE: "list versions" API needs to be accessed without base
# URI (like "v2/{project-id}"), so here should be a scheme("http",
# etc.) and a hostname.
endpoint = self.api.client.get_endpoint()
url = urllib.parse.urlparse(endpoint)
version_url = '%s://%s/' % (url.scheme, url.netloc)
return self._list(version_url, "versions")
| xuweiliang/Codelibrary | novaclient/v2/versions.py | Python | apache-2.0 | 4,239 |
import unittest
from robot.utils.asserts import assert_equal
from robot.model.statistics import Statistics
from robot.result import TestCase, TestSuite
def verify_stat(stat, name, passed, failed, skipped, critical=None, combined=None,
id=None, elapsed=0):
assert_equal(stat.name, name, 'stat.name')
assert_equal(stat.passed, passed)
assert_equal(stat.failed, failed)
assert_equal(stat.skipped, skipped)
assert_equal(stat.total, passed + failed + skipped)
if hasattr(stat, 'critical'):
assert_equal(stat.critical,
False if critical is None else bool(critical))
assert_equal(stat.non_critical,
False if critical is None else not bool(critical))
assert_equal(stat.combined, combined)
if hasattr(stat, 'id'):
assert_equal(stat.id, id)
assert_equal(stat.elapsed, elapsed)
def verify_suite(suite, name, id, passed, failed, skipped):
verify_stat(suite.stat, name, passed, failed, skipped, id=id)
def generate_suite():
suite = TestSuite(name='Root Suite')
suite.set_criticality(critical_tags=['smoke'])
s1 = suite.suites.create(name='First Sub Suite')
s2 = suite.suites.create(name='Second Sub Suite')
s11 = s1.suites.create(name='Sub Suite 1_1')
s12 = s1.suites.create(name='Sub Suite 1_2')
s13 = s1.suites.create(name='Sub Suite 1_3')
s21 = s2.suites.create(name='Sub Suite 2_1')
s11.tests = [TestCase(status='PASS'), TestCase(status='FAIL', tags=['t1'])]
s12.tests = [TestCase(status='PASS', tags=['t_1','t2',]),
TestCase(status='PASS', tags=['t1','smoke']),
TestCase(status='FAIL', tags=['t1','t2','t3','smoke'])]
s13.tests = [TestCase(status='PASS', tags=['t1','t 2','smoke'])]
s21.tests = [TestCase(status='FAIL', tags=['t3','Smoke'])]
return suite
class TestStatisticsSimple(unittest.TestCase):
def setUp(self):
suite = TestSuite(name='Hello')
suite.tests = [TestCase(status='PASS'), TestCase(status='PASS'),
TestCase(status='FAIL')]
self.statistics = Statistics(suite)
def test_total(self):
verify_stat(self.statistics.total.critical, 'Critical Tests', 2, 1, 0)
verify_stat(self.statistics.total.all, 'All Tests', 2, 1, 0)
def test_suite(self):
verify_suite(self.statistics.suite, 'Hello', 's1', 2, 1, 0)
def test_tags(self):
assert_equal(list(self.statistics.tags), [])
class TestStatisticsNotSoSimple(unittest.TestCase):
def setUp(self):
suite = generate_suite()
suite.set_criticality(critical_tags=['smoke'])
self.statistics = Statistics(suite, 2, ['t*','smoke'], ['t3'],
[('t? & smoke', ''), ('none NOT t1', 'a title')])
def test_total(self):
verify_stat(self.statistics.total.all, 'All Tests', 4, 3, 0)
verify_stat(self.statistics.total.critical, 'Critical Tests', 2, 2, 0)
def test_suite(self):
suite = self.statistics.suite
verify_suite(suite, 'Root Suite', 's1', 4, 3, 0)
[s1, s2] = suite.suites
verify_suite(s1, 'Root Suite.First Sub Suite', 's1-s1', 4, 2, 0)
verify_suite(s2, 'Root Suite.Second Sub Suite', 's1-s2', 0, 1, 0)
assert_equal(len(s1.suites), 0)
assert_equal(len(s2.suites), 0)
def test_tags(self):
# Tag stats are tested more thoroughly in test_tagstatistics.py
tags = self.statistics.tags
verify_stat(tags.tags['smoke'], 'smoke', 2, 2, 0)
verify_stat(tags.tags['t1'], 't1', 3, 2, 0)
verify_stat(tags.tags['t2'], 't2', 2, 1, 0)
#expected = [(u'smoke', 4), (u't1', 5), (u't2', 3)]
expected = [(u'smoke', 4), ('a title', 0), ('t? & smoke', 4), (u't1', 5), (u't2', 3)]
assert_equal(len(list(tags)), len(expected))
for t, e in zip(tags, expected):
verify_stat(t, *e)
class TestSuiteStatistics(unittest.TestCase):
def test_all_levels(self):
suite = Statistics(generate_suite()).suite
verify_suite(suite, 'Root Suite', 's1', 4, 3, 0)
[s1, s2] = suite.suites
verify_suite(s1, 'Root Suite.First Sub Suite', 's1-s1', 4, 2, 0)
verify_suite(s2, 'Root Suite.Second Sub Suite', 's1-s2', 0, 1, 0)
[s11, s12, s13] = s1.suites
verify_suite(s11, 'Root Suite.First Sub Suite.Sub Suite 1_1', 's1-s1-s1', 1, 1, 0)
verify_suite(s12, 'Root Suite.First Sub Suite.Sub Suite 1_2', 's1-s1-s2', 2, 1, 0)
verify_suite(s13, 'Root Suite.First Sub Suite.Sub Suite 1_3', 's1-s1-s3', 1, 0, 0)
[s21] = s2.suites
verify_suite(s21, 'Root Suite.Second Sub Suite.Sub Suite 2_1', 's1-s2-s1', 0, 1, 0)
def test_only_root_level(self):
suite = Statistics(generate_suite(), suite_stat_level=1).suite
verify_suite(suite, 'Root Suite', 's1', 4, 3, 0)
assert_equal(len(suite.suites), 0)
def test_deeper_level(self):
PASS = TestCase(status='PASS')
FAIL = TestCase(status='FAIL')
SKIP = TestCase(status='SKIP')
suite = TestSuite(name='1')
suite.suites = [TestSuite(name='1'), TestSuite(name='2'), TestSuite(name='3')]
suite.suites[0].suites = [TestSuite(name='1')]
suite.suites[1].suites = [TestSuite(name='1'), TestSuite(name='2')]
suite.suites[2].tests = [PASS, FAIL]
suite.suites[0].suites[0].suites = [TestSuite(name='1')]
suite.suites[1].suites[0].tests = [PASS, PASS, PASS, FAIL]
suite.suites[1].suites[1].tests = [PASS, PASS, FAIL, FAIL]
suite.suites[0].suites[0].suites[0].tests = [FAIL, FAIL, FAIL]
s1 = Statistics(suite, suite_stat_level=3).suite
verify_suite(s1, '1', 's1', 6, 7, 0)
[s11, s12, s13] = s1.suites
verify_suite(s11, '1.1', 's1-s1', 0, 3, 0)
verify_suite(s12, '1.2', 's1-s2', 5, 3, 0)
verify_suite(s13, '1.3', 's1-s3', 1, 1, 0)
[s111] = s11.suites
verify_suite(s111, '1.1.1', 's1-s1-s1', 0, 3, 0)
[s121, s122] = s12.suites
verify_suite(s121, '1.2.1', 's1-s2-s1', 3, 1, 0)
verify_suite(s122, '1.2.2', 's1-s2-s2', 2, 2, 0)
assert_equal(len(s111.suites), 0)
def test_iter_only_one_level(self):
[stat] = list(Statistics(generate_suite(), suite_stat_level=1).suite)
verify_stat(stat, 'Root Suite', 4, 3, 0, id='s1')
def test_iter_also_sub_suites(self):
stats = list(Statistics(generate_suite()).suite)
verify_stat(stats[0], 'Root Suite', 4, 3, 0, id='s1')
verify_stat(stats[1], 'Root Suite.First Sub Suite', 4, 2, 0, id='s1-s1')
verify_stat(stats[2], 'Root Suite.First Sub Suite.Sub Suite 1_1', 1, 1, 0, id='s1-s1-s1')
verify_stat(stats[3], 'Root Suite.First Sub Suite.Sub Suite 1_2', 2, 1, 0, id='s1-s1-s2')
verify_stat(stats[4], 'Root Suite.First Sub Suite.Sub Suite 1_3', 1, 0, 0, id='s1-s1-s3')
verify_stat(stats[5], 'Root Suite.Second Sub Suite', 0, 1, 0, id='s1-s2')
verify_stat(stats[6], 'Root Suite.Second Sub Suite.Sub Suite 2_1', 0, 1, 0, id='s1-s2-s1')
class TestElapsedTime(unittest.TestCase):
def setUp(self):
ts = '20120816 00:00:'
suite = TestSuite(starttime=ts+'00.000', endtime=ts+'59.999')
suite.suites = [
TestSuite(starttime=ts+'00.000', endtime=ts+'30.000'),
TestSuite(starttime=ts+'30.000', endtime=ts+'42.042')
]
suite.suites[0].tests = [
TestCase(starttime=ts+'00.000', endtime=ts+'00.001', tags=['t1']),
TestCase(starttime=ts+'00.001', endtime=ts+'01.001', tags=['t1', 't2'])
]
suite.suites[1].tests = [
TestCase(starttime=ts+'30.000', endtime=ts+'40.000', tags=['t1', 't2', 't3'])
]
suite.set_criticality(critical_tags=['t2'])
self.stats = Statistics(suite, tag_stat_combine=[('?2', 'combined')])
def test_total_stats(self):
assert_equal(self.stats.total.all.elapsed, 11001)
assert_equal(self.stats.total.critical.elapsed, 11000)
def test_tag_stats(self):
t1, t2, t3 = self.stats.tags.tags.values()
verify_stat(t1, 't1', 0, 3, 0, elapsed=11001)
verify_stat(t2, 't2', 0, 2, 0, elapsed=11000)
verify_stat(t3, 't3', 0, 1, 0, elapsed=10000)
def test_combined_tag_stats(self):
combined = self.stats.tags.combined[0]
verify_stat(combined, 'combined', 0, 2, 0, combined='?2', elapsed=11000)
def test_suite_stats(self):
assert_equal(self.stats.suite.stat.elapsed, 59999)
assert_equal(self.stats.suite.suites[0].stat.elapsed, 30000)
assert_equal(self.stats.suite.suites[1].stat.elapsed, 12042)
def test_suite_stats_when_suite_has_no_times(self):
suite = TestSuite()
assert_equal(Statistics(suite).suite.stat.elapsed, 0)
ts = '20120816 00:00:'
suite.tests = [TestCase(starttime=ts+'00.000', endtime=ts+'00.001'),
TestCase(starttime=ts+'00.001', endtime=ts+'01.001')]
assert_equal(Statistics(suite).suite.stat.elapsed, 1001)
suite.suites = [TestSuite(starttime=ts+'02.000', endtime=ts+'12.000'),
TestSuite()]
assert_equal(Statistics(suite).suite.stat.elapsed, 11001)
def test_elapsed_from_get_attributes(self):
for time, expected in [('00:00:00.000', '00:00:00'),
('00:00:00.001', '00:00:00'),
('00:00:00.500', '00:00:01'),
('00:00:00.999', '00:00:01'),
('00:00:01.000', '00:00:01'),
('00:00:01.001', '00:00:01'),
('00:00:01.499', '00:00:01'),
('00:00:01.500', '00:00:02'),
('01:59:59.499', '01:59:59'),
('01:59:59.500', '02:00:00')]:
suite = TestSuite(starttime='20120817 00:00:00.000',
endtime='20120817 ' + time)
stat = Statistics(suite).suite.stat
elapsed = stat.get_attributes(include_elapsed=True)['elapsed']
assert_equal(elapsed, expected, time)
if __name__ == "__main__":
unittest.main()
| snyderr/robotframework | utest/model/test_statistics.py | Python | apache-2.0 | 10,312 |
__license__ = """
Copyright 2012 DISQUS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from fabric.api import *
import time
env.name = 'bokbok'
env.time = int(time.time())
env.buildroot = '/tmp/%s/%d' % (env.name, env.time)
env.app = '%s/%s' % (env.buildroot, env.name)
env.deploy = '/usr/local/%s/releases' % env.name
env.hosts = ['CHANGEME']
env.repo = 'CHANGEME'
env.blob = '%s-%d.tgz' % (env.name, env.time)
def all():
mkvirtualenv()
prepare()
build()
deploy()
restart_apache()
def deploy():
put('/tmp/%s' % env.blob, '/tmp')
with settings(hide('stdout')):
with cd(env.deploy):
run('mkdir %s' % env.time)
with cd('%s' % env.time):
run('tar xzf /tmp/%s' % env.blob)
run('rm -f current; ln -sf %s current' % env.time)
def build():
with settings(hide('stdout')):
with lcd(env.buildroot):
with prefix('. bin/activate'):
local('pip install -r %s/requirements.txt' % env.buildroot)
local('tar czf /tmp/%s .' % env.blob)
def mkvirtualenv():
with settings(hide('stdout')):
local('virtualenv --no-site-packages --setuptools --python python2.6 %s' % env.buildroot)
def prepare():
dirs = ['tmp', env.name]
with lcd(env.buildroot):
local('mkdir %s' % ' '.join(dirs))
with settings(hide('stdout')):
with lcd('tmp'):
with settings(hide('stderr')):
local('git clone %s %s' % (env.repo, env.name))
with lcd(env.name):
local('git archive master | tar -xC %s' % env.buildroot)
local('rm -rf %s' % env.name)
def restart_apache():
run('sudo service apache2 restart')
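# Typical invocations (env.hosts and env.repo above must be set first;
# the task names are the functions defined in this file):
#
#   $ fab all # build a clean virtualenv, package, deploy and restart
#   $ fab build deploy restart_apache # the same steps run individually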
| disqus/bokbok | fabfile.py | Python | apache-2.0 | 2,084 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Checkpoint functions for BigMLer
"""
from __future__ import absolute_import
import os
import bigml.api
from bigml.util import console_log
from bigmler.utils import log_message
def is_source_created(path, suffix=""):
"""Checks existence and reads the source id from the source file in the
path directory
"""
source_id = None
try:
with open("%s%ssource%s" % (path, os.sep, suffix)) as source_file:
source_id = source_file.readline().strip()
try:
source_id = bigml.api.get_source_id(source_id)
return True, source_id
except ValueError:
return False, None
except IOError:
return False, None
def is_dataset_created(path, suffix=""):
"""Checks existence and reads the dataset id from the dataset file in
the path directory
"""
dataset_id = None
try:
with open("%s%sdataset%s" % (path, os.sep, suffix)) as dataset_file:
dataset_id = dataset_file.readline().strip()
try:
dataset_id = bigml.api.get_dataset_id(dataset_id)
return True, dataset_id
except ValueError:
return False, None
except IOError:
return False, None
def are_datasets_created(path, number_of_datasets, suffix='parts'):
"""Checks existence and reads the dataset ids from the datasets file in
the path directory
"""
dataset_ids = []
try:
with open("%s%sdataset_%s" % (path, os.sep, suffix)) as datasets_file:
for line in datasets_file:
dataset = line.strip()
try:
dataset_id = bigml.api.get_dataset_id(dataset)
dataset_ids.append(dataset_id)
except ValueError:
return False, dataset_ids
if len(dataset_ids) == number_of_datasets:
return True, dataset_ids
else:
return False, dataset_ids
except IOError:
return False, dataset_ids
def are_models_created(path, number_of_models):
"""Checks existence and reads the model ids from the models file in the
path directory
"""
model_ids = []
try:
with open("%s%smodels" % (path, os.sep)) as models_file:
for line in models_file:
model = line.strip()
try:
model_id = bigml.api.get_model_id(model)
model_ids.append(model_id)
except ValueError:
return False, model_ids
if len(model_ids) == number_of_models:
return True, model_ids
else:
return False, model_ids
except IOError:
return False, model_ids
def are_predictions_created(predictions_file, number_of_tests):
"""Checks existence and reads the predictions from the predictions file in
the path directory
"""
predictions = file_number_of_lines(predictions_file)
if predictions != number_of_tests:
os.remove(predictions_file)
return False, None
return True, None
def is_evaluation_created(path):
"""Checks existence and reads the evaluation id from the evaluation file
in the path directory
"""
evaluation_id = None
try:
with open("%s%sevaluation" % (path, os.sep)) as evaluation_file:
evaluation_id = evaluation_file.readline().strip()
try:
evaluation_id = bigml.api.get_evaluation_id(evaluation_id)
return True, evaluation_id
except ValueError:
return False, None
except IOError:
return False, None
def are_evaluations_created(path, number_of_evaluations):
"""Checks existence and reads the evaluation ids from the evaluations file
in the path directory and checks the corresponding evaluations
"""
evaluation_ids = []
try:
with open("%s%sevaluations" % (path, os.sep)) as evaluations_file:
for line in evaluations_file:
evaluation = line.strip()
try:
evaluation_id = bigml.api.get_evaluation_id(evaluation)
evaluation_ids.append(evaluation_id)
except ValueError:
return False, evaluation_ids
if len(evaluation_ids) == number_of_evaluations:
return True, evaluation_ids
else:
return False, evaluation_ids
except IOError:
return False, evaluation_ids
def are_ensembles_created(path, number_of_ensembles):
"""Checks and reads the ensembles ids from the ensembles file in the
path directory
"""
ensemble_ids = []
try:
with open("%s%sensembles" % (path, os.sep)) as ensembles_file:
for line in ensembles_file:
ensemble = line.strip()
try:
ensemble_id = bigml.api.get_ensemble_id(ensemble)
ensemble_ids.append(ensemble_id)
except ValueError:
return False, ensemble_ids
if len(ensemble_ids) == number_of_ensembles:
return True, ensemble_ids
else:
return False, ensemble_ids
except IOError:
return False, ensemble_ids
def checkpoint(function, *args, **kwargs):
"""Redirects to each checkpoint function
"""
common_parms = ['debug', 'message', 'log_file', 'console']
debug = kwargs.get('debug', False)
message = kwargs.get('message', None)
log_file = kwargs.get('log_file', None)
console = kwargs.get('console', False)
f_kwargs = {key: value for key, value in kwargs.items()
if key not in common_parms}
result = function(*args, **f_kwargs)
if debug:
console_log("Checkpoint: checking %s with args:\n%s\n\nResult:\n%s\n" %
(function.__name__, "\n".join([repr(arg) for arg in args]),
repr(result)))
# resume is the first element in the result tuple
if not result[0] and message is not None:
log_message(message, log_file=log_file, console=console)
return result
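# An illustrative call (directory, message and log file are hypothetical):
# resume only if ./last_run/source holds a valid source id, and log a
# message when it does not.
#
#   resume, source_id = checkpoint(is_source_created, "./last_run",
#                                  debug=False, console=True,
#                                  message="source not found, restarting",
#                                  log_file="bigmler_sessions")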
def file_number_of_lines(file_name):
"""Counts the number of lines in a file
"""
try:
item = (0, None)
with open(file_name) as file_handler:
for item in enumerate(file_handler):
pass
return item[0] + 1
except IOError:
return 0
def is_batch_prediction_created(path):
"""Checks existence and reads the batch prediction id from the
batch_prediction file in the path directory
"""
batch_prediction_id = None
try:
with open("%s%sbatch_prediction"
% (path, os.sep)) as batch_prediction_file:
batch_prediction_id = batch_prediction_file.readline().strip()
try:
batch_prediction_id = bigml.api.get_batch_prediction_id(
batch_prediction_id)
return True, batch_prediction_id
except ValueError:
return False, None
except IOError:
return False, None
def is_batch_centroid_created(path):
"""Checks existence and reads the batch centroid id from the
batch_centroid file in the path directory
"""
batch_centroid_id = None
try:
with open("%s%sbatch_centroid"
% (path, os.sep)) as batch_prediction_file:
batch_centroid_id = batch_prediction_file.readline().strip()
try:
batch_centroid_id = bigml.api.get_batch_centroid_id(
batch_centroid_id)
return True, batch_centroid_id
except ValueError:
return False, None
except IOError:
return False, None
def are_clusters_created(path, number_of_clusters):
"""Checks existence and reads the cluster ids from the clusters file in the
path directory
"""
cluster_ids = []
try:
with open("%s%sclusters" % (path, os.sep)) as clusters_file:
for line in clusters_file:
cluster = line.strip()
try:
cluster_id = bigml.api.get_cluster_id(cluster)
cluster_ids.append(cluster_id)
except ValueError:
return False, cluster_ids
if len(cluster_ids) == number_of_clusters:
return True, cluster_ids
else:
return False, cluster_ids
except IOError:
return False, cluster_ids
def is_dataset_exported(filename):
"""Checks the existence of the CSV exported dataset file
"""
try:
with open(filename):
return True
except IOError:
return False
def is_batch_anomaly_score_created(path):
"""Checks existence and reads the batch anomaly score id from the
batch_anomaly_score file in the path directory
"""
batch_anomaly_score_id = None
try:
with open("%s%sbatch_anomaly_score"
% (path, os.sep)) as batch_prediction_file:
batch_anomaly_score_id = batch_prediction_file.readline().strip()
try:
batch_anomaly_score_id = bigml.api.get_batch_anomaly_score_id(
batch_anomaly_score_id)
return True, batch_anomaly_score_id
except ValueError:
return False, None
except IOError:
return False, None
def are_anomalies_created(path, number_of_anomalies):
"""Checks existence and reads the anomaly detector ids from the
anomalies file in the path directory
"""
anomaly_ids = []
try:
with open("%s%sanomalies" % (path, os.sep)) as anomalies_file:
for line in anomalies_file:
anomaly = line.strip()
try:
anomaly_id = bigml.api.get_anomaly_id(anomaly)
anomaly_ids.append(anomaly_id)
except ValueError:
return False, anomaly_ids
if len(anomaly_ids) == number_of_anomalies:
return True, anomaly_ids
else:
return False, anomaly_ids
except IOError:
return False, anomaly_ids
def is_project_created(path):
"""Checks existence and reads project id from the
project file in the path directory
"""
project_id = None
try:
with open("%s%sproject"
% (path, os.sep)) as project_file:
project_id = project_file.readline().strip()
try:
project_id = bigml.api.get_project_id(
project_id)
return True, project_id
except ValueError:
return False, None
except IOError:
return False, None
def are_samples_created(path, number_of_samples):
"""Checks existence and reads the samples ids from the samples file in the
path directory
"""
sample_ids = []
try:
with open("%s%ssamples" % (path, os.sep)) as samples_file:
for line in samples_file:
sample = line.strip()
try:
sample_id = bigml.api.get_sample_id(sample)
sample_ids.append(sample_id)
except ValueError:
return False, sample_ids
if len(sample_ids) == number_of_samples:
return True, sample_ids
else:
return False, sample_ids
except IOError:
return False, sample_ids
| brokendata/bigmler | bigmler/checkpoint.py | Python | apache-2.0 | 12,195 |
import random
import threading
import uuid
import socket
import six
import pexpect
from pandaharvester.harvestercore import core_utils
if six.PY2:
pexpect_spawn = pexpect.spawn
else:
pexpect_spawn = pexpect.spawnu
# logger
baseLogger = core_utils.setup_logger('ssh_tunnel_pool')
# Pool of SSH tunnels
class SshTunnelPool(object):
# constructor
def __init__(self):
self.lock = threading.Lock()
self.pool = dict()
self.params = dict()
# make a dict key
def make_dict_key(self, host, port):
return '{0}:{1}'.format(host, port)
# make a tunnel server
def make_tunnel_server(self, remote_host, remote_port, remote_bind_port=None, num_tunnels=1,
ssh_username=None, ssh_password=None, private_key=None, pass_phrase=None,
jump_host=None, jump_port=None, login_timeout=60, reconnect=False,
with_lock=True):
dict_key = self.make_dict_key(remote_host, remote_port)
if with_lock:
self.lock.acquire()
# make dicts
if dict_key not in self.pool:
self.pool[dict_key] = []
# preserve parameters
if not reconnect:
self.params[dict_key] = {'remote_bind_port': remote_bind_port,
'num_tunnels': num_tunnels,
'ssh_username': ssh_username,
'ssh_password': ssh_password,
'private_key': private_key,
'pass_phrase': pass_phrase,
'jump_host': jump_host,
'jump_port': jump_port,
'login_timeout': login_timeout
}
else:
remote_bind_port = self.params[dict_key]['remote_bind_port']
num_tunnels = self.params[dict_key]['num_tunnels']
ssh_username = self.params[dict_key]['ssh_username']
ssh_password = self.params[dict_key]['ssh_password']
private_key = self.params[dict_key]['private_key']
pass_phrase = self.params[dict_key]['pass_phrase']
jump_host = self.params[dict_key]['jump_host']
jump_port = self.params[dict_key]['jump_port']
login_timeout = self.params[dict_key]['login_timeout']
# make a tunnel server
for i in range(num_tunnels - len(self.pool[dict_key])):
# get a free port
s = socket.socket()
s.bind(('', 0))
com = "ssh -L {local_bind_port}:127.0.0.1:{remote_bind_port} "
com += "-p {remote_port} {ssh_username}@{remote_host} "
com += "-o ServerAliveInterval=120 -o ServerAliveCountMax=2 "
if private_key is not None:
com += "-i {private_key} "
if jump_port is not None:
com += '-o ProxyCommand="ssh -p {jump_port} {ssh_username}@{jump_host} -W %h:%p" '
local_bind_port = s.getsockname()[1]
com = com.format(remote_host=remote_host, remote_port=remote_port, remote_bind_port=remote_bind_port,
ssh_username=ssh_username, private_key=private_key, jump_host=jump_host,
jump_port=jump_port, local_bind_port=local_bind_port)
s.close()
# list of expected strings
loginString = 'login_to_be_confirmed_with ' + uuid.uuid4().hex
expected_list = [
pexpect.EOF,
pexpect.TIMEOUT,
"(?i)are you sure you want to continue connecting",
'(?i)password:',
'(?i)enter passphrase for key.*',
loginString,
]
c = pexpect_spawn(com, echo=False)
c.logfile_read = baseLogger.handlers[0].stream
isOK = False
for iTry in range(3):
idx = c.expect(expected_list, timeout=login_timeout)
if idx == expected_list.index(loginString):
# succeeded
isOK = True
break
if idx == 1:
# timeout
baseLogger.error('timeout when making a tunnel with com={0} out={1}'.format(com,
c.buffer))
c.close()
break
if idx == 2:
# new certificate
c.sendline("yes")
idx = c.expect(expected_list, timeout=login_timeout)
if idx == 1:
# timeout
baseLogger.error('timeout after accepting new cert with com={0} out={1}'.format(com,
c.buffer))
c.close()
break
if idx == 3:
# password prompt
c.sendline(ssh_password)
elif idx == 4:
# passphrase prompt
c.sendline(pass_phrase)
elif idx == 0:
                    baseLogger.error('something weird with com={0} out={1}'.format(com, c.buffer))
c.close()
break
# exec to confirm login
c.sendline('echo {0}'.format(loginString))
if isOK:
self.pool[dict_key].append((local_bind_port, c))
if with_lock:
self.lock.release()
# get a tunnel
def get_tunnel(self, remote_host, remote_port):
dict_key = self.make_dict_key(remote_host, remote_port)
self.lock.acquire()
active_tunnels = []
someClosed = False
for port, child in self.pool[dict_key]:
if child.isalive():
active_tunnels.append([port, child])
else:
child.close()
someClosed = True
if someClosed:
self.make_tunnel_server(remote_host, remote_port, reconnect=True, with_lock=False)
active_tunnels = [item for item in self.pool[dict_key] if item[1].isalive()]
if len(active_tunnels) > 0:
port, child = random.choice(active_tunnels)
else:
port, child = None, None
self.lock.release()
return ("127.0.0.1", port, child)
# singleton
sshTunnelPool = SshTunnelPool()
del SshTunnelPool
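# --- Usage sketch (illustrative only; not part of the original module) ---
# A minimal example of how a caller might use the module-level singleton.
# The host, ports, username, and key path below are placeholders, not real
# endpoints.
if __name__ == '__main__':
    sshTunnelPool.make_tunnel_server('remote.example.org', 22,
                                     remote_bind_port=25080, num_tunnels=2,
                                     ssh_username='harvester',
                                     private_key='~/.ssh/id_rsa')
    # get_tunnel() returns ('127.0.0.1', local_port, pexpect_child); port is
    # None when no tunnel in the pool is alive.
    host, port, child = sshTunnelPool.get_tunnel('remote.example.org', 22)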
| PanDAWMS/panda-harvester | pandaharvester/harvestermiddleware/ssh_tunnel_pool.py | Python | apache-2.0 | 6,713 |
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FIBC Api Server
"""
import logging
from ryu.base import app_manager
from ryu.lib import hub
from ryu.controller import handler
from fabricflow.fibc.net import fibcnet
from fabricflow.fibc.api import fibcapi
from fabricflow.fibc.api import fibcapi_pb2 as pb
from fabricflow.fibc.lib import fibcevt
from fabricflow.fibc.lib import fibclog
from fabricflow.fibc.lib import fibcryu
from fabricflow.fibc.dbm import fibcdbm
_LOG = logging.getLogger(__name__)
_RE_ID_ANY = "0.0.0.0"
class FIBCApiVmController(object):
"""
FIBCApi Vm Controller
"""
def __init__(self, soc, data, app):
self.soc = soc
self.app = app
self.hello = fibcapi.parse_hello(data)
def _send_evt(self, evt):
_LOG.info("%s %s %s", evt.mtype, evt.msg, evt)
self.app.send_event_to_observers(evt)
def get_id(self):
"""
get client id.
"""
return self.hello.re_id # pylint: disable=no-member
def initialize(self):
"""
        send vm-config(enter) event
"""
cid = self.get_id()
if cid in self.app.clients:
raise KeyError()
self.app.clients[cid] = self
_LOG.info("client(VM) %s registerd.", cid)
if cid != _RE_ID_ANY:
evt = fibcevt.EventFIBCVmConfig(self.hello, True)
self._send_evt(evt)
def finalize(self):
"""
send vm-config(leave) event
"""
cid = self.get_id()
self.app.clients.pop(cid)
_LOG.info("client(VM) %s unregistered.", cid)
if cid != _RE_ID_ANY:
evt = fibcevt.EventFIBCVmConfig(self.hello, False)
self._send_evt(evt)
def send_data(self, mtype, data, xid=0):
"""
send data to vm
"""
fibcnet.write_fib_msg(self.soc, mtype, xid, data)
def dispatch(self, hdr, data):
"""
        Dispatch message.
"""
mtype = hdr[0]
if mtype == pb.PORT_CONFIG:
port_conf = fibcapi.parse_port_config(data)
port_conf_evt = fibcevt.EventFIBCPortConfig(port_conf)
self._send_evt(port_conf_evt)
elif mtype == pb.FLOW_MOD:
flow_mod = fibcapi.parse_flow_mod(data)
flow_mod_evt = fibcevt.EventFIBCFlowMod(flow_mod)
self._send_evt(flow_mod_evt)
elif mtype == pb.GROUP_MOD:
group_mod = fibcapi.parse_group_mod(data)
group_mod_evt = fibcevt.EventFIBCGroupMod(group_mod)
self._send_evt(group_mod_evt)
else:
_LOG.warn("Unknown message %s", hdr)
if fibclog.dump_msg():
_LOG.debug("%s", data)
class FIBCApiDpController(object):
"""
FIBCApi DP Controller
"""
def __init__(self, soc, data, app):
self.soc = soc
self.app = app
self.que = hub.Queue() # (mtype, data, xid)
self.hello = fibcapi.parse_ff_hello(data)
self.dpath = fibcryu.FFDatapath(self.que, self.get_id())
self.ports = list()
def _send_evt(self, evt):
_LOG.info("%s %s", evt.mtype, evt)
self.app.send_event_to_observers(evt)
def _process_que(self):
_LOG.debug("_process_que %s started.", self.get_id())
while True:
msg = self.que.get() # msg is None or (mtype, data, xid)
if msg is None:
break
self.send_data(*msg)
_LOG.debug("_process_que %s exit.", self.get_id())
def get_id(self):
"""
get client id
"""
return self.hello.dp_id # pylint: disable=no-member
def initialize(self):
"""
        send MultipartRequest(PortDesc)
"""
fibcdbm.dps().add_dp(self.dpath)
_LOG.debug("FFDatapath registered. %s", self.dpath)
msg = fibcapi.new_ff_multipart_request_portdesc(self.get_id(), internal=True)
self.send_msg(pb.FF_MULTIPART_REQUEST, msg)
hub.spawn(self._process_que)
def finalize(self):
"""
send event (leave)
"""
self.que.put(None)
evt = fibcevt.EventFIBCEnterDP(self.dpath, False, self.ports)
self._send_evt(evt)
fibcdbm.dps().del_dp(self.get_id())
_LOG.debug("FFDatapath unregistered. %s", self.dpath)
def send_msg(self, mtype, msg, xid=0):
"""
put to send queue.
"""
data = msg.SerializeToString()
self.que.put((mtype, data, xid))
def send_data(self, mtype, data, xid=0):
"""
write msgs to fibc sock.
"""
fibcnet.write_fib_msg(self.soc, mtype, xid, data)
def dispatch(self, hdr, data):
"""
        Dispatch message.
"""
mtype = hdr[0]
if mtype == pb.FF_MULTIPART_REPLY:
msg = fibcapi.parse_ff_multipart_reply(data)
if msg.mp_type == pb.FFMultipart.PORT_DESC and msg.port_desc.internal: # pylint: disable=no-member
self.ports = msg.port_desc.port # pylint: disable=no-member
evt = fibcevt.EventFIBCEnterDP(self.dpath, True, msg.port_desc.port) # pylint: disable=no-member
self._send_evt(evt)
else:
evt = fibcevt.EventFIBCMultipartReply(self.dpath,
msg,
fibcnet.get_fib_header_xid(hdr))
self._send_evt(evt)
elif mtype == pb.FF_PACKET_IN:
msg = fibcapi.parse_ff_packet_in(data)
evt = fibcevt.EventFIBCPacketIn(self.dpath, msg, fibcnet.get_fib_header_xid(hdr))
self._send_evt(evt)
elif mtype == pb.FF_PORT_STATUS:
msg = fibcapi.parse_ff_port_status(data)
msg = fibcryu.FFPortStatus(self.dpath, msg.reason, msg.port) # pylint: disable=no-member
evt = fibcevt.EventFIBCFFPortStatus(msg)
self._send_evt(evt)
elif mtype == pb.FF_L2ADDR_STATUS:
msg = fibcapi.parse_ff_l2addr_status(data)
evt = fibcevt.EventFIBCFFL2AddrStatus(self.dpath, msg)
self._send_evt(evt)
else:
_LOG.warn("Unknown message dp_id:%d %s", self.dpath.id, hdr)
if fibclog.dump_msg():
_LOG.debug("%s", data)
class FIBCApiApp(app_manager.RyuApp):
"""
FIBC Api Server
"""
_EVENTS = [
fibcevt.EventFIBCVmConfig,
fibcevt.EventFIBCPortConfig,
fibcevt.EventFIBCFlowMod,
fibcevt.EventFIBCGroupMod,
fibcevt.EventFIBCEnterDP,
fibcevt.EventFIBCMultipartReply,
fibcevt.EventFIBCPacketIn,
fibcevt.EventFIBCFFPortStatus,
fibcevt.EventFIBCFFL2AddrStatus,
]
def __init__(self, *args, **kwargs):
super(FIBCApiApp, self).__init__(*args, **kwargs)
self.clients = dict()
def _stream_server(self, host):
sserver = hub.StreamServer(host, self.on_connect)
sserver.serve_forever()
def start_server(self, host):
"""
Start server
host: (addr, port)
"""
hub.spawn(self._stream_server, host)
_LOG.info("Server started.")
def send_to_monitor(self, mtype, data, xid):
"""
send msgs to monitor.
"""
monitor = self.clients.get(_RE_ID_ANY, None)
if monitor is not None:
            monitor.send_data(mtype, data, xid)
def on_connect(self, soc, addr):
"""
        Receive and process message from client.
- addr: (ip, port)
"""
_LOG.info("NewConnection %s", addr)
hdr, data = fibcnet.read_fib_msg(soc)
try:
if hdr[0] == pb.HELLO:
ctl = FIBCApiVmController(soc, data, self)
elif hdr[0] == pb.FF_HELLO:
ctl = FIBCApiDpController(soc, data, self)
else:
raise TypeError()
if fibclog.dump_msg():
_LOG.debug("%s", ctl.hello)
ctl.initialize()
while True:
try:
hdr, data = fibcnet.read_fib_msg(soc)
if hdr is None:
_LOG.info("Disconnected %s", addr)
break
_LOG.debug("Recv %s", hdr)
ctl.dispatch(hdr, data)
except Exception as ex: # pylint: disable=broad-except
_LOG.exception("%s", ex)
ctl.finalize()
except Exception as ex: # pylint: disable=broad-except
_LOG.exception("Invalid message. %s %s", hdr, ex)
finally:
soc.close()
_LOG.debug("Connection closed %s", addr)
def send_to_vm(self, mtype, msg, xid=0):
"""
Send message to ribc.
"""
cid = msg.re_id
if fibclog.dump_msg():
_LOG.debug("%s %s %s %s", cid, mtype, msg, xid)
try:
data = msg.SerializeToString()
client = self.clients[cid]
self.send_to_monitor(mtype, data, xid)
client.send_data(mtype, data, xid)
except KeyError as err:
_LOG.warn("client not exist. id:%s, %s", cid, err)
@handler.set_ev_cls([fibcevt.EventFIBCPortStatus,
fibcevt.EventFIBCL2AddrStatus,
fibcevt.EventFIBCDpStatus], handler.MAIN_DISPATCHER)
def _send_msg_to_vm_handler(self, evt):
"""
process event to send to ribc.
"""
msg = evt.msg
mtype = evt.mtype
_LOG.info("send_to_vm: %s %s", mtype, msg)
self.send_to_vm(mtype, msg)
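# --- Client-side sketch (illustrative; not part of the original module) ---
# Based on the framing used above (fibcnet.write_fib_msg(soc, mtype, xid,
# data)), a VM client would register itself roughly as follows. The server
# address and the pb.Hello message/field names are assumptions inferred from
# the parse_hello() and hello.re_id usage above.
#
#   import socket
#   soc = socket.create_connection(('127.0.0.1', 50070))
#   hello = pb.Hello(re_id='10.0.1.1')
#   fibcnet.write_fib_msg(soc, pb.HELLO, 0, hello.SerializeToString())
#   hdr, data = fibcnet.read_fib_msg(soc)   # wait for the first reply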
| beluganos/beluganos | src/fabricflow/fibc/app/fibcapi.py | Python | apache-2.0 | 10,265 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install JAX-DFT."""
import os
import setuptools
# Read in requirements
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f:
requirements = [r.strip() for r in f]
setuptools.setup(
name='jax_dft',
version='0.0.0',
license='Apache 2.0',
author='Google LLC',
author_email='[email protected]',
install_requires=requirements,
url='https://github.com/google-research/google-research/'
'tree/master/jax_dft',
packages=setuptools.find_packages(),
python_requires='>=3.6')
| google-research/google-research | jax_dft/setup.py | Python | apache-2.0 | 1,145 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a text ad with ad parameters. To get ad groups, run
get_ad_groups.py. To get keywords, run add_keywords.py.
Tags: AdGroupAdService.mutate, AdParamService.mutate
Api: AdWordsOnly
"""
__author__ = '[email protected] (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
criterion_id = 'INSERT_KEYWORD_CRITERION_ID_HERE'
def main(client, ad_group_id, criterion_id):
# Initialize appropriate service.
ad_group_ad_service = client.GetAdGroupAdService(
'https://adwords-sandbox.google.com', 'v201109')
ad_param_service = client.GetAdParamService(
'https://adwords-sandbox.google.com', 'v201109')
# Construct operations for adding text ad object and add to an ad group.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Low-gravity fun for {param1:cheap}.',
'description2': 'Only {param2:a few} seats left!',
'headline': 'Luxury Mars Cruises'
},
'status': 'ENABLED'
}
}]
ads = ad_group_ad_service.Mutate(operations)[0]['value']
# Display results.
for ad in ads:
print ('Text ad with id \'%s\' was successfully added to an ad group with '
'id \'%s\'.' % (ad['adGroupId'], ad['ad']['id']))
# Construct operations for setting ad parameters.
operations = [
{
'operator': 'SET',
'operand': {
'adGroupId': ad_group_id,
'criterionId': criterion_id,
'insertionText': '£100',
'paramIndex': '1'
}
},
{
'operator': 'SET',
'operand': {
'adGroupId': ad_group_id,
'criterionId': criterion_id,
'insertionText': '50',
'paramIndex': '2'
}
}
]
ad_params = ad_param_service.Mutate(operations)
# Display results.
for ad_param in ad_params:
print ('Ad parameter with text \'%s\' was successfully set for criterion '
'with id \'%s\' and ad group id \'%s\'.'
% (ad_param['insertionText'], ad_param['criterionId'],
ad_param['adGroupId']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_group_id, criterion_id)
| nearlyfreeapps/python-googleadwords | examples/adspygoogle/adwords/v201109/campaign_management/set_ad_parameters.py | Python | apache-2.0 | 3,488 |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-spanner documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-spanner"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-spanner",
"github_user": "googleapis",
"github_repo": "python-spanner",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-spanner-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-spanner.tex",
"google-cloud-spanner Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-spanner",
"google-cloud-spanner Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-spanner",
"google-cloud-spanner Documentation",
author,
"google-cloud-spanner",
"google-cloud-spanner Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| googleapis/python-spanner | docs/conf.py | Python | apache-2.0 | 12,378 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import abc
import contextlib
import datetime
import functools
import hashlib
import inspect
import logging as py_logging
import os
import pyclbr
import random
import re
import shutil
import socket
import stat
import sys
import tempfile
import time
import types
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
from xml.sax import saxutils
from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import timeutils
import retrying
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
VALID_TRACE_FLAGS = {'method', 'api'}
TRACE_METHOD = False
TRACE_API = False
synchronized = lockutils.synchronized_with_prefix('cinder-')
def find_config(config_path):
"""Find a configuration file using the given hint.
:param config_path: Full or relative path to the config.
:returns: Full path of the config, if it exists.
:raises: `cinder.exception.ConfigNotFound`
"""
possible_locations = [
config_path,
os.path.join(CONF.state_path, "etc", "cinder", config_path),
os.path.join(CONF.state_path, "etc", config_path),
os.path.join(CONF.state_path, config_path),
"/etc/cinder/%s" % config_path,
]
for path in possible_locations:
if os.path.exists(path):
return os.path.abspath(path)
raise exception.ConfigNotFound(path=os.path.abspath(config_path))
def as_int(obj, quiet=True):
# Try "2" -> 2
try:
return int(obj)
except (ValueError, TypeError):
pass
# Try "2.5" -> 2
try:
return int(float(obj))
except (ValueError, TypeError):
pass
# Eck, not sure what this is then.
if not quiet:
raise TypeError(_("Can not translate %s to integer.") % (obj))
return obj
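# Example (sketch of the conversions documented above):
#
#   >>> as_int("2")
#   2
#   >>> as_int("2.5")
#   2
#   >>> as_int("oops")   # quiet=True: the input is returned unchanged
#   'oops'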
def is_int_like(val):
"""Check if a value looks like an int."""
try:
return str(int(val)) == str(val)
except Exception:
return False
def check_exclusive_options(**kwargs):
"""Checks that only one of the provided options is actually not-none.
Iterates over all the kwargs passed in and checks that only one of said
arguments is not-none, if more than one is not-none then an exception will
be raised with the names of those arguments who were not-none.
"""
if not kwargs:
return
pretty_keys = kwargs.pop("pretty_keys", True)
exclusive_options = {}
for (k, v) in kwargs.items():
if v is not None:
exclusive_options[k] = True
if len(exclusive_options) > 1:
# Change the format of the names from pythonic to
# something that is more readable.
#
# Ex: 'the_key' -> 'the key'
if pretty_keys:
names = [k.replace('_', ' ') for k in kwargs.keys()]
else:
names = kwargs.keys()
names = ", ".join(sorted(names))
msg = (_("May specify only one of %s") % (names))
raise exception.InvalidInput(reason=msg)
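# Example (sketch):
#
#   >>> check_exclusive_options(snapshot_id=None, source_volid='v-1')   # OK
#   >>> check_exclusive_options(snapshot_id='s-1', source_volid='v-1')
#   ...   # raises InvalidInput: May specify only one of snapshot id, source volid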
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() method."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = get_root_helper()
return processutils.execute(*cmd, **kwargs)
def check_ssh_injection(cmd_list):
ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
'<']
# Check whether injection attacks exist
for arg in cmd_list:
arg = arg.strip()
# Check for matching quotes on the ends
is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
if is_quoted:
# Check for unescaped quotes within the quoted argument
quoted = is_quoted.group('quoted')
if quoted:
if (re.match('[\'"]', quoted) or
re.search('[^\\\\][\'"]', quoted)):
raise exception.SSHInjectionThreat(command=cmd_list)
else:
# We only allow spaces within quoted arguments, and that
# is the only special character allowed within quotes
if len(arg.split()) > 1:
raise exception.SSHInjectionThreat(command=cmd_list)
        # Second, check for dangerous shell characters in the command; any
        # shell special operator must appear as a single, standalone argument.
for c in ssh_injection_pattern:
if c not in arg:
continue
result = arg.find(c)
if not result == -1:
if result == 0 or not arg[result - 1] == '\\':
raise exception.SSHInjectionThreat(command=cmd_list)
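# Example (sketch):
#
#   >>> check_ssh_injection(['ls', '-l'])          # passes silently
#   >>> check_ssh_injection(['ls', '; rm -rf /'])
#   ...   # raises SSHInjectionThreat: the unquoted argument contains
#   ...   # spaces and shell metacharacters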
def create_channel(client, width, height):
"""Invoke an interactive shell session on server."""
channel = client.invoke_shell()
channel.resize_pty(width, height)
return channel
def cinderdir():
import cinder
return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0]
def last_completed_audit_period(unit=None):
"""This method gives you the most recently *completed* audit period.
arguments:
units: string, one of 'hour', 'day', 'month', 'year'
Periods normally begin at the beginning (UTC) of the
period unit (So a 'day' period begins at midnight UTC,
a 'month' unit on the 1st, a 'year' on Jan, 1)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous.
"""
if not unit:
unit = CONF.volume_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
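# Example (sketch, assuming "now" is 2015-06-10 14:30 UTC):
#
#   >>> last_completed_audit_period('day')
#   (datetime(2015, 6, 9, 0, 0), datetime(2015, 6, 10, 0, 0))
#   >>> last_completed_audit_period('day@18')
#   (datetime(2015, 6, 8, 18, 0), datetime(2015, 6, 9, 18, 0))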
def list_of_dicts_to_dict(seq, key):
"""Convert list of dicts to a indexted dict.
Takes a list of dicts, and converts it a nested dict
indexed by <key>
:param seq: list of dicts
:parm key: key in dicts to index by
example:
lst = [{'id': 1, ...}, {'id': 2, ...}...]
key = 'id'
returns {1:{'id': 1, ...}, 2:{'id':2, ...}
"""
return {d[key]: dict(d, index=d[key]) for (i, d) in enumerate(seq)}
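# Example (sketch): note that an 'index' key is also added to each entry.
#
#   >>> list_of_dicts_to_dict([{'id': 1, 'v': 'a'}, {'id': 2, 'v': 'b'}], 'id')
#   {1: {'id': 1, 'v': 'a', 'index': 1}, 2: {'id': 2, 'v': 'b', 'index': 2}}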
class ProtectedExpatParser(expatreader.ExpatParser):
"""An expat parser which disables DTD's and entities by default."""
def __init__(self, forbid_dtd=True, forbid_entities=True,
*args, **kwargs):
# Python 2.x old style class
expatreader.ExpatParser.__init__(self, *args, **kwargs)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise ValueError("Inline DTD forbidden")
def entity_decl(self, entityName, is_parameter_entity, value, base,
systemId, publicId, notationName):
raise ValueError("<!ENTITY> forbidden")
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise ValueError("<!ENTITY> forbidden")
def reset(self):
expatreader.ExpatParser.reset(self)
if self.forbid_dtd:
self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
if self.forbid_entities:
self._parser.EntityDeclHandler = self.entity_decl
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
def safe_minidom_parse_string(xml_string):
"""Parse an XML string using minidom safely.
"""
try:
return minidom.parseString(xml_string, parser=ProtectedExpatParser())
except sax.SAXParseException:
raise expat.ExpatError()
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML."""
    return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
A 'None' in items or any child expressions will be ignored, this function
will not throw because of None (anywhere) in items. The returned list
will contain no None values.
"""
if path is None:
raise exception.Error('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.Error('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not."""
val = str(val).lower()
return (val == 'true' or val == 'false' or
val == 'yes' or val == 'no' or
val == 'y' or val == 'n' or
val == '1' or val == '0')
def is_none_string(val):
"""Check if a string represents a None value."""
if not isinstance(val, six.string_types):
return False
return val.lower() == 'none'
def monkey_patch():
"""Patches decorators for all functions in a specified module.
If the CONF.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
    Example: 'cinder.api.ec2.cloud:cinder.openstack.common.notifier.api.notify_decorator'
Parameters of the decorator is as follows.
(See cinder.openstack.common.notifier.api.notify_decorator)
:param name: name of the function
:param function: object of the function
"""
    # If CONF.monkey_patch is not True, this function does nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
setattr(
clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
    >>> make_dev_path('xvdc')
    '/dev/xvdc'
    >>> make_dev_path('xvdc', 1)
    '/dev/xvdc1'
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def sanitize_hostname(hostname):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
if six.PY3:
hostname = hostname.encode('latin-1', 'ignore')
hostname = hostname.decode('latin-1')
else:
if isinstance(hostname, six.text_type):
hostname = hostname.encode('latin-1', 'ignore')
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
return hostname
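# Example (sketch):
#
#   >>> sanitize_hostname('My_Host Name.')
#   'my-host-name'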
def hash_file(file_like_object):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
any(map(checksum.update, iter(lambda: file_like_object.read(32768), b'')))
return checksum.hexdigest()
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = (timeutils.utcnow() - last_heartbeat).total_seconds()
return abs(elapsed) <= CONF.service_down_time
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except processutils.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
    :param owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.debug('Could not remove tmpdir: %s',
six.text_type(e))
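# Example (sketch): create a scratch directory that is removed on exit.
#
#   with tempdir(prefix='cinder-') as tmp:
#       scratch = os.path.join(tmp, 'scratch.img')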
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
def get_root_helper():
return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config
def brick_get_connector_properties(multipath=False, enforce_multipath=False):
"""Wrapper to automatically set root_helper in brick calls.
:param multipath: A boolean indicating whether the connector can
support multipath.
:param enforce_multipath: If True, it raises exception when multipath=True
is specified but multipathd is not running.
If False, it falls back to multipath=False
when multipathd is not running.
"""
root_helper = get_root_helper()
return connector.get_connector_properties(root_helper,
CONF.my_ip,
multipath,
enforce_multipath)
def brick_get_connector(protocol, driver=None,
execute=processutils.execute,
use_multipath=False,
device_scan_attempts=3,
*args, **kwargs):
"""Wrapper to get a brick connector object.
This automatically populates the required protocol as well
as the root_helper needed to execute commands.
"""
root_helper = get_root_helper()
return connector.InitiatorConnector.factory(protocol, root_helper,
driver=driver,
execute=execute,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts,
*args, **kwargs)
def require_driver_initialized(driver):
"""Verifies if `driver` is initialized
If the driver is not initialized, an exception will be raised.
    :param driver: The driver instance.
:raises: `exception.DriverNotInitialized`
"""
# we can't do anything if the driver didn't init
if not driver.initialized:
driver_name = driver.__class__.__name__
LOG.error(_LE("Volume driver %s not initialized"), driver_name)
raise exception.DriverNotInitialized()
def get_file_mode(path):
"""This primarily exists to make unit testing easier."""
return stat.S_IMODE(os.stat(path).st_mode)
def get_file_gid(path):
"""This primarily exists to make unit testing easier."""
return os.stat(path).st_gid
def get_file_size(path):
"""Returns the file size."""
return os.stat(path).st_size
def _get_disk_of_partition(devpath, st=None):
"""Gets a disk device path and status from partition path.
Returns a disk device path from a partition device path, and stat for
the device. If devpath is not a partition, devpath is returned as it is.
For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is
for '/dev/disk1p1' ('p' is prepended to the partition number if the disk
name ends with numbers).
"""
diskpath = re.sub('(?:(?<=\d)p)?\d+$', '', devpath)
if diskpath != devpath:
try:
st_disk = os.stat(diskpath)
if stat.S_ISBLK(st_disk.st_mode):
return (diskpath, st_disk)
except OSError:
pass
# devpath is not a partition
if st is None:
st = os.stat(devpath)
return (devpath, st)
def get_blkdev_major_minor(path, lookup_for_file=True):
"""Get 'major:minor' number of block device.
Get the device's 'major:minor' number of a block device to control
I/O ratelimit of the specified path.
If lookup_for_file is True and the path is a regular file, lookup a disk
device which the file lies on and returns the result for the device.
"""
st = os.stat(path)
if stat.S_ISBLK(st.st_mode):
path, st = _get_disk_of_partition(path, st)
return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))
elif stat.S_ISCHR(st.st_mode):
# No I/O ratelimit control is provided for character devices
return None
elif lookup_for_file:
# lookup the mounted disk which the file lies on
out, _err = execute('df', path)
devpath = out.split("\n")[1].split()[0]
        if devpath[0] != '/':
# the file is on a network file system
return None
return get_blkdev_major_minor(devpath, False)
else:
msg = _("Unable to get a block device for file \'%s\'") % path
raise exception.Error(msg)
def check_string_length(value, name, min_length=0, max_length=None):
"""Check the length of specified string.
:param value: the value of the string
:param name: the name of the string
:param min_length: the min_length of the string
:param max_length: the max_length of the string
"""
if not isinstance(value, six.string_types):
msg = _("%s is not a string or unicode") % name
raise exception.InvalidInput(message=msg)
if len(value) < min_length:
msg = _("%(name)s has a minimum character requirement of "
"%(min_length)s.") % {'name': name, 'min_length': min_length}
raise exception.InvalidInput(message=msg)
if max_length and len(value) > max_length:
msg = _("%(name)s has more than %(max_length)s "
"characters.") % {'name': name, 'max_length': max_length}
raise exception.InvalidInput(message=msg)
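# Example (sketch):
#
#   >>> check_string_length('volume-1', 'name', min_length=1, max_length=255)
#   >>> check_string_length('', 'name', min_length=1)
#   ...   # raises InvalidInput: name has a minimum character requirement of 1.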
_visible_admin_metadata_keys = ['readonly', 'attached_mode']
def add_visible_admin_metadata(volume):
"""Add user-visible admin metadata to regular metadata.
Extracts the admin metadata keys that are to be made visible to
non-administrators, and adds them to the regular metadata structure for the
passed-in volume.
"""
visible_admin_meta = {}
if volume.get('volume_admin_metadata'):
if isinstance(volume['volume_admin_metadata'], dict):
volume_admin_metadata = volume['volume_admin_metadata']
for key in volume_admin_metadata:
if key in _visible_admin_metadata_keys:
visible_admin_meta[key] = volume_admin_metadata[key]
else:
for item in volume['volume_admin_metadata']:
if item['key'] in _visible_admin_metadata_keys:
visible_admin_meta[item['key']] = item['value']
# avoid circular ref when volume is a Volume instance
elif (volume.get('admin_metadata') and
isinstance(volume.get('admin_metadata'), dict)):
for key in _visible_admin_metadata_keys:
if key in volume['admin_metadata'].keys():
visible_admin_meta[key] = volume['admin_metadata'][key]
if not visible_admin_meta:
return
# NOTE(zhiyan): update visible administration metadata to
# volume metadata, administration metadata will rewrite existing key.
if volume.get('volume_metadata'):
orig_meta = list(volume.get('volume_metadata'))
for item in orig_meta:
if item['key'] in visible_admin_meta.keys():
item['value'] = visible_admin_meta.pop(item['key'])
for key, value in visible_admin_meta.items():
orig_meta.append({'key': key, 'value': value})
volume['volume_metadata'] = orig_meta
# avoid circular ref when vol is a Volume instance
elif (volume.get('metadata') and
isinstance(volume.get('metadata'), dict)):
volume['metadata'].update(visible_admin_meta)
else:
volume['metadata'] = visible_admin_meta
def remove_invalid_filter_options(context, filters,
allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in filters
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
LOG.debug("Removing options '%s' from query.", bad_options)
for opt in unknown_options:
del filters[opt]
def is_blk_device(dev):
try:
if stat.S_ISBLK(os.stat(dev).st_mode):
return True
return False
except Exception:
LOG.debug('Path %s not found in is_blk_device check', dev)
return False
def retry(exceptions, interval=1, retries=3, backoff_rate=2,
wait_random=False):
def _retry_on_exception(e):
return isinstance(e, exceptions)
def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
exp = backoff_rate ** previous_attempt_number
wait_for = interval * exp
if wait_random:
random.seed()
wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0)
else:
wait_val = wait_for * 1000.0
LOG.debug("Sleeping for %s seconds", (wait_val / 1000.0))
return wait_val
def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
LOG.debug("Failed attempt %s", previous_attempt_number)
LOG.debug("Have been at this for %s seconds",
delay_since_first_attempt)
return previous_attempt_number == retries
if retries < 1:
raise ValueError('Retries must be greater than or '
'equal to 1 (received: %s). ' % retries)
def _decorator(f):
@six.wraps(f)
def _wrapper(*args, **kwargs):
r = retrying.Retrying(retry_on_exception=_retry_on_exception,
wait_func=_backoff_sleep,
stop_func=_print_stop)
return r.call(f, *args, **kwargs)
return _wrapper
return _decorator
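# Example (sketch): retry a flaky backend call with exponential backoff.
# VolumeBackendAPIException is used here only as a plausible cinder
# exception type for illustration.
#
#   @retry(exception.VolumeBackendAPIException, interval=2, retries=3)
#   def _ping_backend(client):
#       return client.ping()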
def convert_version_to_int(version):
try:
if isinstance(version, six.string_types):
version = convert_version_to_tuple(version)
if isinstance(version, tuple):
return six.moves.reduce(lambda x, y: (x * 1000) + y, version)
except Exception:
msg = _("Version %s is invalid.") % version
raise exception.CinderException(msg)
def convert_version_to_str(version_int):
version_numbers = []
factor = 1000
while version_int != 0:
version_number = version_int - (version_int // factor * factor)
version_numbers.insert(0, six.text_type(version_number))
version_int = version_int // factor
return '.'.join(map(str, version_numbers))
def convert_version_to_tuple(version_str):
return tuple(int(part) for part in version_str.split('.'))
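# Example (sketch): the three converters round-trip a dotted version string.
#
#   >>> convert_version_to_tuple('1.2.3')
#   (1, 2, 3)
#   >>> convert_version_to_int('1.2.3')
#   1002003
#   >>> convert_version_to_str(1002003)
#   '1.2.3'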
def convert_str(text):
"""Convert to native string.
Convert bytes and Unicode strings to native strings:
* convert to bytes on Python 2:
encode Unicode using encodeutils.safe_encode()
* convert to Unicode on Python 3: decode bytes from UTF-8
"""
if six.PY2:
return encodeutils.safe_encode(text)
else:
if isinstance(text, bytes):
return text.decode('utf-8')
else:
return text
def trace_method(f):
"""Decorates a function if TRACE_METHOD is true."""
@functools.wraps(f)
def trace_method_logging_wrapper(*args, **kwargs):
if TRACE_METHOD:
return trace(f)(*args, **kwargs)
return f(*args, **kwargs)
return trace_method_logging_wrapper
def trace_api(f):
"""Decorates a function if TRACE_API is true."""
@functools.wraps(f)
def trace_api_logging_wrapper(*args, **kwargs):
if TRACE_API:
return trace(f)(*args, **kwargs)
return f(*args, **kwargs)
return trace_api_logging_wrapper
def trace(f):
"""Trace calls to the decorated function.
This decorator should always be defined as the outermost decorator so it
is defined last. This is important so it does not interfere
with other decorators.
Using this decorator on a function will cause its execution to be logged at
`DEBUG` level with arguments, return values, and exceptions.
    :returns: a function decorator
"""
func_name = f.__name__
@functools.wraps(f)
def trace_logging_wrapper(*args, **kwargs):
if len(args) > 0:
maybe_self = args[0]
else:
maybe_self = kwargs.get('self', None)
if maybe_self and hasattr(maybe_self, '__module__'):
logger = logging.getLogger(maybe_self.__module__)
else:
logger = LOG
# NOTE(ameade): Don't bother going any further if DEBUG log level
# is not enabled for the logger.
if not logger.isEnabledFor(py_logging.DEBUG):
return f(*args, **kwargs)
all_args = inspect.getcallargs(f, *args, **kwargs)
logger.debug('==> %(func)s: call %(all_args)r',
{'func': func_name, 'all_args': all_args})
start_time = time.time() * 1000
try:
result = f(*args, **kwargs)
except Exception as exc:
total_time = int(round(time.time() * 1000)) - start_time
logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r',
{'func': func_name,
'time': total_time,
'exc': exc})
raise
total_time = int(round(time.time() * 1000)) - start_time
logger.debug('<== %(func)s: return (%(time)dms) %(result)r',
{'func': func_name,
'time': total_time,
'result': result})
return result
return trace_logging_wrapper
class TraceWrapperMetaclass(type):
"""Metaclass that wraps all methods of a class with trace_method.
This metaclass will cause every function inside of the class to be
decorated with the trace_method decorator.
To use the metaclass you define a class like so:
@six.add_metaclass(utils.TraceWrapperMetaclass)
class MyClass(object):
"""
def __new__(meta, classname, bases, classDict):
newClassDict = {}
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
# replace it with a wrapped version
attribute = functools.update_wrapper(trace_method(attribute),
attribute)
newClassDict[attributeName] = attribute
return type.__new__(meta, classname, bases, newClassDict)
class TraceWrapperWithABCMetaclass(abc.ABCMeta, TraceWrapperMetaclass):
"""Metaclass that wraps all methods of a class with trace."""
pass
def setup_tracing(trace_flags):
"""Set global variables for each trace flag.
Sets variables TRACE_METHOD and TRACE_API, which represent
whether to log method and api traces.
:param trace_flags: a list of strings
"""
global TRACE_METHOD
global TRACE_API
try:
trace_flags = [flag.strip() for flag in trace_flags]
except TypeError: # Handle when trace_flags is None or a test mock
trace_flags = []
for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS):
LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag)
TRACE_METHOD = 'method' in trace_flags
TRACE_API = 'api' in trace_flags
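# Example (sketch):
#
#   setup_tracing(['method', 'bogus'])   # TRACE_METHOD=True, TRACE_API=False;
#                                        # 'bogus' is logged as an invalid flag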
def resolve_hostname(hostname):
"""Resolves host name to IP address.
Resolves a host name (my.data.point.com) to an IP address (10.12.143.11).
This routine also works if the data passed in hostname is already an IP.
In this case, the same IP address will be returned.
:param hostname: Host name to resolve.
:return: IP Address for Host name.
"""
result = socket.getaddrinfo(hostname, None)[0]
(family, socktype, proto, canonname, sockaddr) = result
LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.',
{'host': hostname, 'ip': sockaddr[0]})
return sockaddr[0]
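# Example (sketch; actual results depend on the local resolver):
#
#   >>> resolve_hostname('localhost')
#   '127.0.0.1'
#   >>> resolve_hostname('10.12.143.11')   # an IP address resolves to itself
#   '10.12.143.11'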
| potsmaster/cinder | cinder/utils.py | Python | apache-2.0 | 34,378 |
# Copyright 2016-2017, Fabien Boucher
# Copyright 2016-2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
def get_version():
version = pkg_resources.get_distribution("repoxplorer").version
return version
| morucci/repoxplorer | repoxplorer/version.py | Python | apache-2.0 | 755 |
""" openconfig_mpls
This module provides data definitions for configuration of
Multiprotocol Label Switching (MPLS) and associated protocols for
signaling and traffic engineering.
RFC 3031\: Multiprotocol Label Switching Architecture
The MPLS / TE data model consists of several modules and
submodules as shown below. The top\-level MPLS module describes
the overall framework. Three types of LSPs are supported\:
i) traffic\-engineered (or constrained\-path)
ii) IGP\-congruent (LSPs that follow the IGP path)
iii) static LSPs which are not signaled
The structure of each of these LSP configurations is defined in
corresponding submodules. Companion modules define the relevant
configuration and operational data specific to key signaling
protocols used in operational practice.
+\-\-\-\-\-\-\-+
+\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\->\| MPLS \|<\-\-\-\-\-\-\-\-\-\-\-\-\-\-+
\| +\-\-\-\-\-\-\-+ \|
\| ^ \|
\| \| \|
+\-\-\-\-+\-\-\-\-\-+ +\-\-\-\-\-\-\-\-+\-\-\-\-\-\-\-+ +\-\-\-\-\-+\-\-\-\-\-+
\| TE LSPs \| \| IGP\-based LSPs \| \|static LSPs\|
\| \| \| \| \| \|
+\-\-\-\-\-\-\-\-\-\-+ +\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-+ +\-\-\-\-\-\-\-\-\-\-\-+
^ ^ ^ ^
\| +\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-+ \| +\-\-\-\-\-\-\-\-+
\| \| \| \|
\| +\-\-\-\-\-\-+ +\-+\-\-\-+\-+ +\-\-+\-\-+
+\-\-\-+ RSVP \| \|SEGMENT\| \| LDP \|
+\-\-\-\-\-\-+ \|ROUTING\| +\-\-\-\-\-+
+\-\-\-\-\-\-\-+
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
from ydk.models.openconfig.openconfig_mpls_types import LspOperStatusIdentity
from ydk.models.openconfig.openconfig_mpls_types import LspRoleIdentity
from ydk.models.openconfig.openconfig_mpls_types import MplsLabelEnum
from ydk.models.openconfig.openconfig_mpls_types import NullLabelTypeIdentity
from ydk.models.openconfig.openconfig_mpls_types import ProtectionTypeIdentity
from ydk.models.openconfig.openconfig_mpls_types import TunnelAdminStatusIdentity
from ydk.models.openconfig.openconfig_mpls_types import TunnelTypeEnum
from ydk.models.openconfig.openconfig_mpls_types import TunnelTypeIdentity
class CspfTieBreakingEnum(Enum):
"""
CspfTieBreakingEnum
type to indicate the CSPF selection policy when
multiple equal cost paths are available
.. data:: RANDOM = 0
CSPF calculation selects a random path among
multiple equal-cost paths to the destination
.. data:: LEAST_FILL = 1
CSPF calculation selects the path with greatest
available bandwidth
.. data:: MOST_FILL = 2
CSPF calculation selects the path with the least
available bandwidth
"""
RANDOM = 0
LEAST_FILL = 1
MOST_FILL = 2
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['CspfTieBreakingEnum']
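# Editor's illustrative helper (not part of the generated bindings): the YDK
# enums above are ordinary Python Enum members, so they can key a dict; this
# maps each CSPF tie-breaking policy to a plain-language summary.
def describe_cspf_policy(policy):
    summaries = {
        CspfTieBreakingEnum.RANDOM:
            'pick a random path among equal-cost candidates',
        CspfTieBreakingEnum.LEAST_FILL:
            'prefer the path with the greatest available bandwidth',
        CspfTieBreakingEnum.MOST_FILL:
            'prefer the path with the least available bandwidth',
    }
    return summaries[policy]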
class MplsHopTypeEnum(Enum):
"""
MplsHopTypeEnum
enumerated type for specifying loose or strict
paths
.. data:: LOOSE = 0
loose hop in an explicit path
.. data:: STRICT = 1
strict hop in an explicit path
"""
LOOSE = 0
STRICT = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['MplsHopTypeEnum']
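# Editor's illustrative helper (hypothetical, not generated code): build an
# explicit-path hop as a simple (address, hop_type) pair. A LOOSE hop may be
# expanded by path computation; a STRICT hop must be directly adjacent.
def make_hop(address, strict=False):
    hop_type = MplsHopTypeEnum.STRICT if strict else MplsHopTypeEnum.LOOSE
    return (address, hop_type)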
class MplsSrlgFloodingTypeEnum(Enum):
"""
MplsSrlgFloodingTypeEnum
Enumerated type for specifying how the SRLG is flooded
.. data:: FLOODED_SRLG = 0
SRLG is flooded in the IGP
.. data:: STATIC_SRLG = 1
SRLG is not flooded, the members are
statically configured
"""
FLOODED_SRLG = 0
STATIC_SRLG = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['MplsSrlgFloodingTypeEnum']
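# Editor's illustrative helper (not generated code): a statically configured
# SRLG is not flooded in the IGP, so its member links must be listed
# explicitly (see the StaticSrlgMembers container further below).
def srlg_requires_static_members(flooding_type):
    return flooding_type == MplsSrlgFloodingTypeEnum.STATIC_SRLG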
class TeBandwidthTypeEnum(Enum):
"""
TeBandwidthTypeEnum
enumerated type for specifying whether bandwidth is
explicitly specified or automatically computed
.. data:: SPECIFIED = 0
Bandwidth is explicitly specified
.. data:: AUTO = 1
Bandwidth is automatically computed
"""
SPECIFIED = 0
AUTO = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['TeBandwidthTypeEnum']
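# Editor's illustrative helper (not generated code): distinguish explicitly
# specified bandwidth from bandwidth the system computes automatically.
def bandwidth_is_explicit(bw_type):
    return bw_type == TeBandwidthTypeEnum.SPECIFIED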
class TeMetricTypeEnum(Enum):
"""
TeMetricTypeEnum
union type for setting the LSP TE metric to a
static value, or to track the IGP metric
.. data:: IGP = 0
set the LSP metric to track the underlying
IGP metric
"""
IGP = 0
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['TeMetricTypeEnum']
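# Editor's illustrative helper (not generated code): the TE metric is modeled
# as a union, either the IGP-tracking enum member or a static integer. A
# plain Enum member never compares equal to an int, so this check is safe.
def resolve_te_metric(metric, igp_metric):
    # When the metric tracks the IGP, fall back to the supplied IGP metric;
    # otherwise the metric is an explicit static value.
    if metric == TeMetricTypeEnum.IGP:
        return igp_metric
    return metric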
class PathComputationMethodIdentity(object):
"""
base identity for supported path computation
mechanisms
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
pass
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['PathComputationMethodIdentity']['meta_info']
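# Editor's illustrative note (comments only; the attribute path and the
# derived-identity name below are hypothetical): identityref leaves are set
# to an instance of a class derived from a base identity like the one above:
#
#   some_path.config.path_computation_method = ExplicitlyDefinedIdentity()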
class Mpls(object):
"""
Anchor point for mpls configuration and operational
data
.. attribute:: global_
general mpls configuration applicable to any type of LSP and signaling protocol \- label ranges, entropy label support may be added here
**type**\: :py:class:`Global <ydk.models.openconfig.openconfig_mpls.Mpls.Global>`
.. attribute:: lsps
LSP definitions and configuration
**type**\: :py:class:`Lsps <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps>`
.. attribute:: signaling_protocols
top\-level signaling protocol configuration
**type**\: :py:class:`SignalingProtocols <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols>`
.. attribute:: te_global_attributes
traffic\-engineering global attributes
**type**\: :py:class:`TeGlobalAttributes <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes>`
.. attribute:: te_interface_attributes
traffic engineering attributes specific for interfaces
**type**\: :py:class:`TeInterfaceAttributes <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes>`
.. attribute:: _is_presence
Is present if this instance represents a presence container, else not
**type**\: bool
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self._is_presence = True
self.global_ = Mpls.Global()
self.global_.parent = self
self.lsps = Mpls.Lsps()
self.lsps.parent = self
self.signaling_protocols = Mpls.SignalingProtocols()
self.signaling_protocols.parent = self
self.te_global_attributes = Mpls.TeGlobalAttributes()
self.te_global_attributes.parent = self
self.te_interface_attributes = Mpls.TeInterfaceAttributes()
self.te_interface_attributes.parent = self
class Global(object):
"""
general mpls configuration applicable to any
type of LSP and signaling protocol \- label ranges,
entropy label support may be added here
.. attribute:: config
Top level global MPLS configuration
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Global.Config>`
.. attribute:: mpls_interface_attributes
Parameters related to MPLS interfaces
**type**\: :py:class:`MplsInterfaceAttributes <ydk.models.openconfig.openconfig_mpls.Mpls.Global.MplsInterfaceAttributes>`
.. attribute:: state
Top level global MPLS state
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Global.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.Global.Config()
self.config.parent = self
self.mpls_interface_attributes = Mpls.Global.MplsInterfaceAttributes()
self.mpls_interface_attributes.parent = self
self.state = Mpls.Global.State()
self.state.parent = self
class Config(object):
"""
Top level global MPLS configuration
.. attribute:: null_label
The null\-label type used, implicit or explicit
**type**\: :py:class:`NullLabelTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.NullLabelTypeIdentity>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.null_label = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:global/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.null_label is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Global.Config']['meta_info']
class State(object):
"""
Top level global MPLS state
.. attribute:: null_label
The null\-label type used, implicit or explicit
**type**\: :py:class:`NullLabelTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.NullLabelTypeIdentity>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.null_label = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:global/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.null_label is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Global.State']['meta_info']
class MplsInterfaceAttributes(object):
"""
Parameters related to MPLS interfaces
.. attribute:: interface
List of TE interfaces
**type**\: list of :py:class:`Interface <ydk.models.openconfig.openconfig_mpls.Mpls.Global.MplsInterfaceAttributes.Interface>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
List of TE interfaces
.. attribute:: name <key>
The interface name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Global.MplsInterfaceAttributes.Interface.Config>`
.. attribute:: config
Configuration parameters related to MPLS interfaces\:
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Global.MplsInterfaceAttributes.Interface.Config>`
.. attribute:: state
State parameters related to TE interfaces
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Global.MplsInterfaceAttributes.Interface.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.name = None
self.config = Mpls.Global.MplsInterfaceAttributes.Interface.Config()
self.config.parent = self
self.state = Mpls.Global.MplsInterfaceAttributes.Interface.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters related to MPLS interfaces\:
.. attribute:: mpls_enabled
Enable MPLS forwarding on this interface
**type**\: bool
.. attribute:: name
reference to interface name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.mpls_enabled = None
self.name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.mpls_enabled is not None:
return True
if self.name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Global.MplsInterfaceAttributes.Interface.Config']['meta_info']
class State(object):
"""
State parameters related to TE interfaces
.. attribute:: mpls_enabled
Enable MPLS forwarding on this interface
**type**\: bool
.. attribute:: name
reference to interface name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.mpls_enabled = None
self.name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.mpls_enabled is not None:
return True
if self.name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Global.MplsInterfaceAttributes.Interface.State']['meta_info']
@property
def _common_path(self):
if self.name is None:
raise YPYModelError('Key property name is None')
return '/openconfig-mpls:mpls/openconfig-mpls:global/openconfig-mpls:mpls-interface-attributes/openconfig-mpls:interface[openconfig-mpls:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Global.MplsInterfaceAttributes.Interface']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:global/openconfig-mpls:mpls-interface-attributes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Global.MplsInterfaceAttributes']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:global'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.mpls_interface_attributes is not None and self.mpls_interface_attributes._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Global']['meta_info']
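# Editor's illustrative sketch (comments only, so the generated class body is
# unchanged; the interface name is a hypothetical example): enabling MPLS
# forwarding on one interface through these bindings.
#
#   mpls = Mpls()
#   intf = Mpls.Global.MplsInterfaceAttributes.Interface()
#   intf.name = 'GigabitEthernet0/0/0/0'
#   intf.config.name = intf.name
#   intf.config.mpls_enabled = True
#   mpls.global_.mpls_interface_attributes.interface.append(intf)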
class TeGlobalAttributes(object):
"""
traffic\-engineering global attributes
.. attribute:: igp_flooding_bandwidth
Interface bandwidth change percentages that trigger update events into the IGP traffic engineering database (TED)
**type**\: :py:class:`IgpFloodingBandwidth <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.IgpFloodingBandwidth>`
.. attribute:: mpls_admin_groups
Top\-level container for admin\-groups configuration and state
**type**\: :py:class:`MplsAdminGroups <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups>`
.. attribute:: srlg
Shared risk link groups attributes
**type**\: :py:class:`Srlg <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg>`
.. attribute:: te_lsp_timers
Definition for delays associated with setup and cleanup of TE LSPs
**type**\: :py:class:`TeLspTimers <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.TeLspTimers>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.igp_flooding_bandwidth = Mpls.TeGlobalAttributes.IgpFloodingBandwidth()
self.igp_flooding_bandwidth.parent = self
self.mpls_admin_groups = Mpls.TeGlobalAttributes.MplsAdminGroups()
self.mpls_admin_groups.parent = self
self.srlg = Mpls.TeGlobalAttributes.Srlg()
self.srlg.parent = self
self.te_lsp_timers = Mpls.TeGlobalAttributes.TeLspTimers()
self.te_lsp_timers.parent = self
class Srlg(object):
"""
Shared risk link groups attributes
.. attribute:: srlg
List of shared risk link groups
**type**\: list of :py:class:`Srlg <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg.Srlg>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.srlg = YList()
self.srlg.parent = self
self.srlg.name = 'srlg'
class Srlg(object):
"""
List of shared risk link groups
.. attribute:: name <key>
The SRLG group identifier
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg.Srlg.Config>`
.. attribute:: config
Configuration parameters related to the SRLG
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg.Srlg.Config>`
.. attribute:: state
State parameters related to the SRLG
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg.Srlg.State>`
.. attribute:: static_srlg_members
SRLG members for static (not flooded) SRLGs
**type**\: :py:class:`StaticSrlgMembers <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.name = None
self.config = Mpls.TeGlobalAttributes.Srlg.Srlg.Config()
self.config.parent = self
self.state = Mpls.TeGlobalAttributes.Srlg.Srlg.State()
self.state.parent = self
self.static_srlg_members = Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers()
self.static_srlg_members.parent = self
class Config(object):
"""
Configuration parameters related to the SRLG
.. attribute:: cost
The cost of the SRLG to the computation algorithm
**type**\: int
**range:** 0..4294967295
.. attribute:: flooding_type
The type of SRLG, either flooded in the IGP or statically configured
**type**\: :py:class:`MplsSrlgFloodingTypeEnum <ydk.models.openconfig.openconfig_mpls.MplsSrlgFloodingTypeEnum>`
.. attribute:: name
SRLG group identifier
**type**\: str
.. attribute:: value
group ID for the SRLG
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.cost = None
self.flooding_type = None
self.name = None
self.value = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.cost is not None:
return True
if self.flooding_type is not None:
return True
if self.name is not None:
return True
if self.value is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.Srlg.Srlg.Config']['meta_info']
class State(object):
"""
State parameters related to the SRLG
.. attribute:: cost
The cost of the SRLG to the computation algorithm
**type**\: int
**range:** 0..4294967295
.. attribute:: flooding_type
The type of SRLG, either flooded in the IGP or statically configured
**type**\: :py:class:`MplsSrlgFloodingTypeEnum <ydk.models.openconfig.openconfig_mpls.MplsSrlgFloodingTypeEnum>`
.. attribute:: name
SRLG group identifier
**type**\: str
.. attribute:: value
group ID for the SRLG
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.cost = None
self.flooding_type = None
self.name = None
self.value = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.cost is not None:
return True
if self.flooding_type is not None:
return True
if self.name is not None:
return True
if self.value is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.Srlg.Srlg.State']['meta_info']
class StaticSrlgMembers(object):
"""
SRLG members for static (not flooded) SRLGs
.. attribute:: members_list
List of SRLG members, which are expressed as IP address endpoints of links contained in the SRLG
**type**\: list of :py:class:`MembersList <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers.MembersList>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.members_list = YList()
self.members_list.parent = self
self.members_list.name = 'members_list'
class MembersList(object):
"""
List of SRLG members, which are expressed
as IP address endpoints of links contained in the
SRLG
.. attribute:: from_address <key>
The from address of the link in the SRLG
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: config
Configuration parameters relating to the SRLG members
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers.MembersList.Config>`
.. attribute:: state
State parameters relating to the SRLG members
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers.MembersList.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.from_address = None
self.config = Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers.MembersList.Config()
self.config.parent = self
self.state = Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers.MembersList.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to the
SRLG members
.. attribute:: from_address
IP address of the a\-side of the SRLG link
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: to_address
IP address of the z\-side of the SRLG link
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.from_address = None
self.to_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.from_address is not None:
return True
if self.to_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers.MembersList.Config']['meta_info']
class State(object):
"""
State parameters relating to the SRLG
members
.. attribute:: from_address
IP address of the a\-side of the SRLG link
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: to_address
IP address of the z\-side of the SRLG link
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.from_address = None
self.to_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.from_address is not None:
return True
if self.to_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers.MembersList.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.from_address is None:
raise YPYModelError('Key property from_address is None')
return self.parent._common_path +'/openconfig-mpls:members-list[openconfig-mpls:from-address = ' + str(self.from_address) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.from_address is not None:
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers.MembersList']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:static-srlg-members'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.members_list is not None:
for child_ref in self.members_list:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers']['meta_info']
@property
def _common_path(self):
if self.name is None:
raise YPYModelError('Key property name is None')
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes/openconfig-mpls:srlg/openconfig-mpls:srlg[openconfig-mpls:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
if self.static_srlg_members is not None and self.static_srlg_members._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.Srlg.Srlg']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes/openconfig-mpls:srlg'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.srlg is not None:
for child_ref in self.srlg:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.Srlg']['meta_info']
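# Editor's illustrative sketch (comments only; the group name and member
# addresses are hypothetical examples): defining a static SRLG with one
# member link, then attaching it to the top-level container.
#
#   entry = Mpls.TeGlobalAttributes.Srlg.Srlg()
#   entry.name = 'srlg-10'
#   entry.config.name = 'srlg-10'
#   entry.config.value = 10
#   entry.config.flooding_type = MplsSrlgFloodingTypeEnum.STATIC_SRLG
#   member = Mpls.TeGlobalAttributes.Srlg.Srlg.StaticSrlgMembers.MembersList()
#   member.from_address = '192.0.2.1'
#   member.config.from_address = '192.0.2.1'
#   member.config.to_address = '192.0.2.2'
#   entry.static_srlg_members.members_list.append(member)
#   mpls.te_global_attributes.srlg.srlg.append(entry)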
class IgpFloodingBandwidth(object):
"""
Interface bandwidth change percentages
that trigger update events into the IGP traffic
engineering database (TED)
.. attribute:: config
Configuration parameters for TED update threshold
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.IgpFloodingBandwidth.Config>`
.. attribute:: state
State parameters for TED update threshold
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.IgpFloodingBandwidth.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.TeGlobalAttributes.IgpFloodingBandwidth.Config()
self.config.parent = self
self.state = Mpls.TeGlobalAttributes.IgpFloodingBandwidth.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters for TED
update threshold
.. attribute:: delta_percentage
The percentage of the maximum\-reservable\-bandwidth considered as the delta that results in an IGP update being flooded
**type**\: int
**range:** 0..100
.. attribute:: down_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth) at which bandwidth updates are to be triggered when the bandwidth is decreasing
**type**\: list of int
**range:** 0..100
.. attribute:: threshold_specification
This value specifies whether a single set of threshold values should be used for both increasing and decreasing bandwidth when determining whether to trigger updated bandwidth values to be flooded in the IGP TE extensions. MIRRORED\-UP\-DOWN indicates that a single value (or set of values) should be used for both increasing and decreasing values, where SEPARATE\-UP\-DOWN specifies that the increasing and decreasing values will be separately specified
**type**\: :py:class:`ThresholdSpecificationEnum <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.IgpFloodingBandwidth.Config.ThresholdSpecificationEnum>`
.. attribute:: threshold_type
The type of threshold that should be used to specify the values at which bandwidth is flooded. DELTA indicates that the local system should flood IGP updates when a change in reserved bandwidth >= the specified delta occurs on the interface. Where THRESHOLD\-CROSSED is specified, the local system should trigger an update (and hence flood) the reserved bandwidth when the reserved bandwidth changes such that it crosses, or becomes equal to one of the threshold values
**type**\: :py:class:`ThresholdTypeEnum <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.IgpFloodingBandwidth.Config.ThresholdTypeEnum>`
.. attribute:: up_down_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth of the interface) at which bandwidth updates are flooded \- used both when the bandwidth is increasing and decreasing
**type**\: list of int
**range:** 0..100
.. attribute:: up_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth) at which bandwidth updates are to be triggered when the bandwidth is increasing
**type**\: list of int
**range:** 0..100
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.delta_percentage = None
self.down_thresholds = YLeafList()
self.down_thresholds.parent = self
self.down_thresholds.name = 'down_thresholds'
self.threshold_specification = None
self.threshold_type = None
self.up_down_thresholds = YLeafList()
self.up_down_thresholds.parent = self
self.up_down_thresholds.name = 'up_down_thresholds'
self.up_thresholds = YLeafList()
self.up_thresholds.parent = self
self.up_thresholds.name = 'up_thresholds'
class ThresholdSpecificationEnum(Enum):
"""
ThresholdSpecificationEnum
This value specifies whether a single set of threshold
values should be used for both increasing and decreasing
bandwidth when determining whether to trigger updated
bandwidth values to be flooded in the IGP TE extensions.
MIRRORED\-UP\-DOWN indicates that a single value (or set of
values) should be used for both increasing and decreasing
values, where SEPARATE\-UP\-DOWN specifies that the increasing
and decreasing values will be separately specified
.. data:: MIRRORED_UP_DOWN = 0
MIRRORED-UP-DOWN indicates that a single set of
threshold values should be used for both increasing
and decreasing bandwidth when determining whether
to trigger updated bandwidth values to be flooded
in the IGP TE extensions.
.. data:: SEPARATE_UP_DOWN = 1
SEPARATE-UP-DOWN indicates that separate
threshold values should be used for the increasing
and decreasing bandwidth when determining whether
to trigger updated bandwidth values to be flooded
in the IGP TE extensions.
"""
MIRRORED_UP_DOWN = 0
SEPARATE_UP_DOWN = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.IgpFloodingBandwidth.Config.ThresholdSpecificationEnum']
class ThresholdTypeEnum(Enum):
"""
ThresholdTypeEnum
The type of threshold that should be used to specify the
values at which bandwidth is flooded. DELTA indicates that
the local system should flood IGP updates when a change in
reserved bandwidth >= the specified delta occurs on the
interface. Where THRESHOLD\-CROSSED is specified, the local
system should trigger an update (and hence flood) the
reserved bandwidth when the reserved bandwidth changes such
that it crosses, or becomes equal to one of the threshold
values
.. data:: DELTA = 0
DELTA indicates that the local
system should flood IGP updates when a
change in reserved bandwidth >= the specified
delta occurs on the interface.
.. data:: THRESHOLD_CROSSED = 1
THRESHOLD-CROSSED indicates that
the local system should trigger an update (and
hence flood) the reserved bandwidth when the
reserved bandwidth changes such that it crosses,
or becomes equal to one of the threshold values.
"""
DELTA = 0
THRESHOLD_CROSSED = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.IgpFloodingBandwidth.Config.ThresholdTypeEnum']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes/openconfig-mpls:igp-flooding-bandwidth/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.delta_percentage is not None:
return True
if self.down_thresholds is not None:
for child in self.down_thresholds:
if child is not None:
return True
if self.threshold_specification is not None:
return True
if self.threshold_type is not None:
return True
if self.up_down_thresholds is not None:
for child in self.up_down_thresholds:
if child is not None:
return True
if self.up_thresholds is not None:
for child in self.up_thresholds:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.IgpFloodingBandwidth.Config']['meta_info']
class State(object):
"""
State parameters for TED update threshold
.. attribute:: delta_percentage
The percentage of the maximum\-reservable\-bandwidth considered as the delta that results in an IGP update being flooded
**type**\: int
**range:** 0..100
.. attribute:: down_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth) at which bandwidth updates are to be triggered when the bandwidth is decreasing
**type**\: list of int
**range:** 0..100
.. attribute:: threshold_specification
This value specifies whether a single set of threshold values should be used for both increasing and decreasing bandwidth when determining whether to trigger updated bandwidth values to be flooded in the IGP TE extensions. MIRRORED\-UP\-DOWN indicates that a single value (or set of values) should be used for both increasing and decreasing values, where SEPARATE\-UP\-DOWN specifies that the increasing and decreasing values will be separately specified
**type**\: :py:class:`ThresholdSpecificationEnum <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.IgpFloodingBandwidth.State.ThresholdSpecificationEnum>`
.. attribute:: threshold_type
The type of threshold that should be used to specify the values at which bandwidth is flooded. DELTA indicates that the local system should flood IGP updates when a change in reserved bandwidth >= the specified delta occurs on the interface. Where THRESHOLD\-CROSSED is specified, the local system should trigger an update (and hence flood) the reserved bandwidth when the reserved bandwidth changes such that it crosses, or becomes equal to one of the threshold values
**type**\: :py:class:`ThresholdTypeEnum <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.IgpFloodingBandwidth.State.ThresholdTypeEnum>`
.. attribute:: up_down_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth of the interface) at which bandwidth updates are flooded \- used both when the bandwidth is increasing and decreasing
**type**\: list of int
**range:** 0..100
.. attribute:: up_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth) at which bandwidth updates are to be triggered when the bandwidth is increasing
**type**\: list of int
**range:** 0..100
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.delta_percentage = None
self.down_thresholds = YLeafList()
self.down_thresholds.parent = self
self.down_thresholds.name = 'down_thresholds'
self.threshold_specification = None
self.threshold_type = None
self.up_down_thresholds = YLeafList()
self.up_down_thresholds.parent = self
self.up_down_thresholds.name = 'up_down_thresholds'
self.up_thresholds = YLeafList()
self.up_thresholds.parent = self
self.up_thresholds.name = 'up_thresholds'
class ThresholdSpecificationEnum(Enum):
"""
ThresholdSpecificationEnum
This value specifies whether a single set of threshold
values should be used for both increasing and decreasing
bandwidth when determining whether to trigger updated
bandwidth values to be flooded in the IGP TE extensions.
MIRRORED\-UP\-DOWN indicates that a single value (or set of
values) should be used for both increasing and decreasing
values, where SEPARATE\-UP\-DOWN specifies that the increasing
and decreasing values will be separately specified
.. data:: MIRRORED_UP_DOWN = 0
MIRRORED-UP-DOWN indicates that a single set of
threshold values should be used for both increasing
and decreasing bandwidth when determining whether
to trigger updated bandwidth values to be flooded
in the IGP TE extensions.
.. data:: SEPARATE_UP_DOWN = 1
SEPARATE-UP-DOWN indicates that separate
threshold values should be used for the increasing
and decreasing bandwidth when determining whether
to trigger updated bandwidth values to be flooded
in the IGP TE extensions.
"""
MIRRORED_UP_DOWN = 0
SEPARATE_UP_DOWN = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.IgpFloodingBandwidth.State.ThresholdSpecificationEnum']
class ThresholdTypeEnum(Enum):
"""
ThresholdTypeEnum
The type of threshold that should be used to specify the
values at which bandwidth is flooded. DELTA indicates that
the local system should flood IGP updates when a change in
reserved bandwidth >= the specified delta occurs on the
interface. Where THRESHOLD\-CROSSED is specified, the local
system should trigger an update (and hence flood) the
reserved bandwidth when the reserved bandwidth changes such
that it crosses, or becomes equal to one of the threshold
values
.. data:: DELTA = 0
DELTA indicates that the local
system should flood IGP updates when a
change in reserved bandwidth >= the specified
delta occurs on the interface.
.. data:: THRESHOLD_CROSSED = 1
THRESHOLD-CROSSED indicates that
the local system should trigger an update (and
hence flood) the reserved bandwidth when the
reserved bandwidth changes such that it crosses,
or becomes equal to one of the threshold values.
"""
DELTA = 0
THRESHOLD_CROSSED = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.IgpFloodingBandwidth.State.ThresholdTypeEnum']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes/openconfig-mpls:igp-flooding-bandwidth/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.delta_percentage is not None:
return True
if self.down_thresholds is not None:
for child in self.down_thresholds:
if child is not None:
return True
if self.threshold_specification is not None:
return True
if self.threshold_type is not None:
return True
if self.up_down_thresholds is not None:
for child in self.up_down_thresholds:
if child is not None:
return True
if self.up_thresholds is not None:
for child in self.up_thresholds:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.IgpFloodingBandwidth.State']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes/openconfig-mpls:igp-flooding-bandwidth'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.IgpFloodingBandwidth']['meta_info']
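# Editor's illustrative sketch (comments only; threshold values are
# hypothetical examples): flooding TED updates whenever reserved bandwidth
# crosses 25/50/75/100 percent, with one mirrored set for both directions.
#
#   cfg = mpls.te_global_attributes.igp_flooding_bandwidth.config
#   cfg.threshold_type = cfg.ThresholdTypeEnum.THRESHOLD_CROSSED
#   cfg.threshold_specification = cfg.ThresholdSpecificationEnum.MIRRORED_UP_DOWN
#   for pct in (25, 50, 75, 100):
#       cfg.up_down_thresholds.append(pct)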
class MplsAdminGroups(object):
"""
Top\-level container for admin\-groups configuration
and state
.. attribute:: admin_group
configuration of value to name mapping for mpls affinities/admin\-groups
**type**\: list of :py:class:`AdminGroup <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.admin_group = YList()
self.admin_group.parent = self
self.admin_group.name = 'admin_group'
class AdminGroup(object):
"""
configuration of value to name mapping
for mpls affinities/admin\-groups
.. attribute:: admin_group_name <key>
name for mpls admin\-group
**type**\: str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup.Config>`
.. attribute:: config
Configurable items for admin\-groups
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup.Config>`
.. attribute:: state
Operational state for admin\-groups
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.admin_group_name = None
self.config = Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup.Config()
self.config.parent = self
self.state = Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup.State()
self.state.parent = self
class Config(object):
"""
Configurable items for admin\-groups
.. attribute:: admin_group_name
name for mpls admin\-group
**type**\: str
.. attribute:: bit_position
bit\-position value for mpls admin\-group. The value for the admin group is an integer that represents one of the bit positions in the admin\-group bitmask. Values between 0 and 31 are interpreted as the original limit of 32 admin groups. Values >=32 are interpreted as extended admin group values as per RFC7308
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.admin_group_name = None
self.bit_position = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.admin_group_name is not None:
return True
if self.bit_position is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup.Config']['meta_info']
class State(object):
"""
Operational state for admin\-groups
.. attribute:: admin_group_name
name for mpls admin\-group
**type**\: str
.. attribute:: bit_position
bit\-position value for mpls admin\-group. The value for the admin group is an integer that represents one of the bit positions in the admin\-group bitmask. Values between 0 and 31 are interpreted as the original limit of 32 admin groups. Values >=32 are interpreted as extended admin group values as per RFC7308
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.admin_group_name = None
self.bit_position = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.admin_group_name is not None:
return True
if self.bit_position is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup.State']['meta_info']
@property
def _common_path(self):
if self.admin_group_name is None:
raise YPYModelError('Key property admin_group_name is None')
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes/openconfig-mpls:mpls-admin-groups/openconfig-mpls:admin-group[openconfig-mpls:admin-group-name = ' + str(self.admin_group_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.admin_group_name is not None:
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes/openconfig-mpls:mpls-admin-groups'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.admin_group is not None:
for child_ref in self.admin_group:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.MplsAdminGroups']['meta_info']
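# Editor's illustrative sketch (comments only; the group name is a
# hypothetical example): mapping admin-group name 'gold' to bit position 0
# of the admin-group bitmask.
#
#   ag = Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup()
#   ag.admin_group_name = 'gold'
#   ag.config.admin_group_name = 'gold'
#   ag.config.bit_position = 0
#   mpls.te_global_attributes.mpls_admin_groups.admin_group.append(ag)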
class TeLspTimers(object):
"""
Definition for delays associated with setup
and cleanup of TE LSPs
.. attribute:: config
Configuration parameters related to timers for TE LSPs
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.TeLspTimers.Config>`
.. attribute:: state
State related to timers for TE LSPs
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.TeLspTimers.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.TeGlobalAttributes.TeLspTimers.Config()
self.config.parent = self
self.state = Mpls.TeGlobalAttributes.TeLspTimers.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters related
to timers for TE LSPs
.. attribute:: cleanup_delay
delay the removal of old te lsp for a specified amount of time
**type**\: int
**range:** 0..65535
.. attribute:: install_delay
delay the use of newly installed te lsp for a specified amount of time
**type**\: int
**range:** 0..3600
.. attribute:: reoptimize_timer
frequency of reoptimization of a traffic engineered LSP
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.cleanup_delay = None
self.install_delay = None
self.reoptimize_timer = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes/openconfig-mpls:te-lsp-timers/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.cleanup_delay is not None:
return True
if self.install_delay is not None:
return True
if self.reoptimize_timer is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.TeLspTimers.Config']['meta_info']
class State(object):
"""
State related to timers for TE LSPs
.. attribute:: cleanup_delay
delay the removal of old te lsp for a specified amount of time
**type**\: int
**range:** 0..65535
.. attribute:: install_delay
delay the use of newly installed te lsp for a specified amount of time
**type**\: int
**range:** 0..3600
.. attribute:: reoptimize_timer
frequency of reoptimization of a traffic engineered LSP
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.cleanup_delay = None
self.install_delay = None
self.reoptimize_timer = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes/openconfig-mpls:te-lsp-timers/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.cleanup_delay is not None:
return True
if self.install_delay is not None:
return True
if self.reoptimize_timer is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.TeLspTimers.State']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes/openconfig-mpls:te-lsp-timers'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes.TeLspTimers']['meta_info']
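# Editor's illustrative sketch (comments only; the values are examples within
# the documented ranges): damping LSP churn with install and cleanup delays
# plus a periodic reoptimization timer.
#
#   timers = mpls.te_global_attributes.te_lsp_timers
#   timers.config.install_delay = 30       # seconds, range 0..3600
#   timers.config.cleanup_delay = 60       # range 0..65535
#   timers.config.reoptimize_timer = 600   # range 0..65535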
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:te-global-attributes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.igp_flooding_bandwidth is not None and self.igp_flooding_bandwidth._has_data():
return True
if self.mpls_admin_groups is not None and self.mpls_admin_groups._has_data():
return True
if self.srlg is not None and self.srlg._has_data():
return True
if self.te_lsp_timers is not None and self.te_lsp_timers._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeGlobalAttributes']['meta_info']
class TeInterfaceAttributes(object):
"""
traffic engineering attributes specific
for interfaces
.. attribute:: interface
List of TE interfaces
**type**\: list of :py:class:`Interface <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
List of TE interfaces
.. attribute:: name <key>
The interface name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface.Config>`
.. attribute:: config
Configuration parameters related to TE interfaces\:
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface.Config>`
.. attribute:: igp_flooding_bandwidth
Interface bandwidth change percentages that trigger update events into the IGP traffic engineering database (TED)
**type**\: :py:class:`IgpFloodingBandwidth <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth>`
.. attribute:: state
State parameters related to TE interfaces
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.name = None
self.config = Mpls.TeInterfaceAttributes.Interface.Config()
self.config.parent = self
self.igp_flooding_bandwidth = Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth()
self.igp_flooding_bandwidth.parent = self
self.state = Mpls.TeInterfaceAttributes.Interface.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters related to TE interfaces\:
.. attribute:: admin_group
list of admin groups (by name) on the interface
**type**\: list of str
.. attribute:: name
reference to interface name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
.. attribute:: srlg_membership
list of references to named shared risk link groups that the interface belongs to
**type**\: list of str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg.Srlg>`
.. attribute:: te_metric
TE specific metric for the link
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.admin_group = YLeafList()
self.admin_group.parent = self
self.admin_group.name = 'admin_group'
self.name = None
self.srlg_membership = YLeafList()
self.srlg_membership.parent = self
self.srlg_membership.name = 'srlg_membership'
self.te_metric = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.admin_group is not None:
for child in self.admin_group:
if child is not None:
return True
if self.name is not None:
return True
if self.srlg_membership is not None:
for child in self.srlg_membership:
if child is not None:
return True
if self.te_metric is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes.Interface.Config']['meta_info']
class State(object):
"""
State parameters related to TE interfaces
.. attribute:: admin_group
list of admin groups (by name) on the interface
**type**\: list of str
.. attribute:: name
reference to interface name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
.. attribute:: srlg_membership
list of references to named shared risk link groups that the interface belongs to
**type**\: list of str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.Srlg.Srlg>`
.. attribute:: te_metric
TE specific metric for the link
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.admin_group = YLeafList()
self.admin_group.parent = self
self.admin_group.name = 'admin_group'
self.name = None
self.srlg_membership = YLeafList()
self.srlg_membership.parent = self
self.srlg_membership.name = 'srlg_membership'
self.te_metric = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.admin_group is not None:
for child in self.admin_group:
if child is not None:
return True
if self.name is not None:
return True
if self.srlg_membership is not None:
for child in self.srlg_membership:
if child is not None:
return True
if self.te_metric is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes.Interface.State']['meta_info']
class IgpFloodingBandwidth(object):
"""
Interface bandwidth change percentages
that trigger update events into the IGP traffic
engineering database (TED)
.. attribute:: config
Configuration parameters for TED update threshold
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.Config>`
.. attribute:: state
State parameters for TED update threshold
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.Config()
self.config.parent = self
self.state = Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters for TED
update threshold
.. attribute:: delta_percentage
The percentage of the maximum\-reservable\-bandwidth considered as the delta that results in an IGP update being flooded
**type**\: int
**range:** 0..100
.. attribute:: down_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth) at which bandwidth updates are to be triggered when the bandwidth is decreasing
**type**\: list of int
**range:** 0..100
.. attribute:: threshold_specification
This value specifies whether a single set of threshold values should be used for both increasing and decreasing bandwidth when determining whether to trigger updated bandwidth values to be flooded in the IGP TE extensions. MIRRORED\-UP\-DOWN indicates that a single value (or set of values) should be used for both increasing and decreasing values, whereas SEPARATE\-UP\-DOWN specifies that the increasing and decreasing values will be separately specified
**type**\: :py:class:`ThresholdSpecificationEnum <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.Config.ThresholdSpecificationEnum>`
.. attribute:: threshold_type
The type of threshold that should be used to specify the values at which bandwidth is flooded. DELTA indicates that the local system should flood IGP updates when a change in reserved bandwidth >= the specified delta occurs on the interface. Where THRESHOLD\-CROSSED is specified, the local system should trigger an update (and hence flood the reserved bandwidth) when the reserved bandwidth changes such that it crosses, or becomes equal to, one of the threshold values
**type**\: :py:class:`ThresholdTypeEnum <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.Config.ThresholdTypeEnum>`
.. attribute:: up_down_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth of the interface) at which bandwidth updates are flooded \- used both when the bandwidth is increasing and decreasing
**type**\: list of int
**range:** 0..100
.. attribute:: up_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth) at which bandwidth updates are to be triggered when the bandwidth is increasing
**type**\: list of int
**range:** 0..100
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.delta_percentage = None
self.down_thresholds = YLeafList()
self.down_thresholds.parent = self
self.down_thresholds.name = 'down_thresholds'
self.threshold_specification = None
self.threshold_type = None
self.up_down_thresholds = YLeafList()
self.up_down_thresholds.parent = self
self.up_down_thresholds.name = 'up_down_thresholds'
self.up_thresholds = YLeafList()
self.up_thresholds.parent = self
self.up_thresholds.name = 'up_thresholds'
class ThresholdSpecificationEnum(Enum):
"""
ThresholdSpecificationEnum
This value specifies whether a single set of threshold
values should be used for both increasing and decreasing
bandwidth when determining whether to trigger updated
bandwidth values to be flooded in the IGP TE extensions.
MIRRORED\-UP\-DOWN indicates that a single value (or set of
values) should be used for both increasing and decreasing
values, whereas SEPARATE\-UP\-DOWN specifies that the increasing
and decreasing values will be separately specified
.. data:: MIRRORED_UP_DOWN = 0
MIRRORED-UP-DOWN indicates that a single set of
threshold values should be used for both increasing
and decreasing bandwidth when determining whether
to trigger updated bandwidth values to be flooded
in the IGP TE extensions.
.. data:: SEPARATE_UP_DOWN = 1
SEPARATE-UP-DOWN indicates that separate
threshold values should be used for the increasing
and decreasing bandwidth when determining whether
to trigger updated bandwidth values to be flooded
in the IGP TE extensions.
"""
MIRRORED_UP_DOWN = 0
SEPARATE_UP_DOWN = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.Config.ThresholdSpecificationEnum']
class ThresholdTypeEnum(Enum):
"""
ThresholdTypeEnum
The type of threshold that should be used to specify the
values at which bandwidth is flooded. DELTA indicates that
the local system should flood IGP updates when a change in
reserved bandwidth >= the specified delta occurs on the
interface. Where THRESHOLD\-CROSSED is specified, the local
system should trigger an update (and hence flood the
reserved bandwidth) when the reserved bandwidth changes such
that it crosses, or becomes equal to, one of the threshold
values
.. data:: DELTA = 0
DELTA indicates that the local
system should flood IGP updates when a
change in reserved bandwidth >= the specified
delta occurs on the interface.
.. data:: THRESHOLD_CROSSED = 1
THRESHOLD-CROSSED indicates that
the local system should trigger an update (and
hence flood the reserved bandwidth) when the
reserved bandwidth changes such that it crosses,
or becomes equal to, one of the threshold values.
"""
DELTA = 0
THRESHOLD_CROSSED = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.Config.ThresholdTypeEnum']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.delta_percentage is not None:
return True
if self.down_thresholds is not None:
for child in self.down_thresholds:
if child is not None:
return True
if self.threshold_specification is not None:
return True
if self.threshold_type is not None:
return True
if self.up_down_thresholds is not None:
for child in self.up_down_thresholds:
if child is not None:
return True
if self.up_thresholds is not None:
for child in self.up_thresholds:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.Config']['meta_info']
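# A hedged usage sketch: configuring IGP flooding thresholds, reusing the
# 'intf' Interface instance from the sketch above. With a DELTA threshold
# type, a TED update is flooded whenever the reserved bandwidth changes by
# at least delta_percentage; the value 10 is illustrative only.
#
#   IgpCfg = Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.Config
#   cfg = intf.igp_flooding_bandwidth.config
#   cfg.threshold_type = IgpCfg.ThresholdTypeEnum.DELTA
#   cfg.delta_percentage = 10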
class State(object):
"""
State parameters for TED update threshold
.. attribute:: delta_percentage
The percentage of the maximum\-reservable\-bandwidth considered as the delta that results in an IGP update being flooded
**type**\: int
**range:** 0..100
.. attribute:: down_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth) at which bandwidth updates are to be triggered when the bandwidth is decreasing
**type**\: list of int
**range:** 0..100
.. attribute:: threshold_specification
This value specifies whether a single set of threshold values should be used for both increasing and decreasing bandwidth when determining whether to trigger updated bandwidth values to be flooded in the IGP TE extensions. MIRRORED\-UP\-DOWN indicates that a single value (or set of values) should be used for both increasing and decreasing values, whereas SEPARATE\-UP\-DOWN specifies that the increasing and decreasing values will be separately specified
**type**\: :py:class:`ThresholdSpecificationEnum <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.State.ThresholdSpecificationEnum>`
.. attribute:: threshold_type
The type of threshold that should be used to specify the values at which bandwidth is flooded. DELTA indicates that the local system should flood IGP updates when a change in reserved bandwidth >= the specified delta occurs on the interface. Where THRESHOLD\-CROSSED is specified, the local system should trigger an update (and hence flood the reserved bandwidth) when the reserved bandwidth changes such that it crosses, or becomes equal to, one of the threshold values
**type**\: :py:class:`ThresholdTypeEnum <ydk.models.openconfig.openconfig_mpls.Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.State.ThresholdTypeEnum>`
.. attribute:: up_down_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth of the interface) at which bandwidth updates are flooded \- used both when the bandwidth is increasing and decreasing
**type**\: list of int
**range:** 0..100
.. attribute:: up_thresholds
The thresholds (expressed as a percentage of the maximum reservable bandwidth) at which bandwidth updates are to be triggered when the bandwidth is increasing
**type**\: list of int
**range:** 0..100
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.delta_percentage = None
self.down_thresholds = YLeafList()
self.down_thresholds.parent = self
self.down_thresholds.name = 'down_thresholds'
self.threshold_specification = None
self.threshold_type = None
self.up_down_thresholds = YLeafList()
self.up_down_thresholds.parent = self
self.up_down_thresholds.name = 'up_down_thresholds'
self.up_thresholds = YLeafList()
self.up_thresholds.parent = self
self.up_thresholds.name = 'up_thresholds'
class ThresholdSpecificationEnum(Enum):
"""
ThresholdSpecificationEnum
This value specifies whether a single set of threshold
values should be used for both increasing and decreasing
bandwidth when determining whether to trigger updated
bandwidth values to be flooded in the IGP TE extensions.
MIRRORED\-UP\-DOWN indicates that a single value (or set of
values) should be used for both increasing and decreasing
values, whereas SEPARATE\-UP\-DOWN specifies that the increasing
and decreasing values will be separately specified
.. data:: MIRRORED_UP_DOWN = 0
MIRRORED-UP-DOWN indicates that a single set of
threshold values should be used for both increasing
and decreasing bandwidth when determining whether
to trigger updated bandwidth values to be flooded
in the IGP TE extensions.
.. data:: SEPARATE_UP_DOWN = 1
SEPARATE-UP-DOWN indicates that separate
threshold values should be used for the increasing
and decreasing bandwidth when determining whether
to trigger updated bandwidth values to be flooded
in the IGP TE extensions.
"""
MIRRORED_UP_DOWN = 0
SEPARATE_UP_DOWN = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.State.ThresholdSpecificationEnum']
class ThresholdTypeEnum(Enum):
"""
ThresholdTypeEnum
The type of threshold that should be used to specify the
values at which bandwidth is flooded. DELTA indicates that
the local system should flood IGP updates when a change in
reserved bandwidth >= the specified delta occurs on the
interface. Where THRESHOLD\-CROSSED is specified, the local
system should trigger an update (and hence flood the
reserved bandwidth) when the reserved bandwidth changes such
that it crosses, or becomes equal to, one of the threshold
values
.. data:: DELTA = 0
DELTA indicates that the local
system should flood IGP updates when a
change in reserved bandwidth >= the specified
delta occurs on the interface.
.. data:: THRESHOLD_CROSSED = 1
THRESHOLD-CROSSED indicates that
the local system should trigger an update (and
hence flood the reserved bandwidth) when the
reserved bandwidth changes such that it crosses,
or becomes equal to, one of the threshold values.
"""
DELTA = 0
THRESHOLD_CROSSED = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.State.ThresholdTypeEnum']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.delta_percentage is not None:
return True
if self.down_thresholds is not None:
for child in self.down_thresholds:
if child is not None:
return True
if self.threshold_specification is not None:
return True
if self.threshold_type is not None:
return True
if self.up_down_thresholds is not None:
for child in self.up_down_thresholds:
if child is not None:
return True
if self.up_thresholds is not None:
for child in self.up_thresholds:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:igp-flooding-bandwidth'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes.Interface.IgpFloodingBandwidth']['meta_info']
@property
def _common_path(self):
if self.name is None:
raise YPYModelError('Key property name is None')
return '/openconfig-mpls:mpls/openconfig-mpls:te-interface-attributes/openconfig-mpls:interface[openconfig-mpls:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.config is not None and self.config._has_data():
return True
if self.igp_flooding_bandwidth is not None and self.igp_flooding_bandwidth._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes.Interface']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:te-interface-attributes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.TeInterfaceAttributes']['meta_info']
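# A hedged usage sketch: reading back the te-interface-attributes subtree
# (both config and state containers), reusing 'crud' and 'provider' from the
# first sketch above. Attribute names follow the generated Mpls class.
#
#   mpls_oper = crud.read(provider, Mpls())
#   for i in mpls_oper.te_interface_attributes.interface:
#       print(i.name, i.state.te_metric)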
class SignalingProtocols(object):
"""
top\-level signaling protocol configuration
.. attribute:: ldp
LDP global signaling configuration
**type**\: :py:class:`Ldp <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.Ldp>`
.. attribute:: rsvp_te
RSVP\-TE global signaling protocol configuration
**type**\: :py:class:`RsvpTe <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe>`
.. attribute:: segment_routing
SR global signaling config
**type**\: :py:class:`SegmentRouting <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.ldp = Mpls.SignalingProtocols.Ldp()
self.ldp.parent = self
self.rsvp_te = Mpls.SignalingProtocols.RsvpTe()
self.rsvp_te.parent = self
self.segment_routing = Mpls.SignalingProtocols.SegmentRouting()
self.segment_routing.parent = self
class RsvpTe(object):
"""
RSVP\-TE global signaling protocol configuration
.. attribute:: global_
Platform wide RSVP configuration and state
**type**\: :py:class:`Global <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global>`
.. attribute:: interface_attributes
Attributes relating to RSVP\-TE enabled interfaces
**type**\: :py:class:`InterfaceAttributes <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes>`
.. attribute:: neighbors
Configuration and state for RSVP neighbors connecting to the device
**type**\: :py:class:`Neighbors <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Neighbors>`
.. attribute:: sessions
Configuration and state of RSVP sessions
**type**\: :py:class:`Sessions <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Sessions>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.global_ = Mpls.SignalingProtocols.RsvpTe.Global()
self.global_.parent = self
self.interface_attributes = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes()
self.interface_attributes.parent = self
self.neighbors = Mpls.SignalingProtocols.RsvpTe.Neighbors()
self.neighbors.parent = self
self.sessions = Mpls.SignalingProtocols.RsvpTe.Sessions()
self.sessions.parent = self
class Sessions(object):
"""
Configuration and state of RSVP sessions
.. attribute:: config
Configuration of RSVP sessions on the device
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Sessions.Config>`
.. attribute:: state
State information relating to RSVP sessions on the device
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Sessions.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.SignalingProtocols.RsvpTe.Sessions.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.Sessions.State()
self.state.parent = self
class Config(object):
"""
Configuration of RSVP sessions on the device
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:sessions/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Sessions.Config']['meta_info']
class State(object):
"""
State information relating to RSVP sessions
on the device
.. attribute:: session
List of RSVP sessions
**type**\: list of :py:class:`Session <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Sessions.State.Session>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.session = YList()
self.session.parent = self
self.session.name = 'session'
class Session(object):
"""
List of RSVP sessions
.. attribute:: destination_address <key>
Destination address of RSVP session
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: destination_port <key>
RSVP destination port
**type**\: int
**range:** 0..65535
.. attribute:: source_address <key>
Origin address of RSVP session
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: source_port <key>
RSVP source port
**type**\: int
**range:** 0..65535
.. attribute:: associated_lsps
List of label switched paths associated with this RSVP session
**type**\: list of str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Config>`
.. attribute:: label_in
Incoming MPLS label associated with this RSVP session
**type**\: one of the below types:
**type**\: int
**range:** 16..1048575
----
**type**\: :py:class:`MplsLabelEnum <ydk.models.openconfig.openconfig_mpls_types.MplsLabelEnum>`
----
.. attribute:: label_out
Outgoing MPLS label associated with this RSVP session
**type**\: one of the below types:
**type**\: int
**range:** 16..1048575
----
**type**\: :py:class:`MplsLabelEnum <ydk.models.openconfig.openconfig_mpls_types.MplsLabelEnum>`
----
.. attribute:: status
Enumeration of RSVP session states
**type**\: :py:class:`StatusEnum <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Sessions.State.Session.StatusEnum>`
.. attribute:: tunnel_id
Unique identifier of RSVP session
**type**\: int
**range:** 0..65535
.. attribute:: type
Enumeration of possible RSVP session types
**type**\: :py:class:`TypeEnum <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Sessions.State.Session.TypeEnum>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.destination_address = None
self.destination_port = None
self.source_address = None
self.source_port = None
self.associated_lsps = YLeafList()
self.associated_lsps.parent = self
self.associated_lsps.name = 'associated_lsps'
self.label_in = None
self.label_out = None
self.status = None
self.tunnel_id = None
self.type = None
class StatusEnum(Enum):
"""
StatusEnum
Enumeration of RSVP session states
.. data:: UP = 0
RSVP session is up
.. data:: DOWN = 1
RSVP session is down
"""
UP = 0
DOWN = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Sessions.State.Session.StatusEnum']
class TypeEnum(Enum):
"""
TypeEnum
Enumeration of possible RSVP session types
.. data:: SOURCE = 0
RSVP session originates on this device
.. data:: TRANSIT = 1
RSVP session transits this device only
.. data:: DESTINATION = 2
RSVP session terminates on this device
"""
SOURCE = 0
TRANSIT = 1
DESTINATION = 2
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Sessions.State.Session.TypeEnum']
@property
def _common_path(self):
if self.destination_address is None:
raise YPYModelError('Key property destination_address is None')
if self.destination_port is None:
raise YPYModelError('Key property destination_port is None')
if self.source_address is None:
raise YPYModelError('Key property source_address is None')
if self.source_port is None:
raise YPYModelError('Key property source_port is None')
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:sessions/openconfig-mpls:state/openconfig-mpls:session[openconfig-mpls:destination-address = ' + str(self.destination_address) + '][openconfig-mpls:destination-port = ' + str(self.destination_port) + '][openconfig-mpls:source-address = ' + str(self.source_address) + '][openconfig-mpls:source-port = ' + str(self.source_port) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.destination_address is not None:
return True
if self.destination_port is not None:
return True
if self.source_address is not None:
return True
if self.source_port is not None:
return True
if self.associated_lsps is not None:
for child in self.associated_lsps:
if child is not None:
return True
if self.label_in is not None:
return True
if self.label_out is not None:
return True
if self.status is not None:
return True
if self.tunnel_id is not None:
return True
if self.type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Sessions.State.Session']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:sessions/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.session is not None:
for child_ref in self.session:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Sessions.State']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:sessions'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Sessions']['meta_info']
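# A hedged usage sketch: RSVP sessions are state-only data here (the Sessions
# Config container above is empty), so they are read rather than created.
# Reuses 'crud' and 'provider' from the first sketch.
#
#   mpls_oper = crud.read(provider, Mpls())
#   for s in mpls_oper.signaling_protocols.rsvp_te.sessions.state.session:
#       print(s.source_address, s.destination_address, s.tunnel_id, s.status)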
class Neighbors(object):
"""
Configuration and state for RSVP neighbors connecting
to the device
.. attribute:: config
Configuration of RSVP neighbor information
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Neighbors.Config>`
.. attribute:: state
State information relating to RSVP neighbors
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Neighbors.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.SignalingProtocols.RsvpTe.Neighbors.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.Neighbors.State()
self.state.parent = self
class Config(object):
"""
Configuration of RSVP neighbor information
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:neighbors/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Neighbors.Config']['meta_info']
class State(object):
"""
State information relating to RSVP neighbors
.. attribute:: neighbor
List of RSVP neighbors connecting to the device, keyed by neighbor address
**type**\: list of :py:class:`Neighbor <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Neighbors.State.Neighbor>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.neighbor = YList()
self.neighbor.parent = self
self.neighbor.name = 'neighbor'
class Neighbor(object):
"""
List of RSVP neighbors connecting to the device,
keyed by neighbor address
.. attribute:: address <key>
Address of RSVP neighbor
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: detected_interface
Interface where RSVP neighbor was detected
**type**\: str
.. attribute:: neighbor_status
Enumeration of possible RSVP neighbor states
**type**\: :py:class:`NeighborStatusEnum <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Neighbors.State.Neighbor.NeighborStatusEnum>`
.. attribute:: refresh_reduction
Support of neighbor for RSVP refresh reduction
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.address = None
self.detected_interface = None
self.neighbor_status = None
self.refresh_reduction = None
class NeighborStatusEnum(Enum):
"""
NeighborStatusEnum
Enumeration of possible RSVP neighbor states
.. data:: UP = 0
RSVP hello messages are detected from the neighbor
.. data:: DOWN = 1
RSVP neighbor not detected as up, due to a
communication failure or an IGP notification
that the neighbor is unavailable
"""
UP = 0
DOWN = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Neighbors.State.Neighbor.NeighborStatusEnum']
@property
def _common_path(self):
if self.address is None:
raise YPYModelError('Key property address is None')
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:neighbors/openconfig-mpls:state/openconfig-mpls:neighbor[openconfig-mpls:address = ' + str(self.address) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.address is not None:
return True
if self.detected_interface is not None:
return True
if self.neighbor_status is not None:
return True
if self.refresh_reduction is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Neighbors.State.Neighbor']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:neighbors/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.neighbor is not None:
for child_ref in self.neighbor:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Neighbors.State']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:neighbors'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Neighbors']['meta_info']
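# A hedged usage sketch: listing detected RSVP neighbors from operational
# state, reusing 'mpls_oper' from the sessions sketch above.
#
#   for n in mpls_oper.signaling_protocols.rsvp_te.neighbors.state.neighbor:
#       print(n.address, n.detected_interface, n.neighbor_status)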
class Global(object):
"""
Platform wide RSVP configuration and state
.. attribute:: graceful_restart
Operational state and configuration parameters relating to graceful\-restart for RSVP
**type**\: :py:class:`GracefulRestart <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.GracefulRestart>`
.. attribute:: hellos
Top level container for RSVP hello parameters
**type**\: :py:class:`Hellos <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.Hellos>`
.. attribute:: soft_preemption
Protocol options relating to RSVP soft preemption
**type**\: :py:class:`SoftPreemption <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.SoftPreemption>`
.. attribute:: state
Platform wide RSVP state, including counters
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.graceful_restart = Mpls.SignalingProtocols.RsvpTe.Global.GracefulRestart()
self.graceful_restart.parent = self
self.hellos = Mpls.SignalingProtocols.RsvpTe.Global.Hellos()
self.hellos.parent = self
self.soft_preemption = Mpls.SignalingProtocols.RsvpTe.Global.SoftPreemption()
self.soft_preemption.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.Global.State()
self.state.parent = self
class GracefulRestart(object):
"""
Operational state and configuration parameters relating to
graceful\-restart for RSVP
.. attribute:: config
Configuration parameters relating to graceful\-restart
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.GracefulRestart.Config>`
.. attribute:: state
State information associated with RSVP graceful\-restart
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.GracefulRestart.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.SignalingProtocols.RsvpTe.Global.GracefulRestart.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.Global.GracefulRestart.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to
graceful\-restart
.. attribute:: enable
Enables graceful restart on the node
**type**\: bool
.. attribute:: recovery_time
RSVP state recovery time
**type**\: int
**range:** 0..4294967295
.. attribute:: restart_time
Graceful restart time (seconds)
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.enable = None
self.recovery_time = None
self.restart_time = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:graceful-restart/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.enable is not None:
return True
if self.recovery_time is not None:
return True
if self.restart_time is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.GracefulRestart.Config']['meta_info']
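# A hedged usage sketch: enabling RSVP graceful restart. Note the generated
# attribute is 'global_' (trailing underscore) because 'global' is a Python
# keyword. The timer values below are illustrative only. Reuses 'mpls',
# 'crud', and 'provider' from the first sketch.
#
#   gr = mpls.signaling_protocols.rsvp_te.global_.graceful_restart.config
#   gr.enable = True
#   gr.restart_time = 120
#   gr.recovery_time = 120
#   crud.create(provider, mpls)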
class State(object):
"""
State information associated with
RSVP graceful\-restart
.. attribute:: enable
Enables graceful restart on the node
**type**\: bool
.. attribute:: recovery_time
RSVP state recovery time
**type**\: int
**range:** 0..4294967295
.. attribute:: restart_time
Graceful restart time (seconds)
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.enable = None
self.recovery_time = None
self.restart_time = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:graceful-restart/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.enable is not None:
return True
if self.recovery_time is not None:
return True
if self.restart_time is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.GracefulRestart.State']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:graceful-restart'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.GracefulRestart']['meta_info']
class SoftPreemption(object):
"""
Protocol options relating to RSVP
soft preemption
.. attribute:: config
Configuration parameters relating to RSVP soft preemption support
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.SoftPreemption.Config>`
.. attribute:: state
State parameters relating to RSVP soft preemption support
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.SoftPreemption.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.SignalingProtocols.RsvpTe.Global.SoftPreemption.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.Global.SoftPreemption.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to RSVP
soft preemption support
.. attribute:: enable
Enables soft preemption on a node
**type**\: bool
.. attribute:: soft_preemption_timeout
Timeout value for soft preemption to revert to hard preemption
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.enable = None
self.soft_preemption_timeout = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:soft-preemption/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.enable is not None:
return True
if self.soft_preemption_timeout is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.SoftPreemption.Config']['meta_info']
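# A hedged usage sketch: enabling soft preemption with a timeout (seconds
# before reverting to hard preemption); the value is illustrative only.
#
#   sp = mpls.signaling_protocols.rsvp_te.global_.soft_preemption.config
#   sp.enable = True
#   sp.soft_preemption_timeout = 30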
class State(object):
"""
State parameters relating to RSVP
soft preemption support
.. attribute:: enable
Enables soft preemption on a node
**type**\: bool
.. attribute:: soft_preemption_timeout
Timeout value for soft preemption to revert to hard preemption
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.enable = None
self.soft_preemption_timeout = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:soft-preemption/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.enable is not None:
return True
if self.soft_preemption_timeout is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.SoftPreemption.State']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:soft-preemption'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.SoftPreemption']['meta_info']
class Hellos(object):
"""
Top level container for RSVP hello parameters
.. attribute:: config
Configuration parameters relating to RSVP hellos
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.Hellos.Config>`
.. attribute:: state
State information associated with RSVP hellos
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.Hellos.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.SignalingProtocols.RsvpTe.Global.Hellos.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.Global.Hellos.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to RSVP
hellos
.. attribute:: hello_interval
set the interval in ms between RSVP hello messages
**type**\: int
**range:** 1000..60000
.. attribute:: refresh_reduction
enables all RSVP refresh reduction message bundling, RSVP message ID, reliable message delivery and summary refresh
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.hello_interval = None
self.refresh_reduction = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:hellos/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.hello_interval is not None:
return True
if self.refresh_reduction is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.Hellos.Config']['meta_info']
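# A hedged usage sketch: tuning RSVP hellos and enabling refresh reduction.
# hello_interval is in milliseconds (valid range 1000..60000); 9000 is an
# illustrative value.
#
#   hellos = mpls.signaling_protocols.rsvp_te.global_.hellos.config
#   hellos.hello_interval = 9000
#   hellos.refresh_reduction = True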
class State(object):
"""
State information associated with RSVP hellos
.. attribute:: hello_interval
set the interval in ms between RSVP hello messages
**type**\: int
**range:** 1000..60000
.. attribute:: refresh_reduction
enables all RSVP refresh reduction message bundling, RSVP message ID, reliable message delivery and summary refresh
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.hello_interval = None
self.refresh_reduction = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:hellos/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.hello_interval is not None:
return True
if self.refresh_reduction is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.Hellos.State']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:hellos'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.Hellos']['meta_info']
class State(object):
"""
Platform wide RSVP state, including counters
.. attribute:: counters
Platform wide RSVP statistics and counters
**type**\: :py:class:`Counters <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.Global.State.Counters>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.counters = Mpls.SignalingProtocols.RsvpTe.Global.State.Counters()
self.counters.parent = self
class Counters(object):
"""
Platform wide RSVP statistics and counters
.. attribute:: in_ack_messages
Number of received RSVP refresh reduction ack messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_hello_messages
Number of received RSVP hello messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_path_error_messages
Number of received RSVP Path Error messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_path_messages
Number of received RSVP Path messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_path_tear_messages
Number of received RSVP Path Tear messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_reservation_error_messages
Number of received RSVP Resv Error messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_reservation_messages
Number of received RSVP Resv messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_reservation_tear_messages
Number of received RSVP Resv Tear messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_srefresh_messages
Number of received RSVP summary refresh messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_ack_messages
Number of sent RSVP refresh reduction ack messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_hello_messages
Number of sent RSVP hello messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_path_error_messages
Number of sent RSVP Path Error messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_path_messages
Number of sent RSVP PATH messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_path_tear_messages
Number of sent RSVP Path Tear messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_reservation_error_messages
Number of sent RSVP Resv Error messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_reservation_messages
Number of sent RSVP Resv messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_reservation_tear_messages
Number of sent RSVP Resv Tear messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_srefresh_messages
Number of sent RSVP summary refresh messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: path_timeouts
TODO
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: rate_limited_messages
RSVP messages dropped due to rate limiting
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: reservation_timeouts
TODO
**type**\: long
**range:** 0..18446744073709551615
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.in_ack_messages = None
self.in_hello_messages = None
self.in_path_error_messages = None
self.in_path_messages = None
self.in_path_tear_messages = None
self.in_reservation_error_messages = None
self.in_reservation_messages = None
self.in_reservation_tear_messages = None
self.in_srefresh_messages = None
self.out_ack_messages = None
self.out_hello_messages = None
self.out_path_error_messages = None
self.out_path_messages = None
self.out_path_tear_messages = None
self.out_reservation_error_messages = None
self.out_reservation_messages = None
self.out_reservation_tear_messages = None
self.out_srefresh_messages = None
self.path_timeouts = None
self.rate_limited_messages = None
self.reservation_timeouts = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:state/openconfig-mpls:counters'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.in_ack_messages is not None:
return True
if self.in_hello_messages is not None:
return True
if self.in_path_error_messages is not None:
return True
if self.in_path_messages is not None:
return True
if self.in_path_tear_messages is not None:
return True
if self.in_reservation_error_messages is not None:
return True
if self.in_reservation_messages is not None:
return True
if self.in_reservation_tear_messages is not None:
return True
if self.in_srefresh_messages is not None:
return True
if self.out_ack_messages is not None:
return True
if self.out_hello_messages is not None:
return True
if self.out_path_error_messages is not None:
return True
if self.out_path_messages is not None:
return True
if self.out_path_tear_messages is not None:
return True
if self.out_reservation_error_messages is not None:
return True
if self.out_reservation_messages is not None:
return True
if self.out_reservation_tear_messages is not None:
return True
if self.out_srefresh_messages is not None:
return True
if self.path_timeouts is not None:
return True
if self.rate_limited_messages is not None:
return True
if self.reservation_timeouts is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.State.Counters']['meta_info']
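# A hedged usage sketch: dumping platform-wide RSVP message counters from
# operational state, reusing 'mpls_oper' from an earlier read sketch.
#
#   c = mpls_oper.signaling_protocols.rsvp_te.global_.state.counters
#   print(c.in_path_messages, c.out_path_messages, c.rate_limited_messages)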
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.counters is not None and self.counters._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global.State']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:global'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.graceful_restart is not None and self.graceful_restart._has_data():
return True
if self.hellos is not None and self.hellos._has_data():
return True
if self.soft_preemption is not None and self.soft_preemption._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.Global']['meta_info']
class InterfaceAttributes(object):
"""
Attributes relating to RSVP\-TE enabled interfaces
.. attribute:: interface
list of per\-interface RSVP configurations
**type**\: list of :py:class:`Interface <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
list of per\-interface RSVP configurations
.. attribute:: interface_name <key>
references a configured IP interface
**type**\: str
**refers to**\: :py:class:`interface_name <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Config>`
.. attribute:: authentication
Configuration and state parameters relating to RSVP authentication as per RFC2747
**type**\: :py:class:`Authentication <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Authentication>`
.. attribute:: config
Configuration of per\-interface RSVP parameters
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Config>`
.. attribute:: hellos
Top level container for RSVP hello parameters
**type**\: :py:class:`Hellos <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Hellos>`
.. attribute:: protection
link\-protection (NHOP) related configuration
**type**\: :py:class:`Protection <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Protection>`
.. attribute:: state
Per\-interface RSVP protocol and state information
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.State>`
.. attribute:: subscription
Bandwidth percentage reservable by RSVP on an interface
**type**\: :py:class:`Subscription <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Subscription>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.interface_name = None
self.authentication = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Authentication()
self.authentication.parent = self
self.config = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Config()
self.config.parent = self
self.hellos = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Hellos()
self.hellos.parent = self
self.protection = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Protection()
self.protection.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.State()
self.state.parent = self
self.subscription = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Subscription()
self.subscription.parent = self
class Config(object):
"""
Configuration of per\-interface RSVP parameters
.. attribute:: interface_name
Name of configured IP interface
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.interface_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Config']['meta_info']
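# A hedged usage sketch: enabling RSVP-TE on an interface by adding an entry
# to the interface-attributes list; the interface name is a placeholder.
# Reuses 'mpls', 'crud', and 'provider' from the first sketch.
#
#   rsvp_if = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface()
#   rsvp_if.interface_name = 'GigabitEthernet0/0/0/0'
#   rsvp_if.config.interface_name = rsvp_if.interface_name
#   mpls.signaling_protocols.rsvp_te.interface_attributes.interface.append(rsvp_if)
#   crud.create(provider, mpls)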
class State(object):
"""
Per\-interface RSVP protocol and state information
.. attribute:: active_reservation_count
Number of active RSVP reservations
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: bandwidth
Available and reserved bandwidth by priority on the interface
**type**\: list of :py:class:`Bandwidth <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.State.Bandwidth>`
.. attribute:: counters
Interface specific RSVP statistics and counters
**type**\: :py:class:`Counters <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.State.Counters>`
.. attribute:: highwater_mark
Maximum bandwidth ever reserved
**type**\: long
**range:** 0..18446744073709551615
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.active_reservation_count = None
self.bandwidth = YList()
self.bandwidth.parent = self
self.bandwidth.name = 'bandwidth'
self.counters = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.State.Counters()
self.counters.parent = self
self.highwater_mark = None
class Bandwidth(object):
"""
Available and reserved bandwidth by priority on
the interface.
.. attribute:: priority <key>
RSVP priority level for LSPs traversing the interface
**type**\: int
**range:** 0..7
.. attribute:: available_bandwidth
Bandwidth currently available
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: reserved_bandwidth
Bandwidth currently reserved
**type**\: long
**range:** 0..18446744073709551615
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.priority = None
self.available_bandwidth = None
self.reserved_bandwidth = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.priority is None:
raise YPYModelError('Key property priority is None')
return self.parent._common_path +'/openconfig-mpls:bandwidth[openconfig-mpls:priority = ' + str(self.priority) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.priority is not None:
return True
if self.available_bandwidth is not None:
return True
if self.reserved_bandwidth is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.State.Bandwidth']['meta_info']
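# ------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the generated file):
# reading the per-priority bandwidth state modelled above. Assumes the
# CRUDService/NetconfServiceProvider APIs of early ydk-py releases; the
# device address, credentials and interface contents are hypothetical.
# ------------------------------------------------------------------
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.openconfig import openconfig_mpls

provider = NetconfServiceProvider(address='192.0.2.1', port=830,
                                  username='admin', password='admin',
                                  protocol='ssh')
crud = CRUDService()
# Read the operational MPLS tree and walk the RSVP-TE interface list.
mpls_oper = crud.read(provider, openconfig_mpls.Mpls())
for rsvp_iface in mpls_oper.signaling_protocols.rsvp_te.interface_attributes.interface:
    for bw in rsvp_iface.state.bandwidth:  # one list entry per priority 0..7
        print(rsvp_iface.interface_name, bw.priority,
              bw.available_bandwidth, bw.reserved_bandwidth)
# ------------------------------------------------------------------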
class Counters(object):
"""
Interface specific RSVP statistics and counters
.. attribute:: in_ack_messages
Number of received RSVP refresh reduction ack messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_hello_messages
Number of received RSVP hello messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_path_error_messages
Number of received RSVP Path Error messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_path_messages
Number of received RSVP Path messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_path_tear_messages
Number of received RSVP Path Tear messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_reservation_error_messages
Number of received RSVP Resv Error messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_reservation_messages
Number of received RSVP Resv messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_reservation_tear_messages
Number of received RSVP Resv Tear messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: in_srefresh_messages
Number of received RSVP summary refresh messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_ack_messages
Number of sent RSVP refresh reduction ack messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_hello_messages
Number of sent RSVP hello messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_path_error_messages
Number of sent RSVP Path Error messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_path_messages
Number of sent RSVP PATH messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_path_tear_messages
Number of sent RSVP Path Tear messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_reservation_error_messages
Number of sent RSVP Resv Error messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_reservation_messages
Number of sent RSVP Resv messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_reservation_tear_messages
Number of sent RSVP Resv Tear messages
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: out_srefresh_messages
Number of sent RSVP summary refresh messages
**type**\: long
**range:** 0..18446744073709551615
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.in_ack_messages = None
self.in_hello_messages = None
self.in_path_error_messages = None
self.in_path_messages = None
self.in_path_tear_messages = None
self.in_reservation_error_messages = None
self.in_reservation_messages = None
self.in_reservation_tear_messages = None
self.in_srefresh_messages = None
self.out_ack_messages = None
self.out_hello_messages = None
self.out_path_error_messages = None
self.out_path_messages = None
self.out_path_tear_messages = None
self.out_reservation_error_messages = None
self.out_reservation_messages = None
self.out_reservation_tear_messages = None
self.out_srefresh_messages = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:counters'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.in_ack_messages is not None:
return True
if self.in_hello_messages is not None:
return True
if self.in_path_error_messages is not None:
return True
if self.in_path_messages is not None:
return True
if self.in_path_tear_messages is not None:
return True
if self.in_reservation_error_messages is not None:
return True
if self.in_reservation_messages is not None:
return True
if self.in_reservation_tear_messages is not None:
return True
if self.in_srefresh_messages is not None:
return True
if self.out_ack_messages is not None:
return True
if self.out_hello_messages is not None:
return True
if self.out_path_error_messages is not None:
return True
if self.out_path_messages is not None:
return True
if self.out_path_tear_messages is not None:
return True
if self.out_reservation_error_messages is not None:
return True
if self.out_reservation_messages is not None:
return True
if self.out_reservation_tear_messages is not None:
return True
if self.out_srefresh_messages is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.State.Counters']['meta_info']
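# Usage sketch (editorial addition): dumping a few of the RSVP message
# counters above, reusing mpls_oper from the read sketch earlier; each
# counter is a uint64 leaf and may be None when the device omits it.
for rsvp_iface in mpls_oper.signaling_protocols.rsvp_te.interface_attributes.interface:
    ctrs = rsvp_iface.state.counters
    print(rsvp_iface.interface_name,
          'Path in/out:', ctrs.in_path_messages, ctrs.out_path_messages,
          'Resv in/out:', ctrs.in_reservation_messages, ctrs.out_reservation_messages)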
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.active_reservation_count is not None:
return True
if self.bandwidth is not None:
for child_ref in self.bandwidth:
if child_ref._has_data():
return True
if self.counters is not None and self.counters._has_data():
return True
if self.highwater_mark is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.State']['meta_info']
class Hellos(object):
"""
Top level container for RSVP hello parameters
.. attribute:: config
Configuration parameters relating to RSVP hellos
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Hellos.Config>`
.. attribute:: state
State information associated with RSVP hellos
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Hellos.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Hellos.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Hellos.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to RSVP
hellos
.. attribute:: hello_interval
set the interval in ms between RSVP hello messages
**type**\: int
**range:** 1000..60000
.. attribute:: refresh_reduction
enables the RSVP refresh reduction features\: message bundling, RSVP message ID, reliable message delivery, and summary refresh
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.hello_interval = None
self.refresh_reduction = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.hello_interval is not None:
return True
if self.refresh_reduction is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Hellos.Config']['meta_info']
class State(object):
"""
State information associated with RSVP hellos
.. attribute:: hello_interval
set the interval in ms between RSVP hello messages
**type**\: int
**range:** 1000..60000
.. attribute:: refresh_reduction
enables the RSVP refresh reduction features\: message bundling, RSVP message ID, reliable message delivery, and summary refresh
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.hello_interval = None
self.refresh_reduction = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.hello_interval is not None:
return True
if self.refresh_reduction is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Hellos.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:hellos'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Hellos']['meta_info']
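# Usage sketch (editorial addition): enabling RSVP hellos on one
# interface. Reuses crud/provider from the read sketch, the interface
# name is a placeholder, and the create() call is issued at the end of
# the protection sketch further below.
mpls_cfg = openconfig_mpls.Mpls()
rsvp_if = openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface()
rsvp_if.interface_name = 'GigabitEthernet0/0/0/0'
rsvp_if.config.interface_name = 'GigabitEthernet0/0/0/0'
rsvp_if.hellos.config.hello_interval = 3000     # milliseconds, 1000..60000
rsvp_if.hellos.config.refresh_reduction = True  # bundling/srefresh machinery on
mpls_cfg.signaling_protocols.rsvp_te.interface_attributes.interface.append(rsvp_if)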
class Authentication(object):
"""
Configuration and state parameters relating to RSVP
authentication as per RFC2747
.. attribute:: config
Configuration parameters relating to authentication
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Authentication.Config>`
.. attribute:: state
State information associated with authentication
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Authentication.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Authentication.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Authentication.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating
to authentication
.. attribute:: authentication_key
authenticate RSVP signaling messages
**type**\: str
**range:** 1..32
.. attribute:: enable
Enables RSVP authentication on the node
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.authentication_key = None
self.enable = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.authentication_key is not None:
return True
if self.enable is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Authentication.Config']['meta_info']
class State(object):
"""
State information associated
with authentication
.. attribute:: authentication_key
authenticate RSVP signaling messages
**type**\: str
**range:** 1..32
.. attribute:: enable
Enables RSVP authentication on the node
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.authentication_key = None
self.enable = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.authentication_key is not None:
return True
if self.enable is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Authentication.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:authentication'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Authentication']['meta_info']
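# Usage sketch (editorial addition): RFC 2747 authentication on the same
# rsvp_if entry built in the hellos sketch; the key is a placeholder and
# must be 1..32 characters per the model.
rsvp_if.authentication.config.enable = True
rsvp_if.authentication.config.authentication_key = 'rsvp-secret-key'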
class Subscription(object):
"""
Bandwidth percentage reservable by RSVP
on an interface
.. attribute:: config
Configuration parameters relating to RSVP subscription options
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Subscription.Config>`
.. attribute:: state
State parameters relating to RSVP subscription options
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Subscription.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Subscription.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Subscription.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to RSVP
subscription options
.. attribute:: subscription
percentage of the interface bandwidth that RSVP can reserve
**type**\: int
**range:** 0..100
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.subscription = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.subscription is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Subscription.Config']['meta_info']
class State(object):
"""
State parameters relating to RSVP
subscription options
.. attribute:: subscription
percentage of the interface bandwidth that RSVP can reserve
**type**\: int
**range:** 0..100
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.subscription = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.subscription is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Subscription.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:subscription'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Subscription']['meta_info']
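# Usage sketch (editorial addition): limiting RSVP to 80% of the
# interface bandwidth, continuing the same rsvp_if entry.
rsvp_if.subscription.config.subscription = 80  # percent, 0..100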
class Protection(object):
"""
link\-protection (NHOP) related configuration
.. attribute:: config
Configuration for link\-protection
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Protection.Config>`
.. attribute:: state
State for link\-protection
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Protection.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Protection.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Protection.State()
self.state.parent = self
class Config(object):
"""
Configuration for link\-protection
.. attribute:: bypass_optimize_interval
interval between periodic optimization of the bypass LSPs
**type**\: int
**range:** 0..65535
.. attribute:: link_protection_style_requested
Style of MPLS FRR protection desired\: link, link\-node, or unprotected
**type**\: :py:class:`ProtectionTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.ProtectionTypeIdentity>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.bypass_optimize_interval = None
self.link_protection_style_requested = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.bypass_optimize_interval is not None:
return True
if self.link_protection_style_requested is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Protection.Config']['meta_info']
class State(object):
"""
State for link\-protection
.. attribute:: bypass_optimize_interval
interval between periodic optimization of the bypass LSPs
**type**\: int
**range:** 0..65535
.. attribute:: link_protection_style_requested
Style of MPLS FRR protection desired\: link, link\-node, or unprotected
**type**\: :py:class:`ProtectionTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.ProtectionTypeIdentity>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.bypass_optimize_interval = None
self.link_protection_style_requested = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.bypass_optimize_interval is not None:
return True
if self.link_protection_style_requested is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Protection.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:protection'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface.Protection']['meta_info']
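# Usage sketch (editorial addition): bypass-LSP reoptimization on the
# same entry, then pushing the accumulated configuration.
# link_protection_style_requested takes a ProtectionTypeIdentity subclass
# from openconfig_mpls_types; those identity class names are defined
# outside this file, so only the interval leaf is set here.
rsvp_if.protection.config.bypass_optimize_interval = 600  # 0..65535; units not stated in the model text
crud.create(provider, mpls_cfg)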
@property
def _common_path(self):
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:interface-attributes/openconfig-mpls:interface[openconfig-mpls:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.authentication is not None and self.authentication._has_data():
return True
if self.config is not None and self.config._has_data():
return True
if self.hellos is not None and self.hellos._has_data():
return True
if self.protection is not None and self.protection._has_data():
return True
if self.state is not None and self.state._has_data():
return True
if self.subscription is not None and self.subscription._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface']['meta_info']
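# Usage sketch (editorial addition): once the key leaf is set, the
# generated _common_path property renders the keyed instance XPath,
# which is what the CRUD layer uses to address this entry.
demo_if = openconfig_mpls.Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes.Interface()
demo_if.interface_name = 'GigabitEthernet0/0/0/1'
print(demo_if._common_path)
# -> /openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/
#    openconfig-mpls:interface-attributes/openconfig-mpls:interface[openconfig-mpls:interface-name = GigabitEthernet0/0/0/1]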
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te/openconfig-mpls:interface-attributes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe.InterfaceAttributes']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:rsvp-te'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.global_ is not None and self.global_._has_data():
return True
if self.interface_attributes is not None and self.interface_attributes._has_data():
return True
if self.neighbors is not None and self.neighbors._has_data():
return True
if self.sessions is not None and self.sessions._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.RsvpTe']['meta_info']
class SegmentRouting(object):
"""
SR global signaling config
.. attribute:: interfaces
List of interfaces with associated segment routing configuration
**type**\: list of :py:class:`Interfaces <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Interfaces>`
.. attribute:: srgb
List of Segment Routing Global Block (SRGB) entries. These label blocks are reserved to be allocated as domain\-wide entries
**type**\: list of :py:class:`Srgb <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Srgb>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.interfaces = YList()
self.interfaces.parent = self
self.interfaces.name = 'interfaces'
self.srgb = YList()
self.srgb.parent = self
self.srgb.name = 'srgb'
class Srgb(object):
"""
List of Segment Routing Global Block (SRGB) entries. These
label blocks are reserved to be allocated as domain\-wide
entries.
.. attribute:: lower_bound <key>
Lower value in the block
**type**\: int
**range:** 0..4294967295
.. attribute:: upper_bound <key>
Upper value in the block
**type**\: int
**range:** 0..4294967295
.. attribute:: config
Configuration parameters relating to the Segment Routing Global Block (SRGB)
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Srgb.Config>`
.. attribute:: state
State parameters relating to the Segment Routing Global Block (SRGB)
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Srgb.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.lower_bound = None
self.upper_bound = None
self.config = Mpls.SignalingProtocols.SegmentRouting.Srgb.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.SegmentRouting.Srgb.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to the Segment Routing
Global Block (SRGB)
.. attribute:: lower_bound
Lower value in the block
**type**\: int
**range:** 0..4294967295
.. attribute:: upper_bound
Upper value in the block
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.lower_bound = None
self.upper_bound = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.lower_bound is not None:
return True
if self.upper_bound is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Srgb.Config']['meta_info']
class State(object):
"""
State parameters relating to the Segment Routing Global
Block (SRGB)
.. attribute:: free
Number of SRGB indexes that have not yet been allocated
**type**\: int
**range:** 0..4294967295
.. attribute:: lower_bound
Lower value in the block
**type**\: int
**range:** 0..4294967295
.. attribute:: size
Number of indexes in the SRGB block
**type**\: int
**range:** 0..4294967295
.. attribute:: upper_bound
Upper value in the block
**type**\: int
**range:** 0..4294967295
.. attribute:: used
Number of SRGB indexes that are currently allocated
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.free = None
self.lower_bound = None
self.size = None
self.upper_bound = None
self.used = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.free is not None:
return True
if self.lower_bound is not None:
return True
if self.size is not None:
return True
if self.upper_bound is not None:
return True
if self.used is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Srgb.State']['meta_info']
@property
def _common_path(self):
if self.lower_bound is None:
raise YPYModelError('Key property lower_bound is None')
if self.upper_bound is None:
raise YPYModelError('Key property upper_bound is None')
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:segment-routing/openconfig-mpls:srgb[openconfig-mpls:lower-bound = ' + str(self.lower_bound) + '][openconfig-mpls:upper-bound = ' + str(self.upper_bound) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.lower_bound is not None:
return True
if self.upper_bound is not None:
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Srgb']['meta_info']
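# Usage sketch (editorial addition): declaring one SRGB entry; the list
# keys and the config leaves both carry the bounds. The 16000..23999
# label range is a common convention, not mandated by the model.
sr_cfg = openconfig_mpls.Mpls()
srgb = openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Srgb()
srgb.lower_bound = 16000
srgb.upper_bound = 23999
srgb.config.lower_bound = 16000
srgb.config.upper_bound = 23999
sr_cfg.signaling_protocols.segment_routing.srgb.append(srgb)
crud.create(provider, sr_cfg)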
class Interfaces(object):
"""
List of interfaces with associated segment routing
configuration
.. attribute:: interface <key>
Reference to the interface for which segment routing configuration is to be applied
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
.. attribute:: adjacency_sid
Configuration for Adjacency SIDs that are related to the specified interface
**type**\: :py:class:`AdjacencySid <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid>`
.. attribute:: config
Interface configuration parameters for Segment Routing relating to the specified interface
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Interfaces.Config>`
.. attribute:: state
State parameters for Segment Routing features relating to the specified interface
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Interfaces.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.interface = None
self.adjacency_sid = Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid()
self.adjacency_sid.parent = self
self.config = Mpls.SignalingProtocols.SegmentRouting.Interfaces.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.SegmentRouting.Interfaces.State()
self.state.parent = self
class Config(object):
"""
Interface configuration parameters for Segment Routing
relating to the specified interface
.. attribute:: interface
Reference to the interface for which segment routing configuration is to be applied
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.interface = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Interfaces.Config']['meta_info']
class State(object):
"""
State parameters for Segment Routing features relating
to the specified interface
.. attribute:: interface
Reference to the interface for which segment routing configuration is to be applied
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.interface = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Interfaces.State']['meta_info']
class AdjacencySid(object):
"""
Configuration for Adjacency SIDs that are related to
the specified interface
.. attribute:: config
Configuration parameters for the Adjacency\-SIDs that are related to this interface
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.Config>`
.. attribute:: state
State parameters for the Adjacency\-SIDs that are related to this interface
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.Config()
self.config.parent = self
self.state = Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters for the Adjacency\-SIDs
that are related to this interface
.. attribute:: advertise
Specifies the type of adjacency SID which should be advertised for the specified entity
**type**\: list of :py:class:`AdvertiseEnum <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.Config.AdvertiseEnum>`
.. attribute:: groups
Specifies the groups to which this interface belongs. Setting a value in this list results in an additional AdjSID being advertised, with the S\-bit set to 1. The AdjSID is assumed to be protected
**type**\: list of int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.advertise = YLeafList()
self.advertise.parent = self
self.advertise.name = 'advertise'
self.groups = YLeafList()
self.groups.parent = self
self.groups.name = 'groups'
class AdvertiseEnum(Enum):
"""
AdvertiseEnum
Specifies the type of adjacency SID which should be
advertised for the specified entity.
.. data:: PROTECTED = 0
Advertise an Adjacency-SID for this interface, which is
eligible to be protected using a local protection
mechanism on the local LSR. The local protection
mechanism selected is dependent upon the configuration
of RSVP-TE FRR or LFA elsewhere on the system
.. data:: UNPROTECTED = 1
Advertise an Adjacency-SID for this interface, which is
explicitly excluded from being protected by any local
protection mechanism
"""
PROTECTED = 0
UNPROTECTED = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.Config.AdvertiseEnum']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.advertise is not None:
for child in self.advertise:
if child is not None:
return True
if self.groups is not None:
for child in self.groups:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.Config']['meta_info']
class State(object):
"""
State parameters for the Adjacency\-SIDs that are
related to this interface
.. attribute:: advertise
Specifies the type of adjacency SID which should be advertised for the specified entity
**type**\: list of :py:class:`AdvertiseEnum <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.State.AdvertiseEnum>`
.. attribute:: groups
Specifies the groups to which this interface belongs. Setting a value in this list results in an additional AdjSID being advertised, with the S\-bit set to 1. The AdjSID is assumed to be protected
**type**\: list of int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.advertise = YLeafList()
self.advertise.parent = self
self.advertise.name = 'advertise'
self.groups = YLeafList()
self.groups.parent = self
self.groups.name = 'groups'
class AdvertiseEnum(Enum):
"""
AdvertiseEnum
Specifies the type of adjacency SID which should be
advertised for the specified entity.
.. data:: PROTECTED = 0
Advertise an Adjacency-SID for this interface, which is
eligible to be protected using a local protection
mechanism on the local LSR. The local protection
mechanism selected is dependent upon the configuration
of RSVP-TE FRR or LFA elsewhere on the system
.. data:: UNPROTECTED = 1
Advertise an Adjacency-SID for this interface, which is
explicitly excluded from being protected by any local
protection mechanism
"""
PROTECTED = 0
UNPROTECTED = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.State.AdvertiseEnum']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.advertise is not None:
for child in self.advertise:
if child is not None:
return True
if self.groups is not None:
for child in self.groups:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:adjacency-sid'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid']['meta_info']
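# Usage sketch (editorial addition): requesting a protected adjacency
# SID plus one group SID on an SR interface; the interface name and
# group value are placeholders, and the AdvertiseEnum member comes from
# the enum defined just above.
sr_if = openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Interfaces()
sr_if.interface = 'GigabitEthernet0/0/0/2'
sr_if.config.interface = 'GigabitEthernet0/0/0/2'
adj = sr_if.adjacency_sid.config
adj.advertise.append(
    openconfig_mpls.Mpls.SignalingProtocols.SegmentRouting.Interfaces.AdjacencySid.Config.AdvertiseEnum.PROTECTED)
adj.groups.append(1)  # advertises an additional AdjSID with the S-bit set
sr_cfg.signaling_protocols.segment_routing.interfaces.append(sr_if)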
@property
def _common_path(self):
if self.interface is None:
raise YPYModelError('Key property interface is None')
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:segment-routing/openconfig-mpls:interfaces[openconfig-mpls:interface = ' + str(self.interface) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
return True
if self.adjacency_sid is not None and self.adjacency_sid._has_data():
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting.Interfaces']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:segment-routing'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.interfaces is not None:
for child_ref in self.interfaces:
if child_ref._has_data():
return True
if self.srgb is not None:
for child_ref in self.srgb:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.SegmentRouting']['meta_info']
class Ldp(object):
"""
LDP global signaling configuration
.. attribute:: timers
LDP timers
**type**\: :py:class:`Timers <ydk.models.openconfig.openconfig_mpls.Mpls.SignalingProtocols.Ldp.Timers>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.timers = Mpls.SignalingProtocols.Ldp.Timers()
self.timers.parent = self
class Timers(object):
"""
LDP timers
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:ldp/openconfig-mpls:timers'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.Ldp.Timers']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols/openconfig-mpls:ldp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.timers is not None and self.timers._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols.Ldp']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:signaling-protocols'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.ldp is not None and self.ldp._has_data():
return True
if self.rsvp_te is not None and self.rsvp_te._has_data():
return True
if self.segment_routing is not None and self.segment_routing._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.SignalingProtocols']['meta_info']
class Lsps(object):
"""
LSP definitions and configuration
.. attribute:: constrained_path
traffic\-engineered LSPs supporting different path computation and signaling methods
**type**\: :py:class:`ConstrainedPath <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath>`
.. attribute:: static_lsps
statically configured LSPs, without dynamic signaling
**type**\: :py:class:`StaticLsps <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.StaticLsps>`
.. attribute:: unconstrained_path
LSPs that use the IGP\-determined path, i.e., non\-traffic\-engineered, non\-constrained\-path LSPs
**type**\: :py:class:`UnconstrainedPath <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.constrained_path = Mpls.Lsps.ConstrainedPath()
self.constrained_path.parent = self
self.static_lsps = Mpls.Lsps.StaticLsps()
self.static_lsps.parent = self
self.unconstrained_path = Mpls.Lsps.UnconstrainedPath()
self.unconstrained_path.parent = self
class ConstrainedPath(object):
"""
traffic\-engineered LSPs supporting different
path computation and signaling methods
.. attribute:: named_explicit_paths
A list of explicit paths
**type**\: list of :py:class:`NamedExplicitPaths <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths>`
.. attribute:: tunnel
List of TE tunnels
**type**\: list of :py:class:`Tunnel <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.named_explicit_paths = YList()
self.named_explicit_paths.parent = self
self.named_explicit_paths.name = 'named_explicit_paths'
self.tunnel = YList()
self.tunnel.parent = self
self.tunnel.name = 'tunnel'
class NamedExplicitPaths(object):
"""
A list of explicit paths
.. attribute:: name <key>
A string name that uniquely identifies an explicit path
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.Config>`
.. attribute:: config
Configuration parameters relating to named explicit paths
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.Config>`
.. attribute:: explicit_route_objects
List of explicit route objects
**type**\: list of :py:class:`ExplicitRouteObjects <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.ExplicitRouteObjects>`
.. attribute:: state
Operational state parameters relating to the named explicit paths
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.name = None
self.config = Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.Config()
self.config.parent = self
self.explicit_route_objects = YList()
self.explicit_route_objects.parent = self
self.explicit_route_objects.name = 'explicit_route_objects'
self.state = Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to named explicit
paths
.. attribute:: name
A string name that uniquely identifies an explicit path
**type**\: str
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.Config']['meta_info']
class State(object):
"""
Operational state parameters relating to the named
explicit paths
.. attribute:: name
A string name that uniquely identifies an explicit path
**type**\: str
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.State']['meta_info']
class ExplicitRouteObjects(object):
"""
List of explicit route objects
.. attribute:: index <key>
Index of this explicit route object, to express the order of hops in the path
**type**\: int
**range:** 0..255
**refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.ExplicitRouteObjects.Config>`
.. attribute:: config
Configuration parameters relating to an explicit route
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.ExplicitRouteObjects.Config>`
.. attribute:: state
State parameters relating to an explicit route
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.ExplicitRouteObjects.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.index = None
self.config = Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.ExplicitRouteObjects.Config()
self.config.parent = self
self.state = Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.ExplicitRouteObjects.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to an explicit
route
.. attribute:: address
router hop for the LSP path
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: hop_type
strict or loose hop
**type**\: :py:class:`MplsHopTypeEnum <ydk.models.openconfig.openconfig_mpls.MplsHopTypeEnum>`
.. attribute:: index
Index of this explicit route object to express the order of hops in the path
**type**\: int
**range:** 0..255
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.address = None
self.hop_type = None
self.index = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.address is not None:
return True
if self.hop_type is not None:
return True
if self.index is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.ExplicitRouteObjects.Config']['meta_info']
class State(object):
"""
State parameters relating to an explicit route
.. attribute:: address
router hop for the LSP path
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: hop_type
strict or loose hop
**type**\: :py:class:`MplsHopTypeEnum <ydk.models.openconfig.openconfig_mpls.MplsHopTypeEnum>`
.. attribute:: index
Index of this explicit route object to express the order of hops in the path
**type**\: int
**range:** 0..255
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.address = None
self.hop_type = None
self.index = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.address is not None:
return True
if self.hop_type is not None:
return True
if self.index is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.ExplicitRouteObjects.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.index is None:
raise YPYModelError('Key property index is None')
return self.parent._common_path +'/openconfig-mpls:explicit-route-objects[openconfig-mpls:index = ' + str(self.index) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.index is not None:
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.ExplicitRouteObjects']['meta_info']
@property
def _common_path(self):
if self.name is None:
raise YPYModelError('Key property name is None')
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:constrained-path/openconfig-mpls:named-explicit-paths[openconfig-mpls:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.config is not None and self.config._has_data():
return True
if self.explicit_route_objects is not None:
for child_ref in self.explicit_route_objects:
if child_ref._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.NamedExplicitPaths']['meta_info']
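# Usage sketch (editorial addition): a named explicit path with two
# strict hops. The MplsHopTypeEnum member name is assumed from the
# model's strict/loose hop description; the path name and addresses are
# placeholders.
lsp_cfg = openconfig_mpls.Mpls()
nep = openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths()
nep.name = 'PATH-VIA-P1'
nep.config.name = 'PATH-VIA-P1'
for idx, addr in ((10, '10.1.1.1'), (20, '10.2.2.2')):
    ero = openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.ExplicitRouteObjects()
    ero.index = idx
    ero.config.index = idx
    ero.config.address = addr
    ero.config.hop_type = openconfig_mpls.MplsHopTypeEnum.STRICT  # assumed member name
    nep.explicit_route_objects.append(ero)
lsp_cfg.lsps.constrained_path.named_explicit_paths.append(nep)
crud.create(provider, lsp_cfg)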
class Tunnel(object):
"""
List of TE tunnels
.. attribute:: name <key>
The tunnel name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Config>`
.. attribute:: type <key>
The tunnel type, p2p or p2mp
**type**\: :py:class:`TunnelTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.TunnelTypeIdentity>`
.. attribute:: bandwidth
Bandwidth configuration for TE LSPs
**type**\: :py:class:`Bandwidth <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth>`
.. attribute:: config
Configuration parameters related to TE tunnels\:
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Config>`
.. attribute:: p2p_tunnel_attributes
Parameters related to LSPs of type P2P
**type**\: :py:class:`P2PTunnelAttributes <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes>`
.. attribute:: state
State parameters related to TE tunnels
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.name = None
self.type = None
self.bandwidth = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth()
self.bandwidth.parent = self
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.Config()
self.config.parent = self
self.p2p_tunnel_attributes = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes()
self.p2p_tunnel_attributes.parent = self
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters related to TE tunnels\:
.. attribute:: admin_status
TE tunnel administrative state
**type**\: :py:class:`TunnelAdminStatusIdentity <ydk.models.openconfig.openconfig_mpls_types.TunnelAdminStatusIdentity>`
.. attribute:: description
optional text description for the tunnel
**type**\: str
.. attribute:: hold_priority
preemption priority once the LSP is established, lower is higher priority; default 0 indicates other LSPs will not preempt the LSPs once established
**type**\: int
**range:** 0..7
.. attribute:: local_id
locally significant optional identifier for the tunnel; may be a numerical or string value
**type**\: one of the below types:
**type**\: int
**range:** 0..4294967295
----
**type**\: str
----
.. attribute:: metric
LSP metric, either explicit or IGP
**type**\: one of the below types:
**type**\: :py:class:`TeMetricTypeEnum <ydk.models.openconfig.openconfig_mpls.TeMetricTypeEnum>`
----
**type**\: int
**range:** 0..4294967295
----
.. attribute:: name
The tunnel name
**type**\: str
.. attribute:: preference
Specifies a preference for this tunnel. A lower number signifies a better preference
**type**\: int
**range:** 1..255
.. attribute:: protection_style_requested
style of mpls frr protection desired\: can be link, link\-node or unprotected
**type**\: :py:class:`ProtectionTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.ProtectionTypeIdentity>`
.. attribute:: reoptimize_timer
frequency of reoptimization of a traffic engineered LSP
**type**\: int
**range:** 0..65535
.. attribute:: setup_priority
RSVP\-TE preemption priority during LSP setup, lower is higher priority; default 7 indicates that LSP will not preempt established LSPs during setup
**type**\: int
**range:** 0..7
.. attribute:: signaling_protocol
Signaling protocol used to set up this tunnel
**type**\: :py:class:`TunnelTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.TunnelTypeIdentity>`
.. attribute:: soft_preemption
Enables RSVP soft\-preemption on this LSP
**type**\: bool
.. attribute:: source
RSVP\-TE tunnel source address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: type
Tunnel type, p2p or p2mp
**type**\: :py:class:`TunnelTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.TunnelTypeIdentity>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.admin_status = None
self.description = None
self.hold_priority = None
self.local_id = None
self.metric = None
self.name = None
self.preference = None
self.protection_style_requested = None
self.reoptimize_timer = None
self.setup_priority = None
self.signaling_protocol = None
self.soft_preemption = None
self.source = None
self.type = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.admin_status is not None:
return True
if self.description is not None:
return True
if self.hold_priority is not None:
return True
if self.local_id is not None:
return True
if self.metric is not None:
return True
if self.name is not None:
return True
if self.preference is not None:
return True
if self.protection_style_requested is not None:
return True
if self.reoptimize_timer is not None:
return True
if self.setup_priority is not None:
return True
if self.signaling_protocol is not None:
return True
if self.soft_preemption is not None:
return True
if self.source is not None:
return True
if self.type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Config']['meta_info']
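# Configuration sketch (comment-only): populating Tunnel.Config for a p2p
# RSVP-TE tunnel. The identity subclass names (P2PIdentity,
# PathSetupRsvpIdentity) are assumptions about openconfig_mpls_types; the
# priority semantics follow the docstring above (lower value preempts,
# hold_priority 0 resists preemption).
#
#   from ydk.models.openconfig import openconfig_mpls, openconfig_mpls_types
#   cfg = openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Config()
#   cfg.name = 'LSP-TO-PE2'                             # hypothetical name
#   cfg.type = openconfig_mpls_types.P2PIdentity()      # assumed identity
#   cfg.signaling_protocol = openconfig_mpls_types.PathSetupRsvpIdentity()
#   cfg.setup_priority = 7
#   cfg.hold_priority = 0
#   cfg.soft_preemption = True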
class State(object):
"""
State parameters related to TE tunnels
.. attribute:: admin_status
TE tunnel administrative state
**type**\: :py:class:`TunnelAdminStatusIdentity <ydk.models.openconfig.openconfig_mpls_types.TunnelAdminStatusIdentity>`
.. attribute:: counters
State data for MPLS label switched paths. This state data is specific to a single label switched path
**type**\: :py:class:`Counters <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.State.Counters>`
.. attribute:: description
optional text description for the tunnel
**type**\: str
.. attribute:: hold_priority
preemption priority once the LSP is established, lower is higher priority; default 0 indicates other LSPs will not preempt the LSPs once established
**type**\: int
**range:** 0..7
.. attribute:: local_id
locally significant optional identifier for the tunnel; may be a numerical or string value
**type**\: one of the below types:
**type**\: int
**range:** 0..4294967295
----
**type**\: str
----
.. attribute:: metric
LSP metric, either explicit or IGP
**type**\: one of the below types:
**type**\: :py:class:`TeMetricTypeEnum <ydk.models.openconfig.openconfig_mpls.TeMetricTypeEnum>`
----
**type**\: int
**range:** 0..4294967295
----
.. attribute:: name
The tunnel name
**type**\: str
.. attribute:: oper_status
The operational status of the TE tunnel
**type**\: :py:class:`LspOperStatusIdentity <ydk.models.openconfig.openconfig_mpls_types.LspOperStatusIdentity>`
.. attribute:: preference
Specifies a preference for this tunnel. A lower number signifies a better preference
**type**\: int
**range:** 1..255
.. attribute:: protection_style_requested
style of mpls frr protection desired\: can be link, link\-node or unprotected
**type**\: :py:class:`ProtectionTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.ProtectionTypeIdentity>`
.. attribute:: reoptimize_timer
frequency of reoptimization of a traffic engineered LSP
**type**\: int
**range:** 0..65535
.. attribute:: role
The lsp role at the current node, whether it is headend, transit or tailend
**type**\: :py:class:`LspRoleIdentity <ydk.models.openconfig.openconfig_mpls_types.LspRoleIdentity>`
.. attribute:: setup_priority
RSVP\-TE preemption priority during LSP setup, lower is higher priority; default 7 indicates that LSP will not preempt established LSPs during setup
**type**\: int
**range:** 0..7
.. attribute:: signaling_protocol
Signaling protocol used to set up this tunnel
**type**\: :py:class:`TunnelTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.TunnelTypeIdentity>`
.. attribute:: soft_preemption
Enables RSVP soft\-preemption on this LSP
**type**\: bool
.. attribute:: source
RSVP\-TE tunnel source address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: type
Tunnel type, p2p or p2mp
**type**\: :py:class:`TunnelTypeIdentity <ydk.models.openconfig.openconfig_mpls_types.TunnelTypeIdentity>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.admin_status = None
self.counters = Mpls.Lsps.ConstrainedPath.Tunnel.State.Counters()
self.counters.parent = self
self.description = None
self.hold_priority = None
self.local_id = None
self.metric = None
self.name = None
self.oper_status = None
self.preference = None
self.protection_style_requested = None
self.reoptimize_timer = None
self.role = None
self.setup_priority = None
self.signaling_protocol = None
self.soft_preemption = None
self.source = None
self.type = None
class Counters(object):
"""
State data for MPLS label switched paths. This state
data is specific to a single label switched path.
.. attribute:: bytes
Number of bytes that have been forwarded over the label switched path
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: current_path_time
Indicates the time the LSP switched onto its current path. This is reset upon an LSP path change
**type**\: str
**pattern:** \\d{4}\-\\d{2}\-\\d{2}T\\d{2}\:\\d{2}\:\\d{2}(\\.\\d+)?(Z\|[\\+\\\-]\\d{2}\:\\d{2})
.. attribute:: next_reoptimization_time
Indicates the next scheduled time the LSP will be reoptimized
**type**\: str
**pattern:** \\d{4}\-\\d{2}\-\\d{2}T\\d{2}\:\\d{2}\:\\d{2}(\\.\\d+)?(Z\|[\\+\\\-]\\d{2}\:\\d{2})
.. attribute:: online_time
Indication of the time the label switched path transitioned to an Oper Up or in\-service state
**type**\: str
**pattern:** \\d{4}\-\\d{2}\-\\d{2}T\\d{2}\:\\d{2}\:\\d{2}(\\.\\d+)?(Z\|[\\+\\\-]\\d{2}\:\\d{2})
.. attribute:: packets
Number of packets that have been forwarded over the label switched path
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: path_changes
Number of path changes for the label switched path
**type**\: long
**range:** 0..18446744073709551615
.. attribute:: state_changes
Number of state changes for the label switched path
**type**\: long
**range:** 0..18446744073709551615
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.bytes = None
self.current_path_time = None
self.next_reoptimization_time = None
self.online_time = None
self.packets = None
self.path_changes = None
self.state_changes = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:counters'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.bytes is not None:
return True
if self.current_path_time is not None:
return True
if self.next_reoptimization_time is not None:
return True
if self.online_time is not None:
return True
if self.packets is not None:
return True
if self.path_changes is not None:
return True
if self.state_changes is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.State.Counters']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.admin_status is not None:
return True
if self.counters is not None and self.counters._has_data():
return True
if self.description is not None:
return True
if self.hold_priority is not None:
return True
if self.local_id is not None:
return True
if self.metric is not None:
return True
if self.name is not None:
return True
if self.oper_status is not None:
return True
if self.preference is not None:
return True
if self.protection_style_requested is not None:
return True
if self.reoptimize_timer is not None:
return True
if self.role is not None:
return True
if self.setup_priority is not None:
return True
if self.signaling_protocol is not None:
return True
if self.soft_preemption is not None:
return True
if self.source is not None:
return True
if self.type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.State']['meta_info']
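# Read-side sketch (comment-only): this State subtree is operational data
# (is_config() returns False), so it is fetched rather than pushed. The
# provider address and credentials are placeholders, and the CRUDService
# call pattern reflects typical ydk-py usage as an assumption.
#
#   from ydk.services import CRUDService
#   from ydk.providers import NetconfServiceProvider
#   from ydk.models.openconfig import openconfig_mpls
#   provider = NetconfServiceProvider(address='10.0.0.1', port=830,
#                                     username='admin', password='admin',
#                                     protocol='ssh')
#   mpls = CRUDService().read(provider, openconfig_mpls.Mpls())
#   for tunnel in mpls.lsps.constrained_path.tunnel:
#       print(tunnel.state.oper_status, tunnel.state.counters.bytes)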
class Bandwidth(object):
"""
Bandwidth configuration for TE LSPs
.. attribute:: auto_bandwidth
Parameters related to auto\-bandwidth
**type**\: :py:class:`AutoBandwidth <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth>`
.. attribute:: config
Configuration parameters related to bandwidth on TE tunnels\:
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.Config>`
.. attribute:: state
State parameters related to bandwidth configuration of TE tunnels
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.auto_bandwidth = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth()
self.auto_bandwidth.parent = self
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.Config()
self.config.parent = self
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters related to bandwidth on TE
tunnels\:
.. attribute:: set_bandwidth
set bandwidth explicitly, e.g., using offline calculation
**type**\: int
**range:** 0..4294967295
.. attribute:: specification_type
The method used for setting the bandwidth, either explicitly specified or configured
**type**\: :py:class:`TeBandwidthTypeEnum <ydk.models.openconfig.openconfig_mpls.TeBandwidthTypeEnum>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.set_bandwidth = None
self.specification_type = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.set_bandwidth is not None:
return True
if self.specification_type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.Config']['meta_info']
class State(object):
"""
State parameters related to bandwidth
configuration of TE tunnels
.. attribute:: set_bandwidth
set bandwidth explicitly, e.g., using offline calculation
**type**\: int
**range:** 0..4294967295
.. attribute:: specification_type
The method used for setting the bandwidth, either explicitly specified or configured
**type**\: :py:class:`TeBandwidthTypeEnum <ydk.models.openconfig.openconfig_mpls.TeBandwidthTypeEnum>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.set_bandwidth = None
self.specification_type = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.set_bandwidth is not None:
return True
if self.specification_type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.State']['meta_info']
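# Sketch (comment-only): pinning tunnel bandwidth explicitly. The enum member
# name SPECIFIED is an assumption about TeBandwidthTypeEnum; set_bandwidth is
# the 0..4294967295 value documented above.
#
#   bw_cfg = openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.Config()
#   bw_cfg.specification_type = openconfig_mpls.TeBandwidthTypeEnum.SPECIFIED
#   bw_cfg.set_bandwidth = 100000                       # illustrative value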
class AutoBandwidth(object):
"""
Parameters related to auto\-bandwidth
.. attribute:: config
Configuration parameters relating to MPLS auto\-bandwidth on the tunnel
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Config>`
.. attribute:: overflow
configuration of MPLS overflow bandwidth adjustment for the LSP
**type**\: :py:class:`Overflow <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Overflow>`
.. attribute:: state
State parameters relating to MPLS auto\-bandwidth on the tunnel
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.State>`
.. attribute:: underflow
configuration of MPLS underflow bandwidth adjustment for the LSP
**type**\: :py:class:`Underflow <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Underflow>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Config()
self.config.parent = self
self.overflow = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Overflow()
self.overflow.parent = self
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.State()
self.state.parent = self
self.underflow = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Underflow()
self.underflow.parent = self
class Config(object):
"""
Configuration parameters relating to MPLS
auto\-bandwidth on the tunnel.
.. attribute:: adjust_interval
time in seconds between adjustments to LSP bandwidth
**type**\: int
**range:** 0..4294967295
.. attribute:: adjust_threshold
percentage difference between the LSP's specified bandwidth and its current bandwidth allocation \-\- if the difference is greater than the specified percentage, auto\-bandwidth adjustment is triggered
**type**\: int
**range:** 0..100
.. attribute:: enabled
enables mpls auto\-bandwidth on the lsp
**type**\: bool
.. attribute:: max_bw
set the maximum bandwidth in Mbps for an auto\-bandwidth LSP
**type**\: int
**range:** 0..4294967295
.. attribute:: min_bw
set the minimum bandwidth in Mbps for an auto\-bandwidth LSP
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.adjust_interval = None
self.adjust_threshold = None
self.enabled = None
self.max_bw = None
self.min_bw = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.adjust_interval is not None:
return True
if self.adjust_threshold is not None:
return True
if self.enabled is not None:
return True
if self.max_bw is not None:
return True
if self.min_bw is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Config']['meta_info']
class State(object):
"""
State parameters relating to MPLS
auto\-bandwidth on the tunnel.
.. attribute:: adjust_interval
time in seconds between adjustments to LSP bandwidth
**type**\: int
**range:** 0..4294967295
.. attribute:: adjust_threshold
percentage difference between the LSP's specified bandwidth and its current bandwidth allocation \-\- if the difference is greater than the specified percentage, auto\-bandwidth adjustment is triggered
**type**\: int
**range:** 0..100
.. attribute:: enabled
enables mpls auto\-bandwidth on the lsp
**type**\: bool
.. attribute:: max_bw
set the maximum bandwidth in Mbps for an auto\-bandwidth LSP
**type**\: int
**range:** 0..4294967295
.. attribute:: min_bw
set the minimum bandwidth in Mbps for an auto\-bandwidth LSP
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.adjust_interval = None
self.adjust_threshold = None
self.enabled = None
self.max_bw = None
self.min_bw = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.adjust_interval is not None:
return True
if self.adjust_threshold is not None:
return True
if self.enabled is not None:
return True
if self.max_bw is not None:
return True
if self.min_bw is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.State']['meta_info']
class Overflow(object):
"""
configuration of MPLS overflow bandwidth
adjustment for the LSP
.. attribute:: config
Config information for MPLS overflow bandwidth adjustment
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Overflow.Config>`
.. attribute:: state
State information for MPLS overflow bandwidth adjustment
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Overflow.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Overflow.Config()
self.config.parent = self
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Overflow.State()
self.state.parent = self
class Config(object):
"""
Config information for MPLS overflow bandwidth
adjustment
.. attribute:: enabled
enables mpls lsp bandwidth overflow adjustment on the lsp
**type**\: bool
.. attribute:: overflow_threshold
bandwidth percentage change to trigger an overflow event
**type**\: int
**range:** 0..100
.. attribute:: trigger_event_count
number of consecutive overflow sample events needed to trigger an overflow adjustment
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.enabled = None
self.overflow_threshold = None
self.trigger_event_count = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.enabled is not None:
return True
if self.overflow_threshold is not None:
return True
if self.trigger_event_count is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Overflow.Config']['meta_info']
class State(object):
"""
State information for MPLS overflow bandwidth
adjustment
.. attribute:: enabled
enables mpls lsp bandwidth overflow adjustment on the lsp
**type**\: bool
.. attribute:: overflow_threshold
bandwidth percentage change to trigger an overflow event
**type**\: int
**range:** 0..100
.. attribute:: trigger_event_count
number of consecutive overflow sample events needed to trigger an overflow adjustment
**type**\: int
**range:** 0..65535
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.enabled = None
self.overflow_threshold = None
self.trigger_event_count = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.enabled is not None:
return True
if self.overflow_threshold is not None:
return True
if self.trigger_event_count is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Overflow.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:overflow'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Overflow']['meta_info']
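# Sketch (comment-only): overflow tuning. Per the docstrings above, an
# overflow adjustment triggers after trigger_event_count consecutive samples
# exceed overflow_threshold percent; the values here are illustrative.
#
#   ovf_cfg = (openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth
#              .AutoBandwidth.Overflow.Config())
#   ovf_cfg.enabled = True
#   ovf_cfg.overflow_threshold = 10                     # percent
#   ovf_cfg.trigger_event_count = 3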
class Underflow(object):
"""
configuration of MPLS underflow bandwidth
adjustment for the LSP
.. attribute:: config
Config information for MPLS underflow bandwidth adjustment
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Underflow.Config>`
.. attribute:: state
State information for MPLS underflow bandwidth adjustment
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Underflow.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Underflow.Config()
self.config.parent = self
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Underflow.State()
self.state.parent = self
class Config(object):
"""
Config information for MPLS underflow bandwidth
adjustment
.. attribute:: enabled
enables bandwidth underflow adjustment on the lsp
**type**\: bool
.. attribute:: trigger_event_count
number of consecutive underflow sample events needed to trigger an underflow adjustment
**type**\: int
**range:** 0..65535
.. attribute:: underflow_threshold
bandwidth percentage change to trigger an underflow event
**type**\: int
**range:** 0..100
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.enabled = None
self.trigger_event_count = None
self.underflow_threshold = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.enabled is not None:
return True
if self.trigger_event_count is not None:
return True
if self.underflow_threshold is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Underflow.Config']['meta_info']
class State(object):
"""
State information for MPLS underflow bandwidth
adjustment
.. attribute:: enabled
enables bandwidth underflow adjustment on the lsp
**type**\: bool
.. attribute:: trigger_event_count
number of consecutive underflow sample events needed to trigger an underflow adjustment
**type**\: int
**range:** 0..65535
.. attribute:: underflow_threshold
bandwidth percentage change to trigger an underflow event
**type**\: int
**range:** 0..100
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.enabled = None
self.trigger_event_count = None
self.underflow_threshold = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.enabled is not None:
return True
if self.trigger_event_count is not None:
return True
if self.underflow_threshold is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Underflow.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:underflow'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth.Underflow']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:auto-bandwidth'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.overflow is not None and self.overflow._has_data():
return True
if self.state is not None and self.state._has_data():
return True
if self.underflow is not None and self.underflow._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth']['meta_info']
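# Combined auto-bandwidth sketch (comment-only, illustrative values):
# adjust_interval/adjust_threshold drive periodic resizing, while min_bw and
# max_bw clamp the computed bandwidth in Mbps.
#
#   ab = openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth.AutoBandwidth()
#   ab.config.enabled = True
#   ab.config.adjust_interval = 300                     # seconds
#   ab.config.adjust_threshold = 5                      # percent
#   ab.config.min_bw = 10                               # Mbps
#   ab.config.max_bw = 1000                             # Mbps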
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:bandwidth'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.auto_bandwidth is not None and self.auto_bandwidth._has_data():
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.Bandwidth']['meta_info']
class P2PTunnelAttributes(object):
"""
Parameters related to LSPs of type P2P
.. attribute:: config
Configuration parameters for P2P LSPs
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.Config>`
.. attribute:: p2p_primary_paths
List of p2p primary paths for a tunnel
**type**\: list of :py:class:`P2PPrimaryPaths <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths>`
.. attribute:: p2p_secondary_paths
List of p2p secondary paths for a tunnel
**type**\: list of :py:class:`P2PSecondaryPaths <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths>`
.. attribute:: state
State parameters for P2P LSPs
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.Config()
self.config.parent = self
self.p2p_primary_paths = YList()
self.p2p_primary_paths.parent = self
self.p2p_primary_paths.name = 'p2p_primary_paths'
self.p2p_secondary_paths = YList()
self.p2p_secondary_paths.parent = self
self.p2p_secondary_paths.name = 'p2p_secondary_paths'
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters for P2P LSPs
.. attribute:: destination
P2P tunnel destination address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.destination = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.destination is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.Config']['meta_info']
class State(object):
"""
State parameters for P2P LSPs
.. attribute:: destination
P2P tunnel destination address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.destination = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.destination is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.State']['meta_info']
class P2PPrimaryPaths(object):
"""
List of p2p primary paths for a tunnel
.. attribute:: name <key>
Path name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.Config>`
.. attribute:: admin_groups
Top\-level container for include/exclude constraints for link affinities
**type**\: :py:class:`AdminGroups <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.AdminGroups>`
.. attribute:: candidate_secondary_paths
The set of candidate secondary paths which may be used for this primary path. When secondary paths are specified in the list the path of the secondary LSP in use must be restricted to those path options referenced. The priority of the secondary paths is specified within the list. Higher priority values are less preferred \- that is to say that a path with priority 0 is the most preferred path. In the case that the list is empty, any secondary path option may be utilised when the current primary path is in use
**type**\: :py:class:`CandidateSecondaryPaths <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths>`
.. attribute:: config
Configuration parameters related to paths
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.Config>`
.. attribute:: state
State parameters related to paths
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.name = None
self.admin_groups = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.AdminGroups()
self.admin_groups.parent = self
self.candidate_secondary_paths = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths()
self.candidate_secondary_paths.parent = self
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.Config()
self.config.parent = self
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters related to paths
.. attribute:: cspf_tiebreaker
Determine the tie\-breaking method to choose between equally desirable paths during CSPF computation
**type**\: :py:class:`CspfTieBreakingEnum <ydk.models.openconfig.openconfig_mpls.CspfTieBreakingEnum>`
.. attribute:: explicit_path_name
reference to a defined path
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.Config>`
.. attribute:: hold_priority
preemption priority once the LSP is established, lower is higher priority; default 0 indicates other LSPs will not preempt the LSPs once established
**type**\: int
**range:** 0..7
.. attribute:: name
Path name
**type**\: str
.. attribute:: path_computation_method
The method used for computing the path, either locally computed, queried from a server or not computed at all (explicitly configured)
**type**\: :py:class:`PathComputationMethodIdentity <ydk.models.openconfig.openconfig_mpls.PathComputationMethodIdentity>`
.. attribute:: path_computation_server
Address of the external path computation server
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: preference
Specifies a preference for this path. The lower the number, the higher the preference
**type**\: int
**range:** 1..255
.. attribute:: retry_timer
sets the time between attempts to establish the LSP
**type**\: int
**range:** 1..600
.. attribute:: setup_priority
RSVP\-TE preemption priority during LSP setup, lower is higher priority; default 7 indicates that LSP will not preempt established LSPs during setup
**type**\: int
**range:** 0..7
.. attribute:: use_cspf
Flag to enable CSPF for locally computed LSPs
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.cspf_tiebreaker = None
self.explicit_path_name = None
self.hold_priority = None
self.name = None
self.path_computation_method = None
self.path_computation_server = None
self.preference = None
self.retry_timer = None
self.setup_priority = None
self.use_cspf = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.cspf_tiebreaker is not None:
return True
if self.explicit_path_name is not None:
return True
if self.hold_priority is not None:
return True
if self.name is not None:
return True
if self.path_computation_method is not None:
return True
if self.path_computation_server is not None:
return True
if self.preference is not None:
return True
if self.retry_timer is not None:
return True
if self.setup_priority is not None:
return True
if self.use_cspf is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.Config']['meta_info']
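# Primary-path configuration sketch (comment-only). explicit_path_name is a
# leafref to a NamedExplicitPaths entry; the ExplicitlyDefinedIdentity class
# name is an assumption about the PathComputationMethodIdentity hierarchy.
#
#   pp_cfg = (openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel
#             .P2PTunnelAttributes.P2PPrimaryPaths.Config())
#   pp_cfg.name = 'PRIMARY'                             # list key
#   pp_cfg.explicit_path_name = 'PATH-1'                # hypothetical reference
#   pp_cfg.path_computation_method = openconfig_mpls.ExplicitlyDefinedIdentity()
#   pp_cfg.preference = 1                               # 1..255, lower wins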
class State(object):
"""
State parameters related to paths
.. attribute:: cspf_tiebreaker
Determine the tie\-breaking method to choose between equally desirable paths during CSPF computation
**type**\: :py:class:`CspfTieBreakingEnum <ydk.models.openconfig.openconfig_mpls.CspfTieBreakingEnum>`
.. attribute:: explicit_path_name
reference to a defined path
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.Config>`
.. attribute:: hold_priority
preemption priority once the LSP is established, lower is higher priority; default 0 indicates other LSPs will not preempt the LSPs once established
**type**\: int
**range:** 0..7
.. attribute:: name
Path name
**type**\: str
.. attribute:: path_computation_method
The method used for computing the path, either locally computed, queried from a server or not computed at all (explicitly configured)
**type**\: :py:class:`PathComputationMethodIdentity <ydk.models.openconfig.openconfig_mpls.PathComputationMethodIdentity>`
.. attribute:: path_computation_server
Address of the external path computation server
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: preference
Specifies a preference for this path. The lower the number, the higher the preference
**type**\: int
**range:** 1..255
.. attribute:: retry_timer
sets the time between attempts to establish the LSP
**type**\: int
**range:** 1..600
.. attribute:: setup_priority
RSVP\-TE preemption priority during LSP setup, lower is higher priority; default 7 indicates that LSP will not preempt established LSPs during setup
**type**\: int
**range:** 0..7
.. attribute:: use_cspf
Flag to enable CSPF for locally computed LSPs
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.cspf_tiebreaker = None
self.explicit_path_name = None
self.hold_priority = None
self.name = None
self.path_computation_method = None
self.path_computation_server = None
self.preference = None
self.retry_timer = None
self.setup_priority = None
self.use_cspf = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.cspf_tiebreaker is not None:
return True
if self.explicit_path_name is not None:
return True
if self.hold_priority is not None:
return True
if self.name is not None:
return True
if self.path_computation_method is not None:
return True
if self.path_computation_server is not None:
return True
if self.preference is not None:
return True
if self.retry_timer is not None:
return True
if self.setup_priority is not None:
return True
if self.use_cspf is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.State']['meta_info']
class CandidateSecondaryPaths(object):
"""
The set of candidate secondary paths which may be used
for this primary path. When secondary paths are specified
in the list the path of the secondary LSP in use must be
restricted to those path options referenced. The
priority of the secondary paths is specified within the
list. Higher priority values are less preferred \- that is
to say that a path with priority 0 is the most preferred
path. In the case that the list is empty, any secondary
path option may be utilised when the current primary path
is in use.
.. attribute:: candidate_secondary_path
List of secondary paths which may be utilised when the current primary path is in use
**type**\: list of :py:class:`CandidateSecondaryPath <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths.CandidateSecondaryPath>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.candidate_secondary_path = YList()
self.candidate_secondary_path.parent = self
self.candidate_secondary_path.name = 'candidate_secondary_path'
class CandidateSecondaryPath(object):
"""
List of secondary paths which may be utilised when the
current primary path is in use
.. attribute:: secondary_path <key>
A reference to the secondary path option reference which acts as the key of the candidate\-secondary\-path list
**type**\: str
**refers to**\: :py:class:`secondary_path <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths.CandidateSecondaryPath.Config>`
.. attribute:: config
Configuration parameters relating to the candidate secondary path
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths.CandidateSecondaryPath.Config>`
.. attribute:: state
Operational state parameters relating to the candidate secondary path
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths.CandidateSecondaryPath.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.secondary_path = None
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths.CandidateSecondaryPath.Config()
self.config.parent = self
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths.CandidateSecondaryPath.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to the candidate
secondary path
.. attribute:: priority
The priority of the specified secondary path option. Higher priority options are less preferable \- such that a secondary path reference with a priority of 0 is the most preferred
**type**\: int
**range:** 0..65535
.. attribute:: secondary_path
A reference to the secondary path that should be utilised when the containing primary path option is in use
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.Config>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.priority = None
self.secondary_path = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.priority is not None:
return True
if self.secondary_path is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths.CandidateSecondaryPath.Config']['meta_info']
class State(object):
"""
Operational state parameters relating to the candidate
secondary path
.. attribute:: active
Indicates the current active path option that has been selected from the candidate secondary paths
**type**\: bool
.. attribute:: priority
The priority of the specified secondary path option. Higher priority options are less preferable \- such that a secondary path reference with a priority of 0 is the most preferred
**type**\: int
**range:** 0..65535
.. attribute:: secondary_path
A reference to the secondary path that should be utilised when the containing primary path option is in use
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.Config>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.active = None
self.priority = None
self.secondary_path = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.active is not None:
return True
if self.priority is not None:
return True
if self.secondary_path is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths.CandidateSecondaryPath.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.secondary_path is None:
raise YPYModelError('Key property secondary_path is None')
return self.parent._common_path +'/openconfig-mpls:candidate-secondary-path[openconfig-mpls:secondary-path = ' + str(self.secondary_path) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.secondary_path is not None:
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths.CandidateSecondaryPath']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:candidate-secondary-paths'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.candidate_secondary_path is not None:
for child_ref in self.candidate_secondary_path:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.CandidateSecondaryPaths']['meta_info']
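# Candidate-secondary-path sketch (comment-only): entries are keyed by the
# referenced secondary path, and priority 0 is the most preferred option per
# the docstring above. The path name is hypothetical.
#
#   csp = (openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes
#          .P2PPrimaryPaths.CandidateSecondaryPaths.CandidateSecondaryPath())
#   csp.secondary_path = 'BACKUP-1'                     # list key (leafref)
#   csp.config.secondary_path = 'BACKUP-1'
#   csp.config.priority = 0                             # most preferred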
class AdminGroups(object):
"""
Top\-level container for include/exclude constraints for
link affinities
.. attribute:: config
Configuration data
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.AdminGroups.Config>`
.. attribute:: state
Operational state data
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.AdminGroups.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.AdminGroups.Config()
self.config.parent = self
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.AdminGroups.State()
self.state.parent = self
class Config(object):
"""
Configuration data
.. attribute:: exclude_group
list of references to named admin\-groups to exclude in path calculation
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
.. attribute:: include_all_group
list of references to named admin\-groups of which all must be included
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
.. attribute:: include_any_group
list of references to named admin\-groups of which one must be included
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.exclude_group = YLeafList()
self.exclude_group.parent = self
self.exclude_group.name = 'exclude_group'
self.include_all_group = YLeafList()
self.include_all_group.parent = self
self.include_all_group.name = 'include_all_group'
self.include_any_group = YLeafList()
self.include_any_group.parent = self
self.include_any_group.name = 'include_any_group'
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.exclude_group is not None:
for child in self.exclude_group:
if child is not None:
return True
if self.include_all_group is not None:
for child in self.include_all_group:
if child is not None:
return True
if self.include_any_group is not None:
for child in self.include_any_group:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.AdminGroups.Config']['meta_info']
class State(object):
"""
Operational state data
.. attribute:: exclude_group
list of references to named admin\-groups to exclude in path calculation
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
.. attribute:: include_all_group
list of references to named admin\-groups of which all must be included
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
.. attribute:: include_any_group
list of references to named admin\-groups of which one must be included
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.exclude_group = YLeafList()
self.exclude_group.parent = self
self.exclude_group.name = 'exclude_group'
self.include_all_group = YLeafList()
self.include_all_group.parent = self
self.include_all_group.name = 'include_all_group'
self.include_any_group = YLeafList()
self.include_any_group.parent = self
self.include_any_group.name = 'include_any_group'
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.exclude_group is not None:
for child in self.exclude_group:
if child is not None:
return True
if self.include_all_group is not None:
for child in self.include_all_group:
if child is not None:
return True
if self.include_any_group is not None:
for child in self.include_any_group:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.AdminGroups.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:admin-groups'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths.AdminGroups']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.name is None:
raise YPYModelError('Key property name is None')
return self.parent._common_path +'/openconfig-mpls:p2p-primary-paths[openconfig-mpls:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.admin_groups is not None and self.admin_groups._has_data():
return True
if self.candidate_secondary_paths is not None and self.candidate_secondary_paths._has_data():
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PPrimaryPaths']['meta_info']
class P2PSecondaryPaths(object):
"""
List of p2p secondary paths for a tunnel
.. attribute:: name <key>
Path name
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.Config>`
.. attribute:: admin_groups
Top\-level container for include/exclude constraints for link affinities
**type**\: :py:class:`AdminGroups <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.AdminGroups>`
.. attribute:: config
Configuration parameters related to paths
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.Config>`
.. attribute:: state
State parameters related to paths
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.name = None
self.admin_groups = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.AdminGroups()
self.admin_groups.parent = self
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.Config()
self.config.parent = self
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters related to paths
.. attribute:: cspf_tiebreaker
Determine the tie\-breaking method to choose between equally desirable paths during CSPF computation
**type**\: :py:class:`CspfTieBreakingEnum <ydk.models.openconfig.openconfig_mpls.CspfTieBreakingEnum>`
.. attribute:: explicit_path_name
reference to a defined path
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.Config>`
.. attribute:: hold_priority
preemption priority once the LSP is established, lower is higher priority; default 0 indicates other LSPs will not preempt the LSPs once established
**type**\: int
**range:** 0..7
.. attribute:: name
Path name
**type**\: str
.. attribute:: path_computation_method
The method used for computing the path, either locally computed, queried from a server or not computed at all (explicitly configured)
**type**\: :py:class:`PathComputationMethodIdentity <ydk.models.openconfig.openconfig_mpls.PathComputationMethodIdentity>`
.. attribute:: path_computation_server
Address of the external path computation server
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: preference
Specifies a preference for this path. The lower the number, the higher the preference
**type**\: int
**range:** 1..255
.. attribute:: retry_timer
sets the time between attempts to establish the LSP
**type**\: int
**range:** 1..600
.. attribute:: setup_priority
RSVP\-TE preemption priority during LSP setup, lower is higher priority; default 7 indicates that LSP will not preempt established LSPs during setup
**type**\: int
**range:** 0..7
.. attribute:: use_cspf
Flag to enable CSPF for locally computed LSPs
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.cspf_tiebreaker = None
self.explicit_path_name = None
self.hold_priority = None
self.name = None
self.path_computation_method = None
self.path_computation_server = None
self.preference = None
self.retry_timer = None
self.setup_priority = None
self.use_cspf = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.cspf_tiebreaker is not None:
return True
if self.explicit_path_name is not None:
return True
if self.hold_priority is not None:
return True
if self.name is not None:
return True
if self.path_computation_method is not None:
return True
if self.path_computation_server is not None:
return True
if self.preference is not None:
return True
if self.retry_timer is not None:
return True
if self.setup_priority is not None:
return True
if self.use_cspf is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.Config']['meta_info']
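# Illustrative sketch, not generated code: a secondary-path Config using the
# ranges documented above; every value here is hypothetical.
#
#   path_cfg = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes. \
#       P2PSecondaryPaths.Config()
#   path_cfg.name = 'backup-1'
#   path_cfg.setup_priority = 7   # 0..7; 7 = never preempts during setup
#   path_cfg.hold_priority = 0    # 0..7; 0 = never preempted once up
#   path_cfg.preference = 10      # 1..255; lower is preferred
#   path_cfg.retry_timer = 30     # 1..600 seconds between setup attempts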
class State(object):
"""
State parameters related to paths
.. attribute:: cspf_tiebreaker
Determine the tie\-breaking method to choose between equally desirable paths during CSPF computation
**type**\: :py:class:`CspfTieBreakingEnum <ydk.models.openconfig.openconfig_mpls.CspfTieBreakingEnum>`
.. attribute:: explicit_path_name
reference to a defined path
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.NamedExplicitPaths.Config>`
.. attribute:: hold_priority
preemption priority once the LSP is established, lower is higher priority; default 0 indicates other LSPs will not preempt the LSPs once established
**type**\: int
**range:** 0..7
.. attribute:: name
Path name
**type**\: str
.. attribute:: path_computation_method
The method used for computing the path, either locally computed, queried from a server or not computed at all (explicitly configured)
**type**\: :py:class:`PathComputationMethodIdentity <ydk.models.openconfig.openconfig_mpls.PathComputationMethodIdentity>`
.. attribute:: path_computation_server
Address of the external path computation server
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: preference
Specifies a preference for this path. The lower the number, the higher the preference
**type**\: int
**range:** 1..255
.. attribute:: retry_timer
sets the time between attempts to establish the LSP
**type**\: int
**range:** 1..600
.. attribute:: setup_priority
RSVP\-TE preemption priority during LSP setup, lower is higher priority; default 7 indicates that LSP will not preempt established LSPs during setup
**type**\: int
**range:** 0..7
.. attribute:: use_cspf
Flag to enable CSPF for locally computed LSPs
**type**\: bool
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.cspf_tiebreaker = None
self.explicit_path_name = None
self.hold_priority = None
self.name = None
self.path_computation_method = None
self.path_computation_server = None
self.preference = None
self.retry_timer = None
self.setup_priority = None
self.use_cspf = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.cspf_tiebreaker is not None:
return True
if self.explicit_path_name is not None:
return True
if self.hold_priority is not None:
return True
if self.name is not None:
return True
if self.path_computation_method is not None:
return True
if self.path_computation_server is not None:
return True
if self.preference is not None:
return True
if self.retry_timer is not None:
return True
if self.setup_priority is not None:
return True
if self.use_cspf is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.State']['meta_info']
class AdminGroups(object):
"""
Top\-level container for include/exclude constraints for
link affinities
.. attribute:: config
Configuration data
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.AdminGroups.Config>`
.. attribute:: state
Operational state data
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.AdminGroups.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.AdminGroups.Config()
self.config.parent = self
self.state = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.AdminGroups.State()
self.state.parent = self
class Config(object):
"""
Configuration data
.. attribute:: exclude_group
list of references to named admin\-groups to exclude in path calculation
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
.. attribute:: include_all_group
list of references to named admin\-groups of which all must be included
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
.. attribute:: include_any_group
list of references to named admin\-groups of which one must be included
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.exclude_group = YLeafList()
self.exclude_group.parent = self
self.exclude_group.name = 'exclude_group'
self.include_all_group = YLeafList()
self.include_all_group.parent = self
self.include_all_group.name = 'include_all_group'
self.include_any_group = YLeafList()
self.include_any_group.parent = self
self.include_any_group.name = 'include_any_group'
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.exclude_group is not None:
for child in self.exclude_group:
if child is not None:
return True
if self.include_all_group is not None:
for child in self.include_all_group:
if child is not None:
return True
if self.include_any_group is not None:
for child in self.include_any_group:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.AdminGroups.Config']['meta_info']
class State(object):
"""
Operational state data
.. attribute:: exclude_group
list of references to named admin\-groups to exclude in path calculation
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
.. attribute:: include_all_group
list of references to named admin\-groups of which all must be included
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
.. attribute:: include_any_group
list of references to named admin\-groups of which one must be included
**type**\: list of str
**refers to**\: :py:class:`admin_group_name <ydk.models.openconfig.openconfig_mpls.Mpls.TeGlobalAttributes.MplsAdminGroups.AdminGroup>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.exclude_group = YLeafList()
self.exclude_group.parent = self
self.exclude_group.name = 'exclude_group'
self.include_all_group = YLeafList()
self.include_all_group.parent = self
self.include_all_group.name = 'include_all_group'
self.include_any_group = YLeafList()
self.include_any_group.parent = self
self.include_any_group.name = 'include_any_group'
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.exclude_group is not None:
for child in self.exclude_group:
if child is not None:
return True
if self.include_all_group is not None:
for child in self.include_all_group:
if child is not None:
return True
if self.include_any_group is not None:
for child in self.include_any_group:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.AdminGroups.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:admin-groups'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths.AdminGroups']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
if self.name is None:
raise YPYModelError('Key property name is None')
return self.parent._common_path +'/openconfig-mpls:p2p-secondary-paths[openconfig-mpls:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.admin_groups is not None and self.admin_groups._has_data():
return True
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes.P2PSecondaryPaths']['meta_info']
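# Illustrative sketch, not generated code: P2PSecondaryPaths entries are
# keyed list members created by the caller and appended to the parent's
# p2p_secondary_paths YList; 'p2p_attrs' and the path name are hypothetical.
#
#   sec = Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes. \
#       P2PSecondaryPaths()
#   sec.name = 'backup-1'                      # list key used in the XPath
#   p2p_attrs.p2p_secondary_paths.append(sec)  # p2p_attrs built elsewhere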
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:p2p-tunnel-attributes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.p2p_primary_paths is not None:
for child_ref in self.p2p_primary_paths:
if child_ref._has_data():
return True
if self.p2p_secondary_paths is not None:
for child_ref in self.p2p_secondary_paths:
if child_ref._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel.P2PTunnelAttributes']['meta_info']
@property
def _common_path(self):
if self.name is None:
raise YPYModelError('Key property name is None')
if self.type is None:
raise YPYModelError('Key property type is None')
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:constrained-path/openconfig-mpls:tunnel[openconfig-mpls:name = ' + str(self.name) + '][openconfig-mpls:type = ' + str(self.type) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.type is not None:
return True
if self.bandwidth is not None and self.bandwidth._has_data():
return True
if self.config is not None and self.config._has_data():
return True
if self.p2p_tunnel_attributes is not None and self.p2p_tunnel_attributes._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath.Tunnel']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:constrained-path'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.named_explicit_paths is not None:
for child_ref in self.named_explicit_paths:
if child_ref._has_data():
return True
if self.tunnel is not None:
for child_ref in self.tunnel:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.ConstrainedPath']['meta_info']
class UnconstrainedPath(object):
"""
LSPs that use the IGP\-determined path, i.e., non
traffic\-engineered, or non constrained\-path
.. attribute:: path_setup_protocol
select and configure the signaling method for the LSP
**type**\: :py:class:`PathSetupProtocol <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.path_setup_protocol = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol()
self.path_setup_protocol.parent = self
class PathSetupProtocol(object):
"""
select and configure the signaling method for
the LSP
.. attribute:: ldp
LDP signaling setup for IGP\-congruent LSPs
**type**\: :py:class:`Ldp <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp>`
.. attribute:: segment_routing
segment routing signaling extensions for IGP\-congruent LSPs
**type**\: :py:class:`SegmentRouting <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.ldp = None
self.segment_routing = None
class Ldp(object):
"""
LDP signaling setup for IGP\-congruent LSPs
.. attribute:: tunnel
contains configuration stanzas for different LSP tunnel types (P2P, P2MP, etc.)
**type**\: :py:class:`Tunnel <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel>`
.. attribute:: _is_presence
Is present if this instance represents a presence container, else not
**type**\: bool
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self._is_presence = True
self.tunnel = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel()
self.tunnel.parent = self
class Tunnel(object):
"""
contains configuration stanzas for different LSP
tunnel types (P2P, P2MP, etc.)
.. attribute:: ldp_type
specify basic or targeted LDP LSP
**type**\: :py:class:`LdpTypeEnum <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.LdpTypeEnum>`
.. attribute:: mp2mp_lsp
properties of multipoint\-to\-multipoint tunnels
**type**\: :py:class:`Mp2MpLsp <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.Mp2MpLsp>`
.. attribute:: p2mp_lsp
properties of point\-to\-multipoint tunnels
**type**\: :py:class:`P2MpLsp <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.P2MpLsp>`
.. attribute:: p2p_lsp
properties of point\-to\-point tunnels
**type**\: :py:class:`P2PLsp <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.P2PLsp>`
.. attribute:: tunnel_type
specifies the type of LSP, e.g., P2P or P2MP
**type**\: :py:class:`TunnelTypeEnum <ydk.models.openconfig.openconfig_mpls_types.TunnelTypeEnum>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.ldp_type = None
self.mp2mp_lsp = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.Mp2MpLsp()
self.mp2mp_lsp.parent = self
self.p2mp_lsp = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.P2MpLsp()
self.p2mp_lsp.parent = self
self.p2p_lsp = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.P2PLsp()
self.p2p_lsp.parent = self
self.tunnel_type = None
class LdpTypeEnum(Enum):
"""
LdpTypeEnum
specify basic or targeted LDP LSP
.. data:: BASIC = 0
basic hop-by-hop LSP
.. data:: TARGETED = 1
tLDP LSP
"""
BASIC = 0
TARGETED = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.LdpTypeEnum']
class P2PLsp(object):
"""
properties of point\-to\-point tunnels
.. attribute:: fec_address
Address prefix for packets sharing the same forwarding equivalence class for the IGP\-based LSP
**type**\: one of the below types:
**type**\: list of str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
----
**type**\: list of str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
----
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.fec_address = YLeafList()
self.fec_address.parent = self
self.fec_address.name = 'fec_address'
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path/openconfig-mpls:path-setup-protocol/openconfig-mpls:ldp/openconfig-mpls:tunnel/openconfig-mpls:p2p-lsp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.fec_address is not None:
for child in self.fec_address:
if child is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.P2PLsp']['meta_info']
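# Illustrative sketch, not generated code: fec_address is a leaf-list of
# IPv4 or IPv6 prefixes matching the patterns above; the prefixes are
# hypothetical.
#
#   tunnel = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel()
#   tunnel.p2p_lsp.fec_address.append('10.1.1.0/24')    # IPv4 prefix form
#   tunnel.p2p_lsp.fec_address.append('2001:db8::/32')  # IPv6 prefix form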
class P2MpLsp(object):
"""
properties of point\-to\-multipoint tunnels
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path/openconfig-mpls:path-setup-protocol/openconfig-mpls:ldp/openconfig-mpls:tunnel/openconfig-mpls:p2mp-lsp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.P2MpLsp']['meta_info']
class Mp2MpLsp(object):
"""
properties of multipoint\-to\-multipoint tunnels
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path/openconfig-mpls:path-setup-protocol/openconfig-mpls:ldp/openconfig-mpls:tunnel/openconfig-mpls:mp2mp-lsp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.Mp2MpLsp']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path/openconfig-mpls:path-setup-protocol/openconfig-mpls:ldp/openconfig-mpls:tunnel'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.ldp_type is not None:
return True
if self.mp2mp_lsp is not None and self.mp2mp_lsp._has_data():
return True
if self.p2mp_lsp is not None and self.p2mp_lsp._has_data():
return True
if self.p2p_lsp is not None and self.p2p_lsp._has_data():
return True
if self.tunnel_type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path/openconfig-mpls:path-setup-protocol/openconfig-mpls:ldp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self._is_presence:
return True
if self.tunnel is not None and self.tunnel._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp']['meta_info']
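# Illustrative sketch, not generated code: Ldp is a presence container, so
# simply attaching an instance is meaningful (_has_data() returns True via
# _is_presence even before any child is set). PathSetupProtocol leaves
# self.ldp as None, so the caller wires it up:
#
#   psp = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol()
#   psp.ldp = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp()
#   psp.ldp.parent = psp
#   psp.ldp.tunnel.ldp_type = \
#       Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.Ldp.Tunnel.LdpTypeEnum.BASIC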
class SegmentRouting(object):
"""
segment routing signaling extensions for
IGP\-congruent LSPs
.. attribute:: tunnel
contains configuration stanzas for different LSP tunnel types (P2P, P2MP, etc.)
**type**\: :py:class:`Tunnel <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel>`
.. attribute:: _is_presence
Is present if this instance represents a presence container, else not
**type**\: bool
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self._is_presence = True
self.tunnel = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel()
self.tunnel.parent = self
class Tunnel(object):
"""
contains configuration stanzas for different LSP
tunnel types (P2P, P2MP, etc.)
.. attribute:: p2p_lsp
properties of point\-to\-point tunnels
**type**\: :py:class:`P2PLsp <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp>`
.. attribute:: tunnel_type
specifies the type of LSP, e.g., P2P or P2MP
**type**\: :py:class:`TunnelTypeEnum <ydk.models.openconfig.openconfig_mpls_types.TunnelTypeEnum>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.p2p_lsp = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp()
self.p2p_lsp.parent = self
self.tunnel_type = None
class P2PLsp(object):
"""
properties of point\-to\-point tunnels
.. attribute:: fec
List of FECs that are to be originated as SR LSPs
**type**\: list of :py:class:`Fec <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.fec = YList()
self.fec.parent = self
self.fec.name = 'fec'
class Fec(object):
"""
List of FECs that are to be originated as SR LSPs
.. attribute:: fec_address <key>
FEC that is to be advertised as part of the Prefix\-SID
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
----
.. attribute:: config
Configuration parameters relating to the FEC to be advertised by SR
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.Config>`
.. attribute:: prefix_sid
Parameters relating to the Prefix\-SID used for the originated FEC
**type**\: :py:class:`PrefixSid <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid>`
.. attribute:: state
Operational state relating to a FEC advertised by SR
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.fec_address = None
self.config = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.Config()
self.config.parent = self
self.prefix_sid = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid()
self.prefix_sid.parent = self
self.state = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to the FEC to be
advertised by SR
.. attribute:: fec_address
FEC that is to be advertised as part of the Prefix\-SID
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
----
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.fec_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.fec_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.Config']['meta_info']
class State(object):
"""
Operational state relating to a FEC advertised by SR
.. attribute:: fec_address
FEC that is to be advertised as part of the Prefix\-SID
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
----
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.fec_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.fec_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.State']['meta_info']
class PrefixSid(object):
"""
Parameters relating to the Prefix\-SID
used for the originated FEC
.. attribute:: config
Configuration parameters relating to the Prefix\-SID used for the originated FEC
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.Config>`
.. attribute:: state
Operational state parameters relating to the Prefix\-SID used for the originated FEC
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.State>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.config = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.Config()
self.config.parent = self
self.state = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.State()
self.state.parent = self
class Config(object):
"""
Configuration parameters relating to the Prefix\-SID
used for the originated FEC
.. attribute:: last_hop_behavior
Configuration relating to the LFIB actions for the Prefix\-SID to be used by the penultimate\-hop
**type**\: :py:class:`LastHopBehaviorEnum <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.Config.LastHopBehaviorEnum>`
.. attribute:: node_flag
Specifies that the Prefix\-SID is to be treated as a Node\-SID by setting the N\-flag in the advertised Prefix\-SID TLV in the IGP
**type**\: bool
.. attribute:: type
Specifies how the value of the Prefix\-SID should be interpreted \- whether as an offset to the SRGB, or as an absolute value
**type**\: :py:class:`TypeEnum <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.Config.TypeEnum>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.last_hop_behavior = None
self.node_flag = None
self.type = None
class LastHopBehaviorEnum(Enum):
"""
LastHopBehaviorEnum
Configuration relating to the LFIB actions for the
Prefix\-SID to be used by the penultimate\-hop
.. data:: EXPLICIT_NULL = 0
Specifies that the explicit null label is to be used
when the penultimate hop forwards a labelled packet to
this Prefix-SID
.. data:: UNCHANGED = 1
Specifies that the Prefix-SID's label value is to be
left in place when the penultimate hop forwards to this
Prefix-SID
.. data:: PHP = 2
Specifies that the penultimate hop should pop the
Prefix-SID label before forwarding to the eLER
"""
EXPLICIT_NULL = 0
UNCHANGED = 1
PHP = 2
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.Config.LastHopBehaviorEnum']
class TypeEnum(Enum):
"""
TypeEnum
Specifies how the value of the Prefix\-SID should be
interpreted \- whether as an offset to the SRGB, or as an
absolute value
.. data:: INDEX = 0
Set when the value of the prefix SID should be specified
as an offset from the SRGB's zero-value. When multiple
SRGBs are specified, the zero-value is the minimum
of their lower bounds
.. data:: ABSOLUTE = 1
Set when the value of a prefix SID is specified as the
absolute value within an SRGB. It is an error to specify
an absolute value outside of a specified SRGB
"""
INDEX = 0
ABSOLUTE = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.Config.TypeEnum']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:config'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.last_hop_behavior is not None:
return True
if self.node_flag is not None:
return True
if self.type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.Config']['meta_info']
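# Illustrative sketch, not generated code: a Prefix-SID Config whose value
# is interpreted as an index into the SRGB; the choices are hypothetical.
#
#   sid_cfg = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting. \
#       Tunnel.P2PLsp.Fec.PrefixSid.Config()
#   sid_cfg.type = sid_cfg.TypeEnum.INDEX      # offset from the SRGB base
#   sid_cfg.node_flag = True                   # set the N-flag (Node-SID)
#   sid_cfg.last_hop_behavior = sid_cfg.LastHopBehaviorEnum.PHP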
class State(object):
"""
Operational state parameters relating to the
Prefix\-SID used for the originated FEC
.. attribute:: last_hop_behavior
Configuration relating to the LFIB actions for the Prefix\-SID to be used by the penultimate\-hop
**type**\: :py:class:`LastHopBehaviorEnum <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.State.LastHopBehaviorEnum>`
.. attribute:: node_flag
Specifies that the Prefix\-SID is to be treated as a Node\-SID by setting the N\-flag in the advertised Prefix\-SID TLV in the IGP
**type**\: bool
.. attribute:: type
Specifies how the value of the Prefix\-SID should be interpreted \- whether as an offset to the SRGB, or as an absolute value
**type**\: :py:class:`TypeEnum <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.State.TypeEnum>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.last_hop_behavior = None
self.node_flag = None
self.type = None
class LastHopBehaviorEnum(Enum):
"""
LastHopBehaviorEnum
Configuration relating to the LFIB actions for the
Prefix\-SID to be used by the penultimate\-hop
.. data:: EXPLICIT_NULL = 0
Specifies that the explicit null label is to be used
when the penultimate hop forwards a labelled packet to
this Prefix-SID
.. data:: UNCHANGED = 1
Specifies that the Prefix-SID's label value is to be
left in place when the penultimate hop forwards to this
Prefix-SID
.. data:: PHP = 2
Specifies that the penultimate hop should pop the
Prefix-SID label before forwarding to the eLER
"""
EXPLICIT_NULL = 0
UNCHANGED = 1
PHP = 2
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.State.LastHopBehaviorEnum']
class TypeEnum(Enum):
"""
TypeEnum
Specifies how the value of the Prefix\-SID should be
interpreted \- whether as an offset to the SRGB, or as an
absolute value
.. data:: INDEX = 0
Set when the value of the prefix SID should be specified
as an offset from the SRGB's zero-value. When multiple
SRGBs are specified, the zero-value is the minimum
of their lower bounds
.. data:: ABSOLUTE = 1
Set when the value of a prefix SID is specified as the
absolute value within an SRGB. It is an error to specify
an absolute value outside of a specified SRGB
"""
INDEX = 0
ABSOLUTE = 1
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.State.TypeEnum']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.last_hop_behavior is not None:
return True
if self.node_flag is not None:
return True
if self.type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid.State']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:prefix-sid'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.config is not None and self.config._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec.PrefixSid']['meta_info']
@property
def _common_path(self):
if self.fec_address is None:
raise YPYModelError('Key property fec_address is None')
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path/openconfig-mpls:path-setup-protocol/openconfig-mpls:segment-routing/openconfig-mpls:tunnel/openconfig-mpls:p2p-lsp/openconfig-mpls:fec[openconfig-mpls:fec-address = ' + str(self.fec_address) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.fec_address is not None:
return True
if self.config is not None and self.config._has_data():
return True
if self.prefix_sid is not None and self.prefix_sid._has_data():
return True
if self.state is not None and self.state._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp.Fec']['meta_info']
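# Illustrative sketch, not generated code: originating a hypothetical prefix
# as an SR LSP; fec_address is the list key that appears in the XPath.
#
#   fec = Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting. \
#       Tunnel.P2PLsp.Fec()
#   fec.fec_address = '10.1.1.0/24'
#   fec.config.fec_address = fec.fec_address
#   sr_tunnel.p2p_lsp.fec.append(fec)   # sr_tunnel assumed built separately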
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path/openconfig-mpls:path-setup-protocol/openconfig-mpls:segment-routing/openconfig-mpls:tunnel/openconfig-mpls:p2p-lsp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.fec is not None:
for child_ref in self.fec:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel.P2PLsp']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path/openconfig-mpls:path-setup-protocol/openconfig-mpls:segment-routing/openconfig-mpls:tunnel'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.p2p_lsp is not None and self.p2p_lsp._has_data():
return True
if self.tunnel_type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting.Tunnel']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path/openconfig-mpls:path-setup-protocol/openconfig-mpls:segment-routing'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self._is_presence:
return True
if self.tunnel is not None and self.tunnel._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol.SegmentRouting']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path/openconfig-mpls:path-setup-protocol'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.ldp is not None and self.ldp._has_data():
return True
if self.segment_routing is not None and self.segment_routing._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath.PathSetupProtocol']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:unconstrained-path'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.path_setup_protocol is not None and self.path_setup_protocol._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.UnconstrainedPath']['meta_info']
class StaticLsps(object):
"""
statically configured LSPs, without dynamic
signaling
.. attribute:: label_switched_path
list of defined static LSPs
**type**\: list of :py:class:`LabelSwitchedPath <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.StaticLsps.LabelSwitchedPath>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.label_switched_path = YList()
self.label_switched_path.parent = self
self.label_switched_path.name = 'label_switched_path'
class LabelSwitchedPath(object):
"""
list of defined static LSPs
.. attribute:: name <key>
name to identify the LSP
**type**\: str
.. attribute:: egress
static LSPs for which the router is an egress node
**type**\: :py:class:`Egress <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.StaticLsps.LabelSwitchedPath.Egress>`
.. attribute:: ingress
Static LSPs for which the router is an ingress node
**type**\: :py:class:`Ingress <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.StaticLsps.LabelSwitchedPath.Ingress>`
.. attribute:: transit
static LSPs for which the router is a transit node
**type**\: :py:class:`Transit <ydk.models.openconfig.openconfig_mpls.Mpls.Lsps.StaticLsps.LabelSwitchedPath.Transit>`
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.name = None
self.egress = Mpls.Lsps.StaticLsps.LabelSwitchedPath.Egress()
self.egress.parent = self
self.ingress = Mpls.Lsps.StaticLsps.LabelSwitchedPath.Ingress()
self.ingress.parent = self
self.transit = Mpls.Lsps.StaticLsps.LabelSwitchedPath.Transit()
self.transit.parent = self
class Ingress(object):
"""
Static LSPs for which the router is an
ingress node
.. attribute:: incoming_label
label value on the incoming packet
**type**\: one of the below types:
**type**\: int
**range:** 16..1048575
----
**type**\: :py:class:`MplsLabelEnum <ydk.models.openconfig.openconfig_mpls_types.MplsLabelEnum>`
----
.. attribute:: next_hop
next hop IP address for the LSP
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: push_label
label value to push at the current hop for the LSP
**type**\: one of the below types:
**type**\: int
**range:** 16..1048575
----
**type**\: :py:class:`MplsLabelEnum <ydk.models.openconfig.openconfig_mpls_types.MplsLabelEnum>`
----
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.incoming_label = None
self.next_hop = None
self.push_label = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:ingress'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.incoming_label is not None:
return True
if self.next_hop is not None:
return True
if self.push_label is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.StaticLsps.LabelSwitchedPath.Ingress']['meta_info']
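# Illustrative sketch, not generated code: an ingress entry that pushes a
# hypothetical label toward a hypothetical next hop.
#
#   lsp = Mpls.Lsps.StaticLsps.LabelSwitchedPath()
#   lsp.name = 'static-to-pe1'
#   lsp.ingress.push_label = 100010   # 16..1048575, or an MplsLabelEnum value
#   lsp.ingress.next_hop = '192.0.2.1'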
class Transit(object):
"""
static LSPs for which the router is a
transit node
.. attribute:: incoming_label
label value on the incoming packet
**type**\: one of the below types:
**type**\: int
**range:** 16..1048575
----
**type**\: :py:class:`MplsLabelEnum <ydk.models.openconfig.openconfig_mpls_types.MplsLabelEnum>`
----
.. attribute:: next_hop
next hop IP address for the LSP
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: push_label
label value to push at the current hop for the LSP
**type**\: one of the below types:
**type**\: int
**range:** 16..1048575
----
**type**\: :py:class:`MplsLabelEnum <ydk.models.openconfig.openconfig_mpls_types.MplsLabelEnum>`
----
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.incoming_label = None
self.next_hop = None
self.push_label = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:transit'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.incoming_label is not None:
return True
if self.next_hop is not None:
return True
if self.push_label is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.StaticLsps.LabelSwitchedPath.Transit']['meta_info']
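# Illustrative sketch, not generated code: a transit entry that swaps a
# hypothetical incoming label for an outgoing one ('lsp' as sketched above).
#
#   lsp.transit.incoming_label = 100010   # label on the received packet
#   lsp.transit.push_label = 100020       # label pushed toward the next hop
#   lsp.transit.next_hop = '198.51.100.7'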
class Egress(object):
"""
static LSPs for which the router is an
egress node
.. attribute:: incoming_label
label value on the incoming packet
**type**\: one of the below types:
**type**\: int
**range:** 16..1048575
----
**type**\: :py:class:`MplsLabelEnum <ydk.models.openconfig.openconfig_mpls_types.MplsLabelEnum>`
----
.. attribute:: next_hop
next hop IP address for the LSP
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: push_label
label value to push at the current hop for the LSP
**type**\: one of the below types:
**type**\: int
**range:** 16..1048575
----
**type**\: :py:class:`MplsLabelEnum <ydk.models.openconfig.openconfig_mpls_types.MplsLabelEnum>`
----
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
self.parent = None
self.incoming_label = None
self.next_hop = None
self.push_label = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set. Cannot derive path.')
return self.parent._common_path +'/openconfig-mpls:egress'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.incoming_label is not None:
return True
if self.next_hop is not None:
return True
if self.push_label is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.StaticLsps.LabelSwitchedPath.Egress']['meta_info']
@property
def _common_path(self):
if self.name is None:
raise YPYModelError('Key property name is None')
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:static-lsps/openconfig-mpls:label-switched-path[openconfig-mpls:name = ' + str(self.name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.name is not None:
return True
if self.egress is not None and self.egress._has_data():
return True
if self.ingress is not None and self.ingress._has_data():
return True
if self.transit is not None and self.transit._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.StaticLsps.LabelSwitchedPath']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps/openconfig-mpls:static-lsps'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.label_switched_path is not None:
for child_ref in self.label_switched_path:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps.StaticLsps']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls/openconfig-mpls:lsps'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.constrained_path is not None and self.constrained_path._has_data():
return True
if self.static_lsps is not None and self.static_lsps._has_data():
return True
if self.unconstrained_path is not None and self.unconstrained_path._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls.Lsps']['meta_info']
@property
def _common_path(self):
return '/openconfig-mpls:mpls'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self._is_presence:
return True
if self.global_ is not None and self.global_._has_data():
return True
if self.lsps is not None and self.lsps._has_data():
return True
if self.signaling_protocols is not None and self.signaling_protocols._has_data():
return True
if self.te_global_attributes is not None and self.te_global_attributes._has_data():
return True
if self.te_interface_attributes is not None and self.te_interface_attributes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['Mpls']['meta_info']
class LocallyComputedIdentity(PathComputationMethodIdentity):
"""
indicates a constrained\-path LSP in which the
path is computed by the local LER
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
PathComputationMethodIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['LocallyComputedIdentity']['meta_info']
class ExplicitlyDefinedIdentity(PathComputationMethodIdentity):
"""
constrained\-path LSP in which the path is
explicitly specified as a collection of strict or/and loose
hops
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
PathComputationMethodIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['ExplicitlyDefinedIdentity']['meta_info']
class ExternallyQueriedIdentity(PathComputationMethodIdentity):
"""
constrained\-path LSP in which the path is
obtained by querying an external source, such as a PCE server
"""
_prefix = 'mpls'
_revision = '2015-11-05'
def __init__(self):
PathComputationMethodIdentity.__init__(self)
@staticmethod
def _meta_info():
from ydk.models.openconfig._meta import _openconfig_mpls as meta
return meta._meta_table['ExternallyQueriedIdentity']['meta_info']
| abhikeshav/ydk-py | openconfig/ydk/models/openconfig/openconfig_mpls.py | Python | apache-2.0 | 526,973 |
"""
sphinx.builders.gettext
~~~~~~~~~~~~~~~~~~~~~~~
The MessageCatalogBuilder class.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from codecs import open
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta, tzinfo
from os import getenv, path, walk
from time import time
from typing import Any, DefaultDict, Dict, Generator, Iterable, List, Set, Tuple, Union
from uuid import uuid4
from docutils import nodes
from docutils.nodes import Element
from sphinx import addnodes, package_dir
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.domains.python import pairindextypes
from sphinx.errors import ThemeError
from sphinx.locale import __
from sphinx.util import logging, split_index_msg, status_iterator
from sphinx.util.console import bold # type: ignore
from sphinx.util.i18n import CatalogInfo, docname_to_domain
from sphinx.util.nodes import extract_messages, traverse_translatable_index
from sphinx.util.osutil import canon_path, ensuredir, relpath
from sphinx.util.tags import Tags
from sphinx.util.template import SphinxRenderer
logger = logging.getLogger(__name__)
class Message:
"""An entry of translatable message."""
def __init__(self, text: str, locations: List[Tuple[str, int]], uuids: List[str]):
self.text = text
self.locations = locations
self.uuids = uuids
class Catalog:
"""Catalog of translatable messages."""
def __init__(self) -> None:
self.messages: List[str] = [] # retain insertion order, a la OrderedDict
# msgid -> file, line, uid
self.metadata: Dict[str, List[Tuple[str, int, str]]] = OrderedDict()
def add(self, msg: str, origin: Union[Element, "MsgOrigin"]) -> None:
if not hasattr(origin, 'uid'):
            # Nodes that are replicated, like todo nodes, don't have a uid;
            # i18n is unnecessary for them anyway.
return
if msg not in self.metadata: # faster lookup in hash
self.messages.append(msg)
self.metadata[msg] = []
self.metadata[msg].append((origin.source, origin.line, origin.uid)) # type: ignore
def __iter__(self) -> Generator[Message, None, None]:
for message in self.messages:
positions = [(source, line) for source, line, uuid in self.metadata[message]]
uuids = [uuid for source, line, uuid in self.metadata[message]]
yield Message(message, positions, uuids)
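# Usage sketch (illustrative, not part of Sphinx): adding the same msgid from
# two origins yields a single Message carrying both locations.
#
#   catalog = Catalog()
#   catalog.add('Hello world', MsgOrigin('index.rst', 1))
#   catalog.add('Hello world', MsgOrigin('usage.rst', 9))
#   [message] = list(catalog)
#   message.text       # 'Hello world'
#   message.locations  # [('index.rst', 1), ('usage.rst', 9)]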
class MsgOrigin:
"""
Origin holder for Catalog message origin.
"""
def __init__(self, source: str, line: int) -> None:
self.source = source
self.line = line
self.uid = uuid4().hex
class GettextRenderer(SphinxRenderer):
def __init__(self, template_path: str = None, outdir: str = None) -> None:
self.outdir = outdir
if template_path is None:
template_path = path.join(package_dir, 'templates', 'gettext')
super().__init__(template_path)
def escape(s: str) -> str:
s = s.replace('\\', r'\\')
s = s.replace('"', r'\"')
return s.replace('\n', '\\n"\n"')
        # install the PO-style escape function as the 'e'/'escape' filters
self.env.filters['e'] = escape
self.env.filters['escape'] = escape
def render(self, filename: str, context: Dict) -> str:
def _relpath(s: str) -> str:
return canon_path(relpath(s, self.outdir))
context['relpath'] = _relpath
return super().render(filename, context)
class I18nTags(Tags):
"""Dummy tags module for I18nBuilder.
    To translate all text inside of ``only`` nodes, this class
    always returns True even if no tags are defined.
"""
def eval_condition(self, condition: Any) -> bool:
return True
class I18nBuilder(Builder):
"""
General i18n builder.
"""
name = 'i18n'
versioning_method = 'text'
    versioning_compare: bool = None  # set from the 'gettext_uuid' config value
use_message_catalog = False
def init(self) -> None:
super().init()
self.env.set_versioning_method(self.versioning_method,
self.env.config.gettext_uuid)
self.tags = I18nTags()
self.catalogs: DefaultDict[str, Catalog] = defaultdict(Catalog)
def get_target_uri(self, docname: str, typ: str = None) -> str:
return ''
def get_outdated_docs(self) -> Set[str]:
return self.env.found_docs
def prepare_writing(self, docnames: Set[str]) -> None:
return
def compile_catalogs(self, catalogs: Set[CatalogInfo], message: str) -> None:
return
def write_doc(self, docname: str, doctree: nodes.document) -> None:
catalog = self.catalogs[docname_to_domain(docname, self.config.gettext_compact)]
for toctree in self.env.tocs[docname].traverse(addnodes.toctree):
for node, msg in extract_messages(toctree):
node.uid = '' # type: ignore # Hack UUID model
catalog.add(msg, node)
for node, msg in extract_messages(doctree):
catalog.add(msg, node)
if 'index' in self.env.config.gettext_additional_targets:
# Extract translatable messages from index entries.
for node, entries in traverse_translatable_index(doctree):
for typ, msg, tid, main, key_ in entries:
for m in split_index_msg(typ, msg):
if typ == 'pair' and m in pairindextypes.values():
                            # skip built-in translated messages that were already
                            # incorporated by 'sphinx.util.nodes.process_index_entry'
continue
catalog.add(m, node)
# determine tzoffset once to remain unaffected by DST change during build
timestamp = time()
tzdelta = datetime.fromtimestamp(timestamp) - \
datetime.utcfromtimestamp(timestamp)
# set timestamp from SOURCE_DATE_EPOCH if set
# see https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = getenv('SOURCE_DATE_EPOCH')
if source_date_epoch is not None:
timestamp = float(source_date_epoch)
tzdelta = timedelta(0)
class LocalTimeZone(tzinfo):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs) # type: ignore
self.tzdelta = tzdelta
def utcoffset(self, dt: datetime) -> timedelta:
return self.tzdelta
def dst(self, dt: datetime) -> timedelta:
return timedelta(0)
ltz = LocalTimeZone()
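# Reproducible-builds note (illustrative): running e.g.
#   SOURCE_DATE_EPOCH=1577836800 sphinx-build -b gettext . _build/gettext
# pins `timestamp` and zeroes `tzdelta` above, so every POT-Creation-Date
# header derives from that fixed instant and repeated builds emit identical
# catalogs.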
def should_write(filepath: str, new_content: str) -> bool:
if not path.exists(filepath):
return True
try:
with open(filepath, encoding='utf-8') as oldpot:
old_content = oldpot.read()
old_header_index = old_content.index('"POT-Creation-Date:')
new_header_index = new_content.index('"POT-Creation-Date:')
old_body_index = old_content.index('"PO-Revision-Date:')
new_body_index = new_content.index('"PO-Revision-Date:')
return ((old_content[:old_header_index] != new_content[:new_header_index]) or
(new_content[new_body_index:] != old_content[old_body_index:]))
except ValueError:
pass
return True
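# Intent sketch (comments only): should_write compares the two catalogs with
# the volatile '"POT-Creation-Date:' header region cut out, so a .pot file
# whose messages are unchanged is not rewritten and its mtime stays stable
# between builds.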
class MessageCatalogBuilder(I18nBuilder):
"""
Builds gettext-style message catalogs (.pot files).
"""
name = 'gettext'
epilog = __('The message catalogs are in %(outdir)s.')
def init(self) -> None:
super().init()
self.create_template_bridge()
self.templates.init(self)
def _collect_templates(self) -> Set[str]:
template_files = set()
for template_path in self.config.templates_path:
tmpl_abs_path = path.join(self.app.srcdir, template_path)
for dirpath, dirs, files in walk(tmpl_abs_path):
for fn in files:
if fn.endswith('.html'):
filename = canon_path(path.join(dirpath, fn))
template_files.add(filename)
return template_files
def _extract_from_template(self) -> None:
files = list(self._collect_templates())
files.sort()
logger.info(bold(__('building [%s]: ') % self.name), nonl=True)
logger.info(__('targets for %d template files'), len(files))
extract_translations = self.templates.environment.extract_translations
for template in status_iterator(files, __('reading templates... '), "purple",
len(files), self.app.verbosity):
try:
with open(template, encoding='utf-8') as f:
context = f.read()
for line, meth, msg in extract_translations(context):
origin = MsgOrigin(template, line)
self.catalogs['sphinx'].add(msg, origin)
except Exception as exc:
raise ThemeError('%s: %r' % (template, exc)) from exc
def build(self, docnames: Iterable[str], summary: str = None, method: str = 'update') -> None: # NOQA
self._extract_from_template()
super().build(docnames, summary, method)
def finish(self) -> None:
super().finish()
context = {
'version': self.config.version,
'copyright': self.config.copyright,
'project': self.config.project,
'last_translator': self.config.gettext_last_translator,
'language_team': self.config.gettext_language_team,
'ctime': datetime.fromtimestamp(timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),
'display_location': self.config.gettext_location,
'display_uuid': self.config.gettext_uuid,
}
for textdomain, catalog in status_iterator(self.catalogs.items(),
__("writing message catalogs... "),
"darkgreen", len(self.catalogs),
self.app.verbosity,
lambda textdomain__: textdomain__[0]):
# noop if config.gettext_compact is set
ensuredir(path.join(self.outdir, path.dirname(textdomain)))
context['messages'] = list(catalog)
content = GettextRenderer(outdir=self.outdir).render('message.pot_t', context)
pofn = path.join(self.outdir, textdomain + '.pot')
if should_write(pofn, content):
with open(pofn, 'w', encoding='utf-8') as pofile:
pofile.write(content)
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_builder(MessageCatalogBuilder)
app.add_config_value('gettext_compact', True, 'gettext', {bool, str})
app.add_config_value('gettext_location', True, 'gettext')
app.add_config_value('gettext_uuid', False, 'gettext')
app.add_config_value('gettext_auto_build', True, 'env')
app.add_config_value('gettext_additional_targets', [], 'env')
app.add_config_value('gettext_last_translator', 'FULL NAME <EMAIL@ADDRESS>', 'gettext')
app.add_config_value('gettext_language_team', 'LANGUAGE <[email protected]>', 'gettext')
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/sphinx/builders/gettext.py | Python | apache-2.0 | 11,428 |
from kfp.components import InputPath, OutputPath, create_component_from_func
def convert_apache_parquet_to_csv(
data_path: InputPath('ApacheParquet'),
output_data_path: OutputPath('CSV'),
):
'''Converts Apache Parquet to CSV.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import parquet
data_frame = parquet.read_pandas(data_path).to_pandas()
data_frame.to_csv(
output_data_path,
index=False,
)
if __name__ == '__main__':
convert_apache_parquet_to_csv_op = create_component_from_func(
convert_apache_parquet_to_csv,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['pyarrow==0.17.1', 'pandas==1.0.3'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/to_CSV/component.yaml",
},
)
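# Usage sketch (assumes the KFP v1 SDK; `make_parquet_op` is a hypothetical
# upstream step producing an 'ApacheParquet' output named 'data'):
#
# import kfp
# convert_op = kfp.components.load_component_from_file('component.yaml')
#
# @kfp.dsl.pipeline(name='parquet-to-csv-demo')
# def demo_pipeline():
#     parquet_task = make_parquet_op()
#     convert_op(data=parquet_task.outputs['data'])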
| kubeflow/pipelines | components/contrib/_converters/ApacheParquet/to_CSV/component.py | Python | apache-2.0 | 1,096 |
'''
Created on 2013-12-19
@author: zpfalpc23
'''
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class TgetException(Exception):
"""
Base Tget Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = "An unknown exception occurred"
def __init__(self, message=None, *args, **kwargs):
if not message:
message = self.message
try:
message = message % kwargs
except Exception:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
else:
# at least get the core message out if something happened
pass
super(TgetException, self).__init__(message)
class DirCreateFailed(TgetException):
message ="The directory %(dir_name) can't be created."
class CheckDirPathFailed(TgetException):
message = "The path %(path_name) is not a directory."
class PeerAlreadyRegistered(TgetException):
message = "The peer %(addr):%(port) has already registered in this master"
class NoSuchUuidException(TgetException):
message = "No such peer %(uuid) registered in master."
class NoSuchPeerException(TgetException):
message = "No such peer (%(addr):%(port)) registered in master."
class InvalidRequestException(TgetException):
message = "Master(peer) has receive an invalid request from client: %(reason)."
class ConfigError(TgetException):
message = "No config at %(path)"
| zhangpf/image-tget | tget/exception.py | Python | apache-2.0 | 1,581 |
"""
Support for Blink Home Camera System.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/blink/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.helpers import (
config_validation as cv, discovery)
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD, CONF_NAME, CONF_SCAN_INTERVAL,
CONF_BINARY_SENSORS, CONF_SENSORS, CONF_FILENAME,
CONF_MONITORED_CONDITIONS, TEMP_FAHRENHEIT)
REQUIREMENTS = ['blinkpy==0.11.0']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'blink'
BLINK_DATA = 'blink'
CONF_CAMERA = 'camera'
CONF_ALARM_CONTROL_PANEL = 'alarm_control_panel'
DEFAULT_BRAND = 'Blink'
DEFAULT_ATTRIBUTION = "Data provided by immedia-semi.com"
SIGNAL_UPDATE_BLINK = "blink_update"
DEFAULT_SCAN_INTERVAL = timedelta(seconds=60)
TYPE_CAMERA_ARMED = 'motion_enabled'
TYPE_MOTION_DETECTED = 'motion_detected'
TYPE_TEMPERATURE = 'temperature'
TYPE_BATTERY = 'battery'
TYPE_WIFI_STRENGTH = 'wifi_strength'
SERVICE_REFRESH = 'blink_update'
SERVICE_TRIGGER = 'trigger_camera'
SERVICE_SAVE_VIDEO = 'save_video'
BINARY_SENSORS = {
TYPE_CAMERA_ARMED: ['Camera Armed', 'mdi:verified'],
TYPE_MOTION_DETECTED: ['Motion Detected', 'mdi:run-fast'],
}
SENSORS = {
TYPE_TEMPERATURE: ['Temperature', TEMP_FAHRENHEIT, 'mdi:thermometer'],
TYPE_BATTERY: ['Battery', '%', 'mdi:battery-80'],
TYPE_WIFI_STRENGTH: ['Wifi Signal', 'bars', 'mdi:wifi-strength-2'],
}
BINARY_SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSORS)):
vol.All(cv.ensure_list, [vol.In(BINARY_SENSORS)])
})
SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):
vol.All(cv.ensure_list, [vol.In(SENSORS)])
})
SERVICE_TRIGGER_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string
})
SERVICE_SAVE_VIDEO_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_FILENAME): cv.string,
})
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN:
vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL):
cv.time_period,
vol.Optional(CONF_BINARY_SENSORS, default={}):
BINARY_SENSOR_SCHEMA,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
})
},
extra=vol.ALLOW_EXTRA)
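# Example configuration.yaml entry accepted by CONFIG_SCHEMA above
# (illustrative values; the secret would normally live in secrets.yaml):
#
# blink:
#   username: [email protected]
#   password: !secret blink_password
#   scan_interval: 300
#   binary_sensors:
#     monitored_conditions:
#       - motion_enabled
#       - motion_detected
#   sensors:
#     monitored_conditions:
#       - temperature
#       - battery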
def setup(hass, config):
"""Set up Blink System."""
from blinkpy import blinkpy
conf = config[BLINK_DATA]
username = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
scan_interval = conf[CONF_SCAN_INTERVAL]
hass.data[BLINK_DATA] = blinkpy.Blink(username=username,
password=password)
hass.data[BLINK_DATA].refresh_rate = scan_interval.total_seconds()
hass.data[BLINK_DATA].start()
platforms = [
('alarm_control_panel', {}),
('binary_sensor', conf[CONF_BINARY_SENSORS]),
('camera', {}),
('sensor', conf[CONF_SENSORS]),
]
for component, schema in platforms:
discovery.load_platform(hass, component, DOMAIN, schema, config)
def trigger_camera(call):
"""Trigger a camera."""
cameras = hass.data[BLINK_DATA].cameras
name = call.data[CONF_NAME]
if name in cameras:
cameras[name].snap_picture()
hass.data[BLINK_DATA].refresh(force_cache=True)
def blink_refresh(event_time):
"""Call blink to refresh info."""
hass.data[BLINK_DATA].refresh(force_cache=True)
async def async_save_video(call):
"""Call save video service handler."""
await async_handle_save_video_service(hass, call)
hass.services.register(DOMAIN, SERVICE_REFRESH, blink_refresh)
hass.services.register(DOMAIN,
SERVICE_TRIGGER,
trigger_camera,
schema=SERVICE_TRIGGER_SCHEMA)
hass.services.register(DOMAIN,
SERVICE_SAVE_VIDEO,
async_save_video,
schema=SERVICE_SAVE_VIDEO_SCHEMA)
return True
async def async_handle_save_video_service(hass, call):
"""Handle save video service calls."""
camera_name = call.data[CONF_NAME]
video_path = call.data[CONF_FILENAME]
if not hass.config.is_allowed_path(video_path):
_LOGGER.error(
"Can't write %s, no access to path!", video_path)
return
def _write_video(camera_name, video_path):
"""Call video write."""
all_cameras = hass.data[BLINK_DATA].cameras
if camera_name in all_cameras:
all_cameras[camera_name].video_to_file(video_path)
try:
await hass.async_add_executor_job(
_write_video, camera_name, video_path)
except OSError as err:
_LOGGER.error("Can't write image to file: %s", err)
| tinloaf/home-assistant | homeassistant/components/blink/__init__.py | Python | apache-2.0 | 5,050 |
# Copyright (c) 2017 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Django settings for example project.
Generated by 'django-admin startproject' using Django 1.8.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5h&v9o=&4qb66*3$gb4tavf_-1nzb#oa99ok9ao)^8dyc=fo52'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'payu',
'example.demo'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'example', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| PressLabs/django-payu | example/example/settings.py | Python | apache-2.0 | 3,350 |
from mock import call
from scanpointgenerator import CompoundGenerator, LineGenerator
from malcolm.core import Context, Process
from malcolm.modules.pmac.blocks import raw_motor_block
from malcolm.modules.pmac.parts import MotorPreMovePart
from malcolm.modules.scanning.controllers import RunnableController
from malcolm.testutil import ChildTestCase
class TestMotorPreMovePart(ChildTestCase):
def setUp(self):
self.process = Process("test_process")
self.context = Context(self.process)
        # Create a raw motor mock to handle axis requests
self.child = self.create_child_block(
raw_motor_block, self.process, mri="BS", pv_prefix="PV:PRE"
)
# Add Beam Selector object
self.o = MotorPreMovePart(name="MotorPreMovePart", mri="BS", demand=50)
controller = RunnableController("SCAN", "/tmp", use_git=False)
controller.add_part(self.o)
self.process.add_controller(controller)
self.process.start()
def tearDown(self):
del self.context
self.process.stop(timeout=1)
def test_bs(self):
b = self.context.block_view("SCAN")
generator = CompoundGenerator([LineGenerator("x", "mm", 0, 1, 10)], [], [], 0.1)
b.configure(generator)
self.o.on_configure(self.context)
assert self.child.handled_requests.mock_calls == [call.put("demand", 50)]
| dls-controls/pymalcolm | tests/test_modules/test_pmac/test_motorpremovepart.py | Python | apache-2.0 | 1,398 |
import unittest
from p2bf.builder import BFBuild
from p2bf.emitter import Emitter
import StringIO
from util.run_bf import run
class TestVariableAssignment(unittest.TestCase):
def test_single_assignment(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """v1 = "a" """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
def test_multi_assignment(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """v3 = v2 = v1 = "a" """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
def test_variable_to_variable(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """v1 = "a"\nv2 = v1 """
builder = BFBuild(python, emit=emitter).emit_bf()
run(emit_output.getvalue(), stdout=run_output)
def test_setting_integer(self):
emit_output = StringIO.StringIO()
run_output = StringIO.StringIO()
emitter = Emitter(stdout=emit_output)
python = """v1 = 57 """
builder = BFBuild(python, emit=emitter).emit_bf()
memory_space = []
run(emit_output.getvalue(), stdout=run_output)
| vegitron/python2brainfuck | t/py2b/variable_assignment.py | Python | apache-2.0 | 1,480 |
# Copyright 2014 Yajie Miao Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import cPickle
import gzip
import os
import sys
import time
import numpy
import json
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from io_func import smart_open
from io_func.model_io import _file2nnet, log
from io_func.kaldi_feat import KaldiReadIn, KaldiWriteOut
from models.cnn import CNN_Forward
from utils.utils import parse_arguments, string_2_bool
if __name__ == '__main__':
# check the arguments
arg_elements = [sys.argv[i] for i in range(1, len(sys.argv))]
arguments = parse_arguments(arg_elements)
required_arguments = ['in_scp_file', 'out_ark_file', 'cnn_param_file', 'cnn_cfg_file']
for arg in required_arguments:
        if arg not in arguments:
print "Error: the argument %s has to be specified" % (arg); exit(1)
# mandatory arguments
in_scp_file = arguments['in_scp_file']
out_ark_file = arguments['out_ark_file']
cnn_param_file = arguments['cnn_param_file']
cnn_cfg_file = arguments['cnn_cfg_file']
# network structure
cfg = cPickle.load(smart_open(cnn_cfg_file,'r'))
conv_configs = cfg.conv_layer_configs
conv_layer_number = len(conv_configs)
for i in xrange(conv_layer_number):
conv_configs[i]['activation'] = cfg.conv_activation
# whether to use the fast mode
use_fast = cfg.use_fast
if arguments.has_key('use_fast'):
use_fast = string_2_bool(arguments['use_fast'])
kaldiread = KaldiReadIn(in_scp_file)
kaldiwrite = KaldiWriteOut(out_ark_file)
log('> ... setting up the CNN convolution layers')
input_shape_train = conv_configs[0]['input_shape']
input_shape_1 = (input_shape_train[1], input_shape_train[2], input_shape_train[3])
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
cnn = CNN_Forward(numpy_rng = rng, theano_rng=theano_rng, conv_layer_configs = conv_configs, use_fast = use_fast)
_file2nnet(cnn.conv_layers, set_layer_num = len(conv_configs), filename=cnn_param_file)
out_function = cnn.build_out_function()
log('> ... processing the data')
while True:
uttid, in_matrix = kaldiread.read_next_utt()
if uttid == '':
break
in_matrix = numpy.reshape(in_matrix, (in_matrix.shape[0],) + input_shape_1)
out_matrix = out_function(in_matrix)
kaldiwrite.write_kaldi_mat(uttid, out_matrix)
kaldiwrite.close()
log('> ... the saved features are %s' % (out_ark_file))
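# Example invocation (illustrative paths; the key=value argument style is
# assumed from utils.parse_arguments):
#
#   python run_CnnFeat.py in_scp_file=data/feats.scp out_ark_file=data/conv.ark \
#       cnn_param_file=exp/cnn.param cnn_cfg_file=exp/cnn.cfg use_fast=false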
| mclaughlin6464/pdnn | cmds2/run_CnnFeat.py | Python | apache-2.0 | 3,178 |
import copy
from fireant.dataset.fields import Field, is_metric_field
from fireant.queries.finders import find_field_in_modified_field
def _replace_reference_dimension(dimension, offset_func, field_transformer, trunc_date=None):
ref_definition = offset_func(field_transformer(dimension, trunc_date))
field = Field(
alias=dimension.alias,
definition=ref_definition,
data_type=dimension.data_type,
label=dimension.label,
)
if hasattr(dimension, "for_"):
return dimension.for_(field)
return field
def make_reference_dimensions(dimensions, ref_dimension, offset_func, field_transformer, trunc_date):
return [
_replace_reference_dimension(dimension, offset_func, field_transformer, trunc_date)
if ref_dimension is find_field_in_modified_field(dimension)
else dimension
for dimension in dimensions
]
def make_reference_metrics(metrics, ref_key):
return [
Field(
"{}_{}".format(metric.alias, ref_key),
metric.definition,
data_type=metric.data_type,
label=metric.label,
prefix=metric.prefix,
suffix=metric.suffix,
precision=metric.precision,
)
for metric in metrics
]
def make_reference_filters(filters, ref_dimension, offset_func):
"""
Copies and replaces the reference dimension's definition in all of the filters applied to a dataset query.
This is used to shift the dimension filters to fit the reference window.
    :param filters: The filters applied to the dataset query.
    :param ref_dimension: The dimension used as the basis of the reference.
    :param offset_func: A function that shifts a filter boundary value by the reference offset.
    :return: A list of copied filters with the reference dimension's start/stop shifted.
reference_filters = []
for ref_filter in filters:
# Metric filters should not be part of the reference
if is_metric_field(ref_filter.field):
continue
if ref_filter.field is ref_dimension:
# NOTE: Important to apply the offset function to the start and stop properties because the date math can
# become expensive over many rows
ref_filter = copy.copy(ref_filter)
ref_filter.start = offset_func(ref_filter.start)
ref_filter.stop = offset_func(ref_filter.stop)
reference_filters.append(ref_filter)
return reference_filters
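# Minimal sketch (not part of fireant): shifting a date-range filter back one
# week for a week-over-week reference. `week_filter` is a hypothetical range
# filter on `ref_dimension` exposing start/stop attributes.
#
#   from datetime import timedelta
#
#   offset = lambda value: value - timedelta(weeks=1)
#   [shifted] = make_reference_filters([week_filter], ref_dimension, offset)
#   shifted.start, shifted.stop  # both moved back by one week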
| kayak/fireant | fireant/queries/references.py | Python | apache-2.0 | 2,281 |
from .services import (HttpPageFetcher, ReadabilityArticleExtractor,
DocumentExtractorService)
from ...async import client_session
_page_fetcher = HttpPageFetcher(client_session)
_article_extractor = ReadabilityArticleExtractor(client_session)
extractor_service = DocumentExtractorService(_page_fetcher, _article_extractor)
| melphi/article-extractor | python/app/modules/extractor/components.py | Python | apache-2.0 | 349 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.window()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class WindowTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("1", 20, 14, 7, 1),
("2", 20, 17, 9, 1),
("3", 20, 14, 14, 1),
("4", 20, 10, 14, 1),
("5", 20, 14, 19, 1),
("6", 20, 4, 1, 2),
("7", 20, 2, 1, 6),
("8", 20, 4, 7, 2),
("9", 20, 2, 7, 6),
("10", 1, 10, 4, 1),
("11", 0, 10, 4, 1),
("12", 20, 14, 7, 1, False),
("13", 20, 17, 9, 1, False),
("14", 20, 14, 14, 1, False),
("15", 20, 10, 14, 1, False),
("16", 20, 14, 19, 1, False),
("17", 20, 4, 1, 2, False),
("18", 20, 2, 1, 6, False),
("19", 20, 4, 7, 2, False),
("20", 20, 2, 7, 6, False),
("21", 1, 10, 4, 1, False),
("22", 0, 10, 4, 1, False),
)
def testWindowDataset(self, count, size, shift, stride, drop_remainder=True):
"""Tests a dataset that slides a window its input elements."""
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
def _flat_map_fn(x, y, z):
return dataset_ops.Dataset.zip((x.batch(batch_size=size),
y.batch(batch_size=size),
z.batch(batch_size=size)))
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn).repeat(count).window(
size=size,
shift=shift,
stride=stride,
drop_remainder=drop_remainder).flat_map(_flat_map_fn)
get_next = self.getNext(dataset)
self.assertEqual(
[[None] + list(c.shape[1:]) for c in components],
[ts.as_list() for ts in nest.flatten(dataset.output_shapes)])
num_full_batches = max(0,
(count * 7 - ((size - 1) * stride + 1)) // shift + 1)
for i in range(num_full_batches):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
for j in range(size):
self.assertAllEqual(component[(i * shift + j * stride) % 7]**2,
result_component[j])
if not drop_remainder:
num_partial_batches = (count * 7) // shift + (
(count * 7) % shift > 0) - num_full_batches
for i in range(num_partial_batches):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
remaining = (count * 7) - ((num_full_batches + i) * shift)
num_elements = remaining // stride + ((remaining % stride) > 0)
for j in range(num_elements):
self.assertAllEqual(
component[((num_full_batches + i) * shift + j * stride) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", 14, 0, 3, 1),
("2", 14, 3, 0, 1),
("3", 14, 3, 3, 0),
)
def testWindowDatasetInvalid(self, count, size, shift, stride):
dataset = dataset_ops.Dataset.range(10).map(lambda x: x).repeat(
count).window(
size=size, shift=shift,
stride=stride).flat_map(lambda x: x.batch(batch_size=size))
self.assertDatasetProduces(
dataset, expected_error=(errors.InvalidArgumentError, ""))
def testWindowSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
dataset = dataset_ops.Dataset.range(10).map(_sparse).window(
size=5, shift=3,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=5))
num_batches = (10 - 5) // 3 + 1
expected_output = [
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4],
dense_shape=[5, 1]) for i in range(num_batches)
]
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testWindowSparseWithDifferentDenseShapes(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=array_ops.expand_dims(
math_ops.range(i, dtype=dtypes.int64), 1),
values=array_ops.fill([math_ops.to_int32(i)], i),
dense_shape=[i])
dataset = dataset_ops.Dataset.range(10).map(_sparse).window(
size=5, shift=3,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=5))
expected_output = []
num_batches = (10 - 5) // 3 + 1
for i in range(num_batches):
expected_indices = []
expected_values = []
for j in range(5):
for k in range(i * 3 + j):
expected_indices.append([j, k])
expected_values.append(i * 3 + j)
expected_output.append(
sparse_tensor.SparseTensorValue(
indices=expected_indices,
values=expected_values,
dense_shape=[5, i * 3 + 5 - 1]))
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testNestedWindowSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
dataset = dataset_ops.Dataset.range(10).map(_sparse).window(
size=4, shift=2,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=4)).window(
size=3, shift=1,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=3))
expected_output = [
sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
[1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
[2, 2, 0], [2, 3, 0]],
values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
dense_shape=[3, 4, 1]),
sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
[1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
[2, 2, 0], [2, 3, 0]],
values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
dense_shape=[3, 4, 1])
]
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testWindowShapeError(self):
def generator():
yield [1.0, 2.0, 3.0]
yield [4.0, 5.0, 6.0]
yield [7.0, 8.0, 9.0, 10.0]
dataset = dataset_ops.Dataset.from_generator(
generator, dtypes.float32, output_shapes=[None]).window(
size=3, shift=1).flat_map(lambda x: x.batch(batch_size=3))
self.assertDatasetProduces(
dataset,
expected_error=(
errors.InvalidArgumentError,
r"Cannot batch tensors with different shapes in component 0. "
r"First element had shape \[3\] and element 2 had shape \[4\]."))
def testWindowIgnoreErrors(self):
input_values = np.float32([1., np.nan, 2., np.nan, 3.])
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).map(
lambda x: array_ops.check_numerics(x, "message")).window(
size=2, shift=2, stride=2,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=2))
self.assertDatasetProduces(
dataset, expected_output=[np.float32([1., 2.]),
np.float32([2., 3.])])
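# Semantics illustration (comments only): tf.data.Dataset.range(7).window(
# size=3, shift=2, stride=1, drop_remainder=True), flattened with
# .flat_map(lambda w: w.batch(3)), yields [0, 1, 2], [2, 3, 4], [4, 5, 6];
# with drop_remainder=False a trailing partial window [6] is also emitted.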
if __name__ == "__main__":
test.main()
| asimshankar/tensorflow | tensorflow/python/data/kernel_tests/window_test.py | Python | apache-2.0 | 9,036 |
# coding: utf-8
#
# Copyright 2014-2017 Groupon, Inc.
# Copyright 2014-2017 The Billing Project, LLC
#
# The Billing Project, LLC licenses this file to you under the Apache License, version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Kill Bill
Kill Bill is an open-source billing and payments platform # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import killbill
from killbill.configuration import Configuration
from killbill.models.account import Account
from killbill.models.tenant import Tenant
from killbill.models.payment_method import PaymentMethod
from killbill.models.subscription import Subscription
from killbill.models.invoice_dry_run import InvoiceDryRun
from killbill.models.invoice_item import InvoiceItem
from killbill.models.payment_transaction import PaymentTransaction
from random import choice
from string import ascii_lowercase
import time
from killbill.rest import ApiException
class TestIntegration(unittest.TestCase):
"""Account unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def test_integration(self):
# Create tenant
random_api_key = ''.join(choice(ascii_lowercase) for i in range(4))
random_api_secret = ''.join(choice(ascii_lowercase) for i in range(5))
config = Configuration()
config.api_key['X-Killbill-ApiKey'] = random_api_key
config.api_key['X-Killbill-ApiSecret'] = random_api_secret
api_tenant = killbill.api.TenantApi()
body = Tenant(api_key=random_api_key, api_secret=random_api_secret)
api_tenant.create_tenant(body, 'test')
# Get tenant
tenant = api_tenant.get_tenant_by_api_key(api_key=random_api_key)
self.assertIsNotNone(tenant.tenant_id)
self.assertEqual(random_api_key, tenant.api_key)
# Upload XML catalog/Fetch XML catalog
api_catalog = killbill.api.CatalogApi()
xml_catalog = open("../resources/SpyCarBasic.xml", "r+").read()
api_catalog.upload_catalog_xml(xml_catalog, 'test')
# Get catalog
catalog = api_catalog.get_catalog_xml()
self.assertIsNotNone(catalog)
# Create account
api_account = killbill.api.AccountApi()
random_external_key = ''.join(choice(ascii_lowercase) for i in range(6))
body = Account(name='John', external_key=random_external_key, currency='USD', state='CA', country='USA')
api_account.create_account(body, 'test')
# Get account
account = api_account.get_account_by_key(random_external_key)
account_id = account.account_id
self.assertIsNotNone(account.account_id)
self.assertEqual(random_external_key, account.external_key)
self.assertEqual('John', account.name)
self.assertEqual('USD', account.currency)
self.assertEqual('CA', account.state)
self.assertEqual('USA', account.country)
# Add a payment method
body = PaymentMethod(plugin_name='__EXTERNAL_PAYMENT__', plugin_info=None)
api_account.create_payment_method(account_id, body, 'test')
# Get a payment method
payment_method = api_account.get_payment_methods_for_account(account_id)
self.assertIsNotNone(payment_method[0].payment_method_id)
self.assertEqual(account_id, payment_method[0].account_id)
self.assertEqual('__EXTERNAL_PAYMENT__', payment_method[0].plugin_name)
# Tag account as AUTO_INVOICING_OFF
tag = ["00000000-0000-0000-0000-000000000002"]
api_account.create_account_tags(account_id, tag, 'test')
# Get account tags
tags = api_account.get_account_tags(account_id)
self.assertIsNotNone(tags[0].tag_id)
self.assertEqual("00000000-0000-0000-0000-000000000002", tags[0].tag_definition_id)
self.assertEqual("AUTO_INVOICING_OFF", tags[0].tag_definition_name)
# Create a subscription against plan
api_subscription = killbill.api.SubscriptionApi()
body = Subscription(account_id=account_id, plan_name='standard-monthly')
api_subscription.create_subscription(body, 'test')
# Get account bundles
bundles = api_account.get_account_bundles(account_id)
subscription_id = bundles[0].subscriptions[0].subscription_id
# Get subscription
subscription = api_subscription.get_subscription(subscription_id)
self.assertEqual('standard-monthly', subscription.plan_name)
time.sleep(.5)
# Get account invoices
invoices = api_account.get_invoices_for_account(account_id)
self.assertEqual([], invoices)
# Remove AUTO_INVOICING_OFF tag
api_account.delete_account_tags(account_id, 'test', tag_def=tag)
time.sleep(.5)
# Get account invoices
invoices = api_account.get_invoices_for_account(account_id)
self.assertIsNotNone(invoices[0].invoice_id)
# Create a dryRun invoice
api_invoice = killbill.api.InvoiceApi()
body = InvoiceDryRun(dry_run_type='TARGET_DATE')
api_invoice.generate_dry_run_invoice(body, account_id, 'test')
# Modify Plan
body = Subscription(subscription_id=subscription_id, plan_name='super-monthly')
api_subscription.change_subscription_plan(subscription_id, body, 'test')
# Create external charge
body = InvoiceItem(account_id=account_id, amount=50, currency='USD', description='My charge')
api_invoice.create_external_charges(account_id, [body], 'test', auto_commit=True)
# Verify account balance
account = api_account.get_account(account_id, account_with_balance=True)
self.assertEqual(50.0, account.account_balance)
# Pay all unpaid invoices
api_account.pay_all_invoices(account_id, 'test', external_payment=True)
# Verify account balance
account = api_account.get_account(account_id, account_with_balance=True)
self.assertEqual(0, account.account_balance)
# Get account invoice payments
invoice_payments = api_account.get_invoice_payments(account_id)
payment_id = invoice_payments[0].transactions[0].payment_id
self.assertEqual(1, len(invoice_payments[0].transactions))
# Create a refund
api_payment = killbill.api.PaymentApi()
body = PaymentTransaction(payment_id=payment_id, amount=50)
api_payment.refund_payment(payment_id, body, 'test')
# Get account invoice payments
invoice_payments = api_account.get_invoice_payments(account_id)
self.assertEqual(2, len(invoice_payments[0].transactions))
self.assertEqual('REFUND', invoice_payments[0].transactions[1].transaction_type)
# Cancel subscription
api_subscription.cancel_subscription_plan(subscription_id, 'test')
# Get subscription
subscription = api_subscription.get_subscription(subscription_id)
self.assertEqual('CANCELLED', subscription.state)
pass
def test_pagination_and_search(self):
# Create tenant
random_api_key = ''.join(choice(ascii_lowercase) for i in range(4))
random_api_secret = ''.join(choice(ascii_lowercase) for i in range(5))
config = Configuration()
config.api_key['X-Killbill-ApiKey'] = random_api_key
config.api_key['X-Killbill-ApiSecret'] = random_api_secret
api_tenant = killbill.api.TenantApi()
body = Tenant(api_key=random_api_key, api_secret=random_api_secret)
api_tenant.create_tenant(body, 'test')
# Create 10 accounts with payment methods and external charges
for x in range(0, 10):
api_account = killbill.api.AccountApi()
random_external_key = ''.join(choice(ascii_lowercase) for i in range(6))
body = Account(name='John-' + str(x), external_key=random_external_key, currency='USD', state='CA',
country='USA')
api_account.create_account(body, 'test')
# Get account
account = api_account.get_account_by_key(random_external_key)
account_id = account.account_id
# Add a payment method
body = PaymentMethod(plugin_name='__EXTERNAL_PAYMENT__', plugin_info=None)
api_account.create_payment_method(account_id, body, 'test')
# Create external charges
api_invoice = killbill.api.InvoiceApi()
body = InvoiceItem(account_id=account_id, amount=50, currency='USD', description='My charge')
api_invoice.create_external_charges(account_id, [body], 'test',
auto_commit=True)
# Pay all unpaid invoices
api_account.pay_all_invoices(account_id, 'test', external_payment=True)
# Pagination list accounts
accounts = api_account.get_accounts()
self.assertEqual(10, len(accounts))
# Pagination list invoice
api_invoice = killbill.api.InvoiceApi()
invoices = api_invoice.get_invoices()
self.assertEqual(10, len(invoices))
# Pagination list payments
api_payment = killbill.api.PaymentApi()
payments = api_payment.get_payments()
self.assertEqual(10, len(payments))
# Search accounts
accounts = api_account.search_accounts('John-1')
self.assertEqual(1, len(accounts))
# Search payments
payments = api_payment.search_payments('SUCCESS')
self.assertEqual(10, len(payments))
# Search invoices
invoices = api_invoice.search_invoices('USD')
self.assertEqual(10, len(invoices))
pass
if __name__ == '__main__':
unittest.main() | killbill/killbill-client-python | test/test_integration.py | Python | apache-2.0 | 10,301 |
'''
cdrFormula is an example of a package plug-in to both GUI menu and command line/web service
that compiles a CDR Formula Linbase file to be executed by Arelle XBRL Formula processing.
For description of CDR formula see:
http://http://www.ffiec.gov/find/taxonomy/call_report_taxonomy.html
Functions are described in:
http://www.academia.edu/5920257/UBPR_Users_Guide_Technical_Information_Federal_Financial_Institutions_Examination_Council_Users_Guide_for_the_Uniform_Bank_Performance_Report_Technical_Information
This plug-in is a Python package, and can be loaded by referencing the containing
directory (usually, "cdrFormula"), and selecting this "__init__.py" file within the
cdrFormula directory (such as in a file chooser).
(c) Copyright 2014 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
'''
import time, os, io, sys
from arelle.ModelValue import qname
from arelle import XmlUtil
from .cdrValidator import hasCdrFormula, validate
logMessage = None
def cdrValidater(val):
if hasCdrFormula(val):
# CDR formulae are loaded, last step in validation
validate(val)
def cdrFilesOpenMenuEntender(cntlr, menu):
pass # ensure plug in loads before model object classes are created by parser
def cdrCommandLineOptionExtender(parser):
pass # ensure plug in loads before model object classes are created by parser
# plugin changes to model object factor classes
from .cdrModelObject import CDR_LINKBASE, CdrFormula, CdrAbsoluteContext, CdrRelativeContext
cdrModelObjectElementSubstitutionClasses = (
(qname(CDR_LINKBASE, "formula"), CdrFormula),
(qname(CDR_LINKBASE, "absoluteContext"), CdrAbsoluteContext),
(qname(CDR_LINKBASE, "relativeContext"), CdrRelativeContext),
)
__pluginInfo__ = {
'name': 'CDR Formula Processor',
'version': '0.9',
'description': "This plug-in provides a CDR formula linkbase processor. ",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2014 Mark V Systems Limited, All rights reserved.',
# plug-in must load before instance is read (so cdr model classes are initialized before parsing)
'CntlrWinMain.Menu.File.Open': cdrFilesOpenMenuEntender,
'CntlrCmdLine.Options': cdrCommandLineOptionExtender,
# classes of mount points (required)
'ModelObjectFactory.ElementSubstitutionClasses': cdrModelObjectElementSubstitutionClasses,
'Validate.Finally': cdrValidater,
} | sternshus/arelle2.7 | svr-2.7/arelle/plugin/cdrFormula/__init__.py | Python | apache-2.0 | 2,567 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
from setuptools import setup, find_packages
readme_filename = "README.md"
current_directory = os.path.dirname(__file__)
readme_path = os.path.join(current_directory, readme_filename)
readme_markdown = ""
try:
with open(readme_path, 'r') as f:
readme_markdown = f.read()
except Exception as e:
print(e)
print("Failed to open %s" % readme_path)
with open('gtfparse/__init__.py', 'r') as f:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(),
re.MULTILINE).group(1)
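# e.g. a line `__version__ = "1.2.0"` in gtfparse/__init__.py makes
# `version` == "1.2.0" here (illustrative value).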
if __name__ == '__main__':
setup(
name='gtfparse',
packages=find_packages(),
version=version,
description="GTF Parsing",
long_description=readme_markdown,
long_description_content_type='text/markdown',
url="https://github.com/openvax/gtfparse",
author="Alex Rubinsteyn",
license="http://www.apache.org/licenses/LICENSE-2.0.html",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
install_requires=[
'numpy>=1.7',
'pandas>=0.15',
],
)
| hammerlab/gtftools | setup.py | Python | apache-2.0 | 2,043 |
from attributes.unit_test.discoverer import TestDiscoverer
class ObjectiveCTestDiscoverer(TestDiscoverer):
def __init__(self):
self.language = 'Objective C'
self.languages = ['Objective C', 'C/C++ Header']
self.extensions = ['*.m', '*.h']
self.frameworks = [
self.__xctest__
]
def __xctest__(self, path, sloc):
pattern = 'XCTest.h'
return self.measure(path, sloc, pattern, whole=True)
| RepoReapers/reaper | attributes/unit_test/discoverer/objectivec.py | Python | apache-2.0 | 464 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
    package="grafeas.v1",
    manifest={
        "Architecture",
        "Distribution",
        "Location",
        "PackageNote",
        "PackageOccurrence",
        "Version",
    },
)


class Architecture(proto.Enum):
    r"""Instruction set architectures supported by various package
    managers.
    """
    ARCHITECTURE_UNSPECIFIED = 0
    X86 = 1
    X64 = 2


class Distribution(proto.Message):
    r"""This represents a particular channel of distribution for a
    given package. E.g., Debian's jessie-backports dpkg mirror.

    Attributes:
        cpe_uri (str):
            Required. The cpe_uri in `CPE
            format <https://cpe.mitre.org/specification/>`__ denoting
            the package manager version distributing a package.
        architecture (grafeas.grafeas_v1.types.Architecture):
            The CPU architecture for which packages in
            this distribution channel were built.
        latest_version (grafeas.grafeas_v1.types.Version):
            The latest available version of this package
            in this distribution channel.
        maintainer (str):
            A freeform string denoting the maintainer of
            this package.
        url (str):
            The distribution channel-specific homepage
            for this package.
        description (str):
            The distribution channel-specific description
            of this package.
    """

    cpe_uri = proto.Field(proto.STRING, number=1,)
    architecture = proto.Field(proto.ENUM, number=2, enum="Architecture",)
    latest_version = proto.Field(proto.MESSAGE, number=3, message="Version",)
    maintainer = proto.Field(proto.STRING, number=4,)
    url = proto.Field(proto.STRING, number=5,)
    description = proto.Field(proto.STRING, number=6,)


class Location(proto.Message):
    r"""An occurrence of a particular package installation found within a
    system's filesystem. E.g., glibc was found in
    ``/var/lib/dpkg/status``.

    Attributes:
        cpe_uri (str):
            Required. The CPE URI in `CPE
            format <https://cpe.mitre.org/specification/>`__ denoting
            the package manager version distributing a package.
        version (grafeas.grafeas_v1.types.Version):
            The version installed at this location.
        path (str):
            The path from which we gathered that this
            package/version is installed.
    """

    cpe_uri = proto.Field(proto.STRING, number=1,)
    version = proto.Field(proto.MESSAGE, number=2, message="Version",)
    path = proto.Field(proto.STRING, number=3,)


class PackageNote(proto.Message):
    r"""This represents a particular package that is distributed over
    various channels. E.g., glibc (aka libc6) is distributed by
    many, at various versions.

    Attributes:
        name (str):
            Required. Immutable. The name of the package.
        distribution (Sequence[grafeas.grafeas_v1.types.Distribution]):
            The various channels by which a package is
            distributed.
    """

    name = proto.Field(proto.STRING, number=1,)
    distribution = proto.RepeatedField(
        proto.MESSAGE, number=10, message="Distribution",
    )


class PackageOccurrence(proto.Message):
    r"""Details on how a particular software package was installed on
    a system.

    Attributes:
        name (str):
            Output only. The name of the installed
            package.
        location (Sequence[grafeas.grafeas_v1.types.Location]):
            Required. All of the places within the
            filesystem versions of this package have been
            found.
    """

    name = proto.Field(proto.STRING, number=1,)
    location = proto.RepeatedField(proto.MESSAGE, number=2, message="Location",)


class Version(proto.Message):
    r"""Version contains structured information about the version of
    a package.

    Attributes:
        epoch (int):
            Used to correct mistakes in the version
            numbering scheme.
        name (str):
            Required only when version kind is NORMAL.
            The main part of the version name.
        revision (str):
            The iteration of the package build from the
            above version.
        inclusive (bool):
            Whether this version is specifying part of an
            inclusive range. Grafeas does not have the
            capability to specify version ranges; instead we
            have fields that specify start version and end
            versions. At times this is insufficient - we
            also need to specify whether the version is
            included in the range or is excluded from the
            range. This boolean is expected to be set to
            true when the version is included in a range.
        kind (grafeas.grafeas_v1.types.Version.VersionKind):
            Required. Distinguishes between sentinel
            MIN/MAX versions and normal versions.
        full_name (str):
            Human readable version string. This string is
            of the form <epoch>:<name>-<revision> and is
            only set when kind is NORMAL.
    """

    class VersionKind(proto.Enum):
        r"""Whether this is an ordinary package version or a sentinel
        MIN/MAX version.
        """
        VERSION_KIND_UNSPECIFIED = 0
        NORMAL = 1
        MINIMUM = 2
        MAXIMUM = 3

    epoch = proto.Field(proto.INT32, number=1,)
    name = proto.Field(proto.STRING, number=2,)
    revision = proto.Field(proto.STRING, number=3,)
    inclusive = proto.Field(proto.BOOL, number=6,)
    kind = proto.Field(proto.ENUM, number=4, enum=VersionKind,)
    full_name = proto.Field(proto.STRING, number=5,)


__all__ = tuple(sorted(__protobuf__.manifest))
| googleapis/python-grafeas | grafeas/grafeas_v1/types/package.py | Python | apache-2.0 | 6379 |
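A minimal usage sketch for the types above (an illustration, not taken from the source: it assumes the package is importable as `grafeas.grafeas_v1` and that the generated proto-plus messages accept keyword arguments, as proto-plus messages normally do; the field values are invented):

from grafeas.grafeas_v1 import types

# Per the Version docstring, full_name follows <epoch>:<name>-<revision>,
# e.g. "1:2.31-0ubuntu9" for this version.
version = types.Version(
    epoch=1,
    name="2.31",
    revision="0ubuntu9",
    kind=types.Version.VersionKind.NORMAL,
)

# A PackageOccurrence records every filesystem location where the
# installed package was found.
occurrence = types.PackageOccurrence(
    name="libc6",
    location=[
        types.Location(
            cpe_uri="cpe:/o:canonical:ubuntu_linux:20.04",
            version=version,
        )
    ],
)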
from __future__ import print_function

import sys


# from https://github.com/mitsuhiko/flask/blob/master/scripts/make-release.py L92
def fail(message, *args):
    print('Error:', message % args, file=sys.stderr)
    sys.exit(1)


def check_args(args):
    """Checks that the args are coherent."""
    check_args_has_attributes(args)
    if args.v:
        non_version_attrs = [v for k, v in args.__dict__.items() if k != 'v']
        print('non_version_attrs', non_version_attrs)
        if len([v for v in non_version_attrs if v is not None]) != 0:
            fail('Cannot show the version number with another command.')
        return
    if args.i is None:
        fail('Cannot draw ER diagram of no database.')
    if args.o is None:
        fail('Cannot draw ER diagram with no output file.')


def check_args_has_attributes(args):
    check_args_has_attribute(args, 'i')
    check_args_has_attribute(args, 'o')
    check_args_has_attribute(args, 'include_tables')
    check_args_has_attribute(args, 'include_columns')
    check_args_has_attribute(args, 'exclude_tables')
    check_args_has_attribute(args, 'exclude_columns')
    check_args_has_attribute(args, 's')


def check_args_has_attribute(args, name):
    if not hasattr(args, name):
        raise Exception('{} should be set'.format(name))
| Alexis-benoist/eralchemy | eralchemy/helpers.py | Python | apache-2.0 | 1305 |
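To make the contract of `check_args` concrete, here is a hypothetical invocation (the attribute names mirror those validated by `check_args_has_attributes`; the CLI parser that normally builds this namespace lives elsewhere in eralchemy and is assumed, not shown):

from argparse import Namespace

from eralchemy.helpers import check_args

# Every expected attribute present, input (i) and output (o) both set:
# check_args returns without complaint.
ok = Namespace(i='sqlite:///example.db', o='erd.pdf', v=None, s=None,
               include_tables=None, include_columns=None,
               exclude_tables=None, exclude_columns=None)
check_args(ok)

# Same namespace but with no output file: check_args calls fail(...),
# which prints to stderr and exits with status 1.
bad = Namespace(**{**vars(ok), 'o': None})
check_args(bad)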
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for relative_bounds."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized
import interval_bound_propagation as ibp
from interval_bound_propagation import layer_utils
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf


class RelativeIntervalBoundsTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(('float32', tf.float32),
('float64', tf.float64))
def test_linear_bounds_shape(self, dtype):
batch_size = 11
input_size = 7
output_size = 5
w = tf.placeholder(dtype=dtype, shape=(input_size, output_size))
b = tf.placeholder(dtype=dtype, shape=(output_size,))
lb_rel_in = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
ub_rel_in = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
nominal = tf.placeholder(dtype=dtype, shape=(batch_size, input_size))
bounds_in = ibp.RelativeIntervalBounds(lb_rel_in, ub_rel_in, nominal)
bounds_out = bounds_in.apply_linear(None, w, b)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
self.assertEqual(dtype, lb_out.dtype)
self.assertEqual(dtype, ub_out.dtype)
self.assertEqual((batch_size, output_size), lb_out.shape)
self.assertEqual((batch_size, output_size), ub_out.shape)

  @parameterized.named_parameters(('float32', tf.float32, 1.e-6),
('float64', tf.float64, 1.e-8))
def test_linear_bounds(self, dtype, tol):
w = tf.constant([[1.0, 2.0, 3.0], [4.0, -5.0, 6.0]], dtype=dtype)
b = tf.constant([0.1, 0.2, 0.3], dtype=dtype)
lb_in = tf.constant([[-1.0, -1.0]], dtype=dtype)
ub_in = tf.constant([[2.0, 2.0]], dtype=dtype)
nominal = tf.constant([[3.1, 4.2]], dtype=dtype)
bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal,
ub_in - nominal, nominal)
bounds_out = bounds_in.apply_linear(None, w, b)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
lb_out_exp = np.array([[-4.9, -11.8, -8.7]])
ub_out_exp = np.array([[10.1, 9.2, 18.3]])
with self.test_session() as session:
lb_out_act, ub_out_act = session.run((lb_out, ub_out))
self.assertAllClose(lb_out_exp, lb_out_act, atol=tol, rtol=tol)
self.assertAllClose(ub_out_exp, ub_out_act, atol=tol, rtol=tol)

  @parameterized.named_parameters(('float32', tf.float32),
('float64', tf.float64))
def test_conv2d_bounds_shape(self, dtype):
batch_size = 23
input_height = 17
input_width = 7
kernel_height = 3
kernel_width = 4
input_channels = 3
output_channels = 5
padding = 'VALID'
strides = (2, 1)
# Expected output dimensions, based on convolution settings.
output_height = 8
output_width = 4
w = tf.placeholder(dtype=dtype, shape=(
kernel_height, kernel_width, input_channels, output_channels))
b = tf.placeholder(dtype=dtype, shape=(output_channels,))
lb_rel_in = tf.placeholder(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
ub_rel_in = tf.placeholder(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
nominal = tf.placeholder(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
bounds_in = ibp.RelativeIntervalBounds(lb_rel_in, ub_rel_in, nominal)
bounds_out = bounds_in.apply_conv2d(None, w, b, padding, strides)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
self.assertEqual(dtype, lb_out.dtype)
self.assertEqual(dtype, ub_out.dtype)
self.assertEqual((batch_size, output_height, output_width, output_channels),
lb_out.shape)
self.assertEqual((batch_size, output_height, output_width, output_channels),
ub_out.shape)

  @parameterized.named_parameters(('float32', tf.float32, 1.e-5),
('float64', tf.float64, 1.e-8))
def test_conv2d_bounds(self, dtype, tol):
batch_size = 53
input_height = 17
input_width = 7
kernel_height = 3
kernel_width = 4
input_channels = 3
output_channels = 2
padding = 'VALID'
strides = (2, 1)
w = tf.random_normal(dtype=dtype, shape=(
kernel_height, kernel_width, input_channels, output_channels))
b = tf.random_normal(dtype=dtype, shape=(output_channels,))
lb_in = tf.random_normal(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
ub_in = tf.random_normal(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
lb_in, ub_in = tf.minimum(lb_in, ub_in), tf.maximum(lb_in, ub_in)
nominal = tf.random_normal(dtype=dtype, shape=(
batch_size, input_height, input_width, input_channels))
bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal,
ub_in - nominal, nominal)
bounds_out = bounds_in.apply_conv2d(None, w, b, padding, strides)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
# Compare against equivalent linear layer.
bounds_out_lin = _materialised_conv_bounds(
w, b, padding, strides, bounds_in)
lb_out_lin, ub_out_lin = bounds_out_lin.lower, bounds_out_lin.upper
with self.test_session() as session:
(lb_out_val, ub_out_val,
lb_out_lin_val, ub_out_lin_val) = session.run((lb_out, ub_out,
lb_out_lin, ub_out_lin))
self.assertAllClose(lb_out_val, lb_out_lin_val, atol=tol, rtol=tol)
self.assertAllClose(ub_out_val, ub_out_lin_val, atol=tol, rtol=tol)

  @parameterized.named_parameters(('float32', tf.float32),
('float64', tf.float64))
def test_conv1d_bounds_shape(self, dtype):
batch_size = 23
input_length = 13
kernel_length = 3
input_channels = 3
output_channels = 5
padding = 'VALID'
strides = (2,)
# Expected output dimensions, based on convolution settings.
output_length = 6
w = tf.placeholder(dtype=dtype, shape=(
kernel_length, input_channels, output_channels))
b = tf.placeholder(dtype=dtype, shape=(output_channels,))
lb_rel_in = tf.placeholder(dtype=dtype, shape=(
batch_size, input_length, input_channels))
ub_rel_in = tf.placeholder(dtype=dtype, shape=(
batch_size, input_length, input_channels))
nominal = tf.placeholder(dtype=dtype, shape=(
batch_size, input_length, input_channels))
bounds_in = ibp.RelativeIntervalBounds(lb_rel_in, ub_rel_in, nominal)
bounds_out = bounds_in.apply_conv1d(None, w, b, padding, strides[0])
lb_out, ub_out = bounds_out.lower, bounds_out.upper
self.assertEqual(dtype, lb_out.dtype)
self.assertEqual(dtype, ub_out.dtype)
self.assertEqual((batch_size, output_length, output_channels),
lb_out.shape)
self.assertEqual((batch_size, output_length, output_channels),
ub_out.shape)

  @parameterized.named_parameters(('float32', tf.float32, 1.e-5),
('float64', tf.float64, 1.e-8))
def test_conv1d_bounds(self, dtype, tol):
batch_size = 53
input_length = 13
kernel_length = 5
input_channels = 3
output_channels = 2
padding = 'VALID'
strides = (2,)
w = tf.random_normal(dtype=dtype, shape=(
kernel_length, input_channels, output_channels))
b = tf.random_normal(dtype=dtype, shape=(output_channels,))
lb_in = tf.random_normal(dtype=dtype, shape=(
batch_size, input_length, input_channels))
ub_in = tf.random_normal(dtype=dtype, shape=(
batch_size, input_length, input_channels))
lb_in, ub_in = tf.minimum(lb_in, ub_in), tf.maximum(lb_in, ub_in)
nominal = tf.random_normal(dtype=dtype, shape=(
batch_size, input_length, input_channels))
bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal,
ub_in - nominal, nominal)
bounds_out = bounds_in.apply_conv1d(None, w, b, padding, strides[0])
lb_out, ub_out = bounds_out.lower, bounds_out.upper
# Compare against equivalent linear layer.
bounds_out_lin = _materialised_conv_bounds(
w, b, padding, strides, bounds_in)
lb_out_lin, ub_out_lin = bounds_out_lin.lower, bounds_out_lin.upper
with self.test_session() as session:
(lb_out_val, ub_out_val,
lb_out_lin_val, ub_out_lin_val) = session.run((lb_out, ub_out,
lb_out_lin, ub_out_lin))
self.assertAllClose(lb_out_val, lb_out_lin_val, atol=tol, rtol=tol)
self.assertAllClose(ub_out_val, ub_out_lin_val, atol=tol, rtol=tol)

  @parameterized.named_parameters(
('float32_snt', snt.BatchNorm, tf.float32, 1.e-5, False),
('float64_snt', snt.BatchNorm, tf.float64, 1.e-8, False),
('float32', ibp.BatchNorm, tf.float32, 1.e-5, False),
('float64', ibp.BatchNorm, tf.float64, 1.e-8, False),
('float32_train', ibp.BatchNorm, tf.float32, 1.e-5, True),
('float64_train', ibp.BatchNorm, tf.float64, 1.e-8, True))
def test_batchnorm_bounds(self, batchnorm_class, dtype, tol, is_training):
batch_size = 11
input_size = 7
output_size = 5
lb_in = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))
ub_in = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))
lb_in, ub_in = tf.minimum(lb_in, ub_in), tf.maximum(lb_in, ub_in)
nominal = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))
# Linear layer.
w = tf.random_normal(dtype=dtype, shape=(input_size, output_size))
b = tf.random_normal(dtype=dtype, shape=(output_size,))
# Batch norm layer.
epsilon = 1.e-2
bn_initializers = {
'beta': tf.random_normal_initializer(),
'gamma': tf.random_uniform_initializer(.1, 3.),
'moving_mean': tf.random_normal_initializer(),
'moving_variance': tf.random_uniform_initializer(.1, 3.)
}
batchnorm_module = batchnorm_class(offset=True, scale=True, eps=epsilon,
initializers=bn_initializers)
# Connect the batchnorm module to the graph.
batchnorm_module(tf.random_normal(dtype=dtype,
shape=(batch_size, output_size)),
is_training=is_training)
bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal,
ub_in - nominal, nominal)
bounds_out = bounds_in.apply_linear(None, w, b)
bounds_out = bounds_out.apply_batch_norm(
batchnorm_module,
batchnorm_module.mean if is_training else batchnorm_module.moving_mean,
batchnorm_module.variance if is_training
else batchnorm_module.moving_variance,
batchnorm_module.gamma,
batchnorm_module.beta,
epsilon)
lb_out, ub_out = bounds_out.lower, bounds_out.upper
    # Separately, calculate the same bounds by folding the batch norm
    # into the linear layer.
wn, bn = layer_utils.combine_with_batchnorm(w, b, batchnorm_module)
bounds_out_lin = bounds_in.apply_linear(None, wn, bn)
lb_out_lin, ub_out_lin = bounds_out_lin.lower, bounds_out_lin.upper
init_op = tf.global_variables_initializer()
with self.test_session() as session:
session.run(init_op)
(lb_out_val, ub_out_val,
lb_out_lin_val, ub_out_lin_val) = session.run((lb_out, ub_out,
lb_out_lin, ub_out_lin))
self.assertAllClose(lb_out_val, lb_out_lin_val, atol=tol, rtol=tol)
self.assertAllClose(ub_out_val, ub_out_lin_val, atol=tol, rtol=tol)


def _materialised_conv_bounds(w, b, padding, strides, bounds_in):
"""Calculates bounds on output of an N-D convolution layer.
The calculation is performed by first materialising the convolution as a
(sparse) fully-connected linear layer. Doing so will affect performance, but
may be useful for investigating numerical stability issues.
Args:
w: (N+2)D tensor of shape (kernel_height, kernel_width, input_channels,
output_channels) containing weights for the convolution.
b: 1D tensor of shape (output_channels) containing biases for the
convolution, or `None` if no bias.
padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
strides: Integer list of length N: `[vertical_stride, horizontal_stride]`.
bounds_in: bounds of shape (batch_size, input_height, input_width,
input_channels) containing bounds on the inputs to the
convolution layer.
Returns:
bounds of shape (batch_size, output_height, output_width,
output_channels) with bounds on the outputs of the
convolution layer.
Raises:
ValueError: if an unsupported convolution dimensionality is encountered.
"""
# Flatten the inputs, as the materialised convolution will have no
# spatial structure.
bounds_in_flat = bounds_in.apply_batch_reshape(None, [-1])
# Materialise the convolution as a (sparse) fully connected linear layer.
input_shape = bounds_in.shape[1:]
w_lin, b_lin = layer_utils.materialise_conv(w, b, input_shape,
padding=padding, strides=strides)
bounds_out_flat = bounds_in_flat.apply_linear(None, w_lin, b_lin)
# Unflatten the output bounds.
output_shape = layer_utils.conv_output_shape(input_shape, w, padding, strides)
return bounds_out_flat.apply_batch_reshape(None, output_shape)


if __name__ == '__main__':
  tf.test.main()
| deepmind/interval-bound-propagation | interval_bound_propagation/tests/relative_bounds_test.py | Python | apache-2.0 | 14261 |
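The equivalences asserted by these tests reduce to standard interval arithmetic around a nominal point. The following self-contained NumPy sketch illustrates the math being tested for the linear case (not the library's implementation): with inputs expressed as offsets from a nominal value, a linear layer's output bounds are obtained by splitting the weights by sign.

import numpy as np

def linear_relative_bounds(nominal, lb_rel, ub_rel, w, b):
    """Bounds on x @ w + b for x in [nominal + lb_rel, nominal + ub_rel]."""
    w_pos, w_neg = np.maximum(w, 0.0), np.minimum(w, 0.0)
    centre = nominal @ w + b  # exact image of the nominal point
    lb_out = centre + lb_rel @ w_pos + ub_rel @ w_neg
    ub_out = centre + ub_rel @ w_pos + lb_rel @ w_neg
    return lb_out, ub_out

# Reproduces the expected values in test_linear_bounds above.
w = np.array([[1.0, 2.0, 3.0], [4.0, -5.0, 6.0]])
b = np.array([0.1, 0.2, 0.3])
nominal = np.array([[3.1, 4.2]])
lb_in, ub_in = np.array([[-1.0, -1.0]]), np.array([[2.0, 2.0]])
lb_out, ub_out = linear_relative_bounds(
    nominal, lb_in - nominal, ub_in - nominal, w, b)
assert np.allclose(lb_out, [[-4.9, -11.8, -8.7]])
assert np.allclose(ub_out, [[10.1, 9.2, 18.3]])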