filename (string, 13-19 chars) | text (string, 134-1.04M chars) |
---|---|
the-stack_106_20998
|
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
    def test_wait_for_db_ready(self):
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
    @patch('time.sleep', return_value=True)
    def test_wait_for_db(self, ts):
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
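# The `wait_for_db` management command under test is not part of this file.
# A minimal sketch (an assumption, not the project's actual implementation)
# of a command that would satisfy these tests:
#
#     import time
#     from django.core.management.base import BaseCommand
#     from django.db import connections
#     from django.db.utils import OperationalError
#
#     class Command(BaseCommand):
#         """Pause execution until the default database is available."""
#         def handle(self, *args, **options):
#             self.stdout.write('Waiting for database...')
#             db_conn = None
#             while not db_conn:
#                 try:
#                     db_conn = connections['default']
#                 except OperationalError:
#                     self.stdout.write('Database unavailable, waiting 1 second...')
#                     time.sleep(1)
#             self.stdout.write(self.style.SUCCESS('Database available!'))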
|
the-stack_106_20999
|
# import gym
import box_world_env
import time
# from PIL import Image
import matplotlib.pyplot as plt
import argparse
import os
parser = argparse.ArgumentParser(description='Run environment with random selected actions.')
parser.add_argument('--rounds', '-r', metavar='rounds', type=int,
help='number of rounds to play (default: 1)', default=1)
parser.add_argument('--steps', '-s', metavar='steps', type=int,
help='maximum number of steps to be played each round (default: 300)', default=300)
parser.add_argument('--save', action='store_true',
help='Save images of single steps')
parser.add_argument('--gifs', action='store_true',
help='Generate Gif files from images')
args = parser.parse_args()
env_name = "boxplot"
n_rounds = args.rounds
n_steps = args.steps
save_images = args.save or args.gifs
generate_gifs = args.gifs
# Creating target directory if images are to be stored
if save_images and not os.path.exists('images'):
try:
os.makedirs('images')
except OSError:
        print('Error: could not create the images target directory.')
ts = time.time()
# env = box_world_env.BoxWorld(12, 4, 2, 2)
env = box_world_env.BoxWorld(6, 2, 1, 1, collect_key=False)
ACTION_LOOKUP = env.unwrapped.get_action_lookup()
print("Created environment: {}".format(env_name))
def print_available_actions():
"""
    Prints all available actions, nicely formatted.
:return:
"""
available_actions_list = []
for i in range(len(ACTION_LOOKUP)):
available_actions_list.append(
'Key: {} - Action: {}'.format(i, ACTION_LOOKUP[i])
)
display_actions = '\n'.join(available_actions_list)
print()
print('Action out of Range!')
    print('Available Actions:\n{}'.format(display_actions))
print()
for i_episode in range(n_rounds):
print('Starting new game!')
observation = env.reset()
for t in range(n_steps):
env.render()
action = input('Select action: ')
try:
action = int(action)
            if action not in range(len(ACTION_LOOKUP)):
raise ValueError
except ValueError:
print_available_actions()
continue
observation, reward, done, info = env.step(action)
print(ACTION_LOOKUP[action], reward, done, info)
print(len(observation), len(observation[0]), len(observation[0][0]))
if save_images:
# img = Image.fromarray(env.render(mode="return"), 'RGB')
# img.save(os.path.join('images', 'observation_{}_{}.png'.format(i_episode, t)))
img = env.render(mode="rgb_array")
fig = plt.imshow(img, vmin=0, vmax=255, interpolation='none')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.savefig(os.path.join('images', 'observation_{}_{}.png'.format(i_episode, t)))
if done:
print("Episode finished after {} timesteps".format(t+1))
env.render()
break
if generate_gifs:
print('')
import imageio
with imageio.get_writer(os.path.join('images', 'round_{}.gif'.format(i_episode)), mode='I', fps=2) as writer:
for t in range(n_steps):
try:
filename = os.path.join('images', 'observation_{}_{}.png'.format(i_episode, t))
image = imageio.imread(filename)
writer.append_data(image)
except:
pass
env.close()
time.sleep(10)
|
the-stack_106_21000
|
import sublime
import sublime_plugin
import subprocess
import re
class GitShowGitlabPipelines(sublime_plugin.WindowCommand):
def run(self):
cwd = self.window.folders()[0]
output = subprocess.check_output('git config --get remote.gitlab.url',
shell=True, cwd=cwd)
url_base = output.decode('utf8').strip()
url_base = re.sub('git@', '', url_base)
url_base = re.sub('ssh://', '', url_base)
        url_base = re.sub(r'\.git$', '', url_base)
url_base = re.sub(':', '/', url_base)
url = 'https://' + url_base + '/-/pipelines/'
run_cmd_parts = ["i3_focus_or_run 'Google-chrome' 'google-chrome'",
'xdg-open ' + url]
run_cmd = '; '.join(run_cmd_parts)
subprocess.call(run_cmd, shell=True)
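# Hedged example of the URL rewrite above (the remote URL is hypothetical):
#   remote.gitlab.url = git@gitlab.example.com:group/project.git
#   opened URL        = https://gitlab.example.com/group/project/-/pipelines/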
|
the-stack_106_21002
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class information(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface-vlan/vlan/ip/dhcp/relay/information. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__relay_option',)
_yang_name = 'information'
_rest_name = 'information'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__relay_option = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="relay-option", rest_name="option", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Option82', u'alt-name': u'option'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='empty', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface-vlan', u'vlan', u'ip', u'dhcp', u'relay', u'information']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'vlan', u'ip', u'dhcp', u'relay', u'information']
def _get_relay_option(self):
"""
Getter method for relay_option, mapped from YANG variable /interface_vlan/vlan/ip/dhcp/relay/information/relay_option (empty)
"""
return self.__relay_option
def _set_relay_option(self, v, load=False):
"""
Setter method for relay_option, mapped from YANG variable /interface_vlan/vlan/ip/dhcp/relay/information/relay_option (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_relay_option is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_relay_option() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="relay-option", rest_name="option", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Option82', u'alt-name': u'option'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """relay_option must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="relay-option", rest_name="option", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Option82', u'alt-name': u'option'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='empty', is_config=True)""",
})
self.__relay_option = t
if hasattr(self, '_set'):
self._set()
def _unset_relay_option(self):
self.__relay_option = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="relay-option", rest_name="option", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Option82', u'alt-name': u'option'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='empty', is_config=True)
relay_option = __builtin__.property(_get_relay_option, _set_relay_option)
_pyangbind_elements = {'relay_option': relay_option, }
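# Hedged usage sketch (not part of the generated bindings; values are
# illustrative): the leaf behaves like a boolean property backed by YANGDynClass.
#   info = information()
#   info.relay_option = True   # routed through _set_relay_option()
#   info._path()               # ['interface-vlan', 'vlan', 'ip', 'dhcp', 'relay', 'information']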
|
the-stack_106_21005
|
#!/bin/python3
# encoding: utf-8
import tensorflow as tf
import numpy as np
tf.enable_eager_execution()
X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)
X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())
X = tf.constant(X)
y = tf.constant(y)
a = tf.get_variable('a', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
b = tf.get_variable('b', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
variables = [a, b]
num_epoch = 10000
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-3)
for _ in range(num_epoch):
with tf.GradientTape() as tape:
y_pred = a * X + b
loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y))
grads = tape.gradient(loss, variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, variables))
print(a.numpy(), b.numpy())
|
the-stack_106_21006
|
from random import *
def rockPaperScissors(user,npc):
if user == 1 and npc == 1:
print("Tie")
elif user == 1 and npc == 2:
print("NPC Wins")
elif user == 1 and npc == 3:
print("User Wins!")
elif user == 2 and npc == 1:
print("User Wins")
elif user == 2 and npc == 2:
print("Tie")
elif user == 2 and npc == 3:
print("NPC Wins")
elif user == 3 and npc == 1:
print("NPC Wins")
elif user == 3 and npc == 2:
print("User Wins")
else:
print("Tie")
userIn = ""
print("Welcome to RPS!")
userIn = input("Enter Rock, Paper, Scissors, or Quit: ")
if userIn.lower() == "rock":
rInt = randint(1,3)
print(rInt)
rockPaperScissors(1, rInt)
elif userIn.lower() == "paper":
rInt = randint(1,3)
# RPS(2,rInt)
elif userIn.lower() == "scissors":
rInt = randint(1,3)
# RPS(3,rInt)
else:
print("Invalid Entry!")
|
the-stack_106_21008
|
"""
Note: This is an extension of House Robber.
After robbing those houses on that street, the thief has found himself a new
place for his thievery so that he will not get too much attention. This time,
all houses at this place are arranged in a circle. That means the first house
is the neighbor of the last one. Meanwhile, the security system for these
houses remains the same as for those in the previous street.
Given a list of non-negative integers representing the amount of money of each
house, determine the maximum amount of money you can rob tonight without
alerting the police.
"""
class Solution(object):
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
if n == 0:
return 0
elif n == 1:
return nums[0]
return max(self.rob_aux(nums, 0), self.rob_aux(nums, 1))
def rob_aux(self, nums, left):
n = len(nums) - 1
t = [0 for i in range(n + 1)]
if n == 0:
return t[n]
t[1] = nums[left]
if n <= 1:
return t[n]
t[2] = max(nums[left: left + 2])
for i in range(3, n + 1):
t[i] = max(t[i - 2] + nums[left + i - 1], t[i - 1])
return t[n]
a1 = [1]
a2 = [4, 1, 6, 10, 5, 13, 2, 7]
s = Solution()
print(s.rob(a1))
print(s.rob(a2))
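# Expected output (worked by hand; the circular street is handled by solving two
# linear runs that drop either the last or the first house): 1 for a1 and 31 for
# a2 (robbing the houses worth 1, 10, 13 and 7).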
|
the-stack_106_21010
|
from logging import *
import logging.config as config
import logging.handlers as handlers
from click import style, echo
from logging import __all__
__all__.extend(["SUCCESS", "success", "ClickHandler", "SuppressExceptionFormatter"])
# adding a new level SUCCESS
SUCCESS = 25
addLevelName(SUCCESS, "SUCCESS")
def success(self, msg, *args, **kwargs):
"""
Log 'msg % args' with level 'SUCCESS'.
"""
if self.isEnabledFor(SUCCESS):
self._log(SUCCESS, msg, args, **kwargs)
Logger.success = success
class ClickHandler(StreamHandler):
'''This class colors the console output'''
def __init__(self, stream=None, levelcolor={}):
super().__init__(stream)
self.levelcolor = levelcolor
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
        The record is then written to the console using click.echo(). Foreground
        color is chosen according to level and set using click.style(msg, fg).
"""
try:
msg = self.format(record)
# fg = self._LEVELCOLOR[record.levelno]
fg = self.levelcolor.get(record.levelname, None)
echo(style(msg, fg=fg))
except Exception:
self.handleError(record)
class SuppressExceptionFormatter(Formatter):
    '''Suppresses the traceback of errors'''
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
        called to format the event time. If there is exception information,
it is discarded.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
return s
_getLogger = getLogger
logging_default_cfg = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"default": {
"format": "%(name)s %(levelname)s %(message)s"
}
},
"handlers": {
"console": {
"class": "siiptool.common.logging.StreamHandler",
"level": "DEBUG",
"formatter": "default"
},
},
"root": {"handlers": ["console"], "level": "DEBUG"}
}
def getLogger(name, *, logging_cfg=logging_default_cfg):
if logging_cfg:
config.dictConfig(logging_cfg)
logger = _getLogger(name)
return logger
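# Hedged usage sketch (assumption; the color mapping and message are illustrative):
#   log = getLogger(__name__)
#   handler = ClickHandler(levelcolor={"SUCCESS": "green", "ERROR": "red"})
#   handler.setFormatter(SuppressExceptionFormatter("%(levelname)s %(message)s"))
#   log.addHandler(handler)
#   log.success("Build completed")   # printed in green via the SUCCESS level added above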
|
the-stack_106_21012
|
import unittest
import numpy as np
import pytest
from scipy.stats import ortho_group
from sklearn.datasets import load_iris
from numpy.testing import assert_array_almost_equal, assert_allclose
from sklearn.utils.testing import ignore_warnings
from metric_learn import (
LMNN, NCA, LFDA, Covariance, MLKR,
LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised)
from metric_learn._util import components_from_metric
from metric_learn.exceptions import NonPSDError
class TestTransformerMetricConversion(unittest.TestCase):
@classmethod
def setUpClass(self):
# runs once per test class
iris_data = load_iris()
self.X = iris_data['data']
self.y = iris_data['target']
def test_cov(self):
cov = Covariance()
cov.fit(self.X)
L = cov.components_
assert_array_almost_equal(L.T.dot(L), cov.get_mahalanobis_matrix())
def test_lsml_supervised(self):
seed = np.random.RandomState(1234)
lsml = LSML_Supervised(num_constraints=200, random_state=seed)
lsml.fit(self.X, self.y)
L = lsml.components_
assert_array_almost_equal(L.T.dot(L), lsml.get_mahalanobis_matrix())
def test_itml_supervised(self):
seed = np.random.RandomState(1234)
itml = ITML_Supervised(num_constraints=200, random_state=seed)
itml.fit(self.X, self.y)
L = itml.components_
assert_array_almost_equal(L.T.dot(L), itml.get_mahalanobis_matrix())
def test_lmnn(self):
lmnn = LMNN(k=5, learn_rate=1e-6, verbose=False)
lmnn.fit(self.X, self.y)
L = lmnn.components_
assert_array_almost_equal(L.T.dot(L), lmnn.get_mahalanobis_matrix())
def test_sdml_supervised(self):
seed = np.random.RandomState(1234)
sdml = SDML_Supervised(num_constraints=1500, prior='identity',
balance_param=1e-5, random_state=seed)
sdml.fit(self.X, self.y)
L = sdml.components_
assert_array_almost_equal(L.T.dot(L), sdml.get_mahalanobis_matrix())
def test_nca(self):
n = self.X.shape[0]
nca = NCA(max_iter=(100000 // n))
nca.fit(self.X, self.y)
L = nca.components_
assert_array_almost_equal(L.T.dot(L), nca.get_mahalanobis_matrix())
def test_lfda(self):
lfda = LFDA(k=2, n_components=2)
lfda.fit(self.X, self.y)
L = lfda.components_
assert_array_almost_equal(L.T.dot(L), lfda.get_mahalanobis_matrix())
def test_rca_supervised(self):
rca = RCA_Supervised(n_components=2, num_chunks=30, chunk_size=2)
rca.fit(self.X, self.y)
L = rca.components_
assert_array_almost_equal(L.T.dot(L), rca.get_mahalanobis_matrix())
def test_mlkr(self):
mlkr = MLKR(n_components=2)
mlkr.fit(self.X, self.y)
L = mlkr.components_
assert_array_almost_equal(L.T.dot(L), mlkr.get_mahalanobis_matrix())
@ignore_warnings
def test_components_from_metric_edge_cases(self):
"""Test that components_from_metric returns the right result in various
edge cases"""
rng = np.random.RandomState(42)
# an orthonormal matrix useful for creating matrices with given
# eigenvalues:
P = ortho_group.rvs(7, random_state=rng)
# matrix with all its coefficients very low (to check that the algorithm
# does not consider it as a diagonal matrix)(non regression test for
# https://github.com/scikit-learn-contrib/metric-learn/issues/175)
M = np.diag([1e-15, 2e-16, 3e-15, 4e-16, 5e-15, 6e-16, 7e-15])
M = P.dot(M).dot(P.T)
L = components_from_metric(M)
assert_allclose(L.T.dot(L), M)
# diagonal matrix
M = np.diag(np.abs(rng.randn(5)))
L = components_from_metric(M)
assert_allclose(L.T.dot(L), M)
# low-rank matrix (with zeros)
M = np.zeros((7, 7))
small_random = rng.randn(3, 3)
M[:3, :3] = small_random.T.dot(small_random)
L = components_from_metric(M)
assert_allclose(L.T.dot(L), M)
# low-rank matrix (without necessarily zeros)
R = np.abs(rng.randn(7, 7))
M = R.dot(np.diag([1, 5, 3, 2, 0, 0, 0])).dot(R.T)
L = components_from_metric(M)
assert_allclose(L.T.dot(L), M)
    # matrix with a determinant still high but which is
    # indefinite w.r.t. numpy standards
M = np.diag([1e5, 1e5, 1e5, 1e5, 1e5, 1e5, 1e-20])
M = P.dot(M).dot(P.T)
assert np.abs(np.linalg.det(M)) > 10
assert np.linalg.slogdet(M)[1] > 1 # (just to show that the computed
# determinant is far from null)
assert np.linalg.matrix_rank(M) < M.shape[0]
# (just to show that this case is indeed considered by numpy as an
# indefinite case)
L = components_from_metric(M)
assert_allclose(L.T.dot(L), M)
# matrix with lots of small nonzeros that make a big zero when multiplied
M = np.diag([1e-3, 1e-3, 1e-3, 1e-3, 1e-3, 1e-3, 1e-3])
L = components_from_metric(M)
assert_allclose(L.T.dot(L), M)
# full rank matrix
M = rng.randn(10, 10)
M = M.T.dot(M)
assert np.linalg.matrix_rank(M) == 10
L = components_from_metric(M)
assert_allclose(L.T.dot(L), M)
def test_non_symmetric_matrix_raises(self):
"""Checks that if a non symmetric matrix is given to
components_from_metric, an error is thrown"""
rng = np.random.RandomState(42)
M = rng.randn(10, 10)
with pytest.raises(ValueError) as raised_error:
components_from_metric(M)
assert str(raised_error.value) == "The input metric should be symmetric."
def test_non_psd_raises(self):
"""Checks that a non PSD matrix (i.e. with negative eigenvalues) will
raise an error when passed to components_from_metric"""
rng = np.random.RandomState(42)
D = np.diag([1, 5, 3, 4.2, -4, -2, 1])
P = ortho_group.rvs(7, random_state=rng)
M = P.dot(D).dot(P.T)
msg = ("Matrix is not positive semidefinite (PSD).")
with pytest.raises(NonPSDError) as raised_error:
components_from_metric(M)
assert str(raised_error.value) == msg
with pytest.raises(NonPSDError) as raised_error:
components_from_metric(D)
assert str(raised_error.value) == msg
def test_almost_psd_dont_raise(self):
"""Checks that if the metric is almost PSD (i.e. it has some negative
eigenvalues very close to zero), then components_from_metric will still
work"""
rng = np.random.RandomState(42)
D = np.diag([1, 5, 3, 4.2, -1e-20, -2e-20, -1e-20])
P = ortho_group.rvs(7, random_state=rng)
M = P.dot(D).dot(P.T)
L = components_from_metric(M)
assert_allclose(L.T.dot(L), M)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_21016
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BERT token classifier."""
# pylint: disable=g-classes-have-attributes
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package='Text')
class BertTokenClassifier(tf.keras.Model):
"""Token classifier model based on a BERT-style transformer-based encoder.
This is an implementation of the network structure surrounding a transformer
encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers
for Language Understanding" (https://arxiv.org/abs/1810.04805).
The BertTokenClassifier allows a user to pass in a transformer stack, and
instantiates a token classification network based on the passed `num_classes`
argument.
*Note* that the model is constructed by
[Keras Functional API](https://keras.io/guides/functional_api/).
Arguments:
network: A transformer network. This network should output a sequence output
and a classification output. Furthermore, it should expose its embedding
table via a "get_embedding_table" method.
num_classes: Number of classes to predict from the classification network.
initializer: The initializer (if any) to use in the classification networks.
Defaults to a Glorot uniform initializer.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
network,
num_classes,
initializer='glorot_uniform',
output='logits',
dropout_rate=0.1,
**kwargs):
self._self_setattr_tracking = False
self._network = network
self._config = {
'network': network,
'num_classes': num_classes,
'initializer': initializer,
'output': output,
}
# We want to use the inputs of the passed network as the inputs to this
# Model. To do this, we need to keep a handle to the network inputs for use
# when we construct the Model object at the end of init.
inputs = network.inputs
# Because we have a copy of inputs to create this Model object, we can
# invoke the Network object with its own input tensors to start the Model.
sequence_output, _ = network(inputs)
sequence_output = tf.keras.layers.Dropout(
rate=dropout_rate)(sequence_output)
self.classifier = tf.keras.layers.Dense(
num_classes,
activation=None,
kernel_initializer=initializer,
name='predictions/transform/logits')
self.logits = self.classifier(sequence_output)
if output == 'logits':
output_tensors = self.logits
elif output == 'predictions':
output_tensors = tf.keras.layers.Activation(tf.nn.log_softmax)(
self.logits)
else:
raise ValueError(
('Unknown `output` value "%s". `output` can be either "logits" or '
'"predictions"') % output)
super(BertTokenClassifier, self).__init__(
inputs=inputs, outputs=output_tensors, **kwargs)
@property
def checkpoint_items(self):
return dict(encoder=self._network)
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
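# Hedged usage sketch (assumption: `encoder` is any Keras network that exposes
# `.inputs` and returns a (sequence_output, cls_output) pair, e.g. a BERT-style
# encoder; num_classes=9 is illustrative):
#   tagger = BertTokenClassifier(network=encoder, num_classes=9,
#                                dropout_rate=0.1, output='logits')
#   per_token_logits = tagger(encoder_inputs)   # [batch, seq_len, num_classes]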
|
the-stack_106_21017
|
import collections
import datetime
import hashlib
import http.cookiejar
import logging
import re
import time
import urllib.parse
import xml.etree.ElementTree as ET
from enum import Enum
import requests
import requests.exceptions
import requests.packages.urllib3
import requests_toolbelt
from requests.auth import HTTPDigestAuth
from pytos.common.logging.definitions import REQUESTS_LOGGER_NAME
from pytos.common.exceptions import REST_HTTP_Exception, REST_Bad_Gateway, REST_Service_Unavailable_Error, \
REST_Unauthorized_Error
from pytos.common.functions.xml import get_xml_text_value
requests.packages.urllib3.disable_warnings()
try:
from xml.etree.ElementTree import ParseError
except ImportError:
from xml.parsers.expat import ExpatError as ParseError
# Uncomment the two lines below to get more debugging information from httplib
# import http.client
# http.client.HTTPConnection.debuglevel = 1
logger = logging.getLogger(REQUESTS_LOGGER_NAME)
class RESTAuthMethods(Enum):
Digest = "digest"
Basic = "basic"
class REST_Request(object):
"""
This class is the base class from which all other Request objects inherit.
:cvar TIMEOUT: The default timeout for requests.
:cvar MAX_RETRIES: The default amount of times to retry requests that result in connection errors.
:cvar RETRY_INTERVAL: The default interval between request retries, in seconds.
:cvar RETRY_BACKOFF: The default exponential backoff for retries.
"""
RETRY_BACKOFF = 2
TIMEOUT = 300
MAX_RETRIES = 5
RETRY_INTERVAL = 5
MAX_URI_LENGTH = 6500
def __init__(self, hostname, uri, protocol="https", **kwargs):
"""
Constructor for REST_Request
:param uri: The URI the request will access.
:type uri: str
:param protocol: The protocol the request will use.
:type protocol: str
:keyword timeout: (Optional) Set the timeout for the request (Default is 300 seconds).
:type timeout: float
:keyword login_data: The username and password that will be used for HTTP basic authentication for the request
({"username" : username,"password" : password})
:type login_data: dict
:keyword verify_ssl: If set to False, SSL verification for requests is disabled, otherwise it is enabled.
:type verify_ssl: bool
:keyword cookies: If set, the contents will be appended to the cookies sent with the request.
:type cookies: str/dict/cookielib.CookieJar
:keyword headers: Headers to be sent with the request.
:type headers: dict
:keyword max_retries: The amount of times to retry the request if a connection error occurs.
:type max_retries: int
:keyword retry_interval: The interval between retries in seconds.
:type retry_interval: int
:keyword retry_backoff: The exponential backoff for retries.
:type retry_backoff: int
:keyword expected_status_codes: A single integer or a list of integers representing HTTP status codes.
:type expected_status_codes: int or list of ints
        :raise REST_HTTP_Exception: If expected_status_codes is specified and the response does not contain at
            least one of the status codes.
:raise requests.exceptions.Timeout: If a timeout error occurs while trying to perform the request.
:raise requests.exceptions.ConnectionError: If an error occurs while trying to connect the specified host.
"""
self.response, self.request = None, None
self.expected_status_codes = None
self.body = None
self.auth_method = kwargs.get("auth_method", RESTAuthMethods.Basic)
if protocol not in ["http", "https"]:
raise ValueError("Protocol must be either http or https!")
else:
self.protocol = protocol
verify_ssl = kwargs.get("verify_ssl")
if verify_ssl is not None:
logger.debug("Setting verify_ssl to '%s'", verify_ssl)
self.verify_ssl = verify_ssl
else:
logger.debug("verify_ssl not set, setting to True by default.")
self.verify_ssl = True
session = kwargs.get("session")
if session is None:
self.session = requests.Session()
else:
self.session = session
proxies = kwargs.get("proxies")
if proxies is not None:
self.session.proxies = proxies
self.hostname = hostname
uri_length = len(uri)
if uri_length <= REST_Request.MAX_URI_LENGTH:
self.uri = uri
else:
raise ValueError("Maximum URI length ({}) exceeded , current URI length is {}, URI is '{}'".format(
REST_Request.MAX_URI_LENGTH, uri_length, uri))
login_data = kwargs.get("login_data")
if login_data is not None:
if all(login_data.values()):
if self.auth_method == RESTAuthMethods.Digest:
self.auth_tuple = HTTPDigestAuth(login_data["username"], login_data["password"])
else:
password_hash = hashlib.sha256()
password_hash.update(login_data["password"].encode("ascii"))
password_hash = password_hash.hexdigest()
logger.debug("Setting login_data to username '%s', SHA256 hashed password '%s'.",
login_data["username"], password_hash)
self.auth_tuple = (login_data["username"], login_data["password"])
else:
raise ValueError("Both username and password must be set.")
else:
self.auth_tuple = None
timeout = kwargs.get("timeout")
if timeout is not None:
logger.debug("Setting request timout to '%s'", timeout)
self.timeout = timeout
else:
self.timeout = REST_Request.TIMEOUT
max_retries = kwargs.get("max_retries")
if max_retries is not None:
logger.debug("Setting maximum retry count to '%s'", max_retries)
self.max_retries = max_retries
else:
self.max_retries = REST_Request.MAX_RETRIES
retry_backoff = kwargs.get("retry_backoff")
if retry_backoff is not None:
logger.debug("Setting retry backoff multiplier to '%s'", retry_backoff)
self.retry_backoff = retry_backoff
else:
self.retry_backoff = REST_Request.RETRY_BACKOFF
retry_interval = kwargs.get("retry_interval")
if retry_interval is not None:
logger.debug("Setting retry interval to '%s'", retry_interval)
self.retry_interval = retry_interval
else:
self.retry_interval = REST_Request.RETRY_INTERVAL
expected_status_codes = kwargs.get("expected_status_codes")
if expected_status_codes is not None:
logger.debug("Setting expected_status_codes to '%s'", expected_status_codes)
self.expected_status_codes = expected_status_codes
cookies = kwargs.get("cookies")
if cookies is not None:
logger.debug("Setting cookies to '%s'", cookies)
if isinstance(cookies, http.cookiejar.CookieJar):
self.cookie_jar = cookies
else:
logger.warning("Unknown cookie type '%s'", type(cookies))
self.cookie_jar = http.cookiejar.CookieJar
else:
self.cookie_jar = http.cookiejar.CookieJar
headers = kwargs.get("headers")
self.headers = {}
if headers is not None:
self.headers.update(headers)
logger.debug("Setting headers to '%s'", headers)
self.url = "{protocol}://{hostname}{uri}".format(protocol=self.protocol, hostname=self.hostname, uri=self.uri)
def get_created_item_id(self):
try:
item_id = self.response.headers["location"].split("/")[-1]
if "?" in item_id:
logger.debug("ID contains a reference to a parameter.")
item_id = re.sub(r"\?.*", "", item_id)
if "-" in item_id:
logger.debug("ID refers to a task.")
return item_id
elif item_id[0].isalpha():
logger.debug("ID refers to a name.")
return item_id
elif "," in item_id:
return [int(item) for item in item_id.split(",")]
return int(item_id)
except (AttributeError, KeyError):
return None
def _ensure_response_status(self):
"""Check if the self.response object contains at least one of HTTP status code in self.expected_status_codes.
:return: Returns True if the specified status code was found in the self.response member object.
:rtype: bool
@raise requests.HTTPError: If the specified status code was not found in the self.response member object.
"""
status_code_ok = True
if not self.expected_status_codes:
return True
try:
self.response.raise_for_status()
except requests.exceptions.HTTPError as local_request_exception:
request_exception = local_request_exception
logger.error("Got the following error while performing request: '%s'.", request_exception)
status_code_ok = False
if status_code_ok:
if isinstance(self.expected_status_codes, collections.Iterable):
if self.response.status_code not in self.expected_status_codes:
status_code_ok = False
elif isinstance(self.expected_status_codes, int):
if self.expected_status_codes != self.response.status_code:
status_code_ok = False
else:
raise ValueError("self.expected_status_codes must either be an int or list of ints.")
if not status_code_ok:
error_message = ""
try:
error_response_xml = ET.fromstring(self.response.content)
api_error_message = get_xml_text_value(error_response_xml, "message")
api_error_code = error_response_xml.find("code").text
if api_error_message is not None:
error_message = "Message from API is '{}'.\n".format(api_error_message)
logger.error(error_message)
error_message += "Error from API is '{}'.".format(api_error_code)
logger.error(error_message)
except (ParseError, AttributeError):
error_message = "Could not parse response from API."
logger.error(error_message)
logger.error("Status code for request is '%s'.", self.response.status_code)
http_exception = REST_HTTP_Exception.get_exception(self.response, self.expected_status_codes)
raise http_exception
else:
logger.info("Status code for request is '%s'.", self.response.status_code)
return True
def _perform_request(self):
start_time = datetime.datetime.now()
exception_copy = None
unauthorized_error = False
for retry_count in range(self.max_retries + 1):
try:
self.response = self.session.send(self.request, verify=self.verify_ssl, timeout=self.timeout)
except requests.exceptions.SSLError as request_exception:
exception_copy = request_exception
logger.error("Connection to '%s://%s%s' failed ('%s').", self.protocol, self.hostname, self.uri,
request_exception.args[0])
except requests.exceptions.ConnectionError as request_exception:
exception_copy = request_exception
message = "Connection to {}://{}{} failed."
try:
message = message.format(self.protocol, self.hostname, self.uri, request_exception.args[0].reason)
except AttributeError:
message = message.format(self.protocol, self.hostname, self.uri, request_exception.args[0])
logger.error(message)
except requests.exceptions.Timeout as request_exception:
exception_copy = request_exception
logger.error("Connection to '%s://%s%s' timed out ('%s' seconds).", self.protocol, self.hostname,
self.uri,
self.timeout)
else:
logger.debug("Sent headers: '%s.", self.headers)
if self.body is not None:
logger.debug("Sent body: '%s'.", self.body)
try:
self._ensure_response_status()
request_duration = datetime.datetime.now() - start_time
logger.debug("Request took '%s' seconds.", request_duration)
logger.info("Received status: '%s'.", self.response.status_code)
logger.debug("Received headers: '%s'.", self.response.headers)
if self.response.content:
logger.debug("Received response body: '%s'", self.response.content)
break
except (REST_Bad_Gateway, REST_Service_Unavailable_Error) as request_exception:
exception_copy = request_exception
self.log_error_details(request_exception)
except REST_Unauthorized_Error as request_exception:
if unauthorized_error:
exception_copy = request_exception
self.log_error_details(request_exception)
break
else:
unauthorized_error = True
REST_Request.cookies = None
except REST_HTTP_Exception as request_exception:
exception_copy = request_exception
self.log_error_details(request_exception)
break
logger.debug("Sleeping for '%s' seconds between retries.", self.retry_interval)
time.sleep(self.retry_interval)
logger.info("Retrying request to '%s', Retry '%s' out of '%s'.", self.url, retry_count + 1,
self.max_retries)
if self.retry_backoff != 1:
self.retry_interval *= self.retry_backoff
logger.debug("Multiplied retry interval with backoff ('%s'), retry_interval is now '%s'.",
self.retry_backoff, self.retry_interval)
if exception_copy is not None:
raise exception_copy
def log_error_details(self, request_exception):
logger.error("Request to '%s://%s%s' resulted in an error from the server: '%s'.",
self.protocol,
self.hostname,
self.uri, request_exception)
logger.error("Sent headers: '%s.", self.headers)
if self.body is not None:
logger.error("Sent body: '%s'.", self.body)
logger.error("Received headers: '%s'.", self.response.headers)
if self.response.content:
logger.error("Received response body: '%s'", self.response.content)
def _encode_body_params(self, params):
logger.debug("Params: '%s'.", params)
for index, key in enumerate(params.keys()):
if index == 0:
self.body = "{}={}".format(key, urllib.parse.quote_plus(str(params[key])))
else:
self.body += "&{}={}".format(key, urllib.parse.quote_plus(str(params[key])))
class GET_Request(REST_Request):
"""
This class wraps a requests GET request.
"""
def __init__(self, hostname, uri, protocol="https", **kwargs):
"""
Constructor
"""
super().__init__(hostname, uri, protocol, **kwargs)
logger.info("Sending GET request to '%s'", self.url)
request_obj = requests.Request("GET", self.url, auth=self.auth_tuple,
params=kwargs.get("params"), headers=self.headers,
cookies=kwargs.get('cookies'))
if self.session:
self.request = self.session.prepare_request(request_obj)
else:
self.request = request_obj.prepare()
self._perform_request()
class POST_Request(REST_Request):
"""
This class wraps a requests POST request.
"""
def __init__(self, hostname, uri, body=None, protocol="https", cgi=False, **kwargs):
"""
Constructor
:param body: Body contents to be sent with the request
:type body: str|dict
:param cgi: If set to True, the content type header for the request will be set to
"application/x-www-form-urlencoded", otherwise it will be set to "application/xml"
:type cgi: bool
:keyword params: If set, these parameters that will be URL encoded and included in the request body.
:type params: dict
:keyword multi_part_form_params: A tuple of parameters that will be encoded in multipart/form encoding.
If the tuple contains 2 items, the first one will be used as the parameter name, the second
will be the parameter value.
If the tuple contains 3 items, the first will be used as the parameter name, the second will
be a open file handle, the third will be the name for the file to be sent.
:type multi_part_form_params: tuple
"""
super().__init__(hostname, uri, protocol, **kwargs)
# Handle parameters in dict form
params = kwargs.get("params")
# Handle files
files = kwargs.get("files")
# Handle multi part params
multi_part_form_params = kwargs.get("multi_part_form_params")
if multi_part_form_params is not None:
logger.debug("Got the following multi-part form params '%s'", multi_part_form_params)
data_types = (params, multi_part_form_params, body)
true_count = sum([1 for data_type in data_types if data_type])
if true_count > 1:
raise ValueError("Only one data type to be sent can be used: body, params or multi_part_form_params.")
if multi_part_form_params is not None:
multi_part_form = requests_toolbelt.MultipartEncoder(fields=multi_part_form_params)
self.headers["Content-Type"] = multi_part_form.content_type
self.body = multi_part_form.to_string()
multi_part_form_length = str(multi_part_form.len) if hasattr(multi_part_form, 'len') else len(multi_part_form)
self.headers["Content-Size"] = multi_part_form_length
self.headers["Accept"] = "*/*"
else:
if params is not None:
self._encode_body_params(params)
else:
self.body = body
if "Content-Type" not in self.headers:
if cgi:
self.headers["Content-Type"] = "application/x-www-form-urlencoded"
else:
self.headers["Content-Type"] = "application/xml"
logger.info("Sending POST request to '%s'", self.url)
request_obj = requests.Request("POST", self.url, data=self.body, auth=self.auth_tuple, headers=self.headers,
files=files, cookies=kwargs.get('cookies'))
if self.session:
self.request = self.session.prepare_request(request_obj)
else:
self.request = request_obj.prepare()
self._perform_request()
class PUT_Request(REST_Request):
"""
This class wraps a requests PUT request.
"""
def __init__(self, hostname, uri, body=None, protocol="https", cgi=False, **kwargs):
"""
Constructor
:param body: Body contents to be sent with the request
:type body: str|dict
:param cgi: If set to True, the content type header for the request will be set to
"application/x-www-form-urlencoded", otherwise it will be set to "application/xml"
:type cgi: bool
:keyword params: If set, these parameters that will be URL encoded and included in the request body.
:type params: dict
"""
super().__init__(hostname, uri, protocol, **kwargs)
# Handle parameters in dict form
params = kwargs.get("params")
data_types = (params, body)
true_count = sum([1 for data_type in data_types if data_type])
if true_count > 1:
raise ValueError("Only one data type to be POSTed can be used: body or params.")
if params is not None:
self._encode_body_params(params)
else:
self.body = body
if self.body is not None:
if "Content-Type" not in self.headers:
if cgi:
self.headers["Content-Type"] = "application/x-www-form-urlencoded"
else:
self.headers["Content-Type"] = "application/xml"
logger.info("Sending PUT request to '%s'", self.url)
request_obj = requests.Request("PUT", self.url, data=self.body, auth=self.auth_tuple, headers=self.headers,
cookies=kwargs.get('cookies'))
if self.session:
self.request = self.session.prepare_request(request_obj)
else:
self.request = request_obj.prepare()
self._perform_request()
class DELETE_Request(REST_Request):
"""
This class wraps a requests DELETE request.
"""
def __init__(self, hostname, uri, protocol="https", cgi=False, **kwargs):
"""
Constructor
:param cgi: If set to True, the content type header for the request will be set to
"application/x-www-form-urlencoded", otherwise it will be set to "application/xml"
:type cgi: bool
"""
super().__init__(hostname, uri, protocol, **kwargs)
if "Content-Type" not in self.headers:
if cgi:
self.headers["Content-Type"] = "application/x-www-form-urlencoded"
else:
self.headers["Content-Type"] = "application/xml"
logger.info("Sending DELETE request to '%s'", self.url)
request_obj = requests.Request("DELETE", self.url, auth=self.auth_tuple, headers=self.headers,
cookies=kwargs.get('cookies'))
if self.session:
self.request = self.session.prepare_request(request_obj)
else:
self.request = request_obj.prepare()
self._perform_request()
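# Hedged usage sketch (the host and URI are hypothetical; the keyword arguments
# are the ones documented on REST_Request above):
#   request = GET_Request("securetrack.example.com", "/securetrack/api/devices",
#                         login_data={"username": "user", "password": "secret"},
#                         expected_status_codes=200, verify_ssl=False)
#   xml_body = request.response.content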
|
the-stack_106_21018
|
#!/usr/bin/env python
from weboob.core import Weboob
from weboob.exceptions import ModuleLoadError, ModuleInstallError
from weboob.core.backendscfg import BackendAlreadyExists
from weboob.capabilities.bank import Account
import logging
logger = logging.getLogger(__name__)
class Boobmanage:
""" Class that performs management of needed backend """
def __init__(self):
self.weboob = Weboob()
def has_backend(self, backend):
logger.debug("has_backend(): backend: %s" % backend)
for backend_name, module_name, params in sorted(
self.weboob.backends_config.iter_backends()):
logger.debug('has_backend(): checking module "%r"' % module_name)
try:
self.weboob.modules_loader.get_or_load_module(
module_name)
except ModuleLoadError as e:
                logger.warning('has_backend(): unable to load module '
                               '"%r": %s' % (module_name, e))
continue
if backend == backend_name:
logger.debug('has_backend(): backend "%s" found, returning'
' True' % backend)
return True
logger.debug("has_backend(): looping to the next module")
logger.debug('has_backend(): backend "%s" not found, returning'
' False' % backend)
return False
def install_module(self, name):
logger.debug("install_module: name: %s" % name)
try:
self.weboob.repositories.install(name)
except ModuleInstallError as e:
logger.error('install_module(): Unable to install module '
'"%s": %s' % (name, e))
return False
logger.debug("install_module: OK returning True")
return True
def add_backend(self, backend, params):
logger.debug("add_backend(): backend: %s | params: %s" %
(backend, params))
minfo = self.weboob.repositories.get_module_info(backend)
config = None
module = None
instance = None
try:
if minfo is None:
raise ModuleLoadError(backend, "Module for backend "
"%s does not exist" % backend)
logger.debug('add_backend(): module for backend "%s" loaded'
% (backend))
if not minfo.is_installed():
logger.warning('add_backend(): module "%s" is available but '
'not installed' % minfo.name)
self.install_module(minfo)
else:
logger.debug('add_backend(): module "%s" is installed'
% minfo.name)
module = self.weboob.modules_loader.get_or_load_module(backend)
config = module.config
except ModuleLoadError as e:
logger.error('add_backend(): unable to load module "%s": %s'
% (backend, e))
return False
try:
config = config.load(self.weboob, module.name, backend, params,
nofail=True)
            for key, value in params.items():
if key not in config:
logger.debug('add_backend(): config named "%s" is not '
'known for the module "%s", ignoring'
% (key, backend))
continue
logger.debug('add_backend(): config named "%s" loaded in '
'module "%s"' % (key, backend))
config[key].set(value)
config.save(edit=False)
logger.debug('add_backend(): backend "%s" loaded and configured, '
'returning True' % backend)
self.weboob.iter_resources(obj=Account)
return True
except BackendAlreadyExists:
logger.warning('add_backend(): backend "%s" already exists, doing'
' nothing' % backend)
return True
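# Hedged usage sketch (module name and credentials are hypothetical):
#   manager = Boobmanage()
#   if not manager.has_backend("mybank"):
#       manager.install_module("mybank")
#       manager.add_backend("mybank", {"login": "user", "password": "secret"})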
|
the-stack_106_21020
|
import machine
import ssd1306
import time
#define the input from light sensor
adc = machine.ADC(0)
pinvib = machine.Pin(12)
pwmvib = machine.PWM(pinvib)
pwmvib.freq(900)
pwmvib.duty(0)
switchA = machine.Pin(0, machine.Pin.IN, machine.Pin.PULL_UP)
switchB = machine.Pin(13, machine.Pin.IN, value = 0)
switchC = machine.Pin(2, machine.Pin.IN, machine.Pin.PULL_UP)
point = 0
settime = (2018, 9, 26, 1, 1, 1, 1, 1)
def switchAcallback(p):
global point
time.sleep(0.1)
if p.value() == 1:
point = point + 1
if(point > 7):
point = 0
def switchBcallback(p):
time.sleep(0.1)
global displaytime
global point
print('in')
temp = list(displaytime)
temp[point] += 1
rtc.datetime(temp)
print('set')
def switchCcallback(p):
time.sleep(0.1)
global displaytime
global point
temp = list(displaytime)
temp[point] -= 1
rtc.datetime(temp)
switchA.irq(trigger=machine.Pin.IRQ_RISING, handler=switchAcallback)
switchB.irq(trigger=machine.Pin.IRQ_RISING, handler=switchBcallback)
switchC.irq(trigger=machine.Pin.IRQ_RISING, handler=switchCcallback)
i2c = machine.I2C(-1, machine.Pin(5), machine.Pin(4))
oled = ssd1306.SSD1306_I2C(128, 32, i2c)
rtc = machine.RTC()
rtc.datetime(settime)
while 1:
oled.fill(0)
displaytime = rtc.datetime()
oled.text(str(displaytime[0]) + '/' + str(displaytime[1]) + '/' + str(displaytime[2]) + 'Week:' + str(displaytime[3]), 0, 0)
oled.text(str(displaytime[4]) + ':' + str(displaytime[5]) + ':' + str(displaytime[6]), 0, 10)
oled.show()
i = adc.read()
oled.contrast(int(i/4))
|
the-stack_106_21021
|
#!/usr/bin/env python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Zoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Zoin")
return os.path.expanduser("~/.zoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 18332 if testnet else 8332
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and total_in-total_out < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and total_in-total_out < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_106_21023
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
############################
# File Name: thread_4.py
# Author: One Zero
# Mail: [email protected]
# Created Time: 2015-12-29 19:16:22
############################
import threading
import time
class myThread(threading.Thread):
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
print("Staring " + self.name)
# 获得锁,成功获得锁定后返回True
# 可选的timeout参数不填时一直阻塞直到获得锁定
# 否则超时后将返回False
threadLock.acquire()
print_time(self.name, self.counter, 3)
# 释放锁
threadLock.release()
def print_time(threadName, delay, counter):
while counter:
time.sleep(delay)
print("%s: %s" % (threadName, time.ctime(time.time())))
counter -= 1
threadLock = threading.Lock()
threads = []
# Create new threads
thread1 = myThread(1, "Thread-1", 1)
thread2 = myThread(2, "Thread-2", 2)
# Start the new threads
thread1.start()
thread2.start()
# Add the threads to the thread list
threads.append(thread1)
threads.append(thread2)
# Wait for all threads to finish
for t in threads:
t.join()
print("Exiting Main Thread")
|
the-stack_106_21024
|
import json
import pickle
import argparse
import os
def get_args():
parser = argparse.ArgumentParser("Parsing MS COCO dataset")
parser.add_argument("--input", type=str, default="data/COCO")
parser.add_argument("--type", type=str, default="val2014")
parser.add_argument("--output", type=str, default="data/COCO/anno_pickle")
args = parser.parse_args()
return args
def main(opt):
ann_file = '{}/annotations/instances_{}.json'.format(opt.input, opt.type)
dataset = json.load(open(ann_file, 'r'))
image_dict = {}
invalid_anno = 0
for image in dataset["images"]:
if image["id"] not in image_dict.keys():
image_dict[image["id"]] = {"file_name": image["file_name"], "objects": []}
for ann in dataset["annotations"]:
if ann["image_id"] not in image_dict.keys():
invalid_anno += 1
continue
image_dict[ann["image_id"]]["objects"].append(
[int(ann["bbox"][0]), int(ann["bbox"][1]), int(ann["bbox"][0] + ann["bbox"][2]),
int(ann["bbox"][1] + ann["bbox"][3]), ann["category_id"]])
    os.makedirs(opt.output, exist_ok=True)
    pickle.dump(image_dict, open(opt.output + os.sep + 'COCO_{}.pkl'.format(opt.type), 'wb'))
    print("There are {} invalid annotation(s)".format(invalid_anno))
if __name__ == "__main__":
opt = get_args()
main(opt)
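# A short sketch of reading one of the generated pickles back (the path below
# assumes the default --output and --type arguments):
#
#     import pickle
#     with open("data/COCO/anno_pickle/COCO_val2014.pkl", "rb") as f:
#         images = pickle.load(f)
#     # Each entry maps an image id to its file name and a list of
#     # [xmin, ymin, xmax, ymax, category_id] boxes.
#     some_id = next(iter(images))
#     print(images[some_id]["file_name"], len(images[some_id]["objects"]))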
|
the-stack_106_21025
|
"""
@date: 2021/7/19
@description:
"""
import torch
import loss
from utils.misc import tensor2np
def build_criterion(config, logger):
criterion = {}
device = config.TRAIN.DEVICE
for k in config.TRAIN.CRITERION.keys():
sc = config.TRAIN.CRITERION[k]
if sc.WEIGHT is None or float(sc.WEIGHT) == 0:
continue
criterion[sc.NAME] = {
'loss': getattr(loss, sc.LOSS)(),
'weight': float(sc.WEIGHT),
'sub_weights': sc.WEIGHTS,
'need_all': sc.NEED_ALL
}
criterion[sc.NAME]['loss'] = criterion[sc.NAME]['loss'].to(device)
if config.AMP_OPT_LEVEL != "O0" and 'cuda' in device:
criterion[sc.NAME]['loss'] = criterion[sc.NAME]['loss'].type(torch.float16)
# logger.info(f"Build criterion:{sc.WEIGHT}_{sc.NAME}_{sc.LOSS}_{sc.WEIGHTS}")
return criterion
def calc_criterion(criterion, gt, dt, epoch_loss_d):
loss = None
postfix_d = {}
for k in criterion.keys():
if criterion[k]['need_all']:
single_loss = criterion[k]['loss'](gt, dt)
ws_loss = None
for i, sub_weight in enumerate(criterion[k]['sub_weights']):
if sub_weight == 0:
continue
if ws_loss is None:
ws_loss = single_loss[i] * sub_weight
else:
ws_loss = ws_loss + single_loss[i] * sub_weight
single_loss = ws_loss if ws_loss is not None else single_loss
else:
assert k in gt.keys(), "ground label is None:" + k
assert k in dt.keys(), "detection key is None:" + k
if k == 'ratio' and gt[k].shape[-1] != dt[k].shape[-1]:
gt[k] = gt[k].repeat(1, dt[k].shape[-1])
single_loss = criterion[k]['loss'](gt[k], dt[k])
postfix_d[k] = tensor2np(single_loss)
if k not in epoch_loss_d.keys():
epoch_loss_d[k] = []
epoch_loss_d[k].append(postfix_d[k])
single_loss = single_loss * criterion[k]['weight']
if loss is None:
loss = single_loss
else:
loss = loss + single_loss
k = 'loss'
postfix_d[k] = tensor2np(loss)
if k not in epoch_loss_d.keys():
epoch_loss_d[k] = []
epoch_loss_d[k].append(postfix_d[k])
return loss, postfix_d, epoch_loss_d
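if __name__ == '__main__':
    # A minimal sketch of calc_criterion, not part of the original module. The
    # criterion dict is built by hand instead of via build_criterion, so no
    # config object is needed; the 'depth' key and the tensor shapes are
    # arbitrary assumptions for illustration, and running this still requires
    # the repo-local `loss` and `utils.misc` modules to be importable.
    demo_criterion = {
        'depth': {
            'loss': torch.nn.L1Loss(),
            'weight': 1.0,
            'sub_weights': None,
            'need_all': False,
        }
    }
    demo_gt = {'depth': torch.rand(2, 1, 8, 8)}
    demo_dt = {'depth': torch.rand(2, 1, 8, 8)}
    total, postfix, epoch_losses = calc_criterion(demo_criterion, demo_gt, demo_dt, {})
    print(postfix)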
|
the-stack_106_21027
|
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.transformation.interstate import StateAssignElimination, StateFusion
def test_eliminate_end_state():
sdfg = dace.SDFG('state_elimination_test')
state1 = sdfg.add_state()
state2 = sdfg.add_state()
state3 = sdfg.add_state()
sdfg.add_edge(state1, state2, dace.InterstateEdge(assignments=dict(k=1)))
sdfg.add_edge(state2, state3,
dace.InterstateEdge(assignments=dict(k='k + 1')))
sdfg.apply_strict_transformations()
assert sdfg.number_of_nodes() == 1
def test_state_assign_elimination():
sdfg = dace.SDFG('state_assign_elimination_test')
sdfg.add_array('A', [10], dace.float32)
sdfg.add_array('B', [10], dace.float32)
state1 = sdfg.add_state()
state2 = sdfg.add_state()
state3 = sdfg.add_state()
state3.add_nedge(state3.add_read('A'), state3.add_write('B'),
dace.Memlet.simple('A', 'k'))
sdfg.add_edge(state1, state2, dace.InterstateEdge(assignments=dict(k=1)))
sdfg.add_edge(state2, state3,
dace.InterstateEdge(assignments=dict(k='k + 1')))
# Assertions before/after transformations
sdfg.apply_transformations_repeated(StateFusion, strict=True)
assert sdfg.number_of_nodes() == 3
assert sdfg.apply_transformations_repeated(StateAssignElimination) == 1
assert str(sdfg.nodes()[-1].edges()[0].data.subset) == 'k + 1'
sdfg.apply_transformations_repeated(StateFusion, strict=True)
assert sdfg.number_of_nodes() == 2
# Applying transformations again should yield one state
assert sdfg.apply_transformations_repeated(StateAssignElimination) == 1
sdfg.apply_strict_transformations()
assert sdfg.number_of_nodes() == 1
assert str(sdfg.nodes()[-1].edges()[0].data.subset) == '2'
def test_sae_scalar():
# Construct SDFG
sdfg = dace.SDFG('state_assign_elimination_test')
sdfg.add_array('A', [20, 20], dace.float64)
sdfg.add_array('B', [1], dace.float64)
sdfg.add_scalar('scal', dace.int32, transient=True)
initstate = sdfg.add_state()
initstate.add_edge(initstate.add_tasklet('do', {}, {'out'}, 'out = 5'),
'out', initstate.add_write('scal'), None,
dace.Memlet('scal'))
state = sdfg.add_state()
sdfg.add_edge(initstate, state,
dace.InterstateEdge(assignments=dict(s2='scal')))
a = state.add_read('A')
t = state.add_tasklet('do', {'inp'}, {'out'}, 'out = inp')
b = state.add_write('B')
state.add_edge(a, None, t, 'inp', dace.Memlet('A[s2, s2 + 1]'))
state.add_edge(t, 'out', b, None, dace.Memlet('B[0]'))
#######################################################
assert sdfg.apply_transformations(StateAssignElimination) == 0
if __name__ == '__main__':
test_eliminate_end_state()
test_state_assign_elimination()
test_sae_scalar()
|
the-stack_106_21028
|
from datetime import datetime
from helper.embed_props import *
import discord
header_text = "Message for the Koders"
def morning_embed(quote, author):
author = "<br />" + "<i>- " + author + "</i>" # Adding proper spacing for author via HTML
message = quote + author
embed = discord.Embed(
description="Good morning, folks!🔅 Let us get started to grind and shine!✨✨✨\n"
"*P.S. drink water and wear your socks.*",
timestamp=timestamp,
color=color
)
embed.set_author(name=header_text)
embed.add_field(name="Before we kick off, let's have a thought", value=message, inline=False)
embed.set_footer(text=footer_text,
icon_url=icon_url)
return embed
def evening_embed():
embed = discord.Embed(
description="Work hard in silence and at your home being socially distant! **But do not forget to log your "
"work** that can make the noise of your work. So, Log your work, okay?",
timestamp=timestamp,
color=color
)
embed.set_author(name=header_text)
embed.set_footer(text=footer_text,
icon_url=icon_url)
return embed
def friday_embed():
embed = discord.Embed(
description="Aye hey, it is your time to take a break 😄. Communication is the key, Remember?\n"
"Do not forget to attend the meeting tonight. Let’s have some fun. You did great this week. "
"We are proud of you.\n"
"**#ItIsFriyay** 🥳🥳🥳.",
timestamp=timestamp,
color=color
)
embed.set_author(name=header_text)
embed.set_footer(text=footer_text,
icon_url=icon_url)
return embed
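# A sketch of how these helpers might be used from a discord.py client; the
# channel id, quote text, and bot setup are hypothetical and not part of this
# module. The calls would live inside an async command or scheduled task:
#
#     channel = bot.get_channel(1234567890)  # hypothetical channel id
#     await channel.send(embed=morning_embed("Stay hungry, stay foolish.", "Steve Jobs"))
#     await channel.send(embed=evening_embed())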
|
the-stack_106_21030
|
import os
import json
import pathlib
import tempfile
import contextlib
from http import HTTPStatus
from unittest.mock import patch
import aiohttp
from dffml.model.slr import SLRModel
from dffml.source.json import JSONSource
from dffml import Record, Features, DefFeature, save, train, accuracy
from dffml.util.asynctestcase import AsyncTestCase
from dffml_service_http.cli import HTTPService
from dffml_service_http.util.testing import ServerRunner, ServerException
from .test_routes import TestRoutesMultiComm
from .dataflow import formatter, HELLO_BLANK_DATAFLOW, HELLO_WORLD_DATAFLOW
class TestCreateTLS(AsyncTestCase):
async def test_create(self):
with tempfile.TemporaryDirectory() as tempdir:
with self.subTest(certs="server"):
await HTTPService.createtls.server.cli(
"-bits",
"1024",
"-key",
os.path.join(tempdir, "server.key"),
"-cert",
os.path.join(tempdir, "server.pem"),
)
self.assertTrue(
os.path.isfile(os.path.join(tempdir, "server.key"))
)
self.assertTrue(
os.path.isfile(os.path.join(tempdir, "server.pem"))
)
with self.subTest(certs="client"):
await HTTPService.createtls.client.cli(
"-bits",
"1024",
"-key",
os.path.join(tempdir, "client.key"),
"-cert",
os.path.join(tempdir, "client.pem"),
"-csr",
os.path.join(tempdir, "client.csr"),
"-server-key",
os.path.join(tempdir, "server.key"),
"-server-cert",
os.path.join(tempdir, "server.pem"),
)
self.assertTrue(
os.path.isfile(os.path.join(tempdir, "client.key"))
)
self.assertTrue(
os.path.isfile(os.path.join(tempdir, "client.pem"))
)
self.assertTrue(
os.path.isfile(os.path.join(tempdir, "client.csr"))
)
class TestServer(AsyncTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._exit_stack = contextlib.ExitStack()
cls.exit_stack = cls._exit_stack.__enter__()
cls.exit_stack.enter_context(
patch(
"dffml.df.base.OperationImplementation.load",
new=TestRoutesMultiComm.patch_operation_implementation_load,
)
)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._exit_stack.__exit__(None, None, None)
def url(self, cli):
return f"http://{cli.addr}:{cli.port}"
@contextlib.asynccontextmanager
async def get(self, cli, path):
async with aiohttp.ClientSession() as session:
async with session.get(self.url(cli) + path) as r:
if r.status != HTTPStatus.OK:
raise ServerException((await r.json())["error"])
yield r
@contextlib.asynccontextmanager
async def post(self, cli, path, *args, **kwargs):
async with aiohttp.ClientSession() as session:
async with session.post(
self.url(cli) + path, *args, **kwargs
) as r:
if r.status != HTTPStatus.OK:
raise ServerException((await r.json())["error"])
yield r
async def test_insecure_off_by_default(self):
self.assertFalse(HTTPService.server().insecure)
async def test_start_insecure(self):
async with ServerRunner.patch(HTTPService.server) as tserver:
await tserver.start(
HTTPService.server.cli("-port", "0", "-insecure")
)
async def test_start(self):
with tempfile.TemporaryDirectory() as tempdir:
await HTTPService.createtls.server.cli(
"-bits",
"2048",
"-key",
os.path.join(tempdir, "server.key"),
"-cert",
os.path.join(tempdir, "server.pem"),
)
async with ServerRunner.patch(HTTPService.server) as tserver:
await tserver.start(
HTTPService.server.cli(
"-port",
"0",
"-key",
os.path.join(tempdir, "server.key"),
"-cert",
os.path.join(tempdir, "server.pem"),
)
)
async def test_mc_config(self):
with tempfile.TemporaryDirectory() as tempdir:
# URLs for endpoints
hello_world_url: str = "/hello/world"
hello_blank_url: str = "/hello/blank"
# Create the required directory structure
# Create directories for multicomm, dataflow, and dataflow overrides
pathlib.Path(tempdir, "mc").mkdir()
pathlib.Path(tempdir, "mc", "http").mkdir()
pathlib.Path(tempdir, "df").mkdir()
            # TODO split config part of dataflow into separate directory
pathlib.Path(tempdir, "config").mkdir()
# Write out multicomm configs
pathlib.Path(tempdir, "mc", "http", "hello_world.json").write_text(
json.dumps(
{
"path": hello_world_url,
"presentation": "json",
"asynchronous": False,
},
sort_keys=True,
indent=4,
)
)
pathlib.Path(tempdir, "mc", "http", "hello_blank.json").write_text(
json.dumps(
{
"path": hello_blank_url,
"presentation": "json",
"asynchronous": False,
},
sort_keys=True,
indent=4,
)
)
# Write out dataflow configs
pathlib.Path(tempdir, "df", "hello_world.json").write_text(
json.dumps(
HELLO_WORLD_DATAFLOW.export(), sort_keys=True, indent=4
)
)
pathlib.Path(tempdir, "df", "hello_blank.json").write_text(
json.dumps(
HELLO_BLANK_DATAFLOW.export(), sort_keys=True, indent=4
)
)
# Start the server
async with ServerRunner.patch(HTTPService.server) as tserver:
cli = await tserver.start(
HTTPService.server.cli(
"-port",
"0",
"-insecure",
"-mc-config",
tempdir,
"-mc-atomic",
)
)
self.assertEqual(cli.mc_config, tempdir)
                # Verify routes were registered and perform as expected
message: str = "Hello World"
with self.subTest(test=message):
# Check that hello world works
async with self.get(cli, hello_world_url) as response:
self.assertEqual(
{"response": message},
list((await response.json()).values())[0],
)
# Check that hello blank works
message: str = "Hello Feedface"
with self.subTest(test=message):
async with self.post(
cli,
hello_blank_url,
json={
"Feedface": [
{
"value": "Feedface",
"definition": formatter.op.inputs[
"data"
].name,
}
]
},
) as response:
self.assertEqual(
{"Feedface": {"response": message}},
await response.json(),
)
async def test_models(self):
with tempfile.TemporaryDirectory() as tempdir:
# Model the HTTP API will pre-load
model = SLRModel(
features=Features(DefFeature("f1", float, 1)),
predict=DefFeature("ans", int, 1),
directory=tempdir,
)
            # y = m * x + b is the equation SLR is solving for
m = 5
b = 3
# Train the model
await train(
model, *[{"f1": x, "ans": m * x + b} for x in range(0, 10)]
)
await accuracy(
model, *[{"f1": x, "ans": m * x + b} for x in range(10, 20)]
)
async with ServerRunner.patch(HTTPService.server) as tserver:
cli = await tserver.start(
HTTPService.server.cli(
"-insecure",
"-port",
"0",
"-models",
"mymodel=slr",
"-model-mymodel-directory",
tempdir,
"-model-mymodel-features",
"f1:float:1",
"-model-mymodel-predict",
"ans:int:1",
)
)
async with self.post(
cli,
f"/model/mymodel/predict/0",
json={
f"record_{x}": {"features": {"f1": x}}
for x in range(20, 30)
},
) as response:
response = await response.json()
records = response["records"]
self.assertEqual(len(records), 10)
for record in records.values():
should_be = m * record["features"]["f1"] + b
prediction = record["prediction"]["ans"]["value"]
percent_error = abs(should_be - prediction) / should_be
self.assertLess(percent_error, 0.2)
async def test_sources(self):
with tempfile.TemporaryDirectory() as tempdir:
# Source the HTTP API will pre-load
source = JSONSource(
filename=str(pathlib.Path(tempdir, "source.json")),
allowempty=True,
readwrite=True,
)
# Record the source will have in it
myrecord = Record("myrecord", data={"features": {"f1": 0}})
await save(source, myrecord)
async with ServerRunner.patch(HTTPService.server) as tserver:
cli = await tserver.start(
HTTPService.server.cli(
"-insecure",
"-port",
"0",
"-sources",
"mysource=json",
"-source-mysource-filename",
source.config.filename,
)
)
async with self.get(
cli, "/source/mysource/record/myrecord"
) as r:
self.assertEqual(await r.json(), myrecord.export())
|
the-stack_106_21031
|
""" test indexing with ix """
import pytest
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas.compat import lrange
from pandas import Series, DataFrame, option_context, MultiIndex
from pandas.util import testing as tm
from pandas.errors import PerformanceWarning
def test_ix_deprecation():
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
class TestIX(object):
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470],
'timestamp': [1413840976, 1413842580, 1413760580]})
expected = DataFrame({'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470],
'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580],
unit='s')
})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
assert result == expected
else:
assert expected.equals(result)
# failure cases for .loc, but these work for .ix
df = DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
pytest.raises(TypeError, lambda: df.loc[key])
df = DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
pytest.raises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_ix_weird_slicing(self):
# http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one': [1, 2, 3, np.nan, np.nan],
'two': [1, 2, 3, 4, 5]})
df.loc[df['one'] > 1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0,
1: 2.0,
2: 3.0,
3: np.nan,
4: np.nan},
'two': {0: 1,
1: -2,
2: -3,
3: 4,
4: 5}})
tm.assert_frame_equal(df, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.loc[key], df.iloc[2:])
# this is ok
df.sort_index(inplace=True)
res = df.loc[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.loc[:, 'B'].copy()
df.loc[:, 'B'] = df.loc[:, 'B'] + 1
tm.assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x': lrange(10), 'y': lrange(10, 20), 'z': 'bar'})
expected = df.copy()
for i in range(5):
indexer = i * 2
v = 1000 + i * 200
expected.loc[indexer, 'y'] = v
assert expected.loc[indexer, 'y'] == v
df.loc[df.x % 2 == 0, 'y'] = df.loc[df.x % 2 == 0, 'y'] * 100
tm.assert_frame_equal(df, expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a': [1, 2, 3], 'b': [0, 1, 2]})
df.loc[[0, 2, ], 'b'] = [100, -100]
expected = DataFrame({'a': [1, 2, 3], 'b': [100, 1, -100]})
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': lrange(4)})
df['b'] = np.nan
df.loc[[1, 3], 'b'] = [100, -100]
expected = DataFrame({'a': [0, 1, 2, 3],
'b': [np.nan, 100, np.nan, -100]})
tm.assert_frame_equal(df, expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment', None):
df = DataFrame({'a': lrange(4)})
df['b'] = np.nan
df['b'].loc[[1, 3]] = [100, -100]
tm.assert_frame_equal(df, expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
with catch_warnings(record=True):
assert df.ix['e', 8] == 2
assert df.loc['e', 8] == 2
with catch_warnings(record=True):
df.ix['e', 8] = 42
assert df.ix['e', 8] == 42
assert df.loc['e', 8] == 42
df.loc['e', 8] = 45
with catch_warnings(record=True):
assert df.ix['e', 8] == 45
assert df.loc['e', 8] == 45
def test_ix_slicing_strings(self):
# see gh-3836
data = {'Classification':
['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1, 2, 3, 4, 5],
'X': ['correct', 'wrong', 'correct', 'correct', 'wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'
])]
with catch_warnings(record=True):
df.ix[x.index, 'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD',
1: 'bbb',
2: 'SA EQUITY',
3: 'SA SSF',
4: 'aaa'},
'Random': {0: 1,
1: 2,
2: 3,
3: 4,
4: 5},
'X': {0: 'correct',
1: 'bbb',
2: 'correct',
3: 'correct',
4: 'aaa'}}) # bug was 4: 'bbb'
tm.assert_frame_equal(df, expected)
def test_ix_setitem_out_of_bounds_axis_0(self):
df = DataFrame(
np.random.randn(2, 5), index=["row%s" % i for i in range(2)],
columns=["col%s" % i for i in range(5)])
with catch_warnings(record=True):
pytest.raises(ValueError, df.ix.__setitem__, (2, 0), 100)
def test_ix_setitem_out_of_bounds_axis_1(self):
df = DataFrame(
np.random.randn(5, 2), index=["row%s" % i for i in range(5)],
columns=["col%s" % i for i in range(2)])
with catch_warnings(record=True):
pytest.raises(ValueError, df.ix.__setitem__, (0, 2), 100)
def test_ix_empty_list_indexer_is_ok(self):
with catch_warnings(record=True):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
# horizontal empty
tm.assert_frame_equal(df.ix[[]], df.iloc[:0, :],
check_index_type=True,
check_column_type=True)
def test_ix_duplicate_returns_series(self):
df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
columns=list('abc'))
with catch_warnings(record=True):
r = df.ix[0.2, 'a']
e = df.loc[0.2, 'a']
tm.assert_series_equal(r, e)
|
the-stack_106_21032
|
from typing import Any, Callable, Dict, List, Optional, Set
import logging
import os
from tensorboardX import SummaryWriter
import torch
from allennlp.common.from_params import FromParams
from allennlp.data.dataloader import TensorDict
from allennlp.nn import util as nn_util
from allennlp.training.optimizers import Optimizer
from allennlp.training import util as training_util
from allennlp.models.model import Model
logger = logging.getLogger(__name__)
class TensorboardWriter(FromParams):
"""
Class that handles Tensorboard (and other) logging.
# Parameters
serialization_dir : `str`, optional (default = `None`)
If provided, this is where the Tensorboard logs will be written.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"tensorboard_writer", it gets passed in separately.
summary_interval : `int`, optional (default = `100`)
Most statistics will be written out only every this many batches.
histogram_interval : `int`, optional (default = `None`)
If provided, activation histograms will be written out every this many batches.
If None, activation histograms will not be written out.
When this parameter is specified, the following additional logging is enabled:
* Histograms of model parameters
* The ratio of parameter update norm to parameter norm
* Histogram of layer activations
We log histograms of the parameters returned by
`model.get_parameters_for_histogram_tensorboard_logging`.
The layer activations are logged for any modules in the `Model` that have
the attribute `should_log_activations` set to `True`. Logging
histograms requires a number of GPU-CPU copies during training and is typically
slow, so we recommend logging histograms relatively infrequently.
Note: only Modules that return tensors, tuples of tensors or dicts
with tensors as values currently support activation logging.
batch_size_interval : `int`, optional, (default = `None`)
If defined, how often to log the average batch size.
should_log_parameter_statistics : `bool`, optional (default = `True`)
Whether to log parameter statistics (mean and standard deviation of parameters and
gradients).
should_log_learning_rate : `bool`, optional (default = `False`)
Whether to log (parameter-specific) learning rate.
get_batch_num_total : `Callable[[], int]`, optional (default = `None`)
A thunk that returns the number of batches so far. Most likely this will
be a closure around an instance variable in your `Trainer` class. Because of circular
dependencies in constructing this object and the `Trainer`, this is typically `None` when
you construct the object, but it gets set inside the constructor of our `Trainer`.
"""
def __init__(
self,
serialization_dir: Optional[str] = None,
summary_interval: int = 100,
        histogram_interval: Optional[int] = None,
batch_size_interval: Optional[int] = None,
should_log_parameter_statistics: bool = True,
should_log_learning_rate: bool = False,
        get_batch_num_total: Optional[Callable[[], int]] = None,
) -> None:
if serialization_dir is not None:
# Create log directories prior to creating SummaryWriter objects
# in order to avoid race conditions during distributed training.
train_ser_dir = os.path.join(serialization_dir, "log", "train")
os.makedirs(train_ser_dir, exist_ok=True)
self._train_log = SummaryWriter(train_ser_dir)
val_ser_dir = os.path.join(serialization_dir, "log", "validation")
os.makedirs(val_ser_dir, exist_ok=True)
self._validation_log = SummaryWriter(val_ser_dir)
else:
self._train_log = self._validation_log = None
self._summary_interval = summary_interval
self._histogram_interval = histogram_interval
self._batch_size_interval = batch_size_interval
self._should_log_parameter_statistics = should_log_parameter_statistics
self._should_log_learning_rate = should_log_learning_rate
self.get_batch_num_total = get_batch_num_total
self._cumulative_batch_group_size = 0
self._batches_this_epoch = 0
        self._histogram_parameters: Optional[Set[str]] = None
@staticmethod
def _item(value: Any):
if hasattr(value, "item"):
val = value.item()
else:
val = value
return val
def log_memory_usage(self, cpu_memory_usage: Dict[int, int], gpu_memory_usage: Dict[int, int]):
cpu_memory_usage_total = 0.0
for worker, mem_bytes in cpu_memory_usage.items():
memory = mem_bytes / (1024 * 1024)
self.add_train_scalar(f"memory_usage/worker_{worker}_cpu", memory)
cpu_memory_usage_total += memory
self.add_train_scalar("memory_usage/cpu", cpu_memory_usage_total)
for gpu, mem_bytes in gpu_memory_usage.items():
memory = mem_bytes / (1024 * 1024)
self.add_train_scalar(f"memory_usage/gpu_{gpu}", memory)
def log_batch(
self,
model: Model,
optimizer: Optimizer,
batch_grad_norm: Optional[float],
metrics: Dict[str, float],
batch_group: List[List[TensorDict]],
param_updates: Optional[Dict[str, torch.Tensor]],
) -> None:
if self.should_log_this_batch():
self.log_parameter_and_gradient_statistics(model, batch_grad_norm)
self.log_learning_rates(model, optimizer)
self.add_train_scalar("loss/loss_train", metrics["loss"])
self.log_metrics({"epoch_metrics/" + k: v for k, v in metrics.items()})
if self.should_log_histograms_this_batch():
self.log_histograms(model)
self.log_gradient_updates(model, param_updates)
if self._batch_size_interval:
# We're assuming here that `log_batch` will get called every batch, and only every
# batch. This is true with our current usage of this code (version 1.0); if that
# assumption becomes wrong, this code will break.
batch_group_size = sum(training_util.get_batch_size(batch) for batch in batch_group)
self._batches_this_epoch += 1
self._cumulative_batch_group_size += batch_group_size
if (self._batches_this_epoch - 1) % self._batch_size_interval == 0:
average = self._cumulative_batch_group_size / self._batches_this_epoch
logger.info(f"current batch size: {batch_group_size} mean batch size: {average}")
self.add_train_scalar("current_batch_size", batch_group_size)
self.add_train_scalar("mean_batch_size", average)
def reset_epoch(self) -> None:
self._cumulative_batch_group_size = 0
self._batches_this_epoch = 0
def should_log_this_batch(self) -> bool:
return self.get_batch_num_total() % self._summary_interval == 0
def should_log_histograms_this_batch(self) -> bool:
return (
self._histogram_interval is not None
and self.get_batch_num_total() % self._histogram_interval == 0
)
def add_train_scalar(self, name: str, value: float, timestep: int = None) -> None:
timestep = timestep or self.get_batch_num_total()
# get the scalar
if self._train_log is not None:
self._train_log.add_scalar(name, self._item(value), timestep)
def add_train_histogram(self, name: str, values: torch.Tensor) -> None:
if self._train_log is not None:
if isinstance(values, torch.Tensor):
values_to_write = values.cpu().data.numpy().flatten()
self._train_log.add_histogram(name, values_to_write, self.get_batch_num_total())
def add_validation_scalar(self, name: str, value: float, timestep: int = None) -> None:
timestep = timestep or self.get_batch_num_total()
if self._validation_log is not None:
self._validation_log.add_scalar(name, self._item(value), timestep)
def log_parameter_and_gradient_statistics(self, model: Model, batch_grad_norm: float) -> None:
"""
Send the mean and std of all parameters and gradients to tensorboard, as well
as logging the average gradient norm.
"""
if self._should_log_parameter_statistics:
# Log parameter values to Tensorboard
for name, param in model.named_parameters():
if param.data.numel() > 0:
self.add_train_scalar("parameter_mean/" + name, param.data.mean())
if param.data.numel() > 1:
self.add_train_scalar("parameter_std/" + name, param.data.std())
if param.grad is not None:
if param.grad.is_sparse:
grad_data = param.grad.data._values()
else:
grad_data = param.grad.data
# skip empty gradients
if torch.prod(torch.tensor(grad_data.shape)).item() > 0:
self.add_train_scalar("gradient_mean/" + name, grad_data.mean())
if grad_data.numel() > 1:
self.add_train_scalar("gradient_std/" + name, grad_data.std())
else:
# no gradient for a parameter with sparse gradients
logger.info("No gradient for %s, skipping tensorboard logging.", name)
# norm of gradients
if batch_grad_norm is not None:
self.add_train_scalar("gradient_norm", batch_grad_norm)
def log_learning_rates(self, model: Model, optimizer: torch.optim.Optimizer):
"""
Send current parameter specific learning rates to tensorboard
"""
if self._should_log_learning_rate:
# optimizer stores lr info keyed by parameter tensor
# we want to log with parameter name
names = {param: name for name, param in model.named_parameters()}
for group in optimizer.param_groups:
if "lr" not in group:
continue
rate = group["lr"]
for param in group["params"]:
# check whether params has requires grad or not
effective_rate = rate * float(param.requires_grad)
self.add_train_scalar("learning_rate/" + names[param], effective_rate)
def log_histograms(self, model: Model) -> None:
"""
Send histograms of parameters to tensorboard.
"""
if not self._histogram_parameters:
            # Avoid calling this every batch. If we ever use two separate models with a single
# writer, this is wrong, but I doubt that will ever happen.
self._histogram_parameters = set(
model.get_parameters_for_histogram_tensorboard_logging()
)
for name, param in model.named_parameters():
if name in self._histogram_parameters:
self.add_train_histogram("parameter_histogram/" + name, param)
def log_gradient_updates(self, model: Model, param_updates: Dict[str, torch.Tensor]) -> None:
for name, param in model.named_parameters():
update_norm = torch.norm(param_updates[name].view(-1))
param_norm = torch.norm(param.view(-1)).cpu()
self.add_train_scalar(
"gradient_update/" + name,
update_norm / (param_norm + nn_util.tiny_value_of_dtype(param_norm.dtype)),
)
def log_metrics(
self,
train_metrics: dict,
val_metrics: dict = None,
epoch: int = None,
log_to_console: bool = False,
) -> None:
"""
Sends all of the train metrics (and validation metrics, if provided) to tensorboard.
"""
metric_names = set(train_metrics.keys())
if val_metrics is not None:
metric_names.update(val_metrics.keys())
val_metrics = val_metrics or {}
# For logging to the console
if log_to_console:
dual_message_template = "%s | %8.3f | %8.3f"
no_val_message_template = "%s | %8.3f | %8s"
no_train_message_template = "%s | %8s | %8.3f"
header_template = "%s | %-10s"
name_length = max(len(x) for x in metric_names)
logger.info(header_template, "Training".rjust(name_length + 13), "Validation")
for name in sorted(metric_names):
# Log to tensorboard
train_metric = train_metrics.get(name)
if train_metric is not None:
self.add_train_scalar(name, train_metric, timestep=epoch)
val_metric = val_metrics.get(name)
if val_metric is not None:
self.add_validation_scalar(name, val_metric, timestep=epoch)
# And maybe log to console
if log_to_console and val_metric is not None and train_metric is not None:
logger.info(
dual_message_template, name.ljust(name_length), train_metric, val_metric
)
elif log_to_console and val_metric is not None:
logger.info(no_train_message_template, name.ljust(name_length), "N/A", val_metric)
elif log_to_console and train_metric is not None:
logger.info(no_val_message_template, name.ljust(name_length), train_metric, "N/A")
def enable_activation_logging(self, model: Model) -> None:
if self._histogram_interval is not None:
            # To log activation histograms for the forward pass, we register
# a hook on forward to capture the output tensors.
# This uses a closure to determine whether to log the activations,
# since we don't want them on every call.
for _, module in model.named_modules():
if not getattr(module, "should_log_activations", False):
# skip it
continue
def hook(module_, inputs, outputs):
log_prefix = "activation_histogram/{0}".format(module_.__class__)
if self.should_log_histograms_this_batch():
self.log_activation_histogram(outputs, log_prefix)
module.register_forward_hook(hook)
def log_activation_histogram(self, outputs, log_prefix: str) -> None:
if isinstance(outputs, torch.Tensor):
log_name = log_prefix
self.add_train_histogram(log_name, outputs)
elif isinstance(outputs, (list, tuple)):
for i, output in enumerate(outputs):
log_name = "{0}_{1}".format(log_prefix, i)
self.add_train_histogram(log_name, output)
elif isinstance(outputs, dict):
for k, tensor in outputs.items():
log_name = "{0}_{1}".format(log_prefix, k)
self.add_train_histogram(log_name, tensor)
else:
# skip it
pass
def close(self) -> None:
"""
Calls the `close` method of the `SummaryWriter` s which makes sure that pending
scalars are flushed to disk and the tensorboard event files are closed properly.
"""
if self._train_log is not None:
self._train_log.close()
if self._validation_log is not None:
self._validation_log.close()
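if __name__ == "__main__":
    # A minimal sketch of driving this writer outside of a Trainer, not part of
    # the original module. The serialization directory below is an arbitrary
    # assumption; in normal use the Trainer constructs and wires all of this up.
    batch_num = 0
    writer = TensorboardWriter(
        serialization_dir="runs/tensorboard_writer_demo",
        summary_interval=10,
        get_batch_num_total=lambda: batch_num,
    )
    for batch_num in range(1, 101):
        # Logs a decaying dummy loss; the step comes from the thunk above.
        writer.add_train_scalar("loss/loss_train", 1.0 / batch_num)
    writer.close()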
|
the-stack_106_21033
|
import tensorflow as tf
import tensorflow_datasets as tfds
AUTO = tf.data.experimental.AUTOTUNE
class DatasetGenerator:
def __init__(self, data_dir, image_size, batch_size):
"""
Args:
            data_dir: relative path to the dataset (default: './datasets/')
            image_size: image resolution determined by the backbone
            batch_size: batch size
"""
self.data_dir = data_dir
self.image_size = image_size
self.batch_size = batch_size
self.train_data, self.valid_data = self.initial_load()
def initial_load(self):
"""
        Load the initial TensorFlow dataset splits
:return:
train data, validation data
"""
train_data = tfds.load(name='nyu_depth_v2', data_dir=self.data_dir, split='train')
valid_data = tfds.load(name='nyu_depth_v2', data_dir=self.data_dir, split='validation')
return train_data, valid_data
def preprocess(self, sample):
"""
preprocessing image
:return:
RGB image(H,W,3), Depth map(H,W,1)
"""
img = tf.cast(sample['image'], tf.float32)
depth = sample['depth']
depth = tf.expand_dims(depth, axis=-1)
img = tf.image.resize(img, (self.image_size[0], self.image_size[1]), tf.image.ResizeMethod.BILINEAR)
depth = tf.image.resize(depth, (self.image_size[0], self.image_size[1]), tf.image.ResizeMethod.BILINEAR)
# Format
img = tf.image.convert_image_dtype(img / 255., dtype=tf.float32)
depth = tf.image.convert_image_dtype(depth/255., dtype=tf.float32)
depth = tf.math.divide_no_nan(1000., depth*1000)
depth /= 1000.
return (img, depth)
def get_trainData(self):
"""
Set training dataset iterator
:return:
train data
"""
self.train_data = self.train_data.shuffle(1024, reshuffle_each_iteration=True)
self.train_data = self.train_data.repeat()
self.train_data = self.train_data.map(self.preprocess, num_parallel_calls=AUTO)
self.train_data = self.train_data.padded_batch(self.batch_size)
self.train_data = self.train_data.prefetch(AUTO)
return self.train_data
def get_validData(self):
"""
Set validation dataset iterator
:return:
validation data
"""
self.valid_data = self.valid_data.map(self.preprocess, num_parallel_calls=AUTO)
self.valid_data = self.valid_data.padded_batch(self.batch_size).prefetch(AUTO)
return self.valid_data
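# Usage sketch (not part of the original module). Note that tfds.load will try
# to download and prepare nyu_depth_v2 under data_dir, which is a very large
# dataset; the resolution and batch size below are arbitrary assumptions:
#
#     gen = DatasetGenerator(data_dir='./datasets/', image_size=(480, 640), batch_size=8)
#     train_ds = gen.get_trainData()
#     valid_ds = gen.get_validData()
#     for img, depth in train_ds.take(1):
#         print(img.shape, depth.shape)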
|
the-stack_106_21034
|
from typing import List, Tuple, Dict
import streamlit as st
import pandas as pd
# Custom packages
from lib.preprocessing import prepare_data
import streamlit_page.generalstats as generalstats
import streamlit_page.teacherstats as teacherstats
FILE_PATH = 'dataset/subjects_master_2022_modefied.csv'
def main():
    df, exception = load_external_data(FILE_PATH)
    if exception:
        st.error(f"Could not load the dataset: {exception}")
        return
    glb_stats = global_stats(df)
    create_layout(df, glb_stats)
@st.cache
def load_external_data(path: str) -> Tuple[pd.DataFrame, Exception]:
""" Load data from a link and preprocess it
Parameters:
-----------
path : str
Path to the data (should be hosted offline)
Returns:
--------
df : pandas.core.frame.DataFrame | False
The data loaded and preprocessed.
If there is an issue loading/preprocessing then it
returns False instead.
exception : False | Exception
If there is something wrong with preprocessing,
return Exception, otherwise return False
"""
exception = False
try:
df = prepare_data(path)
return df, False
except Exception as exception:
return False, exception
@st.cache
def global_stats(df: pd.DataFrame) -> Dict:
""" extract global stats to use it in the pages
Parameters
----------
df : pandas.core.frame.DataFrame
The data to be used for the analyses of thesis subjects.
Returns
-------
dictionary in the form 'global_stat_name:value'
"""
number_of_topics = len(df.index)
number_of_topics_taken = df['Taken'].value_counts()[1]
number_of_topics_not_taken = number_of_topics - number_of_topics_taken
percentage_of_taken = round(number_of_topics_taken / number_of_topics * 100)
# percentage_of_not_taken = round(number_of_topics_not_taken / number_of_topics * 100)
number_of_teachers = len(df['Teacher'].unique())
average_publish_number = round(df['Teacher'].value_counts().to_frame().reset_index().Teacher.mean())
speciality_list = list(df['Priority 1'].unique())
teacher_list = list(df['Teacher'].unique())
return {
'number of topics': number_of_topics,
'number of topics taken': number_of_topics_taken,
'percentage of taken': percentage_of_taken,
'teacher list': teacher_list,
'average publish': average_publish_number,
'speciality list': speciality_list,
}
def load_homepage() -> None:
""" Create Home page"""
st.image("images/badge.png",
use_column_width=True)
st.markdown("> A Dashboard for Exploratory Data Analysis of proposed Master thesis subjects")
st.markdown("""
After the release of the proposed thesis subjects, I was curious, and I had so many questions ... for example:
- Most proposed subject (trending subject)
- Most prioritized specialty in our department
- Percentage of affected/unaffected topics.
- What makes a topic undesirable (Why some topics didn't get chosen)
So to kill my curiosity, I created an Interactive Dashboard to explore the data.
Also, it felt like a nice opportunity to see how much information can be extracted from relatively simple data.
""")
st.markdown(
"You can check the GITHUB repository for more information: [link](https://github.com/khaledbouabdallah/Master_Subjects_Analysis)")
st.markdown("<div align='center'><br>"
"<img src='https://img.shields.io/badge/MADE%20WITH-PYTHON%20-red?style=for-the-badge'"
"alt='API stability' height='25'/>"
"<img src='https://img.shields.io/badge/SERVED%20WITH-Heroku-blue?style=for-the-badge'"
"alt='API stability' height='25'/>"
"<img src='https://img.shields.io/badge/DASHBOARDING%20WITH-Streamlit-green?style=for-the-badge'"
"alt='API stability' height='25'/></div>", unsafe_allow_html=True)
for i in range(3):
st.write(" ")
st.header("📉 The Application 📉")
st.write("This application is a Streamlit dashboard hosted on Heroku that can be used to explore "
"the results from board game matches that I tracked over the last year.")
st.write("There are currently four pages available in the application:")
st.subheader("📄 General Statistics 📄")
st.markdown("* This page contains basic exploratory data analyses for the purpose"
" of getting a general feeling of what the data contains.")
st.subheader("📄 Teacher Statistics 📄")
st.markdown("* Coming Soon ")
st.subheader("📄 Speciality Statistics 📄")
st.markdown("* Coming Soon ")
st.subheader("📄 Subject Statistics 📄")
st.markdown("* Coming Soon ")
def create_layout(df: pd.DataFrame, glb_stats: Dict) -> None:
""" Create the layout after the data has successfully loaded
Parameters:
-----------
df : pandas.core.frame.DataFrame
The data to be used for the analyses of thesis subjects.
glb_stats: Dict
dictionary in the form 'global_stat_name:value'
"""
st.sidebar.title("Menu")
app_mode = st.sidebar.selectbox("Please select a page", ["Homepage",
"General Statistics",
"Teacher Statistics",
"Speciality Statistics",
"Subject Statistics"])
if app_mode == 'Homepage':
load_homepage()
elif app_mode == "Instruction":
body = " ".join(open("files/instructions.md", 'r').readlines())
st.markdown(body, unsafe_allow_html=True)
elif app_mode == "General Statistics":
generalstats.load_page(df)
elif app_mode == "Teacher Statistics":
teacherstats.load_page(df, glb_stats)
elif app_mode == "Speciality Statistics":
st.markdown("* Coming Soon ")
elif app_mode == "Subject Statistics":
st.markdown("* Coming Soon ")
if __name__ == "__main__":
main()
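# To serve the dashboard locally (a sketch; the file name below is an assumption
# about how this module is saved):
#
#     streamlit run app.py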
|
the-stack_106_21035
|
from fastapi import APIRouter
import json
import pandas as pd
router = APIRouter()
strain = pd.read_csv('data/strains.csv')
""" Return the data as JSON """
@router.get('/ailments')
async def ailments():
depression = []
for i in range(strain.shape[0]):
if 'Depression' in strain.ailment.iloc[i]:
depression.append(strain.name.iloc[i])
# converting into json
depression_json = json.dumps(depression)
pain = []
for i in range(strain.shape[0]):
if 'Pain' in strain.ailment.iloc[i]:
pain.append(strain.name.iloc[i])
# converting into json
pain_json = json.dumps(pain)
insomnia = []
for i in range(strain.shape[0]):
if 'Insomnia' in strain.ailment.iloc[i]:
insomnia.append(strain.name.iloc[i])
# converting into json
insomnia_json = json.dumps(insomnia)
stress = []
for i in range(strain.shape[0]):
if 'Stress' in strain.ailment.iloc[i]:
stress.append(strain.name.iloc[i])
# converting into json
stress_json = json.dumps(stress)
lack_of_appetite = []
for i in range(strain.shape[0]):
if 'Lack of Appetite' in strain.ailment.iloc[i]:
lack_of_appetite.append(strain.name.iloc[i])
# converting into json
lack_of_appetite_json = json.dumps(lack_of_appetite)
muscle_spasms = []
for i in range(strain.shape[0]):
if 'Muscle Spasms' in strain.ailment.iloc[i]:
muscle_spasms.append(strain.name.iloc[i])
# converting into json
muscle_spasms_json = json.dumps(muscle_spasms)
nausea = []
for i in range(strain.shape[0]):
if 'Nausea' in strain.ailment.iloc[i]:
nausea.append(strain.name.iloc[i])
# converting into json
nausea_json = json.dumps(nausea)
inflammation = []
for i in range(strain.shape[0]):
if 'Inflammation' in strain.ailment.iloc[i]:
inflammation.append(strain.name.iloc[i])
# converting into json
inflammation_json = json.dumps(inflammation)
return 'Depression', depression_json, 'Pain', pain_json, 'Insomnia', insomnia_json,\
'Stress', stress_json, 'Lack of Appetite', lack_of_appetite_json, \
'Muscle Spasms', muscle_spasms_json, 'Inflammation', inflammation_json, \
'Nausea', nausea_json
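# A sketch of wiring this router into an application; the importing module's
# path and the run command are assumptions, not part of this file:
#
#     from fastapi import FastAPI
#     from app.api.ailments import router as ailments_router  # hypothetical path
#
#     app = FastAPI()
#     app.include_router(ailments_router)
#
#     # Run with, e.g.:  uvicorn main:app --reload
#     # GET /ailments then returns the per-ailment strain name lists built above.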
|
the-stack_106_21036
|
import re
from word2number import w2n
ORDINAL_MAP = {
'first': 1,
'second': 2,
'third': 3,
'fourth': 4,
'fifth': 5,
'sixth': 6,
'seventh': 7,
'eighth': 8,
'ninth': 9
}
DECADE_MAP = {
'twenties': 20,
'thirties': 30,
'forties': 40,
'fifties': 50,
'sixties': 60,
'seventies': 70,
'eighties': 80,
'nineties': 90
}
def clean(amr):
correct_errors(amr)
normalize_tokens(amr)
# Named entity
join_model_name(amr)
split_entity_with_slash(amr)
split_entity_with_non(amr)
split_entity_prefix(amr, 'anti')
split_entity_prefix(amr, 'ex')
split_entity_prefix(amr, 'cross')
split_entity_prefix(amr, 'pro')
replace_NT_dollar_abbr(amr)
# Date
join_time_description(amr)
split_date_duration(amr)
split_numerical_date(amr)
split_year_month(amr)
split_era(amr)
split_911(amr)
split_ratio(amr)
split_unit_with_number(amr)
split_number_with_dash_prefix(amr)
def correct_errors(amr):
while True:
index = None
for i, token in enumerate(amr.tokens):
if token == '570000':
index = i
tokens = ['2005', '07']
pos = ['CD', 'CD']
ner = ['DATE', 'DATE']
break
if token == '990000':
index = i
tokens = ['1999'] if amr.id.startswith('PROXY_AFP_ENG') else ['1990']
pos = ['CD']
ner = ['DATE']
break
if token == '860000':
index = i
tokens = ['1986']
pos = ['CD']
ner = ['DATE']
break
if token == '-20040824':
index = i
tokens = ['2004', '07', '24']
pos = ['CD', 'CD', 'CD']
ner = ['DATE', 'DATE', 'DATE']
break
if amr.id.startswith('PROXY_XIN_ENG_20030709_0070.6') and token == 'July':
index = i
tokens = ['June']
pos = ['NNP']
ner = ['DATE']
break
if amr.id.startswith('PROXY_APW_ENG_20080826_0891.5') and token == 'August':
index = i
tokens = ['July']
pos = ['NNP']
ner = ['DATE']
break
if amr.id.startswith('PROXY_LTW_ENG_20070514_0055.19') and token == 'May':
index = i
tokens = ['March']
pos = ['NNP']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20070430_0038.8') and token == 'February':
index = i
tokens = ['April']
pos = ['NNP']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20070504_0296.10') and token == '070513':
index = i
tokens = ['20130513']
pos = ['CD']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20070504_0296.10') and token == '070514':
index = i
tokens = ['20130514']
pos = ['CD']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20070607_0366.8') and token == 'April':
index = i
tokens = ['June']
pos = ['NNP']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20070612_0538.6') and token == 'June':
index = i
tokens = ['December']
pos = ['NNP']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20070612_0538.6') and token == '12':
index = i
tokens = ['6']
pos = ['CD']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20070620_0032.14') and token == 'June':
index = i
tokens = ['6']
pos = ['CD']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20070906_0523') and token == 'September':
index = i
tokens = ['9']
pos = ['CD']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20070910_0544') and token == 'September':
index = i
tokens = ['9']
pos = ['CD']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20071204_0145.25') and token == '200':
index = i
tokens = ['2000']
pos = ['CD']
ner = ['DATE']
break
if amr.id.startswith('PROXY_AFP_ENG_20071206_0630.5') and token == 'November':
index = i
tokens = ['10']
pos = ['CD']
ner = ['DATE']
break
if amr.id.startswith('PROXY_APW_ENG_20080112_0264.5') and token == '080112':
index = i
tokens = ['20081112']
pos = ['CD']
ner = ['DATE']
break
if amr.id.startswith('PROXY_XIN_ENG_20021123_0156.20') and token == 'a-third-party':
index = i
tokens = ['a', 'third', 'party']
pos = ['DT', 'JJ', 'NN']
ner = ['O', 'ORDINAL', 'O']
break
if amr.id.startswith('DF-225-195986-849_0460.9') and token == '2most':
index = i
tokens = ['2', 'most']
pos = ['CD', 'JJS']
ner = ['ORDINAL', 'O']
break
if amr.id.startswith('DF-200-192400-625_7557.16') and token == 'what':
index = i
tokens = ['want']
pos = ['VBP']
ner = ['O']
break
if amr.id.startswith('DF-200-192392-456_1160.5') and token == 'couting':
index = i
tokens = ['count']
pos = ['VBG']
ner = ['O']
break
if amr.id.startswith('bolt-eng-DF-170-181103-8882248_0182.50') and token == '31:10-31':
index = i
tokens = ['31', ':', '10', '-', '31']
pos = ['CD', ':', 'CD', ':', 'CD']
ner = ['ORDINAL', 'O', 'O', 'O', 'O']
break
if ((amr.id.startswith('PROXY_AFP_ENG_20071030_0313.5') or
amr.id.startswith('PROXY_AFP_ENG_20071030_0313.10'))
and token == 'approximately'):
index = i
tokens = []
pos = []
ner = []
break
if amr.id.startswith('PROXY_AFP_ENG_20050603_0056.11') and token == 'first' and amr.tokens[i - 1] == "'s":
index = i
tokens = ['firstly', 'first']
pos = ['NN', 'NN']
ner = ['ORDINAL', 'O']
break
if amr.id.startswith('PROXY_AFP_ENG_20070327_0002.14') and token == 'first' and amr.tokens[i + 1] == 'time':
index = i
tokens = ['first', 'firstly']
pos = ['NN', 'JJ']
ner = ['O', 'ORDINAL']
break
if amr.id.startswith('DF-200-192400-625_7046.5') and token == 'my' and amr.tokens[i + 1] == 'counsellers':
index = i
tokens = ['my', '2']
pos = ['PRP$', 'CD']
ner = ['O', 'NUMBER']
break
if amr.id.startswith('PROXY_LTW_ENG_20081115_0076.19') and token == 'a' and amr.tokens[i + 1] == 'year':
index = list(range(i, i + 5))
tokens = ['1.5', 'year']
pos = ['CD', 'NN']
ner = ['NUMBER', 'DURATION']
break
if amr.id.startswith('PROXY_XIN_ENG_20020905_0122.11') and token == 'separate' and amr.tokens[i + 1] == 'bomb':
index = list(range(i, i + 2))
tokens = ['separate', 'two', 'bomb']
pos = ['JJ', 'CD', 'JJ']
ner = ['O', 'NUMBER', 'O']
break
if (token == 'second' and i + 2 < len(amr.tokens) and
amr.tokens[i + 1] == 'to' and amr.tokens[i + 2] == 'last'):
index = [i, i + 1, i + 2]
tokens = ['-2']
pos = ['CD']
ner = ['ORDINAL']
break
if token.lower() == 'tonight':
index = i
tokens = ['today', 'night']
pos = ['NN', 'NN']
ner = ['DATE', 'DATE']
break
if token == '1ps':
index = i
tokens = ['1', 'pence']
pos = ['CD', 'NN']
ner = ['NUMBER', 'O']
break
else:
break
if not isinstance(index, list):
index = [index]
amr.replace_span(index, tokens, pos, ner)
def normalize_tokens(amr):
while True:
span = None
for i, lemma in enumerate(amr.lemmas):
lemma_lower = lemma.lower()
token_lower = amr.tokens[i].lower()
if lemma_lower == 'midnight':
span = [i]
tokens = ['0:00']
pos = ['CD']
ner = ['TIME']
break
if token_lower in DECADE_MAP:
span = [i]
tokens = [str(DECADE_MAP[token_lower])]
pos = ['CD']
ner = ['TIME']
break
if lemma_lower in ORDINAL_MAP:
span = [i]
tokens = [str(ORDINAL_MAP[lemma_lower])]
pos = ['CD']
ner = ['ORDINAL']
break
if lemma_lower == 'quarter' and i > 0 and amr.pos_tags[i - 1] == 'CD':
span = [i - 1, i]
tokens = [amr.tokens[i - 1]]
pos = [amr.pos_tags[i - 1]]
ner = [amr.ner_tags[i - 1]]
break
else:
break
amr.replace_span(span, tokens, pos, ner)
def join_model_name(amr):
    # Join the words starting with a cap letter which is followed by '^-\d+$'
while True:
span = None
if len(amr.tokens) < 2:
break
for i in range(len(amr.tokens) - 1):
x, y = amr.tokens[i: i + 2]
if x.isalpha() and x.isupper() and re.search(r'^-\d+$', y):
span = list(range(i, i + 2))
joined_tokens = ''.join([x, y])
                if joined_tokens in ('K-12',):
continue
break
else:
break
amr.replace_span(span, [joined_tokens], ['NNP'], ['ENTITY'])
def join_time_description(amr):
# 4 o'clock; 4 am; 4 a.m., etc.
while True:
span = None
if len(amr.tokens) < 2:
break
for i in range(1, len(amr.tokens)):
x, y = amr.tokens[i - 1: i + 1]
if y.lower() in ("o'clock", 'am', 'a.m.', 'pm', 'p.m') and re.search(r'^\d+[.:]?\d*[.:]?\d*$', x):
span = list(range(i - 1, i + 1))
joined_tokens = ''.join([x, y])
pos = 'CD'
ner = 'TIME'
break
if y.lower() in ("o'clock", 'am', 'a.m.', 'pm', 'p.m') and x.isalpha():
try:
x = w2n.word_to_num(x)
except:
continue
x = str(x)
span = list(range(i - 1, i + 1))
joined_tokens = ''.join([x, y])
pos = 'CD'
ner = 'TIME'
break
if y == 'Greenwich' and i + 2 < len(amr.tokens) and amr.tokens[i + 1: i + 3] == ['Mean', 'Time']:
span = list(range(i, i + 3))
joined_tokens = 'GMT'
pos = 'NNP'
ner = 'TIME'
break
if y in ('century', 'Century'):
m = re.search(r'^(\d+)(st|nd|rd|th)?$', x)
if m and m.group(1) != '':
span = list(range(i - 1, i + 1))
joined_tokens = ''.join([m.group(1), y.lower()])
pos = 'CD'
ner = 'TIME'
break
elif x == 'first' and amr.tokens[i - 2] == '-' and amr.tokens[i - 3] == 'twenty':
span = list(range(i - 3, i + 1))
joined_tokens = '21century'
pos = 'CD'
ner = 'TIME'
break
elif x.lower() == 'eighth':
span = list(range(i - 1, i + 1))
joined_tokens = '8century'
pos = 'CD'
ner = 'TIME'
break
elif x.lower() == 'fifth':
span = list(range(i - 1, i + 1))
joined_tokens = '5century'
pos = 'CD'
ner = 'TIME'
break
else:
try:
x = w2n.word_to_num(x)
except:
continue
span = list(range(i - 1, i + 1))
joined_tokens = ''.join([str(x), y.lower()])
pos = 'CD'
ner = 'TIME'
break
else:
break
amr.replace_span(span, [joined_tokens], [pos], [ner])
def split_entity_with_slash(amr):
# Split named entity word with '/', e.g. 'Romney/McDonnell'.
while True:
index = None
for i, token in enumerate(amr.tokens):
if (len(token) and token[0].isupper() and '/' in token and
token.index('/') + 1 < len(token) and
token[token.index('/') + 1].isupper()
):
index = i
break
else:
break
pos = amr.pos_tags[index]
ner = amr.ner_tags[index]
x, y = amr.tokens[index].split('/', 1)
amr.replace_span([index], [x, '/', y], [pos, 'SYM', pos], [ner, ner, ner])
def split_entity_with_non(amr):
# Split named entity word with 'non', e.g. 'nonRomney'.
while True:
index = None
for i, token in enumerate(amr.tokens):
if token.startswith('non') and len(token) > 3 and token[3].isupper():
index = i
break
else:
break
pos = amr.pos_tags[index]
ner = amr.ner_tags[index]
x = amr.tokens[index]
amr.replace_span([index], ['non', x[3:]], ['JJ', pos], ['O', ner])
def split_entity_prefix(amr, prefix):
    # Split words that start with the given prefix, e.g. 'anti-'.
while True:
index = None
for i, lemma in enumerate(amr.lemmas):
if lemma.lower().startswith(prefix + '-'):
index = i
break
else:
break
pos = amr.pos_tags[index]
ner = amr.ner_tags[index]
_, lemma = amr.lemmas[index].split('-', 1)
if lemma == '':
amr.replace_span([index], [prefix], ['JJ'], ['O'])
else:
amr.replace_span([index], [prefix, lemma], ['JJ', pos], [ner, ner])
def split_unit_with_number(amr):
# Split unit with number, e.g. '30pence'.
while True:
index = None
for i, lemma in enumerate(amr.lemmas):
if re.search(r'^\d+(ps|pence)$', lemma):
index = i
break
else:
break
lemma = amr.lemmas[index]
x = re.split(r'(ps|pence)$', lemma)[0]
y = lemma[len(x):]
amr.replace_span([index], [x, y], ['CD', 'NN'], ['NUMBER', 'O'])
def split_ratio(amr):
# Split ratio with number, e.g. '1:1.4'.
while True:
index = None
for i, lemma in enumerate(amr.lemmas):
if '.' in lemma and re.search(r'^\d+\.?\d*:\d+\.?\d*$', lemma):
index = i
break
else:
break
lemma = amr.lemmas[index]
x, y = lemma.split(':')
amr.replace_span([index], [x, ':', y], ['CD', ':', 'CD'], ['NUMBER', 'O', 'NUMBER'])
def split_number_with_dash_prefix(amr):
# Split number with dash prefix, e.g. '-6'
while True:
index = None
for i, lemma in enumerate(amr.lemmas):
if re.search(r'^-\d+$', lemma):
index = i
break
else:
break
lemma = amr.lemmas[index]
ner_tag = amr.ner_tags[index]
if ner_tag in ('0', 'O'):
ner_tag = 'NUMBER'
x = lemma[0]
y = lemma[1:]
amr.replace_span([index], [x, y], [':', 'CD'], ['O', ner_tag])
def split_date_duration(amr):
# 201005-201006
while True:
index = None
x = None
for i, lemma in enumerate(amr.lemmas):
if re.search(r'^-\d{8}$', lemma) or re.search(r'^-\d{6}$', lemma):
index = i
_, x = lemma.split('-')
break
else:
break
amr.replace_span([index], [x], ['CD'], ['DATE'])
def split_numerical_date(amr):
# Split the numerical date, e.g. 20080710.
while True:
index = None
year, month, day = None, None, None
for i, lemma in enumerate(amr.lemmas):
if (re.search(r'^\d{8}$', lemma) and
1000 < int(lemma[:4]) < 2100 and # year
0 < int(lemma[4:6]) < 13 and # month
0 < int(lemma[6:]) < 32 # day
):
index = i
year, month, day = int(lemma[:4]), int(lemma[4:6]), int(lemma[6:])
month = '{:02d}'.format(month)
day = '{:02d}'.format(day)
break
elif (re.search(r'^\d{5}$', lemma) and
0 < int(lemma[1:3]) < 13 and # month
0 < int(lemma[3:]) < 32 # day
):
index = i
year, month, day = '0' + lemma[0], int(lemma[1:3]), int(lemma[3:])
month = '{:02d}'.format(month)
day = '{:02d}'.format(day)
break
elif (re.search(r'^\d{6}$', lemma) and
0 < int(lemma[2:4]) < 13 and # month
0 <= int(lemma[4:]) < 32 # day
):
index = i
year = int(lemma[:2])
month, day = int(lemma[2:4]), int(lemma[4:])
year = '{:02d}'.format(year)
month = '{:02d}'.format(month)
day = '{:02d}'.format(day)
break
elif re.search(r'^\d+/\d+/\d+$', lemma):
index = i
year, month, day = lemma.split('/')
break
            elif re.search(r'^\d+-\d+-\d+$', lemma):
index = i
year, month, day = lemma.split('-')
break
else:
break
pos = 'CD'
ner = 'DATE'
amr.replace_span([index], [str(year), str(month), str(day)], [pos] * 3, [ner] * 3)
def split_year_month(amr):
while True:
index = None
year, month = None, None
for i, token in enumerate(amr.tokens):
m = re.search(r'^(\d+)/(\d+)-*$', token)
if m:
index = i
year, month = m.group(1), m.group(2)
break
m = re.search(r'^(\d{4})(\d{2})00$', token)
if m:
index = i
year, month = m.group(1), m.group(2)
break
else:
break
amr.replace_span([index], [year, month], ['CD', 'CD'], ['DATE', 'DATE'])
def split_era(amr):
while True:
index = None
year, era = None, None
for i, token in enumerate(amr.tokens):
if re.search(r'^\d{4}BC$', token):
index = i
year, era = token[:4], token[4:]
break
else:
break
amr.replace_span([index], [year, era], ['CD', 'NN'], ['DATE', 'DATE'])
def split_911(amr):
while True:
index = None
for i, token in enumerate(amr.tokens):
if token == '911':
index = i
break
else:
break
amr.replace_span([index], ['09', '11'], ['CD', 'CD'], ['DATE', 'DATE'])
def replace_NT_dollar_abbr(amr):
# Replace 'NT' in front of '$' with 'Taiwan'.
for i, token in enumerate(amr.tokens):
if token == 'NT' and len(amr.tokens) > i + 1 and amr.tokens[i + 1] in ('$', 'dollars', 'dollar'):
amr.replace_span([i], ['Taiwan'], ['NNP'], ['COUNTRY'])
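# Illustrative sketch (not part of the original module): a minimal stand-in for
# the stog AMR object, with just enough of the replace_span interface to
# exercise one of the helpers above. The real AMR class carries more state.
class _StubAMR:
    def __init__(self, tokens, lemmas, pos_tags, ner_tags):
        self.tokens, self.lemmas = tokens, lemmas
        self.pos_tags, self.ner_tags = pos_tags, ner_tags
    def replace_span(self, span, tokens, pos, ner):
        # Overwrite the covered indices in every parallel annotation list.
        start, end = span[0], span[-1] + 1
        self.tokens[start:end] = tokens
        self.lemmas[start:end] = tokens
        self.pos_tags[start:end] = pos
        self.ner_tags[start:end] = ner
def _demo_split_ratio():
    # '1:1.4' is split into the three tokens '1', ':', '1.4'.
    amr = _StubAMR(['odds', 'of', '1:1.4'], ['odds', 'of', '1:1.4'],
                   ['NNS', 'IN', 'CD'], ['O', 'O', 'NUMBER'])
    split_ratio(amr)
    return amr.tokens  # ['odds', 'of', '1', ':', '1.4']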
if __name__ == '__main__':
import argparse
from stog.data.dataset_readers.amr_parsing.io import AMRIO
parser = argparse.ArgumentParser('input_cleaner.py')
parser.add_argument('--amr_files', nargs='+', default=[])
args = parser.parse_args()
for file_path in args.amr_files:
with open(file_path + '.input_clean', 'w', encoding='utf-8') as f:
for i, amr in enumerate(AMRIO.read(file_path)):
try:
clean(amr)
f.write(str(amr) + '\n\n')
except:
print(f"input_cleaner\t\tERROR @ {amr.id}")
continue
|
the-stack_106_21037
|
from contextlib import contextmanager
from copy import copy
# Hard-coded processor for easier use of CSRF protection.
_builtin_context_processors = ('django.template.context_processors.csrf',)
class ContextPopException(Exception):
"pop() has been called more times than push()"
pass
class ContextDict(dict):
def __init__(self, context, *args, **kwargs):
super(ContextDict, self).__init__(*args, **kwargs)
context.dicts.append(self)
self.context = context
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.context.pop()
class BaseContext(object):
def __init__(self, dict_=None):
self._reset_dicts(dict_)
def _reset_dicts(self, value=None):
builtins = {'True': True, 'False': False, 'None': None}
self.dicts = [builtins]
if value is not None:
self.dicts.append(value)
def __copy__(self):
duplicate = copy(super(BaseContext, self))
duplicate.dicts = self.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
for d in reversed(self.dicts):
yield d
def push(self, *args, **kwargs):
dicts = []
for d in args:
if isinstance(d, BaseContext):
dicts += d.dicts[1:]
else:
dicts.append(d)
return ContextDict(self, *dicts, **kwargs)
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def __getitem__(self, key):
"Get a variable's value, starting at the current context and going upward"
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def has_key(self, key):
for d in self.dicts:
if key in d:
return True
return False
def __contains__(self, key):
return self.has_key(key)
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def new(self, values=None):
"""
Returns a new context with the same properties, but with only the
values given in 'values' stored.
"""
new_context = copy(self)
new_context._reset_dicts(values)
return new_context
def flatten(self):
"""
Returns self.dicts as one dictionary
"""
flat = {}
for d in self.dicts:
flat.update(d)
return flat
def __eq__(self, other):
"""
        Compares two contexts by comparing their 'dicts' attributes.
"""
if isinstance(other, BaseContext):
# because dictionaries can be put in different order
# we have to flatten them like in templates
return self.flatten() == other.flatten()
# if it's not comparable return false
return False
class Context(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True, use_l10n=None, use_tz=None):
self.autoescape = autoescape
self.use_l10n = use_l10n
self.use_tz = use_tz
self.template_name = "unknown"
self.render_context = RenderContext()
# Set to the original template -- as opposed to extended or included
# templates -- during rendering, see bind_template.
self.template = None
super(Context, self).__init__(dict_)
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
try:
yield
finally:
self.template = None
def __copy__(self):
duplicate = super(Context, self).__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Pushes other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, '__getitem__'):
raise TypeError('other_dict must be a mapping (dictionary-like) object.')
if isinstance(other_dict, BaseContext):
other_dict = other_dict.dicts[1:].pop()
return ContextDict(self, other_dict)
class RenderContext(BaseContext):
"""
A stack container for storing Template state.
RenderContext simplifies the implementation of template Nodes by providing a
safe place to store state between invocations of a node's `render` method.
The RenderContext also provides scoping rules that are more sensible for
'template local' variables. The render context stack is pushed before each
template is rendered, creating a fresh scope with nothing in it. Name
    resolution fails if a variable is not found at the top of the RenderContext
stack. Thus, variables are local to a specific template and don't affect the
rendering of other templates as they would if they were stored in the normal
template context.
"""
def __iter__(self):
for d in self.dicts[-1]:
yield d
def has_key(self, key):
return key in self.dicts[-1]
def get(self, key, otherwise=None):
return self.dicts[-1].get(key, otherwise)
def __getitem__(self, key):
return self.dicts[-1][key]
class RequestContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in the engine's configuration.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, request, dict_=None, processors=None, use_l10n=None, use_tz=None, autoescape=True):
super(RequestContext, self).__init__(
dict_, use_l10n=use_l10n, use_tz=use_tz, autoescape=autoescape)
self.request = request
self._processors = () if processors is None else tuple(processors)
self._processors_index = len(self.dicts)
# placeholder for context processors output
self.update({})
# empty dict for any new modifications
# (so that context processors don't overwrite them)
self.update({})
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
# Set context processors according to the template engine's settings.
processors = (template.engine.template_context_processors +
self._processors)
updates = {}
for processor in processors:
updates.update(processor(self.request))
self.dicts[self._processors_index] = updates
try:
yield
finally:
self.template = None
# Unset context processors.
self.dicts[self._processors_index] = {}
def new(self, values=None):
new_context = super(RequestContext, self).new(values)
# This is for backwards-compatibility: RequestContexts created via
# Context.new don't include values from context processors.
if hasattr(new_context, '_processors_index'):
del new_context._processors_index
return new_context
def make_context(context, request=None, **kwargs):
"""
Create a suitable Context from a plain dict and optionally an HttpRequest.
"""
if request is None:
context = Context(context, **kwargs)
else:
# The following pattern is required to ensure values from
# context override those from template context processors.
original_context = context
context = RequestContext(request, **kwargs)
if original_context:
context.push(original_context)
return context
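# Illustrative usage sketch (not part of Django itself): how the context stack
# behaves around push()/pop(). The names below are demonstration values only.
def _demo_context_stack():
    c = Context({'greeting': 'hello'})
    with c.push(greeting='hi', name='world'):
        inner = (c['greeting'], c['name'])  # ('hi', 'world') from the top dict
    outer = c['greeting']  # back to 'hello' once the pushed dict is popped
    return inner, outer, c.flatten()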
|
the-stack_106_21038
|
from keras.layers import Input, Conv2D, Activation, BatchNormalization, GaussianNoise, add, UpSampling2D, concatenate, Conv2DTranspose, Lambda
from keras.models import Model
from keras.regularizers import l2
import tensorflow as tf
from keras.engine.topology import Layer
from keras.engine import InputSpec
from keras.utils import conv_utils
import keras.backend as K
def spatial_reflection_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
# Arguments
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
# Returns
A padded 4D tensor.
# Raises
ValueError: if `data_format` is neither
`channels_last` or `channels_first`.
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
        data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0],
[0, 0],
list(padding[0]),
list(padding[1])]
else:
pattern = [[0, 0],
list(padding[0]), list(padding[1]),
[0, 0]]
return tf.pad(x, pattern, "REFLECT")
class ReflectionPadding2D(Layer):
"""Reflection-padding layer for 2D input (e.g. picture).
This layer can add rows and columns or zeros
at the top, bottom, left and right side of an image tensor.
# Arguments
padding: int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding
is applied to width and height.
- If tuple of 2 ints:
interpreted as two different
symmetric padding values for height and width:
`(symmetric_height_pad, symmetric_width_pad)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_pad, bottom_pad), (left_pad, right_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
# Input shape
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
# Output shape
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, padded_rows, padded_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, padded_rows, padded_cols)`
"""
def __init__(self,
padding=(1, 1),
data_format=None,
**kwargs):
super(ReflectionPadding2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding))
elif hasattr(padding, '__len__'):
if len(padding) != 2:
raise ValueError('`padding` should have two elements. '
'Found: ' + str(padding))
height_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
width_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
self.padding = (height_padding, width_padding)
else:
raise ValueError('`padding` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_pad, symmetric_width_pad), '
'or a tuple of 2 tuples of 2 ints '
'((top_pad, bottom_pad), (left_pad, right_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
if input_shape[2] is not None:
rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[3] is not None:
cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return (input_shape[0],
input_shape[1],
rows,
cols)
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[2] is not None:
cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return (input_shape[0],
rows,
cols,
input_shape[3])
def call(self, inputs):
return spatial_reflection_2d_padding(inputs,
padding=self.padding,
data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding,
'data_format': self.data_format}
base_config = super(ReflectionPadding2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Leftover residual-block builder kept for reference, fully commented out so
# the module stays importable:
# def padding(block_function, filters, repetitions, is_first_layer=False):
#     """Builds a residual block with repeating bottleneck blocks."""
#     def f(input):
def convolution(n_filters, kernel_size=3, l2_reg=0.0, strides=1):
def fun(inputs):
x = BatchNormalization()(inputs)
x = Activation('relu')(x)
# x = ReflectionPadding2D(strides)(x)
x = Conv2D(n_filters, (kernel_size, kernel_size), padding='same', kernel_initializer='he_normal',
kernel_regularizer=l2(l2_reg), strides=strides)(x)
return x
return fun
def transposed_convolution(n_filters, kernel_size=3, l2_reg=0.0, strides=1):
def fun(inputs):
x = BatchNormalization()(inputs)
x = Activation('relu')(x)
# x = ReflectionPadding2D(strides)(x)
x = Conv2DTranspose(n_filters, (kernel_size, kernel_size), padding='same', kernel_initializer='he_normal',
kernel_regularizer=l2(l2_reg), strides=strides)(x)
return x
return fun
def deconvolution_module(nx, ny, l2_reg):
def fun(estimation, next_frame, t1, t2, t3):
inputs = concatenate([estimation, next_frame])
A01 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(inputs)
C11 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=2)(A01)
C12 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(C11)
C13 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(C12)
C14 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(C13)
C14 = add([C11, C14])
C21 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(C14)
C22 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(C21)
C23 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(C22)
C24 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(C23)
C24 = add([C21, C24])
C31 = convolution(128, kernel_size=3, l2_reg=l2_reg, strides=2)(C24)
C32 = convolution(128, kernel_size=3, l2_reg=l2_reg, strides=1)(C31)
C33 = convolution(128, kernel_size=3, l2_reg=l2_reg, strides=1)(C32)
C34 = convolution(128, kernel_size=3, l2_reg=l2_reg, strides=1)(C33)
C34 = add([C31, C34])
C41 = convolution(256, kernel_size=3, l2_reg=l2_reg, strides=2)(C34)
        C42 = Lambda(lambda x: K.concatenate([x,t1],axis=-1), output_shape=(nx // 8, ny // 8, 512))(C41)
B42 = Conv2D(256, (1, 1), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), strides=1)(C42)
C43 = convolution(256, kernel_size=3, l2_reg=l2_reg, strides=1)(B42)
C44 = convolution(256, kernel_size=3, l2_reg=l2_reg, strides=1)(C43)
C45 = convolution(256, kernel_size=3, l2_reg=l2_reg, strides=1)(C44)
C45 = add([C41, C45])
C51 = transposed_convolution(128, kernel_size=4, l2_reg=l2_reg, strides=2)(C45)
C51 = add([C51, C34])
        C52 = Lambda(lambda x: K.concatenate([x,t2],axis=-1), output_shape=(nx // 4, ny // 4, 256))(C51)
B52 = Conv2D(128, (1, 1), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), strides=1)(C52)
C53 = convolution(128, kernel_size=3, l2_reg=l2_reg, strides=1)(B52)
C54 = convolution(128, kernel_size=3, l2_reg=l2_reg, strides=1)(C53)
C55 = convolution(128, kernel_size=3, l2_reg=l2_reg, strides=1)(C54)
C55 = add([C51, C55])
C61 = transposed_convolution(64, kernel_size=4, l2_reg=l2_reg, strides=2)(C55)
C61 = add([C61, C24])
        C62 = Lambda(lambda x: K.concatenate([x,t3],axis=-1), output_shape=(nx // 2, ny // 2, 128))(C61)
B62 = Conv2D(128, (1, 1), padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), strides=1)(C62)
C63 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(B62)
C64 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(C63)
C65 = convolution(64, kernel_size=3, l2_reg=l2_reg, strides=1)(C64)
C65 = add([C61, C65])
C71 = transposed_convolution(64, kernel_size=4, l2_reg=l2_reg, strides=2)(C65)
C72 = convolution(64, kernel_size=4, l2_reg=l2_reg, strides=1)(C71)
out = convolution(1, kernel_size=3, l2_reg=l2_reg, strides=1)(C72)
return out, C43, C53, C63
return fun
def mfbd(nx, ny, depth=5, l2_reg=0):
"""
Deep residual network that keeps the size of the input throughout the whole network
"""
inputs = [None] * depth
estimation = [None] * depth
for i in range(depth):
inputs[i] = Input(shape=(nx,ny,1))
t1 = K.zeros((10, int(nx/8), int(ny/8), 256))
t2 = K.zeros((10, int(nx/4), int(ny/4), 128))
t3 = K.zeros((10, int(nx/2), int(ny/2), 64))
deconvolution = deconvolution_module(nx, ny, l2_reg)
estimation[0] = inputs[0]
for i in range(depth-1):
estimation[i+1], t1, t2, t3 = deconvolution(estimation[i], inputs[i+1], t1, t2, t3)
return Model(inputs=inputs, outputs=estimation[1:])
|
the-stack_106_21040
|
import os
import re
import numpy as np
import scipy.optimize
from ledsa.core.model import target_function
from ledsa.core.LEDAnalysisData import LEDAnalysisData
import time
sep = os.path.sep
def generate_led_analysis_data(conf, channel, data, debug, iled, img_filename, led_array_idx, search_areas,
window_radius, fit_leds=True):
led_data = LEDAnalysisData(iled, led_array_idx, fit_leds)
center_search_area_x = int(search_areas[iled, 1])
center_search_area_y = int(search_areas[iled, 2])
search_area = np.index_exp[center_search_area_x - window_radius:
center_search_area_x + window_radius,
center_search_area_y - window_radius:
center_search_area_y + window_radius]
if fit_leds:
start_time = time.process_time()
led_data.fit_results, mesh = fit_model_to_led(data[search_area])
end_time = time.process_time()
led_data.fit_time = end_time - start_time
led_data.led_center_x = led_data.fit_results.x[0] + center_search_area_x - window_radius
led_data.led_center_y = led_data.fit_results.x[1] + center_search_area_y - window_radius
if debug:
return led_data.fit_results.x
if not led_data.fit_results.success: # A > 255 or A < 0:
log_warnings(img_filename, channel, led_data, center_search_area_x, center_search_area_y,
data[search_area].shape, window_radius, conf)
led_data.mean_color_value = np.mean(data[search_area])
led_data.sum_color_value = np.sum(data[search_area])
led_data.max_color_value = np.amax(data[search_area])
return led_data
def save_results_in_file(channel, img_data, img_filename, img_id, img_infos, root):
out_file = open('analysis{}channel{}{}{}_led_positions.csv'.format(sep, channel, sep, img_id), 'w')
header = create_header(channel, img_id, img_filename, img_infos, root, img_data[0].fit_leds)
out_file.write(header)
for led_data in img_data:
out_file.write(str(led_data))
out_file.close()
def find_analysed_img_ids(channel):
processed_imgs = []
directory_content = os.listdir('.{}analysis{}channel{}'.format(sep, sep, channel))
for file_name in directory_content:
img = re.search(r"([0-9]+)_led_positions.csv", file_name)
if img is not None:
processed_imgs.append(int(img.group(1)))
return processed_imgs
def save_list_of_remaining_imgs_needed_to_be_processed(remaining_imgs):
out_file = open('images_to_process.csv', 'w')
for i in list(remaining_imgs):
out_file.write('{}\n'.format(i))
out_file.close()
def fit_model_to_led(search_area):
nx = search_area.shape[0]
ny = search_area.shape[1]
center_x = nx // 2
center_y = ny // 2
x0 = np.array([center_x, center_y, 2., 2., 200., 1.0, 1.0, 1.0])
x = np.linspace(0.5, nx - 0.5, nx)
y = np.linspace(0.5, ny - 0.5, ny)
mesh = np.meshgrid(x, y)
res = scipy.optimize.minimize(target_function, x0,
args=(search_area, mesh), method='nelder-mead',
options={'xtol': 1e-8, 'disp': False,
'adaptive': False, 'maxiter': 10000})
return res, mesh
def log_warnings(img_filename, channel, led_data, cx, cy, size_of_search_area, window_radius, conf):
res = ' '.join(np.array_str(led_data.fit_results.x).split()).replace('[ ', '[').replace(' ]', ']').replace(' ', ',')
img_file_path = conf['img_directory'] + img_filename
log = f'Irregularities while fitting:\n {img_file_path} {led_data.led_id} {led_data.led_array} {res} ' \
f'{led_data.fit_results.success} {led_data.fit_results.fun} {led_data.fit_results.nfev} ' \
f'{size_of_search_area[0]} {size_of_search_area[1]} {led_data.led_center_x} {led_data.led_center_y} ' \
f'{window_radius} {cx} {cy} {channel}'
if not os.path.exists('.{}logfiles'.format(sep)):
os.makedirs('.{}logfiles'.format(sep))
logfile = open('.{}logfiles{}warnings.log'.format(sep, sep), 'a')
logfile.write(log)
logfile.write('\n')
logfile.close()
def create_header(channel, img_id, img_filename, img_infos, root, fit_leds):
out_str = f'# image root = {root[-1]}, photo file name = {img_filename}, '
out_str += f"channel = {channel}, "
out_str += f"time[s] = {img_infos[int(img_id) - 1][3]}\n"
out_str += "# id,line,sum_col_value,average_col_value,max_col_value"
if fit_leds:
out_str += ",led_center_x, led_center_y"
out_str += ",x,y,dx,dy,A,alpha,wx,wy,fit_success,fit_fun,fit_nfev,fit_time"
out_str += "// all spatial quantities in pixel coordinates\n"
else:
out_str += "\n"
return out_str
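# Illustrative sketch (not part of the package): building a header with made-up
# demonstration values to show the expected layout of the CSV preamble.
def _demo_create_header():
    img_infos = [('img_0001.jpg', 0, 0, 12.5)]  # hypothetical image metadata
    return create_header(channel=0, img_id='1', img_filename='img_0001.jpg',
                         img_infos=img_infos, root=['experiment_root'],
                         fit_leds=False)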
|
the-stack_106_21043
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 2 07:15:26 2022
@author: ACER
"""
# empty list
lista = []
# list with elements
listElementos = [1, 3, 4, 5]
# accessing elements
listAlumnos = ["adri", "rither", "jose", "juan"]
alumnoPos_1 = listAlumnos[len(listAlumnos) - 1]  # 'juan'
# get the size of the list
tamanioListaAlumnos = len(listAlumnos)
print("the size of the student list is:", tamanioListaAlumnos)
# append elements to a list
lista.append(1)
lista.append(2)
lista.append(5)
# lista is now [1, 2, 5]
# insert an element at a given index of the list
# insert(index in range 0..size-1, element)
lista.insert(2, 3)
# lista is now [1, 2, 3, 5]
print(lista)
# remove elements from a list
lista.pop(0)
# lista is now [2, 3, 5]
print(lista)
listaDocentes = ['jhonny', 'caballero', 'haku']
listaDocentes.remove('caballero')
# ['jhonny', 'haku']
print(listaDocentes)
# iterate over lists
for Docente in listaDocentes:
    print(Docente)
tamanioListaDocentes = len(listaDocentes)
for i in range(0, tamanioListaDocentes):
    print(listaDocentes[i])
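# extra illustrative example (not part of the original lesson):
# enumerate yields the index and the element together in a single loop
for indice, docente in enumerate(listaDocentes):
    print(indice, docente)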
|
the-stack_106_21044
|
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
import gazebo_msgs.msg
import geometry_msgs.msg
from gazebo_msgs.srv import SetPhysicsProperties
def joint_states_cb(msg):
pub_joint1_pos.publish(msg.position[0])
pub_joint2_pos.publish(msg.position[1])
pub_joint3_pos.publish(msg.position[2])
pub_joint4_pos.publish(msg.position[3])
pub_joint5_pos.publish(msg.position[4])
pub_joint6_pos.publish(msg.position[5])
# print(msg.position[0],msg.position[1],msg.position[2],msg.position[3],msg.position[4],msg.position[5])
def set_space_gravity():
set_gravity = rospy.ServiceProxy('/gazebo/set_physics_properties', SetPhysicsProperties)
time_step = Float64(0.001)
max_update_rate = Float64(1000.0)
gravity = geometry_msgs.msg.Vector3()
gravity.x = 0.0
gravity.y = 0.0
gravity.z = -2.5
ode_config = gazebo_msgs.msg.ODEPhysics()
ode_config.auto_disable_bodies = False
ode_config.sor_pgs_precon_iters = 0
ode_config.sor_pgs_iters = 50
ode_config.sor_pgs_w = 1.3
ode_config.sor_pgs_rms_error_tol = 0.0
ode_config.contact_surface_layer = 0.001
ode_config.contact_max_correcting_vel = 0.0
ode_config.cfm = 0.0
ode_config.erp = 0.2
ode_config.max_contacts = 20
set_gravity(time_step.data, max_update_rate.data, gravity, ode_config)
if __name__ == '__main__':
rospy.init_node("sat_serv_teleop")
pub_joint1_pos = rospy.Publisher("/satellite_servicer/joint1_pos_control/command/delayed", Float64, queue_size=1)
pub_joint2_pos = rospy.Publisher("/satellite_servicer/joint2_pos_control/command/delayed", Float64, queue_size=1)
pub_joint3_pos = rospy.Publisher("/satellite_servicer/joint3_pos_control/command/delayed", Float64, queue_size=1)
pub_joint4_pos = rospy.Publisher("/satellite_servicer/joint4_pos_control/command/delayed", Float64, queue_size=1)
pub_joint5_pos = rospy.Publisher("/satellite_servicer/joint5_pos_control/command/delayed", Float64, queue_size=1)
pub_joint6_pos = rospy.Publisher("/satellite_servicer/joint6_pos_control/command/delayed", Float64, queue_size=1)
rospy.Subscriber("/joint_states", JointState, joint_states_cb, queue_size=1)
set_space_gravity()
rospy.spin()
|
the-stack_106_21046
|
import Adafruit_DHT
import time
import RPi.GPIO as GPIO
#Initialize PIN 17 for the DHT22 data;
DHT_SENSOR = Adafruit_DHT.DHT22
DHT_PIN = 17
#sets the mode to broadcom (GPIO pinout);
GPIO.setmode(GPIO.BCM)
# Relay
# Tells python that GPIO 4 is an output;
rpin = 4
GPIO.setup(rpin, GPIO.OUT)
while True:
humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
if humidity is not None and temperature is not None:
print ("Temp={0:0.01f}C Humidity={1:0.1f}%".format(temperature, humidity))
if humidity > 53.0:
GPIO.output(rpin, GPIO.HIGH)
print('Relay 1 on')
time.sleep(5)
else:
GPIO.output(rpin, GPIO.LOW)
time.sleep(5)
print('Relay 1 OFF')
# GPIO.cleanup()
|
the-stack_106_21048
|
import pytest
import numpy
from numpy.testing import assert_allclose
from scipy import sparse
from fvm import Continuation
from fvm import Interface
@pytest.fixture(autouse=True, scope='module')
def import_test():
try:
from fvm import JadaInterface # noqa: F401
except ImportError:
pytest.skip('jadapy not found')
@pytest.fixture(scope='module')
def nx():
return 6
@pytest.fixture(scope='module')
def tol():
return 1e-7
@pytest.fixture(scope='module')
def atol(tol):
return tol * 100
@pytest.fixture(scope='module')
def num_evs():
return 10
@pytest.fixture(scope='module')
def numpy_interface(nx):
dim = 2
dof = 3
ny = nx
nz = 1
parameters = {}
interface = Interface(parameters, nx, ny, nz, dim, dof)
return interface
@pytest.fixture(scope='module')
def numpy_x(numpy_interface):
n = numpy_interface.dof * numpy_interface.nx * numpy_interface.ny * numpy_interface.nz
parameters = {}
continuation = Continuation(numpy_interface, parameters)
x0 = numpy.zeros(n)
x0 = continuation.newton(x0)
start = 0
target = 2000
ds = 100
return continuation.continuation(x0, 'Reynolds Number', start, target, ds)[0]
def check_eigenvalues(A_op, B_op, eigs, v, num_evs, tol):
from jadapy.utils import norm
idx = range(len(eigs))
idx = numpy.array(sorted(idx, key=lambda i: abs(eigs[i])))
for i in range(num_evs):
j = idx[i]
assert norm(A_op @ v[:, j]) > tol
assert_allclose(norm(A_op @ v[:, j] - B_op @ v[:, j] * eigs[j]), 0, rtol=0, atol=tol)
@pytest.fixture(scope='module')
def arpack_eigs(numpy_interface, numpy_x, num_evs, tol, atol):
from fvm import JadaInterface
A_op = JadaInterface.JadaOp(numpy_interface.jacobian(numpy_x))
B_op = JadaInterface.JadaOp(numpy_interface.mass_matrix())
# A_mat = A_op.mat.todense()
# B_mat = B_op.mat.todense()
# eigs, v = scipy.linalg.eig(A_mat, B_mat, left=False, right=True)
eigs, v = sparse.linalg.eigs(A_op, num_evs, B_op, sigma=0.1, tol=tol)
check_eigenvalues(A_op, B_op, eigs, v, num_evs, atol)
eigs = numpy.array(sorted(eigs, key=lambda x: abs(x)))
return eigs[:num_evs]
@pytest.fixture(scope='module')
def interface(numpy_interface):
return numpy_interface
@pytest.fixture(scope='module')
def x(numpy_x):
return numpy_x
|
the-stack_106_21049
|
import datetime
import importlib
import itertools
import warnings
from typing import Any, Callable, Optional, Union
from .timezone import tz_aware
def import_from_str(import_string: Optional[Union[Callable, str]]) -> Any:
"""Import an object defined as import if it is an string.
If `import_string` follows the format `path.to.module.object_name`,
this method imports it; else it just return the object.
"""
if isinstance(import_string, str):
path, field_name = import_string.rsplit(".", 1)
module = importlib.import_module(path)
return getattr(module, field_name)
else:
return import_string
def seq(value, increment_by=1, start=None, suffix=None):
"""Generate a sequence of values based on a running count.
This function can be used to generate sequences of `int`, `float`,
`datetime`, `date`, `time`, or `str`: whatever the `type` is of the
provided `value`.
Args:
value (object): the value at which to begin generation (this will
be ignored for types `datetime`, `date`, and `time`)
increment_by (`int` or `float`, optional): the amount by which to
increment for each generated value (defaults to `1`)
start (`int` or `float`, optional): the value at which the sequence
will begin to add to `value` (if `value` is a `str`, `start` will
be appended to it)
suffix (`str`, optional): for `str` `value` sequences, this will be
appended to the end of each generated value (after the counting
value is appended)
Returns:
object: generated values for sequential data
"""
if type(value) in [datetime.datetime, datetime.date, datetime.time]:
if start:
msg = "start parameter is ignored when using seq with date, time or datetime objects"
warnings.warn(msg)
if type(value) is datetime.date:
date = datetime.datetime.combine(value, datetime.datetime.now().time())
elif type(value) is datetime.time:
date = datetime.datetime.combine(datetime.date.today(), value)
else:
date = value
# convert to epoch time
start = (date - datetime.datetime(1970, 1, 1)).total_seconds()
increment_by = increment_by.total_seconds()
for n in itertools.count(increment_by, increment_by):
series_date = tz_aware(datetime.datetime.utcfromtimestamp(start + n))
if type(value) is datetime.time:
yield series_date.time()
elif type(value) is datetime.date:
yield series_date.date()
else:
yield series_date
else:
if suffix and not isinstance(suffix, str):
raise TypeError("Sequences suffix can only be a string")
for n in itertools.count(start or increment_by, increment_by):
if suffix and not isinstance(value, str):
raise TypeError(
"Sequences with suffix can only be used with text values"
)
elif suffix:
yield value + str(n) + suffix
else:
yield value + type(value)(n)
|
the-stack_106_21050
|
# flake8: noqa
# disable flake check on this file because some constructs are strange
# or redundant on purpose and can't be disable on a line-by-line basis
import ast
import inspect
import linecache
import sys
import textwrap
from types import CodeType
from typing import Any
from typing import Dict
from typing import Optional
import py.path
import pytest
from _pytest._code import Code
from _pytest._code import Frame
from _pytest._code import getfslineno
from _pytest._code import Source
def test_source_str_function() -> None:
x = Source("3")
assert str(x) == "3"
x = Source(" 3")
assert str(x) == "3"
x = Source(
"""
3
"""
)
assert str(x) == "\n3"
def test_source_from_function() -> None:
source = Source(test_source_str_function)
assert str(source).startswith("def test_source_str_function() -> None:")
def test_source_from_method() -> None:
class TestClass:
def test_method(self):
pass
source = Source(TestClass().test_method)
assert source.lines == ["def test_method(self):", " pass"]
def test_source_from_lines() -> None:
lines = ["a \n", "b\n", "c"]
source = Source(lines)
assert source.lines == ["a ", "b", "c"]
def test_source_from_inner_function() -> None:
def f():
raise NotImplementedError()
source = Source(f)
assert str(source).startswith("def f():")
def test_source_strips() -> None:
source = Source("")
assert source == Source()
assert str(source) == ""
assert source.strip() == source
def test_source_strip_multiline() -> None:
source = Source()
source.lines = ["", " hello", " "]
source2 = source.strip()
assert source2.lines == [" hello"]
class TestAccesses:
def setup_class(self) -> None:
self.source = Source(
"""\
def f(x):
pass
def g(x):
pass
"""
)
def test_getrange(self) -> None:
x = self.source[0:2]
assert len(x.lines) == 2
assert str(x) == "def f(x):\n pass"
def test_getrange_step_not_supported(self) -> None:
with pytest.raises(IndexError, match=r"step"):
self.source[::2]
def test_getline(self) -> None:
x = self.source[0]
assert x == "def f(x):"
def test_len(self) -> None:
assert len(self.source) == 4
def test_iter(self) -> None:
values = [x for x in self.source]
assert len(values) == 4
class TestSourceParsing:
def setup_class(self) -> None:
self.source = Source(
"""\
def f(x):
assert (x ==
3 +
4)
"""
).strip()
def test_getstatement(self) -> None:
# print str(self.source)
ass = str(self.source[1:])
for i in range(1, 4):
# print "trying start in line %r" % self.source[i]
s = self.source.getstatement(i)
# x = s.deindent()
assert str(s) == ass
def test_getstatementrange_triple_quoted(self) -> None:
# print str(self.source)
source = Source(
"""hello('''
''')"""
)
s = source.getstatement(0)
assert s == source
s = source.getstatement(1)
assert s == source
def test_getstatementrange_within_constructs(self) -> None:
source = Source(
"""\
try:
try:
raise ValueError
except SomeThing:
pass
finally:
42
"""
)
assert len(source) == 7
# check all lineno's that could occur in a traceback
# assert source.getstatementrange(0) == (0, 7)
# assert source.getstatementrange(1) == (1, 5)
assert source.getstatementrange(2) == (2, 3)
assert source.getstatementrange(3) == (3, 4)
assert source.getstatementrange(4) == (4, 5)
# assert source.getstatementrange(5) == (0, 7)
assert source.getstatementrange(6) == (6, 7)
def test_getstatementrange_bug(self) -> None:
source = Source(
"""\
try:
x = (
y +
z)
except:
pass
"""
)
assert len(source) == 6
assert source.getstatementrange(2) == (1, 4)
def test_getstatementrange_bug2(self) -> None:
source = Source(
"""\
assert (
33
==
[
X(3,
b=1, c=2
),
]
)
"""
)
assert len(source) == 9
assert source.getstatementrange(5) == (0, 9)
def test_getstatementrange_ast_issue58(self) -> None:
source = Source(
"""\
def test_some():
for a in [a for a in
CAUSE_ERROR]: pass
x = 3
"""
)
assert getstatement(2, source).lines == source.lines[2:3]
assert getstatement(3, source).lines == source.lines[3:4]
def test_getstatementrange_out_of_bounds_py3(self) -> None:
source = Source("if xxx:\n from .collections import something")
r = source.getstatementrange(1)
assert r == (1, 2)
def test_getstatementrange_with_syntaxerror_issue7(self) -> None:
source = Source(":")
pytest.raises(SyntaxError, lambda: source.getstatementrange(0))
def test_getstartingblock_singleline() -> None:
class A:
def __init__(self, *args) -> None:
frame = sys._getframe(1)
self.source = Frame(frame).statement
x = A("x", "y")
values = [i for i in x.source.lines if i.strip()]
assert len(values) == 1
def test_getline_finally() -> None:
def c() -> None:
pass
with pytest.raises(TypeError) as excinfo:
teardown = None
try:
c(1) # type: ignore
finally:
if teardown:
teardown() # type: ignore[unreachable]
source = excinfo.traceback[-1].statement
assert str(source).strip() == "c(1) # type: ignore"
def test_getfuncsource_dynamic() -> None:
def f():
raise NotImplementedError()
def g():
pass # pragma: no cover
f_source = Source(f)
g_source = Source(g)
assert str(f_source).strip() == "def f():\n raise NotImplementedError()"
assert str(g_source).strip() == "def g():\n pass # pragma: no cover"
def test_getfuncsource_with_multine_string() -> None:
def f():
c = """while True:
pass
"""
expected = '''\
def f():
c = """while True:
pass
"""
'''
assert str(Source(f)) == expected.rstrip()
def test_deindent() -> None:
from _pytest._code.source import deindent as deindent
assert deindent(["\tfoo", "\tbar"]) == ["foo", "bar"]
source = """\
def f():
def g():
pass
"""
lines = deindent(source.splitlines())
assert lines == ["def f():", " def g():", " pass"]
def test_source_of_class_at_eof_without_newline(tmpdir, _sys_snapshot) -> None:
# this test fails because the implicit inspect.getsource(A) below
# does not return the "x = 1" last line.
source = Source(
"""
class A:
def method(self):
x = 1
"""
)
path = tmpdir.join("a.py")
path.write(source)
s2 = Source(tmpdir.join("a.py").pyimport().A)
assert str(source).strip() == str(s2).strip()
if True:
def x():
pass
def test_source_fallback() -> None:
src = Source(x)
expected = """def x():
pass"""
assert str(src) == expected
def test_findsource_fallback() -> None:
from _pytest._code.source import findsource
src, lineno = findsource(x)
assert src is not None
assert "test_findsource_simple" in str(src)
assert src[lineno] == " def x():"
def test_findsource(monkeypatch) -> None:
from _pytest._code.source import findsource
filename = "<pytest-test_findsource>"
lines = ["if 1:\n", " def x():\n", " pass\n"]
co = compile("".join(lines), filename, "exec")
# Type ignored because linecache.cache is private.
monkeypatch.setitem(linecache.cache, filename, (1, None, lines, filename)) # type: ignore[attr-defined]
src, lineno = findsource(co)
assert src is not None
assert "if 1:" in str(src)
d: Dict[str, Any] = {}
eval(co, d)
src, lineno = findsource(d["x"])
assert src is not None
assert "if 1:" in str(src)
assert src[lineno] == " def x():"
def test_getfslineno() -> None:
def f(x) -> None:
raise NotImplementedError()
fspath, lineno = getfslineno(f)
assert isinstance(fspath, py.path.local)
assert fspath.basename == "test_source.py"
assert lineno == f.__code__.co_firstlineno - 1 # see findsource
class A:
pass
fspath, lineno = getfslineno(A)
_, A_lineno = inspect.findsource(A)
assert isinstance(fspath, py.path.local)
assert fspath.basename == "test_source.py"
assert lineno == A_lineno
assert getfslineno(3) == ("", -1)
class B:
pass
B.__name__ = B.__qualname__ = "B2"
assert getfslineno(B)[1] == -1
def test_code_of_object_instance_with_call() -> None:
class A:
pass
pytest.raises(TypeError, lambda: Source(A()))
class WithCall:
def __call__(self) -> None:
pass
code = Code.from_function(WithCall())
assert "pass" in str(code.source())
class Hello:
def __call__(self) -> None:
pass
pytest.raises(TypeError, lambda: Code.from_function(Hello))
def getstatement(lineno: int, source) -> Source:
from _pytest._code.source import getstatementrange_ast
src = Source(source)
ast, start, end = getstatementrange_ast(lineno, src)
return src[start:end]
def test_oneline() -> None:
source = getstatement(0, "raise ValueError")
assert str(source) == "raise ValueError"
def test_comment_and_no_newline_at_end() -> None:
from _pytest._code.source import getstatementrange_ast
source = Source(
[
"def test_basic_complex():",
" assert 1 == 2",
"# vim: filetype=pyopencl:fdm=marker",
]
)
ast, start, end = getstatementrange_ast(1, source)
assert end == 2
def test_oneline_and_comment() -> None:
source = getstatement(0, "raise ValueError\n#hello")
assert str(source) == "raise ValueError"
def test_comments() -> None:
source = '''def test():
"comment 1"
x = 1
# comment 2
# comment 3
assert False
"""
comment 4
"""
'''
for line in range(2, 6):
assert str(getstatement(line, source)) == " x = 1"
if sys.version_info >= (3, 8) or hasattr(sys, "pypy_version_info"):
tqs_start = 8
else:
tqs_start = 10
assert str(getstatement(10, source)) == '"""'
for line in range(6, tqs_start):
assert str(getstatement(line, source)) == " assert False"
for line in range(tqs_start, 10):
assert str(getstatement(line, source)) == '"""\ncomment 4\n"""'
def test_comment_in_statement() -> None:
source = """test(foo=1,
# comment 1
bar=2)
"""
for line in range(1, 3):
assert (
str(getstatement(line, source))
== "test(foo=1,\n # comment 1\n bar=2)"
)
def test_source_with_decorator() -> None:
"""Test behavior with Source / Code().source with regard to decorators."""
from _pytest.compat import get_real_func
@pytest.mark.foo
def deco_mark():
assert False
src = inspect.getsource(deco_mark)
assert textwrap.indent(str(Source(deco_mark)), " ") + "\n" == src
assert src.startswith(" @pytest.mark.foo")
@pytest.fixture
def deco_fixture():
assert False
src = inspect.getsource(deco_fixture)
assert src == " @pytest.fixture\n def deco_fixture():\n assert False\n"
    # currently Source does not unwrap decorators, testing the
# existing behavior here for explicitness, but perhaps we should revisit/change this
# in the future
assert str(Source(deco_fixture)).startswith("@functools.wraps(function)")
assert (
textwrap.indent(str(Source(get_real_func(deco_fixture))), " ") + "\n" == src
)
def test_single_line_else() -> None:
source = getstatement(1, "if False: 2\nelse: 3")
assert str(source) == "else: 3"
def test_single_line_finally() -> None:
source = getstatement(1, "try: 1\nfinally: 3")
assert str(source) == "finally: 3"
def test_issue55() -> None:
source = (
"def round_trip(dinp):\n assert 1 == dinp\n"
'def test_rt():\n round_trip("""\n""")\n'
)
s = getstatement(3, source)
assert str(s) == ' round_trip("""\n""")'
def test_multiline() -> None:
source = getstatement(
0,
"""\
raise ValueError(
23
)
x = 3
""",
)
assert str(source) == "raise ValueError(\n 23\n)"
class TestTry:
def setup_class(self) -> None:
self.source = """\
try:
raise ValueError
except Something:
raise IndexError(1)
else:
raise KeyError()
"""
def test_body(self) -> None:
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_except_line(self) -> None:
source = getstatement(2, self.source)
assert str(source) == "except Something:"
def test_except_body(self) -> None:
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
def test_else(self) -> None:
source = getstatement(5, self.source)
assert str(source) == " raise KeyError()"
class TestTryFinally:
def setup_class(self) -> None:
self.source = """\
try:
raise ValueError
finally:
raise IndexError(1)
"""
def test_body(self) -> None:
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_finally(self) -> None:
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
class TestIf:
def setup_class(self) -> None:
self.source = """\
if 1:
y = 3
elif False:
y = 5
else:
y = 7
"""
def test_body(self) -> None:
source = getstatement(1, self.source)
assert str(source) == " y = 3"
def test_elif_clause(self) -> None:
source = getstatement(2, self.source)
assert str(source) == "elif False:"
def test_elif(self) -> None:
source = getstatement(3, self.source)
assert str(source) == " y = 5"
def test_else(self) -> None:
source = getstatement(5, self.source)
assert str(source) == " y = 7"
def test_semicolon() -> None:
s = """\
hello ; pytest.skip()
"""
source = getstatement(0, s)
assert str(source) == s.strip()
def test_def_online() -> None:
s = """\
def func(): raise ValueError(42)
def something():
pass
"""
source = getstatement(0, s)
assert str(source) == "def func(): raise ValueError(42)"
def XXX_test_expression_multiline() -> None:
source = """\
something
'''
'''"""
result = getstatement(1, source)
assert str(result) == "'''\n'''"
def test_getstartingblock_multiline() -> None:
class A:
def __init__(self, *args):
frame = sys._getframe(1)
self.source = Frame(frame).statement
# fmt: off
x = A('x',
'y'
,
'z')
# fmt: on
values = [i for i in x.source.lines if i.strip()]
assert len(values) == 4
|
the-stack_106_21051
|
from datetime import datetime
import logging
import os
from .rest_session import *
from .api.organizations import Organizations
from .api.networks import Networks
from .api.devices import Devices
from .api.appliance import Appliance
from .api.camera import Camera
from .api.cellularGateway import CellularGateway
from .api.insight import Insight
from .api.sm import Sm
from .api.switch import Switch
from .api.wireless import Wireless
from .config import (
API_KEY_ENVIRONMENT_VARIABLE, DEFAULT_BASE_URL, SINGLE_REQUEST_TIMEOUT, CERTIFICATE_PATH, REQUESTS_PROXY,
WAIT_ON_RATE_LIMIT, NGINX_429_RETRY_WAIT_TIME, ACTION_BATCH_RETRY_WAIT_TIME, RETRY_4XX_ERROR,
RETRY_4XX_ERROR_WAIT_TIME, MAXIMUM_RETRIES, OUTPUT_LOG, LOG_PATH, LOG_FILE_PREFIX, PRINT_TO_CONSOLE,
SUPPRESS_LOGGING, SIMULATE_API_CALLS, BE_GEO_ID, MERAKI_PYTHON_SDK_CALLER
)
__version__ = '1.6.2'
class DashboardAPI(object):
"""
**Creates a persistent Meraki dashboard API session**
- api_key (string): API key generated in dashboard; can also be set as an environment variable MERAKI_DASHBOARD_API_KEY
- base_url (string): preceding all endpoint resources
- single_request_timeout (integer): maximum number of seconds for each API call
- certificate_path (string): path for TLS/SSL certificate verification if behind local proxy
- requests_proxy (string): proxy server and port, if needed, for HTTPS
- wait_on_rate_limit (boolean): retry if 429 rate limit error encountered?
- nginx_429_retry_wait_time (integer): Nginx 429 retry wait time
- action_batch_retry_wait_time (integer): action batch concurrency error retry wait time
- retry_4xx_error (boolean): retry if encountering other 4XX error (besides 429)?
- retry_4xx_error_wait_time (integer): other 4XX error retry wait time
- maximum_retries (integer): retry up to this many times when encountering 429s or other server-side errors
- output_log (boolean): create an output log file?
- log_path (string): path to output log; by default, working directory of script if not specified
- log_file_prefix (string): log file name appended with date and timestamp
- print_console (boolean): print logging output to console?
- suppress_logging (boolean): disable all logging? you're on your own then!
- simulate (boolean): simulate POST/PUT/DELETE calls to prevent changes?
- be_geo_id (string): optional partner identifier for API usage tracking; can also be set as an environment variable BE_GEO_ID
- caller (string): optional identifier for API usage tracking; can also be set as an environment variable MERAKI_PYTHON_SDK_CALLER
- use_iterator_for_get_pages (boolean): list* methods will return an iterator with each object instead of a complete list with all items
"""
def __init__(self, api_key=None, base_url=DEFAULT_BASE_URL, single_request_timeout=SINGLE_REQUEST_TIMEOUT,
certificate_path=CERTIFICATE_PATH, requests_proxy=REQUESTS_PROXY,
wait_on_rate_limit=WAIT_ON_RATE_LIMIT, nginx_429_retry_wait_time=NGINX_429_RETRY_WAIT_TIME,
action_batch_retry_wait_time=ACTION_BATCH_RETRY_WAIT_TIME, retry_4xx_error=RETRY_4XX_ERROR,
retry_4xx_error_wait_time=RETRY_4XX_ERROR_WAIT_TIME, maximum_retries=MAXIMUM_RETRIES,
output_log=OUTPUT_LOG, log_path=LOG_PATH, log_file_prefix=LOG_FILE_PREFIX,
print_console=PRINT_TO_CONSOLE, suppress_logging=SUPPRESS_LOGGING, simulate=SIMULATE_API_CALLS,
be_geo_id=BE_GEO_ID, caller=MERAKI_PYTHON_SDK_CALLER, use_iterator_for_get_pages=False):
# Check API key
api_key = api_key or os.environ.get(API_KEY_ENVIRONMENT_VARIABLE)
if not api_key:
raise APIKeyError()
# Pull the BE GEO ID from an environment variable if present
be_geo_id = be_geo_id or os.environ.get('BE_GEO_ID')
# Pull the caller from an environment variable if present
caller = caller or os.environ.get('MERAKI_PYTHON_SDK_CALLER')
# Configure logging
if not suppress_logging:
self._logger = logging.getLogger(__name__)
self._logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
fmt='%(asctime)s %(name)12s: %(levelname)8s > %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
handler_console = logging.StreamHandler()
handler_console.setFormatter(formatter)
if output_log:
if log_path and log_path[-1] != '/':
log_path += '/'
self._log_file = f'{log_path}{log_file_prefix}_log__{datetime.now():%Y-%m-%d_%H-%M-%S}.log'
handler_log = logging.FileHandler(
filename=self._log_file
)
handler_log.setFormatter(formatter)
if output_log and not self._logger.hasHandlers():
self._logger.addHandler(handler_log)
if print_console:
handler_console.setLevel(logging.INFO)
self._logger.addHandler(handler_console)
elif print_console and not self._logger.hasHandlers():
self._logger.addHandler(handler_console)
else:
self._logger = None
# Creates the API session
self._session = RestSession(
logger=self._logger,
api_key=api_key,
base_url=base_url,
single_request_timeout=single_request_timeout,
certificate_path=certificate_path,
requests_proxy=requests_proxy,
wait_on_rate_limit=wait_on_rate_limit,
nginx_429_retry_wait_time=nginx_429_retry_wait_time,
action_batch_retry_wait_time=action_batch_retry_wait_time,
retry_4xx_error=retry_4xx_error,
retry_4xx_error_wait_time=retry_4xx_error_wait_time,
maximum_retries=maximum_retries,
simulate=simulate,
be_geo_id=be_geo_id,
caller=caller,
use_iterator_for_get_pages=use_iterator_for_get_pages,
)
# API endpoints by section
self.organizations = Organizations(self._session)
self.networks = Networks(self._session)
self.devices = Devices(self._session)
self.appliance = Appliance(self._session)
self.camera = Camera(self._session)
self.cellularGateway = CellularGateway(self._session)
self.insight = Insight(self._session)
self.sm = Sm(self._session)
self.switch = Switch(self._session)
self.wireless = Wireless(self._session)
|
the-stack_106_21052
|
"""Test service helpers."""
from collections import OrderedDict
from copy import deepcopy
import unittest
from unittest.mock import Mock, patch
import pytest
import voluptuous as vol
# To prevent circular import when running just this file
from homeassistant import core as ha, exceptions
from homeassistant.auth.permissions import PolicyPermissions
import homeassistant.components # noqa: F401, pylint: disable=unused-import
from homeassistant.const import (
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers import (
device_registry as dev_reg,
entity_registry as ent_reg,
service,
template,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_setup_component
from tests.common import (
MockEntity,
get_test_home_assistant,
mock_coro,
mock_device_registry,
mock_registry,
mock_service,
)
@pytest.fixture
def mock_handle_entity_call():
"""Mock service platform call."""
with patch(
"homeassistant.helpers.service._handle_entity_call",
side_effect=lambda *args: mock_coro(),
) as mock_call:
yield mock_call
@pytest.fixture
def mock_entities(hass):
"""Return mock entities in an ordered dict."""
kitchen = MockEntity(
entity_id="light.kitchen",
available=True,
should_poll=False,
supported_features=1,
)
living_room = MockEntity(
entity_id="light.living_room",
available=True,
should_poll=False,
supported_features=0,
)
entities = OrderedDict()
entities[kitchen.entity_id] = kitchen
entities[living_room.entity_id] = living_room
return entities
@pytest.fixture
def area_mock(hass):
"""Mock including area info."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
hass.states.async_set("light.Kitchen", STATE_OFF)
device_in_area = dev_reg.DeviceEntry(area_id="test-area")
device_no_area = dev_reg.DeviceEntry()
device_diff_area = dev_reg.DeviceEntry(area_id="diff-area")
mock_device_registry(
hass,
{
device_in_area.id: device_in_area,
device_no_area.id: device_no_area,
device_diff_area.id: device_diff_area,
},
)
entity_in_area = ent_reg.RegistryEntry(
entity_id="light.in_area",
unique_id="in-area-id",
platform="test",
device_id=device_in_area.id,
)
entity_no_area = ent_reg.RegistryEntry(
entity_id="light.no_area",
unique_id="no-area-id",
platform="test",
device_id=device_no_area.id,
)
entity_diff_area = ent_reg.RegistryEntry(
entity_id="light.diff_area",
unique_id="diff-area-id",
platform="test",
device_id=device_diff_area.id,
)
mock_registry(
hass,
{
entity_in_area.entity_id: entity_in_area,
entity_no_area.entity_id: entity_no_area,
entity_diff_area.entity_id: entity_diff_area,
},
)
class TestServiceHelpers(unittest.TestCase):
"""Test the Home Assistant service helpers."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.calls = mock_service(self.hass, "test_domain", "test_service")
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_template_service_call(self):
"""Test service call with templating."""
config = {
"service_template": "{{ 'test_domain.test_service' }}",
"entity_id": "hello.world",
"data_template": {
"hello": "{{ 'goodbye' }}",
"data": {"value": "{{ 'complex' }}", "simple": "simple"},
"list": ["{{ 'list' }}", "2"],
},
}
service.call_from_config(self.hass, config)
self.hass.block_till_done()
assert self.calls[0].data["hello"] == "goodbye"
assert self.calls[0].data["data"]["value"] == "complex"
assert self.calls[0].data["data"]["simple"] == "simple"
assert self.calls[0].data["list"][0] == "list"
def test_passing_variables_to_templates(self):
"""Test passing variables to templates."""
config = {
"service_template": "{{ var_service }}",
"entity_id": "hello.world",
"data_template": {"hello": "{{ var_data }}"},
}
service.call_from_config(
self.hass,
config,
variables={
"var_service": "test_domain.test_service",
"var_data": "goodbye",
},
)
self.hass.block_till_done()
assert self.calls[0].data["hello"] == "goodbye"
def test_bad_template(self):
"""Test passing bad template."""
config = {
"service_template": "{{ var_service }}",
"entity_id": "hello.world",
"data_template": {"hello": "{{ states + unknown_var }}"},
}
service.call_from_config(
self.hass,
config,
variables={
"var_service": "test_domain.test_service",
"var_data": "goodbye",
},
)
self.hass.block_till_done()
assert len(self.calls) == 0
def test_split_entity_string(self):
"""Test splitting of entity string."""
service.call_from_config(
self.hass,
{
"service": "test_domain.test_service",
"entity_id": "hello.world, sensor.beer",
},
)
self.hass.block_till_done()
assert ["hello.world", "sensor.beer"] == self.calls[-1].data.get("entity_id")
def test_not_mutate_input(self):
"""Test for immutable input."""
config = cv.SERVICE_SCHEMA(
{
"service": "test_domain.test_service",
"entity_id": "hello.world, sensor.beer",
"data": {"hello": 1},
"data_template": {"nested": {"value": "{{ 1 + 1 }}"}},
}
)
orig = deepcopy(config)
# Only change after call is each template getting hass attached
template.attach(self.hass, orig)
service.call_from_config(self.hass, config, validate_config=False)
assert orig == config
@patch("homeassistant.helpers.service._LOGGER.error")
def test_fail_silently_if_no_service(self, mock_log):
"""Test failing if service is missing."""
service.call_from_config(self.hass, None)
assert mock_log.call_count == 1
service.call_from_config(self.hass, {})
assert mock_log.call_count == 2
service.call_from_config(self.hass, {"service": "invalid"})
assert mock_log.call_count == 3
async def test_extract_entity_ids(hass):
"""Test extract_entity_ids method."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
hass.states.async_set("light.Kitchen", STATE_OFF)
await hass.components.group.Group.async_create_group(
hass, "test", ["light.Ceiling", "light.Kitchen"]
)
call = ha.ServiceCall("light", "turn_on", {ATTR_ENTITY_ID: "light.Bowl"})
assert {"light.bowl"} == await service.async_extract_entity_ids(hass, call)
call = ha.ServiceCall("light", "turn_on", {ATTR_ENTITY_ID: "group.test"})
assert {"light.ceiling", "light.kitchen"} == await service.async_extract_entity_ids(
hass, call
)
assert {"group.test"} == await service.async_extract_entity_ids(
hass, call, expand_group=False
)
assert (
await service.async_extract_entity_ids(
hass,
ha.ServiceCall("light", "turn_on", {ATTR_ENTITY_ID: ENTITY_MATCH_NONE}),
)
== set()
)
async def test_extract_entity_ids_from_area(hass, area_mock):
"""Test extract_entity_ids method with areas."""
call = ha.ServiceCall("light", "turn_on", {"area_id": "test-area"})
assert {"light.in_area"} == await service.async_extract_entity_ids(hass, call)
call = ha.ServiceCall("light", "turn_on", {"area_id": ["test-area", "diff-area"]})
assert {
"light.in_area",
"light.diff_area",
} == await service.async_extract_entity_ids(hass, call)
assert (
await service.async_extract_entity_ids(
hass, ha.ServiceCall("light", "turn_on", {"area_id": ENTITY_MATCH_NONE})
)
== set()
)
async def test_async_get_all_descriptions(hass):
"""Test async_get_all_descriptions."""
group = hass.components.group
group_config = {group.DOMAIN: {}}
await async_setup_component(hass, group.DOMAIN, group_config)
descriptions = await service.async_get_all_descriptions(hass)
assert len(descriptions) == 1
assert "description" in descriptions["group"]["reload"]
assert "fields" in descriptions["group"]["reload"]
logger = hass.components.logger
logger_config = {logger.DOMAIN: {}}
await async_setup_component(hass, logger.DOMAIN, logger_config)
descriptions = await service.async_get_all_descriptions(hass)
assert len(descriptions) == 2
assert "description" in descriptions[logger.DOMAIN]["set_level"]
assert "fields" in descriptions[logger.DOMAIN]["set_level"]
async def test_call_with_required_features(hass, mock_entities):
"""Test service calls invoked only if entity has required feautres."""
test_service_mock = Mock(return_value=mock_coro())
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
test_service_mock,
ha.ServiceCall("test_domain", "test_service", {"entity_id": "all"}),
required_features=[1],
)
assert len(mock_entities) == 2
# Called once because only one of the entities had the required features
assert test_service_mock.call_count == 1
async def test_call_with_sync_func(hass, mock_entities):
"""Test invoking sync service calls."""
test_service_mock = Mock()
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
test_service_mock,
ha.ServiceCall("test_domain", "test_service", {"entity_id": "light.kitchen"}),
)
assert test_service_mock.call_count == 1
async def test_call_with_sync_attr(hass, mock_entities):
"""Test invoking sync service calls."""
mock_method = mock_entities["light.kitchen"].sync_method = Mock()
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
"sync_method",
ha.ServiceCall(
"test_domain",
"test_service",
{"entity_id": "light.kitchen", "area_id": "abcd"},
),
)
assert mock_method.call_count == 1
# We pass empty kwargs because both entity_id and area_id are filtered out
assert mock_method.mock_calls[0][2] == {}
async def test_call_context_user_not_exist(hass):
"""Check we don't allow deleted users to do things."""
with pytest.raises(exceptions.UnknownUser) as err:
await service.entity_service_call(
hass,
[],
Mock(),
ha.ServiceCall(
"test_domain",
"test_service",
context=ha.Context(user_id="non-existing"),
),
)
assert err.value.context.user_id == "non-existing"
async def test_call_context_target_all(hass, mock_handle_entity_call, mock_entities):
"""Check we only target allowed entities if targeting all."""
with patch(
"homeassistant.auth.AuthManager.async_get_user",
return_value=mock_coro(
Mock(
permissions=PolicyPermissions(
{"entities": {"entity_ids": {"light.kitchen": True}}}, None
)
)
),
):
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall(
"test_domain",
"test_service",
data={"entity_id": ENTITY_MATCH_ALL},
context=ha.Context(user_id="mock-id"),
),
)
assert len(mock_handle_entity_call.mock_calls) == 1
assert mock_handle_entity_call.mock_calls[0][1][1].entity_id == "light.kitchen"
async def test_call_context_target_specific(
hass, mock_handle_entity_call, mock_entities
):
"""Check targeting specific entities."""
with patch(
"homeassistant.auth.AuthManager.async_get_user",
return_value=mock_coro(
Mock(
permissions=PolicyPermissions(
{"entities": {"entity_ids": {"light.kitchen": True}}}, None
)
)
),
):
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall(
"test_domain",
"test_service",
{"entity_id": "light.kitchen"},
context=ha.Context(user_id="mock-id"),
),
)
assert len(mock_handle_entity_call.mock_calls) == 1
assert mock_handle_entity_call.mock_calls[0][1][1].entity_id == "light.kitchen"
async def test_call_context_target_specific_no_auth(
hass, mock_handle_entity_call, mock_entities
):
"""Check targeting specific entities without auth."""
with pytest.raises(exceptions.Unauthorized) as err:
with patch(
"homeassistant.auth.AuthManager.async_get_user",
return_value=mock_coro(Mock(permissions=PolicyPermissions({}, None))),
):
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall(
"test_domain",
"test_service",
{"entity_id": "light.kitchen"},
context=ha.Context(user_id="mock-id"),
),
)
assert err.value.context.user_id == "mock-id"
assert err.value.entity_id == "light.kitchen"
async def test_call_no_context_target_all(hass, mock_handle_entity_call, mock_entities):
"""Check we target all if no user context given."""
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall(
"test_domain", "test_service", data={"entity_id": ENTITY_MATCH_ALL}
),
)
assert len(mock_handle_entity_call.mock_calls) == 2
assert [call[1][1] for call in mock_handle_entity_call.mock_calls] == list(
mock_entities.values()
)
async def test_call_no_context_target_specific(
hass, mock_handle_entity_call, mock_entities
):
"""Check we can target specified entities."""
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall(
"test_domain",
"test_service",
{"entity_id": ["light.kitchen", "light.non-existing"]},
),
)
assert len(mock_handle_entity_call.mock_calls) == 1
assert mock_handle_entity_call.mock_calls[0][1][1].entity_id == "light.kitchen"
async def test_call_with_match_all(
hass, mock_handle_entity_call, mock_entities, caplog
):
"""Check we only target allowed entities if targeting all."""
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall("test_domain", "test_service", {"entity_id": "all"}),
)
assert len(mock_handle_entity_call.mock_calls) == 2
assert [call[1][1] for call in mock_handle_entity_call.mock_calls] == list(
mock_entities.values()
)
async def test_call_with_omit_entity_id(hass, mock_handle_entity_call, mock_entities):
"""Check service call if we do not pass an entity ID."""
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall("test_domain", "test_service"),
)
assert len(mock_handle_entity_call.mock_calls) == 0
async def test_register_admin_service(hass, hass_read_only_user, hass_admin_user):
"""Test the register admin service."""
calls = []
async def mock_service(call):
calls.append(call)
hass.helpers.service.async_register_admin_service("test", "test", mock_service)
hass.helpers.service.async_register_admin_service(
"test",
"test2",
mock_service,
vol.Schema({vol.Required("required"): cv.boolean}),
)
with pytest.raises(exceptions.UnknownUser):
await hass.services.async_call(
"test",
"test",
{},
blocking=True,
context=ha.Context(user_id="non-existing"),
)
assert len(calls) == 0
with pytest.raises(exceptions.Unauthorized):
await hass.services.async_call(
"test",
"test",
{},
blocking=True,
context=ha.Context(user_id=hass_read_only_user.id),
)
assert len(calls) == 0
with pytest.raises(vol.Invalid):
await hass.services.async_call(
"test",
"test",
{"invalid": True},
blocking=True,
context=ha.Context(user_id=hass_admin_user.id),
)
assert len(calls) == 0
with pytest.raises(vol.Invalid):
await hass.services.async_call(
"test",
"test2",
{},
blocking=True,
context=ha.Context(user_id=hass_admin_user.id),
)
assert len(calls) == 0
await hass.services.async_call(
"test",
"test2",
{"required": True},
blocking=True,
context=ha.Context(user_id=hass_admin_user.id),
)
assert len(calls) == 1
assert calls[0].context.user_id == hass_admin_user.id
async def test_domain_control_not_async(hass, mock_entities):
"""Test domain verification in a service call with an unknown user."""
calls = []
def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
with pytest.raises(exceptions.HomeAssistantError):
hass.helpers.service.verify_domain_control("test_domain")(mock_service_log)
async def test_domain_control_unknown(hass, mock_entities):
"""Test domain verification in a service call with an unknown user."""
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
with patch(
"homeassistant.helpers.entity_registry.async_get_registry",
return_value=mock_coro(Mock(entities=mock_entities)),
):
protected_mock_service = hass.helpers.service.verify_domain_control(
"test_domain"
)(mock_service_log)
hass.services.async_register(
"test_domain", "test_service", protected_mock_service, schema=None
)
with pytest.raises(exceptions.UnknownUser):
await hass.services.async_call(
"test_domain",
"test_service",
{},
blocking=True,
context=ha.Context(user_id="fake_user_id"),
)
assert len(calls) == 0
async def test_domain_control_unauthorized(hass, hass_read_only_user):
"""Test domain verification in a service call with an unauthorized user."""
mock_registry(
hass,
{
"light.kitchen": ent_reg.RegistryEntry(
entity_id="light.kitchen", unique_id="kitchen", platform="test_domain",
)
},
)
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
protected_mock_service = hass.helpers.service.verify_domain_control("test_domain")(
mock_service_log
)
hass.services.async_register(
"test_domain", "test_service", protected_mock_service, schema=None
)
with pytest.raises(exceptions.Unauthorized):
await hass.services.async_call(
"test_domain",
"test_service",
{},
blocking=True,
context=ha.Context(user_id=hass_read_only_user.id),
)
assert len(calls) == 0
async def test_domain_control_admin(hass, hass_admin_user):
"""Test domain verification in a service call with an admin user."""
mock_registry(
hass,
{
"light.kitchen": ent_reg.RegistryEntry(
entity_id="light.kitchen", unique_id="kitchen", platform="test_domain",
)
},
)
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
protected_mock_service = hass.helpers.service.verify_domain_control("test_domain")(
mock_service_log
)
hass.services.async_register(
"test_domain", "test_service", protected_mock_service, schema=None
)
await hass.services.async_call(
"test_domain",
"test_service",
{},
blocking=True,
context=ha.Context(user_id=hass_admin_user.id),
)
assert len(calls) == 1
async def test_domain_control_no_user(hass):
"""Test domain verification in a service call with no user."""
mock_registry(
hass,
{
"light.kitchen": ent_reg.RegistryEntry(
entity_id="light.kitchen", unique_id="kitchen", platform="test_domain",
)
},
)
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
protected_mock_service = hass.helpers.service.verify_domain_control("test_domain")(
mock_service_log
)
hass.services.async_register(
"test_domain", "test_service", protected_mock_service, schema=None
)
await hass.services.async_call(
"test_domain",
"test_service",
{},
blocking=True,
context=ha.Context(user_id=None),
)
assert len(calls) == 1
async def test_extract_from_service_available_device(hass):
"""Test the extraction of entity from service and device is available."""
entities = [
MockEntity(name="test_1", entity_id="test_domain.test_1"),
MockEntity(name="test_2", entity_id="test_domain.test_2", available=False),
MockEntity(name="test_3", entity_id="test_domain.test_3"),
MockEntity(name="test_4", entity_id="test_domain.test_4", available=False),
]
call_1 = ha.ServiceCall("test", "service", data={"entity_id": ENTITY_MATCH_ALL})
assert ["test_domain.test_1", "test_domain.test_3"] == [
ent.entity_id
for ent in (await service.async_extract_entities(hass, entities, call_1))
]
call_2 = ha.ServiceCall(
"test",
"service",
data={"entity_id": ["test_domain.test_3", "test_domain.test_4"]},
)
assert ["test_domain.test_3"] == [
ent.entity_id
for ent in (await service.async_extract_entities(hass, entities, call_2))
]
assert (
await service.async_extract_entities(
hass,
entities,
ha.ServiceCall("test", "service", data={"entity_id": ENTITY_MATCH_NONE},),
)
== []
)
async def test_extract_from_service_empty_if_no_entity_id(hass):
"""Test the extraction from service without specifying entity."""
entities = [
MockEntity(name="test_1", entity_id="test_domain.test_1"),
MockEntity(name="test_2", entity_id="test_domain.test_2"),
]
call = ha.ServiceCall("test", "service")
assert [] == [
ent.entity_id
for ent in (await service.async_extract_entities(hass, entities, call))
]
async def test_extract_from_service_filter_out_non_existing_entities(hass):
"""Test the extraction of non existing entities from service."""
entities = [
MockEntity(name="test_1", entity_id="test_domain.test_1"),
MockEntity(name="test_2", entity_id="test_domain.test_2"),
]
call = ha.ServiceCall(
"test",
"service",
{"entity_id": ["test_domain.test_2", "test_domain.non_exist"]},
)
assert ["test_domain.test_2"] == [
ent.entity_id
for ent in (await service.async_extract_entities(hass, entities, call))
]
async def test_extract_from_service_area_id(hass, area_mock):
"""Test the extraction using area ID as reference."""
entities = [
MockEntity(name="in_area", entity_id="light.in_area"),
MockEntity(name="no_area", entity_id="light.no_area"),
MockEntity(name="diff_area", entity_id="light.diff_area"),
]
call = ha.ServiceCall("light", "turn_on", {"area_id": "test-area"})
extracted = await service.async_extract_entities(hass, entities, call)
assert len(extracted) == 1
assert extracted[0].entity_id == "light.in_area"
call = ha.ServiceCall("light", "turn_on", {"area_id": ["test-area", "diff-area"]})
extracted = await service.async_extract_entities(hass, entities, call)
assert len(extracted) == 2
assert sorted(ent.entity_id for ent in extracted) == [
"light.diff_area",
"light.in_area",
]
|
the-stack_106_21054
|
#!/usr/bin/env python
import os
from core.utils import get_benchmark, parser
from core.runner.RepairTask import RepairTask
from core.runner.get_runner import get_runner
if __name__ == "__main__":
# Arja -b Bugs.jar -i Wicket-295e73bd
# Arja -b Defects4J -i Chart-1
args = parser.parse_args()
if "benchmark" in args:
args.benchmark = get_benchmark(args.benchmark)
tasks = []
        # If no bug-id parameter is given, run against all bug ids.
bugs = args.benchmark.get_bugs()
if args.id is not None:
bugs = []
for bug_id in args.id:
bugs.append(args.benchmark.get_bug(bug_id))
for bug in bugs:
args.bug = bug
# Arja init (identify Arja main, jar file)
tool = args.func(args)
task = RepairTask(tool, args.benchmark, bug)
if not args.continue_execution or not os.path.exists(os.path.join(task.log_dir(), "repair.log")):
tasks.append(task)
get_runner(tasks, args).execute()
|
the-stack_106_21055
|
import copy
import json
import os
import random
import platform
import subprocess
import sys
from collections import defaultdict
import numpy as np
import pytest
import ray
from ray.external_storage import (create_url_with_offset,
parse_url_with_offset)
from ray.test_utils import wait_for_condition, run_string_as_driver
from ray.internal.internal_api import memory_summary
# -- Smart open param --
bucket_name = "object-spilling-test"
# -- File system param --
spill_local_path = "/tmp/spill"
# -- Spilling configs --
file_system_object_spilling_config = {
"type": "filesystem",
"params": {
"directory_path": spill_local_path
}
}
# Since we have different protocols for a local external storage (e.g., fs)
# and distributed external storage (e.g., S3), we need to test both cases.
# This mocks the distributed fs with cluster utils.
mock_distributed_fs_object_spilling_config = {
"type": "mock_distributed_fs",
"params": {
"directory_path": spill_local_path
}
}
smart_open_object_spilling_config = {
"type": "smart_open",
"params": {
"uri": f"s3://{bucket_name}/"
}
}
def create_object_spilling_config(request, tmp_path):
temp_folder = tmp_path / "spill"
temp_folder.mkdir()
if (request.param["type"] == "filesystem"
or request.param["type"] == "mock_distributed_fs"):
request.param["params"]["directory_path"] = str(temp_folder)
return json.dumps(request.param), temp_folder
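# Rough sketch of the serialized config produced above for the filesystem case (the
# directory below is an illustrative placeholder, not a real path):
#   '{"type": "filesystem", "params": {"directory_path": "/tmp/.../spill"}}'
# Both fixtures below just point request.param at a fresh per-test temp folder and
# re-serialize it, so the local and mock-distributed configs share this shape.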
@pytest.fixture(
scope="function",
params=[
file_system_object_spilling_config,
# TODO(sang): Add a mock dependency to test S3.
# smart_open_object_spilling_config,
])
def object_spilling_config(request, tmp_path):
yield create_object_spilling_config(request, tmp_path)
@pytest.fixture(
scope="function",
params=[
file_system_object_spilling_config,
mock_distributed_fs_object_spilling_config
])
def multi_node_object_spilling_config(request, tmp_path):
yield create_object_spilling_config(request, tmp_path)
def run_basic_workload():
"""Run the workload that requires spilling."""
arr = np.random.rand(5 * 1024 * 1024) # 40 MB
refs = []
refs.append([ray.put(arr) for _ in range(2)])
ray.get(ray.put(arr))
def is_dir_empty(temp_folder,
append_path=ray.ray_constants.DEFAULT_OBJECT_PREFIX):
    # append_path is used because file-based spilling appends a new
    # sub-directory to the spill path.
num_files = 0
temp_folder = temp_folder / append_path
for path in temp_folder.iterdir():
num_files += 1
return num_files == 0
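# Usage note for is_dir_empty: by default it inspects the ray object-prefix
# sub-directory of the spill folder, e.g. is_dir_empty(temp_folder); tests that spill
# directly into a raw directory pass append_path="" to check the folder itself.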
def assert_no_thrashing(address):
state = ray.state.GlobalState()
state._initialize_global_state(address,
ray.ray_constants.REDIS_DEFAULT_PASSWORD)
summary = memory_summary(address=address, stats_only=True)
restored_bytes = 0
consumed_bytes = 0
for line in summary.split("\n"):
if "Restored" in line:
restored_bytes = int(line.split(" ")[1])
if "consumed" in line:
consumed_bytes = int(line.split(" ")[-2])
assert consumed_bytes >= restored_bytes, (
f"consumed: {consumed_bytes}, restored: {restored_bytes}")
def test_invalid_config_raises_exception(shutdown_only):
# Make sure ray.init raises an exception before
# it starts processes when invalid object spilling
# config is given.
with pytest.raises(ValueError):
ray.init(_system_config={
"object_spilling_config": json.dumps({
"type": "abc"
}),
})
with pytest.raises(Exception):
copied_config = copy.deepcopy(file_system_object_spilling_config)
# Add invalid params to the config.
copied_config["params"].update({"random_arg": "abc"})
ray.init(_system_config={
"object_spilling_config": json.dumps(copied_config),
})
def test_url_generation_and_parse():
url = "s3://abc/def/ray_good"
offset = 10
size = 30
url_with_offset = create_url_with_offset(url=url, offset=offset, size=size)
parsed_result = parse_url_with_offset(url_with_offset)
assert parsed_result.base_url == url
assert parsed_result.offset == offset
assert parsed_result.size == size
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_default_config(shutdown_only):
ray.init(num_cpus=0, object_store_memory=75 * 1024 * 1024)
# Make sure the object spilling configuration is properly set.
config = json.loads(
ray.worker._global_node._config["object_spilling_config"])
assert config["type"] == "filesystem"
assert (config["params"]["directory_path"] ==
ray.worker._global_node._session_dir)
# Make sure the basic workload can succeed.
run_basic_workload()
ray.shutdown()
    # Make sure the config is not initialized if spilling is not enabled.
ray.init(
num_cpus=0,
object_store_memory=75 * 1024 * 1024,
_system_config={
"automatic_object_spilling_enabled": False,
"object_store_full_delay_ms": 100
})
assert "object_spilling_config" not in ray.worker._global_node._config
with pytest.raises(ray.exceptions.ObjectStoreFullError):
run_basic_workload()
ray.shutdown()
# Make sure when we use a different config, it is reflected.
ray.init(
num_cpus=0,
_system_config={
"object_spilling_config": (
json.dumps(mock_distributed_fs_object_spilling_config))
})
config = json.loads(
ray.worker._global_node._config["object_spilling_config"])
assert config["type"] == "mock_distributed_fs"
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_default_config_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
ray.init(cluster.address)
worker_nodes = []
worker_nodes.append(
cluster.add_node(num_cpus=1, object_store_memory=75 * 1024 * 1024))
cluster.wait_for_nodes()
# Run the basic spilling workload on both
# worker nodes and make sure they are working.
@ray.remote
def task():
arr = np.random.rand(5 * 1024 * 1024) # 40 MB
refs = []
refs.append([ray.put(arr) for _ in range(2)])
ray.get(ray.put(arr))
ray.get([task.remote() for _ in range(2)])
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_spilling_not_done_for_pinned_object(object_spilling_config,
shutdown_only):
# Limit our object store to 75 MiB of memory.
object_spilling_config, temp_folder = object_spilling_config
address = ray.init(
object_store_memory=75 * 1024 * 1024,
_system_config={
"max_io_workers": 4,
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"object_spilling_config": object_spilling_config,
"min_spilling_size": 0,
})
arr = np.random.rand(5 * 1024 * 1024) # 40 MB
ref = ray.get(ray.put(arr)) # noqa
# Since the ref exists, it should raise OOM.
with pytest.raises(ray.exceptions.ObjectStoreFullError):
ref2 = ray.put(arr) # noqa
wait_for_condition(lambda: is_dir_empty(temp_folder))
assert_no_thrashing(address["redis_address"])
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_spill_remote_object(ray_start_cluster,
multi_node_object_spilling_config):
cluster = ray_start_cluster
object_spilling_config, _ = multi_node_object_spilling_config
cluster.add_node(
num_cpus=0,
object_store_memory=75 * 1024 * 1024,
_system_config={
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"max_io_workers": 4,
"object_spilling_config": object_spilling_config,
"min_spilling_size": 0,
})
ray.init(address=cluster.address)
cluster.add_node(object_store_memory=75 * 1024 * 1024)
cluster.wait_for_nodes()
@ray.remote
def put():
return np.random.rand(5 * 1024 * 1024) # 40 MB data
@ray.remote
def depends(arg):
return
ref = put.remote()
copy = np.copy(ray.get(ref))
# Evict local copy.
ray.put(np.random.rand(5 * 1024 * 1024)) # 40 MB data
# Remote copy should cause first remote object to get spilled.
ray.get(put.remote())
sample = ray.get(ref)
assert np.array_equal(sample, copy)
# Evict the spilled object.
del sample
ray.get(put.remote())
ray.put(np.random.rand(5 * 1024 * 1024)) # 40 MB data
# Test passing the spilled object as an arg to another task.
ray.get(depends.remote(ref))
assert_no_thrashing(cluster.address)
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_spill_objects_automatically(object_spilling_config, shutdown_only):
# Limit our object store to 75 MiB of memory.
object_spilling_config, _ = object_spilling_config
address = ray.init(
num_cpus=1,
object_store_memory=75 * 1024 * 1024,
_system_config={
"max_io_workers": 4,
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"object_spilling_config": object_spilling_config,
"min_spilling_size": 0
})
replay_buffer = []
solution_buffer = []
buffer_length = 100
# Create objects of more than 800 MiB.
for _ in range(buffer_length):
ref = None
while ref is None:
multiplier = random.choice([1, 2, 3])
arr = np.random.rand(multiplier * 1024 * 1024)
ref = ray.put(arr)
replay_buffer.append(ref)
solution_buffer.append(arr)
print("spill done.")
# randomly sample objects
for _ in range(1000):
index = random.choice(list(range(buffer_length)))
ref = replay_buffer[index]
solution = solution_buffer[index]
sample = ray.get(ref, timeout=0)
assert np.array_equal(sample, solution)
assert_no_thrashing(address["redis_address"])
@pytest.mark.skipif(
platform.system() in ["Windows"], reason="Failing on Windows.")
def test_spill_stats(object_spilling_config, shutdown_only):
# Limit our object store to 75 MiB of memory.
object_spilling_config, _ = object_spilling_config
address = ray.init(
num_cpus=1,
object_store_memory=100 * 1024 * 1024,
_system_config={
"automatic_object_spilling_enabled": True,
"max_io_workers": 100,
"min_spilling_size": 1,
"object_spilling_config": object_spilling_config
},
)
@ray.remote
def f():
return np.zeros(50 * 1024 * 1024, dtype=np.uint8)
ids = []
for _ in range(4):
x = f.remote()
ids.append(x)
while ids:
print(ray.get(ids.pop()))
x_id = f.remote() # noqa
ray.get(x_id)
s = memory_summary(address=address["redis_address"], stats_only=True)
assert "Plasma memory usage 50 MiB, 1 objects, 50.0% full" in s, s
assert "Spilled 200 MiB, 4 objects" in s, s
assert "Restored 150 MiB, 3 objects" in s, s
# Test if consumed bytes are correctly calculated.
obj = ray.put(np.zeros(30 * 1024 * 1024, dtype=np.uint8))
@ray.remote
def func_with_ref(obj):
return True
ray.get(func_with_ref.remote(obj))
s = memory_summary(address=address["redis_address"], stats_only=True)
# 50MB * 5 references + 30MB used for task execution.
assert "Objects consumed by Ray tasks: 280 MiB." in s, s
assert_no_thrashing(address["redis_address"])
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_spill_during_get(object_spilling_config, shutdown_only):
object_spilling_config, _ = object_spilling_config
address = ray.init(
num_cpus=4,
object_store_memory=100 * 1024 * 1024,
_system_config={
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"max_io_workers": 1,
"object_spilling_config": object_spilling_config,
"min_spilling_size": 0,
},
)
@ray.remote
def f():
return np.zeros(10 * 1024 * 1024)
ids = []
for i in range(10):
x = f.remote()
print(i, x)
ids.append(x)
# Concurrent gets, which require restoring from external storage, while
# objects are being created.
for x in ids:
print(ray.get(x).shape)
assert_no_thrashing(address["redis_address"])
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_spill_deadlock(object_spilling_config, shutdown_only):
object_spilling_config, _ = object_spilling_config
# Limit our object store to 75 MiB of memory.
address = ray.init(
object_store_memory=75 * 1024 * 1024,
_system_config={
"max_io_workers": 1,
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"object_spilling_config": object_spilling_config,
"min_spilling_size": 0,
})
arr = np.random.rand(1024 * 1024) # 8 MB data
replay_buffer = []
# Create objects of more than 400 MiB.
for _ in range(50):
ref = None
while ref is None:
ref = ray.put(arr)
replay_buffer.append(ref)
# This is doing random sampling with 50% prob.
if random.randint(0, 9) < 5:
for _ in range(5):
ref = random.choice(replay_buffer)
sample = ray.get(ref, timeout=0)
assert np.array_equal(sample, arr)
assert_no_thrashing(address["redis_address"])
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_delete_objects(object_spilling_config, shutdown_only):
# Limit our object store to 75 MiB of memory.
object_spilling_config, temp_folder = object_spilling_config
address = ray.init(
object_store_memory=75 * 1024 * 1024,
_system_config={
"max_io_workers": 1,
"min_spilling_size": 0,
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"object_spilling_config": object_spilling_config,
})
arr = np.random.rand(1024 * 1024) # 8 MB data
replay_buffer = []
for _ in range(80):
ref = None
while ref is None:
ref = ray.put(arr)
replay_buffer.append(ref)
print("-----------------------------------")
del replay_buffer
del ref
wait_for_condition(lambda: is_dir_empty(temp_folder))
assert_no_thrashing(address["redis_address"])
@pytest.mark.skipif(
platform.system() in ["Windows"], reason="Failing on Windows.")
def test_delete_objects_delete_while_creating(object_spilling_config,
shutdown_only):
# Limit our object store to 75 MiB of memory.
object_spilling_config, temp_folder = object_spilling_config
address = ray.init(
object_store_memory=75 * 1024 * 1024,
_system_config={
"max_io_workers": 4,
"min_spilling_size": 0,
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"object_spilling_config": object_spilling_config,
})
arr = np.random.rand(1024 * 1024) # 8 MB data
replay_buffer = []
for _ in range(80):
ref = None
while ref is None:
ref = ray.put(arr)
replay_buffer.append(ref)
# Remove the replay buffer with 60% probability.
if random.randint(0, 9) < 6:
replay_buffer.pop()
# Do random sampling.
for _ in range(200):
ref = random.choice(replay_buffer)
sample = ray.get(ref, timeout=0)
assert np.array_equal(sample, arr)
# After all, make sure all objects are killed without race condition.
del replay_buffer
del ref
wait_for_condition(lambda: is_dir_empty(temp_folder))
assert_no_thrashing(address["redis_address"])
@pytest.mark.skipif(
platform.system() in ["Windows"], reason="Failing on Windows.")
def test_delete_objects_on_worker_failure(object_spilling_config,
shutdown_only):
# Limit our object store to 75 MiB of memory.
object_spilling_config, temp_folder = object_spilling_config
address = ray.init(
object_store_memory=75 * 1024 * 1024,
_system_config={
"max_io_workers": 4,
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"object_spilling_config": object_spilling_config,
"min_spilling_size": 0,
})
arr = np.random.rand(1024 * 1024) # 8 MB data
@ray.remote
class Actor:
def __init__(self):
self.replay_buffer = []
def get_pid(self):
return os.getpid()
def create_objects(self):
for _ in range(80):
ref = None
while ref is None:
ref = ray.put(arr)
self.replay_buffer.append(ref)
# Remove the replay buffer with 60% probability.
if random.randint(0, 9) < 6:
self.replay_buffer.pop()
# Do random sampling.
for _ in range(200):
ref = random.choice(self.replay_buffer)
sample = ray.get(ref, timeout=0)
assert np.array_equal(sample, arr)
a = Actor.remote()
actor_pid = ray.get(a.get_pid.remote())
ray.get(a.create_objects.remote())
os.kill(actor_pid, 9)
def wait_until_actor_dead():
try:
ray.get(a.get_pid.remote())
except ray.exceptions.RayActorError:
return True
return False
wait_for_condition(wait_until_actor_dead)
# After all, make sure all objects are deleted upon worker failures.
wait_for_condition(lambda: is_dir_empty(temp_folder))
assert_no_thrashing(address["redis_address"])
@pytest.mark.skipif(
platform.system() in ["Windows"], reason="Failing on Windows and MacOS.")
def test_delete_objects_multi_node(multi_node_object_spilling_config,
ray_start_cluster):
# Limit our object store to 75 MiB of memory.
object_spilling_config, temp_folder = multi_node_object_spilling_config
cluster = ray_start_cluster
# Head node.
cluster.add_node(
num_cpus=1,
object_store_memory=75 * 1024 * 1024,
_system_config={
"max_io_workers": 2,
"min_spilling_size": 20 * 1024 * 1024,
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"object_spilling_config": object_spilling_config,
})
ray.init(address=cluster.address)
# Add 2 worker nodes.
for _ in range(2):
cluster.add_node(num_cpus=1, object_store_memory=75 * 1024 * 1024)
cluster.wait_for_nodes()
arr = np.random.rand(1024 * 1024) # 8 MB data
@ray.remote(num_cpus=1)
class Actor:
def __init__(self):
self.replay_buffer = []
def ping(self):
return
def create_objects(self):
for _ in range(80):
ref = None
while ref is None:
ref = ray.put(arr)
self.replay_buffer.append(ref)
# Remove the replay buffer with 60% probability.
if random.randint(0, 9) < 6:
self.replay_buffer.pop()
# Do random sampling.
for _ in range(50):
ref = random.choice(self.replay_buffer)
sample = ray.get(ref, timeout=10)
assert np.array_equal(sample, arr)
actors = [Actor.remote() for _ in range(3)]
ray.get([actor.create_objects.remote() for actor in actors])
def wait_until_actor_dead(actor):
try:
ray.get(actor.ping.remote())
except ray.exceptions.RayActorError:
return True
return False
# Kill actors to remove all references.
for actor in actors:
ray.kill(actor)
wait_for_condition(lambda: wait_until_actor_dead(actor))
# The multi node deletion should work.
wait_for_condition(lambda: is_dir_empty(temp_folder))
assert_no_thrashing(cluster.address)
@pytest.mark.skipif(platform.system() == "Windows", reason="Flaky on Windows.")
def test_fusion_objects(object_spilling_config, shutdown_only):
# Limit our object store to 75 MiB of memory.
object_spilling_config, temp_folder = object_spilling_config
min_spilling_size = 10 * 1024 * 1024
address = ray.init(
object_store_memory=75 * 1024 * 1024,
_system_config={
"max_io_workers": 3,
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"object_spilling_config": object_spilling_config,
"min_spilling_size": min_spilling_size,
})
replay_buffer = []
solution_buffer = []
buffer_length = 100
# Create objects of more than 800 MiB.
for _ in range(buffer_length):
ref = None
while ref is None:
multiplier = random.choice([1, 2, 3])
arr = np.random.rand(multiplier * 1024 * 1024)
ref = ray.put(arr)
replay_buffer.append(ref)
solution_buffer.append(arr)
print("-----------------------------------")
# randomly sample objects
for _ in range(1000):
index = random.choice(list(range(buffer_length)))
ref = replay_buffer[index]
solution = solution_buffer[index]
sample = ray.get(ref, timeout=0)
assert np.array_equal(sample, solution)
is_test_passing = False
# Since we'd like to see the temp directory that stores the files,
# we need to append this directory.
temp_folder = temp_folder / ray.ray_constants.DEFAULT_OBJECT_PREFIX
for path in temp_folder.iterdir():
file_size = path.stat().st_size
# Make sure there are at least one
# file_size that exceeds the min_spilling_size.
# If we don't fusion correctly, this cannot happen.
if file_size >= min_spilling_size:
is_test_passing = True
assert is_test_passing
assert_no_thrashing(address["redis_address"])
# https://github.com/ray-project/ray/issues/12912
def do_test_release_resource(object_spilling_config, expect_released):
object_spilling_config, temp_folder = object_spilling_config
address = ray.init(
num_cpus=1,
object_store_memory=75 * 1024 * 1024,
_system_config={
"max_io_workers": 1,
"release_resources_during_plasma_fetch": expect_released,
"automatic_object_spilling_enabled": True,
"object_spilling_config": object_spilling_config,
})
plasma_obj = ray.put(np.ones(50 * 1024 * 1024, dtype=np.uint8))
for _ in range(5):
ray.put(np.ones(50 * 1024 * 1024, dtype=np.uint8)) # Force spilling
@ray.remote
def sneaky_task_tries_to_steal_released_resources():
print("resources were released!")
@ray.remote
def f(dep):
while True:
try:
ray.get(dep[0], timeout=0.001)
except ray.exceptions.GetTimeoutError:
pass
done = f.remote([plasma_obj]) # noqa
canary = sneaky_task_tries_to_steal_released_resources.remote()
ready, _ = ray.wait([canary], timeout=2)
if expect_released:
assert ready
else:
assert not ready
assert_no_thrashing(address["redis_address"])
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_no_release_during_plasma_fetch(object_spilling_config, shutdown_only):
do_test_release_resource(object_spilling_config, expect_released=False)
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_release_during_plasma_fetch(object_spilling_config, shutdown_only):
do_test_release_resource(object_spilling_config, expect_released=True)
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
@pytest.mark.timeout(30)
def test_spill_objects_on_object_transfer(object_spilling_config,
ray_start_cluster):
object_spilling_config, _ = object_spilling_config
# This test checks that objects get spilled to make room for transferred
# objects.
cluster = ray_start_cluster
object_size = int(1e7)
num_objects = 10
num_tasks = 10
# Head node can fit all of the objects at once.
cluster.add_node(
num_cpus=0,
object_store_memory=2 * num_tasks * num_objects * object_size,
_system_config={
"max_io_workers": 1,
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"object_spilling_config": object_spilling_config,
"min_spilling_size": 0
})
cluster.wait_for_nodes()
ray.init(address=cluster.address)
# Worker node can fit 1 tasks at a time.
cluster.add_node(
num_cpus=1, object_store_memory=1.5 * num_objects * object_size)
cluster.wait_for_nodes()
@ray.remote
def foo(*args):
return
@ray.remote
def allocate(*args):
return np.zeros(object_size, dtype=np.uint8)
# Allocate some objects that must be spilled to make room for foo's
# arguments.
allocated = [allocate.remote() for _ in range(num_objects)]
ray.get(allocated)
print("done allocating")
args = []
for _ in range(num_tasks):
task_args = [
ray.put(np.zeros(object_size, dtype=np.uint8))
for _ in range(num_objects)
]
args.append(task_args)
# Check that tasks scheduled to the worker node have enough room after
# spilling.
tasks = [foo.remote(*task_args) for task_args in args]
ray.get(tasks)
assert_no_thrashing(cluster.address)
@pytest.mark.skipif(
platform.system() in ["Windows"], reason="Failing on "
"Windows and Mac.")
def test_file_deleted_when_driver_exits(tmp_path, shutdown_only):
temp_folder = tmp_path / "spill"
temp_folder.mkdir()
driver = """
import json
import os
import signal
import numpy as np
import ray
ray.init(
object_store_memory=75 * 1024 * 1024,
_system_config={{
"max_io_workers": 2,
"min_spilling_size": 0,
"automatic_object_spilling_enabled": True,
"object_store_full_delay_ms": 100,
"object_spilling_config": json.dumps({{
"type": "filesystem",
"params": {{
"directory_path": "{temp_dir}"
}}
}}),
}})
arr = np.random.rand(1024 * 1024) # 8 MB data
replay_buffer = []
# Spill lots of objects
for _ in range(30):
ref = None
while ref is None:
ref = ray.put(arr)
replay_buffer.append(ref)
# Send sigterm to itself.
signum = {signum}
sig = None
if signum == 2:
sig = signal.SIGINT
elif signum == 15:
sig = signal.SIGTERM
os.kill(os.getpid(), sig)
"""
# Run a driver with sigint.
print("Sending sigint...")
with pytest.raises(subprocess.CalledProcessError):
print(
run_string_as_driver(
driver.format(temp_dir=str(temp_folder), signum=2)))
wait_for_condition(lambda: is_dir_empty(temp_folder, append_path=""))
@pytest.mark.skipif(
platform.system() in ["Windows"], reason="Failing on "
"Windows.")
def test_multiple_directories(tmp_path, shutdown_only):
num_dirs = 3
temp_dirs = []
for i in range(num_dirs):
temp_folder = tmp_path / f"spill_{i}"
temp_folder.mkdir()
temp_dirs.append(temp_folder)
# Limit our object store to 75 MiB of memory.
min_spilling_size = 0
object_spilling_config = json.dumps({
"type": "filesystem",
"params": {
"directory_path": [str(directory) for directory in temp_dirs]
}
})
address = ray.init(
object_store_memory=75 * 1024 * 1024,
_system_config={
"max_io_workers": 5,
"object_store_full_delay_ms": 100,
"object_spilling_config": object_spilling_config,
"min_spilling_size": min_spilling_size,
})
arr = np.ones(74 * 1024 * 1024, dtype=np.uint8) # 74MB.
object_refs = []
# Now the storage is full.
object_refs.append(ray.put(arr))
num_object_spilled = 20
for _ in range(num_object_spilled):
object_refs.append(ray.put(arr))
num_files = defaultdict(int)
for temp_dir in temp_dirs:
temp_folder = temp_dir / ray.ray_constants.DEFAULT_OBJECT_PREFIX
for path in temp_folder.iterdir():
num_files[str(temp_folder)] += 1
for ref in object_refs:
assert np.array_equal(ray.get(ref), arr)
print("Check distribution...")
min_count = 5
is_distributed = [n_files >= min_count for n_files in num_files.values()]
assert all(is_distributed)
print("Check deletion...")
# Empty object refs.
object_refs = []
# Add a new object so that the last entry is evicted.
ref = ray.put(arr)
for temp_dir in temp_dirs:
temp_folder = temp_dir
wait_for_condition(lambda: is_dir_empty(temp_folder))
assert_no_thrashing(address["redis_address"])
# Now kill ray and see all directories are deleted.
print("Check directories are deleted...")
ray.shutdown()
for temp_dir in temp_dirs:
wait_for_condition(lambda: is_dir_empty(temp_dir, append_path=""))
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
|
the-stack_106_21057
|
from model.contact import Contact
from model.grroup import Group
import random
def test_del_contact_to_group(app, db, orm):
    # precondition: make sure at least one group exists
if len(db.get_group_list()) == 0:
app.group.create(Group(name='test'))
    # precondition: make sure at least one contact exists
if len(db.get_contact_list()) == 0:
app.contact.add_contact_form(Contact(name = "Boris", middlename = "Mc", lastname = "Smith", nickname = "Gus", title = "-",company = "IT", address = "Atlantida",
homephone = "5556677", mobile='7775558899', work="4182369", fax = "4569632", email="[email protected]", email2="-", email3="-", homepage="-", bday="5",
bmonth="January", byear="2000", address2="world", phone2="7418526", notes="-"))
app.contact.submit()
app.contact.return_home_page()
groups = db.get_group_list()
group = Group()
old_contacts_in_group = []
for gr in groups:
old_contacts_in_group = orm.get_contacts_in_group(gr)
if len(old_contacts_in_group) != 0:
group = gr
break
contact_for_del = Contact()
    # precondition: make sure this contact is a member of this group
if len(old_contacts_in_group) == 0:
group = random.choice(groups)
contacts = db.get_contact_list()
contact_for_del = random.choice(contacts)
app.contact.add_contact_by_id_to_group(contact_for_del.id, group.id)
old_contacts_in_group = orm.get_contacts_in_group(group)
else:
contact_for_del = random.choice(old_contacts_in_group)
app.contact.del_contact_by_id_from_group(contact_for_del.id, group.id)
new_contacts_in_group = orm.get_contacts_in_group(group)
old_contacts_in_group.remove(contact_for_del)
assert old_contacts_in_group == new_contacts_in_group
|
the-stack_106_21059
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
screen_capture.py
Description:
Author: PhatLuu
Contact: [email protected]
Created on: 2021/03/27
"""
#%%
# ================================IMPORT PACKAGES====================================
# Standard Packages
import os
import shutil
import sys
# Visualization Packages
import cv2
# GUI and User Interfaces Packages
import pyautogui
import pyperclip
# Utilities
import time
from datetime import datetime
# =====================================START===========================================
user_dir = os.path.abspath(os.path.expanduser("~"))
script_dir = os.path.dirname(sys.argv[0])
project_dir = os.path.dirname(script_dir)
# set output_dir based on active VS code windows
def makedir(inputDir, remove=False):
"""Summary:
--------
Make directory
Inputs:
-------
inputDir (str): fullpath to directory to be created
remove (bool): option to remove current existing folder
"""
if remove is True and os.path.exists(inputDir):
print("Remove existing folder")
shutil.rmtree(inputDir)
if not os.path.exists(inputDir):
print(f"Making directory: {inputDir}")
os.makedirs(inputDir)
else:
print(f"mkdir: Directory already exist: {inputDir}")
output_dir = os.path.join(user_dir, "Downloads")
timestamp = datetime.now().strftime("%y%m%d_%H%M%S")
filename = f"T{timestamp}_screenshot.png"
temp_fullscreen = os.path.join(output_dir, "temp_screenshot.png")
window_name = "Image Capture"
onClick = False
point1 = (0, 0)
def click(event, x, y, flags, params):
global onClick, point1
img, output_filepath = params
if event == cv2.EVENT_LBUTTONDOWN:
        # if mousedown, store the x,y position of the mouse
onClick = True
point1 = (x, y)
elif event == cv2.EVENT_MOUSEMOVE and onClick:
# when dragging pressed, draw rectangle in image
img_copy = img.copy()
cv2.rectangle(img_copy, point1, (x, y), (0, 0, 255), 2)
cv2.imshow(window_name, img_copy)
elif event == cv2.EVENT_LBUTTONUP:
# on mouseUp, create subimage
try:
onClick = False
sub_img = img[point1[1] : y, point1[0] : x]
print(f"Write image to: {output_filepath}")
cv2.imwrite(output_filepath, sub_img)
cv2.destroyAllWindows()
# remove fullscreen file
os.remove(temp_fullscreen)
except:
cv2.destroyAllWindows()
os.remove(temp_fullscreen)
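# Descriptive note: the handler above is registered in main() via
# cv2.setMouseCallback(window_name, click, params) with params = [img, output_filepath].
# In short, mouse-down stores the first corner, mouse-move previews the selection
# rectangle on a copy of the screenshot, and mouse-up crops the region, writes it to
# output_filepath and removes the temporary full-screen capture.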
def paste_mkdocs_link(filename):
basefilename, ext = os.path.splitext(filename)
mkdocs_link = (
f'<div style="text-align:left">\r\n'
f'\t<img src="images/{filename}" width=60% alt="{basefilename}"">\r\n'
f"\t<figcaption>{basefilename}</figcaption>\r\n"
f"</div>"
)
pyperclip.copy(mkdocs_link)
# pyautogui.click()
# pyautogui.hotkey("ctrl", "v", interval=0.15)
def main():
# Close bash window if exists
time.sleep(0.5)
wins = pyautogui.getAllTitles()
for win in wins:
if "/usr/bin/bash" in win:
pyautogui.getWindowsWithTitle(win)[0].minimize()
time.sleep(0.5)
# Obtain output_dir based on active VS Code window
# pyautogui.click()
# time.sleep(1.0)
# wins = pyautogui.getAllTitles()
# for winname in wins:
# if "Visual Studio Code" in winname:
# win = pyautogui.getWindowsWithTitle(winname)[0]
# if win.isActive:
# project_name = winname.split(sep=" - ")[1]
# project_name = project_name.split(sep=" ")[0]
# output_dir = os.path.join(user_dir, f"{project_name}/docs/images")
# makedir(output_dir)
output_dir = os.path.join(project_dir, "docs/images")
makedir(output_dir)
output_filepath = os.path.join(output_dir, filename)
# Take screen shot
pyautogui.screenshot(temp_fullscreen)
img = cv2.imread(temp_fullscreen, 1)
# Display image
cv2.namedWindow(window_name)
cv2.moveWindow(window_name, 0, 0)
cv2.imshow(window_name, img)
img_win = pyautogui.getWindowsWithTitle(window_name)[0]
img_win.minimize() # pyautogui bug: win.isMaximized is True but it's minimized
time.sleep(0.5)
img_win.maximize()
# Set mouse callback to click function
params = [img, output_filepath]
cv2.setMouseCallback(window_name, click, params)
cv2.waitKey(0)
paste_mkdocs_link(os.path.basename(output_filepath))
# ==========================DEBUG===================================
if __name__ == "__main__":
main()
|
the-stack_106_21060
|
from django.test import override_settings
from django.urls import reverse, reverse_lazy
from django_webtest import WebTestMixin
from reversion.models import Version
from test_plus.test import TestCase
from company.factories import CompanyFactory
from pola.tests.test_views import PermissionMixin
from pola.users.factories import StaffFactory
from product.factories import ProductFactory
from product.models import Product
class TemplateUsedMixin:
def test_template_used(self):
self.login()
resp = self.client.get(self.url)
self.assertTemplateUsed(resp, self.template_name)
class InstanceMixin:
def setUp(self):
super().setUp()
self.instance = ProductFactory()
def test_contains_name(self):
self.login()
resp = self.client.get(self.url)
self.assertContains(resp, self.instance.name)
class TestProductDetailView(PermissionMixin, InstanceMixin, TestCase):
template_name = 'product/product_detail.html'
def setUp(self):
super().setUp()
self.url = reverse('product:detail', args=[self.instance.code])
class TestProductListView(PermissionMixin, WebTestMixin, TestCase):
url = reverse_lazy('product:list')
template_name = 'product/product_filter.html'
def test_empty(self):
self.login()
resp = self.client.get(self.url)
self.assertContains(resp, "Nic nie znaleziono")
def test_filled(self):
products = ProductFactory.create_batch(100)
page = self.app.get(self.url, user=self.user)
# self.assertTrue("1 z 4" in page)
self.assertTrue(str(products[-1]) in page)
page2 = page.click("Następne")
page2.click("Poprzednie")
class TestProductCreate(PermissionMixin, TemplateUsedMixin, TestCase):
url = reverse_lazy('product:create')
template_name = 'product/product_form.html'
class TestProductUpdate(PermissionMixin, InstanceMixin, TestCase):
template_name = 'product/product_update_form.html'
def setUp(self):
super().setUp()
self.url = reverse('product:edit', args=[self.instance.code])
class TestProductUpdateWeb(WebTestMixin, TestCase):
def setUp(self):
super().setUp()
self.instance = ProductFactory(code="123")
self.url = reverse('product:edit', args=[self.instance.code])
self.user = StaffFactory()
def test_form_success(self):
page = self.app.get(self.url, user=self.user)
page.form['name'] = "New name"
page.form['commit_desc'] = "Commit description"
page = page.form.submit()
self.assertRedirects(page, self.instance.get_absolute_url())
self.instance.refresh_from_db()
versions = Version.objects.get_for_object(self.instance)
self.assertEqual(versions[0].revision.comment, "Commit description")
self.assertEqual(versions[0].revision.user, self.user)
self.assertEqual(self.instance.name, "New name")
@override_settings(LANGUAGE_CODE='en-EN')
def test_form_commit_desc_required(self):
page = self.app.get(self.url, user=self.user)
page.form['name'] = "New name"
page = page.form.submit()
self.assertContains(page, "This field is required.")
page.form['commit_desc'] = "AAA"
page = page.form.submit()
self.assertRedirects(page, self.instance.get_absolute_url())
@override_settings(LANGUAGE_CODE='en-EN')
def test_form_readonly_fields(self):
page = self.app.get(self.url, user=self.user)
self.assertEqual(page.form['code'].attrs['disabled'], 'true')
page.form['code'] = "789789789"
page.form['commit_desc'] = "Commit desc"
page = page.form.submit()
self.assertRedirects(page, self.instance.get_absolute_url())
self.instance.refresh_from_db()
self.assertEqual(self.instance.code, "123")
class TestProductDeleteView(PermissionMixin, InstanceMixin, TestCase):
template_name = 'product/product_detail.html'
def setUp(self):
super().setUp()
self.url = reverse('product:delete', args=[self.instance.code])
def test_success_delete(self):
self.login()
resp = self.post(self.url, follow=True)
self.assertRedirects(resp, expected_url=reverse('product:list'))
self.assertContains(resp, "Product deleted!")
self.assertFalse(Product.objects.filter(pk=self.instance.pk).exists())
class TestProductHistoryView(PermissionMixin, InstanceMixin, TestCase):
template_name = 'product/product_history.html'
def setUp(self):
super().setUp()
self.url = reverse('product:view-history', args=[self.instance.code])
class TestProductGetImage(PermissionMixin, TestCase):
url = reverse_lazy('product:image')
def setUp(self):
super().setUp()
self.instance = ProductFactory()
self.url = reverse("product:image", args=[self.instance.code])
def test_valid_content_type(self):
self.login()
resp = self.client.get(self.url)
content_type = resp['Content-Type']
self.assertEqual(content_type, "image/png")
class TestProductAutocomplete(PermissionMixin, TestCase):
url = reverse_lazy('product:product-autocomplete')
def test_filters(self):
self.login()
ProductFactory(id=1, name="A1")
ProductFactory(id=2, name="A2", company=CompanyFactory(name="PrefixB2"))
ProductFactory(id=3, name="A3", company=CompanyFactory(official_name="B3Suffix"))
ProductFactory(id=4, name="A4", company=CompanyFactory(common_name="PefixB4Suffix"))
response = self.client.get("{}?q={}".format(self.url, "A1"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), self._get_expected_result([('1', 'A1')]))
response = self.client.get("{}?q={}".format(self.url, "B2"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), self._get_expected_result([('2', 'A2')]))
response = self.client.get("{}?q={}".format(self.url, "B3"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), self._get_expected_result([('3', 'A3')]))
response = self.client.get("{}?q={}".format(self.url, "B4"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), self._get_expected_result([('4', 'A4')]))
def _get_expected_result(self, elements):
return {
'pagination': {'more': False},
'results': [{'text': o[1], 'selected_text': o[1], 'id': o[0]} for o in elements],
}
class TestUrls(TestCase):
def test_should_render_url(self):
self.assertEqual("/cms/product/create", reverse('product:create'))
self.assertEqual("/cms/product/product-autocomplete/", reverse('product:product-autocomplete'))
self.assertEqual("/cms/product/123/image", reverse('product:image', args=[123]))
self.assertEqual("/cms/product/123/edit", reverse('product:edit', args=[123]))
self.assertEqual("/cms/product/123/delete", reverse('product:delete', args=[123]))
self.assertEqual("/cms/product/123/history", reverse('product:view-history', args=[123]))
self.assertEqual("/cms/product/123/", reverse('product:detail', args=[123]))
self.assertEqual("/cms/product/", reverse('product:list'))
|
the-stack_106_21061
|
import pandas as pd
import numpy as np
import math
import random
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.recurrent import LSTM
#% matplotlib inline
import matplotlib.pyplot as plt
random.seed(0)
# Coefficient for the random noise
random_factor = 0.05
# Number of steps per cycle
steps_per_cycle = 80
# Number of cycles to generate
number_of_cycles = 50
# Length of the input sequence
length_of_sequences = 100
# Dimensionality of the input and output values
in_out_neurons = 1
# Number of neurons in the hidden layer
hidden_neurons = 300
np_ary = np.arange(steps_per_cycle * number_of_cycles + 1)
data = np.load(input("Enter the stock price data (.npy) file: "))
arg_data = data[1:] / data[:len(data)-1] - 1
df = pd.DataFrame(arg_data, columns=["stock"])
#df = pd.DataFrame(np_ary, columns=["x"])
#pi_t = 2 * math.pi / steps_per_cycle
#df["sin(x)"] = df.x.apply(lambda t: math.sin(t * pi_t + random.uniform(-1.0, +1.0) * random_factor))
df[["stock"]].head(steps_per_cycle * 2).plot()
################################################################
def Input_Ans_Extract(data, input_num = 100):
InputList, AnsList = [], []
for i in range(len(data) - input_num):
        InputData = data.iloc[i:i+input_num].values
        AnsData = data.iloc[i+input_num].values
InputList.append(InputData)
AnsList.append(AnsData)
InputList_np = np.array(InputList)
AnsList_np = np.array(AnsList)
return InputList_np, AnsList_np
def Data_Split(df, test_size=0.1, input_num = 100):
train_size = round(len(df) * (1 - test_size))
train_size = int(train_size)
Input_train, Ans_train = Input_Ans_Extract(df.iloc[0:train_size], input_num)
Input_test, Ans_test = Input_Ans_Extract(df.iloc[train_size:], input_num)
return (Input_train, Ans_train), (Input_test, Ans_test)
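# Illustrative sketch of the windowing above (values made up): with input_num = 3 and a
# series s = [s0, s1, s2, s3, s4], Input_Ans_Extract yields
#   inputs  -> [[s0, s1, s2], [s1, s2, s3]]
#   answers -> [s3, s4]
# Data_Split cuts the frame into train and test blocks first and windows each block
# separately, so no input window straddles the train/test boundary.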
(Input_train, Ans_train), (Input_test, Ans_test) = Data_Split(df[["stock"]], input_num = length_of_sequences)
################################################################
model = Sequential()
model.add(LSTM(hidden_neurons, batch_input_shape=(None, length_of_sequences, in_out_neurons), return_sequences=False))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error", optimizer="rmsprop")
model.fit(Input_train, Ans_train, batch_size=60, epochs=3, validation_split=0.05)
################################################################
predicted = model.predict(Input_test)
################################################################
dataf = pd.DataFrame(predicted[:200])
dataf.columns = ["predict"]
dataf.plot()
dataf["answer"] = Ans_test[:200]
dataf.plot()
plt.show()
|
the-stack_106_21065
|
#Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#This program is free software; you can redistribute it and/or modify it under the terms of the BSD 0-Clause License.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD 0-Clause License for more details.
import torch
import torch.nn as nn
import numpy as np
import math
# FFCC paper, final step: fit a "Gaussian" in a torus
# see section 4: https://storage.googleapis.com/pub-tools-public-publication-data/pdf/6002678db6270c4a69f26d6fcef820e44b134951.pdf
class BivariateVonMises(nn.Module):
def __init__(self, num_bins, epsilon=1.0):
super(BivariateVonMises, self).__init__()
self._num_bins = num_bins
self._epsilon = epsilon
theta = torch.Tensor( (2*math.pi*np.arange(num_bins))/num_bins )
self.register_buffer('theta', theta)
bins = torch.Tensor( np.arange(num_bins) )
self.register_buffer('bins', bins)
def forward(self, bin_probability):
# Fit approximate bivariate von Mises
# i -> u, j -> v
pi = torch.sum(bin_probability, 3, keepdim=True)
pj = torch.sum(bin_probability, 2, keepdim=True)
sin_theta = torch.sin(self.theta)
cos_theta = torch.cos(self.theta)
sin_theta_pi = sin_theta.unsqueeze(0).unsqueeze(0).unsqueeze(-1).expand_as(pi)
cos_theta_pi = cos_theta.unsqueeze(0).unsqueeze(0).unsqueeze(-1).expand_as(pi)
sin_theta_pj = sin_theta.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(pj)
cos_theta_pj = cos_theta.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(pj)
yi = torch.sum(torch.mul(pi, sin_theta_pi), 2, keepdim=True)
xi = torch.sum(torch.mul(pi, cos_theta_pi), 2, keepdim=True)
yj = torch.sum(torch.mul(pj, sin_theta_pj), 3, keepdim=True)
xj = torch.sum(torch.mul(pj, cos_theta_pj), 3, keepdim=True)
        # 1. compute the mean (with gray light de-aliasing)
m_u_idx = torch.remainder(torch.mul(torch.atan2(yi, xi), self._num_bins)/(2*math.pi), self._num_bins).squeeze(-1).squeeze(-1)
m_v_idx = torch.remainder(torch.mul(torch.atan2(yj, xj), self._num_bins)/(2*math.pi), self._num_bins).squeeze(-1).squeeze(-1)
mu_idx = torch.cat([m_u_idx, m_v_idx], 1)
# 2. compute the covariance matrix
warp_i = torch.remainder(self.bins - m_u_idx + (self._num_bins/2) - 1, self._num_bins).unsqueeze(1).unsqueeze(-1)
warp_j = torch.remainder(self.bins - m_v_idx + (self._num_bins/2) - 1, self._num_bins).unsqueeze(1).unsqueeze(1)
E_i = torch.sum(pi * warp_i, 2, keepdim=True)
E_ii = torch.sum(pi * warp_i * warp_i, 2, keepdim=True)
E_j = torch.sum(pj * warp_j, 3, keepdim=True)
E_jj = torch.sum(pj * warp_j * warp_j, 3, keepdim=True)
sigma_00 = self._epsilon + E_ii - E_i*E_i
sigma_11 = self._epsilon + E_jj - E_j*E_j
sigma_o = torch.mul(bin_probability, torch.matmul(warp_i, warp_j)).sum(2, keepdim=True).sum(3, keepdim=True) - E_i*E_j
sigma = torch.cat([sigma_00, sigma_o, sigma_o, sigma_11], 1).squeeze(-1).squeeze(-1)
sigma = sigma.view(sigma.shape[0], 1, 2, 2).squeeze(1)
out = {'mu': mu_idx, 'sigma': sigma}
return out
if __name__ == '__main__':
import cv2
n_bins = 1024
bin_size = 1
starting_uv = 0
mean = np.array([100, 600])
    covariance = np.array([[500.5886, 400], [400, 500.7801]])  # covariance matrices must be symmetric
n = 10000
dist = np.random.multivariate_normal(mean, covariance, n)
    dist = np.round(dist).astype(int)
u = dist[:, 0]
v = dist[:, 1]
hist, xedges, yedges = np.histogram2d(u, v, n_bins, [[0, n_bins], [0, n_bins]])
hist = hist / hist.sum()
hist_torch = torch.FloatTensor(hist).unsqueeze(0).unsqueeze(0)
    bvm = BivariateVonMises(n_bins)  # __init__ only takes (num_bins, epsilon); bin_size/starting_uv are unused here
output = bvm(hist_torch)
id = 0
u = int(output['mu'][id, 0])
v = int(output['mu'][id, 1])
sigma = output['sigma'].data.cpu().numpy()[id, :, :] # B x 2 x 2
print(u, v)
print(sigma)
eigenvalues, eigenvectors = np.linalg.eig(sigma)
angle = math.atan2(eigenvectors[1, 0], eigenvectors[0, 0])
if angle < 0:
angle += 2*math.pi
angle = angle*180.0/math.pi
chisquare_val = 2.4477 # 95% confidence interval
axis_len = (round(chisquare_val*math.sqrt(eigenvalues[1])), round(chisquare_val*math.sqrt(eigenvalues[0])))
mean = (v, u)
hist_im = np.stack(((255*hist/hist.max()).astype(np.uint8),)*3, axis=2)
hist_im[u, v, :] = (0, 255, 0)
cv2.ellipse(hist_im, mean, axis_len, -angle, 0.0, 360.0, (0, 255, 0), 1)
cv2.imwrite('hist.png', hist_im)
|
the-stack_106_21067
|
import os
import pickle
import tempfile
from contextlib import contextmanager
import nbformat
import pytest
from dagstermill import DagstermillError, define_dagstermill_solid
from dagstermill.compat import ExecutionError
from jupyter_client.kernelspec import NoSuchKernel
from nbconvert.preprocessors import ExecutePreprocessor
from dagster import execute_pipeline, pipeline
from dagster.check import CheckError
from dagster.core.definitions.metadata import PathMetadataValue
from dagster.core.definitions.reconstruct import ReconstructablePipeline
from dagster.core.test_utils import instance_for_test
from dagster.utils import file_relative_path, safe_tempfile_path
try:
import dagster_pandas as _
DAGSTER_PANDAS_PRESENT = True
except ImportError:
DAGSTER_PANDAS_PRESENT = False
try:
import sklearn as _
SKLEARN_PRESENT = True
except ImportError:
SKLEARN_PRESENT = False
try:
import matplotlib as _
MATPLOTLIB_PRESENT = True
except ImportError:
MATPLOTLIB_PRESENT = False
def get_path(materialization_event):
for (
metadata_entry
) in materialization_event.event_specific_data.materialization.metadata_entries:
if isinstance(metadata_entry.entry_data, PathMetadataValue):
return metadata_entry.entry_data.path
def cleanup_result_notebook(result):
if not result:
return
materialization_events = [
x for x in result.step_event_list if x.event_type_value == "ASSET_MATERIALIZATION"
]
for materialization_event in materialization_events:
result_path = get_path(materialization_event)
if os.path.exists(result_path):
os.unlink(result_path)
@contextmanager
def exec_for_test(fn_name, env=None, raise_on_error=True, **kwargs):
result = None
recon_pipeline = ReconstructablePipeline.for_module("dagstermill.examples.repository", fn_name)
with instance_for_test() as instance:
try:
result = execute_pipeline(
recon_pipeline,
env,
instance=instance,
raise_on_error=raise_on_error,
**kwargs,
)
yield result
finally:
if result:
cleanup_result_notebook(result)
@pytest.mark.notebook_test
def test_hello_world():
with exec_for_test("hello_world_pipeline") as result:
assert result.success
@pytest.mark.notebook_test
def test_hello_world_with_config():
with exec_for_test("hello_world_config_pipeline") as result:
assert result.success
assert result.output_for_solid("hello_world_config") == "hello"
@pytest.mark.notebook_test
def test_hello_world_with_config_escape():
with exec_for_test(
"hello_world_config_pipeline",
env={"solids": {"hello_world_config": {"config": {"greeting": "'"}}}},
) as result:
assert result.success
assert result.output_for_solid("hello_world_config") == "'"
with exec_for_test(
"hello_world_config_pipeline",
env={"solids": {"hello_world_config": {"config": {"greeting": '"'}}}},
) as result:
assert result.success
assert result.output_for_solid("hello_world_config") == '"'
with exec_for_test(
"hello_world_config_pipeline",
env={"solids": {"hello_world_config": {"config": {"greeting": "\\"}}}},
) as result:
assert result.success
assert result.output_for_solid("hello_world_config") == "\\"
with exec_for_test(
"hello_world_config_pipeline",
env={"solids": {"hello_world_config": {"config": {"greeting": "}"}}}},
) as result:
assert result.success
assert result.output_for_solid("hello_world_config") == "}"
with exec_for_test(
"hello_world_config_pipeline",
env={"solids": {"hello_world_config": {"config": {"greeting": "\n"}}}},
) as result:
assert result.success
assert result.output_for_solid("hello_world_config") == "\n"
@pytest.mark.notebook_test
def test_alias_with_config():
with exec_for_test(
"alias_config_pipeline",
env={"solids": {"aliased_greeting": {"config": {"greeting": "boo"}}}},
) as result:
assert result.success
assert result.output_for_solid("aliased_greeting") == "boo"
@pytest.mark.notebook_test
def test_reexecute_result_notebook():
def _strip_execution_metadata(nb):
cells = nb["cells"]
for cell in cells:
if "metadata" in cell:
if "execution" in cell["metadata"]:
del cell["metadata"]["execution"]
nb["cells"] = cells
return nb
with exec_for_test(
"hello_world_pipeline", {"loggers": {"console": {"config": {"log_level": "ERROR"}}}}
) as result:
assert result.success
materialization_events = [
x for x in result.step_event_list if x.event_type_value == "ASSET_MATERIALIZATION"
]
for materialization_event in materialization_events:
result_path = get_path(materialization_event)
if result_path.endswith(".ipynb"):
with open(result_path) as fd:
nb = nbformat.read(fd, as_version=4)
ep = ExecutePreprocessor()
ep.preprocess(nb)
with open(result_path) as fd:
expected = _strip_execution_metadata(nb)
actual = _strip_execution_metadata(nbformat.read(fd, as_version=4))
assert actual == expected
@pytest.mark.notebook_test
def test_hello_world_with_output():
with exec_for_test("hello_world_output_pipeline") as result:
assert result.success
assert result.result_for_solid("hello_world_output").output_value() == "hello, world"
@pytest.mark.notebook_test
def test_hello_world_explicit_yield():
with exec_for_test("hello_world_explicit_yield_pipeline") as result:
materializations = [
x for x in result.event_list if x.event_type_value == "ASSET_MATERIALIZATION"
]
assert len(materializations) == 2
assert get_path(materializations[1]) == "/path/to/file"
@pytest.mark.notebook_test
def test_add_pipeline():
with exec_for_test(
"add_pipeline", {"loggers": {"console": {"config": {"log_level": "ERROR"}}}}
) as result:
assert result.success
assert result.result_for_solid("add_two_numbers").output_value() == 3
@pytest.mark.notebook_test
def test_double_add_pipeline():
with exec_for_test(
"double_add_pipeline", {"loggers": {"console": {"config": {"log_level": "ERROR"}}}}
) as result:
assert result.success
assert result.result_for_solid("add_two_numbers_1").output_value() == 3
assert result.result_for_solid("add_two_numbers_2").output_value() == 7
@pytest.mark.notebook_test
def test_fan_in_notebook_pipeline():
with exec_for_test(
"fan_in_notebook_pipeline",
{
"execution": {"multiprocess": {}},
"solids": {
"solid_1": {"inputs": {"obj": "hello"}},
"solid_2": {"inputs": {"obj": "world"}},
},
},
) as result:
assert result.success
assert result.result_for_solid("solid_1").output_value() == "hello"
assert result.result_for_solid("solid_2").output_value() == "world"
assert result.result_for_solid("fan_in").output_value() == "hello world"
@pytest.mark.notebook_test
def test_composite_pipeline():
with exec_for_test(
"composite_pipeline",
{
"execution": {"multiprocess": {}},
"solids": {"outer": {"solids": {"yield_something": {"inputs": {"obj": "hello"}}}}},
},
) as result:
assert result.success
assert (
result.result_for_solid("outer").result_for_solid("yield_something").output_value()
== "hello"
)
@pytest.mark.notebook_test
def test_fan_in_notebook_pipeline_in_mem():
with exec_for_test(
"fan_in_notebook_pipeline_in_mem",
{
"solids": {
"solid_1": {"inputs": {"obj": "hello"}},
"solid_2": {"inputs": {"obj": "world"}},
},
},
raise_on_error=False,
) as result:
        # TODO: error at definition time that dagstermill solids require "multiprocessing.shared_memory"
assert not result.success
@pytest.mark.notebook_test
def test_notebook_dag():
with exec_for_test(
"notebook_dag_pipeline",
{"solids": {"load_a": {"config": 1}, "load_b": {"config": 2}}},
) as result:
assert result.success
assert result.result_for_solid("add_two_numbers").output_value() == 3
assert result.result_for_solid("mult_two_numbers").output_value() == 6
@pytest.mark.notebook_test
def test_error_notebook():
with pytest.raises(ExecutionError) as exc:
with exec_for_test("error_pipeline") as result:
pass
assert "Someone set up us the bomb" in str(exc.value)
with exec_for_test("error_pipeline", raise_on_error=False) as result:
assert not result.success
assert result.step_event_list[1].event_type.value == "STEP_FAILURE"
@pytest.mark.nettest
@pytest.mark.notebook_test
@pytest.mark.skipif(
not (DAGSTER_PANDAS_PRESENT and SKLEARN_PRESENT and MATPLOTLIB_PRESENT),
reason="tutorial_pipeline reqs not present: dagster_pandas, sklearn, matplotlib",
)
def test_tutorial_pipeline():
with exec_for_test(
"tutorial_pipeline", {"loggers": {"console": {"config": {"log_level": "DEBUG"}}}}
) as result:
assert result.success
@pytest.mark.notebook_test
def test_hello_world_reexecution():
with exec_for_test("hello_world_pipeline") as result:
assert result.success
output_notebook_path = get_path(
[x for x in result.step_event_list if x.event_type_value == "ASSET_MATERIALIZATION"][0]
)
with tempfile.NamedTemporaryFile("w+", suffix=".py") as reexecution_notebook_file:
reexecution_notebook_file.write(
(
"from dagster import pipeline\n"
"from dagstermill import define_dagstermill_solid\n\n\n"
"reexecution_solid = define_dagstermill_solid(\n"
" 'hello_world_reexecution', '{output_notebook_path}'\n"
")\n\n"
"@pipeline\n"
"def reexecution_pipeline():\n"
" reexecution_solid()\n"
).format(output_notebook_path=output_notebook_path)
)
reexecution_notebook_file.flush()
result = None
reexecution_pipeline = ReconstructablePipeline.for_file(
reexecution_notebook_file.name, "reexecution_pipeline"
)
reexecution_result = None
with instance_for_test() as instance:
try:
reexecution_result = execute_pipeline(reexecution_pipeline, instance=instance)
assert reexecution_result.success
finally:
if reexecution_result:
cleanup_result_notebook(reexecution_result)
@pytest.mark.notebook_test
def test_resources_notebook():
with safe_tempfile_path() as path:
with exec_for_test(
"resource_pipeline",
{"resources": {"list": {"config": path}}},
mode="prod",
) as result:
assert result.success
# Expect something like:
# ['e8d636: Opened', 'e8d636: Hello, solid!', '9d438e: Opened',
# '9d438e: Hello, notebook!', '9d438e: Closed', 'e8d636: Closed']
with open(path, "rb") as fd:
messages = pickle.load(fd)
messages = [message.split(": ") for message in messages]
resource_ids = [x[0] for x in messages]
assert len(set(resource_ids)) == 2
assert resource_ids[0] == resource_ids[1] == resource_ids[5]
assert resource_ids[2] == resource_ids[3] == resource_ids[4]
msgs = [x[1] for x in messages]
assert msgs[0] == msgs[2] == "Opened"
assert msgs[4] == msgs[5] == "Closed"
assert msgs[1] == "Hello, solid!"
assert msgs[3] == "Hello, notebook!"
# https://github.com/dagster-io/dagster/issues/3722
@pytest.mark.skip
@pytest.mark.notebook_test
def test_resources_notebook_with_exception():
result = None
with safe_tempfile_path() as path:
with exec_for_test(
"resource_with_exception_pipeline",
{"resources": {"list": {"config": path}}},
raise_on_error=False,
) as result:
assert not result.success
assert result.step_event_list[8].event_type.value == "STEP_FAILURE"
assert (
"raise Exception()"
in result.step_event_list[8].event_specific_data.error.cause.message
)
# Expect something like:
# ['e8d636: Opened', 'e8d636: Hello, solid!', '9d438e: Opened',
# '9d438e: Hello, notebook!', '9d438e: Closed', 'e8d636: Closed']
with open(path, "rb") as fd:
messages = pickle.load(fd)
messages = [message.split(": ") for message in messages]
resource_ids = [x[0] for x in messages]
assert len(set(resource_ids)) == 2
assert resource_ids[0] == resource_ids[1] == resource_ids[5]
assert resource_ids[2] == resource_ids[3] == resource_ids[4]
msgs = [x[1] for x in messages]
assert msgs[0] == msgs[2] == "Opened"
assert msgs[4] == msgs[5] == "Closed"
assert msgs[1] == "Hello, solid!"
assert msgs[3] == "Hello, notebook!"
@pytest.mark.notebook_test
def test_bad_kernel_pipeline():
with pytest.raises(NoSuchKernel):
with exec_for_test("bad_kernel_pipeline"):
pass
@pytest.mark.notebook_test
def test_hello_logging():
with exec_for_test("hello_logging_pipeline") as result:
assert result.success
@pytest.mark.notebook_test
def test_reimport():
with exec_for_test("reimport_pipeline") as result:
assert result.success
assert result.result_for_solid("reimport").output_value() == 6
@pytest.mark.notebook_test
def test_yield_3_pipeline():
with exec_for_test("yield_3_pipeline") as result:
assert result.success
assert result.result_for_solid("yield_3").output_value() == 3
@pytest.mark.notebook_test
def test_yield_obj_pipeline():
with exec_for_test("yield_obj_pipeline") as result:
assert result.success
assert result.result_for_solid("yield_obj").output_value().x == 3
@pytest.mark.notebook_test
def test_hello_world_with_custom_tags_and_description_pipeline():
with exec_for_test("hello_world_with_custom_tags_and_description_pipeline") as result:
assert result.success
def test_non_reconstructable_pipeline():
foo_solid = define_dagstermill_solid("foo", file_relative_path(__file__, "notebooks/foo.ipynb"))
@pipeline
def non_reconstructable():
foo_solid()
with pytest.raises(DagstermillError, match="pipeline that is not reconstructable."):
execute_pipeline(non_reconstructable)
# Test Solid Tags & Description
BACKING_NB_NAME = "hello_world"
BACKING_NB_PATH = file_relative_path(__file__, f"notebooks/{BACKING_NB_NAME}.ipynb")
def test_default_tags():
test_solid_default_tags = define_dagstermill_solid(BACKING_NB_NAME, BACKING_NB_PATH)
assert test_solid_default_tags.tags == {
"kind": "ipynb",
"notebook_path": BACKING_NB_PATH,
}
def test_custom_tags():
test_solid_custom_tags = define_dagstermill_solid(
BACKING_NB_NAME, BACKING_NB_PATH, tags={"foo": "bar"}
)
assert test_solid_custom_tags.tags == {
"kind": "ipynb",
"notebook_path": BACKING_NB_PATH,
"foo": "bar",
}
def test_reserved_tags_not_overridden():
with pytest.raises(CheckError, match="key is reserved for use by Dagster"):
define_dagstermill_solid(BACKING_NB_NAME, BACKING_NB_PATH, tags={"notebook_path": "~"})
with pytest.raises(CheckError, match="key is reserved for use by Dagster"):
define_dagstermill_solid(BACKING_NB_NAME, BACKING_NB_PATH, tags={"kind": "py"})
def test_default_description():
test_solid = define_dagstermill_solid(BACKING_NB_NAME, BACKING_NB_PATH)
assert test_solid.description.startswith("This solid is backed by the notebook at ")
def test_custom_description():
test_description = "custom description"
test_solid = define_dagstermill_solid(
BACKING_NB_NAME, BACKING_NB_PATH, description=test_description
)
assert test_solid.description == test_description
@pytest.mark.notebook_test
def test_retries(capsys):
with exec_for_test("retries_pipeline", raise_on_error=False) as result:
assert result.result_for_solid("yield_retry").retry_attempts == 1
# the raise_retry solid should trigger a warning to use yield_event
warn_found = False
captured = capsys.readouterr()
for line in captured.err.split("\n"):
if "Use dagstermill.yield_event with RetryRequested or Failure" in line:
warn_found = True
assert warn_found
@pytest.mark.notebook_test
def test_failure(capsys):
with exec_for_test("failure_pipeline", raise_on_error=False) as result:
assert (
result.result_for_solid("yield_failure").failure_data.user_failure_data.description
== "bad bad notebook"
)
# the raise_failure solid should trigger a warning to use yield_event
warn_found = False
captured = capsys.readouterr()
for line in captured.err.split("\n"):
if "Use dagstermill.yield_event with RetryRequested or Failure" in line:
warn_found = True
assert warn_found
@pytest.mark.notebook_test
def test_hello_world_graph():
from dagstermill.examples.repository import build_hello_world_job
from dagster import reconstructable
with instance_for_test() as instance:
result = None
try:
result = execute_pipeline(
reconstructable(build_hello_world_job),
instance=instance,
)
assert result.success
finally:
if result:
cleanup_result_notebook(result)
|
the-stack_106_21068
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import typing as ty
from functools import lru_cache
import pytest
from rnacentral_pipeline.databases.ensembl.data import Division
from rnacentral_pipeline.databases.ensembl.genomes import urls
@lru_cache()
def species(division: Division) -> ty.List[str]:
found = urls.urls_for(division, "ftp.ensemblgenomes.org")
return [f[1] for f in found]
@pytest.mark.parametrize(
"division,expected,found",
[
(Division.fungi, "aspergillus_oryzae_gca_002007945", False),
(Division.fungi, "aspergillus_fumigatus_var_rp_2014_gca_000731615", False),
(Division.fungi, "aspergillus_oryzae", True),
],
)
def test_can_generate_reasonable_species(division, expected, found):
if found:
assert expected in species(division)
else:
assert expected not in species(division)
|
the-stack_106_21070
|
"""Module containing utility functions to compare parameters and results"""
__author__ = 'Robert Meyer'
try:
    from collections.abc import Sequence, Mapping
except ImportError:  # Python 2 fallback
    from collections import Sequence, Mapping
import numpy as np
import pandas as pd
import scipy.sparse as spsp
import pypet.slots as slots
def results_equal(a, b):
"""Compares two result instances
Checks full name and all data. Does not consider the comment.
:return: True or False
:raises: ValueError if both inputs are no result instances
"""
if a.v_is_parameter and b.v_is_parameter:
raise ValueError('Both inputs are not results.')
if a.v_is_parameter or b.v_is_parameter:
return False
if a.v_full_name != b.v_full_name:
return False
    if hasattr(a, '_data') != hasattr(b, '_data'):
        return False
if hasattr(a, '_data'):
akeyset = set(a._data.keys())
bkeyset = set(b._data.keys())
if akeyset != bkeyset:
return False
for key in a._data:
val = a._data[key]
bval = b._data[key]
if not nested_equal(val, bval):
return False
return True
def parameters_equal(a, b):
"""Compares two parameter instances
Checks full name, data, and ranges. Does not consider the comment.
:return: True or False
:raises: ValueError if both inputs are no parameter instances
"""
if (not b.v_is_parameter and
not a.v_is_parameter):
raise ValueError('Both inputs are not parameters')
if (not b.v_is_parameter or
not a.v_is_parameter):
return False
if a.v_full_name != b.v_full_name:
return False
if a.f_is_empty() and b.f_is_empty():
return True
if a.f_is_empty() != b.f_is_empty():
return False
if not a._values_of_same_type(a.f_get(), b.f_get()):
return False
if not a._equal_values(a.f_get(), b.f_get()):
return False
if a.f_has_range() != b.f_has_range():
return False
if a.f_has_range():
if a.f_get_range_length() != b.f_get_range_length():
return False
for myitem, bitem in zip(a.f_get_range(copy=False), b.f_get_range(copy=False)):
if not a._values_of_same_type(myitem, bitem):
return False
if not a._equal_values(myitem, bitem):
return False
return True
def get_all_attributes(instance):
"""Returns an attribute value dictionary much like `__dict__` but incorporates `__slots__`"""
try:
result_dict = instance.__dict__.copy()
except AttributeError:
result_dict = {}
if hasattr(instance, '__all_slots__'):
all_slots = instance.__all_slots__
else:
all_slots = slots.get_all_slots(instance.__class__)
for slot in all_slots:
result_dict[slot] = getattr(instance, slot)
result_dict.pop('__dict__', None)
result_dict.pop('__weakref__', None)
return result_dict
def nested_equal(a, b):
"""Compares two objects recursively by their elements.
Also handles numpy arrays, pandas data and sparse matrices.
First checks if the data falls into the above categories.
If not, it is checked if a or b are some type of sequence or mapping and
the contained elements are compared.
If this is not the case, it is checked if a or b do provide a custom `__eq__` that
evaluates to a single boolean value.
If this is not the case, the attributes of a and b are compared.
If this does not help either, normal `==` is used.
Assumes hashable items are not mutable in a way that affects equality.
Based on the suggestion from HERE_, thanks again Lauritz V. Thaulow :-)
.. _HERE: http://stackoverflow.com/questions/18376935/best-practice-for-equality-in-python
"""
if a is b:
return True
if a is None or b is None:
return False
a_sparse = spsp.isspmatrix(a)
b_sparse = spsp.isspmatrix(b)
if a_sparse != b_sparse:
return False
if a_sparse:
if a.nnz == 0:
return b.nnz == 0
else:
return not np.any((a != b).data)
a_series = isinstance(a, pd.Series)
b_series = isinstance(b, pd.Series)
if a_series != b_series:
return False
if a_series:
try:
eq = (a == b).all()
return eq
except (TypeError, ValueError):
# If Sequence itself contains numpy arrays we get here
if not len(a) == len(b):
return False
for idx, itema in enumerate(a):
itemb = b[idx]
if not nested_equal(itema, itemb):
return False
return True
a_frame = isinstance(a, pd.DataFrame)
b_frame = isinstance(b, pd.DataFrame)
if a_frame != b_frame:
return False
if a_frame:
try:
if a.empty and b.empty:
return True
new_frame = a == b
new_frame = new_frame | (pd.isnull(a) & pd.isnull(b))
if isinstance(new_frame, pd.DataFrame):
return np.all(new_frame.values)
except (ValueError, TypeError):
# The Value Error can happen if the data frame is of dtype=object and contains
# numpy arrays. Numpy array comparisons do not evaluate to a single truth value
for name in a:
cola = a[name]
if not name in b:
return False
colb = b[name]
if not len(cola) == len(colb):
return False
for idx, itema in enumerate(cola):
itemb = colb[idx]
if not nested_equal(itema, itemb):
return False
return True
a_array = isinstance(a, np.ndarray)
b_array = isinstance(b, np.ndarray)
if a_array != b_array:
return False
if a_array:
if a.shape != b.shape:
return False
return np.all(a == b)
a_list = isinstance(a, (Sequence, list, tuple))
b_list = isinstance(b, (Sequence, list, tuple))
if a_list != b_list:
return False
if a_list:
return all(nested_equal(x, y) for x, y in zip(a, b))
a_mapping = isinstance(a, (Mapping, dict))
b_mapping = isinstance(b, (Mapping, dict))
if a_mapping != b_mapping:
return False
if a_mapping:
keys_a = a.keys()
if set(keys_a) != set(b.keys()):
return False
return all(nested_equal(a[k], b[k]) for k in keys_a)
# Equality for general objects
# for types that support __eq__ or __cmp__
equality = NotImplemented
try:
equality = a.__eq__(b)
except (AttributeError, NotImplementedError, TypeError, ValueError):
pass
if equality is NotImplemented:
try:
equality = b.__eq__(a)
except (AttributeError, NotImplementedError, TypeError, ValueError):
pass
if equality is NotImplemented:
try:
cmp = a.__cmp__(b)
if cmp is not NotImplemented:
equality = cmp == 0
except (AttributeError, NotImplementedError, TypeError, ValueError):
pass
if equality is NotImplemented:
try:
cmp = b.__cmp__(a)
if cmp is not NotImplemented:
equality = cmp == 0
except (AttributeError, NotImplementedError, TypeError, ValueError):
pass
if equality is not NotImplemented:
try:
return bool(equality)
except (AttributeError, NotImplementedError, TypeError, ValueError):
pass
# Compare objects based on their attributes
attributes_a = get_all_attributes(a)
attributes_b = get_all_attributes(b)
if len(attributes_a) != len(attributes_b):
return False
if len(attributes_a) > 0:
keys_a = list(attributes_a.keys())
if set(keys_a) != set(attributes_b.keys()):
return False
return all(nested_equal(attributes_a[k], attributes_b[k]) for k in keys_a)
# Ok they are really not equal
return False
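# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module; assumes it is run as
# a standalone script). nested_equal handles builtins, numpy arrays and pandas
# objects alike.
if __name__ == '__main__':
    print(nested_equal({'a': np.arange(3), 'b': [1, 2]},
                       {'a': np.arange(3), 'b': [1, 2]}))   # -> True
    print(nested_equal(pd.Series([1.0, np.nan]),
                       pd.Series([1.0, 2.0])))              # -> False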
|
the-stack_106_21071
|
# Copyright (C) 2021-2022, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
# Credits: post-processing adapted from https://github.com/xuannianz/DifferentiableBinarization
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.applications import ResNet50
from doctr.models.utils import IntermediateLayerGetter, conv_sequence, load_pretrained_params
from doctr.utils.repr import NestedObject
from ...classification import mobilenet_v3_large
from .base import DBPostProcessor, _DBNet
__all__ = ['DBNet', 'db_resnet50', 'db_mobilenet_v3_large']
default_cfgs: Dict[str, Dict[str, Any]] = {
'db_resnet50': {
'mean': (0.798, 0.785, 0.772),
'std': (0.264, 0.2749, 0.287),
'input_shape': (1024, 1024, 3),
'url': 'https://github.com/mindee/doctr/releases/download/v0.2.0/db_resnet50-adcafc63.zip',
},
'db_mobilenet_v3_large': {
'mean': (0.798, 0.785, 0.772),
'std': (0.264, 0.2749, 0.287),
'input_shape': (1024, 1024, 3),
'url': 'https://github.com/mindee/doctr/releases/download/v0.3.1/db_mobilenet_v3_large-8c16d5bf.zip',
},
}
class FeaturePyramidNetwork(layers.Layer, NestedObject):
"""Feature Pyramid Network as described in `"Feature Pyramid Networks for Object Detection"
<https://arxiv.org/pdf/1612.03144.pdf>`_.
Args:
channels: number of channel to output
"""
def __init__(
self,
channels: int,
) -> None:
super().__init__()
self.channels = channels
self.upsample = layers.UpSampling2D(size=(2, 2), interpolation='nearest')
self.inner_blocks = [layers.Conv2D(channels, 1, strides=1, kernel_initializer='he_normal') for _ in range(4)]
self.layer_blocks = [self.build_upsampling(channels, dilation_factor=2 ** idx) for idx in range(4)]
@staticmethod
def build_upsampling(
channels: int,
dilation_factor: int = 1,
) -> layers.Layer:
"""Module which performs a 3x3 convolution followed by up-sampling
Args:
channels: number of output channels
dilation_factor (int): dilation factor to scale the convolution output before concatenation
Returns:
a keras.layers.Layer object, wrapping these operations in a sequential module
"""
_layers = conv_sequence(channels, 'relu', True, kernel_size=3)
if dilation_factor > 1:
_layers.append(layers.UpSampling2D(size=(dilation_factor, dilation_factor), interpolation='nearest'))
module = keras.Sequential(_layers)
return module
def extra_repr(self) -> str:
return f"channels={self.channels}"
def call(
self,
x: List[tf.Tensor],
**kwargs: Any,
) -> tf.Tensor:
# Channel mapping
results = [block(fmap, **kwargs) for block, fmap in zip(self.inner_blocks, x)]
# Upsample & sum
        for idx in range(len(results) - 2, -1, -1):
results[idx] += self.upsample(results[idx + 1])
# Conv & upsample
results = [block(fmap, **kwargs) for block, fmap in zip(self.layer_blocks, results)]
return layers.concatenate(results)
class DBNet(_DBNet, keras.Model, NestedObject):
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_.
Args:
        feature_extractor: the backbone serving as feature extractor
fpn_channels: number of channels each extracted feature maps is mapped to
num_classes: number of output channels in the segmentation map
assume_straight_pages: if True, fit straight bounding boxes only
cfg: the configuration dict of the model
"""
_children_names: List[str] = ['feat_extractor', 'fpn', 'probability_head', 'threshold_head', 'postprocessor']
def __init__(
self,
feature_extractor: IntermediateLayerGetter,
fpn_channels: int = 128, # to be set to 256 to represent the author's initial idea
num_classes: int = 1,
assume_straight_pages: bool = True,
cfg: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
self.cfg = cfg
self.feat_extractor = feature_extractor
self.assume_straight_pages = assume_straight_pages
self.fpn = FeaturePyramidNetwork(channels=fpn_channels)
# Initialize kernels
_inputs = [layers.Input(shape=in_shape[1:]) for in_shape in self.feat_extractor.output_shape]
output_shape = tuple(self.fpn(_inputs).shape)
self.probability_head = keras.Sequential(
[
*conv_sequence(64, 'relu', True, kernel_size=3, input_shape=output_shape[1:]),
layers.Conv2DTranspose(64, 2, strides=2, use_bias=False, kernel_initializer='he_normal'),
layers.BatchNormalization(),
layers.Activation('relu'),
layers.Conv2DTranspose(num_classes, 2, strides=2, kernel_initializer='he_normal'),
]
)
self.threshold_head = keras.Sequential(
[
*conv_sequence(64, 'relu', True, kernel_size=3, input_shape=output_shape[1:]),
layers.Conv2DTranspose(64, 2, strides=2, use_bias=False, kernel_initializer='he_normal'),
layers.BatchNormalization(),
layers.Activation('relu'),
layers.Conv2DTranspose(num_classes, 2, strides=2, kernel_initializer='he_normal'),
]
)
self.postprocessor = DBPostProcessor(assume_straight_pages=assume_straight_pages)
def compute_loss(
self,
out_map: tf.Tensor,
thresh_map: tf.Tensor,
target: List[np.ndarray]
) -> tf.Tensor:
"""Compute a batch of gts, masks, thresh_gts, thresh_masks from a list of boxes
and a list of masks for each image. From there it computes the loss with the model output
Args:
out_map: output feature map of the model of shape (N, H, W, C)
thresh_map: threshold map of shape (N, H, W, C)
target: list of dictionary where each dict has a `boxes` and a `flags` entry
Returns:
A loss tensor
"""
prob_map = tf.math.sigmoid(tf.squeeze(out_map, axis=[-1]))
thresh_map = tf.math.sigmoid(tf.squeeze(thresh_map, axis=[-1]))
seg_target, seg_mask, thresh_target, thresh_mask = self.build_target(target, out_map.shape[:3])
seg_target = tf.convert_to_tensor(seg_target, dtype=out_map.dtype)
seg_mask = tf.convert_to_tensor(seg_mask, dtype=tf.bool)
thresh_target = tf.convert_to_tensor(thresh_target, dtype=out_map.dtype)
thresh_mask = tf.convert_to_tensor(thresh_mask, dtype=tf.bool)
# Compute balanced BCE loss for proba_map
bce_scale = 5.
bce_loss = tf.keras.losses.binary_crossentropy(seg_target[..., None], out_map, from_logits=True)[seg_mask]
neg_target = 1 - seg_target[seg_mask]
positive_count = tf.math.reduce_sum(seg_target[seg_mask])
negative_count = tf.math.reduce_min([tf.math.reduce_sum(neg_target), 3. * positive_count])
negative_loss = bce_loss * neg_target
negative_loss, _ = tf.nn.top_k(negative_loss, tf.cast(negative_count, tf.int32))
sum_losses = tf.math.reduce_sum(bce_loss * seg_target[seg_mask]) + tf.math.reduce_sum(negative_loss)
balanced_bce_loss = sum_losses / (positive_count + negative_count + 1e-6)
# Compute dice loss for approxbin_map
bin_map = 1 / (1 + tf.exp(-50. * (prob_map[seg_mask] - thresh_map[seg_mask])))
bce_min = tf.math.reduce_min(bce_loss)
weights = (bce_loss - bce_min) / (tf.math.reduce_max(bce_loss) - bce_min) + 1.
inter = tf.math.reduce_sum(bin_map * seg_target[seg_mask] * weights)
union = tf.math.reduce_sum(bin_map) + tf.math.reduce_sum(seg_target[seg_mask]) + 1e-8
dice_loss = 1 - 2.0 * inter / union
# Compute l1 loss for thresh_map
l1_scale = 10.
if tf.reduce_any(thresh_mask):
l1_loss = tf.math.reduce_mean(tf.math.abs(thresh_map[thresh_mask] - thresh_target[thresh_mask]))
else:
l1_loss = tf.constant(0.)
return l1_scale * l1_loss + bce_scale * balanced_bce_loss + dice_loss
def call(
self,
x: tf.Tensor,
target: Optional[List[np.ndarray]] = None,
return_model_output: bool = False,
return_preds: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
feat_maps = self.feat_extractor(x, **kwargs)
feat_concat = self.fpn(feat_maps, **kwargs)
logits = self.probability_head(feat_concat, **kwargs)
out: Dict[str, tf.Tensor] = {}
if return_model_output or target is None or return_preds:
prob_map = tf.math.sigmoid(logits)
if return_model_output:
out["out_map"] = prob_map
if target is None or return_preds:
# Post-process boxes (keep only text predictions)
out["preds"] = [preds[0] for preds in self.postprocessor(prob_map.numpy())]
if target is not None:
thresh_map = self.threshold_head(feat_concat, **kwargs)
loss = self.compute_loss(logits, thresh_map, target)
out['loss'] = loss
return out
def _db_resnet(
arch: str,
pretrained: bool,
backbone_fn,
fpn_layers: List[str],
pretrained_backbone: bool = True,
input_shape: Optional[Tuple[int, int, int]] = None,
**kwargs: Any,
) -> DBNet:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg['input_shape'] = input_shape or _cfg['input_shape']
# Feature extractor
feat_extractor = IntermediateLayerGetter(
backbone_fn(
weights='imagenet' if pretrained_backbone else None,
include_top=False,
pooling=None,
input_shape=_cfg['input_shape'],
),
fpn_layers,
)
# Build the model
model = DBNet(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, _cfg['url'])
return model
def _db_mobilenet(
arch: str,
pretrained: bool,
backbone_fn,
fpn_layers: List[str],
pretrained_backbone: bool = True,
input_shape: Optional[Tuple[int, int, int]] = None,
**kwargs: Any,
) -> DBNet:
pretrained_backbone = pretrained_backbone and not pretrained
# Patch the config
_cfg = deepcopy(default_cfgs[arch])
_cfg['input_shape'] = input_shape or _cfg['input_shape']
# Feature extractor
feat_extractor = IntermediateLayerGetter(
backbone_fn(
input_shape=_cfg['input_shape'],
include_top=False,
pretrained=pretrained_backbone,
),
fpn_layers,
)
# Build the model
model = DBNet(feat_extractor, cfg=_cfg, **kwargs)
# Load pretrained parameters
if pretrained:
load_pretrained_params(model, _cfg['url'])
return model
def db_resnet50(pretrained: bool = False, **kwargs: Any) -> DBNet:
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_, using a ResNet-50 backbone.
>>> import tensorflow as tf
>>> from doctr.models import db_resnet50
>>> model = db_resnet50(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 1024, 1024, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _db_resnet(
'db_resnet50',
pretrained,
ResNet50,
["conv2_block3_out", "conv3_block4_out", "conv4_block6_out", "conv5_block3_out"],
**kwargs,
)
def db_mobilenet_v3_large(pretrained: bool = False, **kwargs: Any) -> DBNet:
"""DBNet as described in `"Real-time Scene Text Detection with Differentiable Binarization"
<https://arxiv.org/pdf/1911.08947.pdf>`_, using a mobilenet v3 large backbone.
>>> import tensorflow as tf
>>> from doctr.models import db_mobilenet_v3_large
>>> model = db_mobilenet_v3_large(pretrained=True)
>>> input_tensor = tf.random.uniform(shape=[1, 1024, 1024, 3], maxval=1, dtype=tf.float32)
>>> out = model(input_tensor)
Args:
pretrained (bool): If True, returns a model pre-trained on our text detection dataset
Returns:
text detection architecture
"""
return _db_mobilenet(
'db_mobilenet_v3_large',
pretrained,
mobilenet_v3_large,
["inverted_2", "inverted_5", "inverted_11", "final_block"],
**kwargs,
)
|
the-stack_106_21072
|
def check_membership(username, allowed=[], banned_sets={}):
"""
This function is badly written, but for once it is intentional.
:param username:
:param allowed:
:return:
"""
"""One more thing"""
found = True
if username == None: # noqa: B003
return False, f"Username not provided!" # noqa: B007
for banned in banned_sets:
if username in banned:
return (False, f"User was banned!")
if not username in allowed:
found = False
if found == False or found != 42 and found == True:
return (False, f"User is not allowed!")
with ctx():
with recorder() as rec:
a = 42
with rollback():
logging.info(f"Username {username} is logged in.")
return (True, f"Hello, {username}") # noqa
constant = 42
_private_value = 53
def main():
return check_membership("test", ["root", "another_user"], banned_sets={["hacker1"]})
|
the-stack_106_21073
|
"""Verifica se o número recebido possui ao menos um digíto adjacente igual a ele"""
numero = int(input('Digite um número inteiro: '))
tem_adjacente = False
while numero > 0:
ultimo_numero = numero % 10
penultimo_numero = (numero // 10) % 10
if ultimo_numero == penultimo_numero:
tem_adjacente = True
break
numero = numero // 10
# só entra no 'if' caso tem_adjacente = True
if tem_adjacente:
print('sim')
else:
print('não')
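# Worked example (hypothetical inputs): 1223 -> "yes" (the adjacent 2s match on
# the second loop pass); 1234 -> "no" (no two neighbouring digits are equal).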
|
the-stack_106_21075
|
"""
@file
@brief Calls :epkg:`nbconvert` in command line for latex and pdf.
"""
import sys
import warnings
try:
from nbconvert.nbconvertapp import main as nbconvert_main
except AttributeError as e:
raise ImportError("Unable to import nbconvert") from e
def run_nbconvert(argv):
try:
nbconvert_main(argv=argv)
except Exception as ee:
warnings.warn(
"[run_nbconvert-ERROR] Unable to to convert a notebook with "
"args=%r due to %r." % (argv, ee), RuntimeWarning)
def main():
run_nbconvert(sys.argv[1:])
if __name__ == "__main__":
main()
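# Example invocation (assumed script/notebook names): the wrapper just forwards
# standard nbconvert arguments, e.g.
#   python run_nbconvert.py --to pdf notebook.ipynb
#   python run_nbconvert.py --to latex notebook.ipynb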
|
the-stack_106_21076
|
# -*- coding: utf-8 -*-
# GUI Application automation and testing library
# Copyright (C) 2006-2019 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Linux AtspiElementInfo class"""
from .atspi_objects import AtspiAccessible, AtspiComponent, AtspiStateEnum, AtspiAction, AtspiValue, \
IATSPI, RECT
from ..element_info import ElementInfo
class AtspiElementInfo(ElementInfo):
"""Search class and hierarchy walker for AT-SPI elements"""
atspi_accessible = AtspiAccessible()
re_props = ["class_name", "name", "control_type"]
exact_only_props = ["handle", "pid", "control_id", "visible", "enabled", "rectangle",
"framework_id", "framework_name", "atspi_version", "runtime_id", "description"]
search_order = ["handle", "control_type", "class_name", "pid", "control_id",
"visible", "enabled", "name", "rectangle",
"framework_id", "framework_name", "atspi_version", "runtime_id", "description"]
# "auto_id", "full_control_type"
assert set(re_props + exact_only_props) == set(search_order)
renamed_props = {
"title": ("name", None),
"title_re": ("name_re", None),
"process": ("pid", None),
"visible_only": ("visible", {True: True, False: None}),
"enabled_only": ("enabled", {True: True, False: None}),
"top_level_only": ("depth", {True: 1, False: None}),
}
def __init__(self, handle=None):
"""Create element by handle (default is root element)"""
if handle is None:
self._handle = self.atspi_accessible.get_desktop(0)
else:
self._handle = handle
# Cache non-mutable element IDs
self._pid = self.atspi_accessible.get_process_id(self._handle, None)
self._root_id = self.atspi_accessible.get_id(self._handle, None)
self._runtime_id = self.atspi_accessible.get_index_in_parent(self._handle, None)
def __get_elements(self, root, tree, **kwargs):
tree.append(root)
for el in root.children(**kwargs):
self.__get_elements(el, tree, **kwargs)
def __hash__(self):
"""Return a unique hash value based on the element's handle"""
return hash((self._pid, self._root_id, self._runtime_id))
def __eq__(self, other):
"""Check if two AtspiElementInfo objects describe the same element"""
if not isinstance(other, AtspiElementInfo):
return False
if self.control_type == "Application" and other.control_type == "Application":
return self.process_id == other.process_id
return self.rectangle == other.rectangle
def __ne__(self, other):
"""Check if two AtspiElementInfo objects describe different elements"""
return not (self == other)
@staticmethod
def _get_states_as_string(states):
string_states = []
for i, state in AtspiStateEnum.items():
if states & (1 << i):
string_states.append(state)
return string_states
@property
def handle(self):
"""Return the handle of the window"""
return self._handle
@property
def name(self):
"""Return the text of the window"""
return self.atspi_accessible.get_name(self._handle, None).decode(encoding='UTF-8')
@property
def control_id(self):
"""Return the ID of the window"""
return self.atspi_accessible.get_role(self._handle, None)
@property
def runtime_id(self):
"""Return the runtime ID of the element"""
return self._runtime_id
@property
def process_id(self):
"""Return the ID of process that controls this window"""
return self._pid
pid = process_id
@property
def class_name(self):
"""Return the class name of the element"""
role = self.atspi_accessible.get_role_name(self._handle, None)
return "".join([part.capitalize() for part in role.decode("utf-8").split()])
@property
def rich_text(self):
"""Return the text of the element"""
return self.name
@property
def control_type(self):
"""Return the class name of the element"""
role_id = self.atspi_accessible.get_role(self._handle, None)
try:
return IATSPI().known_control_type_ids[role_id]
except KeyError:
raise NotImplementedError('Unknown role ID has been retrieved: {0}'.format(role_id))
@property
def parent(self):
"""Return the parent of the element"""
if self == AtspiElementInfo():
return None
return AtspiElementInfo(self.atspi_accessible.get_parent(self._handle, None))
def children(self, **kwargs):
"""Return children of the element"""
process = kwargs.get("process", None)
class_name = kwargs.get("class_name", None)
name = kwargs.get("name", None)
control_type = kwargs.get("control_type", None)
cnt = self.atspi_accessible.get_child_count(self._handle, None)
childrens = []
for i in range(cnt):
child = AtspiElementInfo(self.atspi_accessible.get_child_at_index(self._handle, i, None))
if class_name is not None and class_name != child.class_name:
continue
if name is not None and name != child.rich_text:
continue
if control_type is not None and control_type != child.control_type:
continue
if process is not None and process != child.process_id:
continue
childrens.append(child)
return childrens
@property
def component(self):
component = self.atspi_accessible.get_component(self._handle)
return AtspiComponent(component)
def descendants(self, **kwargs):
"""Return descendants of the element"""
tree = []
for obj in self.children(**kwargs):
self.__get_elements(obj, tree, **kwargs)
depth = kwargs.get("depth", None)
tree = self.filter_with_depth(tree, self, depth)
return tree
def description(self):
return self.atspi_accessible.get_description(self._handle, None).decode(encoding='UTF-8')
def framework_id(self):
return self.atspi_accessible.get_toolkit_version(self._handle, None).decode(encoding='UTF-8')
def framework_name(self):
return self.atspi_accessible.get_toolkit_name(self._handle, None).decode(encoding='UTF-8')
def atspi_version(self):
return self.atspi_accessible.get_atspi_version(self._handle, None).decode(encoding='UTF-8')
def get_layer(self):
"""Return rectangle of element"""
if self.control_type == "Application":
return self.children()[0].get_layer()
return self.component.get_layer()
def get_order(self):
if self.control_type == "Application":
return self.children()[0].get_order()
return self.component.get_mdi_z_order()
def get_state_set(self):
val = self.atspi_accessible.get_state_set(self.handle)
return self._get_states_as_string(val.contents.states)
def get_action(self):
if self.atspi_accessible.is_action(self.handle):
return AtspiAction(self.atspi_accessible.get_action(self.handle))
else:
return None
def get_value_property(self):
return AtspiValue(self.atspi_accessible.get_value(self.handle))
@property
def visible(self):
states = self.get_state_set()
if self.control_type == "Application":
children = self.children()
if children:
states = children[0].get_state_set()
else:
return False
return "STATE_VISIBLE" in states and "STATE_SHOWING" in states and "STATE_ICONIFIED" not in states
def set_cache_strategy(self, cached):
"""Set a cache strategy for frequently used attributes of the element"""
pass # TODO: implement a cache strategy for atspi elements
@property
def enabled(self):
states = self.get_state_set()
if self.control_type == "Application":
children = self.children()
if children:
states = children[0].get_state_set()
else:
return False
return "STATE_ENABLED" in states
@property
def rectangle(self):
"""Return rectangle of element"""
if self.control_type == "Application":
            # Application objects don't have a rectangle. It's just a fake container which contains
            # base application info such as process ID, window name etc. Will return the application frame rectangle
children = self.children()
if children:
return self.children()[0].rectangle
else:
return RECT()
elif self.control_type == "Invalid":
return RECT()
return self.component.get_rectangle(coord_type="screen")
def get_value(self):
return AtspiValue(self.atspi_accessible.get_value(self.handle))
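# Minimal usage sketch (assumes a running AT-SPI accessibility session on Linux;
# not part of the original module):
#     root = AtspiElementInfo()          # desktop element
#     apps = root.children()             # one child per accessible application
#     print([(app.name, app.process_id) for app in apps])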
|
the-stack_106_21077
|
from MessageSentiment import MessageSentiment
from Author import Author
from Message import Message
from HelperFunctions import find_author, make_authors
import re, datetime, numpy, json, nltk, pickle, sys, math
class ChatStat:
"""Base class for ChatStat"""
__version__ = '1.0'
def __init__(self, raw_messages, mood_training_strength = 1000):
if raw_messages is None:
raise ValueError("No messages provided")
# Parse messages from bytes-like objects to string, and remove whitespace
self.raw_messages = [msg.decode("utf-8").strip() for msg in raw_messages]
self.authors = make_authors(self.raw_messages)
self.parsed_messages = self.parse_messages()
self.make_leave_counts()
self.populate_enumerable_properties()
self.message_classifier = MessageSentiment(mood_training_strength)
def parse_messages(self):
"""Combines multi line messages into one line. Returns array of Message objects."""
ret = []
for line in self.raw_messages:
try:
# Check for non-time stamped lines (so a multi-line message), add to prev msg
if not(re.match(r'\d{4}', line)):
ret[-1] += line
# check for author (denoted by "Author: Message Text")
elif ':' in line.split(' - ')[1]:
ret.append(line)
except TypeError:
continue
except IndexError:
# Occurs for special messages like addresses that start with new line
ret[-1] += line
return [Message(idx, message_text, self.authors) for (idx, message_text) in enumerate(ret)]
def make_leave_counts(self):
"""Takes raw messages, and returns dictionary of leave counts by author."""
for line in self.raw_messages:
if re.match(r'\d{4}', line) and ' left' in line and not(':' in line.split(' - ')[1]):
try:
author = find_author(line.split(' - ')[1].split(' left')[0].strip(), self.authors)
author.leave_count += 1
except ValueError:
# This was a person who never sent any messages, so ignore
pass
def populate_enumerable_properties(self):
"""Loop through messages, find who kills conversation, update author messages,
and update mentions for that author
"""
for (idx, msg) in enumerate(self.parsed_messages):
# Update messages for author
msg.author._messages.append(msg)
# Ignore first message, and if prev msg was same person
if idx == 0: continue
prev_msg = self.parsed_messages[idx-1]
if msg.author == prev_msg.author: continue
msg_dt = datetime.datetime.strptime(msg.get_date_time_text, '%Y-%m-%d %I:%M %p')
prev_msg_dt = datetime.datetime.strptime(prev_msg.get_date_time_text, '%Y-%m-%d %I:%M %p')
time_delta = (msg_dt - prev_msg_dt).total_seconds()
prev_msg.author._time_deltas.append(time_delta)
@property
def messages_by_month(self):
months = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0}
for m in self.parsed_messages:
months[m.month] += 1
return months
@property
def messages_by_day(self):
days = {}
for m in self.parsed_messages:
if not(m.day_of_week in days):
days[m.day_of_week] = 0
days[m.day_of_week] += 1
return days
@property
def messages_by_time(self):
times = {}
for m in self.parsed_messages:
if not(m.time in times):
times[m.time] = 0
times[m.time] += 1
return times
@property
def convo_killer(self):
sorted_authors = sorted(self.authors, key=lambda x: x.get_avg_response_time)
return [{'author': a, 'avg_response_time': a.get_avg_response_time} for a in sorted_authors]
@property
def total_number_of_posts(self):
return len(self.parsed_messages)
def ashton(self):
ret = {}
last_auth = None
for a in self.parsed_messages:
if not(a.author in ret):
ret[a.author] = 0
if last_auth is None or last_auth != a.author:
last_auth = a.author
ret[a.author] += 1
return sorted([{'author': a, 'ashton_adjusted_count': ct} for (a, ct) in ret.items()], key=lambda x: x['ashton_adjusted_count'], reverse=True)
@property
def post_count_by_author(self):
return list(map(lambda x: {'author': x, 'count': x.message_count},
sorted(self.authors, key=lambda x: x.message_count, reverse=True)))
@property
def longest_messages(self):
return list(
map(lambda x: {'author': x, 'longest_msg': x.longest_message, 'longest_msg_len': len(x.longest_message.text)},
sorted(self.authors, key=lambda x: len(x.longest_message.text), reverse=True)))
@property
def shortest_messages(self):
return list(
map(lambda x: {'author': x, 'shortest_msg': x.shortest_message, 'shortest_msg_len': len(x.shortest_message.text)},
sorted(self.authors, key=lambda x: len(x.shortest_message.text), reverse=False)))
@property
def average_author_sentiment(self):
ret = {}
skip_count = 0
for author in self.authors:
ret[author] = {'positive': 0, 'negative': 0, 'neutral': 0, 'negativity_ratio': 0}
for msg in author._messages:
if msg.mood is None:
skip_count += 1
else:
ret[author][msg.mood[0]] += 1
try:
ret[author]['negativity_ratio'] = ret[author]['negative'] / ret[author]['positive']
except ZeroDivisionError:
ret[author]['negativity_ratio'] = 0
if skip_count != 0:
print("{} messages were skipped, as they have not been classified".format(skip_count))
return sorted([{'author': a, 'message_mood': data} for (a, data) in ret.items()], key=lambda x: x['message_mood']['negativity_ratio'], reverse=True)
@property
def leave_counts(self):
x = [{'author': a.name, 'rage_quit_count': a.leave_count} for a in self.authors]
return sorted(x, key=lambda x: x['rage_quit_count'])
def classify_messages(self):
if input("This may take awhile for a lot of messages. Proceed? [y / (n)]") == "y":
one_bar = round(self.total_number_of_posts / 20)
for (i, msg) in enumerate(self.parsed_messages):
msg.mood = self.message_classifier.get_mood(msg.text)
progress = '#' * math.floor(i / one_bar) + ' ' * (19 - (round(i / one_bar)))
sys.stdout.write("Classifying Messages [{}] {:06d}/{:06d}\r".format(progress, i+1, self.total_number_of_posts))
sys.stdout.flush()
print("\n")
else:
print("Aborting message classification")
def __repr__(self):
return "<ChatStat Object: {} messages>".format(len(self))
def __len__(self):
return self.total_number_of_posts
# with open('chat.txt') as data:
# raw_data = data.readlines()
# x = ChatStat(raw_data)
# print(x.messages_by_month)
# x.classify_messages()
# print(x.average_author_sentiment)
# for msg in x.parsed_messages:
# print (msg.text[0:50], "\t", msg.mood)
# print(x.authors[7].message_length_histogram)
# print (len(x.authors[7].message_length_histogram))
# y = MessageSentiment()
# print (x.authors[7].message_length_stdev)
# for author in x.authors:
# print(author.name, "\n", len(author.longest_message.text), "\n", author.longest_message.text,"\n\n")
# What can be done:
# messages_by_month
# messages_by_day
# messages_by_time
# convo_killer
# total_number_of_posts
# post_count_by_author
# for any author::::
# message_count
# longest_message
# shortest_message
# get_max_response_time
# get_min_response_time
# get_avg_response_time
# plot leave count vs messages sent
# import matplotlib.pyplot as plt
# with open('chat2.txt') as data:
# raw_data = data.readlines()
# messages = ChatStat(raw_data)
# print(messages.ashton())
|
the-stack_106_21080
|
import networkx as nx
import markdown as md
def parse_concepts(filename):
"""Takes a markdown file with with a certain structure
and parses it to separate the concept and the relations
between the concepts.
Structure:
# [Title]
## [Concept]
[Some text]
[Even Latex math]
### [Any subtitle]
### Utiliza:
- [Related concept 1]
- [Related concept 2]
## [Concept]
...
    The function returns an array of dicts and a string with the Title.
    Each dictionary corresponds to a concept and has keys for:
id -> The position of the dict in the array. This is useful to build the network.
name -> The title of the concept. What appears as [Concept] in the structure.
uses -> Array of the indexes of the concepts in the "Utiliza:" section.
    content -> All of the plain text between the Concept title and the "Utiliza:" section.
"""
# Open the markdown file
with open(filename, "r") as file:
text = file.read()
# Create list of concepts and save title
Concepts = []
index = 0
sections = text.split("\n## ")
Title = sections[0].strip("# ").strip("\n")
for con in sections[1:]:
concept = {}
lines = [i for i in con.split("\n") if i != ""]
concept["id"] = index
concept["name"] = lines[0]
try:
end_index = lines.index("### Utiliza:")
concept["uses"] = [line.strip("- ") for line in lines[end_index+1:]]
        except ValueError:  # no "### Utiliza:" section for this concept
concept["uses"] = []
end_index = len(lines)
concept["content"] = "\n".join(lines[1:end_index])
concept["content"] = "##"+concept["name"]+ "\n" + concept["content"]
Concepts.append(concept)
index += 1
# Update relative indexes
for con in Concepts:
uses_index = []
for i in Concepts:
if i["name"] in con["uses"]:
uses_index.append(i["id"])
con["uses"] = uses_index
return Concepts, Title
def build_concept_network(filename):
"""
Uses NetworkX to build a network of concepts with the data
parsed from the file passed as argument.
    The network only saves connections; it does not keep the direction
    of the connections.
    Returns a tuple with:
    - NetworkX graph object
    - Number of nodes
    - The list of concept dictionaries
- The title for the network
"""
concept_data, Title = parse_concepts(filename)
N = len(concept_data)
G = nx.Graph()
G.add_nodes_from(list(range(0, N)))
# Build edges
for concept in concept_data:
for use in concept["uses"]:
G.add_edge(concept["id"], use)
return (G, N, concept_data, Title)
def get_graph_data(filename):
""" Wrapper for the build_concept_network and parse_concepts functions.
    Returns all necessary data to draw the graph.
"""
G, N, concept_data, Title = build_concept_network(filename)
titles = [concept["name"] for concept in concept_data]
html_content = [md.markdown(concept["content"]) for concept in concept_data]
    # Get connection info about the nodes
node_conections = [list(nx.neighbors(G, n)) for n in range(N)]
node_conectivity = [len(con) for con in node_conections]
return (G, N, concept_data, Title, titles, html_content, node_conections, node_conectivity)
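# ---------------------------------------------------------------------------
# Minimal usage sketch (hypothetical file name "concepts.md", following the
# structure documented in parse_concepts; not part of the original module):
if __name__ == "__main__":
    G, N, concepts, title, titles, html, connections, connectivity = get_graph_data("concepts.md")
    print(title, N)
    print(list(zip(titles, connectivity)))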
|
the-stack_106_21082
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0046_articlepage_include_main_image_overlay'),
]
operations = [
migrations.AddField(
model_name='topiclistpage',
name='articles_per_page',
field=models.IntegerField(default=20),
),
]
|
the-stack_106_21083
|
import re
import pygraphviz as pgv
import sys
from ..graph_helper import get_label, reserve_and_get_next_available_numbered_node_name, attr_encode, attr_decode
from .. import DecisionGraphNode, decision_graph_from_agraph
class DecisionBaseNode(DecisionGraphNode):
def __init__(self, taken_succs=(), unsat_succs=(), **data):
super(DecisionBaseNode, self).__init__()
self._data = data
self.taken_successors = taken_succs
self.unsat_successors = unsat_succs
def set_successors(self, succs):
raise ValueError("set_successors() should not be used on a DecisionBaseNode! Got: {}".format(succs))
def get_successors(self):
return self.taken_successors + self.unsat_successors
@classmethod
def from_graph(cls, g, node_name):
"""
:param g:
:type g: pgv.AGraph
:param node_name:
:return:
"""
name_match = re.match(cls.get_node_name_regex(), node_name)
if not name_match:
raise ValueError("Trying to parse a node with invalid name as {}: {}".format(cls, node_name))
outbound = [(g.get_edge(edge[0], edge[1]), decision_graph_from_agraph(g, edge[1])) for edge in g.out_edges()]
taken_succs = [node for edge, node in outbound if edge.attr['color'] == 'green']
unsat_succs = [node for edge, node in outbound if edge.attr['color'] == 'red']
decoded_data = attr_decode(get_label(g, node_name))
instance = cls(taken_succs=taken_succs, unsat_succs=unsat_succs, **decoded_data)
return instance
def to_graph(self, g, node_to_name_map):
name = node_to_name_map[self]
try:
g.add_node(name, shape='diamond', label=attr_encode(self._data))
except:
import sys, traceback, ipdb
type, value, tb = sys.exc_info()
traceback.print_exc()
ipdb.post_mortem(tb)
for succ in self.taken_successors:
g.add_edge(name, node_to_name_map[succ], color='green')
for succ in self.unsat_successors:
g.add_edge(name, node_to_name_map[succ], color='red')
return name
def update_successors(self, visitor, replacements):
"""
:type visitor: DecisionGraphVisitor
        :type replacements: dict mapping each node to an iterable of replacement nodes
:return:
"""
taken_succs = [x for succ in self.taken_successors for x in replacements[succ]]
unsat_succs = [x for succ in self.unsat_successors for x in replacements[succ]]
self.taken_successors = taken_succs
self.unsat_successors = unsat_succs
|
the-stack_106_21084
|
import FWCore.ParameterSet.Config as cms
from TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagatorAny_cfi import *
cosmicMuonsBarrelOnlyFilter = cms.EDFilter("HLTMuonPointingFilter",
SALabel = cms.InputTag("cosmicMuons"),
PropagatorName = cms.string("SteppingHelixPropagatorAny"),
radius = cms.double(10.0),
maxZ = cms.double(50.0),
)
cosmicMuonsFilter = cms.EDFilter("HLTMuonPointingFilter",
SALabel = cms.InputTag("cosmicMuons"),
PropagatorName = cms.string("SteppingHelixPropagatorAny"),
radius = cms.double(10.0),
maxZ = cms.double(50.0),
)
cosmicMuons1LegFilter = cms.EDFilter("HLTMuonPointingFilter",
SALabel = cms.InputTag("cosmicMuons1Leg"),
PropagatorName = cms.string("SteppingHelixPropagatorAny"),
radius = cms.double(10.0),
maxZ = cms.double(50.0),
)
globalCosmicMuonsBarrelOnlyFilter = cms.EDFilter("HLTMuonPointingFilter",
SALabel = cms.InputTag("globalCosmicMuons"),
PropagatorName = cms.string("SteppingHelixPropagatorAny"),
radius = cms.double(10.0),
maxZ = cms.double(50.0),
)
cosmictrackfinderP5Filter = cms.EDFilter("HLTMuonPointingFilter",
SALabel = cms.InputTag("cosmictrackfinderP5"),
PropagatorName = cms.string("SteppingHelixPropagatorAny"),
radius = cms.double(10.0),
maxZ = cms.double(50.0),
)
globalCosmicMuonsFilter = cms.EDFilter("HLTMuonPointingFilter",
SALabel = cms.InputTag("globalCosmicMuons"),
PropagatorName = cms.string("SteppingHelixPropagatorAny"),
radius = cms.double(10.0),
maxZ = cms.double(50.0),
)
rsWithMaterialTracksP5Filter = cms.EDFilter("HLTMuonPointingFilter",
SALabel = cms.InputTag("rsWithMaterialTracksP5"),
PropagatorName = cms.string("SteppingHelixPropagatorAny"),
radius = cms.double(10.0),
maxZ = cms.double(50.0),
)
globalCosmicMuons1LegFilter = cms.EDFilter("HLTMuonPointingFilter",
SALabel = cms.InputTag("globalCosmicMuons1Leg"),
PropagatorName = cms.string("SteppingHelixPropagatorAny"),
radius = cms.double(10.0),
maxZ = cms.double(50.0),
)
ctfWithMaterialTracksP5Filter = cms.EDFilter("HLTMuonPointingFilter",
SALabel = cms.InputTag("ctfWithMaterialTracksP5"),
PropagatorName = cms.string("SteppingHelixPropagatorAny"),
radius = cms.double(10.0),
maxZ = cms.double(50.0),
)
cosmicMuonsBarrelOnlySequence = cms.Sequence(cosmicMuonsBarrelOnlyFilter)
cosmicMuonsSequence = cms.Sequence(cosmicMuonsFilter)
cosmicMuons1LegSequence = cms.Sequence(cosmicMuons1LegFilter)
globalCosmicMuonsBarrelOnlySequence = cms.Sequence(globalCosmicMuonsBarrelOnlyFilter)
cosmictrackfinderP5Sequence = cms.Sequence(cosmictrackfinderP5Filter)
globalCosmicMuonsSequence = cms.Sequence(globalCosmicMuonsFilter)
rsWithMaterialTracksP5Sequence = cms.Sequence(rsWithMaterialTracksP5Filter)
globalCosmicMuons1LegSequence = cms.Sequence(globalCosmicMuons1LegFilter)
ctfWithMaterialTracksP5Sequence = cms.Sequence(ctfWithMaterialTracksP5Filter)
|
the-stack_106_21085
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions often receive an image, perform some visualization on the image.
The functions do not return a value, instead they modify the image itself.
"""
import collections
import functools
# Set headless-friendly backend.
import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
import tensorflow.compat.v1 as tf
from utils.object_detection import shape_utils
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image, boxes, classes, scores, category_index=category_index, **kwargs)
def _visualize_boxes_and_masks(image, boxes, classes, scores, masks,
category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
**kwargs)
def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints,
category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
keypoints=keypoints,
**kwargs)
def _visualize_boxes_and_masks_and_keypoints(
image, boxes, classes, scores, masks, keypoints, category_index, **kwargs):
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
**kwargs)
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
if instance_masks is not None and keypoints is None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_masks,
category_index=category_index,
**visualization_keyword_args)
elems = [
true_shapes, original_shapes, images, boxes, classes, scores,
instance_masks
]
elif instance_masks is None and keypoints is not None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_keypoints,
category_index=category_index,
**visualization_keyword_args)
elems = [
true_shapes, original_shapes, images, boxes, classes, scores, keypoints
]
elif instance_masks is not None and keypoints is not None:
visualize_boxes_fn = functools.partial(
_visualize_boxes_and_masks_and_keypoints,
category_index=category_index,
**visualization_keyword_args)
elems = [
true_shapes, original_shapes, images, boxes, classes, scores,
instance_masks, keypoints
]
else:
visualize_boxes_fn = functools.partial(
_visualize_boxes,
category_index=category_index,
**visualization_keyword_args)
elems = [
true_shapes, original_shapes, images, boxes, classes, scores
]
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(
image_and_detections[2], [true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
def draw_keypoints_on_image_array(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
      values of either 0 or 1.
color: color to draw the keypoints with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_scores=False,
skip_labels=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
boxes and plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None
use_normalized_coordinates: whether boxes is to be interpreted as
normalized coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in category_index.keys():
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100*scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100*scores[i]))
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates)
return image
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
/ cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot('111')
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(
fig.canvas.tostring_rgb(), dtype='uint8').reshape(
1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
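# ------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream module).
# It draws one labeled, normalized box onto a synthetic black image using only
# functions defined above plus numpy/PIL; the output path is arbitrary.
if __name__ == '__main__':
  demo_image = np.zeros((200, 300, 3), dtype=np.uint8)
  draw_bounding_box_on_image_array(
      demo_image, 0.25, 0.25, 0.75, 0.75,
      color='LimeGreen', thickness=2,
      display_str_list=['demo: 87%'],
      use_normalized_coordinates=True)
  Image.fromarray(demo_image).save('/tmp/demo_boxes.png')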
|
the-stack_106_21086
|
# Time: O((m * n) * (m * n)!)
# Space: O((m * n) * (m * n)!)
# On a 2x3 board, there are 5 tiles represented by the integers 1 through 5,
# and an empty square represented by 0.
#
# A move consists of choosing 0 and a 4-directionally adjacent number and swapping it.
#
# The state of the board is solved if and only if the board is [[1,2,3],[4,5,0]].
#
# Given a puzzle board, return the least number of moves required
# so that the state of the board is solved. If it is impossible
# for the state of the board to be solved, return -1.
#
# Examples:
#
# Input: board = [[1,2,3],[4,0,5]]
# Output: 1
# Explanation: Swap the 0 and the 5 in one move.
# Input: board = [[1,2,3],[5,4,0]]
# Output: -1
# Explanation: No number of moves will make the board solved.
# Input: board = [[4,1,2],[5,0,3]]
# Output: 5
# Explanation: 5 is the smallest number of moves that solves the board.
# An example path:
# After move 0: [[4,1,2],[5,0,3]]
# After move 1: [[4,1,2],[0,5,3]]
# After move 2: [[0,1,2],[4,5,3]]
# After move 3: [[1,0,2],[4,5,3]]
# After move 4: [[1,2,0],[4,5,3]]
# After move 5: [[1,2,3],[4,5,0]]
# Input: board = [[3,2,4],[1,5,0]]
# Output: 14
#
# Note:
# - board will be a 2 x 3 array as described above.
# - board[i][j] will be a permutation of [0, 1, 2, 3, 4, 5].
import heapq
import itertools
# A* Search Algorithm
class Solution(object):
def slidingPuzzle(self, board):
"""
:type board: List[List[int]]
:rtype: int
"""
def dot(p1, p2):
return p1[0]*p2[0]+p1[1]*p2[1]
def heuristic_estimate(board, R, C, expected):
result = 0
for i in xrange(R):
for j in xrange(C):
val = board[C*i + j]
if val == 0: continue
r, c = expected[val]
result += abs(r-i) + abs(c-j)
return result
R, C = len(board), len(board[0])
begin = tuple(itertools.chain(*board))
end = tuple(range(1, R*C) + [0])
expected = {(C*i+j+1) % (R*C) : (i, j)
for i in xrange(R) for j in xrange(C)}
min_steps = heuristic_estimate(begin, R, C, expected)
closer, detour = [(begin.index(0), begin)], []
lookup = set()
while True:
if not closer:
if not detour:
return -1
min_steps += 2
closer, detour = detour, closer
zero, board = closer.pop()
if board == end:
return min_steps
if board not in lookup:
lookup.add(board)
r, c = divmod(zero, C)
for direction in ((-1, 0), (1, 0), (0, -1), (0, 1)):
i, j = r+direction[0], c+direction[1]
if 0 <= i < R and 0 <= j < C:
new_zero = i*C+j
tmp = list(board)
tmp[zero], tmp[new_zero] = tmp[new_zero], tmp[zero]
new_board = tuple(tmp)
r2, c2 = expected[board[new_zero]]
r1, c1 = divmod(zero, C)
r0, c0 = divmod(new_zero, C)
is_closer = dot((r1-r0, c1-c0), (r2-r0, c2-c0)) > 0
(closer if is_closer else detour).append((new_zero, new_board))
return min_steps
# Time: O((m * n) * (m * n)! * log((m * n)!))
# Space: O((m * n) * (m * n)!)
# A* Search Algorithm
class Solution2(object):
def slidingPuzzle(self, board):
"""
:type board: List[List[int]]
:rtype: int
"""
def heuristic_estimate(board, R, C, expected):
result = 0
for i in xrange(R):
for j in xrange(C):
val = board[C*i + j]
if val == 0: continue
r, c = expected[val]
result += abs(r-i) + abs(c-j)
return result
R, C = len(board), len(board[0])
begin = tuple(itertools.chain(*board))
end = tuple(range(1, R*C) + [0])
end_wrong = tuple(range(1, R*C-2) + [R*C-1, R*C-2, 0])
expected = {(C*i+j+1) % (R*C) : (i, j)
for i in xrange(R) for j in xrange(C)}
min_heap = [(0, 0, begin.index(0), begin)]
lookup = {begin: 0}
while min_heap:
f, g, zero, board = heapq.heappop(min_heap)
if board == end: return g
if board == end_wrong: return -1
if f > lookup[board]: continue
r, c = divmod(zero, C)
for direction in ((-1, 0), (1, 0), (0, -1), (0, 1)):
i, j = r+direction[0], c+direction[1]
if 0 <= i < R and 0 <= j < C:
new_zero = C*i+j
tmp = list(board)
tmp[zero], tmp[new_zero] = tmp[new_zero], tmp[zero]
new_board = tuple(tmp)
f = g+1+heuristic_estimate(new_board, R, C, expected)
if f < lookup.get(new_board, float("inf")):
lookup[new_board] = f
heapq.heappush(min_heap, (f, g+1, new_zero, new_board))
return -1
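# Hedged usage sketch (added for illustration; not part of the original file).
# The expected outputs are taken from the examples quoted in the problem
# statement at the top of this file.
if __name__ == "__main__":
    print(Solution().slidingPuzzle([[1, 2, 3], [4, 0, 5]]))   # 1
    print(Solution().slidingPuzzle([[1, 2, 3], [5, 4, 0]]))   # -1
    print(Solution2().slidingPuzzle([[4, 1, 2], [5, 0, 3]]))  # 5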
|
the-stack_106_21089
|
def test_create_yaml_files(tmp_path):
from infra2salt.utility import create_yaml_files, get_yaml_files
input_data = {"first":
{
"first_a": "AAA",
"first_b": "BBB",
"first_c": "CCC"
},
"second": {
"second_a": "AAA",
"second_b": "BBB",
"second_c": "CCC"
}
}
create_yaml_files(str(tmp_path), input_data)
result = get_yaml_files(str(tmp_path))
assert len(result['first.yml']) == 3
assert len(result['second.yml']) == 3
|
the-stack_106_21092
|
#!/usr/bin/python
#coding:utf-8
import os
import time
from selenium import webdriver
import codecs
# const variable
SIZE_WIDTH = 780
SIZE_HEIGHT = 480
POS_WIDTH = 520
POS_HEIGHT = 1
# Handles various kinds of browser interaction
class Browser():
    # Initial browser setup
def __init__(self):
        # Define the browser
# obj_options = webdriver.ChromeOptions()
# obj_options.add_argument('--disable-javascript')
# self.obj_browser = webdriver.Chrome(executable_path=r".\\chromedriver_win32\\chromedriver.exe", chrome_options=obj_options)
self.obj_browser = webdriver.Chrome(executable_path=r".\\chromedriver_win32\\chromedriver.exe")
        # File path used to write out the received response
self.str_html_file_path = os.path.join('.\\result', 'response.html')
    # Launch the browser for the first time
def start_browser(self):
# print self.obj_browser.get_window_size(windowHandle='current')
# print self.obj_browser.get_window_position(windowHandle='current')
self.obj_browser.set_window_size(SIZE_WIDTH, SIZE_HEIGHT)
self.obj_browser.set_window_position(POS_WIDTH, POS_HEIGHT)
self.obj_browser.get("file://C:\\Users\\itaka\\PycharmProjects\\saivs\\result\\response.html")
    # Write the response to a file
def write_response_to_html(self, str_response, str_charset):
obj_fout = codecs.open(self.str_html_file_path, 'w', str_charset)
obj_fout.write(str_response)
obj_fout.close()
    # Open the response in the browser
def refresh_browser(self):
        # Refresh the page
try:
self.obj_browser.refresh()
except:
self.obj_browser.switch_to.alert.accept()
    # Close the driver
def close_browser(self):
        # Close
self.obj_browser.close()
|
the-stack_106_21097
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import yaml
from gdwrap.Gdwrap import Gdwrap
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
KEY_FILE = os.path.join(CURRENT_DIR, 'keys', 'keys.json')
CREDENTIAL_FILE = os.path.join(CURRENT_DIR, 'keys', 'credential.json')
def main():
gd = Gdwrap(KEY_FILE, CREDENTIAL_FILE)
# get config from yaml file
    # In a real use case, it is recommended to get this from the Gdwrap.file_list response.
config = yaml.load(open(os.path.join(CURRENT_DIR, 'config', 'config.yml'), 'r', -1, 'utf-8'))
item_id = config['file_download']['item_id']
item_name = config['file_download']['item_name']
download_dir = os.path.join(CURRENT_DIR, 'download')
res = gd.file_download(item_id, item_name, download_dir)
print(res)
if __name__ == '__main__':
main()
|
the-stack_106_21099
|
import numpy as np
from .. import utils
class SDFT(utils.Window):
"""Sliding Discrete Fourier Transform (SDFT).
Initially, the coefficients are all equal to 0, up until enough values have been seen. A call
to `numpy.fft.fft` is triggered once ``window_size`` values have been seen. Subsequent values
will update the coefficients online. This is much faster than recomputing an FFT from scratch
for every new value.
Parameters:
window_size (int): The size of the window.
Attributes:
fft (numpy array of complex numbers): The Fourier components.
Example:
::
>>> X = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> window_size = 5
>>> sdft = SDFT(window_size)
>>> for i, x in enumerate(X):
... sdft = sdft.update(x)
...
... if i + 1 >= window_size:
... assert np.allclose(
... sdft.fft,
... np.fft.fft(X[i+1 - window_size:i+1])
... )
"""
def __init__(self, window_size):
super().__init__(window_size=window_size)
self.fft = np.zeros(window_size)
def update(self, x):
# Simply append the new value if the window isn't full yet
if len(self) < self.window_size - 1:
self.append(x)
# Compute an initial FFT the first time the window is full
elif len(self) == self.window_size - 1:
self.append(x)
self.fft = np.fft.fft(self).tolist()
# Update the coefficients for subsequent values
else:
diff = x - self[0]
for i in range(self.window_size):
self.fft[i] = (self.fft[i] + diff) * np.exp(2j * np.pi * i / self.window_size)
self.append(x)
return self
|
the-stack_106_21101
|
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from spiders.tweet import TweetSpider
from spiders.comment import CommentSpider
from spiders.follower import FollowerSpider
from spiders.user import UserSpider
from spiders.fan import FanSpider
from spiders.repost import RepostSpider
from bson import ObjectId
from pymongo import MongoClient
user = 'weibo'
pwd = '123456'
host = '127.0.0.1'
port = '27017'
db_name = 'weibo'
uri = "mongodb://%s:%s@%s" % (user, pwd, host + ":" + port + "/" + db_name)
client = MongoClient(uri)
mongodb = client.weibo
if __name__ == '__main__':
mode = sys.argv[1]
os.environ['SCRAPY_SETTINGS_MODULE'] = f'settings'
settings = get_project_settings()
process = CrawlerProcess(settings)
mode_to_spider = {
'comment': CommentSpider,
'fan': FanSpider,
'follow': FollowerSpider,
'tweet': TweetSpider,
'user': UserSpider,
'repost': RepostSpider,
}
list1 = mongodb['Relationships'].find().distinct("fan_id")
if mode == "user":
list2 = mongodb['uid_list'].find().distinct("uid")
# list2 = mongodb['Users'].find().distinct("_id")
elif mode == "fan":
# list2 = mongodb['Relationships'].find().distinct("follow_id")
list2 = mongodb['uid_list'].find().distinct("uid")
elif mode == "follow":
list2 = mongodb['Users'].find().distinct("_id")
# list2 = mongodb['User'].find()
elif mode == "tweet":
list2 = mongodb['tweet'].find().distinct("user_id")
# diff_list = list(set(list1) - set(list2))
diff_list = list2
# print(len(diff_list))
# print(diff_list)
# diff_list = ['1193491727', '1900093290', '5726715057', '2683882661', '1880143303']
    # Call the process pool's map_async() method: it takes a function (the spider function) and a list (user IDs).
    # Per the standard library docs, map_async is used as: p.map_async(func, [1, 2, 3]);
    # it then takes each element of the list in turn as the argument, executing func(1), func(2), func(3).
process.crawl(mode_to_spider[mode], diff_list)
# the script will block here until the crawling is finished
process.start()
|
the-stack_106_21103
|
from ..utils import current_ts, get_class
from .config import Configurable
from .datasets import BaseDataset
class BaseTask(Configurable):
def __init__(self, model, config):
super().__init__(config)
self.model = model
# generate a task ID if not specified
id = self.config.id
if not id:
id = str(self.generate_task_id())
self.id = id
@staticmethod
def create_dataset(dataset_config):
""" Builds the input dataset using the provided configuration. """
classname, config = dataset_config.classname, dataset_config.config
cls = get_class(classname)
if not issubclass(cls, BaseDataset):
raise ValueError("Data input class does not inherit from BaseInput.")
dataset = cls(config)
return dataset
def generate_task_id(self):
"""Generates a task ID"""
return current_ts()
def run(self):
"""Runs the task."""
raise NotImplementedError
|
the-stack_106_21105
|
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import chardet
import os
import Image
import logging
from mediagoblin import mg_globals as mgg
from mediagoblin.processing import create_pub_filepath
from mediagoblin.media_types.ascii import asciitoimage
_log = logging.getLogger(__name__)
SUPPORTED_EXTENSIONS = ['txt', 'asc', 'nfo']
def sniff_handler(media_file, **kw):
if kw.get('media') is not None:
name, ext = os.path.splitext(kw['media'].filename)
clean_ext = ext[1:].lower()
if clean_ext in SUPPORTED_EXTENSIONS:
return True
return False
def process_ascii(entry):
'''
Code to process a txt file
'''
ascii_config = mgg.global_config['media_type:mediagoblin.media_types.ascii']
workbench = mgg.workbench_manager.create_workbench()
# Conversions subdirectory to avoid collisions
conversions_subdir = os.path.join(
workbench.dir, 'conversions')
os.mkdir(conversions_subdir)
queued_filepath = entry.queued_media_file
queued_filename = workbench.localized_file(
mgg.queue_store, queued_filepath,
'source')
queued_file = file(queued_filename, 'rb')
with queued_file:
queued_file_charset = chardet.detect(queued_file.read())
# Only select a non-utf-8 charset if chardet is *really* sure
# Tested with "Feli\x0109an superjaron", which was detecte
if queued_file_charset['confidence'] < 0.9:
interpreted_charset = 'utf-8'
else:
interpreted_charset = queued_file_charset['encoding']
_log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
queued_file_charset,
interpreted_charset))
queued_file.seek(0) # Rewind the queued file
thumb_filepath = create_pub_filepath(
entry, 'thumbnail.png')
tmp_thumb_filename = os.path.join(
conversions_subdir, thumb_filepath[-1])
ascii_converter_args = {}
if ascii_config['thumbnail_font']:
ascii_converter_args.update(
{'font': ascii_config['thumbnail_font']})
converter = asciitoimage.AsciiToImage(
**ascii_converter_args)
thumb = converter._create_image(
queued_file.read())
with file(tmp_thumb_filename, 'w') as thumb_file:
thumb.thumbnail(
(mgg.global_config['media:thumb']['max_width'],
mgg.global_config['media:thumb']['max_height']),
Image.ANTIALIAS)
thumb.save(thumb_file)
_log.debug('Copying local file to public storage')
mgg.public_store.copy_local_to_storage(
tmp_thumb_filename, thumb_filepath)
queued_file.seek(0)
original_filepath = create_pub_filepath(entry, queued_filepath[-1])
with mgg.public_store.get_file(original_filepath, 'wb') \
as original_file:
original_file.write(queued_file.read())
queued_file.seek(0) # Rewind *again*
unicode_filepath = create_pub_filepath(entry, 'ascii-portable.txt')
with mgg.public_store.get_file(unicode_filepath, 'wb') \
as unicode_file:
# Decode the original file from its detected charset (or UTF8)
# Encode the unicode instance to ASCII and replace any non-ASCII
# with an HTML entity (&#
unicode_file.write(
unicode(queued_file.read().decode(
interpreted_charset)).encode(
'ascii',
'xmlcharrefreplace'))
mgg.queue_store.delete_file(queued_filepath)
entry.queued_media_file = []
media_files_dict = entry.setdefault('media_files', {})
media_files_dict['thumb'] = thumb_filepath
media_files_dict['unicode'] = unicode_filepath
media_files_dict['original'] = original_filepath
entry.save()
|
the-stack_106_21106
|
#!/usr/bin/env python
# coding: utf-8
# In[9]:
import cv2
import numpy as np
import os
import pytesseract
from collections import Counter
# In[10]:
CONFIDENCE = 0.5 #threshold probability for a label
SCORE_THRESHOLD = 0.5 #a threshold used to filter boxes by score.
IOU_THRESHOLD = 0.5 #threshold intersection value for multiple bounding
config_path = "yolov3-tiny-obj.cfg"
weights = "yolov3-tiny-obj_last.weights"
labels = open("obj.names").read().strip().split("\n")
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")
# In[11]:
net = cv2.dnn.readNetFromDarknet(config_path, weights)
# In[14]:
table = str.maketrans({"(":None, ")":None, "{": None, "}": None, "[": None, "]": None, "|":None, ",":None, "=":None})
table = str.maketrans(dict.fromkeys("(){}[]|,="))
def detect():
cap = cv2.VideoCapture(0)
count = 0
folder_path = "Output" # path of output folder
while True:
ret, image = cap.read()
h,w = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1/255.0, (416,416), swapRB = True, crop = False)
net.setInput(blob)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
layer_outputs = net.forward(ln)
boxes, confidences, class_ids = [], [], []
for output in layer_outputs:
for detection in output:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence>CONFIDENCE:
box = detection[:4]*np.array([w,h,w,h])
(centerX, centerY, width, height) = box.astype("int")
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
boxes.append([x,y,int(width), int(height)])
confidences.append(float(confidence))
class_ids.append(class_id)
idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)
font_scale = 1
thickness = 1
if len(idxs)>0:
for i in idxs.flatten():
count = count + 1
img_path = folder_path + "/" + str(count) + ".jpg"
x,y = boxes[i][0], boxes[i][1]
w,h = boxes[i][2], boxes[i][3]
if x>0 and y>0:
cropped = image[y:y+h, x:x+w]
cv2.imwrite(img_path, cropped)
color = [int(c) for c in colors[class_ids[i]]]
cv2.rectangle(image, (x,y), (x+w, y+h), color = color, thickness= thickness)
text = f"{labels[class_ids[i]]}: {confidences[i]:.2f}"
(text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
text_offset_x = x
text_offset_y = y - 5
box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
overlay = image.copy()
cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale, color=(0, 0, 0), thickness=thickness)
cv2.imshow("output", image)
if cv2.waitKey(1)==13:
break
cap.release()
cv2.destroyAllWindows()
l = []
for filename in os.listdir(folder_path):
img_path = folder_path + "/" + filename
text = pytesseract.image_to_string(img_path)
if len(text)>=10:
text = text.translate(table)
l.append(text)
c = Counter(l)
for item,count in c.items():
if count>=2:
print(item)
# In[15]:
detect()
|
the-stack_106_21109
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "exconfig-"
cfg.versionfile_source = "exconfig/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyway.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
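# Illustrative sketch (added for clarity, with a hypothetical pieces dict; not
# part of the upstream code): given
#     pieces = {"closest-tag": "1.2", "distance": 3, "short": "abc1234",
#               "long": "<full revision id>", "dirty": True, "error": None,
#               "date": None}
# the renderers above would yield, for example:
#     render(pieces, "pep440")["version"]       -> "1.2+3.gabc1234.dirty"
#     render(pieces, "pep440-post")["version"]  -> "1.2.post3.dev0+gabc1234"
#     render(pieces, "git-describe")["version"] -> "1.2-3-gabc1234-dirty"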
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
|
the-stack_106_21110
|
# -*- coding: utf-8 -*-
import os
import shutil
import pytest
from constants import (
FILE,
FILE_EXISTS,
FILE_NOT_FOUND,
FILES,
FOLDER,
FOLDERS,
SCOPE,
TEST_DATE,
TEST_DIR,
does_not_raise,
)
from setups import (
mk_dir,
setup_contains,
setup_ls,
setup_mk,
setup_rm,
setup_since,
)
@pytest.fixture(scope=SCOPE, params=[
('{0}/empty_dir-ls'.format(TEST_DIR), {FOLDERS: set(), FILES: set()}),
('{0}/folders-ls'.format(TEST_DIR), {FOLDERS: {FOLDER}, FILES: set()}),
('{0}/files-ls'.format(TEST_DIR), {FOLDERS: set(), FILES: {FILE}}),
(
'{0}/files_folders-ls'.format(TEST_DIR),
{FOLDERS: {'dir1', 'dir2'}, FILES: {'file1', 'file2'}},
),
])
def fixture_ls(request):
"""A fixture to test the ls function."""
mk_dir(TEST_DIR)
setup_ls(request.param)
yield request.param
def teardown():
"""Delete the test folder after tests execution."""
if os.path.exists(TEST_DIR):
shutil.rmtree(TEST_DIR)
@pytest.fixture(scope=SCOPE, params=[
('{0}/тест-мк.txt'.format(TEST_DIR), does_not_raise()),
('{0}/test-mk.txt'.format(TEST_DIR), does_not_raise()),
('{0}/test-mk.txt'.format(TEST_DIR), FILE_EXISTS),
('{0}/..'.format(TEST_DIR), pytest.raises(Exception)),
])
def fixture_mk(request):
"""A fixture to test the mk function."""
mk_dir(TEST_DIR)
setup_mk(request.param)
yield request.param
teardown()
@pytest.fixture(scope=SCOPE, params=[
(FILE, '{0}/test-rm.txt'.format(TEST_DIR), does_not_raise()),
(FOLDER, '{0}/folder-rm'.format(TEST_DIR), pytest.raises(Exception)),
(FILE, '{0}/nonexistent-rm.txt'.format(TEST_DIR), FILE_NOT_FOUND),
])
def fixture_rm(request):
"""A fixture to test the rm function."""
mk_dir(TEST_DIR)
setup_rm(request.param)
yield request.param
teardown()
@pytest.fixture(scope=SCOPE, params=[
(FILE, '{0}/test-contains.txt'.format(TEST_DIR), '1'),
(FILE, '{0}/nonexistent-contains.txt'.format(TEST_DIR), '0'),
(FOLDER, '{0}/test-contains'.format(TEST_DIR), '0'),
])
def fixture_contains(request):
"""A fixture to test the contains function."""
mk_dir(TEST_DIR)
setup_contains(request.param)
yield request.param
teardown()
@pytest.fixture(scope=SCOPE, params=[
(
'{0}/empty_dir-since'.format(TEST_DIR),
{FOLDERS: set(), FILES: set()},
TEST_DATE,
does_not_raise(),
),
(
'{0}/folders-since'.format(TEST_DIR),
{FOLDERS: {FOLDER}, FILES: set()},
TEST_DATE,
does_not_raise(),
),
(
'{0}/files-since'.format(TEST_DIR),
{FOLDERS: set(), FILES: {FILE}},
TEST_DATE,
does_not_raise(),
),
(
'{0}/files_folders-since'.format(TEST_DIR),
{FOLDERS: {'dir1', 'dir2'}, FILES: {'file1', 'file2'}},
TEST_DATE,
does_not_raise(),
),
(
'{0}/other-since'.format(TEST_DIR),
{FOLDERS: set(), FILES: set()},
'not-valid-date',
pytest.raises(Exception),
),
])
def fixture_since(request):
    """A fixture to test the since function."""
mk_dir(TEST_DIR)
setup_since(request.param)
yield request.param
teardown()
@pytest.fixture(scope=SCOPE, params=[
# command, argument, expected result
('ls', '', 0),
('mk', 'test.py', 0),
('contains', 'test.py', 0),
('since', TEST_DATE, 0),
('rm', 'test.py', 0),
])
def fixture_integration(request):
"""A fixture to perform integration testing."""
yield request.param
teardown()
|
the-stack_106_21111
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import os
from setuptools import setup, find_packages
__version__ = '2.5.5'
requirements_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'requirements.txt')
with open(requirements_path) as requirements_file:
requirements = requirements_file.readlines()
setup(
name='amundsen-metadata',
version=__version__,
description='Metadata service for Amundsen',
url='https://www.github.com/amundsen-io/amundsenmetadatalibrary',
maintainer='Amundsen TSC',
maintainer_email='[email protected]',
packages=find_packages(exclude=['tests*']),
include_package_data=True,
zip_safe=False,
dependency_links=[],
install_requires=requirements,
extras_require={
'oidc': ['flaskoidc==0.1.1']
},
python_requires=">=3.6",
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
|
the-stack_106_21113
|
class Solution:
def reverse(self, x: int) -> int:
        '''
        Problem: Given a 32-bit signed integer, reverse digits of an integer.
        Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−2^31, 2^31 − 1].
        For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.
        Approach: record the sign first, convert the absolute value to a list of characters, call reverse(), then join and convert back to an integer.
        '''
# my function starts
        if x >= 0:
            sflag = True
        else:
            sflag = False
        xabsS = str(abs(x))
        xVList = list(xabsS)
        xVList.reverse()
        # Note: .reverse() returns None; xVList itself has been reversed in place
        SR = "".join(xVList)
        xVR = int(SR)
        if not sflag:
            xVR = -xVR
        if xVR < -(2**31) or xVR > (2**31 - 1):
            return 0
            # Note: not only must x fit in 32 bits; if the reversed value falls
            # outside the 32-bit range, 0 is returned as well
else:
return xVR
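# Illustrative usage only (not part of the original submission): a quick sanity
# check of Solution.reverse when the file is run directly as a script.
if __name__ == '__main__':
    s = Solution()
    print(s.reverse(123))         # 321
    print(s.reverse(-120))        # -21
    print(s.reverse(1534236469))  # 0, since 9646324351 overflows 32 bits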
|
the-stack_106_21114
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
'''
#-- @testpoint: the openGauss keyword "every" (non-reserved) used as a directory object name
'''
import unittest
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
constant = Constant()
class Hostname(unittest.TestCase):
    def setUp(self):
        logger.info("------------------------ Opengauss_Function_Keyword_Every_Case0020 start --------------------------")
    # Keyword as a directory object name without double quotes - succeeds
def test_every_1(self):
SqlMdg = commonsh.execut_db_sql('''create directory every as '/tmp/';
drop directory every;''')
logger.info(SqlMdg)
self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg)
self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg)
    # Keyword as a directory object name with double quotes - succeeds
def test_every_2(self):
SqlMdg = commonsh.execut_db_sql('''create directory "every" as '/tmp/';
drop directory "every";''')
logger.info(SqlMdg)
self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg)
self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg)
    # Keyword as a directory object name with single quotes - fails with a reasonable error
def test_every_3(self):
SqlMdg = commonsh.execut_db_sql('''drop directory if exists 'every';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
SqlMdg = commonsh.execut_db_sql(''' create directory 'every' as '/tmp/';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
    # Keyword as a directory object name with backquotes - fails with a reasonable error
def test_every_4(self):
SqlMdg = commonsh.execut_db_sql('''drop directory if exists \`every\`;''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
SqlMdg = commonsh.execut_db_sql('''create directory \`every\` as '/tmp/';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
def tearDown(self):
        logger.info('------------------------ Opengauss_Function_Keyword_Every_Case0020 finished --------------------------')
|
the-stack_106_21115
|
'''
:created: 2019-09-24
@author: Leandro (cerberus1746) Benedet Garcia'''
from pathlib import Path
from typing import Union, Mapping, Iterable, List, Dict, Any
PathType = Union[Path, str]
DataDict = List[Dict[str, Any]]
JsonValue = Union[int, str]
JsonMapping = Mapping[str, JsonValue]
JsonTypes = Union[JsonMapping, int, str]
JsonIterable = Iterable[JsonTypes]
JsonInput = Union[str, JsonMapping, JsonIterable]
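# Illustrative sketch only (the helper below is hypothetical, not part of this
# module): the aliases above are meant for loader-style signatures such as
#
#     def load_records(source: PathType) -> DataDict:
#         """Parse *source* into a list of JSON-style dicts."""
#         ...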
|
the-stack_106_21116
|
"""
:codeauthor: Alexander Schwartz <[email protected]>
"""
import salt.client
from salt.cloud.clouds import saltify
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import ANY, MagicMock, patch
from tests.support.unit import TestCase
TEST_PROFILES = {
"testprofile1": NotImplemented,
"testprofile2": { # this profile is used in test_saltify_destroy()
"ssh_username": "fred",
"remove_config_on_destroy": False, # expected for test
"shutdown_on_destroy": True, # expected value for test
},
"testprofile3": { # this profile is used in test_create_wake_on_lan()
"wake_on_lan_mac": "aa-bb-cc-dd-ee-ff",
"wol_sender_node": "friend1",
"wol_boot_wait": 0.01, # we want the wait to be very short
},
}
TEST_PROFILE_NAMES = ["testprofile1", "testprofile2", "testprofile3"]
class SaltifyTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.cloud.clouds.saltify
"""
LOCAL_OPTS = {
"providers": {
"sfy1": {"saltify": {"driver": "saltify", "profiles": TEST_PROFILES}},
},
"profiles": TEST_PROFILES,
"sock_dir": "/var/sockxxx",
"transport": "tcp",
}
def setup_loader_modules(self):
saltify_globals = {
"__active_provider_name__": "",
"__utils__": {
"cloud.bootstrap": MagicMock(),
"cloud.fire_event": MagicMock(),
},
"__opts__": self.LOCAL_OPTS,
}
return {saltify: saltify_globals}
def test_create_no_deploy(self):
"""
Test if deployment fails. This is the most basic test as saltify doesn't contain much logic
"""
with patch("salt.cloud.clouds.saltify._verify", MagicMock(return_value=True)):
vm = {"deploy": False, "driver": "saltify", "name": "dummy"}
self.assertTrue(saltify.create(vm))
def test_create_and_deploy(self):
"""
Test if deployment can be done.
"""
mock_cmd = MagicMock(return_value=True)
with patch.dict(
"salt.cloud.clouds.saltify.__utils__", {"cloud.bootstrap": mock_cmd}
):
vm_ = {
"deploy": True,
"driver": "saltify",
"name": "new2",
"profile": "testprofile2",
}
result = saltify.create(vm_)
mock_cmd.assert_called_once_with(vm_, ANY)
self.assertTrue(result)
def test_create_no_ssh_host(self):
"""
Test that ssh_host is set to the vm name if not defined
"""
mock_cmd = MagicMock(return_value=True)
with patch.dict(
"salt.cloud.clouds.saltify.__utils__", {"cloud.bootstrap": mock_cmd}
):
vm_ = {
"deploy": True,
"driver": "saltify",
"name": "new2",
"profile": "testprofile2",
}
result = saltify.create(vm_)
mock_cmd.assert_called_once_with(vm_, ANY)
assert result
# Make sure that ssh_host was added to the vm. Note that this is
# done in two asserts so that the failure is more explicit about
# what is wrong. If ssh_host wasn't inserted in the vm_ dict, the
# failure would be a KeyError, which would be harder to
# troubleshoot.
assert "ssh_host" in vm_
assert vm_["ssh_host"] == "new2"
def test_create_wake_on_lan(self):
"""
Test if wake on lan works
"""
mock_sleep = MagicMock()
mock_cmd = MagicMock(return_value=True)
mm_cmd = MagicMock(return_value={"friend1": True})
with salt.client.LocalClient() as lcl:
lcl.cmd = mm_cmd
with patch("time.sleep", mock_sleep):
with patch("salt.client.LocalClient", return_value=lcl):
with patch.dict(
"salt.cloud.clouds.saltify.__utils__",
{"cloud.bootstrap": mock_cmd},
):
vm_ = {
"deploy": True,
"driver": "saltify",
"name": "new1",
"profile": "testprofile3",
}
result = saltify.create(vm_)
mock_cmd.assert_called_once_with(vm_, ANY)
mm_cmd.assert_called_with(
"friend1", "network.wol", ["aa-bb-cc-dd-ee-ff"]
)
                        # The test suite might call time.sleep; look for any call
                        # that has the expected wait time.
mock_sleep.assert_any_call(0.01)
self.assertTrue(result)
def test_avail_locations(self):
"""
Test the avail_locations will always return {}
"""
self.assertEqual(saltify.avail_locations(), {})
def test_avail_sizes(self):
"""
Test the avail_sizes will always return {}
"""
self.assertEqual(saltify.avail_sizes(), {})
def test_avail_images(self):
"""
Test the avail_images will return profiles
"""
testlist = list(TEST_PROFILE_NAMES) # copy
self.assertEqual(saltify.avail_images()["Profiles"].sort(), testlist.sort())
def test_list_nodes(self):
"""
Test list_nodes will return required fields only
"""
testgrains = {
"nodeX1": {
"id": "nodeX1",
"ipv4": ["127.0.0.1", "192.1.2.22", "172.16.17.18"],
"ipv6": ["::1", "fdef:bad:add::f00", "3001:DB8::F00D"],
"salt-cloud": {
"driver": "saltify",
"provider": "saltyfy",
"profile": "testprofile2",
},
"extra_stuff": "does not belong",
}
}
expected_result = {
"nodeX1": {
"id": "nodeX1",
"image": "testprofile2",
"private_ips": ["172.16.17.18", "fdef:bad:add::f00"],
"public_ips": ["192.1.2.22", "3001:DB8::F00D"],
"size": "",
"state": "running",
}
}
mm_cmd = MagicMock(return_value=testgrains)
with salt.client.LocalClient() as lcl:
lcl.cmd = mm_cmd
with patch("salt.client.LocalClient", return_value=lcl):
self.assertEqual(saltify.list_nodes(), expected_result)
def test_saltify_reboot(self):
mm_cmd = MagicMock(return_value=True)
with salt.client.LocalClient() as lcl:
lcl.cmd = mm_cmd
with patch("salt.client.LocalClient", return_value=lcl):
result = saltify.reboot("nodeS1", "action")
mm_cmd.assert_called_with("nodeS1", "system.reboot")
self.assertTrue(result)
def test_saltify_destroy(self):
# destroy calls local.cmd several times and expects
# different results, so we will provide a list of
# results. Each call will get the next value.
# NOTE: this assumes that the call order never changes,
# so to keep things simple, we will not use remove_config...
result_list = [
{
"nodeS1": { # first call is grains.get
"driver": "saltify",
"provider": "saltify",
"profile": "testprofile2",
}
},
# Note:
# testprofile2 has remove_config_on_destroy: False
# and shutdown_on_destroy: True
{
"nodeS1": ( # last call shuts down the minion
"a system.shutdown worked message"
)
},
]
mm_cmd = MagicMock(side_effect=result_list)
with salt.client.LocalClient() as lcl:
lcl.cmd = mm_cmd
with patch("salt.client.LocalClient", return_value=lcl):
result = saltify.destroy("nodeS1", "action")
mm_cmd.assert_called_with("nodeS1", "system.shutdown")
self.assertTrue(result)
|
the-stack_106_21118
|
#!/usr/bin/env python
# coding: UTF-8
import glob
from PIL import Image, ImageChops
def crop_alpha(img):
bg = Image.new(img.mode, img.size, (0, 0, 0, 0))
diff = ImageChops.difference(img, bg)
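    # diff is non-zero only where img differs from the fully transparent canvas,
    # so getbbox() below yields the bounding box of the non-empty pixels
    # (or None when the image is entirely transparent).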
bbox = diff.getbbox()
if bbox:
return img.crop(bbox)
return img
def align_resize(img, size=512):
if img.size == (size, size):
print('the same size')
return img
rate = 1.0
if img.size[0] > img.size[1]:
rate = float(size) / img.size[0]
else:
rate = float(size) / img.size[1]
print(rate)
img = img.resize((int(img.size[0] * rate), int(img.size[1] * rate)),
Image.ANTIALIAS)
canvasImg = Image.new("RGBA", (size, size))
    # // (integer division) because paste() expects integer pixel offsets
    canvasImg.paste(img, ((size - img.size[0]) // 2,
                          (size - img.size[1]) // 2))
    return canvasImg
if __name__ == '__main__':
paths = glob.glob('source/*.png')
for path in paths:
img = Image.open(path)
img = crop_alpha(img)
img = align_resize(img, 256)
        img.save(path)
        print('saved : %s' % path)
|
the-stack_106_21121
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.storage.account_data import AccountDataWorkerStore
from synapse.util.caches.descriptors import cached
from twisted.internet import defer
from canonicaljson import json
import logging
from six.moves import range
logger = logging.getLogger(__name__)
class TagsWorkerStore(AccountDataWorkerStore):
@cached()
def get_tags_for_user(self, user_id):
"""Get all the tags for a user.
Args:
user_id(str): The user to get the tags for.
Returns:
A deferred dict mapping from room_id strings to dicts mapping from
tag strings to tag content.
"""
deferred = self._simple_select_list(
"room_tags", {"user_id": user_id}, ["room_id", "tag", "content"]
)
@deferred.addCallback
def tags_by_room(rows):
tags_by_room = {}
for row in rows:
room_tags = tags_by_room.setdefault(row["room_id"], {})
room_tags[row["tag"]] = json.loads(row["content"])
return tags_by_room
return deferred
@defer.inlineCallbacks
def get_all_updated_tags(self, last_id, current_id, limit):
"""Get all the client tags that have changed on the server
Args:
last_id(int): The position to fetch from.
current_id(int): The position to fetch up to.
Returns:
A deferred list of tuples of stream_id int, user_id string,
room_id string, tag string and content string.
"""
if last_id == current_id:
defer.returnValue([])
def get_all_updated_tags_txn(txn):
sql = (
"SELECT stream_id, user_id, room_id"
" FROM room_tags_revisions as r"
" WHERE ? < stream_id AND stream_id <= ?"
" ORDER BY stream_id ASC LIMIT ?"
)
txn.execute(sql, (last_id, current_id, limit))
return txn.fetchall()
tag_ids = yield self.runInteraction(
"get_all_updated_tags", get_all_updated_tags_txn
)
def get_tag_content(txn, tag_ids):
sql = (
"SELECT tag, content"
" FROM room_tags"
" WHERE user_id=? AND room_id=?"
)
results = []
for stream_id, user_id, room_id in tag_ids:
txn.execute(sql, (user_id, room_id))
tags = []
for tag, content in txn:
tags.append(json.dumps(tag) + ":" + content)
tag_json = "{" + ",".join(tags) + "}"
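                # builds the room's tag JSON by hand, e.g.
                # ['"m.favourite":{"order":1}'] -> '{"m.favourite":{"order":1}}'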
results.append((stream_id, user_id, room_id, tag_json))
return results
batch_size = 50
results = []
for i in range(0, len(tag_ids), batch_size):
tags = yield self.runInteraction(
"get_all_updated_tag_content",
get_tag_content,
tag_ids[i:i + batch_size],
)
results.extend(tags)
defer.returnValue(results)
@defer.inlineCallbacks
def get_updated_tags(self, user_id, stream_id):
"""Get all the tags for the rooms where the tags have changed since the
given version
Args:
user_id(str): The user to get the tags for.
stream_id(int): The earliest update to get for the user.
Returns:
A deferred dict mapping from room_id strings to lists of tag
strings for all the rooms that changed since the stream_id token.
"""
def get_updated_tags_txn(txn):
sql = (
"SELECT room_id from room_tags_revisions"
" WHERE user_id = ? AND stream_id > ?"
)
txn.execute(sql, (user_id, stream_id))
room_ids = [row[0] for row in txn]
return room_ids
changed = self._account_data_stream_cache.has_entity_changed(
user_id, int(stream_id)
)
if not changed:
defer.returnValue({})
room_ids = yield self.runInteraction(
"get_updated_tags", get_updated_tags_txn
)
results = {}
if room_ids:
tags_by_room = yield self.get_tags_for_user(user_id)
for room_id in room_ids:
results[room_id] = tags_by_room.get(room_id, {})
defer.returnValue(results)
def get_tags_for_room(self, user_id, room_id):
"""Get all the tags for the given room
Args:
user_id(str): The user to get tags for
room_id(str): The room to get tags for
Returns:
A deferred list of string tags.
"""
return self._simple_select_list(
table="room_tags",
keyvalues={"user_id": user_id, "room_id": room_id},
retcols=("tag", "content"),
desc="get_tags_for_room",
).addCallback(lambda rows: {
row["tag"]: json.loads(row["content"]) for row in rows
})
class TagsStore(TagsWorkerStore):
@defer.inlineCallbacks
def add_tag_to_room(self, user_id, room_id, tag, content):
"""Add a tag to a room for a user.
Args:
user_id(str): The user to add a tag for.
room_id(str): The room to add a tag for.
tag(str): The tag name to add.
content(dict): A json object to associate with the tag.
Returns:
A deferred that completes once the tag has been added.
"""
content_json = json.dumps(content)
def add_tag_txn(txn, next_id):
self._simple_upsert_txn(
txn,
table="room_tags",
keyvalues={
"user_id": user_id,
"room_id": room_id,
"tag": tag,
},
values={
"content": content_json,
}
)
self._update_revision_txn(txn, user_id, room_id, next_id)
with self._account_data_id_gen.get_next() as next_id:
yield self.runInteraction("add_tag", add_tag_txn, next_id)
self.get_tags_for_user.invalidate((user_id,))
result = self._account_data_id_gen.get_current_token()
defer.returnValue(result)
@defer.inlineCallbacks
def remove_tag_from_room(self, user_id, room_id, tag):
"""Remove a tag from a room for a user.
Returns:
A deferred that completes once the tag has been removed
"""
def remove_tag_txn(txn, next_id):
sql = (
"DELETE FROM room_tags "
" WHERE user_id = ? AND room_id = ? AND tag = ?"
)
txn.execute(sql, (user_id, room_id, tag))
self._update_revision_txn(txn, user_id, room_id, next_id)
with self._account_data_id_gen.get_next() as next_id:
yield self.runInteraction("remove_tag", remove_tag_txn, next_id)
self.get_tags_for_user.invalidate((user_id,))
result = self._account_data_id_gen.get_current_token()
defer.returnValue(result)
def _update_revision_txn(self, txn, user_id, room_id, next_id):
"""Update the latest revision of the tags for the given user and room.
Args:
txn: The database cursor
user_id(str): The ID of the user.
room_id(str): The ID of the room.
next_id(int): The the revision to advance to.
"""
txn.call_after(
self._account_data_stream_cache.entity_has_changed,
user_id, next_id
)
update_max_id_sql = (
"UPDATE account_data_max_stream_id"
" SET stream_id = ?"
" WHERE stream_id < ?"
)
txn.execute(update_max_id_sql, (next_id, next_id))
update_sql = (
"UPDATE room_tags_revisions"
" SET stream_id = ?"
" WHERE user_id = ?"
" AND room_id = ?"
)
txn.execute(update_sql, (next_id, user_id, room_id))
if txn.rowcount == 0:
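            # No revision row exists yet for this (user, room) pair, so insert
            # one instead; together with the UPDATE above this acts as a manual upsert.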
insert_sql = (
"INSERT INTO room_tags_revisions (user_id, room_id, stream_id)"
" VALUES (?, ?, ?)"
)
try:
txn.execute(insert_sql, (user_id, room_id, next_id))
except self.database_engine.module.IntegrityError:
# Ignore insertion errors. It doesn't matter if the row wasn't
                # inserted because if two updates happened concurrently the one
# with the higher stream_id will not be reported to a client
# unless the previous update has completed. It doesn't matter
# which stream_id ends up in the table, as long as it is higher
# than the id that the client has.
pass
|
the-stack_106_21123
|
from entityservice.async_worker import celery, logger
from entityservice.database import DBConn, get_created_runs_and_queue, get_uploaded_encoding_sizes, \
get_project_schema_encoding_size, get_project_encoding_size, set_project_encoding_size, \
update_project_mark_all_runs_failed
from entityservice.models.run import progress_run_stage as progress_stage
from entityservice.settings import Config as config
from entityservice.tasks.base_task import TracedTask
from entityservice.error_checking import handle_invalid_encoding_data
from entityservice.tasks.run import prerun_check
from entityservice.utils import clks_uploaded_to_project
@celery.task(base=TracedTask, ignore_result=True, args_as_tags=('project_id',))
def check_for_executable_runs(project_id, parent_span=None):
"""
This is called when a run is posted (if project is ready for runs), and also
    after all data providers have uploaded CLKs and the CLKs are ready.
"""
log = logger.bind(pid=project_id)
log.debug("Checking for runs that need to be executed")
if not clks_uploaded_to_project(project_id, check_data_ready=True):
return
with DBConn() as conn:
try:
check_and_set_project_encoding_size(project_id, conn)
except ValueError as e:
log.warning(e.args[0])
# make sure this error can be exposed to user by marking the run/s as failed
update_project_mark_all_runs_failed(conn, project_id, str(e))
return
new_runs = get_created_runs_and_queue(conn, project_id)
log.debug("Progressing run stages")
for qr in new_runs:
# Record that the run has reached a new stage
run_id = qr[0]
progress_stage(conn, run_id)
# commit db changes before scheduling following tasks
log.debug("Creating tasks for {} created runs for project {}".format(len(new_runs), project_id))
for qr in new_runs:
run_id = qr[0]
log.info('Queueing run for computation', run_id=run_id)
prerun_check.delay(project_id, run_id, check_for_executable_runs.get_serialized_span())
def check_and_set_project_encoding_size(project_id, conn):
# Check for consistency between uploaded encodings and commit to a
# project encoding size if one wasn't provided in the linkage schema
log = logger.bind(pid=project_id)
uploaded_encoding_sizes = get_uploaded_encoding_sizes(conn, project_id)
first_uploaded_size = uploaded_encoding_sizes[0][1]
schema_encoding_size = get_project_schema_encoding_size(conn, project_id)
project_encoding_size = get_project_encoding_size(conn, project_id)
# In order of preference:
encoding_size = project_encoding_size or schema_encoding_size or first_uploaded_size
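    # e.g. project size unset, schema says 128, first upload is 128 -> use 128;
    # an explicitly committed project encoding size always wins.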
log.debug(f"Uploaded encoding sizes: {uploaded_encoding_sizes}")
log.debug(f"Encoding size set in schema: {schema_encoding_size}")
log.debug(f"Project encoding size: {project_encoding_size}")
log.info(f"Verifying uploads all have encoding size of {encoding_size} bytes.")
for dp_id, enc_size in uploaded_encoding_sizes:
if enc_size != encoding_size:
log.warning(f"Set the encodings' upload state to error for dp={dp_id} and aborting processing")
handle_invalid_encoding_data(project_id, dp_id)
raise ValueError("Mismatch in encoding sizes. Stopping")
if project_encoding_size is None:
set_project_encoding_size(conn, project_id, encoding_size)
if not config.MIN_ENCODING_SIZE <= encoding_size <= config.MAX_ENCODING_SIZE:
# Set all uploads to error state
for dp_id, _ in uploaded_encoding_sizes:
handle_invalid_encoding_data(project_id, dp_id)
raise ValueError("Encoding size out of configured bounds")
if encoding_size % 8:
raise ValueError("Encoding size must be multiple of 8 bytes (64 bits)")
|
the-stack_106_21124
|
from django.contrib.contenttypes.models import ContentType
from drf_yasg.utils import swagger_serializer_method
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from taggit_serializer.serializers import TaggitSerializer, TagListSerializerField
from dcim.choices import *
from dcim.constants import *
from dcim.models import (
Cable, ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate, Device, DeviceBay,
DeviceBayTemplate, DeviceType, DeviceRole, FrontPort, FrontPortTemplate, Interface, InterfaceTemplate,
Manufacturer, InventoryItem, Platform, PowerFeed, PowerOutlet, PowerOutletTemplate, PowerPanel, PowerPort,
PowerPortTemplate, Rack, RackGroup, RackReservation, RackRole, RearPort, RearPortTemplate, Region, Site,
VirtualChassis,
)
from extras.api.customfields import CustomFieldModelSerializer
from ipam.api.nested_serializers import NestedIPAddressSerializer, NestedVLANSerializer
from ipam.models import VLAN
from tenancy.api.nested_serializers import NestedTenantSerializer
from users.api.nested_serializers import NestedUserSerializer
from utilities.api import (
ChoiceField, ContentTypeField, SerializedPKRelatedField, TimeZoneField, ValidatedModelSerializer,
WritableNestedSerializer, get_serializer_for_model,
)
from virtualization.api.nested_serializers import NestedClusterSerializer
from .nested_serializers import *
class ConnectedEndpointSerializer(ValidatedModelSerializer):
connected_endpoint_type = serializers.SerializerMethodField(read_only=True)
connected_endpoint = serializers.SerializerMethodField(read_only=True)
connection_status = ChoiceField(choices=CONNECTION_STATUS_CHOICES, read_only=True)
def get_connected_endpoint_type(self, obj):
if hasattr(obj, 'connected_endpoint') and obj.connected_endpoint is not None:
return '{}.{}'.format(
obj.connected_endpoint._meta.app_label,
obj.connected_endpoint._meta.model_name
)
return None
@swagger_serializer_method(serializer_or_field=serializers.DictField)
def get_connected_endpoint(self, obj):
"""
Return the appropriate serializer for the type of connected object.
"""
if getattr(obj, 'connected_endpoint', None) is None:
return None
serializer = get_serializer_for_model(obj.connected_endpoint, prefix='Nested')
context = {'request': self.context['request']}
data = serializer(obj.connected_endpoint, context=context).data
return data
#
# Regions/sites
#
class RegionSerializer(serializers.ModelSerializer):
parent = NestedRegionSerializer(required=False, allow_null=True)
site_count = serializers.IntegerField(read_only=True)
class Meta:
model = Region
fields = ['id', 'name', 'slug', 'parent', 'description', 'site_count']
class SiteSerializer(TaggitSerializer, CustomFieldModelSerializer):
status = ChoiceField(choices=SiteStatusChoices, required=False)
region = NestedRegionSerializer(required=False, allow_null=True)
tenant = NestedTenantSerializer(required=False, allow_null=True)
time_zone = TimeZoneField(required=False)
tags = TagListSerializerField(required=False)
circuit_count = serializers.IntegerField(read_only=True)
device_count = serializers.IntegerField(read_only=True)
prefix_count = serializers.IntegerField(read_only=True)
rack_count = serializers.IntegerField(read_only=True)
virtualmachine_count = serializers.IntegerField(read_only=True)
vlan_count = serializers.IntegerField(read_only=True)
class Meta:
model = Site
fields = [
'id', 'name', 'slug', 'status', 'region', 'tenant', 'facility', 'asn', 'time_zone', 'description',
'physical_address', 'shipping_address', 'latitude', 'longitude', 'contact_name', 'contact_phone',
'contact_email', 'comments', 'tags', 'custom_fields', 'created', 'last_updated', 'circuit_count',
'device_count', 'prefix_count', 'rack_count', 'virtualmachine_count', 'vlan_count',
]
#
# Racks
#
class RackGroupSerializer(ValidatedModelSerializer):
site = NestedSiteSerializer()
parent = NestedRackGroupSerializer(required=False, allow_null=True)
rack_count = serializers.IntegerField(read_only=True)
class Meta:
model = RackGroup
fields = ['id', 'name', 'slug', 'site', 'parent', 'description', 'rack_count']
class RackRoleSerializer(ValidatedModelSerializer):
rack_count = serializers.IntegerField(read_only=True)
class Meta:
model = RackRole
fields = ['id', 'name', 'slug', 'color', 'description', 'rack_count']
class RackSerializer(TaggitSerializer, CustomFieldModelSerializer):
site = NestedSiteSerializer()
group = NestedRackGroupSerializer(required=False, allow_null=True, default=None)
tenant = NestedTenantSerializer(required=False, allow_null=True)
status = ChoiceField(choices=RackStatusChoices, required=False)
role = NestedRackRoleSerializer(required=False, allow_null=True)
type = ChoiceField(choices=RackTypeChoices, allow_blank=True, required=False)
width = ChoiceField(choices=RackWidthChoices, required=False)
outer_unit = ChoiceField(choices=RackDimensionUnitChoices, allow_blank=True, required=False)
tags = TagListSerializerField(required=False)
device_count = serializers.IntegerField(read_only=True)
powerfeed_count = serializers.IntegerField(read_only=True)
class Meta:
model = Rack
fields = [
'id', 'name', 'facility_id', 'display_name', 'site', 'group', 'tenant', 'status', 'role', 'serial',
'asset_tag', 'type', 'width', 'u_height', 'desc_units', 'outer_width', 'outer_depth', 'outer_unit',
'comments', 'tags', 'custom_fields', 'created', 'last_updated', 'device_count', 'powerfeed_count',
]
# Omit the UniqueTogetherValidator that would be automatically added to validate (group, facility_id). This
# prevents facility_id from being interpreted as a required field.
validators = [
UniqueTogetherValidator(queryset=Rack.objects.all(), fields=('group', 'name'))
]
def validate(self, data):
# Validate uniqueness of (group, facility_id) since we omitted the automatically-created validator from Meta.
if data.get('facility_id', None):
validator = UniqueTogetherValidator(queryset=Rack.objects.all(), fields=('group', 'facility_id'))
validator.set_context(self)
validator(data)
# Enforce model validation
super().validate(data)
return data
class RackUnitSerializer(serializers.Serializer):
"""
A rack unit is an abstraction formed by the set (rack, position, face); it does not exist as a row in the database.
"""
id = serializers.IntegerField(read_only=True)
name = serializers.CharField(read_only=True)
face = ChoiceField(choices=DeviceFaceChoices, read_only=True)
device = NestedDeviceSerializer(read_only=True)
class RackReservationSerializer(ValidatedModelSerializer):
rack = NestedRackSerializer()
user = NestedUserSerializer()
tenant = NestedTenantSerializer(required=False, allow_null=True)
class Meta:
model = RackReservation
fields = ['id', 'rack', 'units', 'created', 'user', 'tenant', 'description']
class RackElevationDetailFilterSerializer(serializers.Serializer):
q = serializers.CharField(
required=False,
default=None
)
face = serializers.ChoiceField(
choices=DeviceFaceChoices,
default=DeviceFaceChoices.FACE_FRONT
)
render = serializers.ChoiceField(
choices=RackElevationDetailRenderChoices,
default=RackElevationDetailRenderChoices.RENDER_JSON
)
unit_width = serializers.IntegerField(
default=RACK_ELEVATION_UNIT_WIDTH_DEFAULT
)
unit_height = serializers.IntegerField(
default=RACK_ELEVATION_UNIT_HEIGHT_DEFAULT
)
legend_width = serializers.IntegerField(
default=RACK_ELEVATION_LEGEND_WIDTH_DEFAULT
)
exclude = serializers.IntegerField(
required=False,
default=None
)
expand_devices = serializers.BooleanField(
required=False,
default=True
)
include_images = serializers.BooleanField(
required=False,
default=True
)
#
# Device types
#
class ManufacturerSerializer(ValidatedModelSerializer):
devicetype_count = serializers.IntegerField(read_only=True)
inventoryitem_count = serializers.IntegerField(read_only=True)
platform_count = serializers.IntegerField(read_only=True)
class Meta:
model = Manufacturer
fields = [
'id', 'name', 'slug', 'description', 'devicetype_count', 'inventoryitem_count', 'platform_count',
]
class DeviceTypeSerializer(TaggitSerializer, CustomFieldModelSerializer):
manufacturer = NestedManufacturerSerializer()
subdevice_role = ChoiceField(choices=SubdeviceRoleChoices, allow_blank=True, required=False)
tags = TagListSerializerField(required=False)
device_count = serializers.IntegerField(read_only=True)
class Meta:
model = DeviceType
fields = [
'id', 'manufacturer', 'model', 'slug', 'display_name', 'part_number', 'u_height', 'is_full_depth',
'subdevice_role', 'front_image', 'rear_image', 'comments', 'tags', 'custom_fields', 'created',
'last_updated', 'device_count',
]
class ConsolePortTemplateSerializer(ValidatedModelSerializer):
device_type = NestedDeviceTypeSerializer()
type = ChoiceField(
choices=ConsolePortTypeChoices,
allow_blank=True,
required=False
)
class Meta:
model = ConsolePortTemplate
fields = ['id', 'device_type', 'name', 'type']
class ConsoleServerPortTemplateSerializer(ValidatedModelSerializer):
device_type = NestedDeviceTypeSerializer()
type = ChoiceField(
choices=ConsolePortTypeChoices,
allow_blank=True,
required=False
)
class Meta:
model = ConsoleServerPortTemplate
fields = ['id', 'device_type', 'name', 'type']
class PowerPortTemplateSerializer(ValidatedModelSerializer):
device_type = NestedDeviceTypeSerializer()
type = ChoiceField(
choices=PowerPortTypeChoices,
allow_blank=True,
required=False
)
class Meta:
model = PowerPortTemplate
fields = ['id', 'device_type', 'name', 'type', 'maximum_draw', 'allocated_draw']
class PowerOutletTemplateSerializer(ValidatedModelSerializer):
device_type = NestedDeviceTypeSerializer()
type = ChoiceField(
choices=PowerOutletTypeChoices,
allow_blank=True,
required=False
)
power_port = NestedPowerPortTemplateSerializer(
required=False
)
feed_leg = ChoiceField(
choices=PowerOutletFeedLegChoices,
allow_blank=True,
required=False
)
class Meta:
model = PowerOutletTemplate
fields = ['id', 'device_type', 'name', 'type', 'power_port', 'feed_leg']
class InterfaceTemplateSerializer(ValidatedModelSerializer):
device_type = NestedDeviceTypeSerializer()
type = ChoiceField(choices=InterfaceTypeChoices)
class Meta:
model = InterfaceTemplate
fields = ['id', 'device_type', 'name', 'type', 'mgmt_only']
class RearPortTemplateSerializer(ValidatedModelSerializer):
device_type = NestedDeviceTypeSerializer()
type = ChoiceField(choices=PortTypeChoices)
class Meta:
model = RearPortTemplate
fields = ['id', 'device_type', 'name', 'type', 'positions']
class FrontPortTemplateSerializer(ValidatedModelSerializer):
device_type = NestedDeviceTypeSerializer()
type = ChoiceField(choices=PortTypeChoices)
rear_port = NestedRearPortTemplateSerializer()
class Meta:
model = FrontPortTemplate
fields = ['id', 'device_type', 'name', 'type', 'rear_port', 'rear_port_position']
class DeviceBayTemplateSerializer(ValidatedModelSerializer):
device_type = NestedDeviceTypeSerializer()
class Meta:
model = DeviceBayTemplate
fields = ['id', 'device_type', 'name']
#
# Devices
#
class DeviceRoleSerializer(ValidatedModelSerializer):
device_count = serializers.IntegerField(read_only=True)
virtualmachine_count = serializers.IntegerField(read_only=True)
class Meta:
model = DeviceRole
fields = [
'id', 'name', 'slug', 'color', 'vm_role', 'description', 'device_count', 'virtualmachine_count',
]
class PlatformSerializer(ValidatedModelSerializer):
manufacturer = NestedManufacturerSerializer(required=False, allow_null=True)
device_count = serializers.IntegerField(read_only=True)
virtualmachine_count = serializers.IntegerField(read_only=True)
class Meta:
model = Platform
fields = [
'id', 'name', 'slug', 'manufacturer', 'napalm_driver', 'napalm_args', 'description', 'device_count',
'virtualmachine_count',
]
class DeviceSerializer(TaggitSerializer, CustomFieldModelSerializer):
device_type = NestedDeviceTypeSerializer()
device_role = NestedDeviceRoleSerializer()
tenant = NestedTenantSerializer(required=False, allow_null=True)
platform = NestedPlatformSerializer(required=False, allow_null=True)
site = NestedSiteSerializer()
rack = NestedRackSerializer(required=False, allow_null=True)
face = ChoiceField(choices=DeviceFaceChoices, allow_blank=True, required=False)
status = ChoiceField(choices=DeviceStatusChoices, required=False)
primary_ip = NestedIPAddressSerializer(read_only=True)
primary_ip4 = NestedIPAddressSerializer(required=False, allow_null=True)
primary_ip6 = NestedIPAddressSerializer(required=False, allow_null=True)
parent_device = serializers.SerializerMethodField()
cluster = NestedClusterSerializer(required=False, allow_null=True)
virtual_chassis = NestedVirtualChassisSerializer(required=False, allow_null=True)
tags = TagListSerializerField(required=False)
class Meta:
model = Device
fields = [
'id', 'name', 'display_name', 'device_type', 'device_role', 'tenant', 'platform', 'serial', 'asset_tag',
'site', 'rack', 'position', 'face', 'parent_device', 'status', 'primary_ip', 'primary_ip4', 'primary_ip6',
'cluster', 'virtual_chassis', 'vc_position', 'vc_priority', 'comments', 'local_context_data', 'tags',
'custom_fields', 'created', 'last_updated',
]
validators = []
def validate(self, data):
# Validate uniqueness of (rack, position, face) since we omitted the automatically-created validator from Meta.
if data.get('rack') and data.get('position') and data.get('face'):
validator = UniqueTogetherValidator(queryset=Device.objects.all(), fields=('rack', 'position', 'face'))
validator.set_context(self)
validator(data)
# Enforce model validation
super().validate(data)
return data
@swagger_serializer_method(serializer_or_field=NestedDeviceSerializer)
def get_parent_device(self, obj):
try:
device_bay = obj.parent_bay
except DeviceBay.DoesNotExist:
return None
context = {'request': self.context['request']}
data = NestedDeviceSerializer(instance=device_bay.device, context=context).data
data['device_bay'] = NestedDeviceBaySerializer(instance=device_bay, context=context).data
return data
class DeviceWithConfigContextSerializer(DeviceSerializer):
config_context = serializers.SerializerMethodField()
class Meta(DeviceSerializer.Meta):
fields = [
'id', 'name', 'display_name', 'device_type', 'device_role', 'tenant', 'platform', 'serial', 'asset_tag',
'site', 'rack', 'position', 'face', 'parent_device', 'status', 'primary_ip', 'primary_ip4', 'primary_ip6',
'cluster', 'virtual_chassis', 'vc_position', 'vc_priority', 'comments', 'local_context_data', 'tags',
'custom_fields', 'config_context', 'created', 'last_updated',
]
@swagger_serializer_method(serializer_or_field=serializers.DictField)
def get_config_context(self, obj):
return obj.get_config_context()
class DeviceNAPALMSerializer(serializers.Serializer):
method = serializers.DictField()
class ConsoleServerPortSerializer(TaggitSerializer, ConnectedEndpointSerializer):
device = NestedDeviceSerializer()
type = ChoiceField(
choices=ConsolePortTypeChoices,
allow_blank=True,
required=False
)
cable = NestedCableSerializer(read_only=True)
tags = TagListSerializerField(required=False)
class Meta:
model = ConsoleServerPort
fields = [
'id', 'device', 'name', 'type', 'description', 'connected_endpoint_type', 'connected_endpoint',
'connection_status', 'cable', 'tags',
]
class ConsolePortSerializer(TaggitSerializer, ConnectedEndpointSerializer):
device = NestedDeviceSerializer()
type = ChoiceField(
choices=ConsolePortTypeChoices,
allow_blank=True,
required=False
)
cable = NestedCableSerializer(read_only=True)
tags = TagListSerializerField(required=False)
class Meta:
model = ConsolePort
fields = [
'id', 'device', 'name', 'type', 'description', 'connected_endpoint_type', 'connected_endpoint',
'connection_status', 'cable', 'tags',
]
class PowerOutletSerializer(TaggitSerializer, ConnectedEndpointSerializer):
device = NestedDeviceSerializer()
type = ChoiceField(
choices=PowerOutletTypeChoices,
allow_blank=True,
required=False
)
power_port = NestedPowerPortSerializer(
required=False
)
feed_leg = ChoiceField(
choices=PowerOutletFeedLegChoices,
allow_blank=True,
required=False
)
cable = NestedCableSerializer(
read_only=True
)
tags = TagListSerializerField(
required=False
)
class Meta:
model = PowerOutlet
fields = [
'id', 'device', 'name', 'type', 'power_port', 'feed_leg', 'description', 'connected_endpoint_type',
'connected_endpoint', 'connection_status', 'cable', 'tags',
]
class PowerPortSerializer(TaggitSerializer, ConnectedEndpointSerializer):
device = NestedDeviceSerializer()
type = ChoiceField(
choices=PowerPortTypeChoices,
allow_blank=True,
required=False
)
cable = NestedCableSerializer(read_only=True)
tags = TagListSerializerField(required=False)
class Meta:
model = PowerPort
fields = [
'id', 'device', 'name', 'type', 'maximum_draw', 'allocated_draw', 'description', 'connected_endpoint_type',
'connected_endpoint', 'connection_status', 'cable', 'tags',
]
class InterfaceSerializer(TaggitSerializer, ConnectedEndpointSerializer):
device = NestedDeviceSerializer()
type = ChoiceField(choices=InterfaceTypeChoices)
lag = NestedInterfaceSerializer(required=False, allow_null=True)
mode = ChoiceField(choices=InterfaceModeChoices, allow_blank=True, required=False)
untagged_vlan = NestedVLANSerializer(required=False, allow_null=True)
tagged_vlans = SerializedPKRelatedField(
queryset=VLAN.objects.all(),
serializer=NestedVLANSerializer,
required=False,
many=True
)
cable = NestedCableSerializer(read_only=True)
tags = TagListSerializerField(required=False)
count_ipaddresses = serializers.IntegerField(read_only=True)
class Meta:
model = Interface
fields = [
'id', 'device', 'name', 'type', 'enabled', 'lag', 'mtu', 'mac_address', 'mgmt_only', 'description',
'connected_endpoint_type', 'connected_endpoint', 'connection_status', 'cable', 'mode', 'untagged_vlan',
'tagged_vlans', 'tags', 'count_ipaddresses',
]
# TODO: This validation should be handled by Interface.clean()
def validate(self, data):
        # All associated VLANs must be either global or assigned to the parent device's site.
device = self.instance.device if self.instance else data.get('device')
untagged_vlan = data.get('untagged_vlan')
if untagged_vlan and untagged_vlan.site not in [device.site, None]:
raise serializers.ValidationError({
'untagged_vlan': "VLAN {} must belong to the same site as the interface's parent device, or it must be "
"global.".format(untagged_vlan)
})
for vlan in data.get('tagged_vlans', []):
if vlan.site not in [device.site, None]:
raise serializers.ValidationError({
'tagged_vlans': "VLAN {} must belong to the same site as the interface's parent device, or it must "
"be global.".format(vlan)
})
return super().validate(data)
class RearPortSerializer(TaggitSerializer, ValidatedModelSerializer):
device = NestedDeviceSerializer()
type = ChoiceField(choices=PortTypeChoices)
cable = NestedCableSerializer(read_only=True)
tags = TagListSerializerField(required=False)
class Meta:
model = RearPort
fields = ['id', 'device', 'name', 'type', 'positions', 'description', 'cable', 'tags']
class FrontPortRearPortSerializer(WritableNestedSerializer):
"""
    NestedRearPortSerializer but with the parent device omitted (since front and rear ports must belong to the same device)
"""
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rearport-detail')
class Meta:
model = RearPort
fields = ['id', 'url', 'name']
class FrontPortSerializer(TaggitSerializer, ValidatedModelSerializer):
device = NestedDeviceSerializer()
type = ChoiceField(choices=PortTypeChoices)
rear_port = FrontPortRearPortSerializer()
cable = NestedCableSerializer(read_only=True)
tags = TagListSerializerField(required=False)
class Meta:
model = FrontPort
fields = ['id', 'device', 'name', 'type', 'rear_port', 'rear_port_position', 'description', 'cable', 'tags']
class DeviceBaySerializer(TaggitSerializer, ValidatedModelSerializer):
device = NestedDeviceSerializer()
installed_device = NestedDeviceSerializer(required=False, allow_null=True)
tags = TagListSerializerField(required=False)
class Meta:
model = DeviceBay
fields = ['id', 'device', 'name', 'description', 'installed_device', 'tags']
#
# Inventory items
#
class InventoryItemSerializer(TaggitSerializer, ValidatedModelSerializer):
device = NestedDeviceSerializer()
# Provide a default value to satisfy UniqueTogetherValidator
parent = serializers.PrimaryKeyRelatedField(queryset=InventoryItem.objects.all(), allow_null=True, default=None)
manufacturer = NestedManufacturerSerializer(required=False, allow_null=True, default=None)
tags = TagListSerializerField(required=False)
class Meta:
model = InventoryItem
fields = [
'id', 'device', 'parent', 'name', 'manufacturer', 'part_id', 'serial', 'asset_tag', 'discovered',
'description', 'tags',
]
#
# Cables
#
class CableSerializer(ValidatedModelSerializer):
termination_a_type = ContentTypeField(
queryset=ContentType.objects.filter(CABLE_TERMINATION_MODELS)
)
termination_b_type = ContentTypeField(
queryset=ContentType.objects.filter(CABLE_TERMINATION_MODELS)
)
termination_a = serializers.SerializerMethodField(read_only=True)
termination_b = serializers.SerializerMethodField(read_only=True)
status = ChoiceField(choices=CableStatusChoices, required=False)
length_unit = ChoiceField(choices=CableLengthUnitChoices, allow_blank=True, required=False)
class Meta:
model = Cable
fields = [
'id', 'termination_a_type', 'termination_a_id', 'termination_a', 'termination_b_type', 'termination_b_id',
'termination_b', 'type', 'status', 'label', 'color', 'length', 'length_unit',
]
def _get_termination(self, obj, side):
"""
Serialize a nested representation of a termination.
"""
if side.lower() not in ['a', 'b']:
raise ValueError("Termination side must be either A or B.")
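        # e.g. side 'a' resolves to the obj.termination_a attribute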
termination = getattr(obj, 'termination_{}'.format(side.lower()))
if termination is None:
return None
serializer = get_serializer_for_model(termination, prefix='Nested')
context = {'request': self.context['request']}
data = serializer(termination, context=context).data
return data
@swagger_serializer_method(serializer_or_field=serializers.DictField)
def get_termination_a(self, obj):
return self._get_termination(obj, 'a')
@swagger_serializer_method(serializer_or_field=serializers.DictField)
def get_termination_b(self, obj):
return self._get_termination(obj, 'b')
class TracedCableSerializer(serializers.ModelSerializer):
"""
Used only while tracing a cable path.
"""
url = serializers.HyperlinkedIdentityField(view_name='dcim-api:cable-detail')
class Meta:
model = Cable
fields = [
'id', 'url', 'type', 'status', 'label', 'color', 'length', 'length_unit',
]
#
# Interface connections
#
class InterfaceConnectionSerializer(ValidatedModelSerializer):
interface_a = serializers.SerializerMethodField()
interface_b = NestedInterfaceSerializer(source='connected_endpoint')
connection_status = ChoiceField(choices=CONNECTION_STATUS_CHOICES, required=False)
class Meta:
model = Interface
fields = ['interface_a', 'interface_b', 'connection_status']
@swagger_serializer_method(serializer_or_field=NestedInterfaceSerializer)
def get_interface_a(self, obj):
context = {'request': self.context['request']}
return NestedInterfaceSerializer(instance=obj, context=context).data
#
# Virtual chassis
#
class VirtualChassisSerializer(TaggitSerializer, ValidatedModelSerializer):
master = NestedDeviceSerializer()
tags = TagListSerializerField(required=False)
member_count = serializers.IntegerField(read_only=True)
class Meta:
model = VirtualChassis
fields = ['id', 'master', 'domain', 'tags', 'member_count']
#
# Power panels
#
class PowerPanelSerializer(ValidatedModelSerializer):
site = NestedSiteSerializer()
rack_group = NestedRackGroupSerializer(
required=False,
allow_null=True,
default=None
)
powerfeed_count = serializers.IntegerField(read_only=True)
class Meta:
model = PowerPanel
fields = ['id', 'site', 'rack_group', 'name', 'powerfeed_count']
class PowerFeedSerializer(TaggitSerializer, CustomFieldModelSerializer):
power_panel = NestedPowerPanelSerializer()
rack = NestedRackSerializer(
required=False,
allow_null=True,
default=None
)
type = ChoiceField(
choices=PowerFeedTypeChoices,
default=PowerFeedTypeChoices.TYPE_PRIMARY
)
status = ChoiceField(
choices=PowerFeedStatusChoices,
default=PowerFeedStatusChoices.STATUS_ACTIVE
)
supply = ChoiceField(
choices=PowerFeedSupplyChoices,
default=PowerFeedSupplyChoices.SUPPLY_AC
)
phase = ChoiceField(
choices=PowerFeedPhaseChoices,
default=PowerFeedPhaseChoices.PHASE_SINGLE
)
tags = TagListSerializerField(
required=False
)
class Meta:
model = PowerFeed
fields = [
'id', 'power_panel', 'rack', 'name', 'status', 'type', 'supply', 'phase', 'voltage', 'amperage',
'max_utilization', 'comments', 'tags', 'custom_fields', 'created', 'last_updated',
]
|
the-stack_106_21125
|
import os
from typing import Any
import numpy as np
import pytest
from jina.drivers.cache import BaseCacheDriver
from jina.executors.indexers.cache import DocIDCache
from jina.proto import jina_pb2, uid
from tests import random_docs, rm_files
filename = 'test-tmp.bin'
class MockCacheDriver(BaseCacheDriver):
@property
def exec_fn(self):
return self._exec_fn
def on_hit(self, req_doc: 'jina_pb2.Document', hit_result: Any) -> None:
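        # raising makes a cache hit observable: the tests below assert that
        # feeding duplicate docs triggers NotImplementedError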
raise NotImplementedError
def test_cache_driver_twice():
docs = list(random_docs(10))
driver = MockCacheDriver()
with DocIDCache(filename) as executor:
assert not executor.handler_mutex
driver.attach(executor=executor, pea=None)
driver._traverse_apply(docs)
with pytest.raises(NotImplementedError):
# duplicate docs
driver._traverse_apply(docs)
# new docs
docs = list(random_docs(10))
driver._traverse_apply(docs)
# check persistence
assert os.path.exists(filename)
rm_files([filename])
def test_cache_driver_tmpfile():
docs = list(random_docs(10))
driver = MockCacheDriver()
with DocIDCache() as executor:
assert not executor.handler_mutex
driver.attach(executor=executor, pea=None)
driver._traverse_apply(docs)
with pytest.raises(NotImplementedError):
# duplicate docs
driver._traverse_apply(docs)
# new docs
docs = list(random_docs(10))
driver._traverse_apply(docs)
# check persistence
assert os.path.exists(executor.index_abspath)
def test_cache_driver_from_file():
docs = list(random_docs(10))
with open(filename, 'wb') as fp:
fp.write(np.array([uid.id2hash(d.id) for d in docs], dtype=np.int64).tobytes())
driver = MockCacheDriver()
with DocIDCache(filename) as executor:
assert not executor.handler_mutex
driver.attach(executor=executor, pea=None)
with pytest.raises(NotImplementedError):
# duplicate docs
driver._traverse_apply(docs)
# new docs
docs = list(random_docs(10))
driver._traverse_apply(docs)
# check persistence
assert os.path.exists(filename)
rm_files([filename])
|
the-stack_106_21127
|
import datetime
import time
from urllib.parse import urlencode
import requests
from pandas import read_csv
from geodataimport.compat import StringIO, binary_type, bytes_to_str
from geodataimport.utils import RemoteDataError, _init_session, _sanitize_dates
class _GeoData(object):
"""
Parameters
----------
symbols : {str, List[str]}
        A single symbol string or a list of symbols
start : string, int, date, datetime, Timestamp
Starting date. Parses many different kind of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, int, date, datetime, Timestamp
Ending date
retry_count : int, default 3
Number of times to retry query request.
pause : float, default 0.1
Time, in seconds, of the pause between retries.
session : Session, default None
requests.sessions.Session instance to be used
freq : {str, None}
Frequency to use in select readers
"""
_chunk_size = 1024 * 1024
_format = "string"
def __init__(
self,
symbols,
start=None,
end=None,
retry_count=5,
pause=0.1,
timeout=30,
session=None,
freq=None,
asynchronous=False,
**kwargs,
):
self.symbols = symbols
start, end = _sanitize_dates(start or self.default_start_date, end)
self.start = start
self.end = end
        if not isinstance(retry_count, int) or retry_count < 0:
            raise ValueError("'retry_count' must be a non-negative integer")
self.retry_count = retry_count
self.pause = pause
self.timeout = timeout
self.pause_multiplier = 1
self.session = _init_session(
session, retry=retry_count, asynchronous=asynchronous
)
self.freq = freq
def close(self):
"""Close network session"""
self.session.close()
@property
def default_start_date(self):
"""Default start date for reader. Defaults to 5 years before current date"""
today = datetime.date.today()
return today - datetime.timedelta(days=365 * 5)
@property
def url(self):
"""API URL"""
# must be overridden in subclass
raise NotImplementedError
@property
def params(self):
"""Parameters to use in API calls"""
return None
def _read_one_data(self, url, params):
""" read one data from specified URL """
if self._format == "string":
out = self._read_url_as_StringIO(url, params=params)
elif self._format == "json":
out = self._get_response(url, params=params).json()
else:
raise NotImplementedError(self._format)
return self._read_lines(out)
def _read_url_as_StringIO(self, url, params=None):
"""
Open url (and retry)
"""
response = self._get_response(url, params=params)
text = self._sanitize_response(response)
out = StringIO()
if len(text) == 0:
service = self.__class__.__name__
raise IOError(
"{} request returned no data; check URL for invalid "
"inputs: {}".format(service, self.url)
)
if isinstance(text, binary_type):
out.write(bytes_to_str(text))
else:
out.write(text)
out.seek(0)
return out
@staticmethod
def _sanitize_response(response):
"""
Hook to allow subclasses to clean up response data
"""
return response.content
def _get_response(self, url, params=None, headers=None):
""" send raw HTTP request to get requests.Response from the specified url
Parameters
----------
url : str
target URL
params : dict or None
parameters passed to the URL
"""
# initial attempt + retry
pause = self.pause
last_response_text = ""
for _ in range(self.retry_count + 1):
response = self.session.get(
url, params=params, headers=headers, timeout=self.timeout
)
if response.status_code == requests.codes["ok"]:
return response
if response.encoding:
last_response_text = response.text.encode(response.encoding)
time.sleep(pause)
# Increase time between subsequent requests, per subclass.
pause *= self.pause_multiplier
# Get a new breadcrumb if necessary, in case ours is invalidated
            if isinstance(params, dict) and "crumb" in params:
params["crumb"] = self._get_crumb(self.retry_count)
# If our output error function returns True, exit the loop.
if self._output_error(response):
break
if params is not None and len(params) > 0:
url = url + "?" + urlencode(params)
msg = "Unable to read URL: {0}".format(url)
if last_response_text:
msg += "\nResponse Text:\n{0}".format(last_response_text)
raise RemoteDataError(msg)
def _output_error(self, out):
"""If necessary, a service can implement an interpreter for any non-200
HTTP responses.
Parameters
----------
out: bytes
The raw output from an HTTP request
Returns
-------
boolean
"""
return False
def _get_crumb(self, *args):
""" To be implemented by subclass """
raise NotImplementedError("Subclass has not implemented method.")
def _read_lines(self, out):
rs = read_csv(out, index_col=0, parse_dates=True, na_values=("-", "null"))[::-1]
# Needed to remove blank space character in header names
rs.columns = list(map(lambda x: x.strip(), rs.columns.values.tolist()))
# Yahoo! Finance sometimes does this awesome thing where they
# return 2 rows for the most recent business day
if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
rs = rs[:-1]
# Get rid of unicode characters in index name.
try:
rs.index.name = rs.index.name.decode("unicode_escape").encode(
"ascii", "ignore"
)
except AttributeError:
# Python 3 string has no decode method.
rs.index.name = rs.index.name.encode("ascii", "ignore").decode()
return rs
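# --- Illustrative usage sketch (added for clarity; not part of the original
# module). _GeoData is an abstract base class: a concrete reader is expected to
# override the `url` (and usually `params`) properties and fetch data through
# `_read_one_data`. The endpoint and parameter names below are hypothetical.
class ExampleCSVReader(_GeoData):
    @property
    def url(self):
        # Hypothetical CSV endpoint; replace with a real data service.
        return "https://example.com/api/timeseries.csv"
    @property
    def params(self):
        # start/end come back from _sanitize_dates; string form is assumed here.
        return {
            "symbol": self.symbols,
            "start": str(self.start),
            "end": str(self.end),
        }
    def read(self):
        try:
            return self._read_one_data(self.url, self.params)
        finally:
            self.close()
# reader = ExampleCSVReader("ABC", start="2020-01-01", end="2020-12-31")
# frame = reader.read()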
|
the-stack_106_21128
|
import speech_recognition as sr
def reconhece():
    # Listen on the default microphone and return the recognized speech (Portuguese).
    rec = sr.Recognizer()
    with sr.Microphone() as s:
        rec.adjust_for_ambient_noise(s)
        while True:
            try:
                audio = rec.listen(s)
                entrada = rec.recognize_google(audio, language="pt")
                return "Você disse: {}".format(entrada)  # "You said: ..."
            except sr.UnknownValueError:
                return "Não entendi nada"  # "I didn't understand anything"
print("Ouvindo...\n-----------------\n")  # "Listening..."
while True:
fala = reconhece()
print(fala)
|
the-stack_106_21130
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : policy
Case Name   : Minimum required number of lowercase (a-z) letters in a password: password_min_lowercase=1000
Description :
    1. gs_guc set -N all -I all -c "password_min_lowercase=1000"; takes effect after restarting the database
Expect      :
    1. An error is reported: the parameter value is invalid (outside the valid range)
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
common = Common()
sh_primy = CommonSH('PrimaryDbUser')
class Policy(unittest.TestCase):
def setUp(self):
logger.info('---Opengauss_Function_Security_Policy_Case0036 start---')
self.userNode = Node('PrimaryDbUser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
self.DB_INSTANCE_PATH = macro.DB_INSTANCE_PATH
self.Constant = Constant()
def test_policy(self):
        logger.info('-----------set invalid password_min_lowercase-----------')
excute_cmd1 = f'source {self.DB_ENV_PATH};' \
f'gs_guc set -N all -I all ' \
f'-c "password_min_lowercase=1000"'
logger.info(excute_cmd1)
msg1 = self.userNode.sh(excute_cmd1).result()
logger.info(msg1)
self.assertIn(self.Constant.OUTSIDE_VALID_RANGE_MSG, msg1)
def tearDown(self):
logger.info(
'-----Opengauss_Function_Security_Policy_Case0036 finish----')
|
the-stack_106_21131
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import datetime
import os
import signal
import sys
from time import time
import uuid
from warnings import warn
from configobj import ConfigObj, ConfigObjError, flatten_errors
from validate import Validator
from logfile import write_metapop_data, write_population_data, write_population_genotypes
from Metapopulation import *
from misc import *
from Topology import *
__version__ = '1.0.5'
def parse_arguments():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(prog='ncsimulate.py',
                                     description='Run a simulation')
parser.add_argument('--config', '-c', metavar='FILE', help='Configuration '\
'file to use (default: base.cfg)', default='base.cfg',
dest='configfile')
parser.add_argument('--checkconfig', '-C', action='store_true',
default=False,
                        help='Check the given configuration file and quit (note: includes parameters specified with --param)')
parser.add_argument('--data_dir', '-d', metavar='DIR',
help='Directory to store data (default: data)')
parser.add_argument('--param', '-p', nargs=3, metavar=('SECTION', 'NAME',
'VALUE'),
action='append', help='Set a parameter value')
parser.add_argument('--seed', '-s', metavar='S', help='Set the '\
'pseudorandom number generator seed', type=int)
parser.add_argument('--quiet', '-q', action='store_true', default=False,
help='Suppress output messages')
parser.add_argument('--version', action='version', version=__version__)
return parser.parse_args()
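# Example invocations (illustrative paths and values only; the option names are
# those defined in parse_arguments above, and 'Simulation'/'num_cycles' is a
# section/key visible further down in this script):
#   ./ncsimulate.py --config base.cfg --data_dir data/run01 --seed 42
#   ./ncsimulate.py -c base.cfg -p Simulation num_cycles 500 --checkconfig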
def ncsimulate():
"""Run a simulation"""
start_time = time()
# Print a status message when SIGINFO (ctrl-T) is received on BSD or
# OS X systems or SIGUSR1 is received on POSIX systems
def handle_siginfo(signum, frame):
try:
print("Cycle {c}: Size {ps}, {pc:.0%} cooperators".format(c=cycle, ps=metapop.shape[0], pc=metapop.Coop.mean()))
except NameError:
print("Simulation has not yet begun")
signal.signal(signal.SIGUSR1, handle_siginfo)
if hasattr(signal, 'SIGINFO'):
signal.signal(signal.SIGINFO, handle_siginfo)
# Some scheduling systems send SIGTERM before killing a job. If SIGTERM
# is received, flush all of the log files
def handle_sigterm(signum, frame):
if log_metapopulation:
try:
outfilemp.flush()
except NameError:
pass
if log_population:
try:
outfilep.flush()
except NameError:
pass
if log_genotypes:
try:
outfileg.flush()
except NameError:
pass
signal.signal(signal.SIGTERM, handle_sigterm)
# Get the command line arguments
args = parse_arguments()
# Read the configuration file
try:
config = ConfigObj(infile=args.configfile, configspec='configspec.ini',
file_error=True)
except (ConfigObjError, OSError) as e:
print("Error: {e}".format(e=e))
sys.exit(1)
# Add any parameters specified on the command line to the configuration
if args.param:
for param in args.param:
config[param[0]][param[1]] = param[2]
# Validate the configuration
validation = config.validate(Validator(), copy=True)
if validation != True:
errors = flatten_errors(config, validation)
print("Found {n} error(s) in configuration:".format(n=len(errors)))
for (section_list, key, _) in errors:
if key is not None:
print("\t* Invalid value for '{k}' in Section '{s}'".format(k=key, s=section_list[0]))
else:
print("\t* Missing required section '{s}'".format(s=section_list[0]))
sys.exit(2)
if args.checkconfig:
print("No errors found in configuration file {f}".format(f=args.configfile))
sys.exit(0)
# If the random number generator seed specified, add it to the config,
# overwriting any previous value. Otherwise, if it wasn't in the
# supplied configuration file, create one.
if args.seed:
config['Simulation']['seed'] = args.seed
elif 'seed' not in config['Simulation'] or config['Simulation']['seed']==None:
seed = np.random.randint(low=0, high=np.iinfo(np.uint32).max)
config['Simulation']['seed'] = seed
np.random.seed(seed=config['Simulation']['seed'])
# Generate a universally unique identifier (UUID) for this run
config['Simulation']['UUID'] = str(uuid.uuid4())
# If the data directory is specified, add it to the config, overwriting any
# previous value
if args.data_dir:
config['Simulation']['data_dir'] = args.data_dir
# If the data_dir already exists, append the current date and time to
# data_dir, and use that. Afterwards, create the directory.
if os.path.exists(config['Simulation']['data_dir']):
newname = '{o}-{d}'.format(o=config['Simulation']['data_dir'],
d=datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
msg = '{d} already exists. Using {new} instead.'.format(d=config['Simulation']['data_dir'],
new=newname)
warn(msg)
config['Simulation']['data_dir'] = newname
os.mkdir(config['Simulation']['data_dir'])
# Write information about the run
infofile = os.path.join(config['Simulation']['data_dir'], 'run_info.txt')
write_run_information(filename=infofile, config=config)
# Write the configuration file
config.filename = os.path.join(config['Simulation']['data_dir'],
'configuration.cfg')
config.write()
config.num_births = 0
# Create the log file of metapopulation-level data if enabled
log_metapopulation = config['MetapopulationLog']['enabled']
if log_metapopulation:
log_metapopulation_freq = config['MetapopulationLog']['frequency']
# Config options for logging metapopulation. Name, frequency, etc.
fieldnames = ['Time', 'Births', 'PopulationSize', 'CooperatorProportion',
'MinCooperatorFitness', 'MaxCooperatorFitness',
'MeanCooperatorFitness', 'MinDefectorFitness',
'MaxDefectorFitness', 'MeanDefectorFitness',
'ShannonIndex', 'SimpsonIndex']
outfilemp = open(os.path.join(config['Simulation']['data_dir'],
config['MetapopulationLog']['filename']), 'w')
writermp = csv.DictWriter(outfilemp, fieldnames=fieldnames)
writermp.writeheader()
# Create the log file of population-level data if enabled
log_population = config['PopulationLog']['enabled']
if log_population:
log_population_freq = config['PopulationLog']['frequency']
# Config options for logging population. Name, frequency, etc.
fieldnames = ['Time', 'Population', 'X', 'Y', 'PopulationSize',
'CooperatorProportion', 'MinCooperatorFitness',
'MaxCooperatorFitness', 'MeanCooperatorFitness',
'MinDefectorFitness', 'MaxDefectorFitness',
'MeanDefectorFitness', 'ShannonIndex', 'SimpsonIndex']
outfilep = open(os.path.join(config['Simulation']['data_dir'],
config['PopulationLog']['filename']), 'w')
writerp = csv.DictWriter(outfilep, fieldnames=fieldnames)
writerp.writeheader()
log_genotypes = config['GenotypeLog']['enabled']
if log_genotypes:
log_genotypes_freq = config['GenotypeLog']['frequency']
fieldnames = ['Time', 'Population', 'X', 'Y', 'Genotype']
outfileg = open(os.path.join(config['Simulation']['data_dir'],
config['GenotypeLog']['filename']), 'w')
writerg = csv.DictWriter(outfileg, fieldnames=fieldnames)
writerg.writeheader()
# Create the migration topology. This is a graph where each population is a
# node, and the edges between nodes represent potential paths for migration
topology = build_topology(config=config)
if config['Simulation']['export_topology']:
fn = os.path.join(config['Simulation']['data_dir'], 'topology.gml')
export_topology(topology=topology, filename=fn)
# Create the metapopulation and apply the initial stress bottleneck
metapop = create_metapopulation(config=config, topology=topology,
initial_state=config['Metapopulation']['initial_state'])
stress_tolerance = config['Population']['mutation_rate_tolerance']
if stress_tolerance < 1:
metapop = bottleneck(population=metapop,
survival_pct=config['Population']['mutation_rate_tolerance'])
else:
metapop = bottleneck(population=metapop,
survival_pct=config['Population']['dilution_factor'])
# Keep track of how often the metapopulation should be mixed
mix_frequency = config['Metapopulation']['mix_frequency']
adaptive_columns = adaptive_colnames(L=config['Population']['genome_length'])
# Iterate through each cycle of the simulation
for cycle in range(config['Simulation']['num_cycles']):
if not args.quiet:
if len(adaptive_columns) > 0:
c1 = (metapop.loc[metapop.Coop==1, adaptive_columns] > 0).sum(axis=1).max()
d1 = (metapop.loc[metapop.Coop==0, adaptive_columns] > 0).sum(axis=1).max()
else:
c1 = d1 = 'NA'
print("Cycle {c}: Size {ps}, Populations {pops}, {pc:.0%} cooperators, Fitness: {f:.02}, C1: {c1}, D1: {d1} ]".format(c=cycle, ps=metapop.shape[0], pops=metapop.Population.unique().shape[0], pc=metapop.Coop.mean(), f=metapop.Fitness.mean(), c1=c1, d1=d1))
if log_metapopulation and cycle % log_metapopulation_freq == 0:
write_metapop_data(writer=writermp, metapop=metapop,
topology=topology, cycle=cycle, config=config)
if log_population and cycle % log_population_freq == 0:
write_population_data(writer=writerp, metapop=metapop,
topology=topology, cycle=cycle, config=config)
if log_genotypes and cycle % log_genotypes_freq == 0:
write_population_genotypes(writer=writerg, metapop=metapop,
topology=topology, cycle=cycle,
config=config)
# Grow the population to carrying capacity, potentially mutating
# offspring
metapop = grow(M=metapop, config=config)
#print("----- Num births: {nb}".format(nb=config.num_births))
# Migrate individuals among subpopulations
metapop = migrate(M=metapop, topology=topology,
rate=config['Metapopulation']['migration_rate'])
# Mix the metapopulation (if configured)
if mix_frequency > 0 and cycle > 0 and (cycle % mix_frequency == 0):
metapop = mix(M=metapop, topology=topology)
# Dilution
metapop = bottleneck(population=metapop,
survival_pct=config['Population']['dilution_factor'])
if config['Simulation']['stop_when_empty'] and \
metapop.shape[0] == 0:
break
elif config['Simulation']['num_births'] and config.num_births > config['Simulation']['num_births']:
break
if log_metapopulation:
write_metapop_data(writer=writermp, metapop=metapop, topology=topology,
cycle=cycle+1, config=config)
if log_population:
write_population_data(writer=writerp, metapop=metapop,
topology=topology, cycle=cycle, config=config)
rt_string = 'Run Time: {t}\n'.format(t=datetime.timedelta(seconds=time()-start_time))
append_run_information(filename=infofile, string=rt_string)
#-------------------------------------------------------------------------
if __name__ == "__main__":
ncsimulate()
|
the-stack_106_21133
|
'''
Created on Mar 4, 2013
@author: Devindra
The main class. Links the program together and then starts the fun.
'''
from guppy import hpy
import atexit
import pyglet
from regicide.mvc import State
from regicide import model, view, controller
from regicide.view import window
def on_exit():
print("Exiting...")
#print(hpy().heap())
# Previous values,
# 33,010,620 / 41,962,120 | Jun 1, 2013
def init_states(window):
game_model = model.game.Game()
global_controller = controller.controller.Controller()
print("Initializing Game States...")
State('game',
window = window,
model = game_model,
view = view.game.GameView(window),
controller = global_controller,
commands = controller.game.commands,
)
State('properties',
window = window,
model = game_model,
view = view.properties.PropertiesView(window),
controller = global_controller,
commands = controller.properties.commands,
)
State('traits',
window = window,
model = game_model,
view = view.traits.TraitsView(window),
controller = global_controller,
commands = controller.properties.commands,
)
State('actions',
window = window,
model = game_model,
view = view.actions.ActionsView(window),
controller = global_controller,
commands = controller.properties.commands,
)
State('inventory',
window = window,
model = game_model,
view = view.inventory.InventoryView(window),
controller = global_controller,
commands = controller.properties.commands,
)
State('world',
window = window,
model = game_model,
view = view.world.WorldView(window),
controller = global_controller,
commands = controller.properties.commands,
)
State.set_current('game')
game_model.next_turn()
if __name__ == '__main__':
window = window.MasterView()
init_states(window)
atexit.register(on_exit)
print("Starting Game Loop...")
pyglet.app.run()
|
the-stack_106_21134
|
import argparse
import logging
import os
import pdb
from torch.autograd import Variable
import os.path as osp
import torch
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import numpy as np
import resource
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from utils.utils import *
import torch.backends.cudnn as cudnn
from utils.criterion import CriterionDSN, CriterionOhemDSN, CriterionPixelWise, \
CriterionAdv, CriterionAdvForG, CriterionAdditionalGP, CriterionPairWiseforWholeFeatAfterPool
import utils.parallel as parallel_old
from networks.pspnet_combine import Res_pspnet, BasicBlock, Bottleneck
from networks.sagan_models import Discriminator
from networks.evaluate import evaluate_main
torch_ver = torch.__version__[:3]
class NetModel():
def name(self):
return 'kd_seg'
def DataParallelModelProcess(self, model, ParallelModelType = 1, is_eval = 'train', device = 'cuda'):
if ParallelModelType == 1:
parallel_model = DataParallelModel(model)
elif ParallelModelType == 2:
parallel_model = parallel_old.DataParallelModel(model)
else:
raise ValueError('ParallelModelType should be 1 or 2')
if is_eval == 'eval':
parallel_model.eval()
elif is_eval == 'train':
parallel_model.train()
else:
raise ValueError('is_eval should be eval or train')
parallel_model.float()
parallel_model.to(device)
return parallel_model
def DataParallelCriterionProcess(self, criterion, device = 'cuda'):
criterion = parallel_old.my_DataParallelCriterion(criterion)
criterion.cuda()
return criterion
def __init__(self, args):
cudnn.enabled = True
self.args = args
device = args.device
student = Res_pspnet(BasicBlock, [2, 2, 2, 2], num_classes = args.classes_num)
load_S_model(args, student, False)
print_model_parm_nums(student, 'student_model')
self.parallel_student = self.DataParallelModelProcess(student, 2, 'train', device)
self.student = student
teacher = Res_pspnet(Bottleneck, [3, 4, 23, 3], num_classes = args.classes_num)
load_T_model(teacher, args.T_ckpt_path)
print_model_parm_nums(teacher, 'teacher_model')
self.parallel_teacher = self.DataParallelModelProcess(teacher, 2, 'eval', device)
self.teacher = teacher
D_model = Discriminator(args.preprocess_GAN_mode, args.classes_num, args.batch_size, args.imsize_for_adv, args.adv_conv_dim)
load_D_model(args, D_model, False)
print_model_parm_nums(D_model, 'D_model')
self.parallel_D = self.DataParallelModelProcess(D_model, 2, 'train', device)
self.G_solver = optim.SGD([{'params': filter(lambda p: p.requires_grad, self.student.parameters()), 'initial_lr': args.lr_g}], args.lr_g, momentum=args.momentum, weight_decay=args.weight_decay)
self.D_solver = optim.SGD([{'params': filter(lambda p: p.requires_grad, D_model.parameters()), 'initial_lr': args.lr_d}], args.lr_d, momentum=args.momentum, weight_decay=args.weight_decay)
self.best_mean_IU = args.best_mean_IU
self.criterion = self.DataParallelCriterionProcess(CriterionDSN()) #CriterionCrossEntropy()
self.criterion_pixel_wise = self.DataParallelCriterionProcess(CriterionPixelWise())
#self.criterion_pair_wise_for_interfeat = [self.DataParallelCriterionProcess(CriterionPairWiseforWholeFeatAfterPool(scale=args.pool_scale[ind], feat_ind=-(ind+1))) for ind in range(len(args.lambda_pa))]
self.criterion_pair_wise_for_interfeat = self.DataParallelCriterionProcess(CriterionPairWiseforWholeFeatAfterPool(scale=args.pool_scale, feat_ind=-5))
self.criterion_adv = self.DataParallelCriterionProcess(CriterionAdv(args.adv_loss_type))
if args.adv_loss_type == 'wgan-gp':
self.criterion_AdditionalGP = self.DataParallelCriterionProcess(CriterionAdditionalGP(self.parallel_D, args.lambda_gp))
self.criterion_adv_for_G = self.DataParallelCriterionProcess(CriterionAdvForG(args.adv_loss_type))
self.mc_G_loss = 0.0
self.pi_G_loss = 0.0
self.pa_G_loss = 0.0
self.D_loss = 0.0
cudnn.benchmark = True
if not os.path.exists(args.snapshot_dir):
os.makedirs(args.snapshot_dir)
def AverageMeter_init(self):
self.parallel_top1_train = AverageMeter()
self.top1_train = AverageMeter()
def set_input(self, data):
args = self.args
images, labels, _, _ = data
self.images = images.cuda()
self.labels = labels.long().cuda()
if torch_ver == "0.3":
self.images = Variable(images)
self.labels = Variable(labels)
def lr_poly(self, base_lr, iter, max_iter, power):
return base_lr*((1-float(iter)/max_iter)**(power))
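    # lr_poly implements the polynomial ("poly") LR decay commonly used for
    # semantic segmentation: lr = base_lr * (1 - iter/max_iter)**power.
    # Illustrative numbers (not the project's defaults): base_lr=0.01,
    # max_iter=40000, power=0.9 gives lr = 0.01 * 0.5**0.9 ≈ 0.0054 at iter 20000.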
def adjust_learning_rate(self, base_lr, optimizer, i_iter):
args = self.args
lr = self.lr_poly(base_lr, i_iter, args.num_steps, args.power)
optimizer.param_groups[0]['lr'] = lr
return lr
def forward(self):
args = self.args
with torch.no_grad():
self.preds_T = self.parallel_teacher.eval()(self.images, parallel=args.parallel)
self.preds_S = self.parallel_student.train()(self.images, parallel=args.parallel)
def student_backward(self):
args = self.args
G_loss = 0.0
temp = self.criterion(self.preds_S, self.labels, is_target_scattered = False)
temp_T = self.criterion(self.preds_T, self.labels, is_target_scattered = False)
self.mc_G_loss = temp.item()
G_loss = G_loss + temp
if args.pi == True:
temp = args.lambda_pi*self.criterion_pixel_wise(self.preds_S, self.preds_T, is_target_scattered = True)
self.pi_G_loss = temp.item()
G_loss = G_loss + temp
if args.pa == True:
#for ind in range(len(args.lambda_pa)):
# if args.lambda_pa[ind] != 0.0:
# temp1 = self.criterion_pair_wise_for_interfeat[ind](self.preds_S, self.preds_T, is_target_scattered = True)
# self.pa_G_loss[ind] = temp1.item()
# G_loss = G_loss + args.lambda_pa[ind]*temp1
# elif args.lambda_pa[ind] == 0.0:
# self.pa_G_loss[ind] = 0.0
temp1 = self.criterion_pair_wise_for_interfeat(self.preds_S, self.preds_T, is_target_scattered = True)
self.pa_G_loss = temp1.item()
G_loss = G_loss + args.lambda_pa*temp1
if self.args.ho == True:
d_out_S = self.parallel_D(eval(compile(to_tuple_str('self.preds_S', args.gpu_num, '[0]'), '<string>', 'eval')), parallel=args.parallel)
G_loss = G_loss + args.lambda_d*self.criterion_adv_for_G(d_out_S, d_out_S, is_target_scattered = True)
G_loss.backward()
self.G_loss = G_loss.item()
def discriminator_backward(self):
self.D_solver.zero_grad()
args = self.args
d_out_T = self.parallel_D(eval(compile(to_tuple_str('self.preds_T', args.gpu_num, '[0].detach()'), '<string>', 'eval')), parallel=True)
d_out_S = self.parallel_D(eval(compile(to_tuple_str('self.preds_S', args.gpu_num, '[0].detach()'), '<string>', 'eval')), parallel=True)
d_loss = args.lambda_d*self.criterion_adv(d_out_S, d_out_T, is_target_scattered = True)
if args.adv_loss_type == 'wgan-gp':
d_loss += args.lambda_d*self.criterion_AdditionalGP(self.preds_S, self.preds_T, is_target_scattered = True)
d_loss.backward()
self.D_loss = d_loss.item()
self.D_solver.step()
def optimize_parameters(self):
self.forward()
self.G_solver.zero_grad()
self.student_backward()
self.G_solver.step()
if self.args.ho == True:
self.discriminator_backward()
def evalute_model(self, model, loader, gpu_id, input_size, num_classes, whole):
mean_IU, IU_array = evaluate_main(model=model, loader = loader,
gpu_id = gpu_id,
input_size = input_size,
num_classes = num_classes,
whole = whole)
return mean_IU, IU_array
def print_info(self, epoch, step):
logging.info('step:{:5d} G_lr:{:.6f} G_loss:{:.5f}(mc:{:.5f} pixelwise:{:.5f} pairwise:{:.5f}) D_lr:{:.6f} D_loss:{:.5f}'.format(
step, self.G_solver.param_groups[-1]['lr'],
self.G_loss, self.mc_G_loss, self.pi_G_loss, self.pa_G_loss,
self.D_solver.param_groups[-1]['lr'], self.D_loss))
def __del__(self):
pass
def save_ckpt(self, epoch, step, mean_IU, IU_array):
torch.save(self.student.state_dict(),osp.join(self.args.snapshot_dir, 'CS_scenes_'+str(step)+'_'+str(mean_IU)+'.pth'))
|
the-stack_106_21137
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import MassGridTestFramework
from test_framework.util import *
def read_dump(file_name, addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
# only read non comment lines
if line[0] != "#" and len(line) > 10:
# split out some data
key_label, comment = line.split("#")
# key = key_label.split(" ")[0]
keytype = key_label.split(" ")[2]
if len(comment) > 1:
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdmaster=1":
# ensure the old master is still available
assert(hd_master_addr_old == addr)
elif keytype == "hdmaster=1":
# ensure we have generated a new hd master key
assert(hd_master_addr_old != addr)
hd_master_addr_ret = addr
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
found_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(MassGridTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [["-keypool=90", "-usehd=1"]]
def setup_chain(self):
# TODO remove this when usehd=1 becomes the default
# use our own cache and -usehd=1 as extra arg as the default cache is run with -usehd=0
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir + "/hd", ["-usehd=1"], redirect_stderr=True)
set_cache_mocktime()
def setup_network(self, split=False):
# Use 1 minute timeout because the initial getnewaddress RPC can take
# longer than the default 30 seconds due to an expensive
# CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
# the test often takes even longer.
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60, redirect_stderr=True)
def run_test (self):
tmpdir = self.options.tmpdir
# generate 20 addresses to compare against the dump
test_addr_count = 20
addrs = []
for i in range(0,test_addr_count):
addr = self.nodes[0].getnewaddress()
vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
addrs.append(vaddr)
# Should be a no-op:
self.nodes[0].keypoolrefill()
# dump unencrypted wallet
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
assert_equal(found_addr, test_addr_count) # all keys must be in the dump
        assert_equal(found_addr_chg, 50) # 50 blocks were mined
assert_equal(found_addr_rsv, 180) # keypool size (external+internal)
#encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
massgridd_processes[0].wait()
self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
self.nodes[0].walletpassphrase('test', 10)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
assert_equal(found_addr, test_addr_count)
# TODO clarify if we want the behavior that is tested below in MassGrid (only when HD seed was generated and not user-provided)
# assert_equal(found_addr_chg, 180 + 50) # old reserve keys are marked as change now
assert_equal(found_addr_rsv, 180) # keypool size
if __name__ == '__main__':
WalletDumpTest().main ()
|
the-stack_106_21138
|
import json, pycurl
from io import BytesIO
class Couchpotato():
def __init__(self, host, port, api_key):
self.__host = host
self.__port = port
self.__api_key = api_key
self.reply = {}
def __apicall(self, host, port, api_key, endpoint):
buffer = BytesIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, 'http://' + host + ':' + port + '/api/' + api_key + endpoint)
c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json', 'Accept: application/json'])
c.setopt(pycurl.TIMEOUT, 10)
c.setopt(c.WRITEDATA, buffer)
try:
c.perform()
c.close()
except pycurl.error:
return 'Failed communicating with Couchpotato.'
try:
return buffer.getvalue().decode('utf-8')
        except UnicodeDecodeError:
return 'JSON API failure'
def addmovie(self, imdb_id):
request = self.__apicall(self.__host, self.__port, self.__api_key, '/movie.add?identifier=' + imdb_id + '&force_readd=False')
try:
request_json = json.loads(request)
if request_json['success'] == True:
self.reply = request_json
return True
else:
self.reply = {
'message': request_json,
'success': False,
}
return False
except json.JSONDecodeError:
self.reply = {
'message': 'Failed communicating with Couchpotato.',
'success': False,
}
return False
def deletemovie(self, cp_id):
request = self.__apicall(self.__host, self.__port, self.__api_key, '/movie.delete?id=' + cp_id + '&from=wanted')
try:
request_json = json.loads(request)
if request_json['success'] == True:
self.reply = request_json
return True
else:
self.reply = {
'message': 'Failed to delete movie.',
'success': False,
}
return False
except json.JSONDecodeError:
self.reply = {
'message': request,
'success': False,
}
return False
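# --- Illustrative usage sketch (added for clarity; host, port and API key below
# are placeholders, not values from the original project). `addmovie` takes an
# IMDb id, `deletemovie` takes CouchPotato's internal movie id; either way the
# parsed reply (or an error message) is left on `self.reply`.
if __name__ == '__main__':
    cp = Couchpotato('127.0.0.1', '5050', 'your_api_key_here')
    if cp.addmovie('tt0133093'):
        print('Movie added:', cp.reply)
    else:
        print('Add failed:', cp.reply['message'])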
|
the-stack_106_21140
|
from glob import glob
from pathlib import Path
from typing import Callable, List, Optional
import torch
from torch_geometric.data import InMemoryDataset, extract_zip
from torch_geometric.io import read_ply
class CoMA(InMemoryDataset):
r"""The CoMA 3D faces dataset from the `"Generating 3D faces using
Convolutional Mesh Autoencoders" <https://arxiv.org/abs/1807.10267>`_
paper, containing 20,466 meshes of extreme expressions captured over 12
different subjects.
.. note::
Data objects hold mesh faces instead of edge indices.
To convert the mesh to a graph, use the
:obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.
To convert the mesh to a point cloud, use the
:obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to
sample a fixed number of points on the mesh faces according to their
face area.
Args:
root (string): Root directory where the dataset should be saved.
train (bool, optional): If :obj:`True`, loads the training dataset,
otherwise the test dataset. (default: :obj:`True`)
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
pre_filter (callable, optional): A function that takes in an
:obj:`torch_geometric.data.Data` object and returns a boolean
value, indicating whether the data object should be included in the
final dataset. (default: :obj:`None`)
"""
url = 'https://coma.is.tue.mpg.de/'
categories = [
'bareteeth',
'cheeks_in',
'eyebrow',
'high_smile',
'lips_back',
'lips_up',
'mouth_down',
'mouth_extreme',
'mouth_middle',
'mouth_open',
'mouth_side',
'mouth_up',
]
def __init__(self, root: str, train: bool = True,
transform: Optional[Callable] = None,
pre_transform: Optional[Callable] = None,
pre_filter: Optional[Callable] = None):
super().__init__(root, transform, pre_transform, pre_filter)
path = self.processed_paths[0] if train else self.processed_paths[1]
self.data, self.slices = torch.load(path)
@property
def raw_file_names(self) -> str:
return 'COMA_data.zip'
@property
def processed_file_names(self) -> List[str]:
return ['training.pt', 'test.pt']
def download(self):
raise RuntimeError(
f"Dataset not found. Please download 'COMA_data.zip' from "
f"'{self.url}' and move it to '{self.raw_dir}'")
def process(self):
folders = sorted(glob(Path.joinpath(Path(self.raw_dir), 'FaceTalk_*')))
if len(folders) == 0:
extract_zip(self.raw_paths[0], self.raw_dir, log=False)
folders = sorted(
glob(Path.joinpath(Path(self.raw_dir), 'FaceTalk_*')))
train_data_list, test_data_list = [], []
for folder in folders:
for i, category in enumerate(self.categories):
files = sorted(
glob(Path.joinpath(Path(folder), category, '*.ply')))
for j, f in enumerate(files):
data = read_ply(f)
data.y = torch.tensor([i], dtype=torch.long)
if self.pre_filter is not None and\
not self.pre_filter(data):
continue
if self.pre_transform is not None:
data = self.pre_transform(data)
if (j % 100) < 90:
train_data_list.append(data)
else:
test_data_list.append(data)
torch.save(self.collate(train_data_list), self.processed_paths[0])
torch.save(self.collate(test_data_list), self.processed_paths[1])
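# --- Illustrative usage sketch (added for clarity; the root path is a
# placeholder and 'COMA_data.zip' must already be placed in `<root>/raw`).
# As the class docstring notes, the meshes store faces rather than edges, so a
# typical pipeline converts them with the FaceToEdge pre-transform.
if __name__ == '__main__':
    import torch_geometric.transforms as T
    dataset = CoMA(root='data/CoMA', train=True, pre_transform=T.FaceToEdge())
    print(dataset)       # number of training meshes
    print(dataset[0])    # Data(edge_index=[...], pos=[...], y=[...], ...)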
|
the-stack_106_21141
|
# ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [Avinash Ranganath]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import numpy as np
from sklearn.datasets import fetch_openml
import matplotlib.pyplot as plt
from pydl.nn.layers import FC
from pydl.nn.nn import NN
from pydl.training.adam import Adam
from pydl import conf
def main():
mnist = fetch_openml('mnist_784')
X = np.array(mnist.data, dtype=conf.dtype)
    y = np.array(mnist.target, dtype=int)
K = np.max(y) + 1
# plot first few images
fig = plt.figure()
for i, r in enumerate(np.random.randint(0, y.size, 9)):
# define subplot
plt.subplot(330 + 1 + i)
# plot raw pixel data
plt.imshow(np.reshape(X[r], (-1, 28)), cmap=plt.get_cmap('gray'))
plt.title(str(y[r]))
# show the figure
plt.draw()
plt.waitforbuttonpress(0)
plt.close(fig)
weight_scale = 1.0
l1 = FC(X, num_neurons=200, bias=True, weight_scale=weight_scale, xavier=True,
activation_fn='Tanh', batchnorm=True)
l2 = FC(l1, num_neurons=100, bias=True, weight_scale=weight_scale, xavier=True,
activation_fn='Tanh', batchnorm=True)
l3 = FC(l2, num_neurons=50, bias=True, weight_scale=weight_scale, xavier=True,
activation_fn='Tanh', batchnorm=True)
l4 = FC(l3, num_neurons=25, bias=True, weight_scale=weight_scale, xavier=True,
activation_fn='Tanh', batchnorm=True)
l5 = FC(l4, num_neurons=15, bias=True, weight_scale=weight_scale, xavier=True,
activation_fn='Tanh', batchnorm=True)
l6 = FC(l5, num_neurons=K, bias=True, weight_scale=weight_scale, xavier=True,
activation_fn='SoftMax')
layers = [l1, l2, l3, l4, l5, l6]
nn = NN(X, layers)
adam = Adam(nn, step_size=1e-3, beta_1=0.9, beta_2=0.999, reg_lambda=1e-1, train_size=60000,
test_size=10000)
adam.train(X, y, normalize='pca', dims=0.97, shuffle=False, epochs=10000, plot='MNIST - Adam',
log_freq=1)
input("Press Enter to continue...")
if __name__ == '__main__':
main()
|
the-stack_106_21142
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([1,2,3], label="test")
l = ax.legend()
d1 = l.draggable()
xy = 1, 2
txt = ax.annotate("Test", xy, xytext=(-30, 30),
textcoords="offset points",
bbox=dict(boxstyle="round",fc=(0.2, 1, 1)),
arrowprops=dict(arrowstyle="->"))
d2 = txt.draggable()
from matplotlib._png import read_png
from matplotlib.cbook import get_sample_data
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
fn = get_sample_data("ada.png", asfileobj=False)
arr_ada = read_png(fn)
imagebox = OffsetImage(arr_ada, zoom=0.2)
ab = AnnotationBbox(imagebox, xy,
xybox=(120., -80.),
xycoords='data',
boxcoords="offset points",
pad=0.5,
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=3")
)
ax.add_artist(ab)
d3 = ab.draggable(use_blit=True)
plt.show()
|
the-stack_106_21144
|
# Standard imports
from typing import Union
from pathlib import Path
import functools
# External imports
import tqdm
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
_DEFAULT_DATASET_ROOT = "/opt/Datasets"
_DEFAULT_MNIST_DIGIT = 6
# _MNIST_MEAN = 0.1309
# _MNIST_STD = 0.3084
_MNIST_MEAN = 0.5
_MNIST_STD = 0.5
def get_dataloaders(dataset_root: Union[str, Path],
cuda: bool,
batch_size: int = 64,
n_threads: int = 4,
dataset: str = "MNIST",
val_size: float = 0.2,
small_experiment: bool = False):
"""
Build and return the pytorch dataloaders
Args:
dataset_root (str, Path) : the root path of the datasets
cuda (bool): whether or not to use cuda
batch_size (int) : the size of the minibatches
n_threads (int): the number of threads to use for dataloading
dataset (str): the dataset to load
val_size (float): the proportion of data for the validation set
        small_experiment (bool): whether or not to use a small
                                 dataset (useful for debugging)
"""
datasets = ["MNIST", "FashionMNIST", "EMNIST", "SVHN"]
if dataset not in datasets:
raise NotImplementedError(f"Cannot import the dataset {dataset}."
f" Available datasets are {datasets}")
dataset_loader = getattr(torchvision.datasets, f"{dataset}")
train_kwargs = {}
test_kwargs = {}
if dataset in ["MNIST", "FashionMNIST", "EMNIST"]:
train_kwargs['train'] = True
test_kwargs['train'] = False
if dataset == "EMNIST":
train_kwargs['split'] = 'balanced'
elif dataset == "SVHN":
train_kwargs['split'] = 'train'
test_kwargs['split'] = 'test'
# Get the two datasets, make them tensors in [0, 1]
transform= transforms.Compose([
transforms.ToTensor(),
transforms.Normalize( (_MNIST_MEAN,), (_MNIST_STD,))
]
)
train_dataset = dataset_loader(root=dataset_root,
**train_kwargs,
download=True,
transform=transform
)
test_dataset = dataset_loader(root=dataset_root,
**test_kwargs,
download=True,
transform=transform
)
dataset = torch.utils.data.ConcatDataset([train_dataset,
test_dataset])
# Compute the channel-wise normalization coefficients
# mean = std = 0
# img, _ = dataset[0]
# print(img.shape)
# N = len(dataset) * img.shape[1] * img.shape[2]
# for img, _ in tqdm.tqdm(dataset):
# mean += img.sum()/N
# for img, _ in tqdm.tqdm(dataset):
# std += ((img - mean)**2).sum()/N
# std = np.sqrt(std)
# print(mean, std)
if small_experiment:
dataset = torch.utils.data.Subset(dataset, range(batch_size))
# Split the dataset in train/valid
indices = np.arange(len(dataset))
np.random.shuffle(indices)
split_idx = int(val_size * len(dataset))
valid_indices, train_indices = indices[:split_idx], indices[split_idx:]
train_dataset = torch.utils.data.Subset(dataset, train_indices)
valid_dataset = torch.utils.data.Subset(dataset, valid_indices)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=n_threads)
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=n_threads)
img_shape = dataset[0][0].shape # C, H, W
return train_loader, valid_loader, img_shape
def test_mnist():
import matplotlib.pyplot as plt
train_loader, valid_loader, img_shape = get_dataloaders(dataset_root=_DEFAULT_DATASET_ROOT,
batch_size=16,
cuda=False,
dataset="MNIST")
print(f"I loaded {len(train_loader)} train minibatches. The images"
f" are of shape {img_shape}")
X, y = next(iter(train_loader))
grid = torchvision.utils.make_grid(X, nrow=4)
print(grid.min(), grid.max())
print(grid.shape)
plt.figure()
plt.imshow(np.transpose(grid.numpy(), (1, 2, 0)), cmap='gray_r')
plt.show()
if __name__ == '__main__':
test_mnist()
|
the-stack_106_21145
|
"""PyTorch trainer module.
- Author: Jongkuk Lim, Junghoon Kim
- Contact: [email protected], [email protected]
"""
import os
import shutil
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from sklearn.metrics import f1_score
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.sampler import SequentialSampler, SubsetRandomSampler
from tqdm import tqdm
from src.utils.torch_utils import save_model
from src.utils.common import save_classification_report
def _get_n_data_from_dataloader(dataloader: DataLoader) -> int:
"""Get a number of data in dataloader.
Args:
dataloader: torch dataloader
Returns:
A number of data in dataloader
"""
if isinstance(dataloader.sampler, SubsetRandomSampler):
n_data = len(dataloader.sampler.indices)
elif isinstance(dataloader.sampler, SequentialSampler):
n_data = len(dataloader.sampler.data_source)
else:
n_data = len(dataloader) * dataloader.batch_size if dataloader.batch_size else 1
return n_data
def _get_n_batch_from_dataloader(dataloader: DataLoader) -> int:
"""Get a batch number in dataloader.
Args:
dataloader: torch dataloader
Returns:
A batch number in dataloader
"""
n_data = _get_n_data_from_dataloader(dataloader)
n_batch = dataloader.batch_size if dataloader.batch_size else 1
return n_data // n_batch
def _get_len_label_from_dataset(dataset: Dataset) -> int:
"""Get length of label from dataset.
Args:
dataset: torch dataset
Returns:
A number of label in set.
"""
if isinstance(dataset, torchvision.datasets.ImageFolder) or isinstance(
dataset, torchvision.datasets.vision.VisionDataset
):
return len(dataset.classes)
elif isinstance(dataset, torch.utils.data.Subset):
return _get_len_label_from_dataset(dataset.dataset)
else:
raise NotImplementedError
class TorchTrainer:
"""Pytorch Trainer."""
def __init__(
self,
model: nn.Module,
criterion: nn.Module,
optimizer: optim.Optimizer,
scheduler,
log_dir: str,
scaler=None,
device: torch.device = "cpu",
verbose: int = 1,
early_stopping: bool = False,
early_stopping_threshold: int = 5
) -> None:
"""Initialize TorchTrainer class.
Args:
model: model to train
criterion: loss function module
optimizer: optimization module
device: torch device
verbose: verbosity level.
"""
self.model = model
self.log_dir = log_dir
self.criterion = criterion
self.optimizer = optimizer
self.scheduler = scheduler
self.scaler = scaler
self.verbose = verbose
self.device = device
self.early_stopping = early_stopping
self.stopping_count = 0
self.stopping_threshold = early_stopping_threshold
def train(
self,
train_dataloader: DataLoader,
n_epoch: int,
val_dataloader: Optional[DataLoader] = None,
) -> Tuple[float, float]:
"""Train model.
Args:
            train_dataloader: data loader module which is an iterator that returns (data, labels)
n_epoch: number of total epochs for training
val_dataloader: dataloader for validation
Returns:
loss and accuracy
"""
best_test_acc = -1.0
best_test_f1 = -1.0
num_classes = _get_len_label_from_dataset(train_dataloader.dataset)
label_list = [i for i in range(num_classes)]
for epoch in range(n_epoch):
running_loss, correct, total = 0.0, 0, 0
preds, gt = [], []
pbar = tqdm(enumerate(train_dataloader), total=len(train_dataloader), ncols=150)
self.model.train()
for batch, (data, labels) in pbar:
data, labels = data.to(self.device), labels.to(self.device)
if self.scaler:
with torch.cuda.amp.autocast():
outputs = self.model(data)
else:
outputs = self.model(data)
outputs = torch.squeeze(outputs)
loss = self.criterion(outputs, labels)
self.optimizer.zero_grad()
if self.scaler:
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
else:
loss.backward()
self.optimizer.step()
self.scheduler.step()
_, pred = torch.max(outputs, 1)
total += labels.size(0)
correct += (pred == labels).sum().item()
preds += pred.to("cpu").tolist()
gt += labels.to("cpu").tolist()
running_loss += loss.item()
pbar.update()
pbar.set_description(
f"Train: [{epoch + 1:03d}] "
f"Loss: {(running_loss / (batch + 1)):.3f}, "
f"Acc: {(correct / total) * 100:.2f}% "
f"F1(macro): {f1_score(y_true=gt, y_pred=preds, labels=label_list, average='macro', zero_division=0):.2f}, "
f"lr: {self.optimizer.param_groups[0]['lr']:.7f}"
)
pbar.close()
_, test_f1, test_acc, preds, gt = self.test(
model=self.model, test_dataloader=val_dataloader
)
if epoch == 9 and best_test_f1 < 0.20:
return best_test_acc, best_test_f1
elif epoch == 25 and best_test_f1 < 0.25:
return best_test_acc, best_test_f1
elif epoch == 49 and best_test_f1 < 0.45:
return best_test_acc, best_test_f1
if best_test_f1 > test_f1:
if self.early_stopping:
self.stopping_count += 1
if self.stopping_count == self.stopping_threshold:
print(f"Early Stopping !, epoch: {epoch}")
return best_test_acc, best_test_f1
continue
best_test_acc = test_acc
best_test_f1 = test_f1
self.stopping_count = 0
print(f"Model saved. Current best test f1: {best_test_f1:.3f}")
if best_test_f1 > 0.5:
save_model(
model=self.model,
path=os.path.join(self.log_dir, "best.pt"),
data=data,
device=self.device,
)
save_classification_report(path=self.log_dir, preds=preds, gt=gt)
return best_test_acc, best_test_f1
@torch.no_grad()
def test(
self, model: nn.Module, test_dataloader: DataLoader
    ) -> Tuple[float, float, float, list, list]:
        """Test model.
        Args:
            test_dataloader: test data loader module which is an iterator that returns (data, labels)
        Returns:
            loss, f1, accuracy, predictions, ground-truth labels
"""
n_batch = _get_n_batch_from_dataloader(test_dataloader)
running_loss = 0.0
preds = []
gt = []
correct = 0
total = 0
num_classes = _get_len_label_from_dataset(test_dataloader.dataset)
label_list = [i for i in range(num_classes)]
pbar = tqdm(enumerate(test_dataloader), total=len(test_dataloader), ncols=150)
model.to(self.device)
model.eval()
for batch, (data, labels) in pbar:
data, labels = data.to(self.device), labels.to(self.device)
if self.scaler:
with torch.cuda.amp.autocast():
outputs = model(data)
else:
outputs = model(data)
outputs = torch.squeeze(outputs)
running_loss += self.criterion(outputs, labels).item()
_, pred = torch.max(outputs, 1)
total += labels.size(0)
correct += (pred == labels).sum().item()
preds += pred.to("cpu").tolist()
gt += labels.to("cpu").tolist()
pbar.update()
pbar.set_description(
f" Val: {'':5} Loss: {(running_loss / (batch + 1)):.3f}, "
f"Acc: {(correct / total) * 100:.2f}% "
f"F1(macro): {f1_score(y_true=gt, y_pred=preds, labels=label_list, average='macro', zero_division=0):.2f}"
)
loss = running_loss / len(test_dataloader)
accuracy = correct / total
f1 = f1_score(
y_true=gt, y_pred=preds, labels=label_list, average="macro", zero_division=0
)
return loss, f1, accuracy, preds, gt
def count_model_params(
model: torch.nn.Module,
) -> int:
"""Count model's parameters"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
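# --- Illustrative usage sketch (added for clarity; the model, hyperparameters
# and loaders below are placeholders, not the project's configuration). The
# trainer expects an ImageFolder-style dataset behind the dataloaders so that
# _get_len_label_from_dataset can infer the number of classes.
#
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = torchvision.models.resnet18(num_classes=6).to(device)
# optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100)
# trainer = TorchTrainer(model, nn.CrossEntropyLoss(), optimizer, scheduler,
#                        log_dir="exp/latest", device=device)
# best_acc, best_f1 = trainer.train(train_loader, n_epoch=100,
#                                   val_dataloader=val_loader)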
|
the-stack_106_21146
|
# %%
# read data for tests
import pandas as pd
df = pd.read_csv('/Users/lukasgehrke/Documents/temp/chatham/LG_data_crdPhase1/df_scenario1_random_sample.csv')
# df = df.sample(100000) # select random rows for faster debugging
# df.to_csv('/Users/lukasgehrke/Documents/temp/chatham/LG_data_crdPhase1/df_scenario1_random_sample.csv', index=False)
data = df[['X', 'Y']]
design = df[['pID' ,'Activity', 'Workload', 'Intensity', 'GTLX']]
design.head()
# %%
import pandas as pd
import numpy as np
participants = 20
size = 100
X = []
Y = []
pID = []
some_cat_between_factor = []
for p in range(participants):
if p < participants/2:
level = ["A"]
else:
level = ["B"]
X = X+np.random.random(size).tolist()
Y = Y+np.random.random(size).tolist()
pID = pID+([p]*size)
some_cat_between_factor = some_cat_between_factor+(level*size)
data = {"X":X, "Y": Y, "pID":pID, "some_cat_between_factor": some_cat_between_factor}
df = pd.DataFrame.from_dict(data)
# %%
import pandas as pd
import numpy as np
size = 100
d = {'X': np.random.random(size), 'Y': np.random.random(size), 'pID':[4]*size, 'some_cat_between_factor': ["A"]*size}
this_df = pd.DataFrame.from_dict(d)
# %%
|
the-stack_106_21147
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
(default: kvm).
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
:libvirt_disk_prefix: Override the default disk prefix for the devices
attached to a server.
:rescue_image_id: Rescue ami image (None = original image).
:rescue_kernel_id: Rescue aki image (None = original image).
:rescue_ramdisk_id: Rescue ari image (None = original image).
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in project network traffic
"""
import errno
import eventlet
import functools
import glob
import os
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova import notifier
from nova.objects import instance as instance_obj
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import xmlutils
from nova.pci import pci_manager
from nova.pci import pci_utils
from nova.pci import pci_whitelist
from nova import utils
from nova import version
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
from nova import volume
from nova.volume import encryptors
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
help='Rescue ami image'),
cfg.StrOpt('rescue_kernel_id',
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
help='Rescue ari image'),
cfg.StrOpt('libvirt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)'),
cfg.StrOpt('libvirt_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on libvirt_type)'),
cfg.BoolOpt('libvirt_inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('libvirt_inject_key',
default=True,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('libvirt_inject_partition',
default=1,
help='The partition to inject to : '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('libvirt_vif_driver',
default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
help='The libvirt VIF driver to configure the VIFs.'),
cfg.ListOpt('libvirt_volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
'fibre_channel=nova.virt.libvirt.volume.'
'LibvirtFibreChannelVolumeDriver',
'scality='
'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on libvirt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('libvirt_wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
                    ' if instance does not shut down within this window.'),
cfg.BoolOpt('libvirt_nonblocking',
default=True,
                help='Use a separate OS thread pool to realize non-blocking'
' libvirt calls'),
cfg.StrOpt('libvirt_cpu_mode',
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If libvirt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('libvirt_cpu_model',
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"'),
cfg.StrOpt('libvirt_snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: ["file=directsync","block=none"]'),
cfg.StrOpt('vcpu_pin_set',
help='Which pcpus can be used by vcpus of instance '
'e.g: "4-12,^8,15"'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 102400
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
MIN_LIBVIRT_VERSION = (0, 9, 6)
# NOTE: when MIN_LIBVIRT_VERSION is raised to match/exceed this version,
# delete MIN_LIBVIRT_HOST_CPU_VERSION and the code that uses it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
# block size tuning requirements
MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2)
# BlockJobInfo management requirement
MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
def libvirt_error_handler(context, err):
    # Ignore errors instead of printing them to stderr (the default).
pass
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
self._host_state = None
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._caps = None
self._vcpu_total = 0
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
vif_class = importutils.import_class(CONF.libvirt_vif_driver)
self.vif_driver = vif_class(self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt_volume_drivers, self)
self.dev_filter = pci_whitelist.get_pci_devices_filter()
self._event_queue = None
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"unsafe",
]
for mode_str in CONF.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.'),
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = volume.API()
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
            # provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self)
return self._host_state
def set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
@staticmethod
def _has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
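        # The lv_ver/hv_ver tuples are packed into single integers via
        # utils.convert_version_to_int() so they can be compared numerically
        # with the values returned by getLibVersion()/getVersion(), which
        # libvirt encodes the same way (e.g. (0, 9, 6) corresponds to 9006).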
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if libvirt_version < utils.convert_version_to_int(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if hypervisor_version < utils.convert_version_to_int(hv_ver):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._has_min_version(self._conn, lv_ver, hv_ver, hv_type)
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self.queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread.
"""
if self._event_queue is None:
LOG.debug(_("Event loop thread is not active, "
"discarding event %s") % event)
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
self.emit_event(event)
except native_Queue.Empty:
pass
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = eventlet_util.__original_socket__(socket.AF_INET,
socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug(_("Starting native event thread"))
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug(_("Starting green dispatch thread"))
eventlet.spawn(self._dispatch_thread)
def init_host(self, host):
libvirt.registerErrorHandler(libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
if not self.has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_('Nova requires libvirt version '
'%(major)i.%(minor)i.%(micro)i or greater.'),
{'major': major, 'minor': minor, 'micro': micro})
self._init_events()
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug(_('Connecting to libvirt: %s'), self.uri())
wrapped_conn = self._connect(self.uri(), self.read_only)
self._wrapped_conn = wrapped_conn
try:
LOG.debug(_("Registering for lifecycle events %s"), self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warn(_("URI %(uri)s does not support events: %(error)s"),
{'uri': self.uri(), 'error': e})
try:
LOG.debug(_("Registering for connection events: %s") %
str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one of python-libvirt 1.0.0
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug(_("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s"), e)
except libvirt.libvirtError as e:
LOG.warn(_("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self.uri(), 'error': e})
return wrapped_conn
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
_conn = property(_get_connection)
def _close_callback(self, conn, reason, opaque):
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
LOG.info(_("Connection to libvirt lost: %s") % reason)
self._wrapped_conn = None
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
return False
raise
@staticmethod
def uri():
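        # The default URI is derived from libvirt_type (e.g. 'kvm' and
        # 'qemu' both fall through to 'qemu:///system'); an explicit
        # libvirt_uri setting always takes precedence.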
if CONF.libvirt_type == 'uml':
uri = CONF.libvirt_uri or 'uml:///system'
elif CONF.libvirt_type == 'xen':
uri = CONF.libvirt_uri or 'xen:///'
elif CONF.libvirt_type == 'lxc':
uri = CONF.libvirt_uri or 'lxc:///'
else:
uri = CONF.libvirt_uri or 'qemu:///system'
return uri
@staticmethod
def _connect(uri, read_only):
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
LOG.warning(
_("Can not handle authentication request for %d credentials")
% len(creds))
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
_connect_auth_cb,
None]
try:
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
if not CONF.libvirt_nonblocking:
return libvirt.openAuth(uri, auth, flags)
else:
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
method='_connect',
reason=ex)
notifier.get_notifier('compute').error(
nova_context.get_admin_context(),
'compute.libvirt.error', payload)
def get_num_instances(self):
"""Efficient override of base instance_exists method."""
return self._conn.numOfDomains()
def instance_exists(self, instance_name):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance_name)
return True
except exception.NovaException:
return False
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
return []
return self._conn.listDomainsID()
def list_instances(self):
names = []
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._lookup_by_id(domain_id)
names.append(domain.name())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
# extend instance list to contain also defined domains
names.extend([vm for vm in self._conn.listDefinedDomains()
if vm not in names])
return names
def list_instance_uuids(self):
uuids = set()
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._lookup_by_id(domain_id)
uuids.add(domain.UUIDString())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
# extend instance list to contain also defined domains
for domain_name in self._conn.listDefinedDomains():
try:
uuids.add(self._lookup_by_name(domain_name).UUIDString())
except exception.InstanceNotFound:
# Ignore deleted instance while listing
continue
return list(uuids)
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for vif in network_info:
self.vif_driver.plug(instance, vif)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def _destroy(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if virt_dom is not None:
try:
old_domid = virt_dom.ID()
virt_dom.destroy()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warn(_("Cannot destroy instance, operation time out"),
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info['state']
new_domid = dom_info['id']
except exception.InstanceNotFound:
LOG.error(_("During wait destroy, instance disappeared."),
instance=instance)
raise loopingcall.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_("Instance destroyed successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
        #             this may be an endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_("Going to destroy instance again."), instance=instance)
self._destroy(instance)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True, context=None):
self._destroy(instance)
self._cleanup(instance, network_info, block_device_info,
destroy_disks, context=context)
def _undefine_domain(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug(_("Error from libvirt during undefineFlags."
" Retrying with undefine"), instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s') %
{'errcode': errcode, 'e': e}, instance=instance)
def _cleanup(self, instance, network_info, block_device_info,
destroy_disks, context=None):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
retry = True
while retry:
try:
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance)['state']
except exception.InstanceNotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.error(_('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s') %
{'errcode': errcode, 'e': e},
instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
        # FIXME(wangpan): if the instance is booted again here, e.g. by a
        #                 soft reboot while we are cleaning up, it will become
        #                 "running deleted"; should we check and destroy it at
        #                 the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
if ('data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
try:
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if destroy_disks:
                        # Don't block on volume errors if we're trying to
                        # delete the instance, as it may be partially created
                        # or deleted
ctxt.reraise = False
LOG.warn(_("Ignoring Volume Error on vol %(vol_id)s "
"during delete %(exc)s"),
{'vol_id': vol.get('volume_id'), 'exc': exc},
instance=instance)
if destroy_disks:
self._delete_instance_files(instance)
self._cleanup_lvm(instance)
        # NOTE(haomai): destroy volumes if needed
if CONF.libvirt_images_type == 'rbd':
self._cleanup_rbd(instance)
def _cleanup_rbd(self, instance):
pool = CONF.libvirt_images_rbd_pool
volumes = libvirt_utils.list_rbd_volumes(pool)
pattern = instance['name']
def belongs_to_instance(disk):
return disk.startswith(pattern)
volumes = filter(belongs_to_instance, volumes)
if volumes:
libvirt_utils.remove_rbd_volumes(pool, *volumes)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt_images_volume_group:
vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['name']
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = libvirt_utils.list_logical_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
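        # Builds the connector dict handed to the volume service when
        # initializing a connection, e.g. {'ip': ..., 'host': ...,
        # 'initiator': 'iqn....'}, plus 'wwnns'/'wwpns' when Fibre Channel
        # HBAs are present on this host.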
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug(_('Could not determine iscsi initiator name'),
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide node names'),
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug(_('Could not determine fibre channel '
'world wide port names'),
instance=instance)
connector = {'ip': CONF.my_ip,
'host': CONF.host}
if self._initiator:
connector['initiator'] = self._initiator
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
def _cleanup_resize(self, instance, network_info):
target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
            # Deletion can fail over NFS, so retry the deletion as required.
            # Set the maximum number of attempts to 5; most tests can remove
            # the directory on the second attempt.
utils.execute('rm', '-rf', target, delay_on_retry=True,
attempts=5)
if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
def volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
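        # Dispatches to the handler registered for this volume type; e.g. a
        # connection_info with driver_volume_type 'iscsi' is routed (with the
        # default libvirt_volume_drivers setting) to
        # LibvirtISCSIVolumeDriver.<method_name>.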
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
def attach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
        # NOTE(cfb): If the volume has a custom block size, check that
        #            we are using QEMU/KVM and libvirt >= 0.10.2. The
        #            presence of a block size is considered mandatory by
        #            cinder, so we fail if we can't honor the request.
data = {}
if ('data' in connection_info):
data = connection_info['data']
if ('logical_block_size' in data or 'physical_block_size' in data):
if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
msg = _("Volume sets block size, but the current "
"libvirt hypervisor '%s' does not support custom "
"block size") % CONF.libvirt_type
raise exception.InvalidHypervisorType(msg)
if not self.has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
msg = _("Volume sets block size, but libvirt '%s' or later is "
"required.") % ver
raise exception.Invalid(msg)
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
self.set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
# cache device_path in connection_info -- required by encryptors
if 'data' in connection_info:
connection_info['data']['device_path'] = conf.source_path
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
except Exception as ex:
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
def _swap_volume(self, domain, disk_path, new_path):
"""Swap existing disk with a new block device."""
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
# allow writing to existing external volume file
domain.blockRebase(disk_path, new_path, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path,
libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
finally:
self._conn.defineXML(xml)
def swap_volume(self, old_connection_info,
new_connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
disk_dev),
'type': 'disk',
}
conf = self.volume_driver_method('connect_volume',
new_connection_info,
disk_info)
if not conf.source_path:
self.volume_driver_method('disconnect_volume',
new_connection_info,
disk_dev)
raise NotImplementedError(_("Swap only supports host devices"))
self._swap_volume(virt_dom, disk_dev, conf.source_path)
self.volume_driver_method('disconnect_volume',
old_connection_info,
disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
instance_name = instance['name']
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(xml, flags)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("During detach_volume, instance disappeared."))
else:
raise
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
def attach_interface(self, instance, image_meta, vif):
virt_dom = self._lookup_by_name(instance['name'])
inst_type = self.virtapi.instance_type_get(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, [vif])
cfg = self.vif_driver.get_config(instance, vif, image_meta,
inst_type)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError:
LOG.error(_('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(instance)
def detach_interface(self, instance, vif):
virt_dom = self._lookup_by_name(instance['name'])
inst_type = self.virtapi.instance_type_get(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
cfg = self.vif_driver.get_config(instance, vif, None, inst_type)
try:
self.vif_driver.unplug(instance, vif)
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_('detaching network adapter failed.'),
instance=instance)
raise exception.InterfaceDetachFailed(instance)
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
base = compute_utils.get_image_metadata(
context, image_service, image_id, instance)
_image_service = glance.get_remote_image_service(context, image_href)
snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)
metadata = {'is_public': False,
'status': 'active',
'name': snapshot['name'],
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
'os_type': instance['os_type'],
}
}
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm and rbd as raw
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = image_format
metadata['container_format'] = base.get('container_format', 'bare')
snapshot_name = uuid.uuid4().hex
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm" and not source_format == 'rbd':
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
            # confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
else:
pass
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self._detach_pci_devices(virt_dom,
pci_manager.get_instance_pci_devs(instance))
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path,
image_type=source_format)
if live_snapshot:
LOG.info(_("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_("Beginning cold snapshot process"),
instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt_snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE(xqueralt): libvirt needs o+x in the temp directory
os.chmod(tmpdir, 0o701)
self._live_snapshot(virt_dom, disk_path, out_path,
image_format)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
new_dom = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
new_dom = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
new_dom = self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
if new_dom is not None:
self._attach_pci_devices(new_dom,
pci_manager.get_instance_pci_devs(instance))
LOG.info(_("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
metadata,
image_file)
LOG.info(_("Snapshot image upload complete"),
instance=instance)
@staticmethod
def _wait_for_block_job(domain, disk_path, abort_on_error=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether the job has been
cleaned up by libvirt yet, or not.
:returns: True if still in progress
False if completed
"""
status = domain.blockJobInfo(disk_path, 0)
if status == -1 and abort_on_error:
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if cur == end:
return False
else:
return True
def _live_snapshot(self, domain, disk_path, out_path, image_format):
"""Snapshot an instance without downtime."""
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
domain.blockRebase(disk_path, disk_delta, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
out_path, image_format)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
"""Send a snapshot status update to Cinder.
This method captures and logs exceptions that occur
since callers cannot do anything useful with these exceptions.
Operations on the Cinder side waiting for this will time out if
a failure occurs sending the update.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
"""
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
msg = _('Failed to send updated snapshot status '
'to volume service.')
LOG.exception(msg)
def _volume_snapshot_create(self, context, instance, domain,
volume_id, snapshot_id, new_file):
"""Perform volume snapshot.
:param domain: VM that volume is attached to
:param volume_id: volume UUID to snapshot
:param snapshot_id: UUID of snapshot being created
:param new_file: relative path to new qcow2 file present on share
"""
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = [] # to be snapshotted by libvirt
disks_to_skip = [] # local disks not snapshotted
for disk in device_info.devices:
if (disk.root_name != 'disk'):
continue
if (disk.target_dev is None):
continue
if (disk.serial is None or disk.serial != volume_id):
disks_to_skip.append(disk.source_path)
continue
# disk is a Cinder volume with the correct volume_id
disk_info = {
'dev': disk.target_dev,
'serial': disk.serial,
'current_file': disk.source_path
}
# Determine path for new_file based on current path
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file),
new_file)
disks_to_snap.append((current_file, new_file_path))
if not disks_to_snap:
msg = _('Found no disk to snapshot.')
raise exception.NovaException(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
disks = []
for current_name, new_filename in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug(_("snap xml: %s") % snapshot_xml)
snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
try:
domain.snapshotCreateXML(snapshot_xml,
snap_flags | QUIESCE)
return
except libvirt.libvirtError:
msg = _('Unable to create quiesced VM snapshot, '
'attempting again with quiescing disabled.')
LOG.exception(msg)
try:
domain.snapshotCreateXML(snapshot_xml, snap_flags)
except libvirt.libvirtError:
msg = _('Unable to create VM snapshot, '
'failing volume_snapshot operation.')
LOG.exception(msg)
raise
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Create snapshots of a Cinder volume via libvirt.
:param instance: VM instance reference
:param volume_id: id of volume being snapshotted
:param create_info: dict of information used to create snapshots
- snapshot_id : ID of snapshot
- type : qcow2 / <other>
- new_file : qcow2 file created by Cinder which
becomes the VM's active image after
the snapshot is complete
"""
LOG.debug(_("volume_snapshot_create: instance: %(instance)s "
"create_info: %(c_info)s") % {'instance': instance['uuid'],
'c_info': create_info})
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
if create_info['type'] != 'qcow2':
raise exception.NovaException(_('Unknown type: %s') %
create_info['type'])
snapshot_id = create_info.get('snapshot_id', None)
if snapshot_id is None:
raise exception.NovaException(_('snapshot_id required '
'in create_info'))
try:
self._volume_snapshot_create(context, instance, virt_dom,
volume_id, snapshot_id,
create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
msg = _('Error occurred during volume_snapshot_create, '
'sending error status to Cinder.')
LOG.exception(msg)
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')
def _volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info=None):
"""
Note:
if file being merged into == active image:
do a blockRebase (pull) operation
else:
do a blockCommit operation
        Files must be adjacent in the snapshot chain.
:param instance: instance reference
:param volume_id: volume UUID
:param snapshot_id: snapshot UUID (unused currently)
:param delete_info: {
'type': 'qcow2',
'file_to_merge': 'a.img',
'merge_target_file': 'b.img' or None (if merging file_to_merge into
active image)
}
Libvirt blockjob handling required for this method is broken
in versions of libvirt that do not contain:
http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
(Patch is pending in 1.0.5-maint branch as well, but we cannot detect
libvirt 1.0.5.5 vs. 1.0.5.6 here.)
"""
if not self.has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
msg = _("Libvirt '%s' or later is required for online deletion "
"of volume snapshots.") % ver
raise exception.Invalid(msg)
LOG.debug(_('volume_snapshot_delete: delete_info: %s') % delete_info)
if delete_info['type'] != 'qcow2':
msg = _('Unknown delete_info type %s') % delete_info['type']
raise exception.NovaException(msg)
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
        # Find the device name for the disk backing this volume
my_dev = None
active_disk = None
xml = virt_dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
for disk in device_info.devices:
if (disk.root_name != 'disk'):
continue
if (disk.target_dev is None or disk.serial is None):
continue
if disk.serial == volume_id:
my_dev = disk.target_dev
active_disk = disk.source_path
if my_dev is None or active_disk is None:
msg = _('Unable to locate disk matching id: %s') % volume_id
raise exception.NovaException(msg)
LOG.debug("found dev, it's %s, with active disk: %s" %
(my_dev, active_disk))
if delete_info['merge_target_file'] is None:
# pull via blockRebase()
# Merge the most recent snapshot into the active image
rebase_disk = my_dev
rebase_base = delete_info['file_to_merge']
rebase_bw = 0
rebase_flags = 0
LOG.debug(_('disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, flags: %(flags)s') %
{'disk': rebase_disk,
'base': rebase_base,
'bw': rebase_bw,
'flags': rebase_flags})
result = virt_dom.blockRebase(rebase_disk, rebase_base,
rebase_bw, rebase_flags)
if result == 0:
LOG.debug(_('blockRebase started successfully'))
while self._wait_for_block_job(virt_dom, rebase_disk,
abort_on_error=True):
LOG.debug(_('waiting for blockRebase job completion'))
time.sleep(0.5)
else:
# commit with blockCommit()
commit_disk = my_dev
commit_base = delete_info['merge_target_file']
commit_top = delete_info['file_to_merge']
bandwidth = 0
flags = 0
result = virt_dom.blockCommit(commit_disk, commit_base, commit_top,
bandwidth, flags)
if result == 0:
LOG.debug(_('blockCommit started successfully'))
while self._wait_for_block_job(virt_dom, commit_disk,
abort_on_error=True):
LOG.debug(_('waiting for blockCommit job completion'))
time.sleep(0.5)
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
delete_info=None):
try:
self._volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
msg = _('Error occurred during volume_snapshot_delete, '
'sending error status to Cinder.')
LOG.exception(msg)
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
def reboot(self, context, instance, network_info, reboot_type='SOFT',
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug(_("Instance soft reboot failed: %s"), e)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_("Failed to soft reboot instance. "
"Trying hard reboot."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
        succeeds if the guest responds to ACPI. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
# NOTE(vish): This actually could take slighty longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
for x in xrange(CONF.libvirt_wait_soft_reboot_seconds):
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
new_domid = dom.ID()
# NOTE(ivoks): By checking domain IDs, we make sure we are
# not recreating domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
"""
self._destroy(instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self.get_instance_disk_info(instance['name'], xml,
block_device_info)
instance_dir = libvirt_utils.get_instance_path(instance)
self._create_images_and_backing(context, instance, instance_dir,
disk_info_json)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(xml, instance, network_info,
block_device_info, context=context,
reboot=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance rebooted successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
def power_off(self, instance):
"""Power off the specified instance."""
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._detach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
dom.managedSave(0)
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
dom = self._create_domain_and_network(xml, instance, network_info,
block_device_info=block_device_info, context=context)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
if self.instance_exists(instance['name']):
domain = self._lookup_by_name(instance['name'])
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
'image_id': CONF.rescue_image_id or instance['image_ref'],
'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
None,
image_meta,
rescue=True)
self._create_image(context, instance,
disk_info['mapping'],
'.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self.to_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
def poll_rebooting_instances(self, timeout, instances):
pass
def _enable_hairpin(self, xml):
interfaces = self.get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self.to_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info, context=context)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
fp = open(fpath, 'a+')
fp.write(data)
return fpath
def get_console_output(self, instance):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
# If the guest has a console logging to a file prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes '
'ignored'), remaining, instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_('Truncated console log returned, %d bytes ignored'),
remaining, instance=instance)
return log_data
@staticmethod
def get_host_ip_addr():
return CONF.my_ip
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
dom = xmlutils.safe_minidom_parse_string(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
# NOTE(rmk): We had VNC consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance['name'])
host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
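    # The dict returned above is a minimal sketch of the connection info
    # handed back to the caller; with hypothetical values it looks like:
    #   {'host': '192.0.2.10',           # CONF.vncserver_proxyclient_address
    #    'port': '5900',                 # read from the <graphics> element
    #    'internal_access_path': None}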
def get_spice_console(self, instance):
def get_spice_ports_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = xmlutils.safe_minidom_parse_string(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'spice':
return (graphic.getAttribute('port'),
graphic.getAttribute('tlsPort'))
# NOTE(rmk): We had Spice consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance['name'])
host = CONF.spice.server_proxyclient_address
return {'host': host, 'port': ports[0],
'tlsPort': ports[1], 'internal_access_path': None}
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug(_("This python runtime does not support direct I/O"))
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
os.close(f)
LOG.debug(_("Path '%(path)s' supports direct I/O") %
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug(_("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'") %
{'path': dirpath, 'ex': str(e)})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
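    # Rough illustration of the probe above (a hypothetical, standalone
    # sketch): opening a file with O_DIRECT fails with EINVAL on
    # filesystems that do not support direct I/O, e.g.
    #   fd = os.open('/tmp/.directio.test',
    #                os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
    # typically raises OSError(EINVAL) on tmpfs but succeeds on ext4.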
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
if not fs_format:
fs_format = CONF.default_ephemeral_format
if not CONF.libvirt_images_type == "lvm":
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
if fs_format:
utils.mkfs(fs_format, target, label)
def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type,
max_size=None):
self._create_local(target, ephemeral_size)
disk.mkfs(os_type, fs_label, target)
@staticmethod
def _create_swap(target, swap_mb, max_size=None):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
@staticmethod
def _get_disk_config_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'disk.config')
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _chown_disk_config_for_instance(self, instance):
disk_config = self._get_disk_config_path(instance)
if os.path.exists(disk_config):
libvirt_utils.chown(disk_config, os.getuid())
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True):
if not suffix:
suffix = ''
booted_from_volume = (
(not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping
)
# syntactic nicety
def basepath(fname='', suffix=suffix):
return os.path.join(libvirt_utils.get_instance_path(instance),
fname + suffix)
def image(fname, image_type=CONF.libvirt_images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(basepath(suffix=''))
LOG.info(_('Creating image'), instance=instance)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
        # NOTE(yaguang): For evacuate, disk.config may already exist on
        # shared storage, so chown it.
self._chown_disk_config_for_instance(instance)
        # NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
inst_type = flavors.extract_flavor(instance)
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance['root_gb'] * 1024 * 1024 * 1024
if size == 0 or suffix == '.rescue':
size = None
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
# Lookup the filesystem type if required
os_type_with_default = disk.get_fs_type_for_os_type(
instance['os_type'])
ephemeral_gb = instance['ephemeral_gb']
if 'disk.local' in disk_mapping:
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"])
fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
size = ephemeral_gb * 1024 * 1024 * 1024
image('disk.local').cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance["os_type"])
size = eph['size'] * 1024 * 1024 * 1024
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
image(blockinfo.get_eph_disk(idx)).cache(
fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * 1024 * 1024
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection only if needed
elif inject_files and CONF.libvirt_inject_partition != -2:
if booted_from_volume:
LOG.warn(_('File injection into a boot from volume '
'instance is not supported'), instance=instance)
target_partition = None
if not instance['kernel_id']:
target_partition = CONF.libvirt_inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt_type == 'lxc':
target_partition = None
if CONF.libvirt_inject_key and instance['key_data']:
key = str(instance['key_data'])
else:
key = None
net = netutils.get_injected_network_template(network_info)
metadata = instance.get('metadata')
if not CONF.libvirt_inject_password:
admin_pass = None
if any((key, net, metadata, admin_pass, files)):
# If we're not using config_drive, inject into root fs
injection_path = image('disk').path
img_id = instance['image_ref']
for inj, val in [('key', key),
('net', net),
('metadata', metadata),
('admin_pass', admin_pass),
('files', files)]:
if val:
LOG.info(_('Injecting %(inj)s into image '
'%(img_id)s'),
{'inj': inj, 'img_id': img_id},
instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)
if CONF.libvirt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def _prepare_pci_devices_for_use(self, pci_devices):
        # kvm and qemu support managed mode
# In managed mode, the configured device will be automatically
# detached from the host OS drivers when the guest is started,
# and then re-attached when the guest shuts down.
        if CONF.libvirt_type not in ('xen',):
# we do manual detach only for xen
return
try:
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
# Note(yjiang5) Spelling for 'dettach' is correct, see
# http://libvirt.org/html/libvirt-libvirt.html.
libvirt_dev.dettach()
# Note(yjiang5): A reset of one PCI device may impact other
            # devices on the same bus, thus we need two separate loops
# to detach and then reset it.
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
libvirt_dev.reset()
except libvirt.libvirtError as exc:
raise exception.PciDevicePrepareFailed(id=dev['id'],
instance_uuid=
dev['instance_uuid'],
reason=str(exc))
def _detach_pci_devices(self, dom, pci_devs):
        # For libvirt versions < 1.1.1 detaching is racy, so forbid
        # detach unless we have at least this version.
if not self.has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
if pci_devs:
reason = (_("Detaching PCI devices with libvirt < %(ver)s"
" is not permitted") %
{'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
raise exception.PciDeviceDetachFailed(reason=reason,
dev=pci_devs)
try:
for dev in pci_devs:
dom.detachDeviceFlags(self.get_guest_pci_device(dev).to_xml(),
libvirt.VIR_DOMAIN_AFFECT_LIVE)
# after detachDeviceFlags returned, we should check the dom to
# ensure the detaching is finished
xml = dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
for hdev in [d for d in guest_config.devices
if d.type == 'pci']:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev['address'])
if [int(x, 16) for x in hdbsf] ==\
[int(x, 16) for x in dbsf]:
raise exception.PciDeviceDetachFailed(reason=
"timeout",
dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_("Instance disappeared while detaching "
"a PCI device from it."))
else:
raise
def _attach_pci_devices(self, dom, pci_devs):
try:
for dev in pci_devs:
dom.attachDevice(self.get_guest_pci_device(dev).to_xml())
except libvirt.libvirtError:
LOG.error(_('Attaching PCI devices %(dev)s to %(dom)s failed.')
% {'dev': pci_devs, 'dom': dom.ID()})
raise
def get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
"""
if not self._caps:
xmlstr = self._conn.getCapabilities()
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
return self._caps
def get_host_uuid(self):
"""Returns a UUID representing the host."""
caps = self.get_host_capabilities()
return caps.host.uuid
def get_host_cpu_for_guest(self):
"""Returns an instance of config.LibvirtConfigGuestCPU
representing the host's CPU model & topology with
policy for configuring a guest to match
"""
caps = self.get_host_capabilities()
hostcpu = caps.host.cpu
guestcpu = vconfig.LibvirtConfigGuestCPU()
guestcpu.model = hostcpu.model
guestcpu.vendor = hostcpu.vendor
guestcpu.arch = hostcpu.arch
guestcpu.match = "exact"
for hostfeat in hostcpu.features:
guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
guestfeat.policy = "require"
guestcpu.features.append(guestfeat)
return guestcpu
def get_guest_cpu_config(self):
mode = CONF.libvirt_cpu_mode
model = CONF.libvirt_cpu_model
if mode is None:
if CONF.libvirt_type == "kvm" or CONF.libvirt_type == "qemu":
mode = "host-model"
else:
mode = "none"
if mode == "none":
return None
if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
% {'mode': mode, 'model': (model or "")})
# TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
# updated to be at least this new, we can kill off the elif
# blocks here
if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
elif mode == "custom":
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = model
elif mode == "host-model":
cpu = self.get_host_cpu_for_guest()
elif mode == "host-passthrough":
msg = _("Passthrough of the host CPU was requested but "
"this libvirt version does not support this feature")
raise exception.NovaException(msg)
return cpu
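    # Illustrative mapping from config to the generated <cpu> element
    # (a sketch based on the branches above, not an exhaustive list):
    #   libvirt_cpu_mode=host-model, libvirt_type=kvm
    #       -> <cpu mode='host-model'/> on a new enough libvirt
    #   libvirt_cpu_mode=custom, libvirt_cpu_model=Nehalem
    #       -> <cpu mode='custom'><model>Nehalem</model>...</cpu>
    #   libvirt_cpu_mode=none -> no <cpu> element is emitted at all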
def get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self.get_hypervisor_version())
def get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
if rescue:
diskrescue = self.get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self.get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self.get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
block_device.prepend_dev(disklocal.target_dev)})
for idx, eph in enumerate(
driver.block_device_info_get_ephemerals(
block_device_info)):
diskeph = self.get_guest_disk_config(
instance,
blockinfo.get_eph_disk(idx),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self.get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': block_device.prepend_dev(
diskswap.target_dev)})
for vol in block_device_mapping:
connection_info = vol['connection_info']
vol_dev = block_device.prepend_dev(vol['mount_device'])
info = disk_mapping[vol_dev]
cfg = self.volume_driver_method('connect_volume',
connection_info,
info)
devices.append(cfg)
self.virtapi.block_device_mapping_update(
nova_context.get_admin_context(), vol.id,
{'connection_info': jsonutils.dumps(connection_info)})
if 'disk.config' in disk_mapping:
diskconfig = self.get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
'raw')
devices.append(diskconfig)
for d in devices:
self.set_cache_mode(d)
return devices
def get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self.get_host_uuid()
sysinfo.system_uuid = instance['uuid']
return sysinfo
def get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device['address'])
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
        # only kvm and qemu support managed mode
        if CONF.libvirt_type in ('xen',):
dev.managed = 'no'
if CONF.libvirt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
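    # For reference, the libvirt XML produced for such a hostdev is
    # roughly of the form below (hypothetical address; managed='no'
    # when libvirt_type is xen):
    #   <hostdev mode='subsystem' type='pci' managed='yes'>
    #     <source>
    #       <address domain='0x0000' bus='0x04' slot='0x10' function='0x0'/>
    #     </source>
    #   </hostdev>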
def get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
inst_type = self.virtapi.instance_type_get(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
CONSOLE = "console=tty0 console=ttyS0"
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = inst_type['memory_mb'] * 1024
guest.vcpus = inst_type['vcpus']
guest.cpuset = CONF.vcpu_pin_set
quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota']
for key, value in inst_type['extra_specs'].iteritems():
scope = key.split(':')
if len(scope) > 1 and scope[0] == 'quota':
if scope[1] in quota_items:
setattr(guest, scope[1], value)
guest.cpu = self.get_guest_cpu_config()
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': root_device_name})
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
elif CONF.libvirt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.xen_hvmloader_path
if CONF.libvirt_type in ("kvm", "qemu"):
caps = self.get_host_capabilities()
if caps.host.cpu.arch in ("i686", "x86_64"):
guest.sysinfo = self.get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
else:
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name
else:
guest.os_type = vm_mode.HVM
if rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
elif instance['kernel_id']:
guest.os_kernel = os.path.join(inst_path, "kernel")
if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name,
CONSOLE))
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
guest.os_boot_dev = "hd"
if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
guest.acpi = True
guest.apic = True
# NOTE(mikal): Microsoft Windows expects the clock to be in
# "localtime". If the clock is set to UTC, then you can use a
# registry key to let windows know, but Microsoft says this is
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if instance['os_type'] == 'windows':
LOG.info(_('Configuring timezone for windows instance to '
'localtime'), instance=instance)
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if CONF.libvirt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
for cfg in self.get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
inst_type):
guest.add_device(cfg)
for vif in network_info:
cfg = self.vif_driver.get_config(instance,
vif,
image_meta,
inst_type)
guest.add_device(cfg)
if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
consolepty = vconfig.LibvirtConfigGuestSerial()
consolepty.type = "pty"
guest.add_device(consolepty)
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.type = "pty"
guest.add_device(consolepty)
# We want a tablet if VNC is enabled,
# or SPICE is enabled and the SPICE agent is disabled
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.use_usb_tablet
if need_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
if CONF.spice.enabled and CONF.spice.agent_enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
        # at the same time. We're not trying to second guess which
        # versions those are. We'll just let libvirt report the
# errors appropriately if the user enables both.
if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
if CONF.spice.enabled and \
CONF.libvirt_type not in ('lxc', 'uml', 'xen'):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
        # The QEMU guest agent is only supported on the 'qemu' and
        # 'kvm' hypervisors
if CONF.libvirt_type in ('qemu', 'kvm'):
qga_enabled = False
# Enable qga only if the 'hw_qemu_guest_agent' property is set
if (image_meta is not None and image_meta.get('properties') and
image_meta['properties'].get('hw_qemu_guest_agent')
is not None):
hw_qga = image_meta['properties']['hw_qemu_guest_agent']
if hw_qga.lower() == 'yes':
LOG.debug(_("Qemu guest agent is enabled through image "
"metadata"), instance=instance)
qga_enabled = True
if qga_enabled:
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance['name']))
guest.add_device(qga)
if CONF.libvirt_type in ('xen', 'qemu', 'kvm'):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self.get_guest_pci_device(pci_dev))
else:
if len(pci_manager.get_instance_pci_devs(instance)) > 0:
raise exception.PciDeviceUnsupportedHypervisor(
type=CONF.libvirt_type)
return guest
def to_xml(self, context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
        # We should get image metadata every time we generate XML
if image_meta is None:
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
image_meta = compute_utils.get_image_metadata(
context, image_service, image_id, instance)
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
LOG.debug(_('Start to_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
                    'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s'),
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
conf = self.get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s'),
{'instance': instance, 'xml': xml})
return xml
def _lookup_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[state],
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
'cpu_time': cpu_time,
'id': virt_dom.ID()}
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0, power_on=True):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
inst_path = None
if instance:
inst_path = libvirt_utils.get_instance_path(instance)
if CONF.libvirt_type == 'lxc':
if not inst_path:
inst_path = None
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
image = self.image_backend.image(instance, 'disk')
disk.setup_container(image.path,
container_dir=container_dir,
use_cow=CONF.use_cow_images)
if xml:
try:
domain = self._conn.defineXML(xml)
except Exception as e:
LOG.error(_("An error occurred while trying to define a domain"
" with xml: %s") % xml)
raise e
if power_on:
try:
domain.createWithFlags(launch_flags)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while trying to launch a "
"defined domain with xml: %s") %
domain.XMLDesc(0))
try:
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("An error occurred while enabling hairpin mode on "
"domain with xml: %s") % domain.XMLDesc(0))
# NOTE(uni): Now the container is running with its own private mount
# namespace and so there is no need to keep the container rootfs
# mounted in the host namespace
if CONF.libvirt_type == 'lxc':
state = self.get_info(instance)['state']
container_dir = os.path.join(inst_path, 'rootfs')
if state == power_state.RUNNING:
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
return domain
def _create_domain_and_network(self, xml, instance, network_info,
block_device_info=None, power_on=True,
context=None, reboot=False):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt_type, vol)
conf = self.volume_driver_method('connect_volume',
connection_info,
disk_info)
# cache device_path in connection_info -- required by encryptors
if (not reboot and 'data' in connection_info and
'volume_id' in connection_info['data']):
connection_info['data']['device_path'] = conf.source_path
self.virtapi.block_device_mapping_update(context, vol.id,
{'connection_info': jsonutils.dumps(connection_info)})
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
domain = self._create_domain(xml, instance=instance, power_on=power_on)
self.firewall_driver.apply_instance_filter(instance, network_info)
return domain
def get_all_block_devices(self):
"""
Return all block devices in use on this node.
"""
devices = []
for dom_id in self.list_instance_ids():
try:
domain = self._lookup_by_id(dom_id)
doc = etree.fromstring(domain.XMLDesc(0))
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def get_disks(self, instance_name):
"""
Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
domain = self._lookup_by_name(instance_name)
xml = domain.XMLDesc(0)
try:
doc = etree.fromstring(xml)
except Exception:
return []
return filter(bool,
[target.get("dev")
for target in doc.findall('devices/disk/target')])
def get_interfaces(self, xml):
"""
Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
def _get_cpuset_ids(self):
"""
        Parse the vcpu_pin_set config.
        Returns a list of pcpu ids that can be used by instances.
"""
cpuset_ids = set()
cpuset_reject_ids = set()
for rule in CONF.vcpu_pin_set.split(','):
rule = rule.strip()
# Handle multi ','
if len(rule) < 1:
continue
# Note the count limit in the .split() call
range_parts = rule.split('-', 1)
if len(range_parts) > 1:
# So, this was a range; start by converting the parts to ints
try:
start, end = [int(p.strip()) for p in range_parts]
except ValueError:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Make sure it's a valid range
if start > end:
raise exception.Invalid(_("Invalid range expression %r")
% rule)
# Add available pcpu ids to set
cpuset_ids |= set(range(start, end + 1))
elif rule[0] == '^':
# Not a range, the rule is an exclusion rule; convert to int
try:
cpuset_reject_ids.add(int(rule[1:].strip()))
except ValueError:
raise exception.Invalid(_("Invalid exclusion "
"expression %r") % rule)
else:
# OK, a single PCPU to include; convert to int
try:
cpuset_ids.add(int(rule))
except ValueError:
raise exception.Invalid(_("Invalid inclusion "
"expression %r") % rule)
# Use sets to handle the exclusion rules for us
cpuset_ids -= cpuset_reject_ids
if not cpuset_ids:
raise exception.Invalid(_("No CPUs available after parsing %r") %
CONF.vcpu_pin_set)
# This will convert the set to a sorted list for us
return sorted(cpuset_ids)
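    # Example of the parsing above (hypothetical config value):
    #   CONF.vcpu_pin_set = "0-3,^2,6"  ->  [0, 1, 3, 6]
    # Ranges are expanded, '^' entries are excluded and the result is
    # returned as a sorted list.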
    def get_vcpu_total(self):
        """Get the number of vcpus available on the physical host.
        :returns: the number of cpu cores that instances can use.
"""
if self._vcpu_total != 0:
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
            LOG.warn(_("Cannot get the number of cpus, because this "
                       "function is not implemented for this platform."))
return 0
if CONF.vcpu_pin_set is None:
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = self._get_cpuset_ids()
if available_ids[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
self._vcpu_total = len(available_ids)
return self._vcpu_total
    def get_memory_mb_total(self):
        """Get the total memory size (MB) of the physical host.
        :returns: the total amount of memory (MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt_images_type == 'lvm':
info = libvirt_utils.get_volume_group_info(
CONF.libvirt_images_volume_group)
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / (1024 ** 3)
return info
    def get_vcpu_used(self):
        """Get the number of vcpus in use on the physical host.
        :returns: the total number of vcpus currently in use.
"""
total = 0
if CONF.libvirt_type == 'lxc':
return total + 1
dom_ids = self.list_instance_ids()
for dom_id in dom_ids:
try:
dom = self._lookup_by_id(dom_id)
try:
vcpus = dom.vcpus()
except libvirt.libvirtError as e:
                    LOG.warn(_("couldn't obtain the vcpu count from domain"
" %(id)s, exception: %(ex)s") %
{"id": dom_id, "ex": e})
else:
total += len(vcpus[1])
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
            # NOTE(gtt116): give other tasks a chance to run.
greenthread.sleep(0)
return total
    def get_memory_mb_used(self):
        """Get the used memory size (MB) of the physical host.
        :returns: the total amount of memory (MB) in use.
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
m = open('/proc/meminfo').read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if CONF.libvirt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
try:
dom_mem = int(self._lookup_by_id(domain_id).info()[2])
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s")
% domain_id)
continue
# skip dom0
if domain_id != 0:
used += dom_mem
else:
                    # the mem reported by dom0 can be greater than what
                    # is actually being used
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / 1024
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail / 1024
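    # Sketch of the arithmetic above for the non-xen case, with
    # hypothetical /proc/meminfo values (in kB):
    #   MemFree: 1000000, Buffers: 200000, Cached: 300000
    #   avail = 1500000 kB, so used MB = get_memory_mb_total() - 1500000/1024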
def get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
        # Trying to be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self._conn.getHostname()
if not hasattr(self, '_hypervisor_hostname'):
self._hypervisor_hostname = hostname
elif hostname != self._hypervisor_hostname:
LOG.error(_('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'
) % {'old': self._hypervisor_hostname,
'new': hostname})
return self._hypervisor_hostname
def get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self.get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
return instance_caps
def get_cpu_info(self):
"""Get cpuinfo information.
        Obtains cpu features from virConnect.getCapabilities
        and returns them as a json string.
:return: see above description
"""
caps = self.get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
        # so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
    def _get_pcidev_info(self, devname):
        """Return a dict describing the PCI device."""
def _get_device_type(cfgdev):
"""Get a PCI device's device type.
An assignable PCI device can be a normal PCI device,
a SR-IOV Physical Function (PF), or a SR-IOV Virtual
Function (VF). Only normal PCI devices or SR-IOV VFs
            are assignable, while SR-IOV PFs are always owned by the
            hypervisor.
            Note that a PCI device with SR-IOV capability that is not
            enabled is reported as a normal PCI device.
"""
for fun_cap in cfgdev.pci_capability.fun_capability:
if len(fun_cap.device_addrs) != 0:
if fun_cap.type == 'virt_functions':
return {'dev_type': 'type-PF'}
if fun_cap.type == 'phys_function':
return {'dev_type': 'type-VF',
'phys_function': fun_cap.device_addrs}
return {'dev_type': 'type-PCI'}
virtdev = self._conn.nodeDeviceLookupByName(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
address = "%04x:%02x:%02x.%1x" % (
cfgdev.pci_capability.domain,
cfgdev.pci_capability.bus,
cfgdev.pci_capability.slot,
cfgdev.pci_capability.function)
device = {
"dev_id": cfgdev.name,
"address": address,
"product_id": cfgdev.pci_capability.product_id[2:6],
"vendor_id": cfgdev.pci_capability.vendor_id[2:6],
}
        # the 'label' field is required by the database model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev))
return device
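    # A device dict produced above looks roughly like this (hypothetical
    # values):
    #   {'dev_id': 'pci_0000_04_10_0',
    #    'address': '0000:04:10.0',
    #    'product_id': '10ed',
    #    'vendor_id': '8086',
    #    'label': 'label_8086_10ed',
    #    'dev_type': 'type-VF',
    #    'phys_function': [...]}   # only present for SR-IOV VFs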
    def _pci_device_assignable(self, device):
if device['dev_type'] == 'type-PF':
return False
return self.dev_filter.device_assignable(device)
def get_pci_passthrough_devices(self):
"""Get host pci devices information.
        Obtains pci device information from libvirt and returns it
        as a json string.
        Each device is a dictionary with the mandatory keys
        'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
        'label' and other optional device specific information.
        Refer to objects/pci_device.py for more detail on these keys.
:returns: a list of the assignable pci devices information
"""
pci_info = []
dev_names = self._conn.listDevices('pci', 0) or []
for name in dev_names:
pci_dev = self._get_pcidev_info(name)
            if self._pci_device_assignable(pci_dev):
pci_info.append(pci_dev)
return jsonutils.dumps(pci_info)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug(_("Trying to get stats for the volume %s"),
volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3],
flush_operations=vol_stats[4])
LOG.debug(
_("Got volume usage stats for the volume=%(volume)s,"
" instance=%(instance)s, rd_req=%(rd_req)d,"
" rd_bytes=%(rd_bytes)d, wr_req=%(wr_req)d,"
" wr_bytes=%(wr_bytes)d")
% stats)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name.
"""
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance_name, 'disk': disk,
'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance_name)
def interface_stats(self, instance_name, interface):
"""
Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
#TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: will be put in PCI device
:returns: dictionary containing resource info
"""
# Temporary: convert supported_instances into a string, while keeping
# the RPC version as JSON. Can be changed when RPC broadcast is removed
stats = self.host_state.get_host_stats(refresh=True)
stats['supported_instances'] = jsonutils.dumps(
stats['supported_instances'])
return stats
def check_instance_shared_storage_local(self, context, instance):
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.") % tmp_file)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * 1024) - CONF.reserved_host_disk_mb
# Compare CPU
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
# Checking shared storage connectivity
        # if block migration, instances_path should not be on shared storage.
source = CONF.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
is_volume_backed = dest_check_data.get('is_volume_backed', False)
has_local_disks = bool(
jsonutils.loads(self.get_instance_disk_info(instance['name'])))
shared = self._check_shared_storage_test_file(filename)
if block_migration:
if shared:
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
elif not shared and (not is_volume_backed or has_local_disks):
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
dest_check_data.update({"is_shared_storage": shared})
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data['instance_relative_path'] = instance_path
return dest_check_data
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually
        # compressed on compute nodes.
        # The real (compressed) disk image may grow up to the "virtual
        # disk size", which is the maximum disk size.
        # (See qemu-img info path-to-disk)
        # The destination host is considered to have enough disk space
        # if real disk size < available disk size when disk_over_commit
        # is True, otherwise if virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * (1024 ** 2)
ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
                        'Disk of instance is too large (available'
' on destination host:%(available)s '
'< need:%(necessary)s)') %
{'instance_uuid': instance['uuid'],
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
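    # Worked example of the check above (hypothetical numbers):
    #   available_mb = 10240 -> available = 10240 * 1024 ** 2 bytes (10 GiB)
    #   two disks with virt_disk_size of 6 GiB and 2 GiB -> necessary = 8 GiB
    #   available - necessary >= 0, so the pre-check passes.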
    def _compare_cpu(self, cpu_info):
        """Check whether the host cpu is compatible with a cpu given by xml.
        "xml" must be a part of libvirt.openAuth(...).getCapabilities().
        Return values follow virCPUCompareResult; live migration can
        proceed only when the comparison result is greater than zero.
        'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
        :param cpu_info: json string of cpu features (see get_cpu_info())
        :returns:
            None. If the given cpu info is not compatible with this host,
            an exception is raised.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt_type == 'xen':
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
# unknown character exists in xml, then libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = e.message
LOG.error(m, {'ret': ret, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
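    # Interpretation of the compareCPU() result above, following
    # libvirt's virCPUCompareResult enum:
    #   VIR_CPU_COMPARE_ERROR        = -1 -> InvalidCPUInfo is raised
    #   VIR_CPU_COMPARE_INCOMPATIBLE =  0 -> InvalidCPUInfo is raised
    #   VIR_CPU_COMPARE_IDENTICAL    =  1 -> live migration may proceed
    #   VIR_CPU_COMPARE_SUPERSET     =  2 -> live migration may proceed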
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.") % tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
        Returns False if the tmpfile cannot be confirmed.
"""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info,
time_module=None):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
if not time_module:
time_module = greenthread
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance["name"])
time_module.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params block_migration: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, do block migration.
:params migrate_data: implementation specific params
"""
greenthread.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
"""Do live migration.
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params migrate_data: implementation specific params
"""
# Do live migration.
try:
if block_migration:
flaglist = CONF.block_migration_flag.split(',')
else:
flaglist = CONF.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
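            # e.g. a CONF.live_migration_flag value such as
            # 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER'
            # (illustrative) becomes the bitwise OR of those libvirt
            # flag constants.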
dom = self._lookup_by_name(instance["name"])
dom.migrateToURI(CONF.live_migration_uri % dest,
logical_sum,
None,
CONF.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Live Migration failure: %s"), e,
instance=instance)
recover_method(context, instance, dest, block_migration)
# Waiting for completion of live_migration.
timer = loopingcall.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
"""waiting for live migration completion."""
try:
self.get_info(instance)['state']
except exception.InstanceNotFound:
timer.stop()
post_method(context, instance, dest, block_migration,
migrate_data)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_storage = True
is_volume_backed = False
is_block_migration = True
instance_relative_path = None
if migrate_data:
is_shared_storage = migrate_data.get('is_shared_storage', True)
is_volume_backed = migrate_data.get('is_volume_backed', False)
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
if not is_shared_storage:
# NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
# because we are ensuring that the same instance directory name
# is used as was at the source
if instance_relative_path:
instance_dir = os.path.join(CONF.instances_path,
instance_relative_path)
else:
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
# Ensure images and backing files are present.
self._create_images_and_backing(context, instance, instance_dir,
disk_info)
if is_volume_backed and not (is_block_migration or is_shared_storage):
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
        # if the image has a kernel and ramdisk, just download
        # them in the normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt_type, vol)
self.volume_driver_method('connect_volume',
connection_info,
disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retrying is necessary because requests arrive continuously and
        # concurrent requests to iptables make it complain.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
def _create_images_and_backing(self, context, instance, instance_dir,
disk_info_json):
"""
:params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params instance_dir:
instance path to use, calculated externally to handle block
migrating an instance with an old style instance path
:params disk_info_json:
json strings specified in get_instance_disk_info
"""
if not disk_info_json:
disk_info = []
else:
disk_info = jsonutils.loads(disk_info_json)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['virt_disk_size'])
elif info['backing_file']:
                # Creating the backing file follows the same path as
                # spawning an instance.
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt_images_type)
if cache_name.startswith('ephemeral'):
image.cache(fetch_func=self._create_ephemeral,
fs_label=cache_name,
os_type=instance["os_type"],
filename=cache_name,
size=info['virt_disk_size'],
ephemeral_size=instance['ephemeral_gb'])
elif cache_name.startswith('swap'):
inst_type = flavors.extract_flavor(instance)
swap_mb = inst_type['swap']
image.cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=swap_mb * (1024 ** 2),
swap_mb=swap_mb)
else:
image.cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
project_id=instance['project_id'],
size=info['virt_disk_size'])
        # if the image has a kernel and ramdisk, just download
        # them in the normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
def post_live_migration(self, context, instance, block_device_info):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(
CONF.libvirt_type, instance, block_device_info)
self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
# libvirt.xml should be made by to_xml(), but libvirt
# does not accept to_xml() result, since uuid is not
# included in to_xml() result.
dom = self._lookup_by_name(instance["name"])
self._conn.defineXML(dom.XMLDesc(0))
def get_instance_disk_info(self, instance_name, xml=None,
block_device_info=None):
"""Preparation block migration.
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:return:
json strings with below format::
"[{'path':'disk', 'type':'raw',
'virt_disk_size':'10737418240',
'backing_file':'backing_file',
'disk_size':'83886080'},...]"
"""
# NOTE (rmk): Passing the domain XML into this function is optional.
# When it is not passed, we attempt to extract it from
# the pre-existing definition.
if xml is None:
try:
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
# NOTE (rmk): When block_device_info is provided, we will use it to
# filter out devices which are actually volumes.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
target = target_nodes[cnt].attrib['dev']
if disk_type != 'file':
LOG.debug(_('skipping %s since it looks like volume'), path)
continue
if not path:
LOG.debug(_('skipping disk for %s as it does not have a path'),
instance_name)
continue
if target in volume_devices:
LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
'volume'), {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = dk_size
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return jsonutils.dumps(disk_info)
def get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
        # Disk size that all instances use: virtual_size - disk_size
instances_name = self.list_instances()
disk_over_committed_size = 0
for i_name in instances_name:
try:
disk_infos = jsonutils.loads(
self.get_instance_disk_info(i_name))
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except OSError as e:
if e.errno == errno.ENOENT:
LOG.error(_('Getting disk size of %(i_name)s: %(e)s'),
{'i_name': i_name, 'e': e})
else:
raise
except exception.InstanceNotFound:
# Instance was deleted during the check so ignore it
pass
            # NOTE(gtt116): give a chance to do other tasks.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first.
"""
return self.host_state.get_host_stats(refresh=refresh)
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
#NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.verify_base_images(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
utils.execute('ssh', dest, 'touch', tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
utils.execute('ssh', dest, 'rm', tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
disk_info_text = self.get_instance_disk_info(instance['name'],
block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
# copy disks to destination
        # rename the instance dir to <dir>_resize first, so that shared
        # storage for the instance dir (e.g. NFS) can be used.
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
if not shared_storage:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
self.power_off(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
disk_dev)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_migration"), instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
size *= 1024 * 1024 * 1024
            # If we have a non-partitioned image that we can extend,
            # ensure we're in 'raw' format so we can extend the file system.
fmt = info['type']
if (size and fmt == 'qcow2' and
disk.can_resize_image(info['path'], size) and
disk.is_image_partitionless(info['path'], use_cow=True)):
path_raw = info['path'] + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', info['path'], path_raw)
utils.execute('mv', path_raw, info['path'])
fmt = 'raw'
if size:
use_cow = fmt == 'qcow2'
disk.extend(info['path'], size, use_cow=use_cow)
if fmt == 'raw' and CONF.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
path_qcow = info['path'] + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', info['path'], path_qcow)
utils.execute('mv', path_qcow, info['path'])
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info,
image_meta)
        # assume _create_image does nothing if a target file exists.
self._create_image(context, instance,
disk_mapping=disk_info['mapping'],
network_info=network_info,
block_device_info=None, inject_files=False)
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info, power_on,
context=context)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
xml = self.to_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
domain = self._lookup_by_name(instance['name'])
output = {}
        # get cpu time, might raise an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = get_io_devices(xml)
for disk in dom_io["volumes"]:
try:
                # blockStats might raise an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(disk)
output[disk + "_read_req"] = stats[0]
output[disk + "_read"] = stats[1]
output[disk + "_write_req"] = stats[2]
output[disk + "_write"] = stats[3]
output[disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
                # interfaceStats might raise an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
        # memoryStats might raise an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
#NOTE(jogo) Currently only used for XenAPI-Pool
pass
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
pass
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""only used for Resource Pools."""
pass
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
        LOG.debug(_('Checking instance files accessibility %s'), instance_path)
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
def _delete_instance_files(self, instance):
# NOTE(mikal): a shim to handle this file not using instance objects
# everywhere. Remove this when that conversion happens.
context = nova_context.get_admin_context(read_deleted='yes')
inst_obj = instance_obj.Instance.get_by_uuid(context, instance['uuid'])
# NOTE(mikal): this code should be pushed up a layer when this shim is
# removed.
attempts = int(inst_obj.system_metadata.get('clean_attempts', '0'))
success = self.delete_instance_files(inst_obj)
inst_obj.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
inst_obj.cleaned = True
inst_obj.save(context)
def delete_instance_files(self, instance):
target = libvirt_utils.get_instance_path(instance)
if os.path.exists(target):
LOG.info(_('Deleting instance files %s'), target,
instance=instance)
try:
shutil.rmtree(target)
except OSError as e:
LOG.error(_('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target, 'e': e},
instance=instance)
# It is possible that the delete failed, if so don't mark the instance
# as cleaned.
if os.path.exists(target):
LOG.info(_('Deletion of %s failed'), target, instance=instance)
return False
LOG.info(_('Deletion of %s complete'), target, instance=instance)
return True
@property
def need_legacy_block_device_info(self):
return False
def default_root_device_name(self, instance, image_meta, root_bdm):
disk_bus = blockinfo.get_disk_bus_for_device_type(CONF.libvirt_type,
image_meta,
"disk")
cdrom_bus = blockinfo.get_disk_bus_for_device_type(CONF.libvirt_type,
image_meta,
"cdrom")
root_info = blockinfo.get_root_info(CONF.libvirt_type,
image_meta, root_bdm,
disk_bus, cdrom_bus)
return block_device.prepend_dev(root_info['dev'])
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
ephemerals, swap, block_device_mapping = block_device_lists[:3]
def _update_func(bdm):
bdm_id = bdm.get('id')
self.virtapi.block_device_mapping_update(
nova_context.get_admin_context(),
bdm_id, bdm)
blockinfo.default_device_names(CONF.libvirt_type,
instance, root_device_name,
_update_func,
ephemerals, swap,
block_device_mapping)
class HostState(object):
"""Manages information about the compute node through libvirt."""
def __init__(self, driver):
super(HostState, self).__init__()
self._stats = {}
self.driver = driver
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from libvirt."""
def _get_disk_available_least():
"""Return total real disk available least size.
The size of available disk, when block_migration command given
disk_over_commit param is FALSE.
The size that deducted real instance disk size from the total size
of the virtual disk of all instances.
"""
disk_free_gb = disk_info_dict['free']
disk_over_committed = (self.driver.
get_disk_over_committed_size_total())
# Disk available least size
available_least = disk_free_gb * (1024 ** 3) - disk_over_committed
return (available_least / (1024 ** 3))
LOG.debug(_("Updating host stats"))
disk_info_dict = self.driver.get_local_gb_info()
data = {}
#NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
data["supported_instances"] = \
self.driver.get_instance_capabilities()
data["vcpus"] = self.driver.get_vcpu_total()
data["memory_mb"] = self.driver.get_memory_mb_total()
data["local_gb"] = disk_info_dict['total']
data["vcpus_used"] = self.driver.get_vcpu_used()
data["memory_mb_used"] = self.driver.get_memory_mb_used()
data["local_gb_used"] = disk_info_dict['used']
data["hypervisor_type"] = self.driver.get_hypervisor_type()
data["hypervisor_version"] = self.driver.get_hypervisor_version()
data["hypervisor_hostname"] = self.driver.get_hypervisor_hostname()
data["cpu_info"] = self.driver.get_cpu_info()
data['disk_available_least'] = _get_disk_available_least()
data['pci_passthrough_devices'] = \
self.driver.get_pci_passthrough_devices()
self._stats = data
return data
|
the-stack_106_21148
|
import warnings
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm
from datasets.bow_processors.abstract_processor import StreamingSparseDataset
# Suppress warnings from sklearn.metrics
warnings.filterwarnings('ignore')
class BagOfWordsEvaluator(object):
def __init__(self, model, vectorizer, processor, args, split='dev'):
self.args = args
self.model = model
self.processor = processor
self.vectorizer = vectorizer
if split == 'test':
eval_examples = self.processor.get_test_examples(args.data_dir)
else:
eval_examples = self.processor.get_dev_examples(args.data_dir)
self.eval_features = vectorizer.transform([x.text for x in eval_examples])
self.eval_labels = [[float(x) for x in document.label] for document in eval_examples]
def get_scores(self, silent=False):
self.model.eval()
eval_data = StreamingSparseDataset(self.eval_features, self.eval_labels)
eval_dataloader = DataLoader(eval_data, shuffle=True, batch_size=self.args.batch_size)
total_loss = 0
nb_eval_steps = 0
target_labels = list()
predicted_labels = list()
for features, labels in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
features = features.to(self.args.device)
labels = labels.to(self.args.device)
with torch.no_grad():
logits = self.model(features)
if self.args.n_gpu > 1:
logits = logits.view(labels.size())
if self.args.is_multilabel:
predicted_labels.extend(F.sigmoid(logits).round().long().cpu().detach().numpy())
target_labels.extend(labels.cpu().detach().numpy())
loss = F.binary_cross_entropy_with_logits(logits, labels.float(), size_average=False)
else:
predicted_labels.extend(torch.argmax(logits, dim=1).cpu().detach().numpy())
target_labels.extend(torch.argmax(labels, dim=1).cpu().detach().numpy())
loss = F.cross_entropy(logits, torch.argmax(labels, dim=1))
if self.args.n_gpu > 1:
loss = loss.mean()
total_loss += loss.item()
nb_eval_steps += 1
predicted_labels, target_labels = np.array(predicted_labels), np.array(target_labels)
accuracy = metrics.accuracy_score(target_labels, predicted_labels)
precision = metrics.precision_score(target_labels, predicted_labels, average='micro')
recall = metrics.recall_score(target_labels, predicted_labels, average='micro')
f1 = metrics.f1_score(target_labels, predicted_labels, average='micro')
avg_loss = total_loss / nb_eval_steps
return [accuracy, precision, recall, f1, avg_loss], ['accuracy', 'precision', 'recall', 'f1', 'avg_loss']
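# A minimal usage sketch (illustrative; `model`, `vectorizer`, `processor`
# and `args` are assumed to come from the surrounding training script):
#
#     evaluator = BagOfWordsEvaluator(model, vectorizer, processor, args,
#                                     split='dev')
#     scores, names = evaluator.get_scores(silent=True)
#     print(dict(zip(names, scores)))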
|
the-stack_106_21149
|
import numpy as np
from bayes_implicit_solvent.molecule import Molecule
from simtk import unit
def sample_path_to_unitted_snapshots(path_to_npy_samples):
xyz = np.load(path_to_npy_samples)
traj = [snapshot * unit.nanometer for snapshot in xyz]
return traj
from glob import glob
from pkg_resources import resource_filename
data_path = resource_filename('bayes_implicit_solvent',
'data')
ll = 'gaussian' # or 'student-t'
randomize_theta0 = False
n_conf = 25
path_to_vacuum_samples = resource_filename('bayes_implicit_solvent',
'vacuum_samples/short_run/vacuum_samples_*.npy')
paths_to_samples = glob(path_to_vacuum_samples)
np.random.seed(0)
np.random.shuffle(paths_to_samples)
paths_to_samples = paths_to_samples[::2]
print('number of molecules being considered: {}'.format(len(paths_to_samples)))
def extract_cid_key(path):
i = path.find('mobley_')
j = path.find('.npy')
return path[i:j]
cids = list(map(extract_cid_key, paths_to_samples))
print('first few CIDs', cids[:5])
mols = []
n_configuration_samples = n_conf # TODO: Since this is cheaper, can probably modify this a bit...
name = 'n_config={}_{}_ll'.format(n_configuration_samples, ll)
from bayes_implicit_solvent.freesolv import cid_to_smiles
from bayes_implicit_solvent.constants import beta
def unreduce(value):
"""Input value is in units of kB T, turn it into units of kilocalorie_per_mole"""
return value / (beta * unit.kilocalorie_per_mole)
for path in paths_to_samples:
cid = extract_cid_key(path)
smiles = cid_to_smiles[cid]
vacuum_samples = sample_path_to_unitted_snapshots(path)
thinning = int(len(vacuum_samples) / n_configuration_samples)
mol = Molecule(smiles, vacuum_samples=vacuum_samples[::thinning], ll=ll)
if (unreduce(mol.experimental_value) > -15) and (unreduce(mol.experimental_value) < 5):
mols.append(mol)
else:
print('discarding {} ({}) because its free energy was outside of the range [-15, +5] kcal/mol'.format(smiles, cid))
import numpy as np
element_inds = []
all_elements = ['S', 'Cl', 'F', 'C', 'I', 'N', 'Br', 'H', 'P', 'O']
N = len(all_elements)
element_dict = dict(zip(all_elements, range(len(all_elements))))
initial_radius_dict = dict(H=0.12, C=0.17, N=0.155, O=0.15, F=0.15,
P=0.185, S=0.18, Cl=0.17, Br=0.15, I=0.15)
initial_scaling_factor_dict = dict(H=0.85, C=0.72, N=0.79, O=0.85, F=0.88,
P=0.86, S=0.96, Cl=0.80, Br=0.80, I=0.80)
for mol in mols:
element_inds.append(np.array([element_dict[a.element.symbol] for a in list(mol.top.atoms())]))
from jax import jit, vmap
from bayes_implicit_solvent.gb_models.jax_gb_models import compute_OBC_energy_vectorized
from bayes_implicit_solvent.solvation_free_energy import kj_mol_to_kT, one_sided_exp
def fast_predict_factory(distance_matrices, charges, element_ind_array):
@jit
def predict_solvation_free_energy_jax(theta):
radii_, scaling_factors_ = theta[:N], theta[N:]
radii = radii_[element_ind_array]
scaling_factors = scaling_factors_[element_ind_array]
@jit
def compute_component(distance_matrix):
return compute_OBC_energy_vectorized(distance_matrix, radii, scaling_factors, charges)
W_F = vmap(compute_component)(distance_matrices)
w_F = W_F * kj_mol_to_kT
return one_sided_exp(w_F)
_ = predict_solvation_free_energy_jax(np.ones(N * 2))
return predict_solvation_free_energy_jax
from tqdm import tqdm
print('jit-compiling things...')
fast_predictors = [fast_predict_factory(mol.distance_matrices, mol.charges, element_inds[i]) for i, mol in enumerate(tqdm(mols))]
def get_predictions(theta):
return np.array([f(theta) for f in fast_predictors])
expt_means = np.array([mol.experimental_value for mol in mols])
expt_uncs = np.array([mol.experimental_uncertainty for mol in mols])
from scipy.stats import t as student_t
from scipy.stats import norm
def log_likelihood(predictions):
#return np.sum(norm.logpdf(predictions, loc=expt_means, scale=expt_uncs))
return np.sum(student_t.logpdf(predictions, loc=expt_means,
scale=expt_uncs,
df=7))
initial_radius_array = [initial_radius_dict[a] for a in all_elements]
initial_scaling_factor_array = [initial_scaling_factor_dict[a] for a in all_elements]
prior_location = np.array(initial_radius_array + initial_scaling_factor_array) # mbondi2 set, except not differentiating H from HN...
#prior_location = np.array([0.17, 0.12, 0.72, 0.85]) # mbondi2 set
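# NOTE (illustrative): theta concatenates per-element radii and scaling
# factors, so with the ten elements above it has length 2 * N = 20;
# theta[:N] are the radii (presumably in nm, matching initial_radius_dict)
# and theta[N:] are the dimensionless OBC scaling factors.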
if __name__ == '__main__':
def log_likelihood_of_params(theta):
predictions = get_predictions(theta)
return log_likelihood(predictions)
from bayes_implicit_solvent.samplers import random_walk_mh
#x0 = prior_location
#np.random.seed(5)
#x0[:N] += np.random.randn(N) * 0.01
#x0[N:] += np.random.randn(N) * 0.05
x0 = np.zeros(len(prior_location))
np.random.seed(0)
    x0[:N] = np.random.uniform(0.01, 0.5, N)  # random initial radii
    x0[N:] = np.random.uniform(0.5, 1.5, N)  # random initial scaling factors
def log_prob_fun(theta):
if (min(theta) < 0.01) or (max(theta) > 5):
return -np.inf
else:
return np.sum(norm.logpdf(theta - prior_location)) + log_likelihood_of_params(theta)
mh_result = random_walk_mh(x0, log_prob_fun, n_steps=100000, stepsize=0.001)
np.savez('freesolv_mh_jax_lotso_iterations_different_start.npz',
traj=mh_result[0],
log_prob_traj=mh_result[1],
expt_means=expt_means,
expt_uncs=expt_uncs,
cids=cids,
elements=all_elements,
)
|
the-stack_106_21152
|
# Manga API
from bs4 import BeautifulSoup
import requests
# Finder #
Manga_Name = {'Mangakakalot' : [['ul', 'class', 'manga-info-text'], 'h1'],
'Manganelo' : [['div', 'class', 'story-info-right'], 'h1']
}
Image_Link = {'Mangakakalot' : [['div', 'class', 'manga-info-pic']],
'Manganelo' : [['div', 'class', 'story-info-left']]
}
Manga_Genres = {'Mangakakalot' : [['ul', 'class', 'manga-info-text'], ['li', 6]],
'Manganelo' : [['div', 'class', 'story-info-right'], ['tr', 3]]
}
Manga_Desc = {'Mangakakalot' : [['div', 'id', 'noidungm']],
'Manganelo' : [['div', 'id', 'panel-story-info-description']]
}
Hentai_Details = {'nhentai' : [['div', 'id', 'cover'], # Cover
['div', 'id', 'info'], # Container
['h1', 'class', 'title'], # Title
['section', 'id', 'tags'], # Inside Container
['div'], # Detail Sections
['span', 'class', 'name'], # Details
]
}
##########
class Hentai:
def __init__(Self):
Self.StartLink = 'https://nhentai.net/g/'
# Private
def _GetDetails(Self, N_Code) -> dict:
# Create Dict
_ret_details_ = {'Code':N_Code, 'Image':'None', 'Title':'None', 'Parodies':'None', 'Characters':'None', 'Tags':'None', 'Artists':'None', 'Groups':'None', 'Languages':'None', 'Categories':'None', 'Pages':'None'}
_details_lineup_ = ['Parodies', 'Characters', 'Tags', 'Artists', 'Groups', 'Languages', 'Categories', 'Pages', 'BREAK']
# Get Sauce
res = requests.get(Self.StartLink + N_Code)
H_Src = BeautifulSoup(res.content, 'html.parser')
# Set Finder
Finder = Hentai_Details.get('nhentai')
# Get Image
H_Image = H_Src.find(Finder[0][0], {Finder[0][1]:Finder[0][2]}).find('img')['data-src']
_ret_details_['Image'] = H_Image # Store to Dictionary
# Get Container
H_Src = H_Src.find(Finder[1][0], {Finder[1][1]:Finder[1][2]})
## Get Title
H_Title = H_Src.find(Finder[2][0], {Finder[2][1]:Finder[2][2]}).text
_ret_details_['Title'] = H_Title # Store to Dictionary
## Get Inside Container
H_Src = H_Src.find(Finder[3][0], {Finder[3][1]:Finder[3][2]})
### Get Details
Store_Details = []
Details = H_Src.find_all(Finder[4][0])
for Index, Detail in enumerate(Details):
# Set Detail Title
_DetailTitle = _details_lineup_[Index]
if _DetailTitle == 'BREAK':
break
# Get Details Under Each Div
_DetailPacker = []
_details = Detail.find_all(Finder[5][0], {Finder[5][1]:Finder[5][2]})
for _detail in _details:
_DetailPacker.append(_detail.text)
# Check For Detail Packer Content
if len(_DetailPacker) == 0:
_DetailPacker = ['None']
# Push To Dictionary
_ret_details_[_DetailTitle] = _DetailPacker
#### Return Results ####
return _ret_details_
# Public
def generate_random_sauce(Self) -> dict:
# Get Sauce
Code = requests.head('https://nhentai.net/random/').headers["Location"]
Code = Code[3:-1]
# Get Sauce Details
r_sauce_jar = Self._GetDetails(Code)
# Return To Me As Dictionary
return r_sauce_jar
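# Minimal usage sketch (illustrative; requires network access and assumes
# the site markup still matches the selectors above):
#
#     api = Hentai()
#     details = api.generate_random_sauce()
#     print(details['Code'], details['Title'], details['Tags'])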
|
the-stack_106_21153
|
import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="colorbar", parent_name="streamtube", **kwargs):
super(ColorbarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "ColorBar"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
                Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in *pixels. Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format We add one item to d3's
date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f"
would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.streamt
ube.colorbar.Tickformatstop` instances or dicts
with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.streamtube.colorbar.tickformatstopdefaults),
sets the default property values to use for
elements of streamtube.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.streamtube.colorba
r.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
streamtube.colorbar.title.font instead. Sets
this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
streamtube.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
""",
),
**kwargs
)
|
the-stack_106_21156
|
import sublime
import os
import time
import base64
import logging
import tempfile
import threading
from queue import Queue, Empty
from .ptty import TerminalPtyProcess, TerminalScreen, TerminalStream
from .utils import responsive, intermission
from .view import panel_window, view_size
from .key import get_key_code
from .image import get_image_info, image_resize
IMAGE = """
<style>
body {{
margin: 1px;
}}
</style>
<img src="data:image/{what};base64,{data}" width="{width}" height="{height}"/>
"""
logger = logging.getLogger('Terminus')
class Terminal:
_terminals = {}
_detached_terminals = []
def __init__(self, view=None):
self._title = ""
self.view = view
self._cached_cursor = [0, 0]
self._cached_cursor_is_hidden = [True]
self.image_count = 0
self.images = {}
self._strings = Queue()
self._pending_to_send_string = [False]
self._pending_to_clear_scrollback = [False]
self._pending_to_reset = [None]
self.lock = threading.Lock()
@classmethod
def from_id(cls, vid):
if vid not in cls._terminals:
return None
return cls._terminals[vid]
@classmethod
def from_tag(cls, tag):
for terminal in cls._terminals.values():
if terminal.tag == tag:
return terminal
return None
@classmethod
def cull_terminals(cls):
terminals_to_close = []
for terminal in cls._terminals.values():
if not terminal.is_hosted():
terminals_to_close.append(terminal)
for terminal in terminals_to_close:
terminal.close()
def attach_view(self, view, offset=None):
with self.lock:
self.view = view
self.detached = False
Terminal._terminals[view.id()] = self
if self in Terminal._detached_terminals:
Terminal._detached_terminals.remove(self)
self.view.settings().erase("terminus_view.detached")
# allow screen to be rerendered
self.screen.dirty.update(range(self.screen.lines))
self.set_offset(offset)
def detach_view(self):
with self.lock:
self.detached = True
Terminal._detached_terminals.append(self)
if self.view.id() in Terminal._terminals:
del Terminal._terminals[self.view.id()]
self.view.settings().set("terminus_view.detached", True)
self.view = None
@responsive(period=1, default=True)
def is_hosted(self):
if self.detached:
# irrelevant if terminal is detached
return True
if self.panel_name:
return panel_window(self.view) or False
else:
return self.view.window() or False
def _need_to_render(self):
flag = False
if self.screen.dirty:
flag = True
elif self.screen.cursor.x != self._cached_cursor[0] or \
self.screen.cursor.y != self._cached_cursor[1]:
flag = True
elif self.screen.cursor.hidden != self._cached_cursor_is_hidden[0]:
flag = True
if flag:
self._cached_cursor[0] = self.screen.cursor.x
self._cached_cursor[1] = self.screen.cursor.y
self._cached_cursor_is_hidden[0] = self.screen.cursor.hidden
return flag
def _start_rendering(self):
data = [""]
done = [False]
@responsive(period=1, default=False)
def was_resized():
size = view_size(self.view)
return self.screen.lines != size[0] or self.screen.columns != size[1]
def reader():
while True:
try:
temp = self.process.read(1024)
except EOFError:
break
with self.lock:
data[0] += temp
if done[0] or not self.is_hosted():
logger.debug("reader breaks")
break
done[0] = True
threading.Thread(target=reader).start()
def renderer():
def feed_data():
if len(data[0]) > 0:
logger.debug("receieved: {}".format(data[0]))
self.stream.feed(data[0])
data[0] = ""
while True:
with intermission(period=0.03), self.lock:
feed_data()
if not self.detached:
if was_resized():
self.handle_resize()
self.view.run_command("terminus_show_cursor")
if self._need_to_render():
self.view.run_command("terminus_render")
self.screen.dirty.clear()
if done[0] or not self.is_hosted():
logger.debug("renderer breaks")
break
feed_data()
done[0] = True
sublime.set_timeout(lambda: self.cleanup())
threading.Thread(target=renderer).start()
def set_offset(self, offset=None):
if offset is not None:
self.offset = offset
else:
if self.view and self.view.size() > 0:
view = self.view
self.offset = view.rowcol(view.size())[0] + 1
else:
self.offset = 0
logger.debug("activating with offset %s", self.offset)
def activate(
self, config_name, cmd, cwd=None, env=None, title=None,
panel_name=None, tag=None, auto_close=True, cancellable=False, timeit=False):
view = self.view
if view:
self.detached = False
Terminal._terminals[view.id()] = self
else:
Terminal._detached_terminals.append(self)
self.detached = True
self.config_name = config_name
self.panel_name = panel_name
self.tag = tag
self.auto_close = auto_close
self.cancellable = cancellable
self.timeit = timeit
if timeit:
self.start_time = time.time()
self.default_title = view.name() if view.name() else title
if view:
self.title = title
self.set_offset()
size = view_size(view or sublime.active_window().active_view(), (40, 80))
logger.debug("view size: {}".format(str(size)))
_env = os.environ.copy()
_env.update(env)
self.process = TerminalPtyProcess.spawn(cmd, cwd=cwd, env=_env, dimensions=size)
self.screen = TerminalScreen(
size[1], size[0], process=self.process, history=10000,
clear_callback=self.clear_callback, reset_callback=self.reset_callback)
self.stream = TerminalStream(self.screen)
self.screen.set_show_image_callback(self.show_image)
self._start_rendering()
def close(self):
logger.debug("close")
self.process.terminate()
vid = self.view.id()
if vid in self._terminals:
del self._terminals[vid]
def cleanup(self, by_user=False):
logger.debug("cleanup")
if not self.view or self.view.id() not in self._terminals:
return
if self.view.settings().get("terminus_view.closed"):
return
self.view.run_command("terminus_render")
        # The process might become orphaned, so make sure it is terminated.
        # However, we do not immediately remove it from _terminals, so that
        # copy, paste etc. remain functional.
self.process.terminate()
if self.process.exitstatus == 0 and self.auto_close:
self.view.run_command("terminus_close")
self.view.run_command("terminus_trim_trailing_lines")
if by_user:
self.view.run_command("append", {"characters": "[Cancelled]"})
elif self.timeit:
if self.process.exitstatus == 0:
self.view.run_command(
"append",
{"characters": "[Finished in {:0.2f}s]".format(time.time() - self.start_time)})
else:
self.view.run_command(
"append",
{"characters": "[Finished in {:0.2f}s with exit code {}]".format(
time.time() - self.start_time, self.process.exitstatus)})
elif self.process.exitstatus is not None:
self.view.run_command(
"append",
{"characters": "process is terminated with return code {}.".format(
self.process.exitstatus)})
self.view.sel().clear()
if not self.panel_name and self.view.settings().get("result_file_regex"):
            # if it is a tab based build, we need to refocus to enable next_result
window = self.view.window()
if window:
active_view = window.active_view()
self.view.window().focus_view(self.view)
if active_view:
self.view.window().focus_view(active_view)
# to avoid being reactivated
self.view.settings().set("terminus_view.closed", True)
def handle_resize(self):
size = view_size(self.view)
logger.debug("handle resize {} {} -> {} {}".format(
self.screen.lines, self.screen.columns, size[0], size[1]))
try:
            # pywinpty will raise a runtime error
self.process.setwinsize(*size)
self.screen.resize(*size)
except RuntimeError:
pass
@property
def title(self):
return self._title
@title.setter
def title(self, value):
if not self.detached:
value = value if value else self.config_name
self._title = value
self.view.set_name(value)
def clear_callback(self):
self._pending_to_clear_scrollback[0] = True
def reset_callback(self):
if self._pending_to_reset[0] is None:
self._pending_to_reset[0] = False
else:
self._pending_to_reset[0] = True
def send_key(self, *args, **kwargs):
kwargs["application_mode"] = self.application_mode_enabled()
kwargs["new_line_mode"] = self.new_line_mode_enabled()
self.send_string(get_key_code(*args, **kwargs), normalized=False)
def send_string(self, string, normalized=True):
if normalized:
# normalize CR and CRLF to CR (or CRLF if LNM)
string = string.replace("\r\n", "\n")
if self.new_line_mode_enabled():
string = string.replace("\n", "\r\n")
else:
string = string.replace("\n", "\r")
no_queue = not self._pending_to_send_string[0]
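        # Short writes go straight to the pty; longer (or queued) strings
        # are split into 512-byte chunks and drained by a background thread
        # (see process_send_string below).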
if no_queue and len(string) <= 512:
logger.debug("sent: {}".format(string[0:64] if len(string) > 64 else string))
self.process.write(string)
else:
for i in range(0, len(string), 512):
self._strings.put(string[i:i+512])
if no_queue:
self._pending_to_send_string[0] = True
threading.Thread(target=self.process_send_string).start()
def process_send_string(self):
while True:
try:
string = self._strings.get(False)
logger.debug("sent: {}".format(string[0:64] if len(string) > 64 else string))
self.process.write(string)
except Empty:
self._pending_to_send_string[0] = False
return
else:
time.sleep(0.1)
def bracketed_paste_mode_enabled(self):
return (2004 << 5) in self.screen.mode
def new_line_mode_enabled(self):
return (20 << 5) in self.screen.mode
def application_mode_enabled(self):
return (1 << 5) in self.screen.mode
def find_image(self, pt):
view = self.view
for pid in self.images:
region = view.query_phantom(pid)[0]
if region.end() == pt:
return pid
return None
def show_image(self, data, args, cr=None):
view = self.view
if "inline" not in args or not args["inline"]:
return
cursor = self.screen.cursor
pt = view.text_point(self.offset + cursor.y, cursor.x)
databytes = base64.decodebytes(data.encode())
image_info = get_image_info(databytes)
if not image_info:
logger.error("cannot get image info")
return
what, width, height = image_info
_, image_path = tempfile.mkstemp(suffix="." + what)
with open(image_path, "wb") as f:
f.write(databytes)
width, height = image_resize(
width,
height,
args["width"] if "width" in args else None,
args["height"] if "height" in args else None,
view.em_width(),
view.viewport_extent()[0] - 3 * view.em_width(),
args["preserveAspectRatio"] if "preserveAspectRatio" in args else 1
)
if self.find_image(pt):
self.view.run_command("terminus_insert", {"point": pt, "character": " "})
pt += 1
self.image_count += 1
p = view.add_phantom(
"terminus_image#{}".format(self.image_count),
sublime.Region(pt, pt),
IMAGE.format(
what=what,
data=data,
width=width,
height=height,
count=self.image_count),
sublime.LAYOUT_INLINE,
)
self.images[p] = image_path
if cr:
self.screen.index()
def clean_images(self):
view = self.view
for pid in list(self.images.keys()):
region = view.query_phantom(pid)[0]
if region.empty() and region.begin() == 0:
view.erase_phantom_by_id(pid)
if pid in self.images:
try:
os.remove(self.images[pid])
except Exception:
pass
del self.images[pid]
def __del__(self):
# make sure the process is terminated
self.process.terminate(force=True)
# remove images
for image_path in list(self.images.values()):
try:
os.remove(image_path)
except Exception:
pass
if self.process.isalive():
logger.debug("process becomes orphaned")
else:
logger.debug("process is terminated")
|
the-stack_106_21157
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('video/<str:video_code>/', views.watch_video, name='watch_video'),
path('video/add_comment', views.add_comment, name='add_comment'),
path('video/add_like/<str:video_code>/', views.add_like, name='add_like'),
path('profile/<str:session_username>/', views.profile, name='profile'),
path('dashboard/<str:session_username>/', views.dashboard, name='dashboard'),
path('add_subscriber/<viewer>/', views.add_sub, name='add_subscriber'),
path('upload/', views.upload_video, name='upload'),
path('edit_video/<str:video_code>', views.edit_video, name='edit_video'),
path('delete_video/', views.delete_video, name='delete_video'),
path('update_details/', views.update_details, name='update_details'),
path('signup/', views.signup, name='signup'),
path('login/', views.user_login, name='login'),
path('logout/', views.user_logout, name='logout'),
path('search/', views.search, name='search'),
]
|