code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2-1.05M) |
---|---|---|---|---|---|
#!/usr/bin/env python
# Copyright 2015 The ChEMBL group.
# Author: Nathan Dedman <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an S3-like storage server, using Pymongo, MongoDB and Tornado.
Useful to test features that will eventually run on S3, or if you want to
run something locally that was once running on S3.
We don't support all the features of S3, but it does work with the
standard S3 client for the most basic semantics. To use the standard
S3 client with this module:
c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
is_secure=False)
c.create_bucket("mybucket")
c.put("mybucket", "mykey", "a value")
print c.get("mybucket", "mykey").body
Use s3cmd command line tool:
s3cmd mb s3://wibble
s3cmd put mytestfile.txt s3://wibble
s3cmd rb s3://wibble --force --recursive
"""
import bisect
import datetime
import hashlib
import os
import os.path
import urllib
import logging
import glob
import getpass
import re
from tornado import escape
from tornado import httpserver
from tornado import ioloop
from tornado import web
from pymongo import MongoClient
from pymongo import ASCENDING
import bson
from bson.binary import Binary
from tornado.log import enable_pretty_logging
def start(port,debug=False):
"""Starts the pymongo S3 server"""
application = mongoS3(debug)
http_server = httpserver.HTTPServer(application)
# Utilize all CPUs
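    # http_server.start(0) forks one server process per CPU core; Tornado's
    # debug/autoreload machinery does not work across forked processes, so the
    # debug path below falls back to a single-process listen().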
if not debug:
http_server.bind(port)
http_server.start(0)
else:
enable_pretty_logging()
http_server.listen(port)
ioloop.IOLoop.current().start()
class mongoS3(web.Application):
"""Implementation of an S3-like storage server based on MongoDB using PyMongo
* Added compatibility with the s3cmd command line utility
    * File names of arbitrary length are supported (stored as metadata)
    * Multipart upload supported
"""
def __init__(self, debug=False):
web.Application.__init__(self, [
(r"/", RootHandler),
(r"/([^/]+)/(.+)", ObjectHandler),
(r"/([^/]+)/", BucketHandler),
(r"/ping",StatusHandler),
(r'/(favicon.ico)', web.StaticFileHandler, {"path": ""}),
# s3cmd
('http://s3.amazonaws.com/', s3cmdlHandler),
(r"(http://.+.s3.amazonaws.com.*)", s3cmdlHandler),
],debug=debug)
# Lazy connect the client
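        # connect=False defers the actual MongoDB connection until the first
        # operation, so no sockets are opened before start() forks worker
        # processes (PyMongo clients are not fork-safe).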
self.client = MongoClient(connect=False)
self.S3 = self.client.S3
self.metadata = self.client.metadata
class StatusHandler(web.RequestHandler):
SUPPORTED_METHODS = ("GET")
# Send a simple 'PONG' to show we're alive!
def get(self):
self.set_header('Content-Type', 'application/json')
self.finish({'response':'pong','UTC':datetime.datetime.now().isoformat()})
class BaseRequestHandler(web.RequestHandler):
SUPPORTED_METHODS = ("PUT", "GET", "DELETE", "HEAD","POST","OPTIONS")
def _get_bucket_names(self):
return self.application.S3.collection_names(include_system_collections=False)
def render_xml(self, value,**kwargs):
assert isinstance(value, dict) and len(value) == 1
self.set_header("Content-Type", "application/xml; charset=UTF-8")
name = value.keys()[0]
parts = []
parts.append('<' + escape.utf8(name) +' xmlns="http://s3.amazonaws.com/doc/2006-03-01/">')
parts.append('<Owner><ID>'+getpass.getuser()+'</ID><DisplayName>'+getpass.getuser()+'</DisplayName></Owner>')
self._render_parts(value.values()[0], parts)
parts.append('</' + escape.utf8(name) + '>')
if 'code' in kwargs.keys():
self.set_status(kwargs['code'])
self.finish('<?xml version="1.0" encoding="UTF-8"?>' +
''.join(parts))
def _render_parts(self, value, parts=[]):
if isinstance(value, (unicode, bytes)):
parts.append(escape.xhtml_escape(value))
elif isinstance(value, int) or isinstance(value, long):
parts.append(str(value))
elif isinstance(value, datetime.datetime):
parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
elif isinstance(value, dict):
for name, subvalue in value.iteritems():
if not isinstance(subvalue, list):
subvalue = [subvalue]
for subsubvalue in subvalue:
parts.append('<' + escape.utf8(name) + '>')
self._render_parts(subsubvalue, parts)
parts.append('</' + escape.utf8(name) + '>')
else:
raise Exception("Unknown S3 value type %r", value)
def _error(self,**kwargs):
bucket_name = object_name = None
if hasattr(self,'bucket_name'):
bucket_name = self.bucket_name
if hasattr(self,'object_name'):
object_name = self.object_name
s3errorcodes_bucket = {'NSK':'NoSuchKey','NSB':'NoSuchBucket','BNE':'BucketNotEmpty',"BAE":"BucketAlreadyExists"}
s3errorcodes_object = {'NSB':'NoSuchBucket','NSK':'NoSuchKey'}
errormessage_object = {404:'The specified key does not exist.'}
errormessage_bucket = {404:{'NSB':'The specified bucket does not exist.'},409:{'BNE':'The bucket you tried to delete is not empty.','BAE':'The requested bucket name is not available. Please select a different name and try again.'}}
if self.__class__.__name__== 'BucketHandler':
s3errorcodes = s3errorcodes_bucket
errormessage = errormessage_bucket
bucket_name = self.bucket_name
object_name = None
if self.__class__.__name__== 'ObjectHandler':
s3errorcodes = s3errorcodes_object
errormessage = errormessage_object
if hasattr(self,'s3cmd'):
returnDict = {'Error':{}}
errorDict = returnDict['Error']
errorDict['Code'] = s3errorcodes[kwargs['s3code']]
if self.__class__.__name__ == 'BucketHandler':
errorDict['Message'] = errormessage[kwargs['code']][kwargs['s3code']]
else:
errorDict['Message'] = errormessage[kwargs['code']]
errorDict['Resource'] = '/%s/%s' % (bucket_name,object_name)
self.render_xml(returnDict,code=kwargs['code'])
else:
raise web.HTTPError(kwargs['code'])
class s3cmdlHandler(web.RequestHandler):
def prepare(self):
# Handle s3 urls here
self.s3cmd = True
if self.application.settings['debug']:
print "%s %s" % (self.__class__.__name__, self.request.method)
s3match = re.match('(?:http://)(.+)(?:.s3.amazonaws.com\/)(.*)',self.request.uri)
self.prefix = self.get_argument("prefix", u"")
self.delimiter = self.get_argument("delimiter", u"")
self.partNumber = self.get_argument("partNumber",u"")
self.uploadId = self.get_argument("uploadId",u"")
try:
bucket_name = s3match.group(1)
except:
bucket_name = False
try:
if s3match.group(2).startswith('?'):
                object_name = self.prefix
else:
object_name = s3match.group(2)
except:
object_name = False
if object_name:
if '?uploads' in object_name:
self.uploads = True
if '?delete' in object_name:
self.delete = True
if object_name:
object_name = object_name.split('?')[0]
if self.request.uri == 'http://s3.amazonaws.com/':
self.__class__ = RootHandler
if bucket_name and not object_name:
self.__class__ = BucketHandler
self.bucket_name = bucket_name
if bucket_name and object_name:
self.__class__ = ObjectHandler
self.bucket_name = bucket_name
self.object_name = object_name
class RootHandler(BaseRequestHandler):
def get(self):
buckets = []
bucket_names = self._get_bucket_names()
for bucket_name in bucket_names:
bucket_meta = self.application.metadata[bucket_name].find()
buckets.append({
"Name": bucket_name,
"CreationDate":bucket_meta.next()['created'],
})
self.render_xml({"ListAllMyBucketsResult": {
"Buckets": {"Bucket": buckets},
}})
class BucketHandler(BaseRequestHandler):
def _get_bucket_cursor(self,bucket_name):
return self.application.S3[bucket_name]
def _remove_bucket(self,bucket_name):
self.application.S3[bucket_name].drop()
self.application.metadata[bucket_name].drop()
def get(self, bucket_name):
if hasattr(self,'bucket_name'):
bucket_name = self.bucket_name
prefix = self.get_argument("prefix", u"")
marker = self.get_argument("marker", u"")
max_keys = int(self.get_argument("max-keys", 50000))
terse = int(self.get_argument("terse", 0))
if bucket_name not in self._get_bucket_names():
self._error(code=404,s3code='NSB')
return
objects = []
contents = []
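        # Documents carrying a partNumber are individual multipart-upload
        # chunks, so exclude them from the bucket listing below.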
for bucket_object in self._get_bucket_cursor(bucket_name).find({'partNumber': None}):
objects.append(bucket_object)
start_pos = 0
# To do:
# Fix bisection by dict lookup
if marker:
start_pos = bisect.bisect_right(objects, marker, start_pos)
if prefix:
start_pos = bisect.bisect_left(objects, prefix, start_pos)
truncated = False
for _object in objects[start_pos:]:
if not _object['object_name'].startswith(prefix):
break
if len(contents) >= max_keys:
truncated = True
break
c = {"Key": _object['object_name'],"ETag":_object['md5']}
if not terse:
c.update({
"LastModified":_object['added'],
"Size":_object['size'],
})
contents.append(c)
marker = _object['object_name']
self.render_xml({"ListBucketResult": {
"Name": bucket_name,
"Prefix": prefix,
"Marker": marker,
"MaxKeys": max_keys,
"IsTruncated": truncated,
"Contents": contents
}})
def put(self, bucket_name):
# Create bucket and metadata
if hasattr(self,'bucket_name'):
bucket_name = self.bucket_name
if bucket_name in self._get_bucket_names():
self._error(code=409,s3code='BAE')
return
self.application.S3.create_collection(bucket_name)
self.application.metadata[bucket_name].insert({"created":datetime.datetime.utcnow()})
self.application.S3[bucket_name].ensure_index([("partNumber",ASCENDING)])
self.finish()
def delete(self, bucket_name):
if hasattr(self,'bucket_name'):
bucket_name = self.bucket_name
if bucket_name not in self._get_bucket_names():
self._error(code=404,s3code='NSB')
return
if self.application.S3[bucket_name].count() > 0:
self._error(code=409,s3code='BNE')
return
self._remove_bucket(bucket_name)
self.set_status(204)
self.finish()
def post(self, bucket_name):
if hasattr(self,'bucket_name'):
bucket_name = self.bucket_name
if bucket_name not in self._get_bucket_names():
self._error(code=404,s3code='NSB')
return
self._remove_bucket(bucket_name)
self.set_status(204)
self.finish()
def head(self,bucket_name):
if hasattr(self,'bucket_name'):
bucket_name = self.bucket_name
if bucket_name not in self._get_bucket_names():
self._error(code=404,s3code='NSB')
return
self.set_header('Date', '"%s"' % datetime.datetime.utcnow())
self.finish()
class ObjectHandler(BaseRequestHandler):
def _object_md5(self,bucket_object):
object_md5 = hashlib.md5()
object_md5.update(bucket_object)
return object_md5.hexdigest()
def _get_bucket_object(self,**kwargs):
if '_id' in kwargs.keys():
object_id = kwargs['_id']
object_field = '_id'
if 'object_name' in kwargs.keys():
object_id = kwargs['object_name']
object_field = 'object_name'
if 'bucket_name' in kwargs.keys():
bucket_name = kwargs['bucket_name']
return self.application.S3[bucket_name].find_one({object_field:object_id},{'partNumber': None})
def get(self,*args):
if hasattr(self,'bucket_name') and hasattr(self,'object_name'):
bucket_name = self.bucket_name
object_name = self.object_name
else:
bucket_name,object_name = args
prefix = self.get_argument("prefix", u"")
marker = self.get_argument("marker", u"")
acl = self.get_argument("acl", u"")
object_name = urllib.unquote(object_name)
if bucket_name not in self._get_bucket_names():
self._error(code=404,s3code='NSB')
return
bucket_object = self._get_bucket_object(bucket_name=bucket_name,object_name=object_name)
if bucket_object:
self.set_header("Content-Type", "application/unknown")
self.set_header('etag', '"%s"' % bucket_object['md5'])
self.set_header("Last-Modified", bucket_object['added'])
if 'multipart' in bucket_object.keys():
print "MULTIPART"
self.set_header("Content-Length",bucket_object['size'])
for parts in self.application.S3[bucket_name].find({'object_name':object_name},{'partNumber': {'$exists':'true'}}):
print parts['partNumber']
self.write(parts['object'])
self.flush()
self.finish()
else:
self.finish(bucket_object['object'])
else:
self._error(code=404,s3code='NSK')
return
def put(self, *args):
        if hasattr(self,'bucket_name') and hasattr(self,'object_name'):
bucket_name = self.bucket_name
object_name = self.object_name
else:
bucket_name,object_name = args
original_name = urllib.unquote(object_name)
if bucket_name not in self._get_bucket_names():
self._error(code=404,s3code='NSB')
return
        # Insert the object, then compute the MD5 and size of the stored object, update the record and return.
        # If the object already exists, overwrite its contents and refresh the 'updated' timestamp.
existance = self.application.S3[bucket_name].find({"object_name":original_name})
        if existance.count() > 0 and getattr(self,'partNumber',None) is None:
existance_id = existance.next()['_id']
update_object = Binary(self.request.body)
object_size = update_object.__len__()
object_md5 = self._object_md5(update_object)
self.application.S3[bucket_name].update({"_id":existance_id},{'$set': {'object':update_object,'md5':object_md5,'updated':datetime.datetime.utcnow(),'size':object_size}})
self.set_header('etag', '"%s"' % object_md5)
self.finish()
return
        if getattr(self,'partNumber',None):
tobeinserted = {'object_name':original_name,'object':Binary(self.request.body),'partNumber':self.partNumber}
else:
tobeinserted = {'object_name':original_name,'object':Binary(self.request.body)}
inserted_object_id = self.application.S3[bucket_name].insert_one(tobeinserted).inserted_id
inserted_object = self._get_bucket_object(bucket_name=bucket_name,_id=inserted_object_id)
object_size = inserted_object['object'].__len__()
object_md5 = self._object_md5(inserted_object['object'])
self.application.S3[bucket_name].update({'_id':inserted_object_id},{'$set': {'md5':object_md5,'updated':datetime.datetime.utcnow(),'added':datetime.datetime.utcnow(),'size':object_size}})
self.set_header('etag', '"%s"' % object_md5)
self.finish()
def post(self, *args):
# Add entry into bucket and flag as multipart upload
        if hasattr(self,'bucket_name') and hasattr(self,'object_name'):
bucket_name = self.bucket_name
object_name = self.object_name
else:
bucket_name,object_name = args
if bucket_name not in self._get_bucket_names():
self._error(code=404,s3code='NSB')
return
original_name = urllib.unquote(object_name)
bucket_object = Binary(self.request.body)
object_size = bucket_object.__len__()
object_md5 = self._object_md5(bucket_object)
        if getattr(self,'uploadId',None):
# We have a multipart upload, so iterate over the parts to generate the md5 hash and calculate size
            # This is the last call made after the multipart upload with the uploadId
mupmd5 = hashlib.md5()
mupsize = 0
for mup in self.application.S3[bucket_name].find({'object_name':object_name}):
mupmd5.update(mup['object'])
mupsize += mup['size']
self.application.S3[bucket_name].insert_one({'object_name':object_name,'object':bucket_object,'multipart':True,'md5':mupmd5.hexdigest(),'size':mupsize,'added':datetime.datetime.utcnow(),'updated':datetime.datetime.utcnow(),})
self.render_xml({"InitiateMultipartUploadResult": {
"Bucket": bucket_name,
"Prefix": self.prefix,
"Key":object_name,
"UploadId":object_name
}})
def delete(self, *args):
        if hasattr(self,'bucket_name') and hasattr(self,'object_name'):
bucket_name = self.bucket_name
object_name = self.object_name
else:
bucket_name,object_name = args
original_name = urllib.unquote(object_name)
bucket_object = self._get_bucket_object(bucket_name=bucket_name,object_name=object_name)
if bucket_object:
self.set_status(204)
self.application.S3[bucket_name].remove({"_id":bucket_object['_id']})
self.finish()
else:
self._error(code=404,s3code='NSK')
return
def head(self, *args):
if hasattr(self,'bucket_name') and hasattr(self,'object_name'):
bucket_name = self.bucket_name
object_name = self.object_name
else:
bucket_name,object_name = args
object_name = urllib.unquote(object_name)
bucket_object = self._get_bucket_object(bucket_name=bucket_name,object_name=object_name)
if bucket_object:
self.set_header('etag', '"%s"' % bucket_object['md5'])
self.set_header('last-modified', '"%s"' % bucket_object['updated'])
self.finish()
else:
self._error(code=404,s3code='NSK')
return
if __name__ == "__main__":
start(8080,debug=False)
| chembl/the-S3-amongos | the-S3-amongos.py | Python | apache-2.0 | 21,343 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the debug events writer Python class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_monitors
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class TestMonitor(debug_events_monitors.BaseMonitor):
def __init__(self, debug_data_reader):
super(TestMonitor, self).__init__(debug_data_reader)
# Mapping execution index to Execution data objects.
self.executions = dict()
# Mapping graph execution trace index to GraphExecutionTrace data objects.
self.graph_execution_traces = dict()
def on_execution(self, execution_index, execution):
if execution_index in self.executions:
raise ValueError("Duplicate execution index: %d" % execution_index)
self.executions[execution_index] = execution
def on_graph_execution_trace(self, graph_execution_trace_index,
graph_execution_trace):
if graph_execution_trace_index in self.graph_execution_traces:
raise ValueError("Duplicate graph-execution-trace index: %d" %
graph_execution_trace_index)
self.graph_execution_traces[
graph_execution_trace_index] = graph_execution_trace
class DebugEventsMonitorTest(dumping_callback_test_lib.DumpingCallbackTestBase,
parameterized.TestCase):
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("ConciseHealth", "CONCISE_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
def testOnExecutionIsCalled(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
x = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
y = constant_op.constant([[-1], [1]], dtype=dtypes.float32)
math_ops.matmul(x, y)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
test_monitor = TestMonitor(reader)
reader.update()
self.assertLen(test_monitor.executions, 1)
self.assertEmpty(test_monitor.graph_execution_traces)
execution = test_monitor.executions[0]
self.assertTrue(execution.wall_time)
self.assertEqual(execution.op_type, "MatMul")
self.assertLen(execution.output_tensor_device_ids, 1)
self.assertLen(execution.input_tensor_ids, 2)
self.assertLen(execution.output_tensor_ids, 1)
self.assertEqual(execution.num_outputs, 1)
self.assertEqual(execution.graph_id, "")
if tensor_debug_mode == "NO_TENSOR":
self.assertIsNone(execution.debug_tensor_values)
elif tensor_debug_mode == "CONCISE_HEALTH":
self.assertLen(execution.debug_tensor_values, 1)
# [tensor_id, element_count, neg_inf_count, pos_inf_count, nan_count].
self.assertLen(execution.debug_tensor_values[0], 5)
elif tensor_debug_mode == "FULL_TENSOR":
# Full tensor values are not stored in the debug_tensor_values field.
self.assertIsNone(execution.debug_tensor_values)
self.assertAllClose(
reader.execution_to_tensor_values(execution), [[[1.], [1.]]])
@parameterized.named_parameters(
("ConciseHealth", "CONCISE_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
def testOnGraphExecutionTraceIsCalled(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def unique_sum(xs):
"""Sum over the unique values, for testing."""
unique_xs, indices = array_ops.unique(xs)
return math_ops.reduce_sum(unique_xs), indices
xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
unique_sum(xs)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
test_monitor = TestMonitor(reader)
reader.update()
self.assertLen(test_monitor.executions, 1)
execution = test_monitor.executions[0]
self.assertTrue(execution.wall_time)
self.assertStartsWith(execution.op_type, "__inference_unique_sum")
self.assertLen(execution.output_tensor_device_ids, 2)
self.assertLen(execution.input_tensor_ids, 1)
self.assertLen(execution.output_tensor_ids, 2)
self.assertEqual(execution.num_outputs, 2)
self.assertTrue(execution.graph_id)
traces = test_monitor.graph_execution_traces
if tensor_debug_mode == "CONCISE_HEALTH":
self.assertLen(traces, 3) # [Placeholder:0, Unique:0 , Sum:0].
self.assertEqual(traces[0].op_type, "Placeholder")
self.assertEqual(traces[0].output_slot, 0)
self.assertEqual(traces[1].op_type, "Unique")
self.assertEqual(traces[1].output_slot, 0)
# Unique:1 is not traced under CONCISE_HEALTH mode, as it's int-dtype.
self.assertEqual(traces[2].op_type, "Sum")
self.assertEqual(traces[2].output_slot, 0)
# [tensor_id, element_count, neg_inf_count, pos_inf_count, nan_count].
self.assertLen(traces[0].debug_tensor_value, 5)
self.assertLen(traces[1].debug_tensor_value, 5)
self.assertLen(traces[2].debug_tensor_value, 5)
elif tensor_debug_mode == "FULL_TENSOR":
self.assertLen(traces, 4) # [Placeholder:0, Unique:0, Unique:1, Sum:0].
self.assertEqual(traces[0].op_type, "Placeholder")
self.assertEqual(traces[0].output_slot, 0)
self.assertIsNone(traces[0].debug_tensor_value)
self.assertAllEqual(
reader.graph_execution_trace_to_tensor_value(traces[0]),
[2., 6., 8., 1., 2.])
self.assertEqual(traces[1].op_type, "Unique")
self.assertEqual(traces[1].output_slot, 0)
self.assertIsNone(traces[1].debug_tensor_value)
self.assertAllEqual(
reader.graph_execution_trace_to_tensor_value(traces[1]),
[2., 6., 8., 1.])
self.assertEqual(traces[2].op_type, "Unique")
self.assertEqual(traces[2].output_slot, 1)
self.assertIsNone(traces[2].debug_tensor_value)
self.assertAllEqual(
reader.graph_execution_trace_to_tensor_value(traces[2]),
[0, 1, 2, 3, 0])
self.assertEqual(traces[3].op_type, "Sum")
self.assertEqual(traces[3].output_slot, 0)
self.assertIsNone(traces[3].debug_tensor_value)
self.assertAllClose(
reader.graph_execution_trace_to_tensor_value(traces[3]), 17.)
class AlertDataObjectsTest(test_util.TensorFlowTestCase):
"""Unit tests for alert-class objects."""
def testInfNanMonitor(self):
alert = debug_events_monitors.InfNanAlert(
1234,
"FooOp",
1,
size=1000,
num_neg_inf=5,
num_pos_inf=10,
num_nan=20,
execution_index=777,
graph_execution_trace_index=888)
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "FooOp")
self.assertEqual(alert.output_slot, 1)
self.assertEqual(alert.size, 1000)
self.assertEqual(alert.num_neg_inf, 5)
self.assertEqual(alert.num_pos_inf, 10)
self.assertEqual(alert.num_nan, 20)
self.assertEqual(alert.execution_index, 777)
self.assertEqual(alert.graph_execution_trace_index, 888)
class InfNanMonitorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testInfNanMonitorStartsWithEmptyAlerts(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
self.assertEmpty(monitor.alerts())
def testInfNanMonitorOnExecutionUnderCurtHealthMode(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
execution_digest = debug_events_reader.ExecutionDigest(
1234, 1, "FooOp", output_tensor_device_ids=[0, 1])
execution = debug_events_reader.Execution(
execution_digest,
"worker01", ["a1", "b2", "e3"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
graph_id=None,
input_tensor_ids=[12, 34],
output_tensor_ids=[56, 78],
debug_tensor_values=[[-1, 0], [-1, 1]]) # [tensor_id, any_inf_nan].
monitor.on_execution(50, execution)
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "FooOp")
self.assertEqual(alert.output_slot, 1)
# The four fields below are unavailable under CURT_HEALTH mode by design.
self.assertIsNone(alert.size)
self.assertIsNone(alert.num_neg_inf)
self.assertIsNone(alert.num_pos_inf)
self.assertIsNone(alert.num_nan)
self.assertEqual(alert.execution_index, 50)
self.assertIsNone(alert.graph_execution_trace_index)
def testInfNanMonitorOnExecutionUnderConciseHealthMode(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
execution_digest = debug_events_reader.ExecutionDigest(
1234, 1, "BarOp", output_tensor_device_ids=[0, 1])
execution = debug_events_reader.Execution(
execution_digest,
"worker01",
["a1", "b2", "e3"],
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
graph_id=None,
input_tensor_ids=[12, 34],
output_tensor_ids=[56, 78],
# [tensor_id, size, num_neg_inf, num_pos_inf, num_nan].
debug_tensor_values=[[-1, 10, 1, 2, 3], [-1, 100, 0, 0, 0]])
monitor.on_execution(60, execution)
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "BarOp")
self.assertEqual(alert.output_slot, 0)
self.assertEqual(alert.size, 10)
self.assertEqual(alert.num_neg_inf, 1)
self.assertEqual(alert.num_pos_inf, 2)
self.assertEqual(alert.num_nan, 3)
self.assertEqual(alert.execution_index, 60)
self.assertIsNone(alert.graph_execution_trace_index)
@parameterized.named_parameters(
("FloatsScalarWithInfAndNan", np.inf, np.float32, 1, 0, 1, 0),
("Floats2DWithInfAndNan", [[0, np.nan, np.nan, -np.inf]
], np.float32, 4, 1, 0, 2),
("Floats1DWithoutInfOrNan", [0, -1e6, 1e6, 9e5], np.float32, 4, 0, 0, 0),
("Integers", [[0, 1000, -200, -300]], np.int32, 4, 0, 0, 0),
("Booleans", [False, True, False, False], np.int32, 4, 0, 0, 0),
)
def testInfNanMonitorOnExecutionUnderFullTensorModeWorks(
self, tensor_value, dtype, expected_size, expected_num_neg_inf,
expected_num_pos_inf, expected_num_nan):
mock_reader = test.mock.MagicMock()
mock_reader.execution_to_tensor_values.return_value = [
np.array([[0.0, -1.0, 1.0]]),
np.array(tensor_value, dtype=dtype)
]
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
execution_digest = debug_events_reader.ExecutionDigest(
1234,
1,
"__inference_bar_function_1234",
output_tensor_device_ids=[0, 1])
execution = debug_events_reader.Execution(
execution_digest,
"worker01", ["a1", "b2", "e3"],
debug_event_pb2.TensorDebugMode.FULL_TENSOR,
graph_id=None,
input_tensor_ids=[12, 34],
output_tensor_ids=[56, 78])
monitor.on_execution(70, execution)
if expected_num_neg_inf or expected_num_pos_inf or expected_num_nan:
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "__inference_bar_function_1234")
self.assertEqual(alert.output_slot, 1)
self.assertEqual(alert.size, expected_size)
self.assertEqual(alert.num_neg_inf, expected_num_neg_inf)
self.assertEqual(alert.num_pos_inf, expected_num_pos_inf)
self.assertEqual(alert.num_nan, expected_num_nan)
self.assertEqual(alert.execution_index, 70)
      self.assertIsNone(alert.graph_execution_trace_index)
else:
self.assertEmpty(monitor.alerts())
def testInfNaNMonitorOnGraphExecutionTraceCurtHealthMode(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 1, "FooOp", "FooOp_1", 2, "g1")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g0", "g1"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_tensor_value=[9, 1]) # [tensor_id, any_inf_nan].
monitor.on_graph_execution_trace(55, trace)
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "FooOp")
self.assertEqual(alert.output_slot, 2)
# The four fields below are unavailable under CURT_HEALTH mode by design.
self.assertIsNone(alert.size)
self.assertIsNone(alert.num_neg_inf)
self.assertIsNone(alert.num_pos_inf)
self.assertIsNone(alert.num_nan)
self.assertIsNone(alert.execution_index)
self.assertEqual(alert.graph_execution_trace_index, 55)
def testInfNaNMonitorOnGraphExecutionTraceConciseHealthMode(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 1, "FooOp", "FooOp_1", 2, "g1")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest,
["g0", "g1"],
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
# [tensor_id, size, num_neg_inf, num_pos_inf, num_nan].
debug_tensor_value=[9, 100, 3, 2, 1])
monitor.on_graph_execution_trace(55, trace)
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "FooOp")
self.assertEqual(alert.output_slot, 2)
self.assertEqual(alert.size, 100)
self.assertEqual(alert.num_neg_inf, 3)
self.assertEqual(alert.num_pos_inf, 2)
self.assertEqual(alert.num_nan, 1)
self.assertEqual(alert.graph_execution_trace_index, 55)
@parameterized.named_parameters(
("FloatsScalarWithInfAndNan", np.inf, np.float32, 1, 0, 1, 0),
("Floats2DWithInfAndNan", [[0, np.nan, np.nan, -np.inf]
], np.float32, 4, 1, 0, 2),
("Floats1DWithoutInfOrNan", [0, -1e6, 1e6, 9e5], np.float32, 4, 0, 0, 0),
("Integers", [[0, 1000, -200, -300]], np.int32, 4, 0, 0, 0),
("Booleans", [False, True, False, False], np.int32, 4, 0, 0, 0),
)
def testInfNanMonitorOnGraphExecutionTraceUnderFullTensorModeWorks(
self, tensor_value, dtype, expected_size, expected_num_neg_inf,
expected_num_pos_inf, expected_num_nan):
mock_reader = test.mock.MagicMock()
mock_reader.graph_execution_trace_to_tensor_value.return_value = np.array(
tensor_value, dtype=dtype)
monitor = debug_events_monitors.InfNanMonitor(mock_reader)
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 1, "BazOp", "name_scope_3/BazOp_1", 2, "g1")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g0", "g1"], debug_event_pb2.TensorDebugMode.FULL_TENSOR)
monitor.on_graph_execution_trace(80, trace)
if expected_num_neg_inf or expected_num_pos_inf or expected_num_nan:
self.assertLen(monitor.alerts(), 1)
alert = monitor.alerts()[0]
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "BazOp")
self.assertEqual(alert.output_slot, 2)
self.assertEqual(alert.size, expected_size)
self.assertEqual(alert.num_neg_inf, expected_num_neg_inf)
self.assertEqual(alert.num_pos_inf, expected_num_pos_inf)
self.assertEqual(alert.num_nan, expected_num_nan)
self.assertIsNone(alert.execution_index)
self.assertEqual(alert.graph_execution_trace_index, 80)
else:
self.assertEmpty(monitor.alerts())
def testLimitingInfNanMonitorAlertCountWorks(self):
mock_reader = test.mock.MagicMock()
monitor = debug_events_monitors.InfNanMonitor(mock_reader, limit=3)
for i in range(10):
execution_digest = debug_events_reader.ExecutionDigest(
i * 1000, 1, "FooOp", output_tensor_device_ids=[0, 1])
execution = debug_events_reader.Execution(
execution_digest,
"worker01", ["a1", "b2", "e3"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
graph_id=None,
input_tensor_ids=[12, 34],
output_tensor_ids=[56, 78],
debug_tensor_values=[[-1, 0], [-1, 1]]) # [tensor_id, any_inf_nan].
monitor.on_execution(i, execution)
alerts = monitor.alerts()
self.assertLen(alerts, 3)
for i, alert in enumerate(alerts):
self.assertEqual(alert.wall_time, i * 1000)
self.assertEqual(alert.op_type, "FooOp")
self.assertEqual(alert.output_slot, 1)
# The four fields below are unavailable under CURT_HEALTH mode by design.
self.assertIsNone(alert.size)
self.assertIsNone(alert.num_neg_inf)
self.assertIsNone(alert.num_pos_inf)
self.assertIsNone(alert.num_nan)
self.assertEqual(alert.execution_index, i)
self.assertIsNone(alert.graph_execution_trace_index)
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
| jhseu/tensorflow | tensorflow/python/debug/lib/debug_events_monitors_test.py | Python | apache-2.0 | 18,786 |
__author__ = 'Conscience'
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^post/(?P<pk>[0-9]+)/$', views.post_detail, name='post_detail'),
url(r'^post/$', views.post_list, name='post_list'),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^post/(?P<pk>[0-9]+)/edit/$', views.post_edit, name='post_edit'),
] | Rivares/MyBlog | blog/urls.py | Python | apache-2.0 | 446 |
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import desc
from webob import exc
from murano.api.v1 import request_statistics
from murano.common import policy
from murano.common import utils
from murano.db import models
from murano.db import session as db_session
from murano.openstack.common.gettextutils import _ # noqa
from murano.openstack.common import log as logging
from murano.openstack.common import wsgi
LOG = logging.getLogger(__name__)
API_NAME = 'Deployments'
class Controller(object):
@request_statistics.stats_count(API_NAME, 'Index')
def index(self, request, environment_id):
target = {"environment_id": environment_id}
policy.check("list_deployments", request.context, target)
unit = db_session.get_session()
verify_and_get_env(unit, environment_id, request)
query = unit.query(models.Deployment) \
.filter_by(environment_id=environment_id) \
.order_by(desc(models.Deployment.created))
result = query.all()
deployments = [set_dep_state(deployment, unit).to_dict() for deployment
in result]
return {'deployments': deployments}
@request_statistics.stats_count(API_NAME, 'Statuses')
def statuses(self, request, environment_id, deployment_id):
target = {"environment_id": environment_id,
"deployment_id": deployment_id}
policy.check("statuses_deployments", request.context, target)
unit = db_session.get_session()
query = unit.query(models.Status) \
.filter_by(deployment_id=deployment_id) \
.order_by(models.Status.created)
deployment = verify_and_get_deployment(unit, environment_id,
deployment_id)
if 'service_id' in request.GET:
service_id_set = set(request.GET.getall('service_id'))
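            # Map each requested service id to every entity id nested under
            # that service, so the status reports can be filtered per service.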
environment = deployment.description
entity_ids = []
for service in environment.get('services', []):
if service['?']['id'] in service_id_set:
id_map = utils.build_entity_map(service)
entity_ids = entity_ids + id_map.keys()
if entity_ids:
query = query.filter(models.Status.entity_id.in_(entity_ids))
else:
return {'reports': []}
result = query.all()
return {'reports': [status.to_dict() for status in result]}
def verify_and_get_env(db_session, environment_id, request):
environment = db_session.query(models.Environment).get(environment_id)
if not environment:
LOG.info(_('Environment with id {0} not found').format(environment_id))
raise exc.HTTPNotFound
if environment.tenant_id != request.context.tenant:
LOG.info(_('User is not authorized to access this tenant resources.'))
raise exc.HTTPUnauthorized
return environment
def _patch_description(description):
description['services'] = description.get('applications', [])
del description['applications']
def verify_and_get_deployment(db_session, environment_id, deployment_id):
deployment = db_session.query(models.Deployment).get(deployment_id)
if not deployment:
LOG.info(_('Deployment with id {0} not found').format(deployment_id))
raise exc.HTTPNotFound
if deployment.environment_id != environment_id:
LOG.info(_('Deployment with id {0} not found'
' in environment {1}').format(deployment_id,
environment_id))
raise exc.HTTPBadRequest
_patch_description(deployment.description)
return deployment
def create_resource():
return wsgi.Resource(Controller())
def set_dep_state(deployment, unit):
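    # Derive a human-readable deployment state from the finished flag and the
    # counts of error/warning status records attached to the deployment.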
num_errors = unit.query(models.Status).filter_by(
level='error',
deployment_id=deployment.id).count()
num_warnings = unit.query(models.Status).filter_by(
level='warning',
deployment_id=deployment.id).count()
if deployment.finished:
if num_errors:
deployment.state = 'completed_w_errors'
elif num_warnings:
deployment.state = 'completed_w_warnings'
else:
deployment.state = 'success'
else:
if num_errors:
deployment.state = 'running_w_errors'
elif num_warnings:
deployment.state = 'running_w_warnings'
else:
deployment.state = 'running'
_patch_description(deployment.description)
return deployment
| ativelkov/murano-api | murano/api/v1/deployments.py | Python | apache-2.0 | 5,151 |
"""
WSGI config for lot project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
application = get_wsgi_application()
| Ecotrust/COMPASS | mp/wsgi.py | Python | apache-2.0 | 378 |
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
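# Reads output.dex and writes its raw bytes to output.txt as a Python-style
# list of integers (Python 2: map() returns a list, so str() renders it fully).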
data = open('output.dex', 'rb').read()
with open('output.txt', 'wb') as f:
f.write(str(map(ord, data))) | google/google-ctf | 2017/quals/2017-re-food/dex_to_bytes.py | Python | apache-2.0 | 705 |
"""The tests for local file camera component."""
import asyncio
from unittest import mock
# Using third party package because of a bug reading binary data in Python 3.4
# https://bugs.python.org/issue23004
from mock_open import MockOpen
from homeassistant.bootstrap import setup_component
from tests.common import mock_http_component
import logging
@asyncio.coroutine
def test_loading_file(hass, test_client):
"""Test that it loads image from disk."""
@mock.patch('os.path.isfile', mock.Mock(return_value=True))
@mock.patch('os.access', mock.Mock(return_value=True))
def setup_platform():
"""Setup platform inside callback."""
assert setup_component(hass, 'camera', {
'camera': {
'name': 'config_test',
'platform': 'local_file',
'file_path': 'mock.file',
}})
yield from hass.loop.run_in_executor(None, setup_platform)
client = yield from test_client(hass.http.app)
m_open = MockOpen(read_data=b'hello')
with mock.patch(
'homeassistant.components.camera.local_file.open',
m_open, create=True
):
resp = yield from client.get('/api/camera_proxy/camera.config_test')
assert resp.status == 200
body = yield from resp.text()
assert body == 'hello'
@asyncio.coroutine
def test_file_not_readable(hass, caplog):
"""Test a warning is shown setup when file is not readable."""
mock_http_component(hass)
@mock.patch('os.path.isfile', mock.Mock(return_value=True))
@mock.patch('os.access', mock.Mock(return_value=False))
def run_test():
caplog.set_level(
logging.WARNING, logger='requests.packages.urllib3.connectionpool')
assert setup_component(hass, 'camera', {
'camera': {
'name': 'config_test',
'platform': 'local_file',
'file_path': 'mock.file',
}})
assert 'Could not read' in caplog.text
assert 'config_test' in caplog.text
assert 'mock.file' in caplog.text
yield from hass.loop.run_in_executor(None, run_test)
| open-homeautomation/home-assistant | tests/components/camera/test_local_file.py | Python | apache-2.0 | 2,135 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A sample app that operates on GCS files with blobstore API's BlobReader."""
import cloudstorage
from google.appengine.api import app_identity
from google.appengine.ext import blobstore
import webapp2
class BlobreaderHandler(webapp2.RequestHandler):
def get(self):
# Get the default Cloud Storage Bucket name and create a file name for
# the object in Cloud Storage.
bucket = app_identity.get_default_gcs_bucket_name()
# Cloud Storage file names are in the format /bucket/object.
filename = '/{}/blobreader_demo'.format(bucket)
# Create a file in Google Cloud Storage and write something to it.
with cloudstorage.open(filename, 'w') as filehandle:
filehandle.write('abcde\n')
# In order to read the contents of the file using the Blobstore API,
# you must create a blob_key from the Cloud Storage file name.
# Blobstore expects the filename to be in the format of:
# /gs/bucket/object
blobstore_filename = '/gs{}'.format(filename)
blob_key = blobstore.create_gs_key(blobstore_filename)
# [START gae_blobstore_reader]
# Instantiate a BlobReader for a given Blobstore blob_key.
blob_reader = blobstore.BlobReader(blob_key)
# Instantiate a BlobReader for a given Blobstore blob_key, setting the
# buffer size to 1 MB.
blob_reader = blobstore.BlobReader(blob_key, buffer_size=1048576)
# Instantiate a BlobReader for a given Blobstore blob_key, setting the
# initial read position.
blob_reader = blobstore.BlobReader(blob_key, position=0)
# Read the entire value into memory. This may take a while depending
# on the size of the value and the size of the read buffer, and is not
# recommended for large values.
blob_reader_data = blob_reader.read()
# Write the contents to the response.
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(blob_reader_data)
# Set the read position back to 0, then read and write 3 bytes.
blob_reader.seek(0)
blob_reader_data = blob_reader.read(3)
self.response.write(blob_reader_data)
self.response.write('\n')
# Set the read position back to 0, then read and write one line (up to
# and including a '\n' character) at a time.
blob_reader.seek(0)
for line in blob_reader:
self.response.write(line)
# [END gae_blobstore_reader]
# Delete the file from Google Cloud Storage using the blob_key.
blobstore.delete(blob_key)
app = webapp2.WSGIApplication([
('/', BlobreaderHandler),
('/blobreader', BlobreaderHandler)], debug=True)
| GoogleCloudPlatform/python-docs-samples | appengine/standard/blobstore/blobreader/main.py | Python | apache-2.0 | 3,332 |
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import threading
import time
import requests
from azure.core.exceptions import ClientAuthenticationError
from azure.identity._exceptions import CredentialUnavailableError
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
logger = logging.getLogger(__name__)
_MAX_CONSECUTIVE_REDIRECTS = 10
_MONITOR_OAUTH_SCOPE = "https://monitor.azure.com//.default"
_requests_lock = threading.Lock()
_requests_map = {}
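# Module-level statsbeat counters (count, duration, success, failure, retry,
# throttle, exception) shared by all transports and guarded by _requests_lock.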
class TransportMixin(object):
def _check_stats_collection(self):
return not os.environ.get("APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL") and (not hasattr(self, '_is_stats') or not self._is_stats) # noqa: E501
def _transmit_from_storage(self):
if self.storage:
for blob in self.storage.gets():
# give a few more seconds for blob lease operation
# to reduce the chance of race (for perf consideration)
if blob.lease(self.options.timeout + 5):
envelopes = blob.get()
result = self._transmit(envelopes)
if result > 0:
blob.lease(result)
else:
blob.delete()
def _transmit(self, envelopes):
"""
Transmit the data envelopes to the ingestion service.
Return a negative value for partial success or non-retryable failure.
Return 0 if all envelopes have been successfully ingested.
Return the next retry time in seconds for retryable failure.
This function should never throw exception.
"""
if not envelopes:
return 0
exception = None
try:
start_time = time.time()
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json; charset=utf-8',
}
endpoint = self.options.endpoint
if self.options.credential:
token = self.options.credential.get_token(_MONITOR_OAUTH_SCOPE)
headers["Authorization"] = "Bearer {}".format(token.token)
endpoint += '/v2.1/track'
if self._check_stats_collection():
with _requests_lock:
_requests_map['count'] = _requests_map.get('count', 0) + 1 # noqa: E501
response = requests.post(
url=endpoint,
data=json.dumps(envelopes),
headers=headers,
timeout=self.options.timeout,
proxies=json.loads(self.options.proxies),
allow_redirects=False,
)
except requests.Timeout:
logger.warning(
'Request time out. Ingestion may be backed up. Retrying.')
exception = self.options.minimum_retry_interval
except requests.RequestException as ex:
logger.warning(
'Retrying due to transient client side error %s.', ex)
if self._check_stats_collection():
with _requests_lock:
_requests_map['retry'] = _requests_map.get('retry', 0) + 1 # noqa: E501
# client side error (retryable)
exception = self.options.minimum_retry_interval
except CredentialUnavailableError as ex:
logger.warning('Credential error. %s. Dropping telemetry.', ex)
exception = -1
except ClientAuthenticationError as ex:
logger.warning('Authentication error %s', ex)
exception = self.options.minimum_retry_interval
except Exception as ex:
logger.warning(
'Error when sending request %s. Dropping telemetry.', ex)
if self._check_stats_collection():
with _requests_lock:
_requests_map['exception'] = _requests_map.get('exception', 0) + 1 # noqa: E501
# Extraneous error (non-retryable)
exception = -1
finally:
end_time = time.time()
if self._check_stats_collection():
with _requests_lock:
duration = _requests_map.get('duration', 0)
_requests_map['duration'] = duration + (end_time - start_time) # noqa: E501
if exception is not None:
return exception
text = 'N/A'
data = None
try:
text = response.text
except Exception as ex:
logger.warning('Error while reading response body %s.', ex)
else:
try:
data = json.loads(text)
except Exception:
pass
if response.status_code == 200:
self._consecutive_redirects = 0
if self._check_stats_collection():
with _requests_lock:
_requests_map['success'] = _requests_map.get('success', 0) + 1 # noqa: E501
return 0
# Status code not 200 counts as failure
if self._check_stats_collection():
with _requests_lock:
_requests_map['failure'] = _requests_map.get('failure', 0) + 1 # noqa: E501
if response.status_code == 206: # Partial Content
if data:
try:
resend_envelopes = []
for error in data['errors']:
if error['statusCode'] in (
429, # Too Many Requests
500, # Internal Server Error
503, # Service Unavailable
):
resend_envelopes.append(envelopes[error['index']])
else:
logger.error(
'Data drop %s: %s %s.',
error['statusCode'],
error['message'],
envelopes[error['index']],
)
if resend_envelopes:
self.storage.put(resend_envelopes)
except Exception as ex:
logger.error(
'Error while processing %s: %s %s.',
response.status_code,
text,
ex,
)
if self._check_stats_collection():
with _requests_lock:
_requests_map['retry'] = _requests_map.get('retry', 0) + 1 # noqa: E501
return -response.status_code
# cannot parse response body, fallback to retry
if response.status_code in (
206, # Partial Content
429, # Too Many Requests
500, # Internal Server Error
503, # Service Unavailable
):
logger.warning(
'Transient server side error %s: %s.',
response.status_code,
text,
)
# server side error (retryable)
if self._check_stats_collection():
with _requests_lock:
# 429 counts as throttle instead of retry
if response.status_code == 429:
_requests_map['throttle'] = _requests_map.get('throttle', 0) + 1 # noqa: E501
else:
_requests_map['retry'] = _requests_map.get('retry', 0) + 1 # noqa: E501
return self.options.minimum_retry_interval
# Authentication error
if response.status_code == 401:
logger.warning(
'Authentication error %s: %s.',
response.status_code,
text,
)
if self._check_stats_collection():
with _requests_lock:
_requests_map['retry'] = _requests_map.get('retry', 0) + 1 # noqa: E501
return self.options.minimum_retry_interval
# Forbidden error
# Can occur when v2 endpoint is used while AI resource is configured
# with disableLocalAuth
if response.status_code == 403:
logger.warning(
'Forbidden error %s: %s.',
response.status_code,
text,
)
if self._check_stats_collection():
with _requests_lock:
_requests_map['retry'] = _requests_map.get('retry', 0) + 1 # noqa: E501
return self.options.minimum_retry_interval
# Redirect
if response.status_code in (307, 308):
self._consecutive_redirects += 1
if self._consecutive_redirects < _MAX_CONSECUTIVE_REDIRECTS:
if response.headers:
location = response.headers.get("location")
if location:
url = urlparse(location)
if url.scheme and url.netloc:
# Change the host to the new redirected host
self.options.endpoint = "{}://{}".format(url.scheme, url.netloc) # noqa: E501
# Attempt to export again
return self._transmit(envelopes)
logger.error(
"Error parsing redirect information."
)
logger.error(
"Error sending telemetry because of circular redirects."
" Please check the integrity of your connection string."
)
logger.error(
'Non-retryable server side error %s: %s.',
response.status_code,
text,
)
# server side error (non-retryable)
return -response.status_code
| census-instrumentation/opencensus-python | contrib/opencensus-ext-azure/opencensus/ext/azure/common/transport.py | Python | apache-2.0 | 10,424 |
#!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the RankerRoot model
"""
__authors__ = [
'"Lennard de Rijk" <[email protected]>',
]
from google.appengine.ext import db
import soc.models.linkable
class RankerRoot(soc.models.linkable.Linkable):
"""Links the Root of a RankList tree to an owner and also
  gives it a unique ID.
"""
#: A required reference property to the root of the RankList tree
root = db.ReferenceProperty(required=True,
collection_name='roots')
| jamslevy/gsoc | app/soc/models/ranker_root.py | Python | apache-2.0 | 1,077 |
'''
Parse links in html and css pages.
'''
import logging
import re
import hashlib
import urllib.parse
from functools import partial
import multidict
import html
from bs4 import BeautifulSoup
from . import stats
from .urls import URL
from . import facet
from . import config
LOGGER = logging.getLogger(__name__)
async def do_parser(body, body_bytes, resp_headers, url, crawler):
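    # Pages larger than the configured ParseInBurnerSize threshold are parsed
    # in a separate burner process to keep the main event loop responsive;
    # smaller pages are parsed inline in the main thread.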
if len(body) > int(config.read('Multiprocess', 'ParseInBurnerSize')):
stats.stats_sum('parser in burner thread', 1)
# headers is a multidict.CIMultiDictProxy case-blind dict
# and the Proxy form of it doesn't pickle, so convert to one that does
resp_headers = multidict.CIMultiDict(resp_headers)
links, embeds, sha1, facets, base = await crawler.burner.burn(
partial(do_burner_work_html, body, body_bytes, resp_headers,
burn_prefix='burner ', url=url),
url=url)
else:
stats.stats_sum('parser in main thread', 1)
# no coroutine state because this is a burn, not an await
links, embeds, sha1, facets, base = do_burner_work_html(
body, body_bytes, resp_headers, burn_prefix='main ', url=url)
return links, embeds, sha1, facets, base
def do_burner_work_html(html, html_bytes, headers, burn_prefix='', url=None):
stats.stats_sum('parser html bytes', len(html_bytes))
# This embodies a minimal parsing policy; it needs to be made pluggable/configurable
# split head/body
# soup the head so we can accurately get base and other details
# regex the body for links and embeds, for speed
with stats.record_burn(burn_prefix+'split_head_body', url=url):
head, body = split_head_body(html, url=url)
'''
beautiful soup + lxml2 parses only about 4-16 MB/s
'''
stats.stats_sum('head soup bytes', len(head))
with stats.record_burn(burn_prefix+'head soup', url=url):
try:
head_soup = BeautifulSoup(head, 'lxml')
except Exception as e:
LOGGER.info('url %s threw the %r exception in BeautifulSoup', url, e)
stats.stats_sum('head soup exception '+str(e), 1)
raise
base = head_soup.find('base') or {}
base = base.get('href')
if base:
# base can be relative, e.g. 'subdir/' or '.'
base = urllib.parse.urljoin(url.url, base)
base_or_url = base or url
with stats.record_burn(burn_prefix+'find_head_links_soup', url=url):
links, embeds = find_head_links_soup(head_soup)
with stats.record_burn(burn_prefix+'find_body_links_re', url=url):
lbody, ebody = find_body_links_re(body)
links += lbody
embeds += ebody
embeds = clean_link_objects(embeds, ('javascript:', 'data:'))
links = clean_link_objects(links, ('javascript:',))
with stats.record_burn(burn_prefix+'url_clean_join', url=url):
links = url_clean_join(links, url=base_or_url)
embeds = url_clean_join(embeds, url=base_or_url)
with stats.record_burn(burn_prefix+'sha1 html', url=url):
sha1 = 'sha1:' + hashlib.sha1(html_bytes).hexdigest()
with stats.record_burn(burn_prefix+'facets', url=url):
facets = facet.compute_all(html, head, body, headers, links, embeds, head_soup=head_soup, url=url)
links = collapse_links(links)
embeds = collapse_links(embeds)
return links, embeds, sha1, facets, base
def collapse_links(links):
ret = []
for link in links:
l = link.get('href')
if not l:
l = link.get('src')
if l:
ret.append(l)
return ret
def clean_link_objects(link_objects, schemes):
'''
Drop all elements of the link_objects that are in schemes.
'''
schemes = tuple(schemes)
ret = []
for link_object in link_objects:
if link_object is None:
continue
u = link_object.get('href') or link_object.get('src')
if u and u.startswith(schemes):
continue
ret.append(link_object)
return ret
def find_html_links_re(html):
'''
Find the outgoing links and embeds in html, both head and body.
This can't tell the difference between links and embeds, so we
call them all links.
On a 3.4ghz x86 core, runs at ~ 50 megabytes/sec.
'''
stats.stats_sum('html_links_re parser bytes', len(html))
delims = set(
[m[1] for m in re.findall(r'''\s(?:href|src)\s{,3}=\s{,3}(?P<delim>['"])(.*?)(?P=delim)''', html, re.I | re.S)]
)
no_delims = set(re.findall(r'''\s(?:href|src)\s{,3}=\s{,3}([^\s'"<>]+)''', html, re.I))
links = delims.union(no_delims)
links = [{'href': h} for h in links]
return links, []
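# Illustrative sketch (not part of the original module): a minimal check of the
# regex-based extractor above on a hand-written snippet. The sample markup and
# the function name _example_find_html_links_re are assumptions for demonstration only.
def _example_find_html_links_re():
    snippet = '<a href="http://example.com/a">x</a> <img src=/logo.png>'
    links, embeds = find_html_links_re(snippet)
    # both the quoted href and the unquoted src end up in 'links', because this
    # variant cannot tell links and embeds apart; 'embeds' is always empty here
    return links, embeds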
def find_body_links_re(body):
'''
Find links in an html body, divided among links and embeds.
On a 3.4 ghz x86 core, runs at ~ 25 megabyte/sec.
'''
stats.stats_sum('body_links_re parser bytes', len(body))
embeds_delims = set(
[m[1] for m in re.findall(r'''\ssrc\s{,3}=\s{,3}(?P<delim>['"])(.*?)(?P=delim)''', body, re.I | re.S)]
)
embeds_no_delims = set(re.findall(r'''\ssrc\s{,3}=\s{,3}([^\s'"<>]+)''', body, re.I))
embeds = embeds_delims.union(embeds_no_delims)
links_delims = set(
[m[1] for m in re.findall(r'''\shref\s{,3}=\s{,3}(?P<delim>['"])(.*?)(?P=delim)''', body, re.I | re.S)]
)
links_no_delims = set(re.findall(r'''\shref\s{,3}=\s{,3}([^\s'"<>]+)''', body, re.I))
links = links_delims.union(links_no_delims)
embeds = [{'src': s} for s in embeds]
links = [{'href': h} for h in links]
return links, embeds
def find_body_links_anchors_re(body):
'''
Find links and anchors in an html body, divided among links and embeds.
On a 3.4 ghz x86 core, runs at ~ NN megabyte/sec.
'''
stats.stats_sum('body_links_anchors_re parser bytes', len(body))
embeds_delims = set(
[m[1] for m in re.findall(r'''\ssrc\s{,3}=\s{,3}(?P<delim>['"])(.*?)(?P=delim)''', body, re.I | re.S | re.X)]
)
embeds_no_delims = set(re.findall(r'''\ssrc\s{,3}=\s{,3}([^\s'"<>]+)''', body, re.I | re.X))
embeds = embeds_delims.union(embeds_no_delims)
links_delims = set()
for m in re.finditer(r'''\shref\s{,3}=\s{,3}(?P<delim>['"])(.*?)(?P=delim) [^>]{,400} >''', body, re.I | re.S | re.X):
delim = m.group(1)
href = m.group(2)
if delim in href:
# this happens when the size above isn't big enough.
href = href.split(delim, 1)[0]
stats.stats_sum('body_links_anchors_re parser extra delim split needed', 1)
if href.startswith('#'):
continue
end = m.end(0)
anchor = body[end:]
mm = re.match(r'(.{,101}?)</a>', anchor, re.I | re.S)
if mm:
anchor = mm.group(1)
else:
anchor = anchor.split('<', 1)[0]
links_delims.add((href, anchor))
links_no_delims = set()
for m in re.finditer(r'''\shref\s{,3}=\s{,3}([^'">\s]+) [^>]{,200} >''', body, re.I | re.S | re.X):
href = m.group(1)
if href == '#':
continue
end = m.end(0)
anchor = body[end:]
mm = re.match(r'(.{,101}?)</a>', anchor, re.I | re.S)
if mm:
anchor = mm.group(1)
else:
anchor = anchor.split('<', 1)[0]
links_no_delims.add((href, anchor))
links = links_delims.union(links_no_delims)
embeds = [{'src': s} for s in embeds]
links = [dict((('href', h[0]), *trim_anchor(h[1]))) for h in links]
return links, embeds
def find_css_links_re(css):
'''
Finds the links embedded in css files
'''
stats.stats_sum('css_links_re parser bytes', len(css))
embeds_delims = set(
[m[1] for m in re.findall(r'''\surl\(\s?(?P<delim>['"])(.*?)(?P=delim)''', css, re.I | re.S)]
)
embeds_no_delims = set(re.findall(r'''\surl\(\s?([^\s'"<>()]+)''', css, re.I))
return [], list(embeds_delims.union(embeds_no_delims))
def find_head_links_soup(head_soup):
embeds = []
for tag in head_soup.find_all(src=True):
lo = build_link_object(tag)
if lo:
embeds.append(lo)
for tag in head_soup.find_all(href=True):
lo = build_link_object(tag)
if lo:
embeds.append(lo)
return [], embeds
def trim_anchor(anchor):
ret = []
# pull out <img alt=""> here?
    anchor = re.sub(r'<.*?>', '', anchor, flags=re.S).strip()
anchor = html.unescape(anchor)
if len(anchor) > 100:
anchor = anchor[:100]
ret.append(('anchor_truncated', True))
if anchor:
ret.append(('anchor', anchor))
return ret
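# Illustrative sketch (not part of the original module): trim_anchor() returns
# ('anchor', ...) / ('anchor_truncated', True) pairs ready to be merged into a
# link object. The sample strings below are assumptions for demonstration only.
def _example_trim_anchor():
    short = trim_anchor(' Click <b>here</b> ')   # [('anchor', 'Click here')]
    truncated = trim_anchor('x' * 150)           # anchor cut to 100 chars, flag added
    return dict(short), dict(truncated)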
def build_link_object(tag):
ret = {'tag': tag.name}
if tag.get('href'):
ret['href'] = tag.get('href')
if tag.get('src'):
ret['src'] = tag.get('src')
if 'href' not in ret and 'src' not in ret:
# href or src was present but false
return
if tag.name == 'a':
try:
parts = tag.itertext(with_tail=False)
except TypeError:
parts = None
if parts:
anchor = ' '.join(parts)
anchor = re.sub(r'\s+', ' ', anchor).strip()
            ret.update(trim_anchor(anchor))
if tag.get('target'):
ret['target'] = tag.get('target')
if tag.name == 'iframe':
if tag.get('name'):
ret['name'] = tag.get('name')
return ret
def find_body_links_soup(body_soup):
embeds = []
links = []
for tag in body_soup.find_all(src=True):
if tag.name == 'iframe':
lo = build_link_object(tag)
if lo:
links.append(lo)
else:
lo = build_link_object(tag)
if lo:
embeds.append(lo)
for tag in body_soup.find_all(href=True):
if tag.name == 'link':
rel = tag.get('rel', [None])[0]
if rel == 'stylesheet':
lo = build_link_object(tag)
if lo:
embeds.append(lo)
else:
pass # discard other body-ok like 'prefetch'
else:
lo = build_link_object(tag)
if lo:
links.append(lo)
return links, embeds
def url_clean_join(links, url=None):
ret = []
for link in links:
        # a tag can carry both; for example, <link> tags in the wild occasionally (incorrectly) have both href and src
if 'href' in link:
link['href'] = URL(link['href'], urljoin=url)
if 'src' in link:
link['src'] = URL(link['src'], urljoin=url)
ret.append(link)
return ret
def url_dedup(link_objects):
ret = []
dedup = set()
for link_object in link_objects:
link = link_object.get('href') or link_object.get('src')
if link:
if link in dedup:
continue
dedup.add(link)
ret.append(link_object)
return ret
def report():
# XXX fix these names
# XXX how does this get just the burner thread? use the prefix
b = stats.stat_value('parser html bytes')
c = stats.stat_value('find_html_links re')
LOGGER.info('Burner thread report:')
if c is not None and c > 0:
LOGGER.info(' Burner thread parsed %.1f MB/cpu-second', b / c / 1000000)
d = stats.stat_value('sha1 html')
if d is not None and d > 0:
LOGGER.info(' Burner thread sha1 %.1f MB/cpu-second', b / d / 1000000)
t, c = stats.burn_values('find_html_links url_clean_join')
if c is not None and c > 0 and t is not None and t > 0:
LOGGER.info(' Burner thread cleaned %.1f kilo-urls/cpu-second', c / t / 1000)
def split_head_body(html, url=None):
'''
Efficiently split the head from the body, so we can use different
parsers on each. There's no point doing this split if it's
expensive.
It's legal for webpages to leave off <head> and <body>; the HTML5
standard requires browsers to figure it out based on the html
tags. We can't do that efficiently, so we punt for such webpages,
and return the entire page as body.
'''
# heuristic: if there's a <head> tag at all, it's early in the document
m = re.search(r'<head[\s>]', html[:2000], re.I)
if not m:
stats.stats_sum('parser split short fail', 1)
# well darn. try the same re as below, but with limited size
m = re.search(r'<(?:/head>|body[\s>])', html[:50000], re.I)
if m:
stats.stats_sum('parser split short fail save', 1)
return html[:m.start()], html[m.end():]
else:
return '', html
# having seen <head>, we're willing to parse for a long time for </head or <body
m = re.search(r'<(?:/head>|body[\s>])', html[:1000000], re.I)
if not m:
stats.stats_sum('parser split long fail', 1)
return '', html
return html[:m.start()], html[m.end():] # matched text is not included in either
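# Illustrative sketch (not part of the original module): the head/body split on
# a tiny document. The sample string is an assumption for demonstration only.
def _example_split_head_body():
    head, body = split_head_body('<html><head><title>t</title></head><body>hi</body>')
    # head == '<html><head><title>t</title>' and body == '<body>hi</body>';
    # the matched '</head>' itself is dropped, as noted above
    return head, body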
def parse_refresh(s):
'''
https://www.w3.org/TR/html5/document-metadata.html#statedef-http-equiv-refresh
See in real life and not standard-conforming, in order of popularity:
whitespace after t before the ';'
starting with a decimal point
starting with a minus sign
empty time, starts with ';'
url= but missing the ';'
None of these actually work in modern FF, Chrome, or Safari
'''
t = None
refresh = r'\s* (\d+) (?:\.[\d\.]*)? [;,] \s* ([Uu][Rr][Ll] \s* = \s* ["\']?)? (.*)'
m = re.match(refresh, s, re.X)
if m:
t, sep, url = m.groups()
if sep and sep.endswith('"') and '"' in url:
url = url[:url.index('"')]
if sep and sep.endswith("'") and "'" in url:
url = url[:url.index("'")]
try:
t = int(t)
except ValueError:
t = None
else:
if s.isdigit():
t = int(s)
url = None
return t, url
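# Illustrative sketch (not part of the original module): expected shapes of
# parse_refresh() results. The header values are assumptions for demonstration only.
def _example_parse_refresh():
    ten = parse_refresh('10; url=http://example.com/')  # -> (10, 'http://example.com/')
    bare = parse_refresh('30')                          # -> (30, None)
    return ten, bare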
'''
Helpers to minimize how many bytes we have to html parse.
Of course, these are all dangerous, but they might be useful
if the <head> of a webpage is abnormally large
'''
def regex_out_comments(html):
# I think whitespace is allowed: < \s* !-- .* -- \s* > XXX
return re.sub(r'<!--.*?-->', '', html, flags=re.S)
def regex_out_some_scripts(html):
'''
This nukes <script>...</script>, but does not nuke <script type="...
'''
return re.sub(r'<script>.*?</script>', '', html, flags=re.S)
def regex_out_all_scripts(html):
return re.sub(r'<script[\s>].*?</script>', '', html, flags=re.S)
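# Illustrative sketch (not part of the original module): how the helpers above
# might be chained to shrink an oversized <head> before soup-parsing it; which
# helpers to apply, and in what order, is an assumption for demonstration only.
def _example_shrink_head(head):
    head = regex_out_comments(head)
    head = regex_out_all_scripts(head)
    return head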
| cocrawler/cocrawler | cocrawler/parse.py | Python | apache-2.0 | 14,541 |
from ut_arena.settings import * | blstream/ut-arena | ut_arena_py_api/ut_arena/envs/test/settings.py | Python | apache-2.0 | 31 |
# -*- coding: utf-8 -*-
# pylint: disable=locally-disabled, star-args
""" WebDAV upload method for dput.
Install to "/usr/share/dput/webdav.py".
"""
from __future__ import with_statement
import re
import os
import sys
import cgi
import netrc
import socket
import fnmatch
import getpass
import httplib
import urllib2
import urlparse
import unittest
from contextlib import closing
from email import parser as rfc2822_parser
try:
import dputhelper
except ImportError:
sys.path.insert(0, "/usr/share/dput/helper")
import dputhelper
# Block size for upload streaming
CHUNK_SIZE = 16 * 1024
def trace(msg, **kwargs):
"""Emit log traces in debug mode."""
if trace.debug:
print("D: webdav: " + (msg % kwargs))
trace.debug = False
def log(msg, **kwargs):
"""Emit log message to stderr."""
sys.stdout.flush()
sys.stderr.write("webdav: " + (msg % kwargs) + "\n")
sys.stderr.flush()
def _resolve_credentials(fqdn, login):
"""Look up special forms of credential references."""
result = login
if "$" in result:
result = os.path.expandvars(result)
if result.startswith("netrc:"):
result = result.split(':', 1)[1]
if result:
result = os.path.abspath(os.path.expanduser(result))
accounts = netrc.netrc(result or None)
account = accounts.authenticators(fqdn)
if not account or not(account[0] or account[1]):
raise dputhelper.DputUploadFatalException("Cannot find account for host %s in %s netrc file" % (
fqdn, result or "default"))
# account is (login, account, password)
user, pwd = account[0] or account[1], account[2] or ""
result = "%s:%s" % (user, pwd)
else:
if result.startswith("file:"):
result = os.path.abspath(os.path.expanduser(result.split(':', 1)[1]))
with closing(open(result, "r")) as handle:
result = handle.read().strip()
try:
user, pwd = result.split(':', 1)
except ValueError:
user, pwd = result, ""
trace("Resolved login credentials to %(user)s:%(pwd)s", user=user, pwd='*' * len(pwd))
return result
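# Illustrative sketch (not part of the original plugin): the credential forms
# _resolve_credentials() understands. The host and account values are
# assumptions for demonstration only.
def _example_resolve_credentials():
    plain = _resolve_credentials("repo.example.com", "johndoe:secret")  # used as-is
    # "netrc:~/.netrc-upload" would be looked up by host in that netrc file
    # "file:~/.dput-credentials" would be read as a "user:password" line
    # "$DPUT_LOGIN" would be expanded from the environment first
    return plain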
class PromptingPasswordMgr(urllib2.HTTPPasswordMgr):
""" Custom password manager that prompts for a password once, if none is available otherwise.
Based on code in dput 0.9.6 (http method).
"""
def __init__(self, login):
urllib2.HTTPPasswordMgr.__init__(self)
self.login = login
def find_user_password(self, realm, authuri):
"""Prompt for a password once and remember it, unless already provided in the configuration."""
authuri = self.reduce_uri(authuri)[0]
authinfo = urllib2.HTTPPasswordMgr.find_user_password(self, realm, authuri)
if authinfo == (None, None):
credentials = self.login
if ':' in credentials:
authinfo = credentials.split(':', 1)
else:
password = getpass.getpass(" Password for %s:" % realm)
self.add_password(realm, authuri, credentials, password)
authinfo = credentials, password
return authinfo
def _distro2repo(distro, repo_mappings):
"""Map distribution names to repo names according to config settings."""
# Parse the mapping config
mappings = [(i.split('=', 1) if '=' in i else (i, i)) for i in repo_mappings.split()]
# Try to find a match
result = distro
for pattern, target in mappings:
if fnmatch.fnmatchcase(distro.lower(), pattern.lower()):
result = target
break
trace("Mapped distro '%(distro)s' to '%(repo)s'", distro=distro, repo=result)
return result
def _resolve_incoming(fqdn, login, incoming, changes=None, cli_params=None, repo_mappings=""):
"""Resolve the given `incoming` value to a working URL."""
# Build fully qualified URL
scheme, netloc, path, params, query, anchor = urlparse.urlparse(incoming, scheme="http", allow_fragments=True)
if scheme not in ("http", "https"):
raise dputhelper.DputUploadFatalException("Unsupported URL scheme '%s'" % scheme)
url = urlparse.urlunparse((scheme, netloc or fqdn, path.rstrip('/') + '/', params, query, None))
# Parse anchor to parameters
url_params = dict(cgi.parse_qsl(anchor or '', keep_blank_values=True))
# Read changes from stream or file
pkgdata = {}
if changes:
try:
changes.read # pylint: disable=maybe-no-member
except AttributeError:
with closing(open(changes, "r")) as handle:
changes = handle.read()
else:
changes = changes.read() # pylint: disable=maybe-no-member
if changes.startswith("-----BEGIN PGP SIGNED MESSAGE-----"):
# Let someone else check this, we don't care a bit; gimme the data already
trace("Extracting package metadata from PGP signed message...")
changes = changes.split("-----BEGIN PGP")[1].replace('\r', '').split('\n\n', 1)[1]
pkgdata = dict([(key.lower().replace('-', '_'), val.strip())
for key, val in rfc2822_parser.HeaderParser().parsestr(changes).items()
])
# Extend changes metadata
pkgdata["loginuser"] = login.split(':')[0]
if "version" in pkgdata:
pkgdata["upstream"] = re.split(r"[-~]", pkgdata["version"])[0]
pkgdata.update(dict(
fqdn=fqdn, repo=_distro2repo(pkgdata.get("distribution", "unknown"), repo_mappings),
))
pkgdata.update(cli_params or {}) # CLI options can overwrite anything
trace("Collected metadata:\n %(meta)s", meta="\n ".join(["%s = %s" % (key, val)
for key, val in sorted(pkgdata.items())
if '\n' not in val # only print 'simple' values
]))
# Interpolate `url`
try:
try:
url.format
except AttributeError:
url = url % pkgdata # Python 2.5
else:
url = url.format(**pkgdata) # Python 2.6+
except KeyError, exc:
raise dputhelper.DputUploadFatalException("Unknown key (%s) in incoming templates '%s'" % (exc, incoming))
trace("Resolved incoming to `%(url)s' params=%(params)r", url=url, params=url_params)
return url, url_params
def _url_connection(url, method, skip_host=False, skip_accept_encoding=False):
"""Create HTTP[S] connection for `url`."""
scheme, netloc, path, params, query, _ = urlparse.urlparse(url)
result = conn = (httplib.HTTPSConnection if scheme == "https" else httplib.HTTPConnection)(netloc)
conn.debuglevel = int(trace.debug)
try:
conn.putrequest(method, urlparse.urlunparse((None, None, path, params, query, None)), skip_host, skip_accept_encoding)
conn.putheader("User-Agent", "dput")
conn.putheader("Connection", "close")
conn = None
finally:
if conn:
conn.close() # close in case of errors
return result
def _file_url(filepath, url):
"""Return URL for the given `filepath` in the DAV collection `url`."""
basename = os.path.basename(filepath)
return urlparse.urljoin(url.rstrip('/') + '/', basename)
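# Illustrative sketch (not part of the original plugin): _file_url() simply
# appends the file's basename to the collection URL; the paths are assumptions.
def _example_file_url():
    url = _file_url("/tmp/build/foo_1.0_all.deb", "http://repo.example.com/incoming")
    # -> "http://repo.example.com/incoming/foo_1.0_all.deb"
    return url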
def _dav_put(filepath, url, login, progress=None):
"""Upload `filepath` to given `url` (referring to a WebDAV collection)."""
fileurl = _file_url(filepath, url)
sys.stdout.write(" Uploading %s: " % os.path.basename(filepath))
sys.stdout.flush()
size = os.path.getsize(filepath)
    with closing(open(filepath, 'rb')) as handle:
if progress:
handle = dputhelper.FileWithProgress(handle, ptype=progress, progressf=sys.stdout, size=size)
trace("HTTP PUT to URL: %s" % fileurl)
try:
conn = _url_connection(fileurl, "PUT")
try:
conn.putheader("Authorization", 'Basic %s' % login.encode('base64').replace('\n', '').strip())
conn.putheader("Content-Length", str(size))
conn.endheaders()
conn.debuglevel = 0
while True:
data = handle.read(CHUNK_SIZE)
if not data:
break
conn.send(data)
conn.debuglevel = int(trace.debug)
resp = conn.getresponse()
if 200 <= resp.status <= 299:
print " done."
#elif res.status == 401 and not auth_headers:
#print "need authentication."
#auth_headers = AuthHandlerHackAround(url, res.msg, pwman).get_auth_headers()
elif resp.status == 401:
print " unauthorized."
raise urllib2.URLError("Upload failed as unauthorized (%s),"
" maybe wrong username or password?" % resp.reason)
else:
print " failed."
raise urllib2.URLError("Unexpected HTTP status %d %s" % (resp.status, resp.reason))
resp.read() # eat response body
finally:
conn.close()
except httplib.HTTPException, exc:
raise urllib2.URLError(exc)
def _check_url(url, allowed, mindepth=0):
"""Check if HTTP GET `url` returns a status code in `allowed`."""
if mindepth:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
path = '/'.join(path.split('/')[:mindepth+1]).rstrip('/') + '/'
url = urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
trace("Checking URL '%(url)s'", url=url)
try:
# TODO: Check requests need to use login credentials
with closing(urllib2.urlopen(url)) as handle:
handle.read()
code = handle.code
if code not in allowed:
raise urllib2.HTTPError(url, code,
"Unallowed HTTP status %d (%s)" % (code, handle.msg),
handle.headers, None)
except urllib2.HTTPError, exc:
code = exc.code
if code not in allowed:
raise
trace("Code %(code)d OK for URL '%(url)s'", url=url, code=code)
def _get_host_argument(fqdn):
""" We have to jump through several hoops to get to our config section,
which in turn is the only place where the host argument is available.
"""
import __main__ as dput # if only we would get passed our config section...
config = dput.config # pylint: disable=no-member
result = ""
for section in config.sections():
if (config.has_option(section, "fqdn")
and config.get(section, "fqdn") == fqdn
and config.has_option(section, section)):
result = config.get(section, section)
return result
def _get_config_data(fqdn):
"""Get configuration section for the chosen host, and CLI host parameters."""
# Without the patch applied, fall back to ugly hacks
if not upload.extended_info:
try:
caller = sys._getframe(2) # pylint: disable=protected-access
except AttributeError:
pass # somehow not CPython
else:
config = caller.f_globals.get("config")
host = caller.f_locals.get("host")
del caller
if config and host:
upload.extended_info = dict(config=config, host=host)
if upload.extended_info:
host_config = dict(upload.extended_info["config"].items(upload.extended_info["host"]))
host_argument = host_config.get(upload.extended_info["host"], "")
else:
host_config = {}
host_argument = _get_host_argument(fqdn)
log("WARN: Extended host configuration not available!")
# Parse "host:key=val;..." argument from command line into a dict
cli_params = dict(cgi.parse_qsl(host_argument.replace(',', ';'), keep_blank_values=True))
return host_config, cli_params
def upload(fqdn, login, incoming, files_to_upload, # pylint: disable=too-many-arguments
debug, dummy, progress=None):
"""Upload the files via WebDAV."""
assert sys.version_info >= (2, 5), "Your snake is a rotting corpse (Python 2.5+ required)"
trace.debug = bool(debug)
try:
host_config, cli_params = _get_config_data(fqdn)
login = _resolve_credentials(fqdn, login)
# Handle .changes file
changes_file = [i for i in files_to_upload if i.endswith(".changes")]
if not changes_file:
log("WARN: No changes file found in %(n)d files to upload", n=len(files_to_upload))
changes_file = None
else:
if len(changes_file) > 1:
log("WARN: More than one changes file found in %(n)d files to upload,"
" taking the 1st:\n %(changes)s",
n=len(files_to_upload), changes="\n ".join(changes_file))
changes_file = changes_file[0]
# Prepare for uploading
incoming, repo_params = _resolve_incoming(fqdn, login, incoming, changes=changes_file,
cli_params=cli_params, repo_mappings=host_config.get("repo_mappings", ""))
log("INFO: Destination base URL is\n %(url)s", url=urllib2.quote(incoming, safe=":/~;#"))
repo_params.update(cli_params)
mindepth = int(repo_params.get("mindepth", "0"), 10)
overwrite = int(repo_params.get("overwrite", "0"), 10)
# TODO: Add ability to enter missing password via terminal
# auth_handler = PromptingPasswordMgr(login)
# Special handling for integration test code
if "integration-test" in cli_params:
import pprint
print "upload arguments = ",
pprint.pprint(dict((k, v) for k, v in locals().iteritems() if k in (
"fqdn", "login", "incoming", "files_to_upload", "debug", "dummy", "progress")))
print "host config = ",
pprint.pprint(host_config)
print "host arguments = ",
pprint.pprint(cli_params)
else:
# TODO: "bintray" REST API support
# POST /packages/:subject/:repo
# POST /packages/:subject/:repo/:package/versions
# Check if .changes file already exists
if not overwrite and changes_file:
try:
_check_url(_file_url(changes_file, incoming), [404])
except urllib2.HTTPError, exc:
raise dputhelper.DputUploadFatalException("Overwriting existing changes at '%s' not allowed: %s" % (
_file_url(changes_file, incoming), exc))
# Check for existence of target path with minimal depth
if mindepth:
try:
_check_url(incoming, range(200, 300), mindepth=mindepth)
except urllib2.HTTPError, exc:
raise dputhelper.DputUploadFatalException("Required repository path '%s' doesn't exist: %s" % (
exc.filename, exc))
# Upload the files in the given order
for filepath in files_to_upload:
if "simulate" in cli_params:
log("WOULD upload '%(filename)s'", filename=os.path.basename(filepath))
else:
_dav_put(filepath, incoming, login, progress)
except (dputhelper.DputUploadFatalException, socket.error, urllib2.URLError, EnvironmentError), exc:
log("FATAL: %(exc)s", exc=exc)
sys.exit(1)
upload.extended_info = {}
#
# Unit Tests
#
def py25_format(template):
"""Helper for testing under Python 2.5."""
return template if sys.version_info >= (2, 6) else template.replace("{", "%(").replace("}", ")s")
class WebdavTest(unittest.TestCase): # pylint: disable=too-many-public-methods
"""Local unittests."""
DISTRO2REPO_DATA = [
("unknown", "incoming"),
("foobar", "incoming"),
("unstable", "snapshots"),
("snapshots", "snapshots"),
("foo-experimental", "snapshots"),
("bar-experimental", "snapshots"),
]
def test_distro2repo(self):
"""Test distribution mapping."""
cfg = "snapshots unstable=snapshots *-experimental=snapshots *=incoming"
for distro, repo in self.DISTRO2REPO_DATA:
result = _distro2repo(distro, cfg)
self.assertEquals(result, repo)
def test_resolve_incoming(self):
"""Test URL resolving."""
result, params = _resolve_incoming("repo.example.com:80", "", "incoming")
self.assertEquals(result, "http://repo.example.com:80/incoming/")
self.assertEquals(params, {})
result, _ = _resolve_incoming("repo.example.com:80", "", "https:///incoming/")
self.assertEquals(result, "https://repo.example.com:80/incoming/")
result, _ = _resolve_incoming("repo.example.com:80", "", "//explicit/incoming/")
self.assertEquals(result, "http://explicit/incoming/")
result, _ = _resolve_incoming("repo.example.com:80", "", py25_format("//{fqdn}/incoming/"))
self.assertEquals(result, "http://repo.example.com:80/incoming/")
_, params = _resolve_incoming("", "", "incoming#a=1&b=c")
self.assertEquals(params, dict(a="1", b="c"))
result, _ = _resolve_incoming("repo.example.com:80", "johndoe", py25_format("incoming/{loginuser}"))
self.assertEquals(result, "http://repo.example.com:80/incoming/johndoe/")
# Unsupported URL scheme
self.assertRaises(dputhelper.DputUploadFatalException, _resolve_incoming, "", "", "file:///incoming/")
# Unknown key
self.assertRaises(dputhelper.DputUploadFatalException, _resolve_incoming,
"", "", py25_format("http://example.com/incoming/{not_defined_ever}/"))
if __name__ == "__main__":
print("artifactory webdav plugin tests")
unittest.main()
| 1and1/artifactory-debian | dput-webdav/webdav.py | Python | apache-2.0 | 17,864 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova.tests.functional.api_sample_tests import test_servers
CONF = nova.conf.CONF
class AvailabilityZoneJsonTest(test_servers.ServersSampleBase):
ADMIN_API = True
extension_name = "os-availability-zone"
def _get_flags(self):
f = super(AvailabilityZoneJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.availability_zone.'
'Availability_zone')
return f
def test_availability_zone_list(self):
response = self._do_get('os-availability-zone')
self._verify_response('availability-zone-list-resp', {}, response, 200)
def test_availability_zone_detail(self):
response = self._do_get('os-availability-zone/detail')
self._verify_response('availability-zone-detail-resp', {}, response,
200)
def test_availability_zone_post(self):
self._post_server()
| bigswitch/nova | nova/tests/functional/api_sample_tests/test_availability_zone.py | Python | apache-2.0 | 1,634 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from taskflow.utils import reflection
class Flow(six.with_metaclass(abc.ABCMeta)):
"""The base abstract class of all flow implementations.
A flow is a structure that defines relationships between tasks. You can
add tasks and other flows (as subflows) to the flow, and the flow provides
a way to implicitly or explicitly define how they are interdependent.
    The exact structure of the relationships is defined by the concrete
    implementation, while this class defines a common interface and adds a
    human-readable (not necessarily unique) name.
    NOTE(harlowja): placing a flow inside another flow as a subflow is the
    intended way to compose flows together. When that happens, it is valid and
    permissible for the subflow and its parent flow to be flattened into a new
    flow during execution. Since a flow is just a 'structuring' concept, this
    behavior is typically nothing to worry about (it is not visible to the
    user), but it is worth mentioning here.
Flows are expected to provide the following methods/properties:
- add
- __len__
- requires
- provides
"""
def __init__(self, name):
self._name = str(name)
@property
def name(self):
"""A non-unique name for this flow (human readable)"""
return self._name
@abc.abstractmethod
def __len__(self):
"""Returns how many items are in this flow."""
def __str__(self):
lines = ["%s: %s" % (reflection.get_class_name(self), self.name)]
lines.append("%s" % (len(self)))
return "; ".join(lines)
@abc.abstractmethod
def add(self, *items):
"""Adds a given item/items to this flow."""
@abc.abstractproperty
def requires(self):
"""Browse argument requirement names this flow requires to run."""
@abc.abstractproperty
def provides(self):
"""Browse argument names provided by the flow."""
| ntt-sic/taskflow | taskflow/flow.py | Python | apache-2.0 | 2,637 |
"""
CNN on mnist data using fluid api of paddlepaddle
"""
import paddle
import paddle.fluid as fluid
def mnist_cnn_model(img):
"""
Mnist cnn model
Args:
        img(Variable): the input image to be recognized
Returns:
Variable: the label prediction
"""
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=img,
num_filters=20,
filter_size=5,
pool_size=2,
pool_stride=2,
act='relu')
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=conv_pool_1,
num_filters=50,
filter_size=5,
pool_size=2,
pool_stride=2,
act='relu')
fc = fluid.layers.fc(input=conv_pool_2, size=50, act='relu')
logits = fluid.layers.fc(input=fc, size=10, act='softmax')
return logits
def main():
"""
Train the cnn model on mnist datasets
"""
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
logits = mnist_cnn_model(img)
cost = fluid.layers.cross_entropy(input=logits, label=label)
avg_cost = fluid.layers.mean(x=cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.01)
optimizer.minimize(avg_cost)
batch_size = fluid.layers.create_tensor(dtype='int64')
batch_acc = fluid.layers.accuracy(
input=logits, label=label, total=batch_size)
BATCH_SIZE = 50
PASS_NUM = 3
ACC_THRESHOLD = 0.98
LOSS_THRESHOLD = 10.0
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=BATCH_SIZE)
# use CPU
place = fluid.CPUPlace()
# use GPU
# place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
exe.run(fluid.default_startup_program())
pass_acc = fluid.average.WeightedAverage()
for pass_id in range(PASS_NUM):
pass_acc.reset()
for data in train_reader():
loss, acc, b_size = exe.run(
fluid.default_main_program(),
feed=feeder.feed(data),
fetch_list=[avg_cost, batch_acc, batch_size])
pass_acc.add(value=acc, weight=b_size)
pass_acc_val = pass_acc.eval()[0]
print("pass_id=" + str(pass_id) + " acc=" + str(acc[0]) +
" pass_acc=" + str(pass_acc_val))
if loss < LOSS_THRESHOLD and pass_acc_val > ACC_THRESHOLD:
# early stop
break
print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc.eval()[
0]))
fluid.io.save_params(
exe, dirname='./mnist', main_program=fluid.default_main_program())
print('train mnist done')
if __name__ == '__main__':
main()
| kuke/models | fluid/adversarial/tutorials/mnist_model.py | Python | apache-2.0 | 2,821 |
# -*- coding: utf-8 -*-
from kubb_match.data.models import GridPosition, Round, Game
class BattleService(object):
def __init__(self):
pass
def create_games(self, positions):
games = []
nr = int(len(positions) / 5)
field = 1
for row in ('A', 'B', 'C', 'D', 'E'):
for x in range(1, nr + 1, 2):
print(row + str(x))
print(row + str(x + 1))
team1 = next((pos for pos in positions if pos.position == row + str(x)))
team2 = next((pos for pos in positions if pos.position == row + str(x + 1)))
game = Game(team1_id=team1.team_id, team2_id=team2.team_id, field=field)
games.append(game)
field += 1
return games
def calculate_next_round(self, round, final=False):
positions = self.calculate_positions(round)
new_round = Round()
new_round.positions = positions
if not final:
new_round.games = self.create_games(positions)
return new_round
def calculate_positions(self, round):
positions = []
losers = []
games = round.games
games.sort(key=lambda x: x.field, reverse=False)
for game in games:
if game.winner == game.team1_id:
grid_pos = self.position_winner(round, positions, game.team1_id, True)
positions.append(grid_pos)
losers.append(game.team2_id)
else:
grid_pos = self.position_winner(round, positions, game.team2_id, False)
positions.append(grid_pos)
losers.append(game.team1_id)
for loser_id in losers:
grid_pos = self.position_loser(round, positions, loser_id)
positions.append(grid_pos)
return positions
def position_winner(self, round, positions, winner_id, pos_1):
pos = next((pos for pos in round.positions if pos.team_id == winner_id))
key = pos.position
        if key[0] == 'A':
            # already on the top row; the winner stays on row A
            row = key[0]
        else:
            row = self.move_row_up(key[0])
pos = int(key[1])
key = row + str(pos)
#if key in [pos.position for pos in positions]:
# pos = pos + 1 if pos_1 else pos - 1
# key = row + str(pos)
pos = int(key[1])
while key in [pos.position for pos in positions]:
pos += 1
if pos > 8:
pos -= 8
key = row + str(pos)
grid_pos = GridPosition(position=key, team_id=winner_id)
return grid_pos
def position_loser(self, round, positions, loser_id):
pos = next((pos for pos in round.positions if pos.team_id == loser_id))
key = pos.position
if key[0] == 'E':
row = 'E'
pos = int(key[1])
while key in [pos.position for pos in positions]:
pos += 1
if pos > 8:
pos -= 8
key = row + str(pos)
else:
row = self.move_row_down(key[0])
pos = int(key[1]) + 2
if pos > 8:
pos -= 8
key = row + str(pos)
while key in [pos.position for pos in positions]:
pos += 1
if pos > 8:
pos -= 8
key = row + str(pos)
grid_pos = GridPosition(position=key, team_id=loser_id)
return grid_pos
@staticmethod
def move_row_up(row):
return chr(ord(row) - 1)
@staticmethod
def move_row_down(row):
return chr(ord(row) + 1)
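# Illustrative sketch (not part of the original service): building a tiny grid
# (two teams per row, rows A-E) and pairing them into games. The team ids and
# positions below are assumptions for demonstration only.
def _example_create_games():
    positions = [GridPosition(position=row + str(col), team_id=i * 2 + col)
                 for i, row in enumerate(('A', 'B', 'C', 'D', 'E'))
                 for col in (1, 2)]
    # yields one game per row, five games in total
    return BattleService().create_games(positions)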
| BartSaelen/kubb_match | kubb_match/service/battle_service.py | Python | apache-2.0 | 3,670 |
from gittle import Gittle
repo = Gittle('.')
latest = [
    info['sha']
    for info in repo.commit_info()[1:3]
]
print(repo.diff(*latest, diff_type='classic'))
print("""
Last Diff
""")
print(list(repo.diff('HEAD')))
| 0asa/gittle | examples/diff.py | Python | apache-2.0 | 227 |
from setuptools import setup, find_packages
from dist_job_mgr.version import VERSION
setup(
name='dist_job_mgr',
version=VERSION,
author='genForma Corp',
author_email='[email protected]',
url='',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
entry_points = {
'console_scripts': [
'djmctl = dist_job_mgr.djmctl:main',
'djm-worker = dist_job_mgr.worker_main:main'
]},
install_requires=['lockfile>=0.9',], # 'python-daemon'],
license='Apache V2.0',
description='Distributed Job Manager',
long_description="description"
)
| quaddra/dist_job_mgr | setup.py | Python | apache-2.0 | 639 |
#
# Validator for "idlebg" Test
#
from pscheduler import json_validate
MAX_SCHEMA = 1
def spec_is_valid(json):
schema = {
"type": "object",
"properties": {
"schema": { "$ref": "#/pScheduler/Cardinal" },
"duration": { "$ref": "#/pScheduler/Duration" },
"host": { "$ref": "#/pScheduler/Host" },
"host-node": { "$ref": "#/pScheduler/URLHostPort" },
"interval": { "$ref": "#/pScheduler/Duration" },
"parting-comment": { "$ref": "#/pScheduler/String" },
"starting-comment": { "$ref": "#/pScheduler/String" },
},
"required": [
"duration"
]
}
return json_validate(json, schema, max_schema=MAX_SCHEMA)
def result_is_valid(json):
schema = {
"type": "object",
"properties": {
"schema": { "$ref": "#/pScheduler/Cardinal" },
"succeeded": { "$ref": "#/pScheduler/Boolean" },
"error": { "$ref": "#/pScheduler/String" },
"diags": { "$ref": "#/pScheduler/String" },
"time-slept": { "$ref": "#/pScheduler/Duration" },
},
"required": [
"succeeded",
"time-slept",
]
}
return json_validate(json, schema)
def limit_is_valid(json):
schema = {
"type": "object",
"properties": {
"schema": { "$ref": "#/pScheduler/Cardinal" },
"duration": { "$ref": "#/pScheduler/Limit/Duration" },
"starting-comment": { "$ref": "#/pScheduler/Limit/String" },
"parting-comment": { "$ref": "#/pScheduler/Limit/String" }
},
"additionalProperties": False
}
return json_validate(json, schema)
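#
# Illustrative sketch (not part of the original validator): a minimal spec that
# should pass spec_is_valid(), since 'duration' is the only required field. The
# example values are assumptions for demonstration only.
#
def _example_spec_check():
    return spec_is_valid({
        "schema": 1,
        "duration": "PT30S"
    })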
| perfsonar/pscheduler | pscheduler-test-idlebgm/idlebgm/validate.py | Python | apache-2.0 | 1,872 |
# -*- coding: utf-8 -*-
#
# phaxio-python documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 8 20:17:15 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
# 'sphinx.ext.githubpages',
# 'sphinx.ext.autosectionlabel',
'sphinx.ext.autosummary'
]
autosummary_generate = True
autodoc_default_flags = ['members', 'undoc-members']
# skips documenting to_dict and to_str in model types
def skip_member(app, what, name, obj, skip, options):
if name in ['to_dict', 'to_str']:
return True
return skip
# skips all docstrings in model types, but leaves the :rtype: tags so we have type information and links
def remove_module_docstring(app, what, name, obj, options, lines):
if name.startswith("phaxio.swagger_client"):
lines[:] = [x for x in lines if 'rtype' in x]
def setup(app):
app.connect('autodoc-skip-member', skip_member)
app.connect("autodoc-process-docstring", remove_module_docstring)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'phaxio-python'
copyright = u'2017, Ari Polsky'
author = u'Ari Polsky'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'.2'
# The full version, including alpha/beta/rc tags.
release = u'.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'phaxio-pythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'phaxio-python.tex', u'phaxio-python Documentation',
u'Ari Polsky', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'phaxio-python', u'phaxio-python Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'phaxio-python', u'phaxio-python Documentation',
author, 'phaxio-python', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| anpolsky/phaxio-python | docs/source/conf.py | Python | apache-2.0 | 5,715 |
from datetime import date, datetime, time
from decimal import Decimal
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.utils.timezone import now
from i18nfield.strings import LazyI18nString
from pretix.base import settings
from pretix.base.models import Event, Organizer, User
from pretix.base.settings import SettingsSandbox
from pretix.control.forms.global_settings import GlobalSettingsObject
class SettingsTestCase(TestCase):
def setUp(self):
settings.DEFAULTS['test_default'] = {
'default': 'def',
'type': str
}
self.global_settings = GlobalSettingsObject()
self.global_settings.settings._flush()
self.organizer = Organizer.objects.create(name='Dummy', slug='dummy')
self.organizer.settings._flush()
self.event = Event.objects.create(
organizer=self.organizer, name='Dummy', slug='dummy',
date_from=now(),
)
self.event.settings._flush()
def test_global_set_explicit(self):
self.global_settings.settings.test = 'foo'
self.assertEqual(self.global_settings.settings.test, 'foo')
# Reload object
self.global_settings = GlobalSettingsObject()
self.assertEqual(self.global_settings.settings.test, 'foo')
def test_organizer_set_explicit(self):
self.organizer.settings.test = 'foo'
self.assertEqual(self.organizer.settings.test, 'foo')
# Reload object
self.organizer = Organizer.objects.get(id=self.organizer.id)
self.assertEqual(self.organizer.settings.test, 'foo')
def test_event_set_explicit(self):
self.event.settings.test = 'foo'
self.assertEqual(self.event.settings.test, 'foo')
# Reload object
self.event = Event.objects.get(id=self.event.id)
self.assertEqual(self.event.settings.test, 'foo')
def test_event_set_twice(self):
self.event.settings.test = 'bar'
self.event.settings.test = 'foo'
self.assertEqual(self.event.settings.test, 'foo')
# Reload object
self.event = Event.objects.get(id=self.event.id)
self.assertEqual(self.event.settings.test, 'foo')
def test_organizer_set_on_global(self):
self.global_settings.settings.test = 'foo'
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.organizer.settings.test, 'foo')
# Reload object
self.global_settings = GlobalSettingsObject()
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.organizer.settings.test, 'foo')
def test_event_set_on_global(self):
self.global_settings.settings.test = 'foo'
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'foo')
# Reload object
self.global_settings = GlobalSettingsObject()
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'foo')
def test_event_set_on_organizer(self):
self.organizer.settings.test = 'foo'
self.assertEqual(self.organizer.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'foo')
# Reload object
self.organizer = Organizer.objects.get(id=self.organizer.id)
self.assertEqual(self.organizer.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'foo')
def test_event_override_organizer(self):
self.organizer.settings.test = 'foo'
self.event.settings.test = 'bar'
self.assertEqual(self.organizer.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'bar')
# Reload object
self.organizer = Organizer.objects.get(id=self.organizer.id)
self.event = Event.objects.get(id=self.event.id)
self.assertEqual(self.organizer.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'bar')
def test_event_override_global(self):
self.global_settings.settings.test = 'foo'
self.event.settings.test = 'bar'
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'bar')
# Reload object
self.global_settings = GlobalSettingsObject()
self.event = Event.objects.get(id=self.event.id)
self.assertEqual(self.global_settings.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'bar')
def test_default(self):
self.assertEqual(self.global_settings.settings.test_default, 'def')
self.assertEqual(self.organizer.settings.test_default, 'def')
self.assertEqual(self.event.settings.test_default, 'def')
self.assertEqual(self.event.settings.get('nonexistant', default='abc'), 'abc')
def test_default_typing(self):
self.assertIs(type(self.event.settings.get('nonexistant', as_type=Decimal, default=0)), Decimal)
def test_item_access(self):
self.event.settings['foo'] = 'abc'
self.assertEqual(self.event.settings['foo'], 'abc')
del self.event.settings['foo']
self.assertIsNone(self.event.settings['foo'])
def test_delete(self):
self.organizer.settings.test = 'foo'
self.event.settings.test = 'bar'
self.assertEqual(self.organizer.settings.test, 'foo')
self.assertEqual(self.event.settings.test, 'bar')
del self.event.settings.test
self.assertEqual(self.event.settings.test, 'foo')
self.event = Event.objects.get(id=self.event.id)
self.assertEqual(self.event.settings.test, 'foo')
del self.organizer.settings.test
self.assertIsNone(self.organizer.settings.test)
self.organizer = Organizer.objects.get(id=self.organizer.id)
self.assertIsNone(self.organizer.settings.test)
def test_serialize_str(self):
self._test_serialization('ABC', as_type=str)
def test_serialize_float(self):
self._test_serialization(2.3, float)
def test_serialize_int(self):
self._test_serialization(2, int)
def test_serialize_datetime(self):
self._test_serialization(now(), datetime)
def test_serialize_time(self):
self._test_serialization(now().time(), time)
def test_serialize_date(self):
self._test_serialization(now().date(), date)
def test_serialize_decimal(self):
self._test_serialization(Decimal('2.3'), Decimal)
def test_serialize_dict(self):
self._test_serialization({'a': 'b', 'c': 'd'}, dict)
def test_serialize_list(self):
self._test_serialization([1, 2, 'a'], list)
def test_serialize_lazyi18nstring(self):
self._test_serialization(LazyI18nString({'de': 'Hallo', 'en': 'Hello'}), LazyI18nString)
def test_serialize_bool(self):
self._test_serialization(True, bool)
self._test_serialization(False, bool)
def test_serialize_bool_implicit(self):
self.event.settings.set('test', True)
self.event.settings._flush()
self.assertIs(self.event.settings.get('test', as_type=None), True)
self.event.settings.set('test', False)
self.event.settings._flush()
self.assertIs(self.event.settings.get('test', as_type=None), False)
def test_serialize_versionable(self):
self._test_serialization(self.event, Event)
def test_serialize_model(self):
self._test_serialization(User.objects.create_user('[email protected]', 'dummy'), User)
def test_serialize_unknown(self):
class Type:
pass
try:
self._test_serialization(Type(), Type)
self.assertTrue(False, 'No exception thrown!')
except TypeError:
pass
def test_serialize_file(self):
val = SimpleUploadedFile("sample_invalid_image.jpg", b"file_content", content_type="image/jpeg")
default_storage.save(val.name, val)
val.close()
self.event.settings.set('test', val)
self.event.settings._flush()
f = self.event.settings.get('test', as_type=File)
self.assertIsInstance(f, File)
self.assertTrue(f.name.endswith(val.name))
f.close()
def test_unserialize_file_value(self):
val = SimpleUploadedFile("sample_invalid_image.jpg", b"file_content", content_type="image/jpeg")
default_storage.save(val.name, val)
val.close()
self.event.settings.set('test', val)
self.event.settings._flush()
f = self.event.settings.get('test', as_type=File)
self.assertIsInstance(f, File)
self.assertTrue(f.name.endswith(val.name))
f.close()
def test_autodetect_file_value(self):
val = SimpleUploadedFile("sample_invalid_image.jpg", b"file_content", content_type="image/jpeg")
default_storage.save(val.name, val)
val.close()
self.event.settings.set('test', val)
self.event.settings._flush()
f = self.event.settings.get('test')
self.assertIsInstance(f, File)
self.assertTrue(f.name.endswith(val.name))
f.close()
def test_autodetect_file_value_of_parent(self):
val = SimpleUploadedFile("sample_invalid_image.jpg", b"file_content", content_type="image/jpeg")
default_storage.save(val.name, val)
val.close()
self.organizer.settings.set('test', val)
self.organizer.settings._flush()
f = self.event.settings.get('test')
self.assertIsInstance(f, File)
self.assertTrue(f.name.endswith(val.name))
f.close()
def _test_serialization(self, val, as_type):
self.event.settings.set('test', val)
self.event.settings._flush()
self.assertEqual(self.event.settings.get('test', as_type=as_type), val)
self.assertIsInstance(self.event.settings.get('test', as_type=as_type), as_type)
def test_sandbox(self):
sandbox = SettingsSandbox('testing', 'foo', self.event)
sandbox.set('foo', 'bar')
self.assertEqual(sandbox.get('foo'), 'bar')
self.assertEqual(self.event.settings.get('testing_foo_foo'), 'bar')
self.assertIsNone(self.event.settings.get('foo'), 'bar')
sandbox['bar'] = 'baz'
sandbox.baz = 42
self.event = Event.objects.get(id=self.event.id)
sandbox = SettingsSandbox('testing', 'foo', self.event)
self.assertEqual(sandbox['bar'], 'baz')
self.assertEqual(sandbox.baz, '42')
del sandbox.baz
del sandbox['bar']
self.assertIsNone(sandbox.bar)
self.assertIsNone(sandbox['baz'])
def test_freeze(self):
olddef = settings.DEFAULTS
settings.DEFAULTS = {
'test_default': {
'default': 'def',
'type': str
}
}
self.event.organizer.settings.set('bar', 'baz')
self.event.organizer.settings.set('foo', 'baz')
self.event.settings.set('foo', 'bar')
frozen = self.event.settings.freeze()
self.event.settings.set('foo', 'notfrozen')
try:
self.assertEqual(frozen, {
'test_default': 'def',
'bar': 'baz',
'foo': 'bar'
})
finally:
settings.DEFAULTS = olddef
| Flamacue/pretix | src/tests/base/test_settings.py | Python | apache-2.0 | 11,458 |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run YCSB against MongoDB.
YCSB is a load generator for many 'cloud' databases. MongoDB is a NoSQL
database.
MongoDB homepage: http://www.mongodb.org/
YCSB homepage: https://github.com/brianfrankcooper/YCSB/wiki
"""
import functools
import random
import string
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_benchmarks import mongodb_ycsb_benchmark
from perfkitbenchmarker.linux_packages import ycsb
FLAGS = flags.FLAGS
flags.DEFINE_string('kubernetes_mongodb_cpu_request', '7.1',
'CPU request of mongodb.')
flags.DEFINE_string('kubernetes_mongodb_memory_request', '16Gi',
'Memory request of mongodb.')
flags.DEFINE_string('kubernetes_mongodb_cpu_limit', '7.6',
'CPU limit of mongodb, should be bigger than CPU request')
flags.DEFINE_string(
'kubernetes_mongodb_memory_limit', '32Gi',
'Memory limit of mongodb, should be bigger than memory request')
flags.DEFINE_string('kubernetes_mongodb_disk_size', '200Gi',
'Disk size used by mongodb')
# TODO(user): Use GetStorageClass function, once available.
STORAGE_CLASS = flags.DEFINE_string(
'kubernetes_mongodb_storage_class',
None,
'storageClassType of data disk. Defaults to provider specific storage '
'class.')
BENCHMARK_NAME = 'kubernetes_mongodb'
BENCHMARK_CONFIG = """
kubernetes_mongodb:
description: Benchmarks MongoDB server performance.
container_cluster:
cloud: GCP
type: Kubernetes
vm_count: 1
vm_spec: *default_single_core
nodepools:
mongodb:
vm_count: 1
vm_spec:
GCP:
machine_type: n2-standard-8
zone: us-central1-a
Azure:
zone: westus
machine_type: Standard_D3_v2
AWS:
zone: us-east-1a
machine_type: c5.xlarge
clients:
vm_count: 1
vm_spec: *default_single_core
vm_groups:
clients:
vm_spec: *default_single_core
vm_count: null
"""
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['ycsb_client_vms'].present:
config['container_cluster']['nodepools']['mongodb']['vm_count'] = (
FLAGS.ycsb_client_vms)
return config
def _PrepareClient(vm):
"""Install YCSB on the client VM."""
vm.Install('ycsb')
# Disable logging for MongoDB driver, which is otherwise quite verbose.
log_config = """<configuration><root level="WARN"/></configuration>"""
vm.RemoteCommand("echo '{0}' > {1}/logback.xml".format(
log_config, ycsb.YCSB_DIR))
def _PrepareDeployment(benchmark_spec):
"""Deploys MongoDB Operator and instance on the cluster."""
cluster = benchmark_spec.container_cluster
admin_password = ''.join(
random.choice(string.ascii_letters + string.digits) for _ in range(20))
storage_class = STORAGE_CLASS.value or cluster.GetDefaultStorageClass()
cluster.ApplyManifest(
'container/kubernetes_mongodb/kubernetes_mongodb_crd.yaml')
cluster.ApplyManifest(
'container/kubernetes_mongodb/kubernetes_mongodb_operator.yaml.j2',
cpu_request=FLAGS.kubernetes_mongodb_cpu_request,
cpu_limit=FLAGS.kubernetes_mongodb_cpu_limit,
memory_request=FLAGS.kubernetes_mongodb_memory_request,
memory_limit=FLAGS.kubernetes_mongodb_memory_limit,
disk_size=FLAGS.kubernetes_mongodb_disk_size,
storage_class=storage_class,
admin_password=admin_password)
time.sleep(60)
benchmark_spec.container_cluster.WaitForResource('pod/mongodb-0', 'Ready')
mongodb_cluster_ip = benchmark_spec.container_cluster.GetClusterIP(
'mongodb-service')
benchmark_spec.mongodb_url = 'mongodb://ycsb:{password}@{ip_address}:27017/ycsb?authSource=ycsb'.format(
password=admin_password, ip_address=mongodb_cluster_ip)
def Prepare(benchmark_spec):
"""Install MongoDB on one VM and YCSB on another.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
server_partials = [functools.partial(_PrepareDeployment, benchmark_spec)]
client_partials = [
functools.partial(_PrepareClient, client)
for client in benchmark_spec.vm_groups['clients']
]
vm_util.RunThreaded((lambda f: f()), server_partials + client_partials)
benchmark_spec.executor = ycsb.YCSBExecutor('mongodb', cp=ycsb.YCSB_DIR)
def Run(benchmark_spec):
return mongodb_ycsb_benchmark.Run(benchmark_spec)
def Cleanup(benchmark_spec):
"""Remove MongoDB and YCSB.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
del benchmark_spec
| GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/linux_benchmarks/kubernetes_mongodb_ycsb_benchmark.py | Python | apache-2.0 | 5,544 |
# coding=utf-8
import pprint
import config
import json
import urllib
import requests
class Driver(object):
def __init__(self):
self.driver_type = self.__class__.__name__
# Get credentials from conf files for CMDB
pass
def get_driver_type(self):
return self.driver_type
def get_ci(self, ci):
pass
def set_ci(self, ci):
pass
class Itop(Driver):
def get_ci(self, ci):
print("Get from itop")
return True
def set_ci(self, ci):
username = config.alexandria.conf_file.get_driver_parameters("itop", "loginItop")
password = config.alexandria.conf_file.get_driver_parameters("itop", "passwordItop")
config.logger.debug("login : {}, password : {}".format(
username,
password
)
)
# Craft request body and header
urlbase = config.alexandria.conf_file.get_driver_parameters("itop", "endpoint")
request = '{"operation":"core/create","comment":"Synchronization from Alexandria","class":"Server","output_fields":"id,name,ram", "fields":{"org_id": "3","name":"' + ci.data["Name"] + '","ram":"' + format((ci.data["MemorySummary"])["TotalSystemMemoryGiB"]) + '","serialnumber":"' + ci.data["SerialNumber"] + '"}}'
urlparam = {'version' : '1.0',
'auth_user' : username,
'auth_pwd' : password,
'json_data' : request
}
#header = {'Content-type': 'application/json'}
url = urlbase + '?' + urllib.urlencode(urlparam)
config.logger.debug(url)
#=======================================================================
# answer = requests.post(url,
# headers=header,
# verify="False"
# )
#=======================================================================
answer = requests.post(url,
auth=(username,password)
)
config.logger.debug(answer.status_code)
config.logger.debug(answer.text)
class Redfish(Driver):
def get_ci(self,ci):
print("Get from redfish")
import redfish
print(ci.ip_mgmt + " - " + ci.login + " - " + ci.password)
#remote_mgmt = redfish.connect(ci.ip_mgmt, ci.login, ci.password, verify_cert=False)
remote_mgmt = redfish.connect(ci.ip_mgmt, ci.login, ci.password, simulator=True, enforceSSL=False)
ci.ci_type = remote_mgmt.Systems.systems_list[0].get_parameter("@odata.type")
ci.data = remote_mgmt.Systems.systems_list[0].get_parameters()
#print("Redfish API version : {} \n".format(remote_mgmt.get_api_version()))
return True
def set_ci(self, ci):
print "Push to Redfish"
return True
class Ironic(Driver):
pass
class Mondorescue(Driver):
pass
class Fakecmdb(Driver):
def set_ci(self, ci):
# Determine ci type so we can do the proper action.
pp = pprint.PrettyPrinter(indent=4)
if ci.ci_type == "Manager":
print("We are in Fakecmdb driver !")
pp.pprint(ci.data)
# Simply write a json file with ci.data content.
with open("Fakecmdb.json", "w") as jsonfile:
json.dump(ci.data, jsonfile, indent=4)
jsonfile.close()
#
#=======================================================================
class Fakeprovider(Driver):
def get_ci(self, ci):
# Simulate a driver that will provide Manager data.
# TODO a connect method must be implemented
# Assuming the connection is ok.
# Now create a copy of manager model from reference model.
#ci.ci_type = "Manager"
#ci.data = config.alexandria.model.get_model("Manager")
# Update the structure with data
# TODO : think to encapsulate to not edit ci.data directly.
# This could be also a way to check source of truth.
# If data provided by our driver is not the source of truth
# then discard it.
#ci.data["ManagerType"] = "BMC"
#ci.data["Model"] = "Néné Manager"
#ci.data["FirmwareVersion"] = "1.00"
#if ci.data is config.alexandria.model.Manager:
# print "identical"
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(ci.ci_type)
class DriverCollection(list):
pass
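# ---------------------------------------------------------------------------
# Editor's usage sketch -- not part of the original module. It assumes the
# caller provides a CI object exposing the attributes the drivers read above
# (ip_mgmt, login, password, ci_type, data); the helper name below is an
# editor addition, not an Alexandria API.
def _editor_demo_sync(ci):
    """Pull a CI from the Redfish provider and push it into the fake CMDB."""
    source = Redfish()
    target = Fakecmdb()
    if source.get_ci(ci):   # fills ci.ci_type and ci.data from the BMC
        target.set_ci(ci)   # dumps ci.data to Fakecmdb.json when ci_type == "Manager"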
| uggla/alexandria | alexandria/drivers.py | Python | apache-2.0 | 4,848 |
#
# Copyright (c) 2013, Centre National de la Recherche Scientifique
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import string
import os
import os.path
from random import choice
import stratuslab.system.SystemFactory as SystemFactory
from stratuslab.installator.Installator import Installator
from stratuslab import Util
from stratuslab.Util import printError
class CouchbaseServer(Installator):
    """Installs and configures a single-node Couchbase Server on the frontend."""
@staticmethod
def _generate_password():
chars = string.letters + string.digits
length = 8
return ''.join([choice(chars) for _ in range(length)])
@staticmethod
def _cb_cmd(func, host, options):
opts = ' '.join(options)
cmd = '/opt/couchbase/bin/couchbase-cli %s -c %s:8091 %s' % (func, host, opts)
return cmd
def __init__(self, configHolder):
configHolder.assign(self)
self.system = SystemFactory.getSystem(self.frontendSystem, configHolder)
self._serviceName = 'couchbase-server'
self._packages = ['couchbase-server']
self._cb_cluster_username = 'admin'
self._cb_cluster_password = CouchbaseServer._generate_password()
self._cb_cluster_password_path = '/opt/couchbase/cluster-password.txt'
def _installFrontend(self):
self._installPackages()
def _setupFrontend(self):
if os.path.exists(self._cb_cluster_password_path):
Util.printStep('%s exists; skipping Couchbase configuration' % self._cb_cluster_password_path)
else:
self._configure()
def _startServicesFrontend(self):
self._restartService()
def _installPackages(self):
Util.printStep('Installing Couchbase packages')
self.system.installPackages(self._packages)
def _configure(self):
Util.printStep('(Re-)starting Couchbase')
cmd = 'service %s restart' % self._serviceName
self._executeExitOnError(cmd)
time.sleep(5)
Util.printStep('Set Couchbase data location')
options = ['--node-init-data-path=/opt/couchbase/var/lib/couchbase/data']
cmd = CouchbaseServer._cb_cmd('node-init', self.frontendIp, options)
self._executeExitOnError(cmd)
Util.printStep('Create default Couchbase bucket')
options = ['--bucket=default',
'--bucket-type=couchbase',
'--bucket-ramsize=400',
'--bucket-replica=1']
cmd = CouchbaseServer._cb_cmd('bucket-create', self.frontendIp, options)
self._executeExitOnError(cmd)
Util.printStep('Initialize Couchbase admin account')
options = ['--cluster-init-username=%s' % self._cb_cluster_username,
'--cluster-init-password=%s' % self._cb_cluster_password]
cmd = CouchbaseServer._cb_cmd('cluster-init', self.frontendIp, options)
self._executeExitOnError(cmd)
Util.printStep('Saving cluster password in %s' % self._cb_cluster_password_path)
with open(self._cb_cluster_password_path, 'w') as f:
f.write(self._cb_cluster_password + "\n")
Util.printStep('Reducing read access to password file')
os.chmod(self._cb_cluster_password_path, 0400)
def _restartService(self):
Util.printStep('Adding %s to chkconfig and restarting' % self._serviceName)
cmd = 'chkconfig --add %s' % self._serviceName
Util.execute(cmd.split(' '))
cmd = 'service %s restart' % self._serviceName
Util.execute(cmd.split(' '))
def _executeExitOnError(self, cmd_str):
rc, output = Util.execute(cmd_str.split(' '), withOutput=True, verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED)
if rc != 0:
printError('Failed running: %s\n%s' % (cmd_str, output))
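    # Editor's note (illustrative, not part of the original installer): the
    # _cb_cmd helper above simply assembles a couchbase-cli invocation, e.g.
    #   CouchbaseServer._cb_cmd('bucket-create', '10.0.0.5', ['--bucket=default'])
    # returns
    #   '/opt/couchbase/bin/couchbase-cli bucket-create -c 10.0.0.5:8091 --bucket=default'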
| StratusLab/client | api/code/src/main/python/stratuslab/installator/CouchbaseServer.py | Python | apache-2.0 | 4,317 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various base layers for the colorization transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import operator
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.compat.v2.keras import layers
from coltran.utils import att_utils
from coltran.utils import base_utils
# pylint: disable=duplicate-string-formatting-argument
def residual_dropout(inputs, output, dropout, training):
"""out = inputs + dropout(output)."""
if training and dropout:
output = tf.nn.dropout(output, dropout)
output += inputs
return output
class Shift(layers.Layer):
"""Shifts an input tensor either down or right to preserve causal ordering."""
def __init__(self, dimension, resolution, **kwargs):
"""Init.
Args:
dimension: int, 0 to shift down, 1 to shift right.
resolution: list of 2 ints, [H, W].
**kwargs:
"""
super(Shift, self).__init__(**kwargs)
self.dimension = dimension
self.resolution = resolution
def call(self, x):
shape = x.shape
rank = len(shape)
dim = self.dimension + 1
# Assume 1 batch_dim.
index = [0] * len(self.resolution)
y = x
paddings = np.zeros((rank, 2), dtype=np.int32)
paddings[dim, 0] = 1
y = tf.pad(y, paddings)
rem_dims = rank - 1 - len(index[:dim])
slice_inds = [0] + index[:dim] + [0] * rem_dims
return tf.slice(y, slice_inds, shape)
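# Editor's sketch (illustrative, not part of the original file): shifting a
# 4x4 feature map down by one row; the layer pads a zero row at the top and
# drops the last row, so output[:, 0] is all zeros and output[:, 1:] equals
# inputs[:, :-1].
#   shift_down = Shift(dimension=0, resolution=[4, 4])
#   out = shift_down(tf.ones([1, 4, 4, 3]))   # shape stays [1, 4, 4, 3]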
class Cache(layers.Layer):
"""Keras layer for cacheing.
Values are cached in a tensor of shape (B, canvas_shape, D).
B and D are inferred from the inputs to the call method.
  Every call to the cache instance is assumed to be a tuple of (values, index).
  It updates the cache so that cache[:, index:index + extent, :] = values,
  where extent is the spatial extent of the incoming values.
"""
def __init__(self, canvas_shape,
num_batch_axes=1,
dtype=tf.float32,
**kwargs):
super(Cache, self).__init__(trainable=False, **kwargs)
self.canvas_shape = canvas_shape
self.num_batch_axes = num_batch_axes
self._dtype = dtype
def build(self, input_shapes):
num_canvas_dim = len(self.canvas_shape)
value, _ = input_shapes
features_shape = value[self.num_batch_axes + num_canvas_dim:]
cache_shape = (value[:self.num_batch_axes] + self.canvas_shape +
features_shape)
self.cache = tf.zeros(shape=cache_shape, dtype=self._dtype)
super(Cache, self).build(input_shapes)
def reset(self):
self.cache = tf.zeros(shape=self.cache.shape, dtype=self._dtype)
def call(self, inputs):
value, index = inputs
if self.cache.shape == inputs[0].shape:
self.cache = value
return value
shape = self.cache.shape.as_list()
num_index_axes = index.shape[0]
num_batch_axes = self.num_batch_axes
num_feature_axes = len(shape) - num_index_axes - num_batch_axes
features_shape = shape[num_batch_axes + num_index_axes:]
batch_shape = shape[:num_batch_axes]
value_index_shape = tf.shape(value)[num_batch_axes:-num_feature_axes]
if tf.reduce_max(value_index_shape) > 1:
# This is a block update starting at index.
value_ranges = []
for i, s in enumerate(tf.unstack(value_index_shape)):
curr_range = tf.range(index[i], index[i] + s)
value_ranges.append(curr_range)
batch_ranges = [tf.range(s) for s in batch_shape]
mesh = tf.meshgrid(*(batch_ranges + value_ranges), indexing='ij')
indices = tf.stack(mesh, axis=-1)
indices = tf.reshape(indices, [-1, num_index_axes + num_batch_axes])
else:
# This is a single update at index position.
batch_ranges = [tf.range(s) for s in batch_shape]
mesh = tf.meshgrid(*batch_ranges, indexing='ij')
batch_indices = tf.stack(mesh, axis=-1)
batch_indices = tf.reshape(batch_indices, [-1, num_batch_axes])
# Add leading axes to nd-index and tile to get batched indices.
shape_indices = tf.reshape(index, [1] * num_batch_axes + [-1])
shape_indices = tf.tile(shape_indices, batch_shape + [1])
shape_indices = tf.reshape(shape_indices, [-1, num_index_axes])
indices = tf.concat([batch_indices, shape_indices], axis=-1)
# We need to squeeze nd-axes from value before updating.
value = tf.reshape(value, [-1] + features_shape)
self.cache = tf.tensor_scatter_nd_update(self.cache, indices, value)
return self.cache
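# Editor's sketch (illustrative, not part of the original file): a typical
# incremental-decoding pattern. The cache is built for the full canvas on the
# first call and each step writes the newly generated activations at the
# current (row, col) position, where row and col are plain ints.
#   cache = Cache(canvas_shape=(8, 8))
#   step = tf.zeros((1, 1, 1, 64))                   # activations for one position
#   full = cache((step, tf.constant([row, col])))    # -> shape (1, 8, 8, 64)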
class Masking(object):
"""Masking options for self-attention.
We can either mask the entire future, i.e. allow looking into the past and
the current element, or we can mask in addition the present as well, i.e.,
we can look only to the past.
"""
FUTURE = 'future'
FUTURE_PRESENT = 'future_present'
class PositionEmbed(layers.Layer):
"""Adds factorized positional embeddings for specified axes."""
def __init__(self, axes, max_lengths=None, **kwargs):
"""Init.
Args:
axes: list of ints, axis over which to apply the positional embeddings.
max_lengths: list of ints, maximum length over each axis.
**kwargs:
"""
super(PositionEmbed, self).__init__(**kwargs)
if not isinstance(axes, (list, tuple)):
axes = [axes]
self.axes = axes
self.max_lengths = None
if max_lengths:
if not isinstance(max_lengths, (list, tuple)):
max_lengths = [max_lengths]
self.max_lengths = max_lengths
def build(self, input_shape):
rank = len(input_shape)
self.axes = sorted([rank + a if a < 0 else a for a in self.axes])
self.max_lengths = self.max_lengths or [input_shape[a] for a in self.axes]
self.embeddings = []
for i, axis in enumerate(self.axes):
shape = [self.max_lengths[i]] + [1] * (rank - axis - 2)
shape.append(input_shape[-1])
init = tf.keras.initializers.RandomNormal(stddev=shape[-1]**-0.5)
self.embeddings.append(
self.add_weight(
name='position_embedding_%d' % i,
shape=shape,
initializer=init,
trainable=True))
super(PositionEmbed, self).build(input_shape)
def call(self, inputs):
out = inputs
for e in self.embeddings:
out += e
return out
class DenseND(layers.Layer):
"""Maps a rank-m tensor to a rank-n tensor through a dense contraction."""
def __init__(self,
filters,
contract_axes=1,
use_bias=False,
activation=None,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
**kwargs):
super(DenseND, self).__init__(**kwargs)
if isinstance(filters, int):
filters = [filters]
self.filters = tuple(filters)
self.contract_axes = contract_axes
self.use_bias = use_bias
self.activation = tf.keras.activations.get(activation)
self.bias_initializer = bias_initializer
self._kernel_initializer = kernel_initializer
# Behaviours differ when shape(weights) > 2.
# see: https://github.com/tensorflow/tensorflow/blob/r2.1/tensorflow/python/ops/init_ops_v2.py#L733 pylint: disable=line-too-long
if self._kernel_initializer == 'glorot_uniform_nd':
self._kernel_initializer = self._glorot_uniform
def _num_batch_axes(self, input_shape):
"""Returns number of batch axes in inputs."""
return len(input_shape) - len(self.contract_shape)
def _glorot_uniform(self, shape, dtype=tf.float32):
"""Glorot uniform initializer."""
fan_out = functools.reduce(operator.mul, self.filters)
fan_in = functools.reduce(operator.mul, shape[:self.contract_axes])
scale = 1. / max(1., (fan_in + fan_out) / 2.)
limit = math.sqrt(3.0 * scale)
return tf.random.uniform(shape, -limit, limit, dtype)
def build(self, input_shape):
# Infer matrix multiplication if no contract shape specified.
self.contract_shape = input_shape[-self.contract_axes:]
w_shape = self.contract_shape + self.filters
self.w = self.add_weight(
name='kernel',
shape=w_shape,
initializer=self._kernel_initializer,
trainable=True)
if self.use_bias:
self.b = self.add_weight(
name='bias', shape=self.filters, initializer=self.bias_initializer,
trainable=True)
super(DenseND, self).build(input_shape)
def call(self, inputs):
# Workaround lack of ellipsis support.
# pyformat: disable
num_batch_axes = self._num_batch_axes(inputs.shape)
batch_str = 'abcdefghijklm'[:num_batch_axes]
contract_str = 'ABCDEFGHIJKLM'[:len(self.contract_shape)]
output_str = 'nopqrstuvwxyz'[:len(self.filters)]
# pyformat: enable
einsum_str = '{}{},{}{}->{}{}'.format(batch_str, contract_str, contract_str,
output_str, batch_str, output_str)
result = tf.einsum(einsum_str, inputs, self.w)
if self.use_bias:
result += self.b
if self.activation is not None:
result = self.activation(result)
return result
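# Editor's sketch (illustrative, not part of the original file): DenseND
# contracts the trailing `contract_axes` dimensions and appends the `filters`
# dimensions, so a per-pixel projection into multi-head space looks like:
#   proj = DenseND(filters=[4, 16], contract_axes=1)
#   y = proj(tf.ones([2, 8, 8, 32]))   # y.shape == (2, 8, 8, 4, 16)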
class RelativeAttentionBiasND(layers.Layer):
"""Relative attention bias in nd factorizes over dimensions."""
def __init__(self, lengths, num_heads, **kwargs):
self.num_heads = num_heads
self.lengths = lengths
super(RelativeAttentionBiasND, self).__init__(**kwargs)
def build(self, input_shapes):
self.biases = []
self.total_length = 1
for i, l in enumerate(self.lengths):
self.total_length *= l
if l > 1:
weight = self.add_weight(
name='relative_attention_bias_%d' % i,
shape=[self.num_heads, 2 * l],
initializer=tf.keras.initializers.Zeros(), trainable=True)
else:
weight = None
self.biases.append(weight)
super(RelativeAttentionBiasND, self).build(input_shapes)
def call(self, inputs=None):
tile, index, biases = 1, None, []
len_q = self.total_length
for i, s in enumerate(self.lengths):
# Relative attention in every dimension separately.
if s > 1:
new_bias = att_utils.relative_attn_bias(
self.biases[i], self.num_heads, index)
repeat = self.total_length // (tile * s)
if repeat > 1:
new_bias = tf.expand_dims(new_bias, -1)
new_bias = tf.tile(new_bias, [tile, repeat, tile, repeat])
new_bias = tf.reshape(new_bias,
[len_q, self.num_heads, self.total_length])
elif tile > 1:
new_bias = tf.tile(new_bias, [tile, 1, tile])
tile *= s
biases.append(new_bias)
return tf.add_n(biases)
class ConditionalLayerNorm(layers.Layer):
"""Conditional Layer Norm.
Normalization of the input with the scale and shift as a function of 3-D
context. Transforms 3-D spatial context into 1-D shift and scale of the
layer-norm parameters. This is done via two dense projections:
1. Spatial averaging via spatial_average='mean' or 'learnable'.
2. Pointwise dense projection across channels.
"""
def __init__(self,
spatial_average='learnable',
sequence='sc',
out_init='glorot_uniform',
out_act='identity', **kwargs):
super(ConditionalLayerNorm, self).__init__(**kwargs)
self.spatial_average = spatial_average
self.sequence = sequence
self.out_init = out_init
self.out_act = out_act
self.out_act_func = base_utils.act_to_func(out_act)
if self.spatial_average not in ['mean', 'learnable']:
      raise ValueError('Expected spatial average to be "mean" or "learnable", '
                       'got %s' % self.spatial_average)
if self.sequence not in ['sc', 'cs']:
      raise ValueError('Expected sequence to be "sc" or "cs", '
                       'got %s' % self.sequence)
def build(self, input_shape):
x_shape = input_shape[0]
height, width, features = x_shape[-3:]
self.layer_norm = layers.LayerNormalization(
trainable=False, name='normalize')
if self.spatial_average == 'learnable':
self.spatial_weights = self.add_weight(
name='spatial_average', shape=(1, height, width, 1),
initializer=tf.keras.initializers.Ones())
self.channel_dense = layers.Dense(
units=2*features, kernel_initializer=self.out_init)
super(ConditionalLayerNorm, self).build(input_shape)
def spatial_projection(self, cond_inputs):
if self.spatial_average == 'learnable':
cond_inputs = self.spatial_weights * cond_inputs
return tf.reduce_mean(cond_inputs, axis=(1, 2), keepdims=True)
def call(self, inputs):
inputs, cond_inputs = inputs
if self.sequence == 'sc':
ops = [self.spatial_projection, self.channel_dense]
elif self.sequence == 'cs':
ops = [self.channel_dense, self.spatial_projection]
for op in ops:
cond_inputs = op(cond_inputs)
scale, shift = tf.split(cond_inputs, num_or_size_splits=2, axis=-1)
scale = self.out_act_func(scale)
shift = self.out_act_func(shift)
inputs_norm = self.layer_norm(inputs)
inputs_norm *= scale
inputs_norm += shift
return inputs_norm
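# Editor's sketch (illustrative, not part of the original file): the layer is
# called with an (inputs, cond_inputs) pair; the 3-D context is pooled
# spatially, projected to per-channel scale/shift, and applied after layer norm.
#   cond_ln = ConditionalLayerNorm(spatial_average='learnable', sequence='sc')
#   y = cond_ln((tf.ones([1, 8, 8, 64]), tf.ones([1, 8, 8, 64])))  # -> (1, 8, 8, 64)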
class SelfAttentionND(layers.Layer):
"""Transforms input through a N-D self-attention layer.
Assume key, query and memory tensors are N-D tensors.
1. Project key, query and value tensors into (N+2)-D tensors using
dense layers where the outer two dimensions are
[num_heads, num_channels_per_head].
num_channels_per_head is set to num_channels // num_heads by default.
2. Computes self-attention tensor using 2 dot products.
The first computes similarity between the key and query tensors.
The second uses this similarity to perform a weighted average over
the value tensors. Done in _dot_product and _weighted_sum.
  3. The default behaviour, i.e. if nd_block_size is not set, is to do global
     self-attention. If nd_block_size is set, the self-attention above is
     limited to blocks of size nd_block_size.
     For instance, for 2-D inputs (images), setting nd_block_size to
     [1, num_columns] or [num_rows, 1] limits attention to columns
     or rows respectively.
  4. If mask == 'future', the contribution of values that violate raster
     ordering is zeroed out. See _apply_mask_and_bias for details.
5. Project the transformed tensor into hidden_size number of channels
using a dense layer.
  Self-attention can be optionally conditioned with a tuple of two values
where the second argument is the conditional input. Supports:
1. Biasing: By setting cond_q, cond_k or cond_v to be True.
2. Scaling: By setting cond_scale to be True.
"""
def __init__(self,
hidden_size,
num_heads=1,
num_channels_per_head=None,
mask=None,
kernel_initializer='glorot_uniform',
nd_block_size=None,
resolution=None,
cond_init='glorot_uniform',
cond_k=False,
cond_q=False,
cond_v=False,
cond_scale=False,
cond_act='identity',
**kwargs):
super(SelfAttentionND, self).__init__(**kwargs)
if nd_block_size:
nd_block_size = list(nd_block_size)
num_channels_per_head = num_channels_per_head or hidden_size // num_heads
self.num_filters = [num_heads, num_channels_per_head]
self.kernel_initializer = kernel_initializer
self.hidden_size = hidden_size
self.cond_k = cond_k
self.cond_q = cond_q
self.cond_v = cond_v
self.cond_scale = cond_scale
self.cond_init = cond_init
self.cond_act_func = base_utils.act_to_func(cond_act)
self.project_cond_q, self.project_cond_k, self.project_cond_v = None, None, None
self.cond_filters = self.num_filters
if cond_scale:
self.cond_filters = [num_heads, 2*num_channels_per_head]
self.nd_block_size = nd_block_size
self.resolution = resolution
self.mask = mask
self.num_channels_per_head = num_channels_per_head
self.num_heads = num_heads
self.hidden_size = hidden_size
# By default, apply attention in third last dimension.
# Last 2 dimensions are heads, channels.
self.attention_dim_q = self.attention_dim_k = -3
# Self attention type.
self.is_block_attention = True if self.nd_block_size else False
def get_num_filters(self, is_cond):
if not is_cond:
return self.num_filters
num_heads, num_channels_per_head = self.num_filters
return [num_heads, 2*num_channels_per_head]
def cond_shift_and_scale(self, inputs, cond_inputs, is_cond, layer):
if not is_cond:
return inputs
cond_out = layer(cond_inputs)
if self.cond_scale:
scale, shift = tf.split(cond_out, num_or_size_splits=2, axis=-1)
scale = self.cond_act_func(scale)
shift = self.cond_act_func(shift)
inputs *= scale
inputs += shift
else:
inputs += cond_out
return inputs
def build(self, input_shape):
if not isinstance(input_shape[-1], int):
input_shape = input_shape[0]
lengths = self.nd_block_size or self.resolution or input_shape[1:-1]
self.project_q = DenseND(
self.num_filters, kernel_initializer=self.kernel_initializer, name='q')
self.project_k = DenseND(
self.num_filters, kernel_initializer=self.kernel_initializer, name='k')
self.project_v = DenseND(
self.num_filters, kernel_initializer=self.kernel_initializer, name='v')
self.project_final = DenseND(
self.hidden_size, kernel_initializer=self.kernel_initializer,
contract_axes=2, name='output')
self.relative_attention = RelativeAttentionBiasND(
lengths, self.num_heads)
self.relative_attention.build([])
if self.cond_k:
self.project_cond_k = DenseND(
self.cond_filters, kernel_initializer=self.cond_init, name='cond_k')
if self.cond_q:
self.project_cond_q = DenseND(
self.cond_filters, kernel_initializer=self.cond_init, name='cond_q')
if self.cond_v:
self.project_cond_v = DenseND(
self.cond_filters, kernel_initializer=self.cond_init, name='cond_v')
self.is_one_dim_attention = (
self.is_block_attention and
sum(s != 1 for s in self.nd_block_size) == 1)
if self.is_one_dim_attention:
max_dim = self.nd_block_size.index(max(self.nd_block_size))
if self.nd_block_size[max_dim] == lengths[max_dim]:
self.is_block_attention = False
self.attention_dim_q = max_dim - len(self.nd_block_size) - 2
self.attention_dim_k = self.attention_dim_q
else:
self.is_one_dim_attention = False
if self.mask:
total_length = functools.reduce(operator.mul, lengths, 1)
self._mask = np.triu(np.ones([total_length, total_length], np.float32))
if self.mask != Masking.FUTURE_PRESENT:
self._mask *= (1.0 - np.eye(total_length))
self._mask *= -1e6
self._mask = tf.constant(
np.reshape(self._mask, [total_length, 1, total_length]))
super(SelfAttentionND, self).build(input_shape)
def _apply_mask_and_bias(self, alphas):
bias = self.relative_attention(None)
if self.mask:
bias += self._mask
expand_bias_dims = -self.attention_dim_q - 3
if expand_bias_dims:
bias = tf.reshape(bias, [-1] + [1] * expand_bias_dims +
list(bias.shape[1:]))
return alphas + bias
def _dot_product(self, q, k, contract_dim_q=-3, contract_dim_k=-3):
num_batch_axes = len(q.shape) + contract_dim_q
pre_str = 'abcdefghij' [:num_batch_axes]
in_dim_q = -contract_dim_q - 2
in_dim_k = -contract_dim_k - 2
in_str_q = 'zyxwv' [:in_dim_q]
in_str_k = 'zyxwv' [:in_dim_k]
einsum_str = '{}Q{}C,{}M{}C->{}Q{}M'.format(pre_str, in_str_q, pre_str,
in_str_k, pre_str, in_str_q)
return tf.einsum(einsum_str, q, k)
def _weighted_sum(self, alphas, v, contract_dim_a=-3, contract_dim_v=-3):
num_batch_axes = len(alphas.shape) + contract_dim_a
pre_str = 'abcdefghij' [:num_batch_axes]
in_dim_a = -contract_dim_a - 2
in_dim_v = -contract_dim_v - 2
in_str_a = 'zyxwv' [:in_dim_a]
in_str_v = 'zyxwv' [:in_dim_v]
einsum_str = '{}Q{}M,{}M{}C->{}Q{}C'.format(pre_str, in_str_a, pre_str,
in_str_v, pre_str, in_str_a)
return tf.einsum(einsum_str, alphas, v)
def _prepare_block_attention(self, x):
return att_utils.divide_nd_blocks(x, self.nd_block_size, collapse=True)
def _prepare_full_attention(self, x):
return tf.reshape(x, [x.shape[0], -1, x.shape[-1]])
def call(self, inputs):
cond_inputs = memory = None
cond_qkv = self.cond_v or self.cond_q or self.cond_k
if cond_qkv:
if tf.is_tensor(inputs) or len(inputs) != 2:
raise ValueError('Expected tuple of (inputs, cond_inputs)')
inputs, cond_inputs = inputs
x = inputs
if not self.is_one_dim_attention:
# We flatten the index axes here. [B, ..., D] --> [B, M, D].
if self.is_block_attention:
x = self._prepare_block_attention(x)
else:
x = self._prepare_full_attention(x)
memory = x
q, k, v = self.project_q(x), self.project_k(memory), self.project_v(memory)
q = self.cond_shift_and_scale(
q, cond_inputs, self.cond_q, self.project_cond_q)
k = self.cond_shift_and_scale(
k, cond_inputs, self.cond_k, self.project_cond_k)
v = self.cond_shift_and_scale(
v, cond_inputs, self.cond_v, self.project_cond_v)
q *= q.shape[-1]**-0.5
alphas = self._dot_product(q, k, self.attention_dim_q, self.attention_dim_k)
alphas = self._apply_mask_and_bias(alphas)
weights = tf.nn.softmax(alphas)
output = self._weighted_sum(weights, v, self.attention_dim_q,
self.attention_dim_k)
output = self.project_final(output)
return output
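# Editor's sketch (illustrative, not part of the original file): masked row
# attention over an 8x8 grid, as used by autoregressive decoder layers.
#   row_attn = SelfAttentionND(
#       hidden_size=64, num_heads=4, nd_block_size=[1, 8], resolution=[8, 8],
#       mask='future')
#   y = row_attn(tf.ones([2, 8, 8, 64]))   # y.shape == (2, 8, 8, 64)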
class FactorizedAttention(layers.Layer):
"""Encodes image into 2-D spatial context with factorized attention layers."""
def __init__(self, config, **kwargs):
super(FactorizedAttention, self).__init__(**kwargs)
self.config = config
self.dropout = self.config.get('dropout', 0.0)
def build(self, input_shapes):
ff_size, hidden_size = self.config.ff_size, self.config.hidden_size
num_heads = self.config.num_heads
height, width = input_shapes[1:3]
self.pos_embed = PositionEmbed(axes=[1, 2], max_lengths=[height, width])
self.residual_layers = []
num_norms = 4 * self.config.num_encoder_layers
self.layer_norms = [layers.LayerNormalization() for _ in range(num_norms)]
for _ in range(self.config.num_encoder_layers):
# unmasked row
unmask_row = SelfAttentionND(
hidden_size=hidden_size, num_heads=num_heads,
nd_block_size=[1, width], resolution=[height, width])
ff_row = tf.keras.Sequential([
layers.Dense(units=ff_size, activation='relu'),
layers.Dense(units=hidden_size)
])
# unmasked column,
unmask_col = SelfAttentionND(
hidden_size=hidden_size, num_heads=num_heads,
nd_block_size=[height, 1], resolution=[height, width])
ff_col = tf.keras.Sequential([
layers.Dense(units=ff_size, activation='relu'),
layers.Dense(units=hidden_size)
])
self.residual_layers.append(unmask_row)
self.residual_layers.append(ff_row)
self.residual_layers.append(unmask_col)
self.residual_layers.append(ff_col)
def call(self, inputs, training=True):
inputs = self.pos_embed(inputs)
    # Apply a stack of unmasked row and column attention layers.
for layer, norm in zip(self.residual_layers, self.layer_norms):
output = layer(inputs)
output = residual_dropout(inputs, output, self.dropout, training)
inputs = norm(output)
return inputs
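# Editor's sketch (illustrative, not part of the original file): the config is
# assumed to behave like an ml_collections.ConfigDict exposing the fields read
# in build() above (hidden_size, ff_size, num_heads, num_encoder_layers,
# dropout). The channel dimension of the input must equal hidden_size so the
# residual additions line up.
#   cfg = ml_collections.ConfigDict(
#       dict(hidden_size=64, ff_size=128, num_heads=4,
#            num_encoder_layers=2, dropout=0.1))
#   encoder = FactorizedAttention(cfg)
#   ctx = encoder(tf.ones([1, 8, 8, 64]), training=False)  # -> (1, 8, 8, 64)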
| google-research/google-research | coltran/models/layers.py | Python | apache-2.0 | 24,446 |
from rest_framework import routers
from . import views
class SecretsRootView(routers.APIRootView):
"""
Secrets API root view
"""
def get_view_name(self):
return 'Secrets'
router = routers.DefaultRouter()
router.APIRootView = SecretsRootView
# Field choices
router.register(r'_choices', views.SecretsFieldChoicesViewSet, basename='field-choice')
# Secrets
router.register(r'secret-roles', views.SecretRoleViewSet)
router.register(r'secrets', views.SecretViewSet)
# Miscellaneous
router.register(r'get-session-key', views.GetSessionKeyViewSet, basename='get-session-key')
router.register(r'generate-rsa-key-pair', views.GenerateRSAKeyPairViewSet, basename='generate-rsa-key-pair')
app_name = 'secrets-api'
urlpatterns = router.urls
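# Editor's note (illustrative, not part of the original module): with the
# registrations above, the DefaultRouter exposes routes such as
#   secret-roles/   secret-roles/<pk>/
#   secrets/        secrets/<pk>/
#   get-session-key/   generate-rsa-key-pair/   _choices/
# relative to wherever this urlconf is included (e.g. /api/secrets/ in NetBox).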
| lampwins/netbox | netbox/secrets/api/urls.py | Python | apache-2.0 | 763 |
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
import base64
import crypt
import hashlib
import random
import re
import string
import struct
import sys
import traceback
import xml.dom.minidom as minidom
import zlib
from azurelinuxagent.common.future import ustr
def parse_doc(xml_text):
"""
Parse xml document from string
"""
# The minidom lib has some issue with unicode in python2.
# Encode the string into utf-8 first
xml_text = xml_text.encode('utf-8')
return minidom.parseString(xml_text)
def findall(root, tag, namespace=None):
"""
Get all nodes by tag and namespace under Node root.
"""
if root is None:
return []
if namespace is None:
return root.getElementsByTagName(tag)
else:
return root.getElementsByTagNameNS(namespace, tag)
def find(root, tag, namespace=None):
"""
Get first node by tag and namespace under Node root.
"""
nodes = findall(root, tag, namespace=namespace)
if nodes is not None and len(nodes) >= 1:
return nodes[0]
else:
return None
def gettext(node):
"""
Get node text
"""
if node is None:
return None
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE:
return child.data
return None
def findtext(root, tag, namespace=None):
"""
Get text of node by tag and namespace under Node root.
"""
node = find(root, tag, namespace=namespace)
return gettext(node)
def getattrib(node, attr_name):
"""
Get attribute of xml node
"""
if node is not None:
return node.getAttribute(attr_name)
else:
return None
def unpack(buf, offset, value_range):
"""
Unpack bytes into python values.
"""
result = 0
for i in value_range:
result = (result << 8) | str_to_ord(buf[offset + i])
return result
def unpack_little_endian(buf, offset, length):
"""
Unpack little endian bytes into python values.
"""
return unpack(buf, offset, list(range(length - 1, -1, -1)))
def unpack_big_endian(buf, offset, length):
"""
Unpack big endian bytes into python values.
"""
return unpack(buf, offset, list(range(0, length)))
def hex_dump3(buf, offset, length):
"""
Dump range of buf in formatted hex.
"""
return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]])
def hex_dump2(buf):
"""
Dump buf in formatted hex.
"""
return hex_dump3(buf, 0, len(buf))
def is_in_range(a, low, high):
"""
Return True if 'a' in 'low' <= a <= 'high'
"""
return low <= a <= high
def is_printable(ch):
"""
Return True if character is displayable.
"""
return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z'))
or is_in_range(ch, str_to_ord('a'), str_to_ord('z'))
or is_in_range(ch, str_to_ord('0'), str_to_ord('9')))
def hex_dump(buffer, size): # pylint: disable=redefined-builtin
"""
    Return hex formatted dump of a 'buffer' of 'size'.
"""
if size < 0:
size = len(buffer)
result = ""
for i in range(0, size):
if (i % 16) == 0:
result += "%06X: " % i
byte = buffer[i]
if type(byte) == str:
byte = ord(byte.decode('latin1'))
result += "%02X " % byte
if (i & 15) == 7:
result += " "
if ((i + 1) % 16) == 0 or (i + 1) == size:
j = i
while ((j + 1) % 16) != 0:
result += " "
if (j & 7) == 7:
result += " "
j += 1
result += " "
for j in range(i - (i % 16), i + 1):
byte = buffer[j]
if type(byte) == str:
byte = str_to_ord(byte.decode('latin1'))
k = '.'
if is_printable(byte):
k = chr(byte)
result += k
if (i + 1) != size:
result += "\n"
return result
def str_to_ord(a):
"""
Allows indexing into a string or an array of integers transparently.
Generic utility function.
"""
if type(a) == type(b'') or type(a) == type(u''):
a = ord(a)
return a
def compare_bytes(a, b, start, length):
for offset in range(start, start + length):
if str_to_ord(a[offset]) != str_to_ord(b[offset]):
return False
return True
def int_to_ip4_addr(a):
"""
Build DHCP request string.
"""
return "%u.%u.%u.%u" % ((a >> 24) & 0xFF,
(a >> 16) & 0xFF,
(a >> 8) & 0xFF,
(a) & 0xFF)
def hexstr_to_bytearray(a):
"""
Return hex string packed into a binary struct.
"""
b = b""
for c in range(0, len(a) // 2):
b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16))
return b
def set_ssh_config(config, name, val):
found = False
no_match = -1
match_start = no_match
for i in range(0, len(config)):
if config[i].startswith(name) and match_start == no_match:
config[i] = "{0} {1}".format(name, val)
found = True
elif config[i].lower().startswith("match"):
if config[i].lower().startswith("match all"):
# outside match block
match_start = no_match
elif match_start == no_match:
# inside match block
match_start = i
if not found:
if match_start != no_match:
i = match_start
config.insert(i, "{0} {1}".format(name, val))
return config
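# Editor's sketch (illustrative, not part of the original module): replacing a
# directive in sshd_config lines while leaving "Match" blocks alone.
#   lines = ["PasswordAuthentication no", "Match User foo", "X11Forwarding yes"]
#   set_ssh_config(lines, "PasswordAuthentication", "yes")
#   # -> ["PasswordAuthentication yes", "Match User foo", "X11Forwarding yes"]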
def set_ini_config(config, name, val):
notfound = True
nameEqual = name + '='
length = len(config)
text = "{0}=\"{1}\"".format(name, val)
for i in reversed(range(0, length)):
if config[i].startswith(nameEqual):
config[i] = text
notfound = False
break
if notfound:
config.insert(length - 1, text)
def replace_non_ascii(incoming, replace_char=''):
outgoing = ''
if incoming is not None:
for c in incoming:
if str_to_ord(c) > 128:
outgoing += replace_char
else:
outgoing += c
return outgoing
def remove_bom(c):
"""
    BOM consists of a sequence of three chars, 0xef, 0xbb, 0xbf, in case of utf-8.
"""
if not is_str_none_or_whitespace(c) and \
len(c) > 2 and \
str_to_ord(c[0]) > 128 and \
str_to_ord(c[1]) > 128 and \
str_to_ord(c[2]) > 128:
c = c[3:]
return c
def gen_password_hash(password, crypt_id, salt_len):
collection = string.ascii_letters + string.digits
salt = ''.join(random.choice(collection) for _ in range(salt_len))
salt = "${0}${1}".format(crypt_id, salt)
if sys.version_info[0] == 2:
# if python 2.*, encode to type 'str' to prevent Unicode Encode Error from crypt.crypt
password = password.encode('utf-8')
return crypt.crypt(password, salt)
def get_bytes_from_pem(pem_str):
base64_bytes = ""
for line in pem_str.split('\n'):
if "----" not in line:
base64_bytes += line
return base64_bytes
def compress(s):
"""
Compress a string, and return the base64 encoded result of the compression.
This method returns a string instead of a byte array. It is expected
that this method is called to compress smallish strings, not to compress
the contents of a file. The output of this method is suitable for
embedding in log statements.
"""
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64encode(zlib.compress(bytes(s, 'utf-8'))).decode('utf-8')
return base64.b64encode(zlib.compress(s))
def b64encode(s):
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64encode(bytes(s, 'utf-8')).decode('utf-8')
return base64.b64encode(s)
def b64decode(s):
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if PY_VERSION_MAJOR > 2:
return base64.b64decode(s).decode('utf-8')
return base64.b64decode(s)
def safe_shlex_split(s):
import shlex
from azurelinuxagent.common.version import PY_VERSION
if PY_VERSION[:2] == (2, 6):
return shlex.split(s.encode('utf-8'))
return shlex.split(s)
def swap_hexstring(s, width=2):
r = len(s) % width
if r != 0:
s = ('0' * (width - (len(s) % width))) + s
return ''.join(reversed(
re.findall(
r'[a-f0-9]{{{0}}}'.format(width),
s,
re.IGNORECASE)))
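# Editor's sketch (illustrative, not part of the original module):
#   swap_hexstring('a1b2c3')          # -> 'c3b2a1'
#   swap_hexstring('abc', width=2)    # zero-padded to '0abc', then -> 'bc0a'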
def parse_json(json_str):
"""
Parse json string and return a resulting dictionary
"""
# trim null and whitespaces
result = None
if not is_str_empty(json_str):
import json
result = json.loads(json_str.rstrip(' \t\r\n\0'))
return result
def is_str_none_or_whitespace(s):
return s is None or len(s) == 0 or s.isspace()
def is_str_empty(s):
return is_str_none_or_whitespace(s) or is_str_none_or_whitespace(s.rstrip(' \t\r\n\0'))
def hash_strings(string_list):
"""
Compute a cryptographic hash of a list of strings
:param string_list: The strings to be hashed
:return: The cryptographic hash (digest) of the strings in the order provided
"""
sha1_hash = hashlib.sha1()
for item in string_list:
sha1_hash.update(item.encode())
return sha1_hash.digest()
def format_memory_value(unit, value):
units = {'bytes': 1, 'kilobytes': 1024, 'megabytes': 1024*1024, 'gigabytes': 1024*1024*1024}
if unit not in units:
raise ValueError("Unit must be one of {0}".format(units.keys()))
    try:
        value = float(value)
    except (TypeError, ValueError):
        raise TypeError('Value must be convertible to a float')
return int(value * units[unit])
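# Editor's sketch (illustrative, not part of the original module):
#   format_memory_value('kilobytes', 10)    # -> 10240
#   format_memory_value('megabytes', 0.5)   # -> 524288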
def str_to_encoded_ustr(s, encoding='utf-8'):
"""
This function takes the string and converts it into the corresponding encoded ustr if its not already a ustr.
The encoding is utf-8 by default if not specified.
Note: ustr() is a unicode object for Py2 and a str object for Py3.
:param s: The string to convert to ustr
:param encoding: Encoding to use. Utf-8 by default
:return: Returns the corresponding ustr string. Returns None if input is None.
"""
# TODO: Import at the top of the file instead of a local import (using local import here to avoid cyclic dependency)
from azurelinuxagent.common.version import PY_VERSION_MAJOR
if s is None or type(s) is ustr:
# If its already a ustr/None then return as is
return s
if PY_VERSION_MAJOR > 2:
try:
# For py3+, str() is unicode by default
if isinstance(s, bytes):
# str.encode() returns bytes which should be decoded to get the str.
return s.decode(encoding)
else:
# If its not encoded, just return the string
return ustr(s)
except Exception:
# If some issues in decoding, just return the string
return ustr(s)
# For Py2, explicitly convert the string to unicode with the specified encoding
return ustr(s, encoding=encoding)
def format_exception(exception):
# Function to format exception message
e = None
if sys.version_info[0] == 2:
_, e, tb = sys.exc_info()
else:
tb = exception.__traceback__
msg = ustr(exception) + "\n"
if tb is None or (sys.version_info[0] == 2 and e != exception):
msg += "[Traceback not available]"
else:
msg += ''.join(traceback.format_exception(etype=type(exception), value=exception, tb=tb))
return msg
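# Editor's sketch (illustrative, not part of the original module): the small
# codec helpers above round-trip text through base64, e.g.
#   b64decode(b64encode('agent'))       # -> 'agent'
#   str_to_encoded_ustr(b'agent')       # -> u'agent'
#   len(hash_strings(['a', 'b']))       # -> 20 (SHA-1 digest bytes)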
| Azure/WALinuxAgent | azurelinuxagent/common/utils/textutil.py | Python | apache-2.0 | 12,526 |
# -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
minio.error
~~~~~~~~~~~~~~~~~~~
This module provides custom exception classes for Minio library
and API specific errors.
:copyright: (c) 2015, 2016, 2017 by Minio, Inc.
:license: Apache 2.0, see LICENSE for more details.
"""
from xml.etree import cElementTree
from xml.etree.cElementTree import ParseError
if hasattr(cElementTree, 'ParseError'):
    ## ParseError does not have a .message attribute like other
    ## exceptions. Dynamically add a new attribute carrying the
    ## value from msg.
if not hasattr(ParseError, 'message'):
setattr(ParseError, 'message', ParseError.msg)
_ETREE_EXCEPTIONS = (ParseError, AttributeError, ValueError, TypeError)
else:
_ETREE_EXCEPTIONS = (SyntaxError, AttributeError, ValueError, TypeError)
class MinioError(Exception):
"""
Base class for all exceptions
:param message: User defined message.
"""
def __init__(self, message, **kwargs):
super(MinioError, self).__init__(**kwargs)
self.message = message
def __str__(self):
return "{name}: message: {message}".format(
name=self.__class__.__name__,
message=self.message
)
class InvalidEndpointError(MinioError):
"""
InvalidEndpointError is raised when input endpoint URL is invalid.
"""
pass
class InvalidBucketError(MinioError):
"""
InvalidBucketError is raised when input bucket name is invalid.
NOTE: Bucket names are validated based on Amazon S3 requirements.
"""
pass
class InvalidArgumentError(MinioError):
"""
InvalidArgumentError is raised when an unexpected
argument is received by the callee.
"""
pass
class InvalidSizeError(MinioError):
"""
InvalidSizeError is raised when an unexpected size mismatch occurs.
"""
pass
class InvalidXMLError(MinioError):
"""
InvalidXMLError is raised when an unexpected XML tag or
a missing tag is found during parsing.
"""
pass
class MultiDeleteError(object):
"""
Represents an error raised when trying to delete an object in a
Multi-Object Delete API call :class:`MultiDeleteError <MultiDeleteError>`
:object_name: Object name that had a delete error.
:error_code: Error code.
:error_message: Error message.
"""
def __init__(self, object_name, err_code, err_message):
self.object_name = object_name
self.error_code = err_code
self.error_message = err_message
def __str__(self):
string_format = '<MultiDeleteError: object_name: {} error_code: {}' \
' error_message: {}>'
return string_format.format(self.object_name,
self.error_code,
self.error_message)
class ResponseError(MinioError):
"""
ResponseError is raised when an API call doesn't succeed.
raises :exc:`ResponseError` accordingly.
:param response: Response from http client :class:`urllib3.HTTPResponse`.
"""
def __init__(self, response, method, bucket_name=None,
object_name=None):
super(ResponseError, self).__init__(message='')
# initialize parameter fields
self._response = response
self._xml = response.data
self.method = method
self.bucket_name = bucket_name
self.object_name = object_name
# initialize all ResponseError fields
self.code = ''
# Amz headers
self.request_id = ''
self.host_id = ''
self.region = ''
# handle the error
self._handle_error_response(bucket_name)
def get_exception(self):
"""
Gets the error exception derived from the initialization of
an ErrorResponse object
:return: The derived exception or ResponseError exception
"""
exception = known_errors.get(self.code)
if exception:
return exception(self)
else:
return self
def _handle_error_response(self, bucket_name=None):
"""
        Sets the error response from the XML body if available, otherwise
        relies on HTTP headers.
"""
if not self._response.data:
self._set_error_response_without_body(bucket_name)
else:
self._set_error_response_with_body(bucket_name)
def _set_error_response_with_body(self, bucket_name=None):
"""
Sets all the error response fields with a valid response body.
Raises :exc:`ValueError` if invoked on a zero length body.
:param bucket_name: Optional bucket name resource at which error
occurred.
        :param object_name: Optional object name resource at which error
occurred.
"""
if len(self._response.data) == 0:
raise ValueError('response data has no body.')
try:
root = cElementTree.fromstring(self._response.data)
except _ETREE_EXCEPTIONS as error:
raise InvalidXMLError('"Error" XML is not parsable. '
'Message: {0}'.format(error.message))
for attribute in root:
if attribute.tag == 'Code':
self.code = attribute.text
elif attribute.tag == 'BucketName':
self.bucket_name = attribute.text
elif attribute.tag == 'Key':
self.object_name = attribute.text
elif attribute.tag == 'Message':
self.message = attribute.text
elif attribute.tag == 'RequestId':
self.request_id = attribute.text
elif attribute.tag == 'HostId':
self.host_id = attribute.text
# Set amz headers.
self._set_amz_headers()
def _set_error_response_without_body(self, bucket_name=None):
"""
Sets all the error response fields from response headers.
"""
if self._response.status == 404:
if bucket_name:
if self.object_name:
self.code = 'NoSuchKey'
self.message = self._response.reason
else:
self.code = 'NoSuchBucket'
self.message = self._response.reason
elif self._response.status == 409:
            self.code = 'Conflict'
self.message = 'The bucket you tried to delete is not empty.'
elif self._response.status == 403:
self.code = 'AccessDenied'
self.message = self._response.reason
elif self._response.status == 400:
self.code = 'BadRequest'
self.message = self._response.reason
elif self._response.status == 301:
self.code = 'PermanentRedirect'
self.message = self._response.reason
elif self._response.status == 307:
self.code = 'Redirect'
self.message = self._response.reason
elif self._response.status in [405, 501]:
self.code = 'MethodNotAllowed'
self.message = self._response.reason
elif self._response.status == 500:
self.code = 'InternalError'
self.message = 'Internal Server Error.'
else:
self.code = 'UnknownException'
self.message = self._response.reason
# Set amz headers.
self._set_amz_headers()
def _set_amz_headers(self):
"""
Sets x-amz-* error response fields from response headers.
"""
if self._response.headers:
# keeping x-amz-id-2 as part of amz_host_id.
if 'x-amz-id-2' in self._response.headers:
self.host_id = self._response.headers['x-amz-id-2']
if 'x-amz-request-id' in self._response.headers:
self.request_id = self._response.headers['x-amz-request-id']
# This is a new undocumented field, set only if available.
if 'x-amz-bucket-region' in self._response.headers:
self.region = self._response.headers['x-amz-bucket-region']
def __str__(self):
return ('ResponseError: code: {0}, message: {1},'
' bucket_name: {2}, object_name: {3}, request_id: {4},'
' host_id: {5}, region: {6}'.format(self.code,
self.message,
self.bucket_name,
self.object_name,
self.request_id,
self.host_id,
self.region))
# Common error responses listed here
# http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
class KnownResponseError(MinioError):
def __init__(self, response_error, **kwargs):
super(KnownResponseError, self).__init__(message=self.message, **kwargs)
self.response_error = response_error
class AccessDenied(KnownResponseError):
message = 'Access Denied'
class AccountProblem(KnownResponseError):
message = 'There is a problem with your account that prevents the ' \
'operation from completing successfully.'
class AmbiguousGrantByEmailAddress(KnownResponseError):
message = 'The email address you provided is associated with ' \
'more than one account.'
class BadDigest(KnownResponseError):
message = 'The Content-MD5 you specified did not match what we received.'
class BucketAlreadyExists(KnownResponseError):
message = 'The requested bucket name is not available. The ' \
'bucket namespace is shared by all users of the system. ' \
'Please select a different name and try again.'
class BucketAlreadyOwnedByYou(KnownResponseError):
message = 'Your previous request to create the named bucket ' \
'succeeded and you already own it.'
class BucketNotEmpty(KnownResponseError):
message = 'The bucket you tried to delete is not empty.'
class CredentialNotSupported(KnownResponseError):
message = 'This request does not support credentials.'
class CrossLocationLoggingProhibited(KnownResponseError):
message = 'Cross-location logging not allowed. Buckets in one ' \
'geographic location cannot log information to a bucket ' \
'in another location.'
class EntityTooSmall(KnownResponseError):
message = 'Your proposed upload is smaller than the minimum a' \
'llowed object size.'
class EntityTooLarge(KnownResponseError):
message = 'Your proposed upload exceeds the maximum allowed object size.'
class ExpiredToken(KnownResponseError):
message = 'The provided token has expired.'
class IllegalVersioningConfigurationException(KnownResponseError):
message = 'Indicates that the versioning configuration specified ' \
'in the request is invalid.'
class IncompleteBody(KnownResponseError):
message = 'You did not provide the number of bytes specified by the ' \
'Content-Length HTTP header'
class IncorrectNumberOfFilesInPostRequest(KnownResponseError):
message = 'POST requires exactly one file upload per request.'
class InlineDataTooLarge(KnownResponseError):
message = 'Inline data exceeds the maximum allowed size.'
class InternalError(KnownResponseError):
message = 'We encountered an internal error. Please try again.'
class InvalidAccessKeyId(KnownResponseError):
message = 'The access key Id you provided does not exist in our records.'
class InvalidAddressingHeader(KnownResponseError):
message = 'You must specify the Anonymous role.'
class InvalidArgument(KnownResponseError):
message = 'Invalid Argument'
class InvalidBucketName(KnownResponseError):
message = 'The specified bucket is not valid.'
class InvalidBucketState(KnownResponseError):
message = 'The request is not valid with the current state of the bucket.'
class InvalidDigest(KnownResponseError):
message = 'The Content-MD5 you specified is not valid.'
class InvalidEncryptionAlgorithmError(KnownResponseError):
message = 'The encryption request you specified is not valid. ' \
'The valid value is AES256.'
class InvalidLocationConstraint(KnownResponseError):
message = 'The specified location constraint is not valid.'
class InvalidObjectState(KnownResponseError):
message = 'The operation is not valid for the current state of the object.'
class InvalidPart(KnownResponseError):
message = 'One or more of the specified parts could not be found. ' \
'The part might not have been uploaded, or the specified ' \
'entity tag might not have matched the part\'s entity tag'
class InvalidPartOrder(KnownResponseError):
message = 'The list of parts was not in ascending order.Parts list ' \
'must specified in order by part number.'
class InvalidPayer(KnownResponseError):
message = 'All access to this object has been disabled.'
class InvalidPolicyDocument(KnownResponseError):
message = 'The content of the form does not meet the conditions ' \
'specified in the policy document.'
class InvalidRange(KnownResponseError):
message = 'The requested range cannot be satisfied.'
class InvalidRequest(KnownResponseError):
message = 'Invalid Request'
class InvalidSecurity(KnownResponseError):
message = 'The provided security credentials are not valid.'
class InvalidSOAPRequest(KnownResponseError):
message = 'The SOAP request body is invalid.'
class InvalidStorageClass(KnownResponseError):
message = 'The storage class you specified is not valid.'
class InvalidTargetBucketForLogging(KnownResponseError):
message = 'The target bucket for logging does not exist, ' \
'is not owned by you, or does not have the appropriate ' \
'grants for the log-delivery group.'
class InvalidToken(KnownResponseError):
message = 'The provided token is malformed or otherwise invalid.'
class InvalidURI(KnownResponseError):
message = 'Couldn\'t parse the specified URI.'
class KeyTooLong(KnownResponseError):
message = 'Your key is too long.'
class MalformedACLError(KnownResponseError):
message = 'The XML you provided was not well-formed ' \
'or did not validate against our published schema.'
class MalformedPOSTRequest(KnownResponseError):
message = 'The body of your POST request is not ' \
'well-formed multipart/form-data.'
class MalformedXML(KnownResponseError):
message = 'This happens when the user sends malformed xml (xml that ' \
'doesn\'t conform to the published xsd) for the configuration.'
class MaxMessageLengthExceeded(KnownResponseError):
message = 'Your request was too big.'
class MaxPostPreDataLengthExceededError(KnownResponseError):
message = 'Your POST request fields preceding the ' \
'upload file were too large.'
class MetadataTooLarge(KnownResponseError):
message = 'Your metadata headers exceed the maximum allowed metadata size.'
class MethodNotAllowed(KnownResponseError):
message = 'The specified method is not allowed against this resource'
class MissingAttachment(KnownResponseError):
message = 'A SOAP attachment was expected, but none were found.'
class MissingContentLength(KnownResponseError):
message = 'You must provide the Content-Length HTTP header.'
class MissingRequestBodyError(KnownResponseError):
message = 'This happens when the user sends an empty xml document ' \
'as a request. The error message is, "Request body is empty."'
class MissingSecurityElement(KnownResponseError):
message = 'The SOAP 1.1 request is missing a security element.'
class MissingSecurityHeader(KnownResponseError):
message = 'Your request is missing a required header.'
class NoLoggingStatusForKey(KnownResponseError):
message = 'There is no such thing as a logging ' \
'status subresource for a key.'
class NoSuchBucket(KnownResponseError):
message = 'The specified bucket does not exist.'
class NoSuchKey(KnownResponseError):
message = 'The specified key does not exist.'
class NoSuchLifecycleConfiguration(KnownResponseError):
message = 'The lifecycle configuration does not exist.'
class NoSuchUpload(KnownResponseError):
    message = 'The specified multipart upload does not exist. ' \
              'The upload ID might be invalid, or the multipart ' \
              'upload might have been aborted or completed.'
class NoSuchVersion(KnownResponseError):
message = 'Indicates that the version ID specified in the ' \
'request does not match an existing version.'
class APINotImplemented(KnownResponseError):
message = 'A header you provided implies functionality ' \
'that is not implemented.'
class NotSignedUp(KnownResponseError):
message = 'Your account is not signed up.'
class NoSuchBucketPolicy(KnownResponseError):
message = 'The specified bucket does not have a bucket policy.'
class OperationAborted(KnownResponseError):
message = 'A conflicting conditional operation is currently in ' \
'progress against this resource. Try again.'
class PermanentRedirect(KnownResponseError):
message = 'The bucket you are attempting to access must be addressed ' \
'using the specified endpoint. Send all future requests ' \
'to this endpoint.'
class PreconditionFailed(KnownResponseError):
message = 'At least one of the preconditions you specified did not hold.'
class Redirect(KnownResponseError):
message = 'Temporary redirect.'
class RestoreAlreadyInProgress(KnownResponseError):
message = 'Object restore is already in progress.'
class RequestIsNotMultiPartContent(KnownResponseError):
message = 'Bucket POST must be of the enclosure-type multipart/form-data.'
class RequestTimeout(KnownResponseError):
message = 'Your socket connection to the server was not read ' \
'from or written to within the timeout period.'
class RequestTimeTooSkewed(KnownResponseError):
message = 'The difference between the request time and the ' \
'server\'s time is too large.'
class RequestTorrentOfBucketError(KnownResponseError):
message = 'Requesting the torrent file of a bucket is not permitted.'
class SignatureDoesNotMatch(KnownResponseError):
message = 'The request signature we calculated does not match the ' \
'signature you provided.'
class ServiceUnavailable(KnownResponseError):
message = 'Reduce your request rate.'
class SlowDown(KnownResponseError):
message = 'Reduce your request rate.'
class TemporaryRedirect(KnownResponseError):
message = 'You are being redirected to the bucket while DNS updates.'
class TokenRefreshRequired(KnownResponseError):
message = 'The provided token must be refreshed.'
class TooManyBuckets(KnownResponseError):
message = 'You have attempted to create more buckets than allowed.'
class UnexpectedContent(KnownResponseError):
message = 'This request does not support content.'
class UnresolvableGrantByEmailAddress(KnownResponseError):
message = 'The email address you provided does not match any account ' \
'on record.'
class UserKeyMustBeSpecified(KnownResponseError):
message = 'The bucket POST must contain the specified field name. ' \
'If it is specified, check the order of the fields.'
known_errors = {
'AccessDenied': AccessDenied,
    'AccountProblem': AccountProblem,
'AmbiguousGrantByEmailAddress': AmbiguousGrantByEmailAddress,
'BadDigest': BadDigest,
'BucketAlreadyExists': BucketAlreadyExists,
'BucketAlreadyOwnedByYou': BucketAlreadyOwnedByYou,
'BucketNotEmpty': BucketNotEmpty,
'CredentialNotSupported': CredentialNotSupported,
'CrossLocationLoggingProhibited': CrossLocationLoggingProhibited,
'EntityTooSmall': EntityTooSmall,
'EntityTooLarge': EntityTooLarge,
'ExpiredToken': ExpiredToken,
'IllegalVersioningConfigurationException': IllegalVersioningConfigurationException,
'IncompleteBody': IncompleteBody,
'IncorrectNumberOfFilesInPostRequest': IncorrectNumberOfFilesInPostRequest,
'InlineDataTooLarge': InlineDataTooLarge,
'InternalError': InternalError,
'InvalidAccessKeyId': InvalidAccessKeyId,
'InvalidAddressingHeader': InvalidAddressingHeader,
'InvalidArgument': InvalidArgument,
'InvalidBucketName': InvalidBucketName,
'InvalidBucketState': InvalidBucketState,
'InvalidDigest': InvalidDigest,
'InvalidEncryptionAlgorithmError': InvalidEncryptionAlgorithmError,
'InvalidLocationConstraint': InvalidLocationConstraint,
'InvalidObjectState': InvalidObjectState,
'InvalidPart': InvalidPart,
'InvalidPartOrder': InvalidPartOrder,
'InvalidPayer': InvalidPayer,
'InvalidPolicyDocument': InvalidPolicyDocument,
'InvalidRange': InvalidRange,
'InvalidRequest': InvalidRequest,
'InvalidSecurity': InvalidSecurity,
'InvalidSOAPRequest': InvalidSOAPRequest,
'InvalidStorageClass': InvalidStorageClass,
'InvalidTargetBucketForLogging': InvalidTargetBucketForLogging,
'InvalidToken': InvalidToken,
'InvalidURI': InvalidURI,
'KeyTooLong': KeyTooLong,
'MalformedACLError': MalformedACLError,
'MalformedPOSTRequest': MalformedPOSTRequest,
'MalformedXML': MalformedXML,
'MaxMessageLengthExceeded': MaxMessageLengthExceeded,
'MaxPostPreDataLengthExceededError': MaxPostPreDataLengthExceededError,
'MetadataTooLarge': MetadataTooLarge,
'MethodNotAllowed': MethodNotAllowed,
'MissingAttachment': MissingAttachment,
'MissingContentLength': MissingContentLength,
'MissingRequestBodyError': MissingRequestBodyError,
'MissingSecurityElement': MissingSecurityElement,
'MissingSecurityHeader': MissingSecurityHeader,
'NoLoggingStatusForKey': NoLoggingStatusForKey,
'NoSuchBucket': NoSuchBucket,
'NoSuchKey': NoSuchKey,
'NoSuchLifecycleConfiguration': NoSuchLifecycleConfiguration,
'NoSuchUpload': NoSuchUpload,
'NoSuchVersion': NoSuchVersion,
'NotImplemented': APINotImplemented,
'NotSignedUp': NotSignedUp,
'NoSuchBucketPolicy': NoSuchBucketPolicy,
'OperationAborted': OperationAborted,
'PermanentRedirect': PermanentRedirect,
'PreconditionFailed': PreconditionFailed,
'Redirect': Redirect,
'RestoreAlreadyInProgress': RestoreAlreadyInProgress,
'RequestIsNotMultiPartContent': RequestIsNotMultiPartContent,
'RequestTimeout': RequestTimeout,
'RequestTimeTooSkewed': RequestTimeTooSkewed,
'RequestTorrentOfBucketError': RequestTorrentOfBucketError,
'SignatureDoesNotMatch': SignatureDoesNotMatch,
'ServiceUnavailable': ServiceUnavailable,
'SlowDown': SlowDown,
'TemporaryRedirect': TemporaryRedirect,
'TokenRefreshRequired': TokenRefreshRequired,
'TooManyBuckets': TooManyBuckets,
'UnexpectedContent': UnexpectedContent,
'UnresolvableGrantByEmailAddress': UnresolvableGrantByEmailAddress,
'UserKeyMustBeSpecified': UserKeyMustBeSpecified,
}
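# Illustrative lookup sketch (added; not part of the original module): the table above
# maps the <Code> string of an S3 error response to its exception class. The constructor
# of KnownResponseError is defined earlier in this file, so only the class lookup is shown.
def _lookup_error_class(code):
    # Returns None for error codes that are not in the table.
    return known_errors.get(code)
# Example: _lookup_error_class('SlowDown').message -> 'Reduce your request rate.'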
| NitishT/minio-py | minio/error.py | Python | apache-2.0 | 23,884 |
from django.db import models
from user.models import State
from django.utils.translation import ugettext as _
## debug
from pprint import pprint
# Create your models here.
class Party(models.Model):
short_name = models.CharField(max_length=10, default='***')
full_name = models.CharField(max_length=50, default='*****')
#logo = models.CharField(max_length=50, default='')
#president = models.CharField(max_length=50, default='')
#founded_by = models.CharField(max_length=50, default='')
#founded_year = models.IntegerField(default=0)
class Meta:
verbose_name = _('Party')
verbose_name_plural= _('Parties')
@classmethod
def get_list(klass):
list = klass.objects.all()
return list
class Leader(models.Model):
name = models.CharField(max_length=50, default='*****')
party = models.ForeignKey(Party, on_delete=models.CASCADE)
class Meta:
verbose_name = _('Leader')
verbose_name_plural= _('Leaders')
class ParliamentConstituency(models.Model):
name = models.CharField(max_length=50, default='*****')
lc = models.ForeignKey('LegislativeConstituency', on_delete=models.CASCADE)
state = models.ForeignKey(State, on_delete=models.CASCADE)
class Meta:
verbose_name = _('ParliamentConstituency')
verbose_name_plural= _('ParliamentConstituencies')
class LegislativeConstituency(models.Model):
name = models.CharField(max_length=50, default='*****')
pc = models.ForeignKey(ParliamentConstituency, on_delete=models.CASCADE)
state = models.ForeignKey(State, on_delete=models.CASCADE)
class Meta:
verbose_name = _('LegislativeConstituency')
verbose_name_plural= _('LegislativeConstituencies')
class MemberParliament(models.Model):
leader = models.ForeignKey(Leader, on_delete=models.CASCADE)
constituency = models.ForeignKey(ParliamentConstituency, on_delete=models.CASCADE)
class Meta:
verbose_name = _('MemberParliament')
verbose_name_plural= _('MemberParliaments')
class MemberLegislative(models.Model):
leader = models.ForeignKey(Leader, on_delete=models.CASCADE)
constituency = models.ForeignKey(LegislativeConstituency, on_delete=models.CASCADE)
class Meta:
verbose_name = _('MemberLegislative')
verbose_name_plural= _('MemberLegislatives')
| amitdhiman000/dais | politics/models.py | Python | apache-2.0 | 2,210 |
import logging
import plistlib
from django.http import HttpResponse
from django.urls import reverse
from zentral.conf import settings
from zentral.utils.certificates import split_certificate_chain
from zentral.utils.payloads import generate_payload_uuid, get_payload_identifier
from zentral.utils.payloads import sign_payload
from .models import OTAEnrollment, OTAEnrollmentSession
logger = logging.getLogger("zentral.contrib.mdm.payloads")
def build_configuration_profile_response(data, filename):
response = HttpResponse(data, content_type="application/x-apple-aspen-config")
response['Content-Disposition'] = 'attachment; filename="{}.mobileconfig"'.format(filename)
return response
def build_profile(display_name, suffix, content,
payload_type="Configuration", payload_description=None,
sign=True, encrypt=False):
profile = {"PayloadUUID": generate_payload_uuid(),
"PayloadIdentifier": get_payload_identifier(suffix),
"PayloadVersion": 1,
"PayloadDisplayName": display_name,
"PayloadType": payload_type, # Only known exception: "Profile Service"
"PayloadContent": content}
if payload_description:
profile["PayloadDescription"] = payload_description
data = plistlib.dumps(profile)
if sign:
data = sign_payload(data)
return data
def build_payload(payload_type, payload_display_name, suffix, content, payload_version=1, encapsulate_content=False):
payload = {"PayloadUUID": generate_payload_uuid(),
"PayloadType": payload_type,
"PayloadDisplayName": payload_display_name,
"PayloadIdentifier": get_payload_identifier(suffix),
"PayloadVersion": payload_version}
if encapsulate_content:
# for scep, certificates TODO: what else ?
payload["PayloadContent"] = content
else:
payload.update(content)
return payload
# TODO: BAD. Must check if this is really a root CA before building and returning anything
def build_root_ca_payloads():
root_certificate = split_certificate_chain(settings["api"]["tls_fullchain"])[-1]
return [
build_payload("com.apple.security.pem",
"Zentral - root CA", "tls-root-ca-cert",
root_certificate.encode("utf-8"),
encapsulate_content=True)
]
def build_root_ca_configuration_profile():
return build_profile("Zentral - root CA certificates",
"root-ca-certificates",
build_root_ca_payloads())
def build_scep_payload(enrollment_session):
subject = [[["CN", enrollment_session.get_common_name()]]]
serial_number = enrollment_session.get_serial_number()
if serial_number:
subject.append([["2.5.4.5", serial_number]])
subject.append([["O", enrollment_session.get_organization()]])
return build_payload("com.apple.security.scep",
enrollment_session.get_payload_name(),
"scep",
{"URL": "{}/scep".format(settings["api"]["tls_hostname"]), # TODO: hardcoded scep url
"Subject": subject,
"Challenge": enrollment_session.get_challenge(),
"Keysize": 2048,
"KeyType": "RSA",
"Key Usage": 5, # 1 is signing, 4 is encryption, 5 is both signing and encryption
},
encapsulate_content=True)
def build_profile_service_configuration_profile(ota_obj):
if isinstance(ota_obj, OTAEnrollmentSession):
url_path = reverse("mdm:ota_session_enroll")
elif isinstance(ota_obj, OTAEnrollment):
url_path = reverse("mdm:ota_enroll")
else:
raise ValueError("ota_obj not an OTAEnrollment nor an OTAEnrollmentSession")
return build_profile("Zentral - OTA MDM Enrollment",
"profile-service",
{"URL": "{}{}".format(settings["api"]["tls_hostname"], url_path),
"DeviceAttributes": ["UDID",
"VERSION",
"PRODUCT",
"SERIAL",
"MEID",
"IMEI"],
"Challenge": ota_obj.enrollment_secret.secret},
payload_type="Profile Service",
payload_description="Install this profile to enroll your device with Zentral")
def build_ota_scep_configuration_profile(ota_enrollment_session):
return build_profile(ota_enrollment_session.get_payload_name(), "scep",
[build_scep_payload(ota_enrollment_session)])
def build_mdm_configuration_profile(enrollment_session, push_certificate):
scep_payload = build_scep_payload(enrollment_session)
payloads = build_root_ca_payloads()
mdm_config = {
"IdentityCertificateUUID": scep_payload["PayloadUUID"],
"Topic": push_certificate.topic,
"ServerURL": "{}{}".format(
settings["api"]["tls_hostname_for_client_cert_auth"],
reverse("mdm:connect")),
"ServerCapabilities": ["com.apple.mdm.bootstraptoken",
"com.apple.mdm.per-user-connections"],
"CheckInURL": "{}{}".format(
settings["api"]["tls_hostname_for_client_cert_auth"],
reverse("mdm:checkin")),
"CheckOutWhenRemoved": True,
}
managed_apple_id = getattr(enrollment_session, "managed_apple_id", None)
if managed_apple_id:
if enrollment_session.access_token:
# account-driven user enrollment
mdm_config["AssignedManagedAppleID"] = managed_apple_id
mdm_config["EnrollmentMode"] = "BYOD"
else:
# unauthenticated user enrollment
mdm_config["ManagedAppleID"] = managed_apple_id
else:
mdm_config["AccessRights"] = 8191 # TODO: config
payloads.extend([
scep_payload,
build_payload("com.apple.mdm",
"Zentral - MDM",
"mdm", mdm_config)
])
return build_profile("Zentral - MDM enrollment", "mdm", payloads)
| zentralopensource/zentral | zentral/contrib/mdm/payloads.py | Python | apache-2.0 | 6,407 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for Check
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-service-control
# [START servicecontrol_v1_generated_ServiceController_Check_async]
from google.cloud import servicecontrol_v1
async def sample_check():
# Create a client
client = servicecontrol_v1.ServiceControllerAsyncClient()
# Initialize request argument(s)
request = servicecontrol_v1.CheckRequest(
)
# Make the request
response = await client.check(request=request)
# Handle the response
print(response)
# [END servicecontrol_v1_generated_ServiceController_Check_async]
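# Hedged example runner (added; not part of the generated snippet): executing the coroutine
# requires application default credentials and a populated CheckRequest for a real service.
if __name__ == "__main__":
    import asyncio
    asyncio.run(sample_check())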
| googleapis/python-service-control | samples/generated_samples/servicecontrol_v1_generated_service_controller_check_async.py | Python | apache-2.0 | 1,437 |
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parallel Hello World
"""
from mpi4py import MPI
import sys
size = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
name = MPI.Get_processor_name()
sys.stdout.write(
"Hello, World! I am process %d of %d on %s.\n"
% (rank, size, name)) | google/cloud-berg | examples/mpi_helloworld.py | Python | apache-2.0 | 855 |
#!/usr/bin/env python
"""Test the flow_management interface."""
import os
from grr.gui import gui_test_lib
from grr.gui import runtests_test
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import flow
from grr.lib import hunts
from grr.lib import test_lib
from grr.lib.flows.general import filesystem as flows_filesystem
from grr.lib.flows.general import processes as flows_processes
from grr.lib.flows.general import transfer as flows_transfer
from grr.lib.flows.general import webhistory as flows_webhistory
from grr.lib.hunts import standard
from grr.lib.hunts import standard_test
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
class TestFlowManagement(gui_test_lib.GRRSeleniumTest,
standard_test.StandardHuntTestMixin):
"""Test the flow management GUI."""
def setUp(self):
super(TestFlowManagement, self).setUp()
with self.ACLChecksDisabled():
self.client_id = rdf_client.ClientURN("C.0000000000000001")
with aff4.FACTORY.Open(
self.client_id, mode="rw", token=self.token) as client:
client.Set(client.Schema.HOSTNAME("HostC.0000000000000001"))
self.RequestAndGrantClientApproval(self.client_id)
self.action_mock = action_mocks.FileFinderClientMock()
def testOpeningManageFlowsOfUnapprovedClientRedirectsToHostInfoPage(self):
self.Open("/#/clients/C.0000000000000002/flows/")
# As we don't have an approval for C.0000000000000002, we should be
# redirected to the host info page.
self.WaitUntilEqual("/#/clients/C.0000000000000002/host-info",
self.GetCurrentUrlPath)
self.WaitUntil(self.IsTextPresent,
"You do not have an approval for this client.")
def testPageTitleReflectsSelectedFlow(self):
pathspec = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test.plist"),
pathtype=rdf_paths.PathSpec.PathType.OS)
flow_urn = flow.GRRFlow.StartFlow(
flow_name=flows_transfer.GetFile.__name__,
client_id=self.client_id,
pathspec=pathspec,
token=self.token)
self.Open("/#/clients/C.0000000000000001/flows/")
self.WaitUntilEqual("GRR | C.0000000000000001 | Flows", self.GetPageTitle)
self.Click("css=td:contains('GetFile')")
self.WaitUntilEqual("GRR | C.0000000000000001 | " + flow_urn.Basename(),
self.GetPageTitle)
def testFlowManagement(self):
"""Test that scheduling flows works."""
self.Open("/")
self.Type("client_query", "C.0000000000000001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001", self.GetText,
"css=span[type=subject]")
# Choose client 1
self.Click("css=td:contains('0001')")
# First screen should be the Host Information already.
self.WaitUntil(self.IsTextPresent, "HostC.0000000000000001")
self.Click("css=a[grrtarget='client.launchFlows']")
self.Click("css=#_Processes")
self.Click("link=" + flows_processes.ListProcesses.__name__)
self.WaitUntil(self.IsTextPresent, "C.0000000000000001")
self.WaitUntil(self.IsTextPresent, "List running processes on a system.")
self.Click("css=button.Launch")
self.WaitUntil(self.IsTextPresent, "Launched Flow ListProcesses")
self.Click("css=#_Browser")
# Wait until the tree has expanded.
self.WaitUntil(self.IsTextPresent, flows_webhistory.FirefoxHistory.__name__)
    # Check that we can get a file in Chinese
self.Click("css=#_Filesystem")
# Wait until the tree has expanded.
self.WaitUntil(self.IsTextPresent,
flows_filesystem.UpdateSparseImageChunks.__name__)
self.Click("link=" + flows_transfer.GetFile.__name__)
self.Select("css=.form-group:has(> label:contains('Pathtype')) select",
"OS")
self.Type("css=.form-group:has(> label:contains('Path')) input",
u"/dev/c/msn[1].exe")
self.Click("css=button.Launch")
self.WaitUntil(self.IsTextPresent, "Launched Flow GetFile")
    # Test that recursive flows are shown in a tree table.
with self.ACLChecksDisabled():
flow.GRRFlow.StartFlow(
client_id="aff4:/C.0000000000000001",
flow_name=gui_test_lib.RecursiveTestFlow.__name__,
token=self.token)
self.Click("css=a[grrtarget='client.flows']")
# Some rows are present in the DOM but hidden because parent flow row
# wasn't expanded yet. Due to this, we have to explicitly filter rows
# with "visible" jQuery filter.
self.WaitUntilEqual("RecursiveTestFlow", self.GetText,
"css=grr-client-flows-list tr:visible:nth(1) td:nth(2)")
self.WaitUntilEqual("GetFile", self.GetText,
"css=grr-client-flows-list tr:visible:nth(2) td:nth(2)")
# Click on the first tree_closed to open it.
self.Click("css=grr-client-flows-list tr:visible:nth(1) .tree_closed")
self.WaitUntilEqual("RecursiveTestFlow", self.GetText,
"css=grr-client-flows-list tr:visible:nth(2) td:nth(2)")
# Select the requests tab
self.Click("css=td:contains(GetFile)")
self.Click("css=li[heading=Requests]")
self.WaitUntil(self.IsElementPresent,
"css=td:contains(flow:request:00000001)")
# Check that a StatFile client action was issued as part of the GetFile
# flow.
self.WaitUntil(self.IsElementPresent,
"css=.tab-content td.proto_value:contains(StatFile)")
def testOverviewIsShownForNestedFlows(self):
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
gui_test_lib.RecursiveTestFlow.__name__,
self.action_mock,
client_id=self.client_id,
token=self.token):
pass
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
# There should be a RecursiveTestFlow in the list. Expand nested flows.
self.Click("css=tr:contains('RecursiveTestFlow') span.tree_branch")
# Click on a nested flow.
self.Click("css=tr:contains('RecursiveTestFlow'):nth(2)")
# Nested flow should have Depth argument set to 1.
self.WaitUntil(self.IsElementPresent,
"css=td:contains('Depth') ~ td:nth(0):contains('1')")
# Check that flow id of this flow has forward slash - i.e. consists of
# 2 components.
self.WaitUntil(self.IsTextPresent, "Flow ID")
flow_id = self.GetText("css=dt:contains('Flow ID') ~ dd:nth(0)")
self.assertTrue("/" in flow_id)
def testOverviewIsShownForNestedHuntFlows(self):
with self.ACLChecksDisabled():
with hunts.GRRHunt.StartHunt(
hunt_name=standard.GenericHunt.__name__,
flow_runner_args=rdf_flows.FlowRunnerArgs(
flow_name=gui_test_lib.RecursiveTestFlow.__name__),
client_rate=0,
token=self.token) as hunt:
hunt.Run()
self.AssignTasksToClients(client_ids=[self.client_id])
self.RunHunt(client_ids=[self.client_id])
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
# There should be a RecursiveTestFlow in the list. Expand nested flows.
self.Click("css=tr:contains('RecursiveTestFlow') span.tree_branch")
# Click on a nested flow.
self.Click("css=tr:contains('RecursiveTestFlow'):nth(2)")
# Nested flow should have Depth argument set to 1.
self.WaitUntil(self.IsElementPresent,
"css=td:contains('Depth') ~ td:nth(0):contains('1')")
# Check that flow id of this flow has forward slash - i.e. consists of
# 2 components.
self.WaitUntil(self.IsTextPresent, "Flow ID")
flow_id = self.GetText("css=dt:contains('Flow ID') ~ dd:nth(0)")
self.assertTrue("/" in flow_id)
def testLogsCanBeOpenedByClickingOnLogsTab(self):
# RecursiveTestFlow doesn't send any results back.
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
"FlowWithOneLogStatement",
self.action_mock,
client_id=self.client_id,
token=self.token):
pass
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('FlowWithOneLogStatement')")
self.Click("css=li[heading=Log]")
self.WaitUntil(self.IsTextPresent, "I do log.")
def testLogTimestampsArePresentedInUTC(self):
with self.ACLChecksDisabled():
with test_lib.FakeTime(42):
for _ in test_lib.TestFlowHelper(
"FlowWithOneLogStatement",
self.action_mock,
client_id=self.client_id,
token=self.token):
pass
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('FlowWithOneLogStatement')")
self.Click("css=li[heading=Log]")
self.WaitUntil(self.IsTextPresent, "1970-01-01 00:00:42 UTC")
def testResultsAreDisplayedInResultsTab(self):
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
"FlowWithOneStatEntryResult",
self.action_mock,
client_id=self.client_id,
token=self.token):
pass
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('FlowWithOneStatEntryResult')")
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent, "aff4:/some/unique/path")
def testEmptyTableIsDisplayedInResultsWhenNoResults(self):
with self.ACLChecksDisabled():
flow.GRRFlow.StartFlow(
flow_name="FlowWithOneStatEntryResult",
client_id=self.client_id,
sync=False,
token=self.token)
self.Open("/#c=" + self.client_id.Basename())
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('FlowWithOneStatEntryResult')")
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsElementPresent, "css=#main_bottomPane table thead "
"th:contains('Value')")
def testHashesAreDisplayedCorrectly(self):
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
"FlowWithOneHashEntryResult",
self.action_mock,
client_id=self.client_id,
token=self.token):
pass
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('FlowWithOneHashEntryResult')")
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent,
"9e8dc93e150021bb4752029ebbff51394aa36f069cf19901578"
"e4f06017acdb5")
self.WaitUntil(self.IsTextPresent,
"6dd6bee591dfcb6d75eb705405302c3eab65e21a")
self.WaitUntil(self.IsTextPresent, "8b0a15eefe63fd41f8dc9dee01c5cf9a")
def testChangingTabUpdatesUrl(self):
with self.ACLChecksDisabled():
flow_urn = flow.GRRFlow.StartFlow(
flow_name=gui_test_lib.FlowWithOneStatEntryResult.__name__,
client_id=self.client_id,
token=self.token)
flow_id = flow_urn.Basename()
base_url = "/#/clients/C.0000000000000001/flows/%s" % flow_id
self.Open(base_url)
self.Click("css=li[heading=Requests]")
self.WaitUntilEqual(base_url + "/requests", self.GetCurrentUrlPath)
self.Click("css=li[heading=Results]")
self.WaitUntilEqual(base_url + "/results", self.GetCurrentUrlPath)
self.Click("css=li[heading=Log]")
self.WaitUntilEqual(base_url + "/log", self.GetCurrentUrlPath)
self.Click("css=li[heading='Flow Information']")
self.WaitUntilEqual(base_url, self.GetCurrentUrlPath)
def testDirectLinksToFlowsTabsWorkCorrectly(self):
with self.ACLChecksDisabled():
flow_urn = flow.GRRFlow.StartFlow(
flow_name=gui_test_lib.FlowWithOneStatEntryResult.__name__,
client_id=self.client_id,
token=self.token)
flow_id = flow_urn.Basename()
base_url = "/#/clients/C.0000000000000001/flows/%s" % flow_id
self.Open(base_url + "/requests")
self.WaitUntil(self.IsElementPresent, "css=li.active[heading=Requests]")
self.Open(base_url + "/results")
self.WaitUntil(self.IsElementPresent, "css=li.active[heading=Results]")
self.Open(base_url + "/log")
self.WaitUntil(self.IsElementPresent, "css=li.active[heading=Log]")
# Check that both clients/.../flows/... and clients/.../flows/.../ URLs
# work.
self.Open(base_url)
self.WaitUntil(self.IsElementPresent,
"css=li.active[heading='Flow Information']")
self.Open(base_url + "/")
self.WaitUntil(self.IsElementPresent,
"css=li.active[heading='Flow Information']")
def testCancelFlowWorksCorrectly(self):
"""Tests that cancelling flows works."""
flow.GRRFlow.StartFlow(
client_id=self.client_id,
flow_name=gui_test_lib.RecursiveTestFlow.__name__,
token=self.token)
# Open client and find the flow
self.Open("/")
self.Type("client_query", "C.0000000000000001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001", self.GetText,
"css=span[type=subject]")
self.Click("css=td:contains('0001')")
self.Click("css=a[grrtarget='client.flows']")
self.Click("css=td:contains('RecursiveTestFlow')")
self.Click("css=button[name=cancel_flow]")
# The window should be updated now
self.WaitUntil(self.IsTextPresent, "Cancelled in GUI")
def testGlobalFlowManagement(self):
"""Test that scheduling flows works."""
with self.ACLChecksDisabled():
self.CreateAdminUser(self.token.username)
self.Open("/")
self.Click("css=a[grrtarget=globalFlows]")
self.Click("css=#_Reporting")
self.assertEqual("RunReport", self.GetText("link=RunReport"))
self.Click("link=RunReport")
self.WaitUntil(self.IsTextPresent, "Report name")
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| destijl/grr | grr/gui/plugins/flow_management_test.py | Python | apache-2.0 | 14,194 |
from madrona.features.forms import FeatureForm
from django import forms
from visualize.models import *
class BookmarkForm(FeatureForm):
class Meta(FeatureForm.Meta):
model = Bookmark
| Ecotrust/COMPASS | mp/visualize/forms.py | Python | apache-2.0 | 197 |
"""自适应学习率衰减
tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None)
退化学习率,衰减学习率,将指数衰减应用于学习速率。
计算公式:decayed_learning_rate = learning_rate *
decay_rate ^ (global_step / decay_steps)
"""
# 初始的学习速率是0.1,总的迭代次数是1000次,如果staircase=True,那就表明每decay_steps次计算学习速率变化,更新原始学习速率,
# 如果是False,那就是每一步都更新学习速率。红色表示False,蓝色表示True。
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
learning_rate = 0.1  # the initial learning rate is 0.1
decay_rate = 0.96  # decay rate
global_steps = 1000  # total number of iterations
decay_steps = 100  # number of steps between decays
global_ = tf.Variable(tf.constant(0))
c = tf.train.exponential_decay(learning_rate, global_, decay_steps, decay_rate, staircase=True)
d = tf.train.exponential_decay(learning_rate, global_, decay_steps, decay_rate, staircase=False)
T_C = []
F_D = []
with tf.Session() as sess:
for i in range(global_steps):
T_c = sess.run(c, feed_dict={global_: i})
T_C.append(T_c)
F_d = sess.run(d, feed_dict={global_: i})
F_D.append(F_d)
plt.figure(1)
plt.plot(range(global_steps), F_D, 'r-')  # '-' draws a line plot; 'r' means red, 'b' means blue
plt.plot(range(global_steps), T_C, 'b-')
# Example of the computed factor: with staircase=False, at step 3 the decay is 0.96 ** (3 / 100.0), i.e. about 0.9988
plt.show()
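# Minimal pure-Python cross-check of the formula quoted above (added for illustration; it
# does not need a TensorFlow session):
def manual_exponential_decay(lr, step, n_decay_steps, rate, staircase=False):
    exponent = step // n_decay_steps if staircase else step / float(n_decay_steps)
    return lr * rate ** exponent
# e.g. manual_exponential_decay(0.1, 100, 100, 0.96) -> 0.096, matching both curves at step 100.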
| Asurada2015/TFAPI_translation | Training/Decaying the learning rate/tf_train_exponential_decay.py | Python | apache-2.0 | 1,500 |
#!/usr/bin/env python3
# Copyright 2021 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import pygion
from pygion import task
@task
def main():
pygion.print_once("Hello, Legion!")
if __name__ == '__main__':
main()
| StanfordLegion/legion | bindings/python/tests/pass/print_once.py | Python | apache-2.0 | 785 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'ClouDNSDNSDriver'
]
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.types import ZoneAlreadyExistsError
from libcloud.dns.base import DNSDriver, Zone, Record
VALID_RECORD_EXTRA_PARAMS = ['priority', 'ttl']
class ClouDNSDNSResponse(JsonResponse):
def success(self):
if not super(ClouDNSDNSResponse, self).success():
return False
body = self.parse_body()
if type(body) is dict and body.get('status') == 'Failed':
return False
return True
def parse_error(self):
context = self.connection.context
status_description = self.parse_body()['statusDescription']
if status_description == u'{} has been already added.'.format(
context['id']):
if context['resource'] == 'zone':
raise ZoneAlreadyExistsError(value='', driver=self,
zone_id=context['id'])
super(ClouDNSDNSResponse, self).parse_error()
return self.body
class ClouDNSDNSConnection(ConnectionUserAndKey):
host = 'api.cloudns.net'
secure = True
responseCls = ClouDNSDNSResponse
def add_default_params(self, params):
params['auth-id'] = self.user_id
params['auth-password'] = self.key
return params
def request(self, action, params=None, data='', headers=None,
method='POST'):
return super(ClouDNSDNSConnection, self).request(action=action,
params=params,
data=data,
method=method,
headers=headers)
class ClouDNSDNSDriver(DNSDriver):
type = Provider.CLOUDNS
name = 'ClouDNS DNS'
website = 'https://www.cloudns.net'
connectionCls = ClouDNSDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
        RecordType.NS: 'NS',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT',
}
def _to_zone(self, item):
ttl = item.get('ttl', 3600)
zone = Zone(id=item['name'], domain=item['name'],
type=item['type'], ttl=ttl, driver=self)
return zone
def _to_record(self, item, zone=None):
extra = {'ttl': item['ttl']}
record = Record(id=item['id'], name=item['host'],
type=item['type'], data=item['record'],
zone=zone, driver=self, extra=extra)
return record
def get_zone(self, zone_id):
self.connection.set_context({'resource': 'zone', 'id': zone_id})
params = {'page': 1, 'rows-per-page': 10, 'search': zone_id}
zone_result = self.connection.request(
'/dns/list-zones.json', params=params).object
if not zone_result:
raise ZoneDoesNotExistError(value='', driver=self,
zone_id=zone_id)
return self._to_zone(zone_result[0])
def iterate_zones(self):
page = 1
rows_per_page = 100
params = {'page': page, 'rows-per-page': rows_per_page}
zones_list = []
while True:
page_result = self.connection.request(
'/dns/list-zones.json', params=params).object
if not page_result:
break
zones_list.extend(page_result)
params['page'] += 1
for item in zones_list:
yield self._to_zone(item)
def create_zone(self, domain, ttl=None, extra=None):
self.connection.set_context({'resource': 'zone', 'id': domain})
params = {'domain-name': domain, 'zone-type': 'master'}
self.connection.request(
'/dns/register.json', params=params).object
zone = Zone(id=domain, domain=domain,
type='master', ttl=3600, driver=self)
return zone
def delete_zone(self, zone):
self.connection.set_context({'resource': 'zone', 'id': zone.id})
params = {'domain-name': zone.id}
self.connection.request(
'/dns/delete.json', params=params).object
return True
def iterate_records(self, zone):
self.connection.set_context({'resource': 'zone', 'id': zone.id})
params = {'domain-name': zone.id}
records_list = self.connection.request(
'/dns/records.json', params=params).object
if not len(records_list):
return
for item in records_list.values():
yield self._to_record(item, zone=zone)
def get_record(self, zone_id, record_id):
zone = self.get_zone(zone_id=zone_id)
for record in self.iterate_records(zone):
if record.id == record_id:
return record
raise RecordDoesNotExistError(value='', driver=self,
record_id=record_id)
def delete_record(self, record):
self.connection.set_context({'resource': 'record', 'id': record.id})
params = {'domain-name': record.zone.id, 'record-id': record.id}
self.connection.request(
action='/dns/delete-record.json', params=params)
return True
def create_record(self, name, zone, type, data, extra=None):
params = {
'domain-name': zone.id,
'host': name,
'record-type': type,
'record': data,
'ttl': 3600
}
if extra:
if extra.get('ttl'):
params['ttl'] = extra['ttl']
if extra.get('priority'):
params['priority'] = extra['priority']
record_result = self.connection.request(
action='/dns/add-record.json', params=params).object
return Record(id=record_result['data']['id'], name=name,
type=type, data=data,
zone=zone, driver=self, extra=extra)
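# Hedged usage sketch (added; illustrative only): with valid ClouDNS credentials the driver
# above is typically used like
#   driver = ClouDNSDNSDriver('my-auth-id', 'my-auth-password')
#   zone = driver.create_zone('example.com')
#   driver.create_record('www', zone, RecordType.A, '192.0.2.10', extra={'ttl': 600})
# where 'my-auth-id', 'my-auth-password' and the record values are placeholders.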
| niteoweb/libcloud | libcloud/dns/drivers/cloudns.py | Python | apache-2.0 | 7,000 |
from numpy import zeros
from gwlfe.Memoization import memoize
from gwlfe.MultiUse_Fxns.Runoff.RurQRunoff import RurQRunoff
from gwlfe.MultiUse_Fxns.Runoff.RurQRunoff import RurQRunoff_f
from gwlfe.Output.Loading.PConc import PConc
from gwlfe.Output.Loading.PConc import PConc_f
@memoize
def pRunoff(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN, Grow_0, Area, PhosConc, ManuredAreas,
FirstManureMonth, LastManureMonth, ManPhos, FirstManureMonth2,
LastManureMonth2):
result = zeros((NYrs, 12, 10))
rur_q_runoff = RurQRunoff(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN, Grow_0)
p_conc = PConc(NRur, NUrb, PhosConc, ManPhos, ManuredAreas, FirstManureMonth, LastManureMonth, FirstManureMonth2,
LastManureMonth2)
for Y in range(NYrs):
for i in range(12):
for l in range(NRur):
# += changed to =
result[Y][i][l] = 0.1 * p_conc[i][l] * rur_q_runoff[Y][l][i] * Area[l]
return result
@memoize
def pRunoff_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN, Grow_0, Area, PhosConc, ManuredAreas,
FirstManureMonth, LastManureMonth, ManPhos, FirstManureMonth2, LastManureMonth2):
p_conc = PConc_f(NRur, NUrb, PhosConc, ManPhos, ManuredAreas, FirstManureMonth, LastManureMonth, FirstManureMonth2,
LastManureMonth2)[:, :NRur]
rur_q_runoff = RurQRunoff_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN, Grow_0)
return 0.1 * p_conc * rur_q_runoff * Area[:NRur]
| WikiWatershed/gwlf-e | gwlfe/MultiUse_Fxns/Runoff/pRunoff.py | Python | apache-2.0 | 1,598 |
#!/usr/bin/python
# -*- coding: utf8 -*-
# National Library of Norway, 2014-2015
# load the packages
from pysqlite2 import dbapi2 as sqlite3
from collections import Counter
from operator import itemgetter
from itertools import chain
from flask import Flask, Response, request, session, g, redirect, url_for, \
abort, render_template, flash, jsonify
from contextlib import closing
import re
import json
import sys
import operator
import itertools
## CONFIGURATION
# path to databases
try:
path = str(sys.argv[1])
except:
path = ''
# specify port (default: 5000)
try:
port = int(sys.argv[2])
except:
port = 5000
# specify host (default: 127.0.0.1)
try:
host = str(sys.argv[3])
except:
host = '127.0.0.1'
# paths for the databases
UNIGRAM = path + 'unigram-one-row.db'
BIGRAM = path + 'bigram-one-row.db'
TRIGRAM = path + 'trigram-one-row.db'
AVIS_UNIGRAM = path + 'avis-unigram-one-row.db'
AVIS_BIGRAM = path + 'avis-bigram-one-row.db'
AVIS_TRIGRAM = path + 'avis-trigram-one-row.db'
# database structure
db_names = {'bok': 'bok_', 'avis': 'avis_'}
table_names = ['unigram', 'bigram', 'trigram']
index_names = {'bok': ['_lff_','_lfsf_','_lfstf_'], 'avis': ['_ff_','_fsf_','_fstf_']}
field_names = ['first', 'second', 'third']
# Allowed parameters
languages = 'all|nob|nno'
corpora = 'bok|avis'
# Default parameters
default_params = {'terms': '', 'lang': 'all', 'case_sens': '0', 'freq': 'rel', 'corpus': 'bok'};
# Maximum values
maxTerms = 10
maxNgram = 3 # currently, only unigram, bigram and trigram are supported
maxChar = 200 # cut-off-point at 200 characters for query string
maxWildcards = 5
maxAdd = 10
maxTrunct = 5
# loads a JSON object holding the max. frequencies per year for calculation of relative frequency in python (optional: you might want to store these in the database itself)
with open ('totals.json', 'r') as f:
freqs_per_year = json.load(f)
# initiating Flask (with settings from environment variable - for use in development and production environments)
app = Flask(__name__, static_url_path='/ngram/static')
app.config.from_object(__name__)
app.config.from_envvar('FLASK_NGRAM_SETTINGS')
# connection to DB
def connect_db(self):
rv = sqlite3.connect(self)
#rv.row_factory = sqlite3.Row
return rv
@app.before_request
def before_request():
""" establish connection upon request """
g.db = connect_db(UNIGRAM)
# Attach databases
g.db.execute("ATTACH DATABASE '" + UNIGRAM + "' as bok_unigram;")
g.db.execute("ATTACH DATABASE '" + BIGRAM + "' as bok_bigram;")
g.db.execute("ATTACH DATABASE '" + TRIGRAM + "' as bok_trigram;")
g.db.execute("ATTACH DATABASE '" + AVIS_UNIGRAM + "' as avis_unigram;")
g.db.execute("ATTACH DATABASE '" + AVIS_BIGRAM + "' as avis_bigram")
g.db.execute("ATTACH DATABASE '" + AVIS_TRIGRAM + "' as avis_trigram")
@app.after_request
def after_request(response):
""" Close connection after request """
g.db.close()
return response
def query_db_dict(query, args=(), one=False):
""" Return results as dictionary """
cur = g.db.execute(query, args)
rv = [dict((cur.description[idx][0], value)
for idx, value in enumerate(row)) for row in cur.fetchall()]
return (rv[0] if rv else None) if one else rv
def query_db_row(query, args=(), one=False):
""" Return results as rows """
cur = g.db.execute(query, args)
rv = [list((value)
for idx, value in enumerate(row)) for row in cur.fetchall()]
return (rv[0] if rv else None) if one else rv
def return_terms(terms):
"""Gets a string of terms and returns them as a list, with some clean-up"""
# index for wildcards (not allowed to exceed maxWildcards, these ones are powerful)
wildcardIdx = 0
    # we only allow a certain number of characters in the terms string
terms = terms[:maxChar]
# removes unnecessary whitespace or empty query terms
terms = re.sub(r',\s{0,},',',', terms)
    # splits on comma (with following whitespace): commas may be masked by quotation marks
terms = re.findall('[^\,\"]+|\"[^"]*\"', terms)
# gets number of terms
nTerms = len(terms)
# checks if number exceeds maxTerms, remaining ones are removed
if nTerms >= maxTerms:
terms = terms[:maxTerms]
nTerms = maxTerms
# loops through each term
for i in range(nTerms):
# substitutes '*' with '%' for SQL queries, removes illegal wildcards (according to maxWildcards)
if "*" in terms[i] and wildcardIdx < maxWildcards:
wildcardIdx += 1
terms[i] = terms[i].replace("*", "%")
else:
terms[i] = terms[i].replace("*", "")
# removes whitespace at the beginning or the end of the string
terms[i] = re.sub(r'^\s+', '', terms[i])
terms[i] = re.sub(r'\s+$', '', terms[i])
# removes mask for comma
if terms[i] == '","':
terms[i] = re.sub(r'","',',', terms[i])
# removes whitespace between '+' and terms
if "+" in terms[i]:
terms[i] = re.sub(r'\s+\+', '+', terms[i])
terms[i] = re.sub(r'\+\s+', '+', terms[i])
return terms
def query_factory(ngrams, lang, case_sens, corpus):
""" Creates a sql query for each item in the object """
sql_array = []
args_array = []
label_array = []
lang_array = []
corpus_array = []
for ngram in ngrams:
sql, args, query_lang, query_corpus = build_query_single(ngram, lang, case_sens, corpus)
sql_array.append(sql)
args_array.append(args)
label_array.append(' '.join(ngram))
lang_array.append(query_lang)
corpus_array.append(query_corpus)
return sql_array, args_array, label_array, lang_array, corpus_array
def extract_info(term):
""" Extracts information after colon, returns only ngram and dictionary of arguments"""
ngram = []
argumentDict = {}
lastElement = len(term) - 1
splitted = term[lastElement].split(':')
if len(splitted) >= 2:
ngram.extend(term[:lastElement])
ngram.extend([splitted[0]])
extension = splitted[1:]
for element in extension:
if re.match(r'nob|nno|all', element):
argumentDict['lang'] = element
if re.match(r'bok|avis', element):
argumentDict["db"] = element
if re.match (r'avis', element):
argumentDict["lang"] = 'all'
if re.match (r'bok', element) and re.match(r'nob|nno|all', element) != -1:
argumentDict["lang"] = 'all'
if re.match(r'[0-9]{4}', element):
argumentDict["anno"] = element
return ngram, argumentDict
def wildcard_search(ngrams, lang, case_sens, corpus):
""" Returns the ten most common ngrams matching query """
whereClause = []
whereParams = []
args = []
ngramSize = len(ngrams)
argumentDict = {"ngram": [], "lang": lang, "db": corpus}
if extract_info(ngrams) != None:
ngrams, argumentsExtracted = extract_info(ngrams)
argumentDict = dict_merge(argumentDict, argumentsExtracted)
# values based on input
params = 'in (?,?)' if case_sens == '0' else 'in (?)'
langClause = 'and lang = ?' if argumentDict["lang"] != "all" else ''
getFieldNames = ', '.join(field_names[:ngramSize])
getTableNames = db_names[argumentDict["db"]] + table_names[ngramSize-1] + "." + table_names[ngramSize-1]
for ngram in ngrams:
if "%" in ngram:
argumentDict["ngram"].append(ngram)
whereParams.append("LIKE ?")
else:
whereParams.append(params)
if case_sens == '0':
argumentDict["ngram"].extend(swapcase([ngram]))
else:
argumentDict["ngram"].append(ngram)
idxName = query_planner(whereParams,argumentDict["ngram"])
whereClause = " and ".join( list(('(%s %s)' % (field_names[idx],whereParams[idx]))
for idx, val in enumerate(ngrams)) ) + (langClause if argumentDict["db"] == 'bok' else '')
sql = "SELECT DISTINCT %s FROM (SELECT %s, freq FROM %s INDEXED BY %s WHERE %s ORDER BY freq DESC LIMIT 10) T;" % (getFieldNames, getFieldNames, getTableNames, idxName, whereClause)
# builds argument array for SQL query
args.extend(argumentDict["ngram"])
args.append(argumentDict["anno"]) if "anno" in argumentDict else None
if argumentDict["lang"] != 'all' and argumentDict["db"] == 'bok':
args.append(argumentDict["lang"])
cur = g.db.execute(sql, args)
return ([list((value)
for idx, value in enumerate(row)) for row in cur.fetchall()], argumentDict["lang"], argumentDict["db"])
def query_planner(where,args):
""" NB N-gram query planner """
letters = ['f','s','t']
letterCombination = ''
for idx,val in enumerate(where):
if '=' in where[idx]:
letterCombination += letters[idx]
elif 'LIKE' in where[idx] and len(args[idx]) > 1:
letterCombination = ''.join(letters[:len(where)])
return '_' + letterCombination + 'f_'
return '_' + letterCombination + 'f_'
def extract_values(dictionary):
values = []
for key, value in sorted(dictionary.items()):
values.extend(value)
return values
def combination_gen(ngrams):
""" Returns combinations for truncated expressions """
args = []
if len(ngrams) > 1:
for item1 in ngrams[0]:
for item2 in ngrams[1]:
if len(ngrams) == 2:
args.append([item1, item2])
if len(ngrams) == 3:
for item3 in ngrams[2]:
args.append([item1, item2, item3])
else:
for item in ngrams[0]:
args.append([item])
return args
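# Illustrative example (added): combination_gen([['en', 'ei'], ['bil']]) returns
# [['en', 'bil'], ['ei', 'bil']], i.e. every n-gram spelled out from the truncated
# alternatives given in parentheses in the query string.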
def dict_merge(a, b):
c = a.copy()
c.update(b)
return c
def build_query_single(ngram, lang, case_sens, corpus):
args = []
argumentDict = {"ngram": [], "lang": lang, "db": corpus}
ngramSize = len(ngram)
# get values after colon, parse them
if extract_info(ngram) != None:
ngram, argumentsExtracted = extract_info(ngram)
argumentDict = dict_merge(argumentDict, argumentsExtracted)
# values based on input
params = 'in (?,?)' if case_sens == '0' else 'in (?)'
langClause = ' and lang = ?' if argumentDict["lang"] != 'all' else " and lang in (?,?)"
whereClause = " and ".join( list(('(%s %s)' % (field_names[idx], params))
for idx, val in enumerate(ngram)) ) + (langClause if argumentDict["db"] == 'bok' else '')
getTableName = db_names[argumentDict["db"]] + table_names[ngramSize-1] + "." + table_names[ngramSize-1]
# "Case-insensitive": because of limits of our current sqlite3 implementation, we only allow for a quasi case-insensitive search (only the first letter of a word is considered)
if case_sens == '0':
argumentDict["ngram"] = swapcase(ngram)
else:
argumentDict["ngram"] = ngram
idxName = index_names[argumentDict["db"]][ngramSize-1]
# Builds query string
sql = "SELECT json FROM %s INDEXED BY %s WHERE %s" % (getTableName, idxName, whereClause)
# Builds argument array
args.extend(argumentDict["ngram"])
args.append(argumentDict["anno"]) if "anno" in argumentDict else None
if argumentDict["lang"] != 'all' and argumentDict["db"] == 'bok':
args.append(argumentDict["lang"])
elif argumentDict["lang"] == 'all' and argumentDict["db"] == 'bok':
args.append('nob')
args.append('nno')
return (sql, args, argumentDict["lang"], argumentDict["db"])
def swapcase(args):
""" Swaps the case of the first letter of the argument """
lowerUpperArgs = []
try:
for arg in args:
lowerUpperArgs += arg, arg[0].swapcase() + arg[1:]
except:
return None
return lowerUpperArgs
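# Illustrative example (added): swapcase(['hest']) -> ['hest', 'Hest'], i.e. each term is
# paired with a variant whose first letter has the opposite case; build_query_single feeds
# both variants to the "in (?,?)" clause for the quasi case-insensitive search.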
def tokenize(term):
""" Very simple tokenizer: based on whitespace but not including paranthesis """
return re.findall('[^\s\(]+|\([^)]*\)', term)
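# Illustrative example (added): tokenize('en (bil sykkel)') -> ['en', '(bil sykkel)'],
# keeping the parenthesised alternatives together as one token for combination_gen.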
def termParser(i, lParams):
ngrams = []
term = lParams['terms'][i]
if "+" in term:
qType = 'agg'
# splits on +, up to value of maxAdd
aggNgrams = re.split('\+', term, maxAdd)[:maxAdd]
for item in aggNgrams:
aggNgram = tokenize(item)
if len(aggNgram) > maxNgram:
ngrams += [aggNgram[:maxNgram]]
else:
ngrams += [aggNgram]
else:
# invokes the tokenizer
ngrams = tokenize(term)
# only unigram to trigram search is allowed for
if len(ngrams) > maxNgram:
ngrams = ngrams[:maxNgram]
if any("%" in ngram for ngram in ngrams):
qType = 'wildcard'
# returns ngrams for wildcard
ngrams, lParams['lang'], lParams['corpus'] = wildcard_search(ngrams, lParams['lang'], lParams['case_sens'], lParams['corpus'])
# hack: as for now, case_sens must be 1 when doing wildcard_search
lParams['case_sens'] = '1'
else:
# checks if the term contains brackets, if, then return the combinations
# regular expression for finding brackets
parentes = re.compile('\([^)]*\)')
if any(parentes.match(ngram) for ngram in ngrams):
qType = 'trunctated'
for i in range(len(ngrams)):
ngrams_or = ngrams[i].strip('()')
ngrams[i] = re.split("\s", ngrams_or, maxTrunct)[:maxTrunct]
ngrams = combination_gen(ngrams)
else:
qType = 'single'
ngrams = [ngrams]
return (ngrams, qType, lParams)
def merge_result(self):
""" Returns a merged object (similar to UNION SELECT) """
total = Counter()
jsonObject = {}
# loops through each result row
for entry in self:
jsonObject = json.loads(entry[0])
entryCounter = Counter(jsonObject)
total += entryCounter
return total
def get_relfreq(total,total_freq):
"""Calculates the relative frequency for each item, returns complete dictionary """
relfreq_dict = []
for attribute, value in total.iteritems():
if int(attribute) >= 1810:
rel_freq = float(value) / total_freq[attribute] * 100
relfreq_dict.append({"x": int(attribute), "y": rel_freq, "f": int(value)})
return relfreq_dict
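# Worked example (added): if a term occurs 12 times in 1900 and the corpus holds 60000
# tokens for that year, get_relfreq(Counter({'1900': 12}), {'1900': 60000}) returns
# [{'x': 1900, 'y': 0.02, 'f': 12}], i.e. the frequency is expressed per 100 tokens.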
def return_agg_results(sql,args,lang,label,corpus):
""" Returns results for multiple items to be summed """
entries = []
result = []
corplang_set = set()
corpus_totalfreq = []
total_freq = Counter()
# Gets the result for each sub-query
for idx, val in enumerate(sql):
result += query_db_row(sql[idx], args[idx])
# merges the result
total = merge_result(result)
## finds out which corpora/languages were used in the query prior to calculating relative frequency
corplang_pairs = [[a, b] for a, b in zip(corpus, lang)]
corplang_set = set(map(tuple, corplang_pairs))
for item in corplang_set:
corpus_totalfreq.append([freqs_per_year[item[0]][item[1]]])
## calculates the grand total frequency
for item in corpus_totalfreq:
entry_counter = Counter(item[0])
total_freq += entry_counter
## returns a sorted dictionary with relative frequencies
relfreq_dict = get_relfreq(total,total_freq)
relfreq_dict = sorted(relfreq_dict, key=itemgetter('x'))
if relfreq_dict != []:
entries += [{"key": label, "values": relfreq_dict}]
return entries
def return_single_results(sql,args,lang,label,corpus):
""" Returns the results for single items """
entries = []
total_freq = Counter()
# Gets the result for each sub-query
for idx, val in enumerate(sql):
result = query_db_row(sql[idx], args[idx])
total = merge_result(result)
total_freq = freqs_per_year[corpus[idx]][lang[idx]]
## returns a sorted dictionary with relative frequencies
relfreq_dict = get_relfreq(total,total_freq)
relfreq_dict = sorted(relfreq_dict, key=itemgetter('x'))
if relfreq_dict != []:
entries += [{"key": label[idx], "values": relfreq_dict}]
return entries
def get_query_params(request):
""" Returns a dictionary of query parameters """
qParams = {}
    # gets the query parameters, does some basic validation and builds a dictionary of parameters
terms = request.args.get('terms')
if terms:
qParams['terms'] = terms
lang = request.args.get('lang')
if lang:
if re.match(languages, lang):
qParams['lang'] = lang
case_sens = request.args.get('case_sens')
if case_sens:
if re.match('0|1',case_sens):
qParams['case_sens'] = case_sens
freq = request.args.get('freq')
if freq:
if re.match('rel|abs',freq):
qParams['freq'] = freq
corpus = request.args.get('corpus')
if corpus:
if re.match(corpora,corpus):
qParams['corpus'] = corpus
return qParams
@app.route('/')
def index():
return render_template('header-footer.html')
@app.route('/ngram/query')
def query():
entries = []
# get query paramaters
qParams = get_query_params(request)
# fills in default_parameters for those not set
sParams = dict_merge(default_params, qParams)
# does some clean-up and returns terms as list
sParams['terms'] = return_terms(sParams['terms'])
# gets total number of statements
nTerms = len(sParams['terms'])
# loops through each term, interpreting it and generating query
for i in range(nTerms):
# invokes term parser
ngrams, qType, lParams = termParser(i, sParams)
        # starts the query factory for the interpreted term
sql, args, label, lang, corpus = query_factory(ngrams, lParams['lang'], lParams['case_sens'], lParams['corpus'])
# run query depending on amount of results from query_factory
if len(sql) == 1:
entries += return_single_results(sql,args,lang,label,corpus)
elif len(sql) > 1:
if qType == 'agg':
entries += return_agg_results(sql, args, lang, label, corpus)
elif qType == 'wildcard' or qType == 'trunctated':
entries += return_single_results(sql,args,lang,label,corpus)
else:
pass
else:
pass
jsonOutput = export_to_json(entries)
return Response(jsonOutput, mimetype='application/json')
def export_to_json(entries):
""" Exports results as a JSON object """
return json.dumps(entries, indent=4, separators=(', ', ': '))
def export_to_json_file(entries):
""" Exports result as JSON file """
with open('static/dump.json', 'wb') as outfile:
json.dump(entries, outfile, indent=4, separators=(', ', ': '))
if __name__ == '__main__':
app.run(port=port,host=host)
| NationalLibraryOfNorway/NB-N-gram | backend.py | Python | apache-2.0 | 19,164 |
from django.shortcuts import render
from django.http import HttpResponse
import json
def services(request):
return render(request, 'services/services.html', {})
| Adventure-Inc/chachas-adventures | services/views.py | Python | apache-2.0 | 167 |
import paddle.v2 as paddle
import paddle.v2.framework.layers as layers
import paddle.v2.framework.nets as nets
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer
from paddle.v2.framework.framework import Program, g_program
from paddle.v2.framework.executor import Executor
import numpy as np
init_program = Program()
program = Program()
images = layers.data(
name='pixel',
shape=[1, 28, 28],
data_type='float32',
program=program,
init_program=init_program)
label = layers.data(
name='label',
shape=[1],
data_type='int64',
program=program,
init_program=init_program)
conv_pool_1 = nets.simple_img_conv_pool(
input=images,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
act="relu",
program=program,
init_program=init_program)
conv_pool_2 = nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
act="relu",
program=program,
init_program=init_program)
predict = layers.fc(input=conv_pool_2,
size=10,
act="softmax",
program=program,
init_program=init_program)
cost = layers.cross_entropy(
input=predict, label=label, program=program, init_program=init_program)
avg_cost = layers.mean(x=cost, program=program)
accuracy = layers.accuracy(
input=predict, label=label, program=program, init_program=init_program)
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
opts = sgd_optimizer.minimize(avg_cost)
BATCH_SIZE = 50
PASS_NUM = 3
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=BATCH_SIZE)
place = core.CPUPlace()
exe = Executor(place)
exe.run(init_program, feed={}, fetch_list=[])
for pass_id in range(PASS_NUM):
count = 0
for data in train_reader():
img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]),
data)).astype("float32")
y_data = np.array(map(lambda x: x[1], data)).astype("int64")
y_data = y_data.reshape([BATCH_SIZE, 1])
tensor_img = core.LoDTensor()
tensor_y = core.LoDTensor()
tensor_img.set(img_data, place)
tensor_y.set(y_data, place)
outs = exe.run(program,
feed={"pixel": tensor_img,
"label": tensor_y},
fetch_list=[avg_cost, accuracy])
loss = np.array(outs[0])
acc = np.array(outs[1])
if loss < 10.0 and acc > 0.9:
            # if avg cost is less than 10.0 and accuracy is larger than 0.9, we think our code is good.
exit(0)
exit(1)
| pengli09/Paddle | python/paddle/v2/framework/tests/test_recognize_digits_conv.py | Python | apache-2.0 | 2,759 |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import functools
import itertools
import logging
import os
import re
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger("luigi.server")
class Scheduler(object):
"""
Abstract base class.
Note that the methods all take string arguments, not Task objects...
"""""
add_task = NotImplemented
get_work = NotImplemented
ping = NotImplemented
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]')
class scheduler(Config):
# TODO(erikbern): the config_path is needed for backwards compatilibity. We should drop the compatibility
# at some point (in particular this would force users to replace all dashes with underscores in the config)
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
# Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600,
config_path=dict(section='scheduler', name='disable-window-seconds'))
disable_failures = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-num-failures'))
disable_hard_timeout = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-hard-timeout'))
disable_persist = parameter.IntParameter(default=86400,
config_path=dict(section='scheduler', name='disable-persist-seconds'))
max_shown_tasks = parameter.IntParameter(default=100000)
max_graph_nodes = parameter.IntParameter(default=100000)
prune_done_tasks = parameter.BoolParameter(default=False)
record_task_history = parameter.BoolParameter(default=False)
prune_on_get_work = parameter.BoolParameter(default=False)
def fix_time(x):
# Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects
# Let's remove this function soon
if isinstance(x, datetime.datetime):
return time.mktime(x.timetuple())
else:
return x
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and fix_time(self.failures[0]) < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
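# Sliding-window behaviour of Failures in a nutshell (illustrative): with a one-hour window,
# only failures recorded within the last hour are counted.
#
#     f = Failures(window=3600)
#     f.add_failure()
#     f.num_failures()  # -> 1; anything older than 3600 seconds is dropped from the count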
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None,
tracking_url=None):
self.id = task_id
self.stakeholders = set() # workers ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
self.workers = set() # workers ids that can perform task - task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.updated = self.time
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.params = _get_default(params, {})
self.disable_failures = disable_failures
self.disable_hard_timeout = disable_hard_timeout
self.failures = Failures(disable_window)
self.tracking_url = tracking_url
self.scheduler_disable_time = None
self.runnable = False
def __repr__(self):
return "Task(%r)" % vars(self)
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
if (self.failures.first_failure_time is not None and
self.disable_hard_timeout):
if (time.time() >= self.failures.first_failure_time +
self.disable_hard_timeout):
return True
if self.failures.num_failures() >= self.disable_failures:
return True
return False
def can_disable(self):
return (self.disable_failures is not None or
self.disable_hard_timeout is not None)
@property
def pretty_id(self):
param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items())
return '{}({})'.format(self.family, param_str)
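    # For example (illustrative): a task with family 'MyTask' and params
    # {'date': '2015-06-01'} has the pretty_id 'MyTask(date=2015-06-01)'.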
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active or time.time() # seconds since epoch
self.last_get_work = None
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
self.disabled = False
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference, get_work=False):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
if get_work:
self.last_get_work = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_pending_tasks(self, state):
"""
Get PENDING (and RUNNING) tasks for this worker.
You have to pass in the state for optimization reasons.
"""
if len(self.tasks) < state.num_pending_tasks():
return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
self.tasks)
else:
return state.get_pending_tasks()
def is_trivial_worker(self, state):
"""
        True if this worker is not an assistant and all of its pending tasks
        are free of resource requirements.
        We have to pass the state parameter for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_pending_tasks(state))
@property
def assistant(self):
return self.info.get('assistant', False)
def __str__(self):
return self.id
class SimpleTaskState(object):
"""
    Keep track of the current state and handle persistence.
    The point of this class is to enable other ways to keep state, e.g. by using a database.
    These will be implemented by creating an abstract base class that this and other classes
    inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
def get_state(self):
return self._tasks, self._active_workers
def set_state(self, state):
self._tasks, self._active_workers = state
def dump(self):
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(self.get_state(), fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
    # Prone to crashes when old state is unpickled with updated code. TODO: some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from clean slate.")
return
self.set_state(state)
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
# Convert from old format
# TODO: this is really ugly, we need something more future-proof
# Every time we add an attribute to the Worker or Task class, this
# code needs to be updated
# Compatibility since 2014-06-02
for k, v in six.iteritems(self._active_workers):
if isinstance(v, float):
self._active_workers[k] = Worker(worker_id=k, last_active=v)
# Compatibility since 2015-05-28
if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)):
# If you load from an old format where Workers don't contain tasks.
for k, worker in six.iteritems(self._active_workers):
worker.tasks = set()
for task in six.itervalues(self._tasks):
for worker_id in task.workers:
self._active_workers[worker_id].tasks.add(task)
# Compatibility since 2015-04-28
if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)):
for t in six.itervalues(self._tasks):
t.disable_hard_timeout = None
else:
logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path)
def get_active_tasks(self, status=None):
if status:
for task in six.itervalues(self._status_tasks[status]):
yield task
else:
for task in six.itervalues(self._tasks):
yield task
def get_running_tasks(self):
return six.itervalues(self._status_tasks[RUNNING])
def get_pending_tasks(self):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
for status in [PENDING, RUNNING])
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_status(self, task, new_status, config=None):
if new_status == FAILED:
assert config is not None
if new_status == DISABLED and task.status == RUNNING:
return
if task.status == DISABLED:
if new_status == DONE:
self.re_enable(task)
# don't allow workers to override a scheduler disable
elif task.scheduler_disable_time is not None and new_status != DISABLED:
return
if new_status == FAILED and task.can_disable() and task.status != DISABLED:
task.add_failure()
if task.has_excessive_failures():
task.scheduler_disable_time = time.time()
new_status = DISABLED
notifications.send_error_email(
'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
'{task} failed {failures} times in the last {window} seconds, so it is being '
'disabled for {persist} seconds'.format(
failures=config.disable_failures,
task=task.id,
window=config.disable_window,
persist=config.disable_persist,
))
elif new_status == DISABLED:
task.scheduler_disable_time = None
if new_status != task.status:
self._status_tasks[task.status].pop(task.id)
self._status_tasks[new_status][task.id] = task
task.status = new_status
task.updated = time.time()
def fail_dead_worker_task(self, task, config, assistants):
# If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic
if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
"FAILED with retry delay of %rs", task.id, task.worker_running,
config.retry_delay)
task.worker_running = None
self.set_status(task, FAILED, config)
task.retry = time.time() + config.retry_delay
def prune(self, task, config):
remove = False
# Mark tasks with no remaining active stakeholders for deletion
if not task.stakeholders:
if task.remove is None:
logger.info("Task %r has stakeholders %r but none remain connected -> will remove "
"task in %s seconds", task.id, task.stakeholders, config.remove_delay)
task.remove = time.time() + config.remove_delay
# Re-enable task after the disable time expires
if task.status == DISABLED and task.scheduler_disable_time is not None:
if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:
self.re_enable(task, config)
# Remove tasks that have no stakeholders
if task.remove and time.time() > task.remove:
logger.info("Removing task %r (no connected stakeholders)", task.id)
remove = True
# Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
self.set_status(task, PENDING, config)
return remove
def inactivate_tasks(self, delete_tasks):
# The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
# but with a pluggable state storage, you might very well want to keep some history of
# older tasks as well. That's why we call it "inactivate" (as in the verb)
for task in delete_tasks:
task_obj = self._tasks.pop(task)
self._status_tasks[task_obj.status].pop(task)
def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
for worker in six.itervalues(self._active_workers):
if last_active_lt is not None and worker.last_active >= last_active_lt:
continue
last_get_work = getattr(worker, 'last_get_work', None)
if last_get_work_gt is not None and (
last_get_work is None or last_get_work <= last_get_work_gt):
continue
yield worker
def get_assistants(self, last_active_lt=None):
return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
return self._active_workers.keys() # only used for unit tests
def get_worker(self, worker_id):
return self._active_workers.setdefault(worker_id, Worker(worker_id))
def inactivate_workers(self, delete_workers):
# Mark workers as inactive
for worker in delete_workers:
self._active_workers.pop(worker)
self._remove_workers_from_tasks(delete_workers)
def _remove_workers_from_tasks(self, workers, remove_stakeholders=True):
for task in self.get_active_tasks():
if remove_stakeholders:
task.stakeholders.difference_update(workers)
task.workers.difference_update(workers)
def disable_workers(self, workers):
self._remove_workers_from_tasks(workers, remove_stakeholders=False)
for worker in workers:
self.get_worker(worker).disabled = True
def get_necessary_tasks(self):
necessary_tasks = set()
for task in self.get_active_tasks():
if task.status not in (DONE, DISABLED) or \
getattr(task, 'scheduler_disable_time', None) is not None:
necessary_tasks.update(task.deps)
necessary_tasks.add(task.id)
return necessary_tasks
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
:param config: an object of class "scheduler" or None (in which the global instance will be used)
:param resources: a dict of str->int constraints
:param task_history_override: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(
Task, disable_failures=self._config.disable_failures,
disable_hard_timeout=self._config.disable_hard_timeout,
disable_window=self._config.disable_window)
self._worker_requests = {}
def load(self):
self._state.load()
def dump(self):
self._state.dump()
def prune(self):
logger.info("Starting pruning of task graph")
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
if assistant_ids:
necessary_tasks = self._state.get_necessary_tasks()
else:
necessary_tasks = ()
for task in self._state.get_active_tasks():
self._state.fail_dead_worker_task(task, self._config, assistant_ids)
removed = self._state.prune(task, self._config)
if removed and task.id not in necessary_tasks:
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
logger.info("Done pruning task graph")
def update(self, worker_id, worker_reference=None, get_work=False):
"""
        Keep track of when the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference, get_work=get_work)
return not getattr(worker, 'disabled', False)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, tracking_url=None, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
worker_id = kwargs['worker']
worker_enabled = self.update(worker_id)
if worker_enabled:
_default_task = self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params,
)
else:
_default_task = None
task = self._state.get_task(task_id, setdefault=_default_task)
if task is None or (task.status != RUNNING and not worker_enabled):
return
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if tracking_url is not None or task.status != RUNNING:
task.tracking_url = tracking_url
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if expl is not None:
task.expl = expl
if not (task.status == RUNNING and status == PENDING) or new_deps:
# don't allow re-scheduling of task while it is running, it must either fail or succeed first
if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
self._update_task_history(task, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED:
task.retry = self._retry_time(task, self._config)
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if worker_enabled and not assistant:
task.stakeholders.add(worker_id)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker_id)
self._update_priority(task, priority, worker_id)
if runnable and status != FAILED and worker_enabled:
task.workers.add(worker_id)
self._state.get_worker(worker_id).tasks.add(task)
task.runnable = runnable
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
def disable_worker(self, worker):
self._state.disable_workers({worker})
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
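    # Worked example (illustrative): with self._resources = {'db': 2}, a task needing
    # {'db': 1} passes while used_resources is {'db': 1} (1 + 1 <= 2) but fails once
    # used_resources reaches {'db': 2}; resources absent from the config default to a
    # capacity of 1 via available_resources.get(resource, 1).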
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
def _rank(self, task):
"""
Return worker's rank function for task scheduling.
:return:
"""
return task.priority, -task.time
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def _retry_time(self, task, config):
return time.time() + config.retry_delay
def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs):
# TODO: remove any expired nodes
        # Algo: iterate over all nodes, find the highest priority node with no dependencies and available
# resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
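        # Sketch of the greedy pre-allocation below (illustrative): if only one unit of
        # resource 'r' exists and a higher-priority PENDING task belonging to another
        # active worker needs {'r': 1}, that unit is reserved for it (greedy_workers is
        # decremented and greedy_resources incremented), so this worker is not handed a
        # lower-priority task that would starve the higher-priority one of 'r'.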
if self._config.prune_on_get_work:
self.prune()
worker_id = kwargs['worker']
# Return remaining tasks that have no FAILED descendants
self.update(worker_id, {'host': host}, get_work=True)
if assistant:
self.add_worker(worker_id, [('assistant', assistant)])
best_task = None
if current_tasks is not None:
ct_set = set(current_tasks)
for task in sorted(self._state.get_running_tasks(), key=self._rank):
if task.worker_running == worker_id and task.id not in ct_set:
best_task = task
locally_pending_tasks = 0
running_tasks = []
upstream_table = {}
greedy_resources = collections.defaultdict(int)
n_unique_pending = 0
worker = self._state.get_worker(worker_id)
if worker.is_trivial_worker(self._state):
relevant_tasks = worker.get_pending_tasks(self._state)
used_resources = collections.defaultdict(int)
greedy_workers = dict() # If there's no resources, then they can grab any task
else:
relevant_tasks = self._state.get_pending_tasks()
used_resources = self._used_resources()
activity_limit = time.time() - self._config.worker_disconnect_delay
active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit)
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in active_workers)
tasks = list(relevant_tasks)
tasks.sort(key=self._rank, reverse=True)
for task in tasks:
upstream_status = self._upstream_status(task.id, upstream_table)
in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers
if task.status == RUNNING and in_workers:
# Return a list of currently running tasks to the client,
# makes it easier to troubleshoot
other_worker = self._state.get_worker(task.worker_running)
more_info = {'task_id': task.id, 'worker': str(other_worker)}
if other_worker is not None:
more_info.update(other_worker.info)
running_tasks.append(more_info)
if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1
if len(task.workers) == 1 and not assistant:
n_unique_pending += 1
if best_task:
continue
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
else:
workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = {'n_pending_tasks': locally_pending_tasks,
'running_tasks': running_tasks,
'task_id': None,
'n_unique_pending': n_unique_pending}
if best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker_id
best_task.time_running = time.time()
self._update_task_history(best_task, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
return reply
def ping(self, **kwargs):
worker_id = kwargs['worker']
self.update(worker_id)
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
while task_stack:
dep_id = task_stack.pop()
if self._state.has_task(dep_id):
dep = self._state.get_task(dep_id)
if dep.status == DONE:
continue
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack = task_stack + [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps]
upstream_status.append('') # to handle empty list
status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
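    # Worked example (illustrative): for a chain A (PENDING) -> B (PENDING) -> C (FAILED),
    # C maps to UPSTREAM_FAILED via STATUS_TO_UPSTREAM_MAP, and the postorder step then
    # propagates the maximum severity upward, so both B and A report UPSTREAM_FAILED.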
def _serialize_task(self, task_id, include_deps=True, deps=None):
task = self._state.get_task(task_id)
ret = {
'display_name': task.pretty_id,
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'last_updated': getattr(task, "updated", task.time),
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
'tracking_url': getattr(task, "tracking_url", None),
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps if deps is None else deps)
return ret
def graph(self, **kwargs):
self.prune()
serialized = {}
seen = set()
for task in self._state.get_active_tasks():
serialized.update(self._traverse_graph(task.id, seen))
return serialized
def _filter_done(self, task_ids):
for task_id in task_ids:
task = self._state.get_task(task_id)
if task is None or task.status != DONE:
yield task_id
def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True):
""" Returns the dependency graph rooted at task_id
This does a breadth-first traversal to find the nodes closest to the
root before hitting the scheduler.max_graph_nodes limit.
:param root_task_id: the id of the graph's root
:return: A map of task id to serialized node
"""
if seen is None:
seen = set()
elif root_task_id in seen:
return {}
if dep_func is None:
def dep_func(t):
return t.deps
seen.add(root_task_id)
serialized = {}
queue = collections.deque([root_task_id])
while queue:
task_id = queue.popleft()
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.warn('Missing task for id [%s]', task_id)
# NOTE : If a dependency is missing from self._state there is no way to deduce the
# task family and parameters.
family_match = TASK_FAMILY_RE.match(task_id)
family = family_match.group(1) if family_match else UNKNOWN
params = {'task_id': task_id}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'display_name': task_id,
'priority': 0,
}
else:
deps = dep_func(task)
if not include_done:
deps = list(self._filter_done(deps))
serialized[task_id] = self._serialize_task(task_id, deps=deps)
for dep in sorted(deps):
if dep not in seen:
seen.add(dep)
queue.append(dep)
if task_id != root_task_id:
del serialized[task_id]['display_name']
if len(serialized) >= self._config.max_graph_nodes:
break
return serialized
def dep_graph(self, task_id, include_done=True, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
return self._traverse_graph(task_id, include_done=include_done)
def inverse_dep_graph(self, task_id, include_done=True, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
inverse_graph = collections.defaultdict(set)
for task in self._state.get_active_tasks():
for dep in task.deps:
inverse_graph[dep].add(task.id)
return self._traverse_graph(
task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done)
def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
def filter_func(_):
return True
else:
terms = search.split()
def filter_func(t):
return all(term in t.pretty_id for term in terms)
for task in filter(filter_func, self._state.get_active_tasks(status)):
if (task.status != PENDING or not upstream_status or
upstream_status == self._upstream_status(task.id, upstream_status_table)):
serialized = self._serialize_task(task.id, False)
result[task.id] = serialized
if limit and len(result) > self._config.max_shown_tasks:
return {'num_tasks': len(result)}
return result
def _first_task_display_name(self, worker):
task_id = worker.info.get('first_task', '')
if self._state.has_task(task_id):
return self._state.get_task(task_id).pretty_id
else:
return task_id
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=getattr(worker, 'started', None),
first_task_display_name=self._first_task_display_name(worker),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_pending_tasks():
if task.status == RUNNING and task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, False)
elif task.status == PENDING:
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
def resource_list(self):
"""
        Resource usage info and the tasks consuming each resource.
"""
self.prune()
resources = [
dict(
name=resource,
num_total=r_dict['total'],
num_used=r_dict['used']
) for resource, r_dict in six.iteritems(self.resources())]
if self._resources is not None:
consumers = collections.defaultdict(dict)
for task in self._state.get_running_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
consumers[resource][task.id] = self._serialize_task(task.id, False)
for resource in resources:
tasks = consumers[resource['name']]
resource['num_consumer'] = len(tasks)
resource['running'] = tasks
return resources
def resources(self):
        ''' Get total resources and the amount of each currently in use '''
used_resources = self._used_resources()
ret = collections.defaultdict(dict)
        for resource, total in six.iteritems(self._resources or {}):  # use six for py2/py3 compatibility
ret[resource]['total'] = total
if resource in used_resources:
ret[resource]['used'] = used_resources[resource]
else:
ret[resource]['used'] = 0
return ret
def task_search(self, task_str, **kwargs):
"""
        Query for a subset of tasks by task_id substring match.
        :param task_str: substring to look for within task ids
        :return: a map of status to the matching serialized tasks
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, False)
result[task.status][task.id] = serialized
return result
def re_enable_task(self, task_id):
serialized = {}
task = self._state.get_task(task_id)
if task and task.status == DISABLED and task.scheduler_disable_time:
self._state.re_enable(task, self._config)
serialized = self._serialize_task(task_id)
return serialized
def fetch_error(self, task_id, **kwargs):
if self._state.has_task(task_id):
task = self._state.get_task(task_id)
return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id}
else:
return {"taskId": task_id, "error": ""}
def _update_task_history(self, task, status, host=None):
try:
if status == DONE or status == FAILED:
successful = (status == DONE)
self._task_history.task_finished(task, successful)
elif status == PENDING:
self._task_history.task_scheduled(task)
elif status == RUNNING:
self._task_history.task_started(task, host)
except BaseException:
logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
# Used by server.py to expose the calls
return self._task_history
| oldpa/luigi | luigi/scheduler.py | Python | apache-2.0 | 45,889 |
from copy import deepcopy
import pytest
from sovrin_node.test import waits
from stp_core.loop.eventually import eventually
from plenum.common.constants import VERSION
from sovrin_common.constants import REINSTALL
from sovrin_node.test.upgrade.helper import bumpedVersion, checkUpgradeScheduled, \
ensureUpgradeSent, check_no_loop
from sovrin_node.server.upgrade_log import UpgradeLog
import sovrin_node
def test_upgrade_does_not_get_into_loop_if_reinstall(
looper,
tconf,
nodeSet,
validUpgrade,
trustee,
trusteeWallet,
monkeypatch):
new_version = bumpedVersion()
upgr1 = deepcopy(validUpgrade)
upgr1[VERSION] = new_version
upgr1[REINSTALL] = True
    # Schedule an upgrade; it should pass
ensureUpgradeSent(looper, trustee, trusteeWallet, upgr1)
looper.run(
eventually(
checkUpgradeScheduled,
nodeSet,
upgr1[VERSION],
retryWait=1,
timeout=waits.expectedUpgradeScheduled()))
# here we make nodes think they have upgraded successfully
monkeypatch.setattr(sovrin_node.__metadata__, '__version__', new_version)
check_no_loop(nodeSet, UpgradeLog.UPGRADE_SUCCEEDED)
| keenondrums/sovrin-node | sovrin_node/test/upgrade/test_pool_upgrade_no_loop_reinstall.py | Python | apache-2.0 | 1,232 |
# Copyright (c) 2016 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticAgent/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants to hide XSD Datatypes used by Point Values and Properties
These help to describe the data in a feed so the receiving Thing can know what kind of data to expect
See also http://www.w3.org/TR/xmlschema-2/#built-in-datatypes
"""
from __future__ import unicode_literals
BASE64 = 'base64Binary'
'''Represents a sequence of binary octets (bytes) encoded according to RFC 2045,
the standard defining the MIME types (look under "6.8 Base64 Content-Transfer-Encoding").
'''
BOOLEAN = 'boolean'
'''A Boolean true or false value. Representations of true are "true" and "1"; false is denoted as "false" or "0".'''
BYTE = 'byte'
'''A signed 8-bit integer in the range [-128 -> +127]. Derived from the short datatype.'''
UNSIGNED_BYTE = 'unsignedByte'
'''An unsigned 8-bit integer in the range [0, 255]. Derived from the unsignedShort datatype.'''
DATE = 'date'
'''Represents a specific date. The syntax is the same as that for the date part of dateTime,
with an optional time zone indicator. Example: "1889-09-24".
'''
DATETIME = 'dateTime'
'''
Represents a specific instant of time. It has the form YYYY-MM-DDThh:mm:ss followed by an optional time-zone suffix.
`YYYY` is the year, `MM` is the month number, `DD` is the day number,
`hh` the hour in 24-hour format, `mm` the minute, and `ss` the second (a decimal and fraction are allowed for the
seconds part).
The optional zone suffix is either `"Z"` for Universal Coordinated Time (UTC), or a time offset of the form
`"[+|-]hh:mm"`, giving the difference between UTC and local time in hours and minutes.
Example: "2004-10-31T21:40:35.5-07:00" is a time on Halloween 2004 in Mountain Standard time. The equivalent UTC would
be "2004-11-01T04:40:35.5Z".
'''
DECIMAL = 'decimal'
'''Any base-10 fixed-point number. There must be at least one digit to the left of the decimal point, and a leading "+"
or "-" sign is allowed.
Examples: "42", "-3.14159", "+0.004".
'''
DOUBLE = 'double'
'''A 64-bit floating-point decimal number as specified in the IEEE 754-1985 standard. The external form is the same as
the float datatype.
'''
FLOAT = 'float'
'''A 32-bit floating-point decimal number as specified in the IEEE 754-1985 standard.
Allowable values are the same as in the decimal type, optionally followed by an exponent,
or one of the special values "INF" (positive infinity), "-INF" (negative infinity), or "NaN" (not a number).
The exponent starts with either "e" or "E", optionally followed by a sign, and one or more digits.
Example: "6.0235e-23".
'''
INT = 'int'
'''Represents a 32-bit signed integer in the range [-2,147,483,648, 2,147,483,647]. Derived from the long datatype.'''
INTEGER = 'integer'
'''Represents a signed integer. Values may begin with an optional "+" or "-" sign. Derived from the decimal datatype.'''
LONG = 'long'
'''A signed, extended-precision integer; at least 18 digits are guaranteed. Derived from the integer datatype. '''
STRING = 'string'
'''Any sequence of zero or more characters.'''
TIME = 'time'
'''A moment of time that repeats every day. The syntax is the same as that for dateTime,
omitting everything up to and including the separator "T". Examples: "00:00:00" is midnight,
and "13:04:00" is an hour and four minutes after noon.
'''
URI = 'anyURI'
'''
The data must conform to the syntax of a Uniform Resource Identifier (URI), as defined in RFC 2396
as amended by RFC 2732. Example: "http://www.nmt.edu/tcc/"
is the URI for the New Mexico Tech Computer Center's index page.
'''
IRI = 'IRI'
'''Only for use with property API calls. Used to handle properties which require an IRI (URIRef) value.'''
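# Example (illustrative only; no agent API calls are assumed): the constants above can be
# used to label the values a feed shares, e.g. in a plain mapping from value name to type.
#
#     TEMPERATURE_VALUES = {
#         'temperature': DECIMAL,   # e.g. "21.5"
#         'recorded_at': DATETIME,  # e.g. "2016-03-01T12:00:00Z"
#         'indoors': BOOLEAN,       # "true"/"false" or "1"/"0"
#     }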
| Iotic-Labs/py-IoticAgent | src/IoticAgent/Datatypes.py | Python | apache-2.0 | 4,222 |
import sys, string
import os.path
import unique
import export
import gene_associations
import traceback
import time
################# Parse directory files
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
#add in code to prevent folder names from being included
dir_list2 = []
for file in dir_list:
lf = string.lower(file)
if '.txt' in lf or '.sif' in lf or '.tab' in lf: dir_list2.append(file)
return dir_list2
################# Begin Analysis from parsing files
def getEnsemblGeneData(filename):
fn=filepath(filename)
global ensembl_symbol_db; ensembl_symbol_db={}; global symbol_ensembl_db; symbol_ensembl_db={}
for line in open(fn,'rU').xreadlines():
data,null = string.split(line,'\n')
t = string.split(data,'\t')
ensembl=t[0];symbol=t[1]
        ### Have to do this in order to get the WEIRD chromosomal associations and the normal ones mapped to the same genes
try: symbol_ensembl_db[symbol].append(ensembl)
except Exception: symbol_ensembl_db[symbol] = [ensembl]
try: symbol_ensembl_db[string.lower(symbol)].append(ensembl)
except Exception: symbol_ensembl_db[string.lower(symbol)] = [ensembl]
try: symbol_ensembl_db[symbol.title()].append(ensembl)
except Exception: symbol_ensembl_db[symbol.title()] = [ensembl]
ensembl_symbol_db[ensembl] = symbol
def getHMDBData(species):
program_type,database_dir = unique.whatProgramIsThis()
filename = database_dir+'/'+species+'/gene/HMDB.txt'
x=0
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x==0: x=1
else:
t = string.split(data,'\t')
try: hmdb_id,symbol,description,secondary_id,iupac,cas_number,chebi_id,pubchem_compound_id,Pathways,ProteinNames = t
except Exception:
### Bad Tab introduced from HMDB
hmdb_id = t[0]; symbol = t[1]; ProteinNames = t[-1]
symbol_hmdb_db[symbol]=hmdb_id
hmdb_symbol_db[hmdb_id] = symbol
ProteinNames=string.split(ProteinNames,',')
### Add gene-metabolite interactions to databases
for protein_name in ProteinNames:
try:
for ensembl in symbol_ensembl_db[protein_name]:
z = InteractionInformation(hmdb_id,ensembl,'HMDB','Metabolic')
interaction_annotation_dbase[ensembl,hmdb_id] = z ### This is the interaction direction that is appropriate
try: interaction_db[hmdb_id][ensembl]=1
except KeyError: db = {ensembl:1}; interaction_db[hmdb_id] = db ###weight of 1 (weights currently not-supported)
try: interaction_db[ensembl][hmdb_id]=1
except KeyError: db = {hmdb_id:1}; interaction_db[ensembl] = db ###weight of 1 (weights currently not-supported)
except Exception: None
def verifyFile(filename):
status = 'not found'
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines(): status = 'found';break
except Exception: status = 'not found'
return status
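# The parser below first tries a six-column tab-delimited layout (see the unpacking of `t`
# in importInteractionDatabases); a hypothetical input line would look like:
#
#     GENEA<tab>physical<tab>GENEB<tab>ENSG00000000001<tab>ENSG00000000002<tab>BioGRID
#
# with a five-column fallback (ensembl1, ensembl2, symbol1, symbol2, interaction_type) and
# two/three-column fallbacks for simple ID pair files (e.g. microRNA target lists).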
def importInteractionDatabases(interactionDirs):
""" Import multiple interaction format file types (designated by the user) """
exclude=[]
for file in interactionDirs:
status = verifyFile(file)
if status == 'not found':
exclude.append(file)
for i in exclude:
interactionDirs.remove(i)
for fn in interactionDirs: #loop through each file in the directory to output results
x=0; imported=0; stored=0
file = export.findFilename(fn)
print "Parsing interactions from:",file
for line in open(fn,'rU').xreadlines():
data,null = string.split(line,'\n')
t = string.split(data,'\t')
if x==0: x=1
#elif 'PAZAR' in data or 'Amadeus' in data:x+=0
else:
obligatory = False
imported+=1
proceed = True
source=''
interaction_type = 'interaction'
try:
symbol1,interaction_type, symbol2, ensembl1,ensembl2,source = t
ens_ls1=[ensembl1]; ens_ls2=[ensembl2]
if 'HMDB' in ensembl1:
ensembl1 = string.replace(ensembl1,' ','') ### HMDB ID sometimes proceeded by ' '
symbol_hmdb_db[symbol1]=ensembl1
hmdb_symbol_db[ensembl1] = symbol1
interaction_type = 'Metabolic'
if 'HMDB' in ensembl2:
ensembl2 = string.replace(ensembl2,' ','') ### HMDB ID sometimes proceeded by ' '
symbol_hmdb_db[symbol2]=ensembl2
hmdb_symbol_db[ensembl2] = symbol2
interaction_type = 'Metabolic'
except Exception:
try:
ensembl1,ensembl2,symbol1,symbol2,interaction_type=t
if ensembl1 == '':
try:
ens_ls1 = symbol_ensembl_db[symbol1]
ens_ls2 = symbol_ensembl_db[symbol2]
except Exception: None
except Exception:
proceed = False
if proceed: ### If the interaction data conformed to one of the two above types (typically two valid interacting gene IDs)
if (len(ens_ls1)>0 and len(ens_ls2)>0):
secondary_proceed = True
stored+=1
for ensembl1 in ens_ls1:
for ensembl2 in ens_ls2:
"""
if (ensembl1,ensembl2) == ('ENSG00000111704','ENSG00000152284'):
print t;sys.exit()
if (ensembl1,ensembl2) == ('ENSG00000152284','ENSG00000111704'):
print t;sys.exit()
"""
if 'WikiPathways' in file or 'KEGG' in file:
if ensembl2 != ensembl1:
if (ensembl2,ensembl1) in interaction_annotation_dbase:
del interaction_annotation_dbase[(ensembl2,ensembl1)]
### Exclude redundant entries with fewer interaction details (e.g., arrow direction BIOGRID) - overwrite with the opposite gene arrangement below
if (ensembl1,ensembl2) in interaction_annotation_dbase:
if interaction_annotation_dbase[(ensembl1,ensembl2)].InteractionType() !='physical':
secondary_proceed = False ### Don't overwrite a more informative annotation like transcriptional regulation or microRNA targeting
if 'DrugBank' in fn:
source = 'DrugBank'
interaction_type = 'drugInteraction'
obligatory=True
ensembl1, ensembl2 = ensembl2, ensembl1 ### switch the order of these (drugs reported as first ID and gene as the second)
if secondary_proceed:
z = InteractionInformation(ensembl1,ensembl2,source,interaction_type)
interaction_annotation_dbase[ensembl1,ensembl2] = z
#z = InteractionInformation(ensembl2,ensembl1,source,interaction_type)
#interaction_annotation_dbase[ensembl2,ensembl1] = z
try: interaction_db[ensembl1][ensembl2]=1
except KeyError: db = {ensembl2:1}; interaction_db[ensembl1] = db ###weight of 1 (weights currently not-supported)
try: interaction_db[ensembl2][ensembl1]=1
except KeyError: db = {ensembl1:1}; interaction_db[ensembl2] = db ###weight of 1 (weights currently not-supported)
if obligatory and source in obligatoryList: ### Include these in the final pathway if linked to any input node (e.g., miRNAs, drugs)
try: obligatory_interactions[ensembl1][ensembl2]=1
except KeyError: db = {ensembl2:1}; obligatory_interactions[ensembl1] = db ###weight of 1 (weights currentlynot-supported)
elif source in secondDegreeObligatoryCategories:
try: second_degree_obligatory[ensembl1][ensembl2]=1
except KeyError: db = {ensembl2:1}; second_degree_obligatory[ensembl1] = db ###weight of 1 (weights currently not-supported)
else:
proceed = False
try:
ID1, null, ID2 = t
proceed = True
except Exception:
try:
ID1, ID2 = t
proceed = True
except Exception:
None
if proceed:
if 'microRNATargets' in fn:
if 'mir' in ID2: prefix = 'MIR'
else: prefix = 'LET'
ID2='MIR'+string.split(ID2,'-')[2] ### Ensembl naming convention
source = 'microRNATargets'
interaction_type = 'microRNAInteraction'
obligatory=True
try: ID_ls1 = symbol_ensembl_db[ID1]
except Exception: ID_ls1 = [ID1]
try: ID_ls2 = symbol_ensembl_db[ID2]
except Exception: ID_ls2 = [ID2]
"""if 'microRNATargets' in fn:
if '*' not in ID2: print ID_ls2;sys.exit()"""
addInteractions = True
for ID1 in ID_ls1:
for ID2 in ID_ls2:
z = InteractionInformation(ID2,ID1,source,interaction_type)
interaction_annotation_dbase[ID2,ID1] = z ### This is the interaction direction that is appropriate
try: interaction_db[ID1][ID2]=1
except KeyError: db = {ID2:1}; interaction_db[ID1] = db ###weight of 1 (weights currently supported)
try: interaction_db[ID2][ID1]=1
except KeyError: db = {ID1:1}; interaction_db[ID2] = db ###weight of 1 (weights currently supported)
if source in secondDegreeObligatoryCategories:
try: second_degree_obligatory[ID1][ID2]=1
except KeyError: db = {ID2:1}; second_degree_obligatory[ID1] = db ###weight of 1 (weights currently supported)
elif obligatory and source in obligatoryList: ### Include these in the final pathway if linked to any input node (e.g., miRNAs, drugs)
try: obligatory_interactions[ID1][ID2]=1
except KeyError: db = {ID2:1}; obligatory_interactions[ID1] = db ###weight of 1 (weights currently supported)
### Evaluate the most promiscous interactors (e.g., UBC)
remove_list=[]
for ID in interaction_db:
if len(interaction_db[ID])>2000:
remove_list.append(ID)
#print len(interaction_db[ID]),ensembl_symbol_db[ID]
for ID in remove_list:
#print 'removing', ID
del interaction_db[ID]
blackList[ID] = []
print 'Imported interactions:',len(interaction_annotation_dbase)
class InteractionInformation:
def __init__(self, ensembl1, ensembl2, source, interaction_type):
self._ensembl1 = ensembl1; self._ensembl2 = ensembl2; self._source = source
self._interaction_type = interaction_type
def Ensembl1(self): return self._ensembl1
def Ensembl2(self): return self._ensembl2
def Source(self): return self._source
def InteractionType(self): return self._interaction_type
def Report(self):
output = self.Ensembl1()+'|'+self.Ensembl2()
return output
def __repr__(self): return self.Report()
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def importqueryResults(species,dir_file,id_db):
global query_db; query_db = {}
query_interactions={} ### This is the final list of shown interactions
if dir_file == None:
fileRead = dir_file
elif '.' in dir_file:
fn=filepath(dir_file)
fileRead = open(fn,'rU').xreadlines()
else:
fileRead = dir_file ### This is a list of IDs passed to this function rather than in a file
if len(id_db)==0: ### Otherwise, already provided gene IDs to query
translated=0
try:
x=0
for line in fileRead:
try:
data = cleanUpLine(line)
t = string.split(data,'\t')
except Exception:
t = line
                if x==1: x = 1 ### the first row is no longer skipped since it may contain a valid ID(s)
else:
id = t[0]
ensembl_ls1=[]
if id in ensembl_symbol_db:
symbol = ensembl_symbol_db[id]
query_db[id] = symbol
ensembl_ls1 = [id]
translated+=1
elif id in symbol_ensembl_db:
ensembl_ls1 = symbol_ensembl_db[id]
translated+=1
for ensembl in ensembl_ls1:
query_db[ensembl] = id
elif id in symbol_hmdb_db:
hmdb = symbol_hmdb_db[id]
query_db[hmdb] = id
elif id in hmdb_symbol_db:
symbol = hmdb_symbol_db[id]
query_db[id] = symbol
else:
query_db[id] = id ### Currently not dealt with
ensembl_ls1 = [id]
### If a SIF file add genes and interactions
if len(t)>1 and 'SIF' in inputDataType: ### Potentially SIF format
interaction_type = t[1]
try: id2 = t[2]
except Exception: id2 = t[1]; interaction_type = 'undetermined'
ensembl_ls2=[]
if id2 in ensembl_symbol_db:
symbol = ensembl_symbol_db[id2]
query_db[id2] = symbol
ensembl_ls2 = [id2]
elif id2 in symbol_ensembl_db:
ensembl_ls2 = symbol_ensembl_db[id2]
for ensembl in ensembl_ls2:
query_db[ensembl] = id2
elif id2 in symbol_hmdb_db:
hmdb = symbol_hmdb_db[id2]
query_db[hmdb] = id2
elif id2 in hmdb_symbol_db:
symbol = hmdb_symbol_db[id2]
query_db[id2] = symbol
else:
query_db[id2] = id2
for ensembl1 in ensembl_ls1:
for ensembl2 in ensembl_ls2:
try: query_interactions[ensembl1].append(ensembl2)
except Exception: query_interactions[ensembl1] = [ensembl2]
z = InteractionInformation(ensembl1,ensembl2,'custom',interaction_type)
interaction_annotation_dbase[ensembl1,ensembl2] = z
except Exception:
print traceback.format_exc()
print 'No valid directories or IDs provided. Exiting.'; kill
if translated==0:
import WikiPathways_webservice
try: query_db = WikiPathways_webservice.importDataSimple(dir_file,None,MOD='Ensembl',Species=species)[0]
except Exception: ### If metabolomics
query_db = WikiPathways_webservice.importDataSimple(dir_file,None,MOD='HMDB',Species=species)[0]
### Translate the Ensembl IDs to symbols (where possible)
for id in query_db:
if id in ensembl_symbol_db:
symbol = ensembl_symbol_db[id]
else:
symbol=id
query_db[id] = symbol
else:
for id in id_db:
if id_db[id]==None:
try: id_db[id] = ensembl_symbol_db[id] ### Save symbol (done for imported pathway genes)
except Exception: id_db[id]=id
query_db = id_db ### Input gene IDs (not in a file)
print 'Number of IDs from', dir_file, 'is', len(query_db)
return query_db,query_interactions,dir_file
def associateQueryGenesWithInteractions(query_db,query_interactions,dir_file):
suffix=''
if dir_file!=None:
if len(dir_file)!=0:
suffix='-'+intNameShort+'_'+export.findFilename(dir_file)[:-4]
if len(suffix)==0:
try: suffix = '_'+FileName
except Exception: None
file_name = 'AltAnalyze-network'+suffix
query_interactions_unique={}
interacting_genes={}
connections = 1
primary=0
secondary=0
terciary=0
for ensemblGene in query_db:
if ensemblGene in interaction_db:
for interacting_ensembl in interaction_db[ensemblGene]:
if interacting_ensembl not in blackList:
###Only allow direct interactions found in query
if interacting_ensembl in query_db:
try: query_interactions[ensemblGene].append(interacting_ensembl)
except KeyError: query_interactions[ensemblGene] = [interacting_ensembl]
try: query_interactions[interacting_ensembl].append(ensemblGene)
except KeyError: query_interactions[interacting_ensembl] = [ensemblGene]
primary+=1
if degrees == 2 or degrees == 'indirect':
try: interacting_genes[interacting_ensembl].append(ensemblGene)
except KeyError: interacting_genes[interacting_ensembl] = [ensemblGene]
elif degrees == 'allInteracting' or degrees == 'all possible':
try: query_interactions[ensemblGene].append(interacting_ensembl)
except KeyError: query_interactions[ensemblGene] = [interacting_ensembl]
if interacting_ensembl in secondaryQueryIDs: ### IDs in the expression file
secondary+=1 ### When indirect degrees selected, no additional power added by this (only for direct or shortest path)
try: query_interactions[ensemblGene].append(interacting_ensembl)
except KeyError: query_interactions[ensemblGene] = [interacting_ensembl]
if ensemblGene in second_degree_obligatory:
for interacting_ensembl in second_degree_obligatory[ensemblGene]:
try: interacting_genes[interacting_ensembl].append(ensemblGene)
except KeyError: interacting_genes[interacting_ensembl] = [ensemblGene]
### Include indirect interactions to secondaryQueryIDs from the expression file
if degrees == 2 or degrees == 'indirect':
for ensemblGene in secondaryQueryIDs:
if ensemblGene in interaction_db:
for interacting_ensembl in interaction_db[ensemblGene]:
if interacting_ensembl not in blackList:
try:
interacting_genes[interacting_ensembl].append(ensemblGene)
terciary+=1#; print interacting_ensembl
except KeyError: None ### Only increase the interacting_genes count if the interacting partner is present from the primary query list
#print primary,secondary,terciary
### Report the number of unique interacting genes
for interacting_ensembl in interacting_genes:
if len(interacting_genes[interacting_ensembl])==1:
interacting_genes[interacting_ensembl] = 1
else:
unique_interactions = unique.unique(interacting_genes[interacting_ensembl])
interacting_genes[interacting_ensembl] = len(unique_interactions)
query_indirect_interactions={}; indirect_interacting_gene_list=[]; interacting_gene_list=[]; added=[]
if degrees=='shortestPath' or degrees=='shortest path': ### Typically identifying the single smallest path(s) between two nodes.
query_indirect_interactions, indirect_interacting_gene_list, interacting_gene_list = evaluateShortestPath(query_db,interaction_db,10)
else:
if degrees==2 or degrees=='indirect' or len(secondDegreeObligatoryCategories)>0:
for ensembl in interacting_genes:
if interacting_genes[ensembl] > connections:
if ensembl in interaction_db: ### Only nodes removed due to promiscuity will not be found
for interacting_ensembl in interaction_db[ensembl]:
if interacting_ensembl in query_db or interacting_ensembl in secondaryQueryIDs:
try: query_indirect_interactions[interacting_ensembl].append(ensembl)
except KeyError: query_indirect_interactions[interacting_ensembl] = [ensembl]
###Record the highest linked nodes
indirect_interacting_gene_list.append((interacting_genes[ensembl],ensembl))
if len(obligatory_interactions)>0: ### Include always
all_reported_genes = combineDBs(query_interactions,query_indirect_interactions) ### combinesDBs and returns a unique list of genes
for ensemblGene in all_reported_genes: ###This only includes genes in the original input list
if ensemblGene in obligatory_interactions:
for interacting_ensembl in obligatory_interactions[ensemblGene]:
#symbol = ensembl_symbol_db[ensemblGene]
try: query_interactions[ensemblGene].append(interacting_ensembl)
except KeyError: query_interactions[ensemblGene] = [interacting_ensembl]
z = dict(query_interactions.items() + query_indirect_interactions.items())
interaction_restricted_db={}
for ensembl in z:
interacting_nodes = z[ensembl]
for node in interacting_nodes:
if ensembl in interaction_restricted_db:
db = interaction_restricted_db[ensembl]
db[node] = 1
else: interaction_restricted_db[ensembl] = {node:1}
if node in interaction_restricted_db:
db = interaction_restricted_db[node]
db[ensembl] = 1
else: interaction_restricted_db[node] = {ensembl:1}
if degrees==2 or degrees=='indirect': ### get rid of non-specific interactions
query_indirect_interactions, indirect_interacting_gene_list, interacting_gene_list = evaluateShortestPath(query_db,interaction_restricted_db,4)
###Record the highest linked nodes
for ensembl in query_interactions:
linked_nodes = len(unique.unique(query_interactions[ensembl]))
interacting_gene_list.append((linked_nodes,ensembl))
interacting_gene_list.sort(); interacting_gene_list.reverse()
indirect_interacting_gene_list.sort(); indirect_interacting_gene_list.reverse()
print "Length of query_interactions:",len(query_interactions)
query_interactions_unique=[]
for gene1 in query_interactions:
for gene2 in query_interactions[gene1]:
temp = []; temp.append(gene2); temp.append(gene1)#; temp.sort()
if gene1 == gene2: interaction_type = 'self'
else: interaction_type = 'distinct'
temp.append(interaction_type); temp.reverse()
query_interactions_unique.append(temp)
for gene1 in query_indirect_interactions:
for gene2 in query_indirect_interactions[gene1]:
temp = []; temp.append(gene2); temp.append(gene1)#; temp.sort()
if gene1 == gene2: interaction_type = 'self'
else: interaction_type = 'indirect'
temp.append(interaction_type); temp.reverse()
query_interactions_unique.append(temp)
query_interactions_unique = unique.unique(query_interactions_unique)
query_interactions_unique.sort()
###Write out nodes linked to many other nodes
new_file = outputDir+'/networks/'+file_name+ '-interactions_'+str(degrees)+'_degrees_summary.txt'
data = export.ExportFile(new_file)
for (linked_nodes,ensembl) in interacting_gene_list:
try: symbol = query_db[ensembl]
except KeyError: symbol = ensembl_symbol_db[ensembl]
data.write(str(linked_nodes)+'\t'+ensembl+'\t'+symbol+'\t'+'direct'+'\n')
for (linked_nodes,ensembl) in indirect_interacting_gene_list:
try: symbol = query_db[ensembl]
except KeyError:
try: symbol = ensembl_symbol_db[ensembl]
except KeyError: symbol = ensembl
if 'HMDB' in symbol:
try: symbol = hmdb_symbol_db[ensembl]
except Exception: pass
data.write(str(linked_nodes)+'\t'+ensembl+'\t'+symbol+'\t'+'indirect'+'\n')
data.close()
regulated_gene_db = query_db
sif_export,symbol_pair_unique = exportInteractionData(file_name,query_interactions_unique,regulated_gene_db)
return sif_export,symbol_pair_unique
def combineDBs(db1,db2):
### combinesDBs and returns a unique list of genes
new_db={}
for i in db1:
new_db[i]=[]
for k in db1[i]:
new_db[k]=[]
for i in db2:
new_db[i]=[]
for k in db2[i]:
new_db[k]=[]
return new_db
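### Editor's sketch (not part of the original AltAnalyze module): combineDBs simply flattens
### the keys and the listed values of both interaction dictionaries into one lookup of gene
### IDs. The toy IDs below are hypothetical; the call returns {'g1': [], 'g2': [], 'g3': [], 'g4': []}.
def _exampleCombineDBs():
    return combineDBs({'g1': ['g2']}, {'g3': ['g4']})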
def evaluateShortestPath(query_db,interaction_restricted_db,depth):
interactions_found=0
start_time = time.time()
query_indirect_interactions={}; indirect_interacting_gene_list=[]; interacting_gene_list=[]; added=[]
print 'Performing shortest path analysis on %s IDs...' % len(query_db),
for gene1 in query_db:
for gene2 in query_db:
if (gene1,gene2) not in added and (gene2,gene1) not in added:
if gene1 != gene2 and gene1 in interaction_restricted_db and gene2 in interaction_restricted_db:
try:
path = shortest_path(interaction_restricted_db,gene1,gene2,depth)
added.append((gene1,gene2))
i=1
while i<len(path): ### Add the relationship pairs
try: query_indirect_interactions[path[i-1]].append(path[i])
except Exception: query_indirect_interactions[path[i-1]]=[path[i]]
interactions_found+=1
i+=1
except Exception:
#tb = traceback.format_exc()
pass
if len(query_indirect_interactions)==0:
        print 'None of the query genes interact in the selected interaction databases...'; queryGeneError ### undefined name - intentionally raises a NameError to halt
print interactions_found, 'interactions found in', time.time()-start_time, 'seconds'
return query_indirect_interactions, indirect_interacting_gene_list, interacting_gene_list
def shortest_path(G, start, end, depth):
#http://code.activestate.com/recipes/119466-dijkstras-algorithm-for-shortest-paths/
import heapq
def flatten(L): # Flatten linked list of form [0,[1,[2,[]]]]
while len(L) > 0:
yield L[0]
L = L[1]
q = [(0, start, ())] # Heap of (cost, path_head, path_rest).
visited = set() # Visited vertices.
while True:
(cost, v1, path) = heapq.heappop(q)
if v1 not in visited and v1 in G:
visited.add(v1)
if v1 == end:
final_path = list(flatten(path))[::-1] + [v1]
if len(final_path)<depth:
return final_path
else:
return None
path = (v1, path)
for (v2, cost2) in G[v1].iteritems():
if v2 not in visited:
heapq.heappush(q, (cost + cost2, v2, path))
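### Editor's sketch (not part of the original AltAnalyze module): shortest_path runs a small
### Dijkstra search over the weighted adjacency dictionaries built above and only returns the
### node list when it is shorter than the depth cutoff. The three-node graph below is hypothetical.
def _exampleShortestPath():
    toy_graph = {'A': {'B': 1}, 'B': {'A': 1, 'C': 1}, 'C': {'B': 1}}
    return shortest_path(toy_graph, 'A', 'C', 4) ### -> ['A', 'B', 'C']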
def exportInteractionData(file_name,query_interactions_unique,regulated_gene_db):
file_name = string.replace(file_name,':','-')
new_file = outputDir+'/networks/'+file_name + '-interactions_'+str(degrees)+'.txt'
sif_export = outputDir+'/networks/'+file_name + '-interactions_'+str(degrees)+'.sif'
fn=filepath(new_file); fn2=filepath(sif_export)
data = open(fn,'w'); data2 = open(fn2,'w')
added = {} ### Don't add the same entry twice
symbol_added={}; symbol_pair_unique={}
for (interaction_type,gene1,gene2) in query_interactions_unique:
try: symbol1 = query_db[gene1]
except KeyError:
try: symbol1 = ensembl_symbol_db[gene1]
except KeyError: symbol1 = gene1
if 'HMDB' in symbol1:
symbol1 = hmdb_symbol_db[gene1]
try: symbol2 = query_db[gene2]
except KeyError:
try: symbol2 = ensembl_symbol_db[gene2]
except KeyError: symbol2 = gene2
if 'HMDB' in symbol2:
symbol2 = hmdb_symbol_db[gene2]
gene_pair = ''; symbol_pair=''; direction = 'interactsWith'
if (gene1,gene2) in interaction_annotation_dbase: gene_pair = gene1,gene2; symbol_pair = symbol1,symbol2
elif (gene2,gene1) in interaction_annotation_dbase: gene_pair = gene2,gene1; symbol_pair = symbol2,symbol1
        else: print gene1, gene2, symbol1, symbol2; kill ### 'kill' is undefined - raises a NameError to halt on an unexpected pair
if len(gene_pair)>0:
y = interaction_annotation_dbase[gene_pair]
gene1,gene2 = gene_pair ### This is the proper order of the interaction
symbol1,symbol2 = symbol_pair
interaction_type = y.InteractionType()
if interaction_type == 'drugInteraction':
### Switch their order
gene1, gene2, symbol1, symbol2 = gene2, gene1, symbol2, symbol1
direction = interaction_type
if (gene_pair,direction) not in added:
added[(gene_pair,direction)]=[]
data.write(gene1+'\t'+gene2+'\t'+symbol1+'\t'+symbol2+'\t'+interaction_type+'\n')
if len(symbol1)>1 and len(symbol2)>1 and (symbol_pair,direction) not in symbol_added:
if symbol1 != symbol2:
data2.write(symbol1+'\t'+direction+'\t'+symbol2+'\n')
symbol_added[(symbol_pair,direction)]=[]
symbol_pair_unique[symbol_pair]=[]
data.close(); data2.close()
print "Interaction data exported"
return sif_export,symbol_pair_unique
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
def importInteractionData(interactionDirs):
global interaction_db; interaction_db = {}
global interaction_annotation_dbase; interaction_annotation_dbase = {}
global obligatory_interactions; obligatory_interactions={}
global second_degree_obligatory; second_degree_obligatory={}
global blackList; blackList = {}
    ### Collect both Human and Mouse interactions (Mouse directly sorted in interaction_db)
importInteractionDatabases(interactionDirs)
def interactionPermuteTest(species,Degrees,inputType,inputDir,outputdir,interactionDirs,Genes=None,
geneSetType=None,PathwayFilter=None,OntologyID=None,directory=None,expressionFile=None,
obligatorySet=None,secondarySet=None,IncludeExpIDs=False):
global degrees
global outputDir
global inputDataType
global obligatoryList ### Add these if connected to anything
global secondaryQueryIDs
global secondDegreeObligatoryCategories ### Add if common to anything in the input - Indicates systems to apply this to
global symbol_hmdb_db; symbol_hmdb_db={}; global hmdb_symbol_db; hmdb_symbol_db={} ### Create an annotation database for HMDB IDs
global FileName
secondaryQueryIDs = {}
degrees = Degrees
outputDir = outputdir
inputDataType = inputType
obligatoryList = obligatorySet
secondDegreeObligatoryCategories=[]
if obligatoryList == None:
obligatoryList=[]
if expressionFile == None:
expressionFile = inputDir ### If it doesn't contain expression values, view as yellow nodes
if secondarySet!= None and (degrees==1 or degrees=='direct'): ### If degrees == 2, this is redundant
        ### This currently adds a lot of predictions - either make it more stringent or exclude it for now
secondDegreeObligatoryCategories = secondarySet
if PathwayFilter != None: FileName = PathwayFilter
elif OntologyID != None: FileName = OntologyID
elif Genes != None: FileName = Genes
### Import Ensembl-Symbol annotations
getEnsemblGeneData('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations.txt')
### Import interaction databases indicated in interactionDirs
importInteractionData(interactionDirs)
getHMDBData(species) ### overwrite the symbol annotation from any HMDB that comes from a WikiPathway or KEGG pathway that we also include (for consistent official annotation)
input_IDs = getGeneIDs(Genes)
try: input_IDs = gene_associations.simpleGenePathwayImport(species,geneSetType,PathwayFilter,OntologyID,directory)
except Exception: None
permutations = 10000; p = 0
secondaryQueryIDs = importqueryResults(species,expressionFile,{})[0]
input_IDs,query_interactions,dir_file = importqueryResults(species,inputDir,input_IDs) ### Get the number of unique genes
sif_file, original_symbol_pair_unique = associateQueryGenesWithInteractions(input_IDs,query_interactions,dir_file)
#print len(original_symbol_pair_unique)
ensembl_unique = map(lambda x: x, ensembl_symbol_db)
interaction_lengths = []
import random
while p < permutations:
random_inputs = random.sample(ensembl_unique,len(input_IDs))
random_input_db={}
#print len(random_inputs), len(input_IDs); sys.exit()
for i in random_inputs: random_input_db[i]=i
secondaryQueryIDs = importqueryResults(species,random_inputs,{})[0]
input_IDs,query_interactions,dir_file = importqueryResults(species,inputDir,input_IDs)
sif_file, symbol_pair_unique = associateQueryGenesWithInteractions(input_IDs,query_interactions,inputDir)
#print len(symbol_pair_unique);sys.exit()
interaction_lengths.append(len(symbol_pair_unique))
p+=1
interaction_lengths.sort(); interaction_lengths.reverse()
y = len(original_symbol_pair_unique)
print 'permuted length distribution:',interaction_lengths
print 'original length:',y
k=0
for i in interaction_lengths:
if i>=y: k+=1
print 'p-value:',float(k)/float(permutations)
def buildInteractions(species,Degrees,inputType,inputDir,outputdir,interactionDirs,Genes=None,
geneSetType=None,PathwayFilter=None,OntologyID=None,directory=None,expressionFile=None,
obligatorySet=None,secondarySet=None,IncludeExpIDs=False):
global degrees
global outputDir
global inputDataType
global obligatoryList ### Add these if connected to anything
global secondaryQueryIDs
global secondDegreeObligatoryCategories ### Add if common to anything in the input - Indicates systems to apply this to
global symbol_hmdb_db; symbol_hmdb_db={}; global hmdb_symbol_db; hmdb_symbol_db={} ### Create an annotation database for HMDB IDs
global FileName
global intNameShort
secondaryQueryIDs = {}
degrees = Degrees
outputDir = outputdir
inputDataType = inputType
obligatoryList = obligatorySet
secondDegreeObligatoryCategories=[]
intNameShort=''
if obligatoryList == None:
obligatoryList=[]
if expressionFile == None:
expressionFile = inputDir ### If it doesn't contain expression values, view as yellow nodes
if secondarySet!= None and (degrees==1 or degrees=='direct'): ### If degrees == 2, this is redundant
        ### This currently adds a lot of predictions - either make it more stringent or exclude it for now
secondDegreeObligatoryCategories = secondarySet
if PathwayFilter != None:
if len(PathwayFilter)==1:
FileName = PathwayFilter[0]
if isinstance(PathwayFilter, tuple) or isinstance(PathwayFilter, list):
FileName = string.join(list(PathwayFilter),' ')
FileName = string.replace(FileName,':','-')
else:
FileName = PathwayFilter
if len(FileName)>40:
FileName = FileName[:40]
elif OntologyID != None: FileName = OntologyID
elif Genes != None: FileName = Genes
### Import Ensembl-Symbol annotations
getEnsemblGeneData('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl-annotations.txt')
if len(interactionDirs[0]) == 1: interactionDirs = [interactionDirs]
### Import interaction databases indicated in interactionDirs
for i in interactionDirs:
print i
i = export.findFilename(i)
i=string.split(i,'-')[1]
intNameShort+=i[0]
importInteractionData(interactionDirs)
getHMDBData(species) ### overwrite the symbol annotation from any HMDB that comes from a WikiPathway or KEGG pathway that we also include (for consistent official annotation)
input_IDs = getGeneIDs(Genes)
try:
if isinstance(PathwayFilter, tuple):
for pathway in PathwayFilter:
IDs = gene_associations.simpleGenePathwayImport(species,geneSetType,pathway,OntologyID,directory)
for id in IDs:input_IDs[id]=None
else:
input_IDs = gene_associations.simpleGenePathwayImport(species,geneSetType,PathwayFilter,OntologyID,directory)
except Exception: None
if expressionFile == None or len(expressionFile)==0:
expressionFile = exportSelectedIDs(input_IDs) ### create an expression file
elif IncludeExpIDs: ### Prioritize selection of IDs for interactions WITH the primary query set (not among expression input IDs)
secondaryQueryIDs = importqueryResults(species,expressionFile,{})[0]
input_IDs,query_interactions,dir_file = importqueryResults(species,inputDir,input_IDs)
sif_file,symbol_pair_unique = associateQueryGenesWithInteractions(input_IDs,query_interactions,dir_file)
output_filename = exportGraphImage(species,sif_file,expressionFile)
return output_filename
def exportSelectedIDs(input_IDs):
expressionFile = outputDir+'/networks/IDList.txt'
data = export.ExportFile(expressionFile)
data.write('UID\tSystemCode\n')
for id in input_IDs:
if 'HMDB' in id:
id = hmdb_symbol_db[id]
data.write(id+'\tEn\n')
data.close()
return expressionFile
def exportGraphImage(species,sif_file,expressionFile):
import clustering
output_filename = clustering.buildGraphFromSIF('Ensembl',species,sif_file,expressionFile)
return output_filename
def getGeneIDs(Genes):
input_IDs={}
if Genes == None: None
elif len(Genes)>0:
### Get IDs from list of gene IDs
Genes=string.replace(Genes,'|',',')
Genes=string.replace(Genes,' ',',')
if ',' in Genes: Genes = string.split(Genes,',')
else: Genes = [Genes]
for i in Genes:
if len(i)>0:
if i in symbol_ensembl_db:
for ensembl in symbol_ensembl_db[i]:
input_IDs[ensembl]=i ### Translate to Ensembl
elif i in symbol_hmdb_db:
hmdb=symbol_hmdb_db[i]
symbol = hmdb_symbol_db[hmdb] ### Get the official symbol
input_IDs[hmdb]=symbol ### Translate to HMDB
else:
try: input_IDs[i] = ensembl_symbol_db[i] ### If an input Ensembl ID
except Exception: input_IDs[i] = i ### Currently not dealt with
return input_IDs
if __name__ == '__main__':
Species = 'Hs'
Degrees = 2
inputType = 'IDs'
inputDir=''
inputDir='/Users/nsalomonis/Desktop/dataAnalysis/Sarwal/Urine-AR-increased/met/networks/AltAnalyze-network_Met.inceased_AR_1.5fold_metabolite-interactions_shortest path.sif'
inputDir='/Users/saljh8/Documents/1-dataAnalysis/PaulTang/ARVC_genes.txt'
obligatorySet = []#['drugInteraction']#'microRNAInteraction'
Genes = 'POU5F1,NANOG,TCF7L1,WNT1,CTNNB1,SOX2,TCF4,GSK3B'
Genes = 'Glucose'; Degrees = 'shortestPath'; Degrees = 'indirect'; Degrees = 'all possible'
Genes = ''; Degrees='indirect'
interactionDirs = []
Genes=''
outputdir = filepath('AltAnalyze/test')
outputdir = '/Users/saljh8/Desktop/Archived/Documents/1-manuscripts/Salomonis/SIDS-WikiPathways/Interactomics/'
interaction_root = 'AltDatabase/goelite/'+Species+'/gene-interactions'
files = read_directory('AltDatabase/goelite/'+Species+'/gene-interactions')
rooot = '/Users/nsalomonis/Desktop/dataAnalysis/Sarwal/CTOTC/AltAnalyze Based/GO-Elite/MarkerFinder/'
expressionFile=None
expressionFile = '/Users/nsalomonis/Desktop/dataAnalysis/Sarwal/Urine-AR-increased/UrinProteomics_Kidney-All/GO-Elite/input/GE.AR_vs_STA-fold1.5_rawp0.05.txt'
expressionFile = '/Users/nsalomonis/Desktop/dataAnalysis/Sarwal/BKVN infection/GO-Elite/input/AR_vs_norm_adjp05.txt'
expressionFile = '/Users/nsalomonis/Desktop/dataAnalysis/Sarwal/Blood AR-BK/AR-STA/Batches/overlap/AR_vs_STA_p0.05_fold1_common.txt'
expressionFile=None
#files2 = read_directory(rooot)
#inputType = 'SIF'
for file in files:
if 'micro' not in file and 'all-Drug' not in file and 'GRID' not in file and 'Drug' not in file and 'TF' not in file: # and 'TF' not in file and 'KEGG' not in file:
interactionDirs.append(filepath(interaction_root+'/'+file))
#"""
inputDir='/Users/saljh8/Desktop/Archived/Documents/1-manuscripts/Salomonis/SIDS-WikiPathways/Interactomics/CoreGeneSet67/core_SIDS.txt'
expressionFile = '/Users/saljh8/Desktop/Archived/Documents/1-manuscripts/Salomonis/SIDS-WikiPathways/Interactomics/Proteomics/proteomics_kinney.txt'
interactionPermuteTest(Species,Degrees,inputType,inputDir,outputdir,interactionDirs,Genes=Genes,obligatorySet=obligatorySet,expressionFile=expressionFile, IncludeExpIDs=True)
sys.exit()
buildInteractions(Species,Degrees,inputType,inputDir,outputdir,interactionDirs,Genes=Genes,obligatorySet=obligatorySet,expressionFile=expressionFile, IncludeExpIDs=True)
sys.exit()
#"""
#canonical Wnt signaling: GO:0060070
# BioMarkers 'Pluripotent Stem Cells' 'gene-mapp'
#inputDir = '/Users/nsalomonis/Desktop/dataAnalysis/Sarwal/Diabetes-Blood/ACR/log2/MergedFiles-Symbol_ACR.txt'
#inputDir = '/Users/nsalomonis/Desktop/dataAnalysis/SplicingFactors/RBM20_splicing_network.txt'; inputType = 'SIF'
#inputDir = '/Users/nsalomonis/Documents/1-manuscripts/Salomonis/SIDS-WikiPathways/67_SIDS-genes.txt'
#Genes=None
#exportGraphImage(Species,'/Users/nsalomonis/Desktop/AltAnalyze/AltAnalyze/test/networks/AltAnalyze-network-interactions_1degrees.sif',inputDir);sys.exit()
#buildInteractions(Species,Degrees,inputType,inputDir,outputdir,interactionDirs,Genes=None,obligatorySet=obligatorySet,geneSetType='BioMarkers',PathwayFilter='Pluripotent Stem Cells',directory='gene-mapp')
buildInteractions(Species,Degrees,inputType,inputDir,outputdir,interactionDirs,Genes=Genes,obligatorySet=obligatorySet,expressionFile=expressionFile)
| wuxue/altanalyze | InteractionBuilder.py | Python | apache-2.0 | 45,237 |
# Copyright 2012 Vincent Jacques
# [email protected]
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import GithubObject
import GitAuthor
import GitObject
class GitTag( GithubObject.GithubObject ):
@property
def message( self ):
self._completeIfNotSet( self._message )
return self._NoneIfNotSet( self._message )
@property
def object( self ):
self._completeIfNotSet( self._object )
return self._NoneIfNotSet( self._object )
@property
def sha( self ):
self._completeIfNotSet( self._sha )
return self._NoneIfNotSet( self._sha )
@property
def tag( self ):
self._completeIfNotSet( self._tag )
return self._NoneIfNotSet( self._tag )
@property
def tagger( self ):
self._completeIfNotSet( self._tagger )
return self._NoneIfNotSet( self._tagger )
@property
def url( self ):
self._completeIfNotSet( self._url )
return self._NoneIfNotSet( self._url )
def _initAttributes( self ):
self._message = GithubObject.NotSet
self._object = GithubObject.NotSet
self._sha = GithubObject.NotSet
self._tag = GithubObject.NotSet
self._tagger = GithubObject.NotSet
self._url = GithubObject.NotSet
def _useAttributes( self, attributes ):
if "message" in attributes: # pragma no branch
assert attributes[ "message" ] is None or isinstance( attributes[ "message" ], ( str, unicode ) ), attributes[ "message" ]
self._message = attributes[ "message" ]
if "object" in attributes: # pragma no branch
assert attributes[ "object" ] is None or isinstance( attributes[ "object" ], dict ), attributes[ "object" ]
self._object = None if attributes[ "object" ] is None else GitObject.GitObject( self._requester, attributes[ "object" ], completed = False )
if "sha" in attributes: # pragma no branch
assert attributes[ "sha" ] is None or isinstance( attributes[ "sha" ], ( str, unicode ) ), attributes[ "sha" ]
self._sha = attributes[ "sha" ]
if "tag" in attributes: # pragma no branch
assert attributes[ "tag" ] is None or isinstance( attributes[ "tag" ], ( str, unicode ) ), attributes[ "tag" ]
self._tag = attributes[ "tag" ]
if "tagger" in attributes: # pragma no branch
assert attributes[ "tagger" ] is None or isinstance( attributes[ "tagger" ], dict ), attributes[ "tagger" ]
self._tagger = None if attributes[ "tagger" ] is None else GitAuthor.GitAuthor( self._requester, attributes[ "tagger" ], completed = False )
if "url" in attributes: # pragma no branch
assert attributes[ "url" ] is None or isinstance( attributes[ "url" ], ( str, unicode ) ), attributes[ "url" ]
self._url = attributes[ "url" ]
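# Editor's sketch (not part of PyGithub): a GitTag is normally obtained through the lazy
# attribute-completion API above rather than constructed directly. Repository.get_git_tag()
# is assumed to be available in this release, as in neighbouring PyGithub versions, and a
# network round-trip is required to actually run this helper.
def _example_read_tag( repo, tag_sha ):
    git_tag = repo.get_git_tag( tag_sha ) # returns a GitTag; each property completes lazily
    return git_tag.tag, git_tag.message, git_tag.sha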
| sagarsane/abetterportfolio | github/GitTag.py | Python | apache-2.0 | 3,535 |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import logging
from taskflow.patterns import graph_flow
from taskflow import task
from pumphouse import events
from pumphouse import exceptions
from pumphouse import flows
from pumphouse.tasks import service as service_tasks
from pumphouse import task as pump_task
from pumphouse import utils
LOG = logging.getLogger(__name__)
assignment = flows.register("assignment", default="fixed")
def extract_macs(info):
    # Build a canonical, order-independent tuple of MAC addresses so that two calls on the
    # same node always compare equal (plain tuple(set(...)) ordering is not guaranteed).
    macs = set(i["mac"] for i in info["meta"]["interfaces"])
    return tuple(sorted(macs))
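# Editor's sketch (not part of pumphouse): extract_macs collapses the interface list of a
# Fuel node description into a canonical tuple of MAC addresses; the node dictionary below
# is hypothetical.
def _example_extract_macs():
    node_info = {"meta": {"interfaces": [{"mac": "aa:bb"}, {"mac": "cc:dd"}, {"mac": "aa:bb"}]}}
    return extract_macs(node_info) # -> ("aa:bb", "cc:dd") after de-duplication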
class RetrieveAllEnvironments(task.Task):
def execute(self):
from pumphouse._vendor.fuelclient.objects import environment
envs = dict((env.data["name"], env.data)
for env in environment.Environment.get_all())
return envs
class RetrieveEnvironment(task.Task):
def execute(self, envs_infos, env_name):
return envs_infos[env_name]
class RetrieveEnvNodes(task.Task):
def execute(self, env_info):
from pumphouse._vendor.fuelclient.objects import environment
env = environment.Environment.init_with_data(env_info)
nodes = dict((node.data["fqdn"], node.data)
for node in env.get_all_nodes())
return nodes
class RetrieveNode(task.Task):
def execute(self, nodes_infos, hostname):
return nodes_infos[hostname]
class DeployChanges(pump_task.BaseCloudTask):
def execute(self, env_info, **nodes_info):
from pumphouse._vendor.fuelclient.objects import environment
env = environment.Environment.init_with_data(env_info)
task = env.deploy_changes()
unassigned = set(node_info["id"]
for node_info in nodes_info.itervalues()
if node_info["status"] == "discover")
watched_macs = set(extract_macs(node_info)
for node_info in nodes_info.itervalues())
for progress, nodes in task:
for node in nodes:
node_macs = extract_macs(node.data)
if node_macs in watched_macs:
if node.data["id"] in unassigned:
unassigned.discard(node.data["id"])
self.assign_event(node)
self.provisioning_event(progress, node)
env.update()
return env.data
def revert(self, env_info, result, flow_failures, **nodes_info):
LOG.error("Deploying of changes failed for env %r with result %r",
env_info, result)
def provisioning_event(self, progress, node):
LOG.debug("Waiting for deploy: %r, %r", progress, node)
events.emit("update", {
"id": node.data["fqdn"],
"type": "host",
"cloud": self.cloud.name,
"progress": node.data["progress"],
"data": {
"status": node.data["status"],
}
}, namespace="/events")
def assign_event(self, node):
hostname = node.data["fqdn"]
events.emit("create", {
"id": hostname,
"cloud": self.cloud.name,
"type": "host",
"action": "reassignment",
"data": {
"name": hostname,
}
}, namespace="/events")
class ChooseAnyComputeNode(task.Task):
def execute(self, nodes_infos, env_info):
# XXX(akscram): The source of the configuration is the first
# node with the `compute` role.
compute_nodes = [info
for info in nodes_infos.values()
if "compute" in info["roles"]]
if not compute_nodes:
raise exceptions.Conflict("There is no any compute nodes in "
"environment %r" % (env_info,))
compute_node = compute_nodes[0]
return compute_node
class ExtractRolesFromNode(task.Task):
def execute(self, node_info):
return node_info["roles"]
class ExtractDisksFromNode(task.Task):
def execute(self, node_info):
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
disks = node.get_attribute("disks")
return [{
"name": d["name"],
"size": d["size"],
"volumes": d["volumes"],
} for d in disks]
class ExtractIfacesFromNode(task.Task):
def execute(self, node_info):
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
ifaces = node.get_attribute("interfaces")
return [{
"name": i["name"],
"assigned_networks": i["assigned_networks"],
} for i in ifaces]
class ExtractNetworkDataFromEnv(task.Task):
def execute(self, env_info):
from pumphouse._vendor.fuelclient.objects import environment
env = environment.Environment.init_with_data(env_info)
network_data = env.get_network_data()
return network_data
class PopulateIfacesWithIDs(task.Task):
def execute(self, network_data, ifaces):
ifaces_ids = {n["name"]: n["id"] for n in network_data["networks"]}
ifaces_with_ids = [{
"name": i["name"],
"assigned_networks": [{
"id": ifaces_ids[a],
"name": a,
} for a in i["assigned_networks"]],
} for i in ifaces]
return ifaces_with_ids
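# Editor's sketch (not part of pumphouse): PopulateIfacesWithIDs only rewrites plain
# dictionaries, so its logic can be exercised without a Fuel environment. The network and
# interface data below are hypothetical, and instantiating the task outside a flow relies
# on taskflow's default task name.
def _example_populate_ifaces():
    network_data = {"networks": [{"name": "public", "id": 2}]}
    ifaces = [{"name": "eth0", "assigned_networks": ["public"]}]
    return PopulateIfacesWithIDs().execute(network_data, ifaces)
    # -> [{"name": "eth0", "assigned_networks": [{"id": 2, "name": "public"}]}]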
class ApplyDisksAttributesFromNode(task.Task):
def execute(self, disks, node_info):
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
node_disks = node.get_attribute("disks")
changed_disks = self.update_disks_attrs(disks, node_disks)
node.upload_node_attribute("disks", changed_disks)
node.update()
return node.data
def update_disks_attrs(self, disks1, disks2):
"""Updates geometries of partitions.
        Returns a new list built from the elements of disks2 with the
        partition geometry (volume sizes) taken from disks1.
"""
def to_dict(attrs):
return dict((attr["name"], attr) for attr in attrs)
attrs = []
disks_dict1 = to_dict(disks1)
for disk in disks2:
volumes = [{"name": v["name"],
"size": v["size"]}
for v in disks_dict1[disk["name"]]["volumes"]]
attrs.append({
"id": disk["id"],
"size": disk["size"],
"volumes": volumes,
})
return attrs
class ApplyNetAttributesFromNode(task.Task):
def execute(self, ifaces, node_info):
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
node_ifaces = node.get_attribute("interfaces")
changed_ifaces = self.update_ifaces_attrs(ifaces, node_ifaces)
node.upload_node_attribute("interfaces", changed_ifaces)
node.update()
return node.data
def update_ifaces_attrs(self, ifaces1, ifaces2):
"""Updates configuration of network interfaces.
        Returns a new list built from the elements of ifaces2 with the
        network assignments taken from ifaces1.
"""
def to_dict(attrs):
return dict((attr["name"], attr) for attr in attrs)
attrs = []
ifaces_dict1 = to_dict(ifaces1)
for iface in ifaces2:
attrs.append({
"id": iface["id"],
"type": iface["type"],
"assigned_networks":
ifaces_dict1[iface["name"]]["assigned_networks"],
})
return attrs
class WaitUnassignedNode(task.Task):
def execute(self, node_info, **requires):
condition_check = lambda x: x is not None
node_macs = extract_macs(node_info)
unassigned_node_info = utils.wait_for(node_macs,
self.retrieve_unassigned,
attribute_getter=condition_check,
value=True,
timeout=360)
return unassigned_node_info
def retrieve_unassigned(self, node_macs):
from pumphouse._vendor.fuelclient.objects.node import Node
for node in Node.get_all():
if (node.data["status"] == "discover" and
extract_macs(node.data) == node_macs):
return node.data
# TODO(akscram): Raise an exception when status is error.
return None
class UnassignNode(pump_task.BaseCloudTask):
def execute(self, node_info, env_info):
from pumphouse._vendor.fuelclient.objects import environment
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
env = environment.Environment.init_with_data(env_info)
env.unassign((node.id,))
node.update()
self.unassign_start_event(node)
return node.data
def unassign_start_event(self, node):
events.emit("update", {
"id": node.data["fqdn"],
"cloud": self.cloud.name,
"type": "host",
"action": "reassignment",
}, namespace="/events")
class HostsDeleteEvents(pump_task.BaseCloudTask):
def execute(self, services):
        # XXX(akscram): Some number of unexpected events may be emitted here.
for service in services:
if service["binary"] == "nova-compute":
self.delete_event(service)
def delete_event(self, service):
events.emit("delete", {
"id": service["host"],
"cloud": self.cloud.name,
"type": "host",
}, namespace="/events")
class AssignNode(pump_task.BaseCloudTask):
def execute(self, node_info, node_roles, env_info):
from pumphouse._vendor.fuelclient.objects import environment
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
env = environment.Environment.init_with_data(env_info)
env.assign((node,), node_roles)
node.update()
return node.data
class UpdateNodeInfo(task.Task):
def execute(self, node_info, **requires):
from pumphouse._vendor.fuelclient.objects.node import Node
node = Node.init_with_data(node_info)
node.update()
return node.data
class GetNodeHostname(task.Task):
def execute(self, node_info):
return node_info["fqdn"]
class HostsSuccessEvents(pump_task.BaseCloudTask):
def execute(self, services):
        # XXX(akscram): Some number of unexpected events may be emitted here.
for service in services:
self.update_event(service)
def update_event(self, service):
events.emit("update", {
"id": service["host"],
"cloud": self.cloud.name,
"type": "host",
"progress": None,
"action": None,
}, namespace="/events")
def unassign_node(context, flow, env_name, hostname):
env = "src-env-{}".format(env_name)
deployed_env = "src-env-deployed-{}".format(env_name)
env_nodes = "src-env-nodes-{}".format(env_name)
node = "node-{}".format(hostname)
pending_node = "node-pending-{}".format(hostname)
unassigned_node = "node-unassigned-{}".format(hostname)
flow.add(
RetrieveEnvNodes(name=env_nodes,
provides=env_nodes,
rebind=[env]),
RetrieveNode(name=node,
provides=node,
rebind=[env_nodes],
inject={"hostname": hostname}),
UnassignNode(context.src_cloud,
name=pending_node,
provides=pending_node,
rebind=[node, env]),
DeployChanges(context.src_cloud,
name=deployed_env,
provides=deployed_env,
rebind=[env],
requires=[pending_node]),
WaitUnassignedNode(name=unassigned_node,
provides=unassigned_node,
rebind=[pending_node],
requires=[deployed_env]),
)
class DeleteServicesFromNode(service_tasks.DeleteServicesSilently):
def execute(self, node_info, **requires):
return super(DeleteServicesFromNode, self).execute(node_info["fqdn"])
def remove_computes(context, flow, env_name, hostname):
deployed_env = "src-env-deployed-{}".format(env_name)
pending_node = "node-pending-{}".format(hostname)
delete_services = "services-delete-{}".format(hostname)
delete_services_events = "services-delete-events-{}".format(hostname)
flow.add(
DeleteServicesFromNode(context.src_cloud,
name=delete_services,
provides=delete_services,
rebind=[pending_node],
inject={"hostname": hostname},
requires=[deployed_env]),
HostsDeleteEvents(context.src_cloud,
name=delete_services_events,
rebind=[delete_services]),
)
@assignment.add("discovery")
def assignment_discovery(context, flow, env_name, hostname):
env = "dst-env-{}".format(env_name)
env_nodes = "dst-env-nodes-{}".format(env_name)
compute_node = "node-compute-{}".format(env_name)
compute_roles = "compute-roles-{}".format(env_name)
compute_disks = "compute-disks-{}".format(env_name)
compute_ifaces = "compute-ifaces-{}".format(env_name)
flow.add(
RetrieveEnvNodes(name=env_nodes,
provides=env_nodes,
rebind=[env]),
ChooseAnyComputeNode(name=compute_node,
provides=compute_node,
rebind=[env_nodes, env]),
ExtractRolesFromNode(name=compute_roles,
provides=compute_roles,
rebind=[compute_node]),
ExtractDisksFromNode(name=compute_disks,
provides=compute_disks,
rebind=[compute_node]),
ExtractIfacesFromNode(name=compute_ifaces,
provides=compute_ifaces,
rebind=[compute_node]),
)
@assignment.add("fixed")
def assignment_fixed(context, flow, env_name, hostname):
env = "dst-env-{}".format(env_name)
env_network = "dst-env-network-{}".format(env_name)
compute_roles = "compute-roles-{}".format(env_name)
compute_disks = "compute-disks-{}".format(env_name)
compute_ifaces = "compute-ifaces-{}".format(env_name)
params = context.config["assignment_parameters"]
context.store.update({
compute_roles: params["roles"],
compute_disks: params["disks"],
})
flow.add(
ExtractNetworkDataFromEnv(name=env_network,
provides=env_network,
rebind=[env]),
PopulateIfacesWithIDs(name=compute_ifaces,
provides=compute_ifaces,
rebind=[env_network],
inject={"ifaces": params["ifaces"]}),
)
def assign_node(context, flow, env_name, hostname):
env = "dst-env-{}".format(env_name)
deployed_env = "dst-env-deployed-{}".format(env_name)
env_nodes = "dst-env-nodes-{}".format(env_name)
unassigned_node = "node-unassigned-{}".format(hostname)
assigned_node = "node-assigned-{}".format(hostname)
compute_roles = "compute-roles-{}".format(env_name)
compute_disks = "compute-disks-{}".format(env_name)
compute_ifaces = "compute-ifaces-{}".format(env_name)
node_with_disks = "node-with-disks-{}".format(hostname)
node_with_nets = "node-with-nets-{}".format(hostname)
flow.add(
AssignNode(context.dst_cloud,
name=assigned_node,
provides=assigned_node,
rebind=[unassigned_node, compute_roles, env]),
ApplyDisksAttributesFromNode(name=node_with_disks,
provides=node_with_disks,
rebind=[compute_disks, assigned_node]),
ApplyNetAttributesFromNode(name=node_with_nets,
provides=node_with_nets,
rebind=[compute_ifaces, assigned_node]),
DeployChanges(context.dst_cloud,
name=deployed_env,
provides=deployed_env,
rebind=[env],
requires=[node_with_disks, node_with_nets]),
)
def wait_computes(context, flow, env_name, hostname):
deployed_env = "dst-env-deployed-{}".format(env_name)
assigned_node = "node-assigned-{}".format(hostname)
updated_assigned_node = "node-assigned-updated-{}".format(hostname)
    assigned_node_hostname = "node-assigned-hostname-{}".format(hostname)
wait_computes = "wait-computes-{}".format(hostname)
host_success_events = "node-success-events-{}".format(hostname)
flow.add(
UpdateNodeInfo(name=updated_assigned_node,
provides=updated_assigned_node,
rebind=[assigned_node],
requires=[deployed_env]),
GetNodeHostname(name=assigned_node_hostname,
provides=assigned_node_hostname,
rebind=[updated_assigned_node]),
service_tasks.WaitComputesServices(context.dst_cloud,
name=wait_computes,
provides=wait_computes,
rebind=[assigned_node_hostname],
requires=[deployed_env]),
HostsSuccessEvents(context.dst_cloud,
name=host_success_events,
rebind=[wait_computes]),
)
def reassign_node(context, hostname):
src_env_name = context.config["source"]
dst_env_name = context.config["destination"]
envs = "all-environments"
src_env = "src-env-{}".format(src_env_name)
dst_env = "dst-env-{}".format(dst_env_name)
src_env_nodes = "src-env-nodes-{}".format(src_env_name)
flow = graph_flow.Flow(name="reassign-node-{}".format(hostname))
flow.add(
RetrieveAllEnvironments(name=envs,
provides=envs),
# Source
RetrieveEnvironment(name=src_env,
provides=src_env,
rebind=[envs],
inject={"env_name": src_env_name}),
# Destination
RetrieveEnvironment(name=dst_env,
provides=dst_env,
rebind=[envs],
inject={"env_name": dst_env_name}),
)
unassign_node(context, flow, src_env_name, hostname)
remove_computes(context, flow, src_env_name, hostname)
assignment(context, flow, dst_env_name, hostname)
assign_node(context, flow, dst_env_name, hostname)
wait_computes(context, flow, dst_env_name, hostname)
return flow
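# Editor's sketch (not part of pumphouse): reassign_node() returns an ordinary TaskFlow graph
# flow, so outside of pumphouse's own runner it could be driven with the generic engine
# helper below; `context` is assumed to supply the clouds, config and pre-seeded store that
# the tasks above expect, and the real entry point in pumphouse may differ.
def _example_run_reassignment(context, hostname):
    from taskflow import engines # generic TaskFlow engine front-end
    flow = reassign_node(context, hostname)
    return engines.run(flow, store=context.store)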
| Mirantis/pumphouse | pumphouse/tasks/node.py | Python | apache-2.0 | 19,990 |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.networks.categorical_q_network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.networks import categorical_q_network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.utils import test_utils
class CategoricalQNetworkTest(test_utils.TestCase):
def tearDown(self):
gin.clear_config()
super(CategoricalQNetworkTest, self).tearDown()
def testBuild(self):
batch_size = 3
num_state_dims = 5
action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
num_actions = action_spec.maximum - action_spec.minimum + 1
self.assertEqual(num_actions, 2)
observations_spec = tensor_spec.TensorSpec([num_state_dims], tf.float32)
observations = tf.random.uniform([batch_size, num_state_dims])
time_steps = ts.restart(observations, batch_size)
q_network = categorical_q_network.CategoricalQNetwork(
input_tensor_spec=observations_spec,
action_spec=action_spec,
fc_layer_params=[3])
logits, _ = q_network(time_steps.observation)
self.assertAllEqual(logits.shape.as_list(),
[batch_size, num_actions, q_network._num_atoms])
# There are two trainable layers here: the specified fc_layer and the final
# logits layer. Each layer has two trainable_variables (kernel and bias),
# for a total of 4.
self.assertLen(q_network.trainable_variables, 4)
def testChangeHiddenLayers(self):
batch_size = 3
num_state_dims = 5
action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
num_actions = action_spec.maximum - action_spec.minimum + 1
self.assertEqual(num_actions, 2)
observations_spec = tensor_spec.TensorSpec([num_state_dims], tf.float32)
observations = tf.random.uniform([batch_size, num_state_dims])
time_steps = ts.restart(observations, batch_size)
q_network = categorical_q_network.CategoricalQNetwork(
input_tensor_spec=observations_spec,
action_spec=action_spec,
fc_layer_params=[3, 3])
logits, _ = q_network(time_steps.observation)
self.assertAllEqual(logits.shape.as_list(),
[batch_size, num_actions, q_network._num_atoms])
# This time there is an extra fc layer, for a total of 6
# trainable_variables.
self.assertLen(q_network.trainable_variables, 6)
def testAddConvLayers(self):
batch_size = 3
num_state_dims = 5
action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
num_actions = action_spec.maximum - action_spec.minimum + 1
self.assertEqual(num_actions, 2)
observations_spec = tensor_spec.TensorSpec(
[3, 3, num_state_dims], tf.float32)
observations = tf.random.uniform([batch_size, 3, 3, num_state_dims])
time_steps = ts.restart(observations, batch_size)
q_network = categorical_q_network.CategoricalQNetwork(
input_tensor_spec=observations_spec,
action_spec=action_spec,
conv_layer_params=[(16, 2, 1), (15, 2, 1)])
logits, _ = q_network(time_steps.observation)
self.assertAllEqual(logits.shape.as_list(),
[batch_size, num_actions, q_network._num_atoms])
# This time there are two conv layers and one final logits layer, for a
# total of 6 trainable_variables.
self.assertLen(q_network.trainable_variables, 6)
def testCorrectOutputShape(self):
batch_size = 3
num_state_dims = 5
action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
num_actions = action_spec.maximum - action_spec.minimum + 1
self.assertEqual(num_actions, 2)
observations_spec = tensor_spec.TensorSpec([num_state_dims], tf.float32)
observations = tf.random.uniform([batch_size, num_state_dims])
time_steps = ts.restart(observations, batch_size)
q_network = categorical_q_network.CategoricalQNetwork(
input_tensor_spec=observations_spec,
action_spec=action_spec,
fc_layer_params=[3])
logits, _ = q_network(time_steps.observation)
self.assertAllEqual(logits.shape.as_list(),
[batch_size, num_actions, q_network._num_atoms])
self.evaluate(tf.compat.v1.global_variables_initializer())
eval_logits = self.evaluate(logits)
self.assertAllEqual(
eval_logits.shape, [batch_size, num_actions, q_network._num_atoms])
def testGinConfig(self):
batch_size = 3
num_state_dims = 5
action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
num_actions = action_spec.maximum - action_spec.minimum + 1
self.assertEqual(num_actions, 2)
observations_spec = tensor_spec.TensorSpec(
[3, 3, num_state_dims], tf.float32)
observations = tf.random.uniform([batch_size, 3, 3, num_state_dims])
next_observations = tf.random.uniform([batch_size, 3, 3, num_state_dims])
time_steps = ts.restart(observations, batch_size)
next_time_steps = ts.restart(next_observations, batch_size)
# Note: this is cleared in tearDown().
gin.parse_config("""
CategoricalQNetwork.conv_layer_params = [(16, 2, 1), (15, 2, 1)]
CategoricalQNetwork.fc_layer_params = [4, 3, 5]
""")
q_network = categorical_q_network.CategoricalQNetwork(
input_tensor_spec=observations_spec,
action_spec=action_spec)
logits, _ = q_network(time_steps.observation)
next_logits, _ = q_network(next_time_steps.observation)
self.assertAllEqual(logits.shape.as_list(),
[batch_size, num_actions, q_network.num_atoms])
self.assertAllEqual(next_logits.shape.as_list(),
[batch_size, num_actions, q_network.num_atoms])
# This time there are six layers: two conv layers, three fc layers, and one
# final logits layer, for 12 trainable_variables in total.
self.assertLen(q_network.trainable_variables, 12)
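# Editor's sketch (not part of the original test suite): the categorical ("C51") head tested
# above emits one logit vector per action; expected Q-values are recovered by weighting a
# fixed support of atom values with the softmax of those logits. The [-10, 10] support below
# is an assumed range purely for illustration.
def _expected_q_values(logits, v_min=-10.0, v_max=10.0):
  """Converts [batch, num_actions, num_atoms] logits into [batch, num_actions] Q-values."""
  num_atoms = logits.shape[-1]
  support = tf.linspace(v_min, v_max, num_atoms)          # atom values z_i
  probabilities = tf.nn.softmax(logits, axis=-1)          # p_i(x, a)
  return tf.reduce_sum(probabilities * support, axis=-1)  # sum_i z_i * p_i(x, a)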
if __name__ == '__main__':
tf.test.main()
| tensorflow/agents | tf_agents/networks/categorical_q_network_test.py | Python | apache-2.0 | 6,673 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_i18n
import pbr.version
# NOTE(dstanek): i18n.enable_lazy() must be called before
# keystone.i18n._() is called to ensure it has the desired lazy lookup
# behavior. This includes cases, like keystone.exceptions, where
# keystone.i18n._() is called at import time.
oslo_i18n.enable_lazy()
from keystone.common import environment
from keystone.common import utils
from keystone import config
from keystone.i18n import _
from keystone.openstack.common import service
from keystone.openstack.common import systemd
from keystone.server import common
from keystone import service as keystone_service
CONF = cfg.CONF
class ServerWrapper(object):
"""Wraps a Server with some launching info & capabilities."""
def __init__(self, server, workers):
self.server = server
self.workers = workers
def launch_with(self, launcher):
self.server.listen()
if self.workers > 1:
# Use multi-process launcher
launcher.launch_service(self.server, self.workers)
else:
# Use single process launcher
launcher.launch_service(self.server)
def create_server(conf, name, host, port, workers):
app = keystone_service.loadapp('config:%s' % conf, name)
server = environment.Server(app, host=host, port=port,
keepalive=CONF.eventlet_server.tcp_keepalive,
keepidle=CONF.eventlet_server.tcp_keepidle)
if CONF.eventlet_server_ssl.enable:
server.set_ssl(CONF.eventlet_server_ssl.certfile,
CONF.eventlet_server_ssl.keyfile,
CONF.eventlet_server_ssl.ca_certs,
CONF.eventlet_server_ssl.cert_required)
return name, ServerWrapper(server, workers)
def serve(*servers):
logging.warning(_('Running keystone via eventlet is deprecated as of Kilo '
'in favor of running in a WSGI server (e.g. mod_wsgi). '
'Support for keystone under eventlet will be removed in '
'the "M"-Release.'))
if max([server[1].workers for server in servers]) > 1:
launcher = service.ProcessLauncher()
else:
launcher = service.ServiceLauncher()
for name, server in servers:
try:
server.launch_with(launcher)
except socket.error:
logging.exception(_('Failed to start the %(name)s server') % {
'name': name})
raise
# notify calling process we are ready to serve
systemd.notify_once()
for name, server in servers:
launcher.wait()
def _get_workers(worker_type_config_opt):
# Get the value from config, if the config value is None (not set), return
# the number of cpus with a minimum of 2.
worker_count = CONF.eventlet_server.get(worker_type_config_opt)
if not worker_count:
worker_count = max(2, processutils.get_worker_count())
return worker_count
def configure_threading():
monkeypatch_thread = not CONF.standard_threads
pydev_debug_url = utils.setup_remote_pydev_debug()
if pydev_debug_url:
# in order to work around errors caused by monkey patching we have to
# set the thread to False. An explanation is here:
# http://lists.openstack.org/pipermail/openstack-dev/2012-August/
# 000794.html
monkeypatch_thread = False
environment.use_eventlet(monkeypatch_thread)
def run(possible_topdir):
dev_conf = os.path.join(possible_topdir,
'etc',
'keystone.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
common.configure(
version=pbr.version.VersionInfo('keystone').version_string(),
config_files=config_files,
pre_setup_logging_fn=configure_threading)
paste_config = config.find_paste_config()
def create_servers():
public_worker_count = _get_workers('public_workers')
servers = []
servers.append(create_server(paste_config,
'main',
CONF.eventlet_server.public_bind_host,
CONF.eventlet_server.public_port,
public_worker_count))
return servers
_unused, servers = common.setup_backends(
startup_application_fn=create_servers)
serve(*servers)
| darren-wang/ks3 | keystone/server/eventlet.py | Python | apache-2.0 | 5,193 |
# Copyright (c) 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib._i18n import _
from neutron_lib import exceptions
class VlanTransparencyDriverError(exceptions.NeutronException):
"""Vlan Transparency not supported by all mechanism drivers."""
message = _("Backend does not support VLAN Transparency.")
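# Editor's sketch (not part of neutron-lib): mechanism drivers that cannot honour a
# VLAN-transparent network are expected to surface that through this exception; the
# `supported` flag below is purely illustrative.
def _raise_if_unsupported(vlan_transparent, supported):
    if vlan_transparent and not supported:
        raise VlanTransparencyDriverError()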
| openstack/neutron-lib | neutron_lib/exceptions/vlantransparent.py | Python | apache-2.0 | 902 |
#!/usr/bin/env python
#encoding=utf8
#Copyright [2014] [Wei Zhang]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
###################################################################
# Date: 2014/6/15 #
# Providing various functions for data analysis. These results #
# support our final model choice. #
###################################################################
import sys, csv, json, argparse, datetime
from collections import defaultdict
with open("../SETTINGS.json") as fp:
settings = json.loads(fp.read())
def cnt_num_attendant(eventinfo_path, staresult_path):
''' count the distribution of number of attendants '''
num_attendant = defaultdict(int)
total_num = 0
for i, line in enumerate(open(eventinfo_path)):
try:
num = int(line.strip("\r\t\n").split(",")[9])
num_attendant[num] += 1
total_num += 1
except:
print line
print i
sys.exit(1)
cum_prob = 0.0
num_attendant = sorted(num_attendant.items(), key=lambda x:x[0])
wfd = open(staresult_path, "w")
for pair in num_attendant:
cum_prob += 1.0*pair[1]/total_num
wfd.write("%d %d %.4f\n" % (pair[0], pair[1], cum_prob))
wfd.close()
def cnt_attendant_for_category(eventinfo_path, staresult_path):
    ''' count number of categories and the average number of
attendants for each category
'''
category_numevents = defaultdict(int)
category_numattendants = defaultdict(int)
for i, line in enumerate(open(eventinfo_path)):
category = line.strip("\r\t\n").split(",")[6]
num_participants = int(line.strip("\r\t\n").split(",")[9])
category_numevents[category] += 1
category_numattendants[category] += num_participants
print "Category statistics information--------\n"
print "\tNumber of categories: %d" % len(category_numevents)
wfd = open(staresult_path, "w")
for category in category_numevents:
        wfd.write("%s %d %f\n" % (category, category_numevents[category],
                  1.0*category_numattendants[category]/category_numevents[category]))
print 'Average number of attendants for each category can be seen in (staresult.txt)'
def cnt_attendant_for_location(eventinfo_path, staresult_path):
    ''' count number of locations and the average number of
attendants for each location
'''
location_numevents = defaultdict(int)
location_numattendants = defaultdict(int)
for i, line in enumerate(open(eventinfo_path)):
location = line.strip("\r\t\n").split(",")[2]
num_participants = int(line.strip("\r\t\n").split(",")[9])
location_numevents[location] += 1
location_numattendants[location] += num_participants
print "Location statistics information--------\n"
    print "\tNumber of locations: %d" % len(location_numevents)
wfd = open(staresult_path, "w")
for location in location_numevents:
wfd.write("%s %d %f\n" % (location, location_numevents[location],
1.0*location_numattendants[location]/location_numevents[location]))
print 'Average number of attendants for each location can be seen in (staresult.txt)'
def cnt_attendant_for_organizer(eventinfo_path, staresult_path):
    ''' count number of organizers and the average number of
        attendants for each organizer
'''
organizor_numevents = defaultdict(int)
organizor_numattendants = defaultdict(int)
for i, line in enumerate(open(eventinfo_path)):
organizor = line.strip("\r\t\n").split(",")[5]
num_participants = int(line.strip("\r\t\n").split(",")[9])
organizor_numevents[organizor] += 1
organizor_numattendants[organizor] += num_participants
    print "Organizer statistics information--------\n"
    print "\tNumber of organizers: %d" % len(organizor_numevents)
wfd = open(staresult_path, "w")
for organizor in organizor_numevents:
wfd.write("%s %d %f\n" % (organizor, organizor_numevents[organizor],
1.0*organizor_numattendants[organizor]/organizor_numevents[organizor]))
    print 'Average number of attendants for each organizer can be seen in (staresult.txt)'
def cnt_attendant_for_time(eventinfo_path, staresult_path):
''' count the number of attendants for each time period:
(morning, afternoon, evening, other) * (weekday, weekend) + multiple days
        + multiple weeks + multiple months.
More specifically, (morning, weekday):0, (afternoon, weekday):1, ...
'''
timeperiod_numevents = defaultdict(int)
timeperiod_numattendants = defaultdict(int)
for i, line in enumerate(open(eventinfo_path)):
start_time = line.strip("\r\t\n").split(",")[7]
end_time = line.strip("\r\t\n").split(",")[8]
num_participants = int(line.strip("\r\t\n").split(",")[9])
timeidx = getIdOfTimePeriod(start_time, end_time)
timeperiod_numevents[timeidx] += 1
timeperiod_numattendants[timeidx] += num_participants
print "Time statistics information--------\n"
    print "\tNumber of time periods: %d" % len(timeperiod_numevents)
wfd = open(staresult_path, "w")
for timeperiod in timeperiod_numevents:
wfd.write("%s %d %f\n" % (timeperiod, timeperiod_numevents[timeperiod],
1.0*timeperiod_numattendants[timeperiod]/timeperiod_numevents[timeperiod]))
print 'Average number of attendants for each timeperiod can be seen in (staresult.txt)'
dt = datetime.datetime # alias to the datetime class; strptime below is a classmethod, so no instance is needed
def getIdOfTimePeriod(start_time, end_time):
time1 = dt.strptime(start_time, '%Y-%m-%dT%H:%M:%S+08:00')
time2 = dt.strptime(end_time, '%Y-%m-%dT%H:%M:%S+08:00')
year1 = time1.year
year2 = time2.year
#day1 = time1.day
#day2 = time2.day
day1 = int(time1.strftime('%j'))
day2 = int(time2.strftime('%j'))
if year1 == year2:
if day2-day1 > 30:
return 10
elif day2-day1 > 7:
return 9
elif day2-day1 > 0:
return 8
elif day2 == day1:
idx1= 0
idx2= 0
hour1 = time1.hour
#hour2 = time2.hour
weekday1 = time1.isoweekday()
if weekday1 == 6 or weekday1 == 7:
idx1 = 1
if 8 <= hour1 and hour1 < 12:
idx2 = 0
elif 12 <= hour1 and hour1 < 18:
idx2 = 1
elif 18 <= hour1 and hour1 < 24:
idx2 = 2
else:
idx2 = 3
return idx1*4+idx2
elif year1+1 == year2:
if day2+366-day1 > 30:
return 10
elif day2+366-day1 > 7:
return 9
elif day2+366-day1 > 0:
return 8
else:
print 'Error in getting id of time period'
sys.exit(1)
else:
return 10
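# Editor's sketch (not part of the original script): getIdOfTimePeriod buckets a single-day
# event by weekday/weekend and time of day (indices 0-7) and a multi-day event by duration
# (8-10). The timestamps below are hypothetical; 2014-06-14 was a Saturday, so a morning
# event that day maps to index 1*4 + 0 = 4.
def _example_time_period_index():
    return getIdOfTimePeriod("2014-06-14T09:30:00+08:00", "2014-06-14T11:30:00+08:00")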
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', type=str, action='store',
dest='data_num', help='choose which data set to use')
parser.add_argument('-f', type=int, action='store',
dest='function_num', help='choose which data analysis function to use')
if len(sys.argv) != 5:
        print 'Command e.g.: python dataAnalysis.py -d 1(11,12,...) -f 1(2,3,...)'
sys.exit(1)
para = parser.parse_args()
if para.data_num == "1":
eventinfo_path = settings["ROOT_PATH"] + settings["SRC_DATA_FILE1_1"]
staresult_path = "./staresult.txt"
elif para.data_num == "11":
eventinfo_path = settings["ROOT_PATH"] + settings["SRC_DATA_FILE1_CITY1"]
staresult_path = "./staresult.txt"
elif para.data_num == "12":
eventinfo_path = settings["ROOT_PATH"] + settings["SRC_DATA_FILE1_CITY2"]
staresult_path = "./staresult.txt"
else:
print 'Invalid choice of dataset'
sys.exit(1)
if para.function_num == 1:
cnt_num_attendant(eventinfo_path, staresult_path)
elif para.function_num == 2:
cnt_attendant_for_category(eventinfo_path, staresult_path)
elif para.function_num == 3:
cnt_attendant_for_location(eventinfo_path, staresult_path)
elif para.function_num == 4:
cnt_attendant_for_organizer(eventinfo_path, staresult_path)
elif para.function_num == 5:
cnt_attendant_for_time(eventinfo_path, staresult_path)
if __name__ == "__main__":
main()
| anthonylife/EventPopularity | script/dataAnalysis.py | Python | apache-2.0 | 8,906 |
# -*- coding: utf-8 -*-
#
# Copyright 2015 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
__all__ = [
'TestInvoiceProgram',
]
import os
import datetime
import unittest
from invoice.database.db_types import Str, StrList, StrTuple, \
Int, IntList, IntTuple, \
Float, FloatList, FloatTuple, \
Date, DateList, DateTuple, \
DateTime, DateTimeList, DateTimeTuple, \
Path, PathList, PathTuple, \
Bool, BoolList, BoolTuple, \
OptionType, BaseSequence
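# Editor's note (not part of the original test module): the converter classes exercised below
# share one contract - db_to() serialises a Python value to its database string form and
# db_from() parses that string back. A minimal round trip, mirroring TestDate:
def _example_roundtrip():
    value = datetime.date(2015, 1, 4)
    stored = Date.db_to(value) # -> "2015-01-04"
    return Date.db_from(stored) == value # -> True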
class TestStr(unittest.TestCase):
def test_db_from(self):
self.assertIs(Str.db_from(None), None)
self.assertEqual(Str.db_from("alpha"), "alpha")
def test_db_to(self):
self.assertIs(Str.db_to(None), None)
self.assertEqual(Str.db_to("alpha"), "alpha")
class TestStrList(unittest.TestCase):
def test_db_from(self):
self.assertIs(StrList.db_from(None), None)
self.assertEqual(StrList.db_from("alpha, beta, 10.3, gamma "), ["alpha", "beta", "10.3", "gamma"])
def test_db_to(self):
self.assertIs(StrList.db_to(None), None)
self.assertEqual(StrList.db_to(["alpha", "beta", "10.3", "gamma"]), "alpha,beta,10.3,gamma")
class TestStrTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(StrTuple.db_from(None), None)
self.assertEqual(StrTuple.db_from("alpha, beta, 10.3, gamma "), ("alpha", "beta", "10.3", "gamma"))
def test_db_to(self):
self.assertIs(StrTuple.db_to(None), None)
self.assertEqual(StrTuple.db_to(("alpha", "beta", "10.3", "gamma")), "alpha,beta,10.3,gamma")
class TestInt(unittest.TestCase):
def test_db_from(self):
self.assertIs(Int.db_from(None), None)
self.assertEqual(Int.db_from("10"), 10)
def test_db_to(self):
self.assertIs(Int.db_to(None), None)
self.assertEqual(Int.db_to(10), "10")
class TestIntList(unittest.TestCase):
def test_db_from(self):
self.assertIs(IntList.db_from(None), None)
self.assertEqual(IntList.db_from("10, 20"), [10, 20])
def test_db_to(self):
self.assertIs(IntList.db_to(None), None)
self.assertEqual(IntList.db_to([10, 20]), "10,20")
class TestIntTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(IntTuple.db_from(None), None)
self.assertEqual(IntTuple.db_from("10, 20"), (10, 20))
def test_db_to(self):
self.assertIs(IntTuple.db_to(None), None)
self.assertEqual(IntTuple.db_to((10, 20)), "10,20")
class TestFloat(unittest.TestCase):
def test_db_from(self):
self.assertIs(Float.db_from(None), None)
self.assertEqual(Float.db_from("10.5"), 10.5)
def test_db_to(self):
self.assertIs(Float.db_to(None), None)
self.assertEqual(Float.db_to(10.5), "10.5")
class TestFloatList(unittest.TestCase):
def test_db_from(self):
self.assertIs(FloatList.db_from(None), None)
self.assertEqual(FloatList.db_from("10.5,23.32"), [10.5, 23.32])
def test_db_to(self):
self.assertIs(FloatList.db_to(None), None)
self.assertEqual(FloatList.db_to([10.5, 23.32]), "10.5,23.32")
class TestFloatTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(FloatTuple.db_from(None), None)
self.assertEqual(FloatTuple.db_from("10.5,23.32"), (10.5, 23.32))
def test_db_to(self):
self.assertIs(FloatTuple.db_to(None), None)
self.assertEqual(FloatTuple.db_to((10.5, 23.32)), "10.5,23.32")
class TestDate(unittest.TestCase):
def test_db_from(self):
self.assertIs(Date.db_from(None), None)
self.assertEqual(Date.db_from("2015-01-04"), datetime.date(2015, 1, 4))
def test_db_to(self):
self.assertIs(Date.db_to(None), None)
self.assertEqual(Date.db_to(datetime.date(2015, 1, 4)), "2015-01-04")
class TestDateList(unittest.TestCase):
def test_db_from(self):
self.assertIs(DateList.db_from(None), None)
self.assertEqual(DateList.db_from(" 2015-01-04 , 2014-04-05 "), [datetime.date(2015, 1, 4), datetime.date(2014, 4, 5)])
def test_db_to(self):
self.assertIs(DateList.db_to(None), None)
self.assertEqual(DateList.db_to([datetime.date(2015, 1, 4), datetime.date(2014, 4, 5)]), "2015-01-04,2014-04-05")
class TestDateTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(DateTuple.db_from(None), None)
self.assertEqual(DateTuple.db_from(" 2015-01-04 , 2014-04-05 "), (datetime.date(2015, 1, 4), datetime.date(2014, 4, 5)))
def test_db_to(self):
self.assertIs(DateTuple.db_to(None), None)
self.assertEqual(DateTuple.db_to((datetime.date(2015, 1, 4), datetime.date(2014, 4, 5))), "2015-01-04,2014-04-05")
class TestDateTime(unittest.TestCase):
def test_db_from(self):
self.assertIs(DateTime.db_from(None), None)
self.assertEqual(DateTime.db_from("2015-01-04 13:34:45"), datetime.datetime(2015, 1, 4, 13, 34, 45))
def test_db_to(self):
self.assertIs(DateTime.db_to(None), None)
self.assertEqual(DateTime.db_to(datetime.datetime(2015, 1, 4, 13, 34, 45)), "2015-01-04 13:34:45")
class TestDateTimeList(unittest.TestCase):
def test_db_from(self):
self.assertIs(DateTimeList.db_from(None), None)
self.assertEqual(DateTimeList.db_from("2015-01-04 13:34:45,2014-04-05 02:22:01"), [datetime.datetime(2015, 1, 4, 13, 34, 45), datetime.datetime(2014, 4, 5, 2, 22, 1)])
def test_db_to(self):
self.assertIs(DateTimeList.db_to(None), None)
self.assertEqual(DateTimeList.db_to([datetime.datetime(2015, 1, 4, 13, 34, 45), datetime.datetime(2014, 4, 5, 2, 22, 1)]), "2015-01-04 13:34:45,2014-04-05 02:22:01")
class TestDateTimeTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(DateTimeTuple.db_from(None), None)
self.assertEqual(DateTimeTuple.db_from("2015-01-04 13:34:45,2014-04-05 02:22:01"), (datetime.datetime(2015, 1, 4, 13, 34, 45), datetime.datetime(2014, 4, 5, 2, 22, 1)))
def test_db_to(self):
self.assertIs(DateTimeTuple.db_to(None), None)
self.assertEqual(DateTimeTuple.db_to((datetime.datetime(2015, 1, 4, 13, 34, 45), datetime.datetime(2014, 4, 5, 2, 22, 1))), "2015-01-04 13:34:45,2014-04-05 02:22:01")
class TestPath(unittest.TestCase):
def test_db_from(self):
self.assertIs(Path.db_from(None), None)
f = lambda x: os.path.normpath(os.path.abspath(x))
self.assertEqual(Path.db_from("{}".format(f("alpha"))), f("alpha"))
def test_db_to(self):
self.assertIs(Path.db_to(None), None)
f = lambda x: os.path.normpath(os.path.abspath(x))
self.assertEqual(Path.db_to("alpha"), f("alpha"))
class TestPathList(unittest.TestCase):
def test_db_from(self):
self.assertIs(PathList.db_from(None), None)
f = lambda x: os.path.normpath(os.path.abspath(x))
self.assertEqual(PathList.db_from("{},/b/c,{}".format(f("alpha"), f("d/e"))), [f("alpha"), "/b/c", f("d/e")])
def test_db_to(self):
self.assertIs(PathList.db_to(None), None)
f = lambda x: os.path.normpath(os.path.abspath(x))
self.assertEqual(PathList.db_to(["alpha", "/b/c", "d/e"]), "{},/b/c,{}".format(f("alpha"), f("d/e")))
class TestPathTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(PathTuple.db_from(None), None)
f = lambda x: os.path.normpath(os.path.abspath(x))
self.assertEqual(PathTuple.db_from("{},/b/c,{}".format(f("alpha"), f("d/e"))), (f("alpha"), "/b/c", f("d/e")))
def test_db_to(self):
self.assertIs(PathTuple.db_to(None), None)
f = lambda x: os.path.normpath(os.path.abspath(x))
self.assertEqual(PathTuple.db_to(("alpha", "/b/c", "d/e")), "{},/b/c,{}".format(f("alpha"), f("d/e")))
class TestBool(unittest.TestCase):
def test_db_from(self):
self.assertIs(Bool.db_from(None), None)
self.assertEqual(Bool.db_from(True), True)
self.assertEqual(Bool.db_from(1), True)
self.assertEqual(Bool.db_from(False), False)
self.assertEqual(Bool.db_from(0), False)
def test_db_to(self):
self.assertIs(Bool.db_to(None), None)
self.assertEqual(Bool.db_to("True"), True)
self.assertEqual(Bool.db_to(1), True)
self.assertEqual(Bool.db_to("False"), False)
self.assertEqual(Bool.db_to(0), False)
with self.assertRaises(ValueError):
Bool.db_to("alpha")
class TestBoolList(unittest.TestCase):
def test_db_from(self):
self.assertIs(BoolList.db_from(None), None)
self.assertEqual(BoolList.db_from("True,True,False,False"), [True, True, False, False])
def test_db_to(self):
self.assertIs(BoolList.db_to(None), None)
self.assertEqual(BoolList.db_to([True, True, False, False]), "True,True,False,False")
with self.assertRaises(ValueError):
BoolList.db_to("True,alpha")
class TestBoolTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(BoolTuple.db_from(None), None)
self.assertEqual(BoolTuple.db_from("True,True,False,False"), (True, True, False, False))
def test_db_to(self):
self.assertIs(BoolTuple.db_to(None), None)
self.assertEqual(BoolTuple.db_to((True, True, False, False)), "True,True,False,False")
with self.assertRaises(ValueError):
BoolTuple.db_to("True,alpha")
class MyOption(OptionType):
OPTIONS = ("alpha", "beta", "gamma")
class MyOptionTuple(BaseSequence):
SCALAR_TYPE = MyOption
SEQUENCE_TYPE = tuple
class TestMyOption(unittest.TestCase):
def test_db_from(self):
self.assertIs(MyOption.db_from(None), None)
self.assertEqual(MyOption.db_from("alpha"), "alpha")
with self.assertRaises(ValueError) as cm:
self.assertEqual(MyOption.db_from("x"), "x")
def test_db_to(self):
self.assertIs(MyOption.db_to(None), None)
self.assertEqual(MyOption.db_to("alpha"), "alpha")
with self.assertRaises(ValueError) as cm:
self.assertEqual(MyOption.db_to("x"), "x")
class TestMyOptionTuple(unittest.TestCase):
def test_db_from(self):
self.assertIs(MyOptionTuple.db_from(None), None)
self.assertEqual(MyOptionTuple.db_from("alpha, beta, gamma "), ("alpha", "beta", "gamma"))
with self.assertRaises(ValueError) as cm:
self.assertEqual(MyOptionTuple.db_from("alpha, x, gamma "), ("alpha", "x", "gamma"))
def test_db_to(self):
self.assertIs(MyOptionTuple.db_to(None), None)
self.assertEqual(MyOptionTuple.db_to(("alpha", "beta", "gamma")), "alpha,beta,gamma")
with self.assertRaises(ValueError) as cm:
self.assertEqual(MyOptionTuple.db_to(("alpha", "x", "gamma")), "alpha,x,gamma")
| simone-campagna/invoice | tests/unittests/test_db_types.py | Python | apache-2.0 | 11,568 |
import datetime
import json
import logging
import requests
from core.analytics import OneShotAnalytics
from core.errors import ObservableValidationError
from core.observables import Hostname, Email, Ip, Hash
class ThreatCrowdAPI(object):
"""Base class for querying the ThreatCrowd API."""
@staticmethod
def fetch(observable):
base_url_api = "https://www.threatcrowd.org/searchApi/v2"
if isinstance(observable, Hostname):
url = base_url_api + "/domain/report/"
params = {"domain": observable.value}
try:
res = requests.get(url, params)
if res.ok:
return res.json()
except Exception as e:
                print("Exception while getting domain report: {}".format(e))
return None
elif isinstance(observable, Email):
url = base_url_api + "/email/report/"
params = {"email": observable.value}
try:
res = requests.get(url, params)
if res.ok:
return res.json()
except Exception as e:
                print("Exception while getting email report: {}".format(e))
return None
elif isinstance(observable, Ip):
url = base_url_api + "/ip/report/"
print(url)
params = {"ip": observable.value}
print(params)
try:
res = requests.get(url, params)
if res.ok:
return res.json()
except Exception as e:
                print("Exception while getting IP report: {}".format(e))
return None
elif isinstance(observable, Hash):
url = base_url_api + "/file/report/"
params = {"resource": observable.value}
try:
res = requests.get(url, params)
if res.ok:
return res.json()
except Exception as e:
                print("Exception while getting file report: {}".format(e))
return None
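# Illustrative usage of ThreatCrowdAPI.fetch() outside of the analytics
# pipeline (a hypothetical sketch, not part of the original module). Hostname
# is already imported above, and get_or_create mirrors how analyze() builds
# observables further down:
#
#   report = ThreatCrowdAPI.fetch(Hostname.get_or_create(value="example.com"))
#   if report:
#       print(report.get("subdomains", []))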
class ThreatCrowdQuery(ThreatCrowdAPI, OneShotAnalytics):
default_values = {
"name": "ThreatCrowd",
"description": "Perform a ThreatCrowd query.",
}
ACTS_ON = ["Ip", "Hostname", "Hash", "Email"]
@staticmethod
def analyze(observable, results):
links = set()
json_result = ThreatCrowdAPI.fetch(observable)
json_string = json.dumps(
json_result, sort_keys=True, indent=4, separators=(",", ": ")
)
results.update(raw=json_string)
result = {}
if isinstance(observable, Hostname):
if "resolutions" in json_result:
result["ip on this domains"] = 0
for ip in json_result["resolutions"]:
if ip["ip_address"].strip() != observable.value:
if ip["last_resolved"] != "0000-00-00":
last_resolved = datetime.datetime.strptime(
ip["last_resolved"], "%Y-%m-%d"
)
try:
new_ip = Ip.get_or_create(
value=ip["ip_address"].strip()
)
links.update(
new_ip.active_link_to(
observable, "IP", "ThreatCrowd", last_resolved
)
)
result["ip on this domains"] += 1
except ObservableValidationError:
logging.error(
"An error occurred when trying to add subdomain {} to the database".format(
ip["ip_address"]
)
)
if "emails" in json_result:
result["nb emails"] = 0
for email in json_result["emails"]:
try:
new_email = Email.get_or_create(value=email)
links.update(
new_email.active_link_to(
observable, "Used by", "ThreatCrowd"
)
)
result["nb emails"] += 1
except ObservableValidationError:
logging.error(
"An error occurred when trying to add email {} to the database".format(
email
)
)
if "subdomains" in json_result:
result["nb subdomains"] = 0
for subdomain in json_result["subdomains"]:
try:
new_domain = Hostname.get_or_create(value=subdomain)
links.update(
observable.active_link_to(
new_domain, "subdomain", "ThreatCrowd"
)
)
result["nb subdomains"] += 1
except ObservableValidationError:
logging.error(
"An error occurred when trying to add subdomain {} to the database".format(
subdomain
)
)
if isinstance(observable, Ip):
if "resolutions" in json_result:
result["domains resolved"] = 0
for domain in json_result["resolutions"]:
if domain["domain"].strip() != observable.value:
try:
last_resolved = datetime.datetime.strptime(
domain["last_resolved"], "%Y-%m-%d"
)
new_domain = Hostname.get_or_create(
value=domain["domain"].strip()
)
links.update(
new_domain.active_link_to(
observable, "A Record", "ThreatCrowd", last_resolved
)
)
result["domains resolved"] += 1
except ObservableValidationError:
logging.error(
"An error occurred when trying to add domain {} to the database".format(
domain["domain"]
)
)
if "hashes" in json_result and len(json_result["hashes"]) > 0:
result["malwares"] = 0
for h in json_result["hashes"]:
new_hash = Hash.get_or_create(value=h)
links.update(
new_hash.active_link_to(observable, "hash", "ThreatCrowd")
)
result["malwares"] += 1
if isinstance(observable, Email):
            if "domains" in json_result and len(json_result["domains"]) > 0:
result["domains recorded by email"] = 0
for domain in json_result["domains"]:
new_domain = Hostname.get_or_create(value=domain)
links.update(
new_domain.active_link_to(
observable, "recorded by", "ThreatCrowd"
)
)
result["domains recorded by email"] += 1
if isinstance(observable, Hash):
result["nb c2"] = 0
if "md5" in json_result:
new_hash = Hash.get_or_create(value=json_result["md5"])
                links.update(new_hash.active_link_to(observable, "md5", "ThreatCrowd"))
if "sha1" in json_result:
new_hash = Hash.get_or_create(value=json_result["sha1"])
                links.update(new_hash.active_link_to(observable, "sha1", "ThreatCrowd"))
if "sha256" in json_result:
new_hash = Hash.get_or_create(value=json_result["sha256"])
links.update(
                    new_hash.active_link_to(observable, "sha256", "ThreatCrowd")
)
if "domains" in json_result and len(json_result["domains"]):
for domain in json_result["domains"]:
new_domain = Hostname.get_or_create(value=domain)
links.update(
observable.active_link_to(new_domain, "c2", "ThreatCrowd")
)
result["nb c2"] += 1
if "ips" in json_result and len(json_result["ips"]):
for ip in json_result["ips"]:
new_ip = Ip.get_or_create(value=ip.strip())
links.update(observable.active_link_to(new_ip, "c2", "ThreatCrowd"))
result["nb c2"] += 1
if "permalink" in json_result:
result["permalink"] = json_result["permalink"]
result["source"] = "threatcrowd_query"
result["raw"] = json_string
observable.add_context(result)
return list(links)
| yeti-platform/yeti | plugins/analytics/public/threatcrowd.py | Python | apache-2.0 | 9,509 |
#!/usr/bin/python
###
# Copyright (c) 2016 Nishant Das Patnaik.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import os, sys, argparse, time, codecs, binascii, frida, json, traceback, subprocess, tempfile
from flask import Flask, request, render_template
from termcolor import colored
import database as db
import platform as platform_module
print("""
___ .______ .______ .___ ___. ______ .__ __.
/ \ | _ \ | _ \ | \/ | / __ \ | \ | |
/ ^ \ | |_) | | |_) | | \ / | | | | | | \| |
/ /_\ \ | ___/ | ___/ | |\/| | | | | | | . ` |
/ _____ \ | | | | | | | | | `--' | | |\ |
/__/ \__\ | _| | _| |__| |__| \______/ |__| \__|
github.com/dpnishant
""")
app = Flask(__name__, static_url_path='/static')
#app.debug = True
device = ''
session = ''
temp_dir = tempfile.mkdtemp()
merged_script_path = os.path.join(temp_dir,'merged.js')
APP_LIST = []
@app.after_request
def add_header(r):
"""
    Add headers that disable client-side caching of responses, so the
    monitor UI always fetches fresh data.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
@app.route('/api/fetch', methods=['GET'])
def serve_json():
index = request.args.get('id')
if request.args.get('reportdb'):
db_name = request.args.get('reportdb')
else:
db_name = request.args.get('app')
response = db.read_from_database(db_name, index)
#response = open('static/data.json').read()
return response
@app.route('/monitor/', methods=['GET'])
def monitor_page():
app_name = request.args.get('app')
return render_template('monitor.html', app_name=app_name)
@app.route('/', methods=['GET'])
def landing_page():
global APP_LIST, DB_MAP
app_dumps_dir = os.path.join('.','app_dumps')
for root, dirs, files in os.walk(app_dumps_dir):
path = root.split(os.sep)
for file in files:
file_path = os.path.join(root, file)
if file_path.endswith('.db'):
APP_LIST.append(file.replace('.db', ''))
return render_template('index.html', apps=APP_LIST)
def init_opts():
parser = argparse.ArgumentParser()
parser.add_argument('-a', action='store', dest='app_name', default='',
help='''Process Name;
Accepts "Twitter" for iOS;
"com.twitter.android" for Android; "Twitter" for macOS''')
parser.add_argument('--spawn', action='store', dest='spawn', default=0,
help='''Optional; Accepts 1=Spawn, 0=Attach; Needs "-p PLATFORM"''')
parser.add_argument('-p', action='store', dest='platform',
help='Platform Type; Accepts "ios", "iossim", "android" or "macos"')
parser.add_argument('-s', action='store', dest='script_path', default='',
help='''Path to agent script file;
Can be relative/absolute path for a file or directory;
Multiple scripts in a directory shall be merged;
Needs "-a APP_NAME"''')
parser.add_argument('-o', action='store', dest='output_dir',
help='''(Optional) Path to store any dumps/logs;
Accepts relative/absolute paths''')
parser.add_argument('-r', action='store', dest='report',
help='Report database name (Default is <appname>.db')
parser.add_argument('-ls', action='store', dest='list_apps', default=0,
help='''Optional; Accepts 1 or 0; Lists running Apps on target device; Needs "-p PLATFORM"''')
parser.add_argument('-v', action='version', version='AppMon Sniffer v0.1, Nishant Das Patnaik, 2016')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
global output_dir, report_name
results = parser.parse_args()
app_name = results.app_name
platform = results.platform
script_path = results.script_path
list_apps = int(results.list_apps)
spawn = int(results.spawn)
output_dir = results.output_dir if results.output_dir else os.path.join('.','app_dumps')
report_name = results.report if results.report else app_name
if script_path is not None and app_name == '' and list_apps == 0:
parser.print_help()
sys.exit(1)
return app_name, platform, script_path, list_apps, output_dir, spawn
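# Example invocations (illustrative only; the app names, platforms and script
# paths are placeholders taken from the help text above, not documented commands):
#   python appmon.py -p android -a com.twitter.android -s path/to/scripts/ --spawn 1
#   python appmon.py -p ios -a Twitter -s path/to/script.js
#   python appmon.py -p android -ls 1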
def merge_scripts(path):
global merged_script_path
script_source = ''
for root, dirs, files in os.walk(path):
path = root.split('/')
for file in files:
script_path = os.path.join(root, file)
if script_path.endswith('.js'):
source = ''
with codecs.open(script_path, 'r', 'utf-8') as f:
source = f.read()
script_source += '/* ____%s/%s____ */\n\n' % (os.path.basename(root), file) + source + '\n\n'
with codecs.open(merged_script_path, "w", "utf-8") as f:
f.write(script_source)
return merged_script_path
def _exit_():
print((colored('[INFO] Exiting...', 'green')))
try:
os.remove(merged_script_path)
except Exception as e:
pass
sys.exit(0)
def writeBinFile(fname, data):
with codecs.open(fname, "a", "utf-8") as f:
f.write(data + '\r\n\r\n')
def list_processes(device):
    print('PID\tProcesses\n===\t=========')
    for app in device.enumerate_processes():
        print("%s\t%s" % (app.pid, app.name))
def on_detached():
print((colored('[WARNING] "%s" has terminated!' % (app_name), 'red')))
def on_message(message, data):
os_string = platform_module.system()
if os_string == "Windows":
current_time = time.strftime('%b %d %Y %I:%M %p', time.localtime())
else:
current_time = time.strftime('%b %d %Y %l:%M %p', time.localtime())
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if message['type'] == 'send':
writePath = os.path.join(output_dir, str(report_name) + '.db')
db.save_to_database(writePath, message['payload'])
#writePath = os.path.join(output_dir, app_name + '.json')
#writeBinFile(writePath, message['payload']) #writeBinFile(writePath, binascii.unhexlify(message['payload']))
print((colored('[%s] Dumped to %s' % (current_time, writePath), 'green')))
elif message['type'] == 'error':
print((message['stack']))
def generate_injection():
injection_source = ''
if os.path.isfile(script_path):
with codecs.open(script_path, 'r', 'utf-8') as f:
injection_source = f.read()
elif os.path.isdir(script_path):
with codecs.open(merge_scripts(script_path), 'r', 'utf-8') as f:
injection_source = f.read()
print((colored('[INFO] Building injection...', 'yellow')))
return injection_source
def getDisplayName(session, app_name, platform):
try:
str_script = ""
if platform == "ios":
str_script = """/* ____CFBundleDisplayName Getter for iOS Gadget____ */
'use strict';
rpc.exports = {
gadgetdisplayname: function () {
if (ObjC.available) {
var dict = ObjC.classes.NSBundle.mainBundle().infoDictionary();
var iter = dict.keyEnumerator();
var key = "";
while ((key = iter.nextObject()) !== null) {
if (key.toString() === "CFBundleDisplayName") {
return dict.objectForKey_(key).toString();
}
}
} else { return null; }
}
};
"""
script = session.create_script(str_script)
script.load()
if script.exports.gadgetdisplayname:
app_name = script.exports.gadgetdisplayname()
script.unload()
return app_name
elif platform == "android":
str_script = """/* ____ getPackageName Getter for Android Gadget____ */
'use strict';
rpc.exports = {
gadgetdisplayname: function () {
var appName = "";
Java.perform(function(argument) {
const ActivityThread = Java.use('android.app.ActivityThread');
const app = ActivityThread.currentApplication();
appName = app.toString().split("@")[0];
});
return appName;
}};
"""
script = session.create_script(str_script)
script.load()
if script.exports.gadgetdisplayname:
app_name = script.exports.gadgetdisplayname()
script.unload()
return app_name
except Exception as e:
print((colored("[ERROR] " + str(e), "red")))
traceback.print_exc()
def getBundleID(device, app_name, platform):
try:
session = device.attach(app_name)
session.on('detached', on_detached)
script = session.create_script("""'use strict';
rpc.exports = {
iosbundleid: function () {
return ObjC.classes.NSBundle.mainBundle().bundleIdentifier().toString();
},
macosbundleid: function () {
return ObjC.classes.NSBundle.mainBundle().executablePath().toString();
}
};
""")
script.load()
if platform == 'ios':
bundleID = script.exports.iosbundleid()
elif platform == 'macos':
bundleID = script.exports.macosbundleid()
script.unload()
session.detach()
return bundleID
except Exception as e:
print((colored("[ERROR] " + str(e), "red")))
traceback.print_exc()
def init_session():
try:
session = None
if platform == 'ios' or platform == 'android':
try:
device = frida.get_usb_device(3) # added timeout to wait for 3 seconds
except Exception as e:
print((colored(str(e), "red")))
traceback.print_exc()
if platform == 'android':
print((colored("Troubleshooting Help", "blue")))
print((colored("HINT: Is USB Debugging enabled?", "blue")))
print((colored("HINT: Is `frida-server` running on mobile device (with +x permissions)?", "blue")))
print((colored("HINT: Is `adb` daemon running?", "blue")))
sys.exit(1)
elif platform == "ios":
print((colored("Troubleshooting Help", "blue")))
print((colored("HINT: Have you installed `frida` module from Cydia?", "blue")))
                print((colored("HINT: Have you used `ipa_installer` to inject the `FridaGadget` shared library?", "blue")))
sys.exit(1)
elif platform == 'iossim':
try:
device = frida.get_remote_device()
except Exception as e:
print((colored("Troubleshooting Help", "blue")))
print((colored("HINT: Have you successfully integrated the FridaGadget dylib with the XCode Project?", "blue")))
print((colored("HINT: Do you see a message similar to \"[Frida INFO] Listening on 127.0.0.1 TCP port 27042\" on XCode console logs?", "blue")))
sys.exit(1)
elif platform == 'macos':
device = frida.get_local_device()
else:
print((colored('[ERROR] Unsupported Platform', 'red')))
sys.exit(1)
pid = None
if app_name:
try:
if platform == 'android' and spawn == 1:
print((colored("Now Spawning %s" % app_name, "green")))
pid = device.spawn([app_name])
#time.sleep(5)
session = device.attach(pid)
#time.sleep(5)
elif (platform == 'ios' or platform == 'macos') and spawn == 1:
bundleID = getBundleID(device, app_name, platform)
if bundleID:
print((colored("Now Spawning %s" % bundleID, "green")))
pid = device.spawn([bundleID])
#time.sleep(5)
session = device.attach(pid)
else:
print((colored("[ERROR] Can't spawn %s" % app_name, "red")))
traceback.print_exc()
sys.exit(1)
else:
arg_to_attach = app_name
if app_name.isdigit():
arg_to_attach = int(app_name)
session = device.attach(arg_to_attach)
except Exception as e:
print((colored('[ERROR] ' + str(e), 'red')))
traceback.print_exc()
if session:
print((colored('[INFO] Attached to %s' % (app_name), 'yellow')))
session.on('detached', on_detached)
except Exception as e:
print((colored('[ERROR] ' + str(e), 'red')))
traceback.print_exc()
sys.exit(1)
return device, session, pid
try:
app_name, platform, script_path, list_apps, output_dir, spawn = init_opts()
device, session, pid = init_session()
if int(list_apps) == 1:
list_processes(device)
sys.exit(0)
if session:
if app_name == "Gadget":
app_name = getDisplayName(session, app_name, platform)
script = session.create_script(generate_injection())
if script:
print((colored('[INFO] Instrumentation started...', 'yellow')))
script.on('message', on_message)
script.load()
if spawn == 1 and pid:
device.resume(pid)
app.run() #Start WebServer
except Exception as e:
print((colored('[ERROR] ' + str(e), 'red')))
traceback.print_exc()
sys.exit(1)
try:
while True:
pass
except KeyboardInterrupt:
script.unload()
session.detach()
_exit_()
| dpnishant/appmon | appmon.py | Python | apache-2.0 | 14,441 |
# third party
import numpy as np
import pytest
# syft absolute
from syft.core.tensor.smpc.share_tensor import ShareTensor
@pytest.mark.smpc
def test_bit_extraction() -> None:
share = ShareTensor(rank=0, parties_info=[], ring_size=2**32)
data = np.array([[21, 32], [-54, 89]], dtype=np.int32)
share.child = data
exp_res1 = np.array([[False, False], [True, False]], dtype=np.bool_)
res = share.bit_extraction(31).child
assert (res == exp_res1).all()
exp_res2 = np.array([[True, False], [False, False]], dtype=np.bool_)
res = share.bit_extraction(2).child
assert (res == exp_res2).all()
@pytest.mark.smpc
def test_bit_extraction_exception() -> None:
share = ShareTensor(rank=0, parties_info=[], ring_size=2**32)
data = np.array([[21, 32], [-54, 89]], dtype=np.int32)
share.child = data
with pytest.raises(Exception):
share >> 33
with pytest.raises(Exception):
share >> -1
| OpenMined/PySyft | tests/integration/smpc/tensor/share_tensor_test.py | Python | apache-2.0 | 975 |
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sys import maxsize
from nose_parameterized import parameterized
from numpy import (
arange,
datetime64,
)
from numpy.testing import (
assert_array_equal,
)
from pandas import (
DataFrame,
Timestamp,
)
from pandas.util.testing import assert_index_equal
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
NoDataBeforeDate,
NoDataAfterDate,
)
from zipline.pipeline.loaders.synthetic import (
OHLCV,
asset_start,
asset_end,
expected_bar_value,
expected_bar_values_2d,
make_bar_data,
)
from zipline.testing import seconds_to_timestamp
from zipline.testing.fixtures import (
WithBcolzEquityDailyBarReader,
ZiplineTestCase,
)
from zipline.utils.calendars import get_calendar
TEST_CALENDAR_START = Timestamp('2015-06-01', tz='UTC')
TEST_CALENDAR_STOP = Timestamp('2015-06-30', tz='UTC')
TEST_QUERY_START = Timestamp('2015-06-10', tz='UTC')
TEST_QUERY_STOP = Timestamp('2015-06-19', tz='UTC')
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = DataFrame(
[
# 1) The equity's trades start and end before query.
{'start_date': '2015-06-01', 'end_date': '2015-06-05'},
# 2) The equity's trades start and end after query.
{'start_date': '2015-06-22', 'end_date': '2015-06-30'},
# 3) The equity's data covers all dates in range.
{'start_date': '2015-06-02', 'end_date': '2015-06-30'},
# 4) The equity's trades start before the query start, but stop
# before the query end.
{'start_date': '2015-06-01', 'end_date': '2015-06-15'},
# 5) The equity's trades start and end during the query.
{'start_date': '2015-06-12', 'end_date': '2015-06-18'},
# 6) The equity's trades start during the query, but extend through
# the whole query.
{'start_date': '2015-06-15', 'end_date': '2015-06-25'},
],
index=arange(1, 7),
columns=['start_date', 'end_date'],
).astype(datetime64)
EQUITY_INFO['symbol'] = [chr(ord('A') + n) for n in range(len(EQUITY_INFO))]
TEST_QUERY_ASSETS = EQUITY_INFO.index
class BcolzDailyBarTestCase(WithBcolzEquityDailyBarReader, ZiplineTestCase):
EQUITY_DAILY_BAR_START_DATE = TEST_CALENDAR_START
EQUITY_DAILY_BAR_END_DATE = TEST_CALENDAR_STOP
@classmethod
def make_equity_info(cls):
return EQUITY_INFO
@classmethod
def make_equity_daily_bar_data(cls):
return make_bar_data(
EQUITY_INFO,
cls.equity_daily_bar_days,
)
@classmethod
def init_class_fixtures(cls):
super(BcolzDailyBarTestCase, cls).init_class_fixtures()
cls.sessions = cls.trading_calendar.sessions_in_range(
cls.trading_calendar.minute_to_session_label(TEST_CALENDAR_START),
cls.trading_calendar.minute_to_session_label(TEST_CALENDAR_STOP)
)
@property
def assets(self):
return EQUITY_INFO.index
def trading_days_between(self, start, end):
return self.sessions[self.sessions.slice_indexer(start, end)]
def asset_start(self, asset_id):
return asset_start(EQUITY_INFO, asset_id)
def asset_end(self, asset_id):
return asset_end(EQUITY_INFO, asset_id)
def dates_for_asset(self, asset_id):
start, end = self.asset_start(asset_id), self.asset_end(asset_id)
return self.trading_days_between(start, end)
def test_write_ohlcv_content(self):
result = self.bcolz_daily_bar_ctable
for column in OHLCV:
idx = 0
data = result[column][:]
multiplier = 1 if column == 'volume' else 1000
for asset_id in self.assets:
for date in self.dates_for_asset(asset_id):
self.assertEqual(
expected_bar_value(
asset_id,
date,
column
) * multiplier,
data[idx],
)
idx += 1
self.assertEqual(idx, len(data))
def test_write_day_and_id(self):
result = self.bcolz_daily_bar_ctable
idx = 0
ids = result['id']
days = result['day']
for asset_id in self.assets:
for date in self.dates_for_asset(asset_id):
self.assertEqual(ids[idx], asset_id)
self.assertEqual(date, seconds_to_timestamp(days[idx]))
idx += 1
def test_write_attrs(self):
result = self.bcolz_daily_bar_ctable
expected_first_row = {
'1': 0,
'2': 5, # Asset 1 has 5 trading days.
'3': 12, # Asset 2 has 7 trading days.
'4': 33, # Asset 3 has 21 trading days.
'5': 44, # Asset 4 has 11 trading days.
'6': 49, # Asset 5 has 5 trading days.
}
expected_last_row = {
'1': 4,
'2': 11,
'3': 32,
'4': 43,
'5': 48,
'6': 57, # Asset 6 has 9 trading days.
}
expected_calendar_offset = {
'1': 0, # Starts on 6-01, 1st trading day of month.
'2': 15, # Starts on 6-22, 16th trading day of month.
'3': 1, # Starts on 6-02, 2nd trading day of month.
'4': 0, # Starts on 6-01, 1st trading day of month.
'5': 9, # Starts on 6-12, 10th trading day of month.
'6': 10, # Starts on 6-15, 11th trading day of month.
}
self.assertEqual(result.attrs['first_row'], expected_first_row)
self.assertEqual(result.attrs['last_row'], expected_last_row)
self.assertEqual(
result.attrs['calendar_offset'],
expected_calendar_offset,
)
cal = get_calendar(result.attrs['calendar_name'])
first_session = Timestamp(result.attrs['start_session_ns'], tz='UTC')
end_session = Timestamp(result.attrs['end_session_ns'], tz='UTC')
sessions = cal.sessions_in_range(first_session, end_session)
assert_index_equal(
self.sessions,
sessions
)
def test_read_first_trading_day(self):
self.assertEqual(
self.bcolz_equity_daily_bar_reader.first_trading_day,
self.sessions[0],
)
def _check_read_results(self, columns, assets, start_date, end_date):
results = self.bcolz_equity_daily_bar_reader.load_raw_arrays(
columns,
start_date,
end_date,
assets,
)
dates = self.trading_days_between(start_date, end_date)
for column, result in zip(columns, results):
assert_array_equal(
result,
expected_bar_values_2d(
dates,
EQUITY_INFO,
column,
)
)
@parameterized.expand([
(['open'],),
(['close', 'volume'],),
(['volume', 'high', 'low'],),
(['open', 'high', 'low', 'close', 'volume'],),
])
def test_read(self, columns):
self._check_read_results(
columns,
self.assets,
TEST_QUERY_START,
TEST_QUERY_STOP,
)
def test_start_on_asset_start(self):
"""
        Test loading with queries that start on the first day of each asset's
lifetime.
"""
columns = ['high', 'volume']
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.asset_start(asset),
end_date=self.sessions[-1],
)
def test_start_on_asset_end(self):
"""
Test loading with queries that start on the last day of each asset's
lifetime.
"""
columns = ['close', 'volume']
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.asset_end(asset),
end_date=self.sessions[-1],
)
def test_end_on_asset_start(self):
"""
Test loading with queries that end on the first day of each asset's
lifetime.
"""
columns = ['close', 'volume']
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.sessions[0],
end_date=self.asset_start(asset),
)
def test_end_on_asset_end(self):
"""
Test loading with queries that end on the last day of each asset's
lifetime.
"""
columns = ['close', 'volume']
for asset in self.assets:
self._check_read_results(
columns,
self.assets,
start_date=self.sessions[0],
end_date=self.asset_end(asset),
)
def test_unadjusted_get_value(self):
reader = self.bcolz_equity_daily_bar_reader
# At beginning
price = reader.get_value(1, Timestamp('2015-06-01', tz='UTC'),
'close')
# Synthetic writes price for date.
self.assertEqual(108630.0, price)
# Middle
price = reader.get_value(1, Timestamp('2015-06-02', tz='UTC'),
'close')
self.assertEqual(108631.0, price)
# End
price = reader.get_value(1, Timestamp('2015-06-05', tz='UTC'),
'close')
self.assertEqual(108634.0, price)
# Another sid at beginning.
price = reader.get_value(2, Timestamp('2015-06-22', tz='UTC'),
'close')
self.assertEqual(208651.0, price)
# Ensure that volume does not have float adjustment applied.
volume = reader.get_value(1, Timestamp('2015-06-02', tz='UTC'),
'volume')
self.assertEqual(109631, volume)
def test_unadjusted_get_value_no_data(self):
table = self.bcolz_daily_bar_ctable
reader = BcolzDailyBarReader(table)
# before
with self.assertRaises(NoDataBeforeDate):
reader.get_value(2, Timestamp('2015-06-08', tz='UTC'), 'close')
# after
with self.assertRaises(NoDataAfterDate):
reader.get_value(4, Timestamp('2015-06-16', tz='UTC'), 'close')
def test_unadjusted_get_value_empty_value(self):
reader = self.bcolz_equity_daily_bar_reader
# A sid, day and corresponding index into which to overwrite a zero.
zero_sid = 1
zero_day = Timestamp('2015-06-02', tz='UTC')
zero_ix = reader.sid_day_index(zero_sid, zero_day)
old = reader._spot_col('close')[zero_ix]
try:
# Write a zero into the synthetic pricing data at the day and sid,
# so that a read should now return -1.
# This a little hacky, in lieu of changing the synthetic data set.
reader._spot_col('close')[zero_ix] = 0
close = reader.get_value(zero_sid, zero_day, 'close')
self.assertEqual(-1, close)
finally:
reader._spot_col('close')[zero_ix] = old
class BcolzDailyBarAlwaysReadAllTestCase(BcolzDailyBarTestCase):
"""
Force tests defined in BcolzDailyBarTestCase to always read the entire
column into memory before selecting desired asset data, when invoking
`load_raw_array`.
"""
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD = 0
class BcolzDailyBarNeverReadAllTestCase(BcolzDailyBarTestCase):
"""
Force tests defined in BcolzDailyBarTestCase to never read the entire
column into memory before selecting desired asset data, when invoking
`load_raw_array`.
"""
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD = maxsize
| magne-max/zipline-ja | tests/data/test_us_equity_pricing.py | Python | apache-2.0 | 12,525 |
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import gzip
import jinja2
import json
import mock
import os
import pytest
import shutil
import swiftclient.exceptions
import tarfile
import tempfile
import zpmlib
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from zpmlib import zpm, commands
class TestFindUIUploads:
"""
Tests for :func:`zpmlib.zpm._find_ui_uploads`.
"""
def test_with_files(self):
zapp = {'ui': ['x']}
tar = mock.Mock(getnames=lambda: ['x', 'y'])
matches = zpm._find_ui_uploads(zapp, tar)
assert sorted(matches) == ['x']
def test_with_glob(self):
zapp = {'ui': ['x', 'ui/*']}
tar = mock.Mock(getnames=lambda: ['x', 'y', 'ui/x', 'ui/y'])
matches = zpm._find_ui_uploads(zapp, tar)
assert sorted(matches) == ['ui/x', 'ui/y', 'x']
def test__prepare_job():
# Test for `zpmlib.zpm._prepare_job`.
# Contents of `boot/system.map`, which is expected to be in the
# `myapp.zapp` archive.
myapp_json = [
{'exec': {'args': 'myapp.py', 'path': 'file://python2.7:python'},
'devices': [{'name': 'python2.7'}, {'name': 'stdout'}],
'name': 'myapp'}
]
zapp = {'meta': {'name': 'myapp'}}
zapp_swift_url = ('swift://AUTH_469a9cd20b5a4fc5be9438f66bb5ee04/'
'test_container/hello.zapp')
# Expected result
exp_job_json = copy.deepcopy(myapp_json)
exp_job_json[0]['devices'].append(
{'name': 'image', 'path': zapp_swift_url}
)
tempdir = tempfile.mkdtemp()
try:
tempzapp = os.path.join(tempdir, 'myapp.zapp')
tf = tarfile.open(tempzapp, 'w:gz')
# prepare a sample job description
system_map = os.path.join(tempdir, 'system.map')
with open(system_map, 'w') as fp:
json.dump(myapp_json, fp)
tf.add(system_map, arcname='boot/system.map')
tf.close()
tf = tarfile.open(tempzapp, 'r:gz')
job = zpm._prepare_job(tf, zapp, zapp_swift_url)
tf.close()
assert exp_job_json == job
finally:
shutil.rmtree(tempdir)
class TestFindProjectRoot:
"""
Tests for :func:`zpmlib.zpm.find_project_root`.
"""
def setup_method(self, _method):
self.tempdir = tempfile.mkdtemp()
self.subdir = os.path.join(self.tempdir, 'foo', 'bar')
os.makedirs(self.subdir)
def test_zapp_yaml_exists(self):
try:
zapp_path = os.path.join(self.tempdir, 'zapp.yaml')
# "touch" the file
open(zapp_path, 'w').close()
with mock.patch('os.getcwd') as cwd:
cwd.return_value = self.subdir
root = zpm.find_project_root()
assert root == self.tempdir
finally:
shutil.rmtree(self.tempdir)
def test_zapp_yaml_not_exists(self):
try:
with mock.patch('os.getcwd') as cwd:
cwd.return_value = self.subdir
with pytest.raises(RuntimeError):
zpm.find_project_root()
finally:
shutil.rmtree(self.tempdir)
def test__generate_job_desc():
# Test :func:`zpmlib.zpm._generate_job_desc`.
zapp_yaml_contents = {
'bundling': ['mapper.py', 'reducer.py'],
'execution': {
'groups': [
{'args': r'mapper.py "foo\\, \nbar"',
'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
{'name': 'input_swift_file',
'path': 'swift://AUTH_abc123/foo/bar.txt'},
],
'name': 'mapper',
'connect': ['reducer'],
'env': {'FOO': 'bar', 'BAZ': 5},
'path': 'swift://./container/python'},
{'args': r'mapper.py "foo\\, \nbar"',
'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
{'name': 'input_swift_file',
'path': 'swift://AUTH_abc123/foo/bar.txt'},
],
'name': 'mapper',
'connect': ['reducer'],
'env': {'FOO': 'bar', 'BAZ': 5},
'path': 'swift://~/container/path/to/python'},
{'args': 'reducer.py',
'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
],
'name': 'reducer',
'path': 'file://python2.7:python'},
]
},
'help': {'args': [['loglevel', 'Log Level']],
'description': 'sample map/reduce app'},
        'meta': {'Author-email': 'John Doe <[email protected]>',
'Summary': 'Sample map/reduce app',
'Version': '0.1',
'name': 'mapreduce'}
}
expected_job = [
{'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
{'name': 'input_swift_file',
'path': 'swift://AUTH_abc123/foo/bar.txt'}],
'connect': ['reducer'],
'name': 'mapper',
'exec': {'path': 'swift://./container/python',
'name': 'python',
'args': 'mapper.py foo\\x5c\\x2c\\x20\\x5cnbar',
'env': {'FOO': 'bar', 'BAZ': 5}}},
{'devices': [
{'name': 'python2.7'},
{'name': 'stdout'},
{'name': 'input_swift_file',
'path': 'swift://AUTH_abc123/foo/bar.txt'}],
'connect': ['reducer'],
'name': 'mapper',
'exec': {'path': 'swift://~/container/path/to/python',
'name': 'path/to/python',
'args': 'mapper.py foo\\x5c\\x2c\\x20\\x5cnbar',
'env': {'FOO': 'bar', 'BAZ': 5}}},
{'devices': [
{'name': 'python2.7'},
{'name': 'stdout'}],
'name': 'reducer',
'exec': {'path': 'file://python2.7:python',
'name': 'python',
'args': 'reducer.py'}},
]
actual_job = zpm._generate_job_desc(zapp_yaml_contents)
assert actual_job == expected_job
class TestGetZeroCloudConn:
"""
Tests for :func:`zpmlib.zpm._get_zerocloud_conn`.
"""
def setup_method(self, _method):
self.v1_args = mock.Mock()
self.v1_args.auth_version = '1.0'
self.v1_args.auth = 'http://example.com/auth/v1.0'
self.v1_args.user = 'tenant1:user1'
self.v1_args.key = 'secret'
self.v2_args = mock.Mock()
self.v2_args.auth_version = '2.0'
self.v2_args.os_auth_url = 'http://example.com/v2.0'
self.v2_args.os_username = 'user1'
self.v2_args.os_password = 'secret'
self.v2_args.os_tenant_name = 'tenant1'
def test_v1(self):
conn = zpm._get_zerocloud_conn(self.v1_args)
assert conn.authurl == self.v1_args.auth
assert conn.user == self.v1_args.user
assert conn.key == self.v1_args.key
def test_v1_fail(self):
self.v1_args.user = None
with pytest.raises(zpmlib.ZPMException):
zpm._get_zerocloud_conn(self.v1_args)
def test_v2(self):
conn = zpm._get_zerocloud_conn(self.v2_args)
assert conn.authurl == self.v2_args.os_auth_url
assert conn.user == self.v2_args.os_username
assert conn.key == self.v2_args.os_password
assert conn.os_options['tenant_name'] == self.v2_args.os_tenant_name
def test_v2_fail(self):
self.v2_args.os_tenant_name = None
with pytest.raises(zpmlib.ZPMException):
zpm._get_zerocloud_conn(self.v2_args)
def test_no_auth_details_given(self):
args = mock.Mock()
args.auth_version = None
args.auth = None
args.user = None
args.key = None
args.os_auth_url = None
args.os_username = None
args.os_password = None
args.os_tenant_name = None
env = dict.fromkeys([
'ST_AUTH', 'ST_USER', 'ST_KEY',
'OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME',
], '')
with mock.patch.dict('os.environ', env):
with pytest.raises(zpmlib.ZPMException):
zpm._get_zerocloud_conn(args)
class TestDeploy:
"""
Tests :function:`zpmlib.zpm.deploy` and its helper functions.
"""
@classmethod
def setup_class(cls):
cls.zapp_yaml_contents = """\
execution:
groups:
- name: "hello"
path: file://python2.7:python
args: "hello.py"
devices:
- name: python
- name: stdout
meta:
Version: ""
name: "hello"
Author-email: ""
Summary: ""
help:
description: ""
args:
- ["", ""]
bundling:
- "hello.py"
ui:
- "index.html"
- "foo.js.tmpl"
""".encode('utf-8')
cls.job_json_contents = json.dumps([
{'exec': {'args': 'hello.py', 'path': 'file://python2.7:python'},
'devices': [{'name': 'python'}, {'name': 'stdout'}],
'name': 'hello'}
]).encode('utf-8')
cls.job_json_prepped = json.dumps([
{"exec": {"path": "file://python2.7:python", "args": "hello.py"},
"devices": [{"name": "python"}, {"name": "stdout"},
{"name": "image",
"path": "swift:///container1/foo/bar/zapp.yaml"}],
"name": "hello"}
]).encode('utf-8')
cls.hellopy_contents = b"""\
print("Hello from ZeroVM!")
"""
cls.indexhtml_contents = bytearray("""\
<html>
<head><title>Hello!</title></head>
<body>Hello from ZeroVM!</body>
</html>""", 'utf-8')
cls.foojstmpl_contents = b"var opts = {{ auth_opts }};"
cls.temp_dir = tempfile.mkdtemp()
cls.temp_zapp_file = '%s/zapp.yaml' % cls.temp_dir
tar = tarfile.open(cls.temp_zapp_file, 'w:gz')
info = tarfile.TarInfo(name='foo.js.tmpl')
info.size = len(cls.foojstmpl_contents)
tar.addfile(info, BytesIO(cls.foojstmpl_contents))
info = tarfile.TarInfo(name='boot/system.map')
info.size = len(cls.job_json_contents)
tar.addfile(info, BytesIO(cls.job_json_contents))
info = tarfile.TarInfo(name='zapp.yaml')
info.size = len(cls.zapp_yaml_contents)
tar.addfile(info, BytesIO(cls.zapp_yaml_contents))
info = tarfile.TarInfo(name='hello.py')
info.size = len(cls.hellopy_contents)
tar.addfile(info, BytesIO(cls.hellopy_contents))
info = tarfile.TarInfo(name='index.html')
info.size = len(cls.indexhtml_contents)
tar.addfile(info, BytesIO(cls.indexhtml_contents))
tar.close()
@classmethod
def teardown_class(cls):
shutil.rmtree(cls.temp_dir)
def setup_method(self, _method):
self.conn = mock.Mock()
self.conn.get_container.return_value = (
{}, # response headers
[], # object list
)
self.target = 'container1/foo/bar'
self.zapp_path = self.temp_zapp_file
self.conn.url = 'http://example.com'
args = mock.Mock()
args.auth = 'http://example.com/auth/v1.0'
args.user = 'user1'
args.key = 'secret'
self.auth_opts = jinja2.Markup(
json.dumps(zpm._prepare_auth('1.0', args, self.conn))
)
def test__generate_uploads(self):
uploads = zpm._generate_uploads(self.conn, self.target,
self.zapp_path, self.auth_opts)
uploads = list(uploads)
foojs_tmpl = jinja2.Template(self.foojstmpl_contents.decode())
foojs = foojs_tmpl.render(auth_opts=self.auth_opts)
expected_uploads = [
('%s/zapp.yaml' % self.target, gzip.open(self.zapp_path).read(),
'application/x-tar'),
('%s/boot/system.map' % self.target,
self.job_json_prepped.decode('utf-8'),
'application/json'),
('%s/foo.js' % self.target, foojs, None),
('%s/index.html' % self.target, self.indexhtml_contents, None),
]
assert uploads[0] == expected_uploads[0]
assert uploads[1][0] == expected_uploads[1][0]
assert json.loads(uploads[1][1]) == json.loads(expected_uploads[1][1])
assert uploads[2] == expected_uploads[2]
assert uploads[3] == expected_uploads[3]
def test__deploy_zapp(self):
with mock.patch('zpmlib.zpm._generate_uploads') as gu:
gu.return_value = iter([('x/a', 'b', None), ('x/c', 'd', None)])
zpm._deploy_zapp(self.conn, self.target, self.zapp_path,
self.auth_opts)
put_object = self.conn.put_object
assert put_object.call_count == 2
assert put_object.call_args_list == [
mock.call('x', 'a', 'b', content_type=None),
mock.call('x', 'c', 'd', content_type=None)]
def test__deploy_zapp_with_index_html(self):
with mock.patch('zpmlib.zpm._generate_uploads') as gu:
gu.return_value = iter([('cont/dir/index.html', 'data',
'text/html')])
index = zpm._deploy_zapp(self.conn, 'cont', None, None)
assert index == 'cont/dir/index.html'
put_object = self.conn.put_object
assert put_object.call_count == 1
assert put_object.call_args_list == [
mock.call('cont', 'dir/index.html', 'data',
content_type='text/html')
]
def test__deploy_zapp_without_index_html(self):
with mock.patch('zpmlib.zpm._generate_uploads') as gu:
gu.return_value = iter([('cont/foo.html', 'data', 'text/html')])
index = zpm._deploy_zapp(self.conn, 'cont', None, None)
assert index == 'cont/'
put_object = self.conn.put_object
assert put_object.call_count == 1
assert put_object.call_args_list == [
mock.call('cont', 'foo.html', 'data',
content_type='text/html')
]
def test__deploy_zapp_container_not_empty(self):
self.conn.get_container.return_value = (
{}, # response headers
# The actual files list response from Swift is a list of
# dictionaries. For these tests, we don't actually check the
# content; just length of the file list.
['file1'],
)
with pytest.raises(zpmlib.ZPMException) as exc:
zpm._deploy_zapp(self.conn, 'target/dir1/dir2', None, None)
assert str(exc.value) == (
"Target container ('target') is not empty.\n"
"Deploying to a non-empty container can cause consistency "
"problems with overwritten objects.\n"
"Specify the flag `--force/-f` to overwrite anyway."
)
assert self.conn.get_container.call_args_list == [mock.call('target')]
def test__deploy_zapp_container_not_empty_force(self):
self.conn.get_container.return_value = ({}, ['file1'])
with mock.patch('zpmlib.zpm._generate_uploads') as gu:
gu.return_value = iter([('x/a', 'b', None), ('x/c', 'd', None)])
zpm._deploy_zapp(self.conn, self.target, self.zapp_path,
self.auth_opts, force=True)
put_object = self.conn.put_object
assert put_object.call_count == 2
assert put_object.call_args_list == [
mock.call('x', 'a', 'b', content_type=None),
mock.call('x', 'c', 'd', content_type=None)]
def test__deploy_zapp_container_doesnt_exist(self):
self.conn.get_container.side_effect = (
swiftclient.exceptions.ClientException(None)
)
with mock.patch('zpmlib.zpm._generate_uploads') as gu:
gu.return_value = iter([('target/dir/foo.py', 'data', None)])
zpm._deploy_zapp(self.conn, 'target/dir', None, None)
# check that the container is created
assert self.conn.put_container.call_count == 1
assert self.conn.put_container.call_args_list == [
mock.call('target')
]
# check that files are uploaded correctly
assert self.conn.put_object.call_count == 1
assert self.conn.put_object.call_args_list == [
mock.call('target', 'dir/foo.py', 'data', content_type=None)
]
def test_deploy_project_execute(self):
job_path = 'boot/system.map'
job_json = self.job_json_contents.decode('utf-8')
job_dict = json.loads(job_json)
class FakeZeroCloudConnection(mock.Mock):
url = 'http://127.0.0.1'
token = 'abc123'
def post_job(self, job, response_dict=None,
response_body_buffer=None):
response_dict['status'] = 200
response_dict['reason'] = 'OK'
response_dict['headers'] = {
'x-nexe-system': 'node-1',
'x-nexe-cdr-line': (
'5.121, 4.993, 0.13 3.84 1025 75943662 23 735 8 399 0 '
'0'
),
'x-nexe-status': 'ok',
'x-nexe-retcode': '0',
}
# Check the job is passed properly here
assert job == job_dict
def get_container(self, *args, **kwargs):
return {}, []
self.conn = FakeZeroCloudConnection()
self.conn.auth_version = '1.0'
parser = commands.set_up_arg_parser()
args = parser.parse_args(['deploy', 'foo', self.zapp_path, '--exec'])
with mock.patch('zpmlib.zpm._get_zerocloud_conn') as gzc:
gzc.return_value = self.conn
self.conn.get_object = mock.Mock()
get_object = self.conn.get_object
get_object.return_value = ([], job_json)
zpm.deploy_project(args)
assert get_object.call_args_list == [mock.call('foo', job_path)]
def test__prepare_auth_v0():
# Test for :func:`zpmlib.zpm._prepare_auth`, with version 0.0
version = '0.0'
args = None
conn = mock.Mock()
conn.url = 'http://example.com'
expected = {
'version': '0.0',
'swiftUrl': 'http://example.com',
}
assert zpm._prepare_auth(version, args, conn) == expected
def test__prepare_auth_v1():
# Test for :func:`zpmlib.zpm._prepare_auth`, with version 1.0
version = '1.0'
args = mock.Mock()
args.auth = 'http://example.com/auth/v1.0'
args.user = 'user1'
args.key = 'secret'
conn = None
expected = {
'version': '1.0',
'authUrl': 'http://example.com/auth/v1.0',
'username': 'user1',
'password': 'secret',
}
assert zpm._prepare_auth(version, args, conn) == expected
# Make sure that we're robust enough to handle slightly varied version
# inputs.
version = '1'
assert zpm._prepare_auth(version, args, conn) == expected
def test__prepare_auth_v2():
# Test for :func:`zpmlib.zpm._prepare_auth`, with version 2.0
version = '2.0'
args = mock.Mock()
args.os_auth_url = 'http://example.com:5000/v2.0'
args.os_username = 'user1'
args.os_tenant_name = 'tenant1'
args.os_password = 'secret'
conn = None
expected = {
'version': '2.0',
'authUrl': 'http://example.com:5000/v2.0',
'tenant': 'tenant1',
'username': 'user1',
'password': 'secret',
}
assert zpm._prepare_auth(version, args, conn) == expected
# Make sure that we're robust enough to handle slightly varied version
# inputs.
version = '2'
assert zpm._prepare_auth(version, args, conn) == expected
class TestGuessAuthVersion:
def setup_method(self, _method):
self.args = mock.Mock()
self.args.auth = None
self.args.user = None
self.args.key = None
self.args.os_auth_url = None
self.args.os_username = None
self.args.os_password = None
self.args.os_tenant_name = None
def test_args_v1(self):
args = self.args
args.auth = 'auth'
args.user = 'user'
args.key = 'key'
args.os_auth_url = 'authurl'
assert zpm._guess_auth_version(args) == '1.0'
def test_args_v2(self):
args = self.args
args.os_auth_url = 'authurl'
args.os_username = 'username'
args.os_password = 'password'
args.os_tenant_name = 'tenant'
args.auth = 'auth'
assert zpm._guess_auth_version(args) == '2.0'
def test_args_default(self):
args = self.args
args.auth = 'auth'
args.user = 'user'
args.key = 'key'
args.os_auth_url = 'authurl'
args.os_username = 'username'
args.os_password = 'password'
args.os_tenant_name = 'tenant'
assert zpm._guess_auth_version(args) == '1.0'
def test_env_v1(self):
env = dict(
ST_AUTH='auth',
ST_USER='user',
ST_KEY='key',
OS_AUTH_URL='',
OS_USERNAME='username',
OS_PASSWORD='',
OS_TENANT_NAME='',
)
with mock.patch.dict('os.environ', env):
assert zpm._guess_auth_version(self.args) == '1.0'
def test_env_v2(self):
env = dict(
ST_AUTH='',
ST_USER='user',
ST_KEY='',
OS_AUTH_URL='authurl',
OS_USERNAME='username',
OS_PASSWORD='password',
OS_TENANT_NAME='tenant',
)
with mock.patch.dict('os.environ', env):
assert zpm._guess_auth_version(self.args) == '2.0'
def test_env_default(self):
env = dict(
ST_AUTH='auth',
ST_USER='user',
ST_KEY='key',
OS_AUTH_URL='authurl',
OS_USERNAME='username',
OS_PASSWORD='password',
OS_TENANT_NAME='tenant',
)
with mock.patch.dict('os.environ', env):
assert zpm._guess_auth_version(self.args) == '1.0'
def test_none(self):
env = dict.fromkeys([
'ST_AUTH', 'ST_USER', 'ST_KEY',
'OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME',
], '')
with mock.patch.dict('os.environ', env):
assert zpm._guess_auth_version(self.args) is None
class TestExecSummaryTable:
def test__get_exec_table_data_1_row(self):
headers = {
'content-length': '20',
'content-type': 'text/html',
'date': 'Tue, 26 Aug 2014 09:27:08 GMT',
'etag': 'af0983cb8fef30642bae9ba0010e7a77',
'x-chain-total-time': '3.920',
'x-nexe-cdr-line': (
'3.920, 3.913, 0.11 3.37 1025 75943644 2 20 0 0 0 0'
),
'x-nexe-etag': 'disabled',
'x-nexe-policy': 'Policy-0',
'x-nexe-retcode': '0',
'x-nexe-status': 'ok',
'x-nexe-system': 'hello',
'x-nexe-validation': '0',
'x-timestamp': '1409045228.85265',
'x-trans-id': 'tx1d61239ed02a56fbbfe5d-0053fc52e9',
'x-zerovm-device': 'stdout',
}
expected_total_t = '3.920'
expected_table = [
['hello', 'ok', '0', '3.913', '0.11', '3.37', '1025', '75943644',
'2', '20', '0', '0', '0', '0']
]
actual_total_t, actual_table = zpm._get_exec_table_data(headers)
assert actual_total_t == expected_total_t
assert actual_table == expected_table
def test__get_exec_table_data_many_rows(self):
cdr_line = (
'5.121, '
'4.993, 0.13 3.84 1025 75943662 23 735 8 399 0 0,'
'4.511, 0.12 4.00 1026 75943758 0 0 0 0 1 11,'
'4.468, 0.10 3.96 1026 75943758 0 0 0 0 1 11,'
'4.965, 0.18 4.20 1025 75943664 0 0 15 33 5 100,'
'4.962, 0.13 3.94 1025 75943664 0 0 15 33 5 100'
)
headers = {
'content-length': '0',
'content-type': 'application/x-gtar',
'date': 'Tue, 26 Aug 2014 09:29:44 GMT',
'etag': '753e7eac4298c4994a7a19c7c783bad5',
'x-chain-total-time': '5.121',
'x-nexe-cdr-line': cdr_line,
'x-nexe-etag': 'disabled,disabled,disabled,disabled,disabled',
'x-nexe-policy': 'Policy-0,Policy-0,Policy-0,Policy-0,Policy-0',
'x-nexe-retcode': '1,0,0,0,0',
'x-nexe-status': 'some error,ok,ok,ok,ok',
'x-nexe-system': 'combiner,mapper-1,mapper-2,reducer-1,reducer-2',
'x-nexe-validation': '1,0,0,0,0',
'x-timestamp': '1409045384.22744',
'x-trans-id': 'txa881f777891648f4834d6-0053fc5382',
}
expected_total_t = '5.121'
expected_table = [
['combiner', 'some error', '1', '4.993', '0.13', '3.84', '1025',
'75943662', '23', '735', '8', '399', '0', '0'],
['mapper-1', 'ok', '0', '4.511', '0.12', '4.00', '1026',
'75943758', '0', '0', '0', '0', '1', '11'],
['mapper-2', 'ok', '0', '4.468', '0.10', '3.96', '1026',
'75943758', '0', '0', '0', '0', '1', '11'],
['reducer-1', 'ok', '0', '4.965', '0.18', '4.20', '1025',
'75943664', '0', '0', '15', '33', '5', '100'],
['reducer-2', 'ok', '0', '4.962', '0.13', '3.94', '1025',
'75943664', '0', '0', '15', '33', '5', '100'],
]
actual_total_t, actual_table = zpm._get_exec_table_data(headers)
assert actual_total_t == expected_total_t
assert actual_table == expected_table
| zerovm/zerovm-cli | zpmlib/tests/test_zpm.py | Python | apache-2.0 | 26,437 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '11.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, SpatialDropout1D,Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import numpy as np
from numpy import vstack, row_stack, asarray
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cross_validation import train_test_split
from pandas import read_csv
from pymystem3 import Mystem
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from collections import Counter
from twarc import Twarc
import json
import codecs
import tweepy
auth = tweepy.OAuthHandler('DkNRJdzhUnThKJ7G5C9IftdUp', 'C14fr0ews91xJI8AH1I3BNhZrZ2gdlyz9KqnXFPQOnmZqJUmc7')
auth.set_access_token('866132837082296320-GRx4gxwbRVffxwXgMhjZhXbxgn4RaM0', 'rhtMycE2gFiJchJVIJtlEIf7qgkvqtCbmue9rPDoXEpkt')
api = tweepy.API(auth)
from PyQt5 import QtCore, QtGui, QtWidgets
# Create a summary of a tweet, only showing relevant fields.
def summarize(tweet, extra_fields = None):
new_tweet = {}
for field, value in tweet.items():
if field in ["text", "id_str", "screen_name", "retweet_count", "favorite_count", "in_reply_to_status_id_str", "in_reply_to_screen_name", "in_reply_to_user_id_str"] and value is not None:
new_tweet[field] = value
elif extra_fields and field in extra_fields:
new_tweet[field] = value
elif field in ["retweeted_status", "quoted_status", "user"]:
new_tweet[field] = summarize(value)
return new_tweet
# Print out a tweet, with optional colorizing of selected fields.
def dump(tweet, colorize_fields=None, summarize_tweet=True):
    for line in json.dumps(summarize(tweet) if summarize_tweet else tweet, indent=4, sort_keys=True).splitlines():
        for colorize_field in colorize_fields or []:
            if "\"{}\":".format(colorize_field) in line:
                # Highlight matching lines with ANSI escape codes. The original
                # printed bare "\x1b" markers, which are incomplete escape
                # sequences; red ("\x1b[31m" ... "\x1b[0m") is assumed here.
                print("\x1b[31m" + line + "\x1b[0m")
                break
        else:
            print(line)
# The Twarc client `t` was never created in the original file; it is
# instantiated here with the same Twitter credentials used for tweepy above.
# The tweet id list is left empty as in the original, so a real id still has
# to be supplied for hydrate() to return anything.
t = Twarc('DkNRJdzhUnThKJ7G5C9IftdUp', 'C14fr0ews91xJI8AH1I3BNhZrZ2gdlyz9KqnXFPQOnmZqJUmc7',
          '866132837082296320-GRx4gxwbRVffxwXgMhjZhXbxgn4RaM0', 'rhtMycE2gFiJchJVIJtlEIf7qgkvqtCbmue9rPDoXEpkt')
tweet = list(t.hydrate(['']))[0]
dump(summarize(tweet, extra_fields=['in_reply_to_status_id_str', 'in_reply_to_user_id']), colorize_fields=['in_reply_to_status_id', 'in_reply_to_status_id_str', 'in_reply_to_screen_name', 'in_reply_to_user_id', 'in_reply_to_user_id_str'], summarize_tweet=False)
def stemconvtext(text):
return(''.join(Mystem().lemmatize(text)))
# Hyperparameters for the CNN text classifier. The original file never defined
# these names, which would raise a NameError below; the values here are
# assumptions taken from the standard Keras CNN text-classification example.
max_features = 5000
maxlen = 400
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='linear',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('linear'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
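# The original script defines the network but never compiles it. A minimal
# sketch of the missing step is shown below; the loss/optimizer choice is an
# assumption (binary cross-entropy matches the single sigmoid output above).
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])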
class Ui_MainWindow(object):
def load_tweet(self):
tweet = api.get_status(self.plainTextEdit_2.toPlainText())
self.textBrowser_2.setPlainText(tweet.text)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(911, 597)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.layoutWidget = QtWidgets.QWidget(self.tab)
self.layoutWidget.setGeometry(QtCore.QRect(510, 10, 371, 411))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.checkBox = QtWidgets.QCheckBox(self.layoutWidget)
self.checkBox.setObjectName("checkBox")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.checkBox)
self.dateEdit_2 = QtWidgets.QDateEdit(self.layoutWidget)
self.dateEdit_2.setMinimumDateTime(QtCore.QDateTime(QtCore.QDate(2000, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateEdit_2.setMaximumDate(QtCore.QDate(2017, 6, 30))
self.dateEdit_2.setObjectName("dateEdit_2")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.dateEdit_2)
self.dateEdit = QtWidgets.QDateEdit(self.layoutWidget)
self.dateEdit.setDateTime(QtCore.QDateTime(QtCore.QDate(2017, 6, 15), QtCore.QTime(0, 0, 0)))
self.dateEdit.setObjectName("dateEdit")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.dateEdit)
self.label_2 = QtWidgets.QLabel(self.layoutWidget)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.label_2)
self.spinBox = QtWidgets.QSpinBox(self.layoutWidget)
self.spinBox.setMaximum(3)
self.spinBox.setObjectName("spinBox")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.spinBox)
self.label = QtWidgets.QLabel(self.layoutWidget)
self.label.setObjectName("label")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.label)
self.label_3 = QtWidgets.QLabel(self.layoutWidget)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.label_3)
self.verticalLayout_2.addLayout(self.formLayout)
self.label_4 = QtWidgets.QLabel(self.layoutWidget)
self.label_4.setObjectName("label_4")
self.verticalLayout_2.addWidget(self.label_4)
self.plainTextEdit_2 = QtWidgets.QPlainTextEdit(self.layoutWidget)
self.plainTextEdit_2.setObjectName("plainTextEdit_2")
self.verticalLayout_2.addWidget(self.plainTextEdit_2)
self.label_5 = QtWidgets.QLabel(self.layoutWidget)
self.label_5.setObjectName("label_5")
self.verticalLayout_2.addWidget(self.label_5)
self.textBrowser = QtWidgets.QTextBrowser(self.layoutWidget)
self.textBrowser.setEnabled(True)
self.textBrowser.setObjectName("textBrowser")
self.verticalLayout_2.addWidget(self.textBrowser)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_7 = QtWidgets.QLabel(self.layoutWidget)
self.label_7.setObjectName("label_7")
self.horizontalLayout_2.addWidget(self.label_7)
self.lcdNumber_5 = QtWidgets.QLCDNumber(self.layoutWidget)
self.lcdNumber_5.setProperty("intValue", 0)
self.lcdNumber_5.setObjectName("lcdNumber_5")
self.horizontalLayout_2.addWidget(self.lcdNumber_5)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.layoutWidget1 = QtWidgets.QWidget(self.tab)
self.layoutWidget1.setGeometry(QtCore.QRect(0, 0, 481, 451))
self.layoutWidget1.setObjectName("layoutWidget1")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.layoutWidget1)
self.verticalLayout_3.setContentsMargins(1, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.textBrowser_2 = QtWidgets.QTextBrowser(self.layoutWidget1)
self.textBrowser_2.setEnabled(True)
self.textBrowser_2.setObjectName("textBrowser_2")
self.verticalLayout_3.addWidget(self.textBrowser_2)
self.lcdNumber_4 = QtWidgets.QLCDNumber(self.layoutWidget1)
self.lcdNumber_4.setProperty("intValue", 0)
self.lcdNumber_4.setObjectName("lcdNumber_4")
self.verticalLayout_3.addWidget(self.lcdNumber_4)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_2)
self.textBrowser_3.setGeometry(QtCore.QRect(0, 0, 411, 431))
self.textBrowser_3.setObjectName("textBrowser_3")
self.lcdNumber = QtWidgets.QLCDNumber(self.tab_2)
self.lcdNumber.setEnabled(True)
self.lcdNumber.setGeometry(QtCore.QRect(414, 14, 421, 31))
self.lcdNumber.setSmallDecimalPoint(False)
self.lcdNumber.setProperty("intValue", 0)
self.lcdNumber.setObjectName("lcdNumber")
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.textBrowser_4 = QtWidgets.QTextBrowser(self.tab_3)
self.textBrowser_4.setGeometry(QtCore.QRect(0, 0, 411, 431))
self.textBrowser_4.setObjectName("textBrowser_4")
self.lcdNumber_2 = QtWidgets.QLCDNumber(self.tab_3)
self.lcdNumber_2.setEnabled(True)
self.lcdNumber_2.setGeometry(QtCore.QRect(414, 14, 421, 31))
self.lcdNumber_2.setProperty("intValue", 0)
self.lcdNumber_2.setObjectName("lcdNumber_2")
self.tabWidget.addTab(self.tab_3, "")
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName("tab_4")
self.textBrowser_5 = QtWidgets.QTextBrowser(self.tab_4)
self.textBrowser_5.setGeometry(QtCore.QRect(0, 0, 411, 431))
self.textBrowser_5.setObjectName("textBrowser_5")
self.lcdNumber_3 = QtWidgets.QLCDNumber(self.tab_4)
self.lcdNumber_3.setEnabled(True)
self.lcdNumber_3.setGeometry(QtCore.QRect(414, 14, 421, 31))
self.lcdNumber_3.setProperty("intValue", 0)
self.lcdNumber_3.setObjectName("lcdNumber_3")
self.tabWidget.addTab(self.tab_4, "")
self.horizontalLayout.addWidget(self.tabWidget)
self.verticalLayout.addLayout(self.horizontalLayout)
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setObjectName("label_6")
self.verticalLayout.addWidget(self.label_6)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
self.verticalLayout.addWidget(self.pushButton)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 911, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.pushButton.clicked.connect(self.load_tweet)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
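        # The UI strings below are Russian. Approximate English translations:
        # "Анализировать комментарии" = "Analyze comments";
        # "Количество комментариев" = "Number of comments";
        # "верхняя/нижняя граница даты" = "upper/lower date bound";
        # "Id на пост" = "Post id";
        # "Список первых трех комментариев выбранных по дате" =
        #     "List of the first three comments selected by date";
        # "Средняя тональность всех комментариев" = "Average sentiment of all comments";
        # "Пост" = "Post"; "Первый/Второй/Третий комментарий" = "First/Second/Third comment";
        # "Эмоциональная тональность от 0 ... до 100 ..." =
        #     "Sentiment from 0 (absolute negative) to 100 (absolute positive)";
        # "Анализ" = "Analyze".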
self.checkBox.setText(_translate("MainWindow", "Анализировать комментарии"))
self.label_2.setText(_translate("MainWindow", "Количество комментариев"))
self.label.setText(_translate("MainWindow", "верхняя граница даты"))
self.label_3.setText(_translate("MainWindow", "нижняя граница даты"))
self.label_4.setText(_translate("MainWindow", "Id на пост"))
self.plainTextEdit_2.setPlainText(_translate("MainWindow", ""))
self.label_5.setText(_translate("MainWindow", "Список первых трех комментариев выбранных по дате"))
self.textBrowser.setHtml(_translate("MainWindow", ""))
self.label_7.setText(_translate("MainWindow", "Средняя тональность всех комментариев "))
self.textBrowser_2.setHtml(_translate("MainWindow", ""))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Пост"))
self.textBrowser_3.setHtml(_translate("MainWindow", ""))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Первый комментарий"))
self.textBrowser_4.setHtml(_translate("MainWindow", ""))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "Второй комментарий"))
self.textBrowser_5.setHtml(_translate("MainWindow", ""))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("MainWindow", "Третий комментарий"))
self.label_6.setText(_translate("MainWindow", "Эмоциональная тональность от 0 - абсолютный негатив, до 100 - абсолютный позитив"))
self.pushButton.setText(_translate("MainWindow", "Анализ"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| nstustudent/krotyuk_social_network_sa | Interface.py | Python | apache-2.0 | 14,186 |
# import needed models
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
# create user object
class Person(User):
internal_id = models.CharField(max_length=25, null=True, blank=True)
verified = models.NullBooleanField(default=False)
approval_date = models.DateTimeField(null=True, blank=True)
# create list object
class List(models.Model):
name = models.CharField('List Name', max_length=50)
created_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
links = models.ManyToManyField("Link")
def __str__(self):
return self.name
# create link object
class Link(models.Model):
name = models.CharField('Link Name', max_length=50)
created_date = models.DateTimeField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
tags = models.TextField(null=True, blank=True)
def __str__(self):
return self.name
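# A minimal usage sketch (assumed shell/view code, not part of this module):
# lists and links are tied together through the ManyToMany field on List.
#
#     reading = List.objects.create(name='Reading')
#     link = Link.objects.create(name='Django docs', tags='python django')
#     reading.links.add(link)
#     reading.links.all()      # <QuerySet [<Link: Django docs>]>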
| prakashksinha/playground | bookmarks/models.py | Python | apache-2.0 | 1,100 |
# coding=utf-8
# Copyright 2022 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for resized_fuse."""
import tensorflow as tf
from deeplab2.model.layers import resized_fuse
class ResizedFuseTest(tf.test.TestCase):
def test_resize_and_fuse_features(self):
batch, height, width, channels = 2, 11, 11, 6
smaller_height, smaller_width, smaller_channels = 6, 6, 3
larger_height1, larger_width1 = 21, 21 # Stride 2 conv.
larger_height2, larger_width2 = 22, 22 # Stride 2 conv.
larger_height3, larger_width3 = 23, 23 # Conv and resize.
feature_list = []
feature_list.append(tf.zeros([batch, smaller_height, smaller_width,
smaller_channels]))
feature_list.append(tf.zeros([batch, smaller_height, smaller_width,
channels]))
feature_list.append(tf.zeros([batch, height, width, smaller_channels]))
feature_list.append(tf.zeros([batch, height, width, channels]))
feature_list.append(tf.zeros([batch, larger_height1, larger_width1,
channels]))
feature_list.append(tf.zeros([batch, larger_height1, larger_width1,
smaller_channels]))
feature_list.append(tf.zeros([batch, larger_height2, larger_width2,
smaller_channels]))
feature_list.append(tf.zeros([batch, larger_height3, larger_width3,
smaller_channels]))
layer = resized_fuse.ResizedFuse(name='fuse',
height=height,
width=width,
num_channels=channels)
output = layer(feature_list)
self.assertEqual(output.get_shape().as_list(), [batch, height, width,
channels])
if __name__ == '__main__':
tf.test.main()
| google-research/deeplab2 | model/layers/resized_fuse_test.py | Python | apache-2.0 | 2,422 |
class Drawable(object):
def draw(self, display_screen, dT):
pass | bpeck/tumblr-display | src/Drawable.py | Python | apache-2.0 | 76 |
# Generated by Django 3.0.3 on 2020-08-20 16:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0002_citation'),
]
operations = [
migrations.AddField(
model_name='citation',
name='page_name',
field=models.TextField(default=None),
preserve_default=False,
),
]
| protwis/protwis | common/migrations/0003_citation_page_name.py | Python | apache-2.0 | 413 |
import pcbnew
from . import toggle_visibility
class ToggleVisibilityPlugin(pcbnew.ActionPlugin):
def defaults(self):
self.name = "Toggle visibility of value/reference (of selected modules)"
self.category = "A descriptive category name"
self.description = "This plugin toggles the visibility of any selected module values/references"
def Run(self):
# The entry function of the plugin that is executed on user action
toggle_visibility.ToggleVisibility()
ToggleVisibilityPlugin().register() # Instantiate and register to Pcbnew
print("done adding toggle")
| mmccoo/kicad_mmccoo | toggle_visibility/__init__.py | Python | apache-2.0 | 606 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque, OrderedDict
import numpy as np
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.data_management.path_builder import PathBuilder
from rlkit.samplers.data_collector.base import StepCollector
class MdpStepCollector(StepCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
):
if render_kwargs is None:
render_kwargs = {}
self._env = env
self._policy = policy
self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._render = render
self._render_kwargs = render_kwargs
self._num_steps_total = 0
self._num_paths_total = 0
self._obs = None # cache variable
def get_epoch_paths(self):
return self._epoch_paths
def end_epoch(self, epoch):
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._obs = None
def get_diagnostics(self):
path_lens = [len(path['actions']) for path in self._epoch_paths]
stats = OrderedDict([
('num steps total', self._num_steps_total),
('num paths total', self._num_paths_total),
])
stats.update(create_stats_ordered_dict(
"path length",
path_lens,
always_show_all_stats=True,
))
return stats
def get_snapshot(self):
return dict(
env=self._env,
policy=self._policy,
)
def collect_new_steps(
self,
max_path_length,
num_steps,
discard_incomplete_paths,
):
for _ in range(num_steps):
self.collect_one_step(max_path_length, discard_incomplete_paths)
def collect_one_step(
self,
max_path_length,
discard_incomplete_paths,
):
if self._obs is None:
self._start_new_rollout()
action, agent_info = self._policy.get_action(self._obs)
next_ob, reward, terminal, env_info = (
self._env.step(action)
)
if self._render:
self._env.render(**self._render_kwargs)
terminal = np.array([terminal])
reward = np.array([reward])
# store path obs
self._current_path_builder.add_all(
observations=self._obs,
actions=action,
rewards=reward,
next_observations=next_ob,
terminals=terminal,
agent_infos=agent_info,
env_infos=env_info,
)
if terminal or len(self._current_path_builder) >= max_path_length:
self._handle_rollout_ending(max_path_length,
discard_incomplete_paths)
self._start_new_rollout()
else:
self._obs = next_ob
def _start_new_rollout(self):
self._current_path_builder = PathBuilder()
self._obs = self._env.reset()
def _handle_rollout_ending(
self,
max_path_length,
discard_incomplete_paths
):
if len(self._current_path_builder) > 0:
path = self._current_path_builder.get_all_stacked()
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
return
self._epoch_paths.append(path)
self._num_paths_total += 1
self._num_steps_total += path_len
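# A minimal usage sketch (assumed caller code; `env`, `policy` and
# `replay_buffer` are placeholders, not defined in this module): the collector
# is driven step by step and its paths are drained once per epoch.
#
#     collector = MdpStepCollector(env, policy)
#     collector.collect_new_steps(max_path_length=1000, num_steps=100,
#                                 discard_incomplete_paths=False)
#     for path in collector.get_epoch_paths():
#         replay_buffer.add_path(path)
#     collector.end_epoch(epoch=0)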
class GoalConditionedStepCollector(StepCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
observation_key='observation',
desired_goal_key='desired_goal',
):
if render_kwargs is None:
render_kwargs = {}
self._env = env
self._policy = policy
self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._render = render
self._render_kwargs = render_kwargs
self._observation_key = observation_key
self._desired_goal_key = desired_goal_key
self._num_steps_total = 0
self._num_paths_total = 0
self._obs = None # cache variable
def get_epoch_paths(self):
return self._epoch_paths
def end_epoch(self, epoch):
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._obs = None
def get_diagnostics(self):
path_lens = [len(path['actions']) for path in self._epoch_paths]
stats = OrderedDict([
('num steps total', self._num_steps_total),
('num paths total', self._num_paths_total),
])
stats.update(create_stats_ordered_dict(
"path length",
path_lens,
always_show_all_stats=True,
))
return stats
def get_snapshot(self):
return dict(
env=self._env,
policy=self._policy,
observation_key=self._observation_key,
desired_goal_key=self._desired_goal_key,
)
def start_collection(self):
self._start_new_rollout()
def end_collection(self):
epoch_paths = self.get_epoch_paths()
return epoch_paths
def collect_new_steps(
self,
max_path_length,
num_steps,
discard_incomplete_paths,
):
for _ in range(num_steps):
self.collect_one_step(max_path_length, discard_incomplete_paths)
def collect_one_step(
self,
max_path_length,
discard_incomplete_paths,
):
if self._obs is None:
self._start_new_rollout()
new_obs = np.hstack((
self._obs[self._observation_key],
self._obs[self._desired_goal_key],
))
action, agent_info = self._policy.get_action(new_obs)
next_ob, reward, terminal, env_info = (
self._env.step(action)
)
if self._render:
self._env.render(**self._render_kwargs)
terminal = np.array([terminal])
reward = np.array([reward])
# store path obs
self._current_path_builder.add_all(
observations=self._obs,
actions=action,
rewards=reward,
next_observations=next_ob,
terminals=terminal,
agent_infos=agent_info,
env_infos=env_info,
)
if terminal or len(self._current_path_builder) >= max_path_length:
self._handle_rollout_ending(max_path_length,
discard_incomplete_paths)
self._start_new_rollout()
else:
self._obs = next_ob
def _start_new_rollout(self):
self._current_path_builder = PathBuilder()
self._obs = self._env.reset()
def _handle_rollout_ending(
self,
max_path_length,
discard_incomplete_paths
):
if len(self._current_path_builder) > 0:
path = self._current_path_builder.get_all_stacked()
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
return
self._epoch_paths.append(path)
self._num_paths_total += 1
self._num_steps_total += path_len
class ObsDictStepCollector(StepCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
observation_key='observation',
):
if render_kwargs is None:
render_kwargs = {}
self._env = env
self._policy = policy
self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._render = render
self._render_kwargs = render_kwargs
self._observation_key = observation_key
self._num_steps_total = 0
self._num_paths_total = 0
self._obs = None # cache variable
def get_epoch_paths(self):
return self._epoch_paths
def end_epoch(self, epoch):
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._obs = None
def get_diagnostics(self):
path_lens = [len(path['actions']) for path in self._epoch_paths]
stats = OrderedDict([
('num steps total', self._num_steps_total),
('num paths total', self._num_paths_total),
])
stats.update(create_stats_ordered_dict(
"path length",
path_lens,
always_show_all_stats=True,
))
return stats
def get_snapshot(self):
return dict(
env=self._env,
policy=self._policy,
observation_key=self._observation_key,
)
def start_collection(self):
self._start_new_rollout()
def end_collection(self):
epoch_paths = self.get_epoch_paths()
return epoch_paths
def collect_new_steps(
self,
max_path_length,
num_steps,
discard_incomplete_paths,
):
for _ in range(num_steps):
self.collect_one_step(max_path_length, discard_incomplete_paths)
def collect_one_step(
self,
max_path_length,
discard_incomplete_paths,
):
if self._obs is None:
self._start_new_rollout()
new_obs = self._obs[self._observation_key]
action, agent_info = self._policy.get_action(new_obs)
next_ob, reward, terminal, env_info = (
self._env.step(action)
)
if self._render:
self._env.render(**self._render_kwargs)
terminal = np.array([terminal])
reward = np.array([reward])
# store path obs
self._current_path_builder.add_all(
observations=self._obs,
actions=action,
rewards=reward,
next_observations=next_ob,
terminals=terminal,
agent_infos=agent_info,
env_infos=env_info,
)
if terminal or len(self._current_path_builder) >= max_path_length:
self._handle_rollout_ending(max_path_length,
discard_incomplete_paths)
self._start_new_rollout()
else:
self._obs = next_ob
def _start_new_rollout(self):
self._current_path_builder = PathBuilder()
self._obs = self._env.reset()
def _handle_rollout_ending(
self,
max_path_length,
discard_incomplete_paths
):
if len(self._current_path_builder) > 0:
path = self._current_path_builder.get_all_stacked()
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
return
self._epoch_paths.append(path)
self._num_paths_total += 1
self._num_steps_total += path_len
| google-research/DBAP-algorithm | third_party/rlkit_library/rlkit/samplers/data_collector/step_collector.py | Python | apache-2.0 | 12,271 |
import itchat, time, re
from itchat.content import *
import urllib2, urllib
import json
from watson_developer_cloud import ConversationV1
response={'context':{}}
@itchat.msg_register([TEXT])
def text_reply(msg):
global response
request_text = msg['Text'].encode('UTF-8')
conversation = ConversationV1(
username='9c359fba-0692-4afa-afb1-bd5bf4d7e367',
password='5Id2zfapBV6e',
version='2017-04-21')
# replace with your own workspace_id
workspace_id = 'd3e50587-f36a-4bdf-bf3e-38c382e8d63a'
print "request ==>", request_text
    # The original wrapped this call in try/except around `type(eval(response))`,
    # but eval() on a dict always raises and both branches made the identical
    # call, so the check is dropped. Passing the previous response's context
    # back to Watson keeps the dialog state across turns.
    response = conversation.message(workspace_id=workspace_id, message_input={
        'text': request_text}, context=response['context'])
    if len(response['output']['text']) > 0:
        response_text = response['output']['text'][0]
    else:
        response_text = "No message"
    itchat.send(response_text, msg['FromUserName'])
itchat.auto_login()
itchat.run(debug=True)
| Jonathanliu92251/watson-conversation | wechat/watson-wechat.py | Python | apache-2.0 | 1,140 |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract API specification for XManager implementations.
Each implementation of the XManager API should override the abstract methods.
Users are normally expected to have the following pair of imports:
```
from xmanager import xm
from xmanager import xm_foo
```
"""
import abc
import asyncio
from concurrent import futures
import enum
import getpass
import inspect
import queue
import threading
from typing import Any, Awaitable, Callable, Collection, Dict, List, Mapping, Optional, Sequence, overload
import attr
from xmanager.xm import async_packager
from xmanager.xm import id_predictor
from xmanager.xm import job_blocks
from xmanager.xm import job_operators
from xmanager.xm import metadata_context
from xmanager.xm import pattern_matching
def _check_if_unsupported_args_are_present(args: Mapping[str, Any],
supported_args: Collection[str],
job_type: str) -> None:
supported_args = set(supported_args)
unsupported_args = set(args.keys()) - supported_args
if unsupported_args:
raise ValueError(
f'Arguments {unsupported_args!r} are not supported by {job_type}. Only '
f'{supported_args!r} are allowed.')
def _apply_args_to_job(job: job_blocks.Job, args: Mapping[str, Any]) -> None:
"""Overrides job properties."""
_check_if_unsupported_args_are_present(args, ('args', 'env_vars'), 'xm.Job')
if 'args' in args:
job.args = job_blocks.merge_args(job.args, args['args'])
job.env_vars.update(args.get('env_vars', {}))
def _apply_args_to_job_group(job_group: job_blocks.JobGroup,
args: Mapping[str, Any]) -> None:
"""Recursively overrides job group properties."""
if args:
_check_if_unsupported_args_are_present(args, job_group.jobs.keys(),
'xm.JobGroup')
for key, job in job_group.jobs.items():
_apply_args(job, args.get(key, {}))
_apply_args = pattern_matching.match(
_apply_args_to_job, _apply_args_to_job_group,
pattern_matching.Case([job_blocks.JobGeneratorType, Any],
lambda other, args: None))
class ExperimentUnitStatus(abc.ABC):
"""The status of an experiment unit."""
@property
@abc.abstractmethod
def is_active(self) -> bool:
"""Returns whether the unit is not in terminal state.
It may be actively running or queued. The unit may produce more results.
If the unit is stopped by a user it will be neither active, completed
nor failed.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def is_completed(self) -> bool:
"""Returns whether the unit has completed without failures.
This is a terminal state. The unit has produced all the intended results.
But it still may be restarted by an explicit request.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def is_failed(self) -> bool:
"""Returns whether the unit has failed.
This is a terminal state. Experiment unit will enter this state on any
fatal failure, such as process exiting with non-zero code, cloud rejecting
to schedule/queue the job or exceptions in JobGenerator. The unit will stay
in this state unless explicitly restarted.
Intermediate failures do not result in this state.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def message(self) -> str:
"""An optional human-readable message providing context for the status.
This may take the form of explaining why the work unit is in this state,
or any potentially transient errors the work unit may be experiencing.
"""
raise NotImplementedError
class ExperimentUnitError(RuntimeError):
"""Experiment unit could not be completed."""
class ExperimentUnitFailedError(ExperimentUnitError):
"""A job running in an experiment unit has failed."""
class ExperimentUnitNotCompletedError(ExperimentUnitError):
"""Experiment unit is neither running nor completed.
For example it may be stopped by a user.
"""
class NotFoundError(KeyError):
"""Experiment/Work Unit/etc. has not been found."""
def _work_unit_arguments(
job: job_blocks.JobType,
args: Optional[Mapping[str, Any]],
) -> Mapping[str, Any]:
"""Constructs work unit arguments to display them in various UIs.
If users pass `args` to the `.add` method explicitly, we assume `args` to be
the sought work unit arguments. If `args` are not passed to `.add`, we deduce
work unit arguments implicitly from the `job`s' `args` and `env_vars`.
Args:
job: A job to run inside a work unit.
args: Explicitly specified arguments (could be empty).
Returns:
Depending on the type of the `job` given, can be one of the following:
- if it's an instance of `Job`, we return `{'args': job.args, 'env_vars':
job.env_vars}` with empty values omitted;
- if it's an instance of `JobGroup`, we recursively unwind the group while
populating corresponding nested dictionaries until we reach standalone
`Job`s;
- if it's a job generator, we return `{}`.
"""
if args is not None:
# In order to give users control on what is shown as work unit arguments we
# don't alter them if a value is given.
return args
def deduce_args_for_job(job: job_blocks.Job) -> Dict[str, Any]:
args = {
'args': job.args.to_dict(kwargs_only=True),
'env_vars': job.env_vars
}
return {key: value for key, value in args.items() if value}
def deduce_args_for_job_group(group: job_blocks.JobGroup) -> Dict[str, Any]:
args = {}
for job_name, job in group.jobs.items():
job_args = deduce_args(job)
if job_args:
args[job_name] = job_args
return args
deduce_args = pattern_matching.match(
deduce_args_for_job, deduce_args_for_job_group,
pattern_matching.Case([job_blocks.JobGeneratorType],
lambda generator: {}))
return deduce_args(job)
class Importance(enum.Enum):
"""How important it is to schedule particular Experiment or ExperimentUnit.
This is a hint to the scheduler. Not all schedulers take it into account
  (xm_local doesn't). And even with a smart scheduler a less important work unit
  may run before a more important one, e.g. if it uses a less contended resource.
  Unlike ServiceTier, importance only controls preference within a team, i.e. how
  a team's resources are divided between the team's experiments. It has no effect on
resource allocation between teams.
"""
# High impact experiments. Try scheduling them even at the cost of significant
# reduction of the overall throughput that your experiments get.
HIGH = 'high'
# The default importance.
NORMAL = 'normal'
# Prefer to schedule other experiments with higher importance, but in overall
# try to maximize throughput.
LOW = 'low'
@attr.s(auto_attribs=True, kw_only=True)
class ExperimentUnitRole(abc.ABC):
"""The role of an experiment unit within the experiment structure.
Attributes:
importance: how important it is to schedule this executable unit comparing
to all your executable units (from all your experiments).
"""
importance: Importance = Importance.NORMAL
class ExperimentUnit(abc.ABC):
"""ExperimentUnit is a collection of semantically associated `Job`s."""
experiment: 'Experiment'
def __init__(self, experiment: 'Experiment',
create_task: Callable[[Awaitable[Any]], futures.Future],
args: Optional[Mapping[str,
Any]], role: ExperimentUnitRole) -> None:
"""Initializes an `ExperimentUnit` instance.
Args:
experiment: An experiment this unit belongs to.
create_task: A callback to register a new asynchronous task.
args: Arguments to this experiment unit. Most commonly used to represent
the hyperparameter sweep trial corresponding to a work unit.
role: The role of this unit in the experiment structure.
"""
self.experiment = experiment
self._create_task = create_task
self._args = args
self._role = role
self._launch_tasks: List[futures.Future] = []
@property
def experiment_id(self) -> int:
"""Returns a unique ID assigned to the experiment."""
return self.experiment.experiment_id
def add(self,
job: job_blocks.JobType,
args: Optional[Mapping[str, Any]] = None) -> Awaitable[None]:
# pyformat: disable
"""Adds a Job / JobGroup to the experiment unit.
Only one JobGroup can be added to an ExperimentUnit. This limitation may be
lifted in future versions.
Args:
job: A job or job group to add.
args: Keyword arguments to be passed to the job. For Job and JobGroup args
are recursively expanded. For example,
```
wu.add(
JobGroup(agent=Job(...)),
args={'agent': {'args': {'learning_rate': 0.1}}},
)
```
would update `args` field of a job `agent` in the group.
Returns:
An awaitable that would be fulfilled when the job is launched.
"""
# pyformat: enable
job = job_operators.shallow_copy_job_type(job)
if args is not None:
_apply_args(job, args)
job_operators.populate_job_names(job)
def launch_job(job: job_blocks.Job) -> Awaitable[None]:
return self._launch_job_group(
job_blocks.JobGroup(**{job.name: job}),
_work_unit_arguments(job, self._args))
def launch_job_group(group: job_blocks.JobGroup) -> Awaitable[None]:
return self._launch_job_group(group,
_work_unit_arguments(group, self._args))
def launch_job_generator(
job_generator: job_blocks.JobGeneratorType) -> Awaitable[None]:
if (not inspect.iscoroutinefunction(job_generator) and
not inspect.iscoroutinefunction(job_generator.__call__)):
raise ValueError(
'Job generator must be an async function. Signature needs to be '
'`async def job_generator(work_unit: xm.WorkUnit):`')
return job_generator(self, **(args or {}))
job_awaitable = pattern_matching.match(launch_job, launch_job_group,
launch_job_generator)(
job)
launch_task = self._create_task(job_awaitable)
self._launch_tasks.append(launch_task)
return asyncio.wrap_future(launch_task)
async def wait_until_complete(self) -> 'ExperimentUnit':
"""Waits until the unit is in a final state: completed/failed/stopped.
Raises:
ExperimentUnitError: Exception if the unit couldn't complete.
Returns:
Returns self to facilitate asyncio.as_completed usage.
"""
try:
for task in self._launch_tasks:
await asyncio.wrap_future(task)
except Exception as e:
raise ExperimentUnitError('Experiment unit could not be created.') from e
await self._wait_until_complete()
return self
async def _launch_job_group(self, job_group: job_blocks.JobGroup,
args_view: Mapping[str, Any]) -> None:
"""Launches a given job group as part of the unit."""
raise NotImplementedError
async def _wait_until_complete(self) -> None:
"""Waits until the unit is in a final state: completed/failed/stopped.
Child classes need to implement this method to support awaiting units.
    Unlike wait_until_complete this method assumes that the unit has been fully
    created. This method is only invoked if somebody has requested to monitor
    the unit.
"""
raise NotImplementedError
def stop(self) -> None:
"""Initiate the process to stop the unit from running.
This method will synchronously make a request for the unit to stop.
However, the method does not actually wait for the unit to be in a
terminal state.
Use self.wait_until_complete() after self.stop() to guarantee the unit
is stopped.
"""
raise NotImplementedError
def get_status(self) -> ExperimentUnitStatus:
"""Gets the status of this unit."""
raise NotImplementedError
@property
@abc.abstractmethod
def experiment_unit_name(self) -> str:
raise NotImplementedError
def get_full_job_name(self, job_name: str) -> str:
"""Given `Job.name` constructs its full name.
The primary use case is addressing containers -- full names serve as
hostnames.
Args:
job_name: Short name of a job.
Returns:
Full name of the job.
"""
return f'{self.experiment_unit_name}_{job_name}'
@property
def context(self) -> metadata_context.MetadataContext:
"""Returns metadata context for a unit."""
return metadata_context.MetadataContext(
creator=getpass.getuser(),
annotations=metadata_context.ContextAnnotations())
@attr.s(auto_attribs=True, kw_only=True)
class WorkUnitRole(ExperimentUnitRole):
"""An experiment unit with this role is a work unit.
Work units contain jobs that are often run as trials as part of an
experiment's hyper-parameter search. The status of a work unit is used to
determine the status of the experiment.
"""
class WorkUnit(ExperimentUnit):
"""Work units are experiment units with the work unit role."""
@property
@abc.abstractmethod
def work_unit_id(self) -> int:
raise NotImplementedError
async def wait_until_complete(self) -> 'WorkUnit':
"""Waits until the unit is in a final state: completed/failed/stopped.
Raises:
ExperimentUnitError: Exception if the unit couldn't complete.
Returns:
Returns self to facilitate asyncio.as_completed usage.
"""
await super().wait_until_complete()
return self
@attr.s(auto_attribs=True, kw_only=True)
class AuxiliaryUnitRole(ExperimentUnitRole):
"""An experiment unit with this role is an auxiliary unit.
Auxiliary units contain jobs that are not part of the trials of a
hyper-parameter search. The status of an auxiliary unit is not used to
determine the status of the experiment. e.g. Tensorboard
Attributes:
termination_delay_secs: How long to keep AUX unit running after experiment
completion.
"""
termination_delay_secs: int
class AuxiliaryUnitJob(abc.ABC):
"""A job bundled with an AuxiliaryUnitRole.
This class allows libraries to define self-contained objects which would
  result in AUX units once added to the experiment.
  Note that this class conforms to the xm.JobGenerator interface.
"""
role: AuxiliaryUnitRole
_job: job_blocks.JobType
def __init__(self,
job: job_blocks.JobType,
*,
importance: Importance = Importance.NORMAL,
termination_delay_secs: int) -> None:
self.role = AuxiliaryUnitRole(
importance=importance,
termination_delay_secs=termination_delay_secs,
)
self._job = job
async def __call__(self, aux_unit: ExperimentUnit, **kwargs):
async def launch_generator(
job_generator: job_blocks.JobGeneratorType) -> None:
await job_generator(aux_unit, **kwargs)
async def launch_job(job: Any) -> None:
aux_unit.add(job, args=kwargs)
await pattern_matching.async_match(launch_generator, launch_job)(self._job)
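# A minimal usage sketch (assumed caller code; `experiment`, `executable` and
# `executor` are placeholders, not defined in this module): wrapping a job in
# AuxiliaryUnitJob makes `experiment.add` create an auxiliary unit instead of
# a work unit.
#
#     tensorboard = AuxiliaryUnitJob(
#         job_blocks.Job(executable, executor, name='tensorboard'),
#         importance=Importance.LOW,
#         termination_delay_secs=3600,
#     )
#     experiment.add(tensorboard)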
class Experiment(abc.ABC):
"""Experiment contains a family of jobs run on the same snapshot of code.
Experiment also implements the behavior of how to add and execute jobs.
Attempting to add jobs that contain Executables with unsupported types will
fail.
"""
# An event loop in which job generators would be run.
_event_loop: asyncio.AbstractEventLoop
# A queue of background tasks that launch work units.
_running_tasks: queue.Queue
# Work unit ID predictor.
_work_unit_id_predictor: id_predictor.Predictor
# A class variable for batching packaging requests.
_async_packager: async_packager.AsyncPackager
@property
def experiment_id(self) -> int:
"""Returns a unique ID assigned to the experiment."""
raise NotImplementedError
def __enter__(self):
if asyncio.get_event_loop().is_running():
raise RuntimeError('When using Experiment from a coroutine please use '
'`async with` syntax')
self._event_loop = asyncio.new_event_loop()
asyncio.get_child_watcher().attach_loop(self._event_loop)
self._event_loop_thread = threading.Thread(
target=self._event_loop.run_forever, daemon=True)
self._event_loop_thread.start()
# asyncio.run_coroutine_threadsafe doesn't accept class method and wants it
# wrapped in a function.
async def async_enter():
await self.__aenter__()
asyncio.run_coroutine_threadsafe(
async_enter(), loop=self._event_loop).result()
return self
def _wait_for_tasks(self):
while not self._running_tasks.empty():
self._running_tasks.get_nowait().result()
def __exit__(self, exc_type, exc_value, traceback):
self._wait_for_tasks()
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
self._event_loop_thread.join()
async def __aenter__(self):
self._event_loop = asyncio.get_event_loop()
self._running_tasks = queue.Queue()
self._work_unit_id_predictor = id_predictor.Predictor(1 +
self.work_unit_count)
return self
async def _await_for_tasks(self):
while not self._running_tasks.empty():
await asyncio.wrap_future(self._running_tasks.get_nowait())
async def __aexit__(self, exc_type, exc_value, traceback):
await self._await_for_tasks()
@classmethod
def package(
cls, packageables: Sequence[job_blocks.Packageable] = ()
) -> Sequence[job_blocks.Executable]:
"""Packages `packageables` & triggers async packaging.
This function has 2 usages:
    - Builds all given executable specs in parallel. While calling package(...)
      multiple times is allowed, it results in a slow sequential build, even if
      invoked from concurrent threads.
- Triggers packaging of the items enqueued previously with `package_async`.
Args:
packageables: A sequence of extra packageables to build synchronously.
Returns:
A sequence of packaging results associated to `packageables` (same order).
"""
return cls._async_packager.package(packageables)
@classmethod
def package_async(
cls,
packageable: job_blocks.Packageable) -> Awaitable[job_blocks.Executable]:
"""Queues executable spec to be packaged into executable.
If gathering all packageables for a single `package()` call is inconvenient,
one may request packaging with `package_async` and later trigger the build
for the whole batch with `package()`.
Usage:
if eval:
eval_executable = experiment.package_async(xm.blaze_binary(...))
if train:
train_executable = experiment.package_async(xm.blaze_binary(...))
experiment.package() # Explicitly trigger packaging.
jobs = {}
if eval:
jobs['eval'] = xm.job(await eval_executable, ...)
if train:
jobs['train'] = xm.job(await train_executable, ...)
Args:
packageable: Executable spec to package.
Returns:
An awaitable for the packaging result.
"""
return cls._async_packager.add(packageable)
@overload
def add(self,
job: AuxiliaryUnitJob,
args: Optional[Mapping[str, Any]] = ...) -> Awaitable[ExperimentUnit]:
...
@overload
def add(self,
job: job_blocks.JobType,
args: Optional[Mapping[str, Any]] = ...,
role: WorkUnitRole = ...) -> Awaitable[WorkUnit]:
...
@overload
def add(self, job: job_blocks.JobType, args: Optional[Mapping[str, Any]],
role: ExperimentUnitRole) -> Awaitable[ExperimentUnit]:
...
@overload
def add(
self,
job: job_blocks.JobType,
args: Optional[Mapping[str, Any]] = ...,
*, # parameters after “*” are keyword-only parameters
role: ExperimentUnitRole
) -> Awaitable[ExperimentUnit]:
...
# The ExecutableUnit return type is determined by the role.
def add(self, job, args=None, role=WorkUnitRole()):
# pyformat: disable
"""Adds a Job / JobGroup to the experiment.
A new Experiment Unit is created to run the job.
Args:
job: A Job or JobGroup to add.
args: Keyword arguments to be passed to the job. For Job and JobGroup args
are recursively expanded. For example,
```
wu.add(
JobGroup(agent=Job(...)),
args={'agent': {'args': {'learning_rate': 0.1}}},
)
```
would update `args` field of a job `agent` in the group.
role: The role of this unit in the experiment structure.
Returns:
An awaitable that would be fulfilled when the job is launched.
"""
# pyformat: enable
role = pattern_matching.match(
pattern_matching.Case([AuxiliaryUnitJob], lambda job: job.role),
pattern_matching.Case([Any], lambda job: role),
)(
job)
experiment_unit_future = self._create_experiment_unit(args, role)
async def launch():
experiment_unit = await experiment_unit_future
await experiment_unit.add(job, args)
return experiment_unit
return asyncio.wrap_future(self._create_task(launch()))
@abc.abstractmethod
def _create_experiment_unit(
self, args: Optional[Mapping[str, Any]],
role: ExperimentUnitRole) -> Awaitable[ExperimentUnit]:
"""Creates a new experiment unit.
Synchronously starts the experiment unit creation, ensuring that IDs would
be assigned in invocation order. The operation itself may run asynchronously
in background.
Args:
      args: Executable unit arguments, to be shown as part of the
        hyper-parameter sweep.
role: Executable unit role: whether to create a work or auxiliary unit.
Returns:
An awaitable to the creation result.
"""
raise NotImplementedError
def _create_task(self, task: Awaitable[Any]) -> futures.Future:
future = asyncio.run_coroutine_threadsafe(task, loop=self._event_loop)
self._running_tasks.put_nowait(future)
return future
@property
def work_unit_count(self) -> int:
"""Returns how many work units the experiment has."""
raise NotImplementedError
@abc.abstractmethod
def work_units(self) -> Mapping[int, WorkUnit]:
"""Returns a mapping from work_unit_id to an instance of the work unit."""
raise NotImplementedError
@property
def context(self) -> metadata_context.MetadataContext:
"""Returns metadata context for the experiment."""
return metadata_context.MetadataContext(
creator=getpass.getuser(),
annotations=metadata_context.ContextAnnotations())
@abc.abstractmethod
def create_experiment(experiment_title: Optional[str] = None) -> Experiment:
"""Returns a concrete Experiment instance."""
raise NotImplementedError
@abc.abstractmethod
def get_experiment(experiment_id: int) -> Experiment:
"""Returns an Experiment instance associated with this experiment id.
Args:
experiment_id: An ID of an experiment to get.
Raises:
NotFoundError: If experiment is not found.
"""
raise NotImplementedError
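# A minimal end-to-end sketch for a concrete backend (assumed caller code;
# `create_experiment` stands in for a backend implementation such as
# xm_local.create_experiment, and `packageable`/`executor` are placeholders):
#
#     with create_experiment(experiment_title='cifar10') as experiment:
#         [executable] = experiment.package([packageable])
#         for lr in (0.1, 0.01):
#             experiment.add(job_blocks.Job(
#                 executable, executor, args={'learning_rate': lr}))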
| deepmind/xmanager | xmanager/xm/core.py | Python | apache-2.0 | 23,876 |
"""
Provides functions for evaluating the 'fitness' of EC2 instances, which the
autoscaler uses to decide which instance is the best candidate to kill.
"""
def sort_by_system_instance_health(instances):
return sorted(
instances,
key=lambda i: (
i.instance_status["SystemStatus"]["Status"] != "ok"
or i.instance_status["InstanceStatus"]["Status"] != "ok"
),
)
def sort_by_upcoming_events(instances):
return sorted(instances, key=lambda i: len(i.instance_status.get("Events", [])))
def sort_by_total_tasks(instances):
return sorted(instances, key=lambda i: i.task_counts.count, reverse=True)
def sort_by_running_batch_count(instances):
return sorted(instances, key=lambda i: i.task_counts.batch_count, reverse=True)
def sort_by_ec2_fitness(instances):
"""
    Sort a list of instances according to their fitness. The returned list is
    ordered by 'fitness': the instance that is least desirable to kill comes
    first.
    Fitness is judged according to the following rules:
    - any instance with a non-'ok' system or instance status is always
      considered least healthy
    - next, instances are ranked according to whether they have upcoming events
      planned; a planned event counts against an instance's fitness
    - next, instances are sorted by the number of batch tasks running on them;
      we can't drain batch tasks, so we make an effort to avoid disrupting them
    - finally, instances are sorted by their total task count; those with the
      highest total task count are considered fittest, because it's painful to
      drain them.
"""
return sort_by_system_instance_health(
sort_by_upcoming_events(
sort_by_running_batch_count(sort_by_total_tasks(instances))
)
)
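# A minimal usage sketch (assumed caller code): the returned list is ordered
# fittest-first, so an autoscaler would kill from the end of the list.
#
#     ranked = sort_by_ec2_fitness(instances)
#     instance_to_kill = ranked[-1]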
| Yelp/paasta | paasta_tools/autoscaling/ec2_fitness.py | Python | apache-2.0 | 1,916 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VexFlow labeled data generation.
Wraps the node.js generator, which generates a random measure of music as SVG,
and the ground truth glyphs present in the image as a `Page` message.
Each invocation generates a batch of images. There is a tradeoff between the
startup time of node.js for each invocation, and keeping the output size small
enough to pipe into Python.
The final outputs are positive and negative example patches. Positive examples
are centered on an outputted glyph, and have that glyph's type. Negative
examples are at least a few pixels away from any glyph, and have type NONE.
Since negative examples could be a few pixels away from a glyph, we get negative
examples that overlap with partial glyph(s), but are centered too far away from
a glyph to be considered a positive example. Currently, every single glyph
results in a single positive example, and negative examples are randomly
sampled.
All glyphs are emitted to RecordIO, where they are outputted in a single
collection for training. We currently do not store the entire generated image
anywhere. This could be added later in order to try other classification
approaches.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path
import random
import subprocess
import sys
import apache_beam as beam
from apache_beam.metrics import Metrics
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from moonlight import engine
from moonlight.protobuf import musicscore_pb2
from moonlight.staves import staffline_distance
from moonlight.staves import staffline_extractor
# Every image is expected to contain at least 3 glyphs.
POSITIVE_EXAMPLES_PER_IMAGE = 3
def _normalize_path(filename):
"""Normalizes a relative path to a command to spawn.
Args:
filename: String; relative or absolute path.
Returns:
The normalized path. This is necessary because in our use case,
vexflow_generator_pipeline will live in a different directory from
vexflow_generator, and there are symlinks to both directories in the same
parent directory. Without normalization, `..` would reference the parent of
the actual directory that was symlinked. With normalization, it references
the directory that contains the symlink to the working directory.
"""
if filename.startswith('/'):
return filename
else:
return os.path.normpath(
os.path.join(os.path.dirname(sys.argv[0]), filename))
class PageGenerationDoFn(beam.DoFn):
"""Generates the PNG images and ground truth for each batch.
Takes in a batch number, and outputs a tuple of PNG contents (bytes) and the
labeled staff (Staff message).
"""
def __init__(self, num_pages_per_batch, vexflow_generator_command,
svg_to_png_command):
self.num_pages_per_batch = num_pages_per_batch
self.vexflow_generator_command = vexflow_generator_command
self.svg_to_png_command = svg_to_png_command
def process(self, batch_num):
for page in self.get_pages_for_batch(batch_num, self.num_pages_per_batch):
staff = musicscore_pb2.Staff()
text_format.Parse(page['page'], staff)
# TODO(ringw): Fix the internal proto pickling issue so that we don't
# have to serialize the staff here.
yield self._svg_to_png(page['svg']), staff.SerializeToString()
def get_pages_for_batch(self, batch_num, num_pages_per_batch):
"""Generates the music score pages in a single batch.
The generator takes in a seed for the RNG for each page, and outputs all
pages at once. The seeds for all batches are consecutive for determinism,
starting from 0, but each seed to the Mersenne Twister RNG should result in
completely different output.
Args:
batch_num: The index of the batch to output.
num_pages_per_batch: The number of pages to generate in each batch.
Returns:
A list of dicts holding `svg` (XML text) and `page` (text-format
`tensorflow.moonlight.Staff` proto).
"""
return self.get_pages(
range(batch_num * num_pages_per_batch,
(batch_num + 1) * num_pages_per_batch))
def get_pages(self, seeds):
vexflow_generator_command = list(self.vexflow_generator_command)
# If vexflow_generator_command is relative, it is relative to the pipeline
# binary.
vexflow_generator_command[0] = _normalize_path(vexflow_generator_command[0])
seeds = ','.join(map(str, seeds))
return json.loads(
subprocess.check_output(vexflow_generator_command +
['--random_seeds=' + seeds]))
def _svg_to_png(self, svg):
svg_to_png_command = list(self.svg_to_png_command)
svg_to_png_command[0] = _normalize_path(svg_to_png_command[0])
popen = subprocess.Popen(
svg_to_png_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = popen.communicate(input=svg)
if popen.returncode != 0:
raise ValueError('convert failed with status %d\nstderr:\n%s' %
(popen.returncode, stderr))
return stdout
class PatchExampleDoFn(beam.DoFn):
"""Extracts labeled patches from generated VexFlow music scores."""
def __init__(self,
negative_example_distance,
patch_width,
negative_to_positive_example_ratio,
noise_fn=lambda x: x):
self.negative_example_distance = negative_example_distance
self.patch_width = patch_width
self.negative_to_positive_example_ratio = negative_to_positive_example_ratio
self.noise_fn = noise_fn
self.patch_counter = Metrics.counter(self.__class__, 'num_patches')
def start_bundle(self):
# TODO(ringw): Expose a cleaner way to set this value.
# The image is too small for the default min staffline distance score.
# pylint: disable=protected-access
staffline_distance._MIN_STAFFLINE_DISTANCE_SCORE = 100
self.omr = engine.OMREngine()
def process(self, item):
png_contents, staff_message = item
staff_message = musicscore_pb2.Staff.FromString(staff_message)
with tf.Session(graph=self.omr.graph) as sess:
# Load the image, then feed it in to apply noise.
# Randomly rotate the image and apply noise, then dump it back out as a
# PNG.
# TODO(ringw): Expose a way to pass in the image contents to the main
# OMR TF graph.
img = tf.to_float(tf.image.decode_png(png_contents))
# Collapse the RGB channels, if any. No-op for a monochrome PNG.
img = tf.reduce_mean(img[:, :, :3], axis=2)[:, :, None]
# Fix the stafflines being #999.
img = tf.clip_by_value(img * 2. - 255., 0., 255.)
img = self.noise_fn(img)
# Get a 2D uint8 image array for OMR.
noisy_image = sess.run(
tf.cast(tf.clip_by_value(img, 0, 255)[:, :, 0], tf.uint8))
# Run OMR staffline extraction and staffline distance estimation. The
# stafflines are used to get patches from the generated image.
stafflines, image_staffline_distance = sess.run(
[
self.omr.glyph_classifier.staffline_extractor.extract_staves(),
self.omr.structure.staff_detector.staffline_distance[0]
],
feed_dict={self.omr.image: noisy_image})
if stafflines.shape[0] != 1:
raise ValueError('Image should have one detected staff, got shape: ' +
str(stafflines.shape))
positive_example_count = 0
negative_example_whitelist = np.ones(
(stafflines.shape[staffline_extractor.Axes.POSITION],
stafflines.shape[staffline_extractor.Axes.X]), np.bool)
# Blacklist xs where the patch would overlap with either end.
negative_example_overlap_from_end = max(self.negative_example_distance,
self.patch_width // 2)
negative_example_whitelist[:, :negative_example_overlap_from_end] = False
negative_example_whitelist[:,
-negative_example_overlap_from_end - 1:] = False
all_positive_examples = []
for glyph in staff_message.glyph:
staffline = staffline_extractor.get_staffline(glyph.y_position,
stafflines[0])
glyph_x = int(
round(glyph.x *
self.omr.glyph_classifier.staffline_extractor.target_height /
(image_staffline_distance * self.omr.glyph_classifier
.staffline_extractor.staffline_distance_multiple)))
example = self._create_example(staffline, glyph_x, glyph.type)
if example:
staffline_index = staffline_extractor.y_position_to_index(
glyph.y_position,
stafflines.shape[staffline_extractor.Axes.POSITION])
# Blacklist the area adjacent to the glyph, even if it is not selected
# as a positive example below.
negative_example_whitelist[staffline_index, glyph_x -
self.negative_example_distance + 1:glyph_x +
self.negative_example_distance] = False
all_positive_examples.append(example)
positive_example_count += 1
for example in random.sample(all_positive_examples,
POSITIVE_EXAMPLES_PER_IMAGE):
yield example
self.patch_counter.inc()
negative_example_staffline, negative_example_x = np.where(
negative_example_whitelist)
negative_example_inds = np.random.choice(
len(negative_example_staffline),
int(positive_example_count * self.negative_to_positive_example_ratio))
negative_example_staffline = negative_example_staffline[
negative_example_inds]
negative_example_x = negative_example_x[negative_example_inds]
for staffline, x in zip(negative_example_staffline, negative_example_x):
example = self._create_example(stafflines[0, staffline], x,
musicscore_pb2.Glyph.NONE)
assert example, 'Negative example xs should always be in range'
yield example
self.patch_counter.inc()
def _create_example(self, staffline, x, label):
start_x = x - self.patch_width // 2
limit_x = x + self.patch_width // 2 + 1
assert limit_x - start_x == self.patch_width
# x is the last axis of staffline
if 0 <= start_x <= limit_x < staffline.shape[-1]:
patch = staffline[:, start_x:limit_x]
example = tf.train.Example()
example.features.feature['patch'].float_list.value.extend(patch.ravel())
example.features.feature['label'].int64_list.value.append(label)
example.features.feature['height'].int64_list.value.append(patch.shape[0])
example.features.feature['width'].int64_list.value.append(patch.shape[1])
return example
else:
return None
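# Hedged usage sketch (not part of the original pipeline wiring): the DoFn
# above is meant to run over a PCollection of (png_contents, serialized Staff
# proto) pairs. The parameter values and the `page_items` collection below are
# illustrative placeholders only.
#
#   patches = (
#       pipeline
#       | beam.Create(page_items)
#       | beam.ParDo(
#           PatchExampleDoFn(
#               negative_example_distance=6,
#               patch_width=15,
#               negative_to_positive_example_ratio=1.0)))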
| tensorflow/moonlight | moonlight/training/generation/generation.py | Python | apache-2.0 | 11,447 |
from orchestra.workflow import Step
from orchestra.workflow import Workflow
from simple_workflow.crawl import crawl_page
crawl_step = Step(
slug='crawl',
name='Web Crawling',
description='Find an awesome image on a website',
worker_type=Step.WorkerType.MACHINE,
creation_depends_on=[],
function=crawl_page,
)
rate_step = Step(
slug='rate',
name='Image Rating',
description='Rate the image that we found',
worker_type=Step.WorkerType.HUMAN,
creation_depends_on=[crawl_step],
required_certifications=[],
review_policy={'policy': 'no_review'},
user_interface={
'javascript_includes': [
'/static/simple_workflow/rate/js/modules.js',
'/static/simple_workflow/rate/js/controllers.js',
'/static/simple_workflow/rate/js/directives.js',
],
'stylesheet_includes': [],
'angular_module': 'simple_workflow.rate.module',
'angular_directive': 'rate',
}
)
simple_workflow = Workflow(
slug='simple_workflow',
name='Simple Workflow',
description='Crawl a web page for an image and rate it'
)
simple_workflow.add_step(crawl_step)
simple_workflow.add_step(rate_step)
| Sonblind/orchestra | simple_workflow/workflow.py | Python | apache-2.0 | 1,200 |
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .liquidity_pool_withdraw_result_code import LiquidityPoolWithdrawResultCode
__all__ = ["LiquidityPoolWithdrawResult"]
@type_checked
class LiquidityPoolWithdrawResult:
"""
XDR Source Code::
union LiquidityPoolWithdrawResult switch (
LiquidityPoolWithdrawResultCode code)
{
case LIQUIDITY_POOL_WITHDRAW_SUCCESS:
void;
default:
void;
};
"""
def __init__(
self,
code: LiquidityPoolWithdrawResultCode,
) -> None:
self.code = code
def pack(self, packer: Packer) -> None:
self.code.pack(packer)
if self.code == LiquidityPoolWithdrawResultCode.LIQUIDITY_POOL_WITHDRAW_SUCCESS:
return
@classmethod
def unpack(cls, unpacker: Unpacker) -> "LiquidityPoolWithdrawResult":
code = LiquidityPoolWithdrawResultCode.unpack(unpacker)
if code == LiquidityPoolWithdrawResultCode.LIQUIDITY_POOL_WITHDRAW_SUCCESS:
return cls(code=code)
return cls(code=code)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "LiquidityPoolWithdrawResult":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "LiquidityPoolWithdrawResult":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return self.code == other.code
def __str__(self):
out = []
out.append(f"code={self.code}")
        return f"<LiquidityPoolWithdrawResult [{', '.join(out)}]>"
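# Hedged usage sketch (not part of the generated XDR file): round-trips the
# success result through its base64 XDR encoding, using only the enum member
# already referenced above.
def _example_roundtrip() -> None:
    result = LiquidityPoolWithdrawResult(
        code=LiquidityPoolWithdrawResultCode.LIQUIDITY_POOL_WITHDRAW_SUCCESS
    )
    encoded = result.to_xdr()  # base64 string
    decoded = LiquidityPoolWithdrawResult.from_xdr(encoded)
    assert decoded == result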
| StellarCN/py-stellar-base | stellar_sdk/xdr/liquidity_pool_withdraw_result.py | Python | apache-2.0 | 2,123 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_get_model_evaluation_text_classification_sample]
from google.cloud import aiplatform
def get_model_evaluation_text_classification_sample(
project: str,
model_id: str,
evaluation_id: str,
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
"""
To obtain evaluation_id run the following commands where LOCATION
is the region where the model is stored, PROJECT is the project ID,
and MODEL_ID is the ID of your model.
model_client = aiplatform.gapic.ModelServiceClient(
client_options={
'api_endpoint':'LOCATION-aiplatform.googleapis.com'
}
)
evaluations = model_client.list_model_evaluations(parent='projects/PROJECT/locations/LOCATION/models/MODEL_ID')
print("evaluations:", evaluations)
"""
# The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.ModelServiceClient(client_options=client_options)
name = client.model_evaluation_path(
project=project, location=location, model=model_id, evaluation=evaluation_id
)
response = client.get_model_evaluation(name=name)
print("response:", response)
# [END aiplatform_get_model_evaluation_text_classification_sample]
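# Hedged invocation sketch: the project, model and evaluation IDs below are
# placeholders rather than real resources and must be replaced before running.
#
#   get_model_evaluation_text_classification_sample(
#       project="example-project",
#       model_id="1234567890",
#       evaluation_id="5678901234",
#   )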
| googleapis/python-aiplatform | samples/snippets/model_service/get_model_evaluation_text_classification_sample.py | Python | apache-2.0 | 2,069 |
"""Support for HomematicIP Cloud switches."""
import logging
from homematicip.aio.device import (
AsyncBrandSwitchMeasuring, AsyncFullFlushSwitchMeasuring, AsyncMultiIOBox,
AsyncOpenCollector8Module, AsyncPlugableSwitch,
AsyncPlugableSwitchMeasuring)
from homematicip.aio.group import AsyncSwitchingGroup
from homeassistant.components.switch import SwitchDevice
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
from .device import ATTR_GROUP_MEMBER_UNREACHABLE
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the HomematicIP Cloud switch devices."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the HomematicIP switch from a config entry."""
home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home
devices = []
for device in home.devices:
if isinstance(device, AsyncBrandSwitchMeasuring):
# BrandSwitchMeasuring inherits PlugableSwitchMeasuring
# This device is implemented in the light platform and will
# not be added in the switch platform
pass
elif isinstance(device, (AsyncPlugableSwitchMeasuring,
AsyncFullFlushSwitchMeasuring)):
devices.append(HomematicipSwitchMeasuring(home, device))
elif isinstance(device, AsyncPlugableSwitch):
devices.append(HomematicipSwitch(home, device))
elif isinstance(device, AsyncOpenCollector8Module):
for channel in range(1, 9):
devices.append(HomematicipMultiSwitch(home, device, channel))
elif isinstance(device, AsyncMultiIOBox):
for channel in range(1, 3):
devices.append(HomematicipMultiSwitch(home, device, channel))
for group in home.groups:
if isinstance(group, AsyncSwitchingGroup):
devices.append(
HomematicipGroupSwitch(home, group))
if devices:
async_add_entities(devices)
class HomematicipSwitch(HomematicipGenericDevice, SwitchDevice):
"""representation of a HomematicIP Cloud switch device."""
def __init__(self, home, device):
"""Initialize the switch device."""
super().__init__(home, device)
@property
def is_on(self):
"""Return true if device is on."""
return self._device.on
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._device.turn_on()
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._device.turn_off()
class HomematicipGroupSwitch(HomematicipGenericDevice, SwitchDevice):
"""representation of a HomematicIP switching group."""
def __init__(self, home, device, post='Group'):
"""Initialize switching group."""
device.modelType = 'HmIP-{}'.format(post)
super().__init__(home, device, post)
@property
def is_on(self):
"""Return true if group is on."""
return self._device.on
@property
def available(self):
"""Switch-Group available."""
# A switch-group must be available, and should not be affected by the
# individual availability of group members.
# This allows switching even when individual group members
# are not available.
return True
@property
def device_state_attributes(self):
"""Return the state attributes of the switch-group."""
attr = {}
if self._device.unreach:
attr[ATTR_GROUP_MEMBER_UNREACHABLE] = True
return attr
async def async_turn_on(self, **kwargs):
"""Turn the group on."""
await self._device.turn_on()
async def async_turn_off(self, **kwargs):
"""Turn the group off."""
await self._device.turn_off()
class HomematicipSwitchMeasuring(HomematicipSwitch):
"""Representation of a HomematicIP measuring switch device."""
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self._device.currentPowerConsumption
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
if self._device.energyCounter is None:
return 0
return round(self._device.energyCounter)
class HomematicipMultiSwitch(HomematicipGenericDevice, SwitchDevice):
"""Representation of a HomematicIP Cloud multi switch device."""
def __init__(self, home, device, channel):
"""Initialize the multi switch device."""
self.channel = channel
super().__init__(home, device, 'Channel{}'.format(channel))
@property
def unique_id(self):
"""Return a unique ID."""
return "{}_{}_{}".format(self.__class__.__name__,
self.post, self._device.id)
@property
def is_on(self):
"""Return true if device is on."""
return self._device.functionalChannels[self.channel].on
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._device.turn_on(self.channel)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._device.turn_off(self.channel)
| MartinHjelmare/home-assistant | homeassistant/components/homematicip_cloud/switch.py | Python | apache-2.0 | 5,355 |
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler.solvers import constraints
CONF = cfg.CONF
CONF.import_opt("max_instances_per_host",
"nova.scheduler.filters.num_instances_filter")
LOG = logging.getLogger(__name__)
class NumInstancesConstraint(constraints.BaseLinearConstraint):
"""Constraint that specifies the maximum number of instances that
each host can launch.
"""
def _generate_components(self, variables, hosts, filter_properties):
num_hosts = len(hosts)
num_instances = filter_properties.get('num_instances')
var_matrix = variables.host_instance_matrix
max_instances = CONF.max_instances_per_host
for i in xrange(num_hosts):
num_host_instances = hosts[i].num_instances
acceptable_num_instances = int(max_instances - num_host_instances)
if acceptable_num_instances < 0:
acceptable_num_instances = 0
if acceptable_num_instances < num_instances:
for j in xrange(acceptable_num_instances, num_instances):
self.variables.append([var_matrix[i][j]])
self.coefficients.append([1])
self.constants.append(0)
self.operators.append('==')
LOG.debug(_("%(host)s can accept %(num)s requested instances "
"according to NumInstancesConstraint."),
{'host': hosts[i],
'num': acceptable_num_instances})
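# Hedged illustration of the components generated above: with
# max_instances_per_host = 50, a host already running 48 instances can accept
# 2 more. For a request with num_instances = 4, the loop pins the remaining
# host/instance decision variables to zero:
#
#   variables:    [[var_matrix[i][2]], [var_matrix[i][3]]]
#   coefficients: [[1], [1]]
#   constants:    [0, 0]
#   operators:    ['==', '==']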
| CiscoSystems/nova-solver-scheduler | nova/scheduler/solvers/constraints/num_instances_constraint.py | Python | apache-2.0 | 2,244 |
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance Cinder Volume Driver
"""
import ast
import math
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import base64
from oslo_utils import units
import six
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume.drivers.zfssa import zfssarest
from cinder.volume import volume_types
import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow import task
CONF = cfg.CONF
LOG = log.getLogger(__name__)
ZFSSA_OPTS = [
cfg.StrOpt('zfssa_pool',
help='Storage pool name.'),
cfg.StrOpt('zfssa_project',
help='Project name.'),
cfg.StrOpt('zfssa_lun_volblocksize', default='8k',
choices=['512', '1k', '2k', '4k', '8k', '16k', '32k', '64k',
'128k'],
help='Block size.'),
cfg.BoolOpt('zfssa_lun_sparse', default=False,
help='Flag to enable sparse (thin-provisioned): True, False.'),
cfg.StrOpt('zfssa_lun_compression', default='off',
choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'],
help='Data compression.'),
cfg.StrOpt('zfssa_lun_logbias', default='latency',
choices=['latency', 'throughput'],
help='Synchronous write bias.'),
cfg.StrOpt('zfssa_initiator_group', default='',
help='iSCSI initiator group.'),
cfg.StrOpt('zfssa_initiator', default='',
help='iSCSI initiator IQNs. (comma separated)'),
cfg.StrOpt('zfssa_initiator_user', default='',
help='iSCSI initiator CHAP user (name).'),
cfg.StrOpt('zfssa_initiator_password', default='',
help='Secret of the iSCSI initiator CHAP user.', secret=True),
cfg.StrOpt('zfssa_initiator_config', default='',
help='iSCSI initiators configuration.'),
cfg.StrOpt('zfssa_target_group', default='tgt-grp',
help='iSCSI target group name.'),
cfg.StrOpt('zfssa_target_user', default='',
help='iSCSI target CHAP user (name).'),
cfg.StrOpt('zfssa_target_password', default='', secret=True,
help='Secret of the iSCSI target CHAP user.'),
cfg.StrOpt('zfssa_target_portal',
help='iSCSI target portal (Data-IP:Port, w.x.y.z:3260).'),
cfg.StrOpt('zfssa_target_interfaces',
help='Network interfaces of iSCSI targets. (comma separated)'),
cfg.IntOpt('zfssa_rest_timeout',
help='REST connection timeout. (seconds)'),
cfg.StrOpt('zfssa_replication_ip', default='',
help='IP address used for replication data. (maybe the same as '
'data ip)'),
cfg.BoolOpt('zfssa_enable_local_cache', default=True,
help='Flag to enable local caching: True, False.'),
cfg.StrOpt('zfssa_cache_project', default='os-cinder-cache',
help='Name of ZFSSA project where cache volumes are stored.')
]
CONF.register_opts(ZFSSA_OPTS)
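# Hedged configuration sketch (illustrative only): a minimal cinder.conf
# backend section wiring up this driver. Host, credentials, pool/project and
# portal values below are placeholders, not driver defaults.
#
#   [zfssa-iscsi]
#   volume_driver = cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver
#   san_ip = zfssa-mgmt.example.com
#   san_login = openstack
#   san_password = secret
#   zfssa_pool = mypool
#   zfssa_project = os-volumes
#   zfssa_target_portal = 192.0.2.10:3260
#   zfssa_target_interfaces = e1000g0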
ZFSSA_LUN_SPECS = {
'zfssa:volblocksize',
'zfssa:sparse',
'zfssa:compression',
'zfssa:logbias',
}
def factory_zfssa():
return zfssarest.ZFSSAApi()
class ZFSSAISCSIDriver(driver.ISCSIDriver):
"""ZFSSA Cinder iSCSI volume driver.
Version history:
1.0.1:
Backend enabled volume migration.
Local cache feature.
"""
VERSION = '1.0.1'
protocol = 'iSCSI'
def __init__(self, *args, **kwargs):
super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(ZFSSA_OPTS)
self.configuration.append_config_values(san.san_opts)
self.zfssa = None
self.tgt_zfssa = None
self._stats = None
self.tgtiqn = None
def _get_target_alias(self):
"""return target alias."""
return self.configuration.zfssa_target_group
def do_setup(self, context):
"""Setup - create multiple elements.
Project, initiators, initiatorgroup, target and targetgroup.
"""
lcfg = self.configuration
LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)
self.zfssa = factory_zfssa()
self.tgt_zfssa = factory_zfssa()
self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout)
auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_password)
auth_str = base64.encode_as_text(auth_str)[:-1]
self.zfssa.login(auth_str)
self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project,
compression=lcfg.zfssa_lun_compression,
logbias=lcfg.zfssa_lun_logbias)
if lcfg.zfssa_enable_local_cache:
self.zfssa.create_project(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
compression=lcfg.zfssa_lun_compression,
logbias=lcfg.zfssa_lun_logbias)
schemas = [
{'property': 'image_id',
'description': 'OpenStack image ID',
'type': 'String'},
{'property': 'updated_at',
'description': 'Most recent updated time of image',
'type': 'String'}]
self.zfssa.create_schemas(schemas)
if (lcfg.zfssa_initiator_config != ''):
initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
for initiator_group in initiator_config:
zfssa_initiator_group = initiator_group
for zfssa_initiator in initiator_config[zfssa_initiator_group]:
self.zfssa.create_initiator(zfssa_initiator['iqn'],
zfssa_initiator_group + '-' +
zfssa_initiator['iqn'],
chapuser=
zfssa_initiator['user'],
chapsecret=
zfssa_initiator['password'])
if (zfssa_initiator_group != 'default'):
self.zfssa.add_to_initiatorgroup(
zfssa_initiator['iqn'],
zfssa_initiator_group)
else:
LOG.warning(_LW('zfssa_initiator_config not found. '
'Using deprecated configuration options.'))
if (lcfg.zfssa_initiator != '' and
(lcfg.zfssa_initiator_group == '' or
lcfg.zfssa_initiator_group == 'default')):
LOG.warning(_LW('zfssa_initiator: %(ini)s'
' wont be used on '
'zfssa_initiator_group= %(inigrp)s.'),
{'ini': lcfg.zfssa_initiator,
'inigrp': lcfg.zfssa_initiator_group})
# Setup initiator and initiator group
if (lcfg.zfssa_initiator != '' and
lcfg.zfssa_initiator_group != '' and
lcfg.zfssa_initiator_group != 'default'):
for initiator in lcfg.zfssa_initiator.split(','):
self.zfssa.create_initiator(
initiator, lcfg.zfssa_initiator_group + '-' +
initiator, chapuser=lcfg.zfssa_initiator_user,
chapsecret=lcfg.zfssa_initiator_password)
self.zfssa.add_to_initiatorgroup(
initiator, lcfg.zfssa_initiator_group)
# Parse interfaces
interfaces = []
for interface in lcfg.zfssa_target_interfaces.split(','):
if interface == '':
continue
interfaces.append(interface)
# Setup target and target group
iqn = self.zfssa.create_target(
self._get_target_alias(),
interfaces,
tchapuser=lcfg.zfssa_target_user,
tchapsecret=lcfg.zfssa_target_password)
self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)
def check_for_setup_error(self):
"""Check that driver can login.
Check also pool, project, initiators, initiatorgroup, target and
targetgroup.
"""
lcfg = self.configuration
self.zfssa.verify_pool(lcfg.zfssa_pool)
self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project)
if (lcfg.zfssa_initiator_config != ''):
initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
for initiator_group in initiator_config:
zfssa_initiator_group = initiator_group
for zfssa_initiator in initiator_config[zfssa_initiator_group]:
self.zfssa.verify_initiator(zfssa_initiator['iqn'])
else:
if (lcfg.zfssa_initiator != '' and
lcfg.zfssa_initiator_group != '' and
lcfg.zfssa_initiator_group != 'default'):
for initiator in lcfg.zfssa_initiator.split(','):
self.zfssa.verify_initiator(initiator)
self.zfssa.verify_target(self._get_target_alias())
def _get_provider_info(self, volume, lun=None):
"""Return provider information."""
lcfg = self.configuration
project = lcfg.zfssa_project
if ((lcfg.zfssa_enable_local_cache is True) and
(volume['name'].startswith('os-cache-vol-'))):
project = lcfg.zfssa_cache_project
if lun is None:
lun = self.zfssa.get_lun(lcfg.zfssa_pool,
project,
volume['name'])
if isinstance(lun['number'], list):
lun['number'] = lun['number'][0]
if self.tgtiqn is None:
self.tgtiqn = self.zfssa.get_target(self._get_target_alias())
loc = "%s %s %s" % (lcfg.zfssa_target_portal, self.tgtiqn,
lun['number'])
LOG.debug('_get_provider_info: provider_location: %s', loc)
provider = {'provider_location': loc}
if lcfg.zfssa_target_user != '' and lcfg.zfssa_target_password != '':
provider['provider_auth'] = ('CHAP %s %s' %
(lcfg.zfssa_target_user,
lcfg.zfssa_target_password))
return provider
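    # Hedged illustration of the dict returned above (values hypothetical);
    # initialize_connection() later splits provider_location on whitespace
    # into (target_portal, iqn, lun):
    #
    #   {'provider_location': '192.0.2.10:3260 iqn.1986-03.com.sun:02:abc 0',
    #    'provider_auth': 'CHAP chapuser chapsecret'}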
def create_volume(self, volume):
"""Create a volume on ZFSSA."""
LOG.debug('zfssa.create_volume: volume=' + volume['name'])
lcfg = self.configuration
volsize = str(volume['size']) + 'g'
specs = self._get_voltype_specs(volume)
self.zfssa.create_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
volsize,
lcfg.zfssa_target_group,
specs)
def delete_volume(self, volume):
"""Deletes a volume with the given volume['name']."""
LOG.debug('zfssa.delete_volume: name=%s', volume['name'])
lcfg = self.configuration
try:
lun2del = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'])
except exception.VolumeBackendAPIException as ex:
# NOTE(jdg): This will log an error and continue
# if for some reason the volume no longer exists
# on the backend
if 'Error Getting Volume' in ex.message:
LOG.error(_LE("Volume ID %s was not found on "
"the zfssa device while attempting "
"delete_volume operation."), volume['id'])
return
# Delete clone temp snapshot. see create_cloned_volume()
if 'origin' in lun2del and 'id' in volume:
if lun2del['nodestroy']:
self.zfssa.set_lun_props(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
nodestroy=False)
tmpsnap = 'tmp-snapshot-%s' % volume['id']
if lun2del['origin']['snapshot'] == tmpsnap:
self.zfssa.delete_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
lun2del['origin']['share'],
lun2del['origin']['snapshot'])
return
self.zfssa.delete_lun(pool=lcfg.zfssa_pool,
project=lcfg.zfssa_project,
lun=volume['name'])
if ('origin' in lun2del and
lun2del['origin']['project'] == lcfg.zfssa_cache_project):
self._check_origin(lun2del, volume['name'])
def create_snapshot(self, snapshot):
"""Creates a snapshot of a volume.
Snapshot name: snapshot['name']
Volume name: snapshot['volume_name']
"""
LOG.debug('zfssa.create_snapshot: snapshot=%s', snapshot['name'])
lcfg = self.configuration
self.zfssa.create_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'])
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug('zfssa.delete_snapshot: snapshot=%s', snapshot['name'])
lcfg = self.configuration
numclones = self.zfssa.num_clones(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'])
if numclones > 0:
LOG.error(_LE('Snapshot %s: has clones'), snapshot['name'])
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
self.zfssa.delete_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot - clone a snapshot."""
LOG.debug('zfssa.create_volume_from_snapshot: volume=%s',
volume['name'])
LOG.debug('zfssa.create_volume_from_snapshot: snapshot=%s',
snapshot['name'])
if not self._verify_clone_size(snapshot, volume['size'] * units.Gi):
exception_msg = (_('Error verifying clone size on '
'Volume clone: %(clone)s '
                               'Size: %(size)d on '
'Snapshot: %(snapshot)s')
% {'clone': volume['name'],
'size': volume['size'],
'snapshot': snapshot['name']})
LOG.error(exception_msg)
raise exception.InvalidInput(reason=exception_msg)
lcfg = self.configuration
self.zfssa.clone_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'],
lcfg.zfssa_project,
volume['name'])
def _update_volume_status(self):
"""Retrieve status info from volume group."""
LOG.debug("Updating volume status")
self._stats = None
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'Oracle'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
lcfg = self.configuration
(avail, total) = self.zfssa.get_project_stats(lcfg.zfssa_pool,
lcfg.zfssa_project)
if avail is None or total is None:
return
host = lcfg.san_ip
pool = lcfg.zfssa_pool
project = lcfg.zfssa_project
auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_password)
auth_str = base64.encode_as_text(auth_str)[:-1]
zfssa_tgt_group = lcfg.zfssa_target_group
repl_ip = lcfg.zfssa_replication_ip
data['location_info'] = "%s:%s:%s:%s:%s:%s" % (host, auth_str, pool,
project,
zfssa_tgt_group,
repl_ip)
data['total_capacity_gb'] = int(total) / units.Gi
data['free_capacity_gb'] = int(avail) / units.Gi
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_status()
return self._stats
def create_export(self, context, volume, connector):
pass
def remove_export(self, context, volume):
pass
def ensure_export(self, context, volume):
pass
def extend_volume(self, volume, new_size):
"""Driver entry point to extent volume size."""
LOG.debug('extend_volume: volume name: %s', volume['name'])
lcfg = self.configuration
self.zfssa.set_lun_props(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
volsize=new_size * units.Gi)
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the specified volume."""
zfssa_snapshot = {'volume_name': src_vref['name'],
'name': 'tmp-snapshot-%s' % volume['id']}
self.create_snapshot(zfssa_snapshot)
try:
self.create_volume_from_snapshot(volume, zfssa_snapshot)
except exception.VolumeBackendAPIException:
            LOG.error(_LE('Clone Volume: '
                          '%(volume)s failed from source volume: '
'%(src_vref)s'),
{'volume': volume['name'],
'src_vref': src_vref['name']})
# Cleanup snapshot
self.delete_snapshot(zfssa_snapshot)
@utils.synchronized('zfssaiscsi', external=True)
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
"""Create a volume efficiently from an existing image.
Verify the image ID being used:
(1) If there is no existing cache volume, create one and transfer
image data to it. Take a snapshot.
        (2) If a cache volume already exists, verify whether it has been
        altered or updated. If so, try to remove it; raise an exception if
        removal fails.
Create a new cache volume as in (1).
Clone a volume from the cache volume and returns it to Cinder.
A file lock is placed on this method to prevent:
(a) a race condition when a cache volume has been verified, but then
gets deleted before it is cloned.
(b) failure of subsequent clone_image requests if the first request is
still pending.
"""
LOG.debug('Cloning image %(image)s to volume %(volume)s',
{'image': image_meta['id'], 'volume': volume['name']})
lcfg = self.configuration
cachevol_size = 0
if not lcfg.zfssa_enable_local_cache:
return None, False
with image_utils.TemporaryImages.fetch(image_service,
context,
image_meta['id']) as tmp_image:
info = image_utils.qemu_img_info(tmp_image)
cachevol_size = int(math.ceil(float(info.virtual_size) / units.Gi))
if cachevol_size > volume['size']:
exception_msg = (_LE('Image size %(img_size)dGB is larger '
'than volume size %(vol_size)dGB.'),
{'img_size': cachevol_size,
'vol_size': volume['size']})
LOG.error(exception_msg)
return None, False
specs = self._get_voltype_specs(volume)
cachevol_props = {'size': cachevol_size}
try:
cache_vol, cache_snap = self._verify_cache_volume(context,
image_meta,
image_service,
specs,
cachevol_props)
# A cache volume and a snapshot should be ready by now
# Create a clone from the cache volume
self.zfssa.clone_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache_vol,
cache_snap,
lcfg.zfssa_project,
volume['name'])
if cachevol_size < volume['size']:
self.extend_volume(volume, volume['size'])
except exception.VolumeBackendAPIException as exc:
exception_msg = (_LE('Cannot clone image %(image)s to '
'volume %(volume)s. Error: %(error)s.'),
{'volume': volume['name'],
'image': image_meta['id'],
'error': exc.message})
LOG.error(exception_msg)
return None, False
return None, True
def _verify_cache_volume(self, context, img_meta,
img_service, specs, cachevol_props):
"""Verify if we have a cache volume that we want.
If we don't, create one.
If we do, check if it's been updated:
* If so, delete it and recreate a new volume
* If not, we are good.
If it's out of date, delete it and create a new one.
After the function returns, there should be a cache volume available,
ready for cloning.
"""
lcfg = self.configuration
cachevol_name = 'os-cache-vol-%s' % img_meta['id']
cachesnap_name = 'image-%s' % img_meta['id']
cachevol_meta = {
'cache_name': cachevol_name,
'snap_name': cachesnap_name,
}
cachevol_props.update(cachevol_meta)
cache_vol, cache_snap = None, None
updated_at = six.text_type(img_meta['updated_at'].isoformat())
LOG.debug('Verifying cache volume %s:', cachevol_name)
try:
cache_vol = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cachevol_name)
if (not cache_vol.get('updated_at', None) or
not cache_vol.get('image_id', None)):
exc_msg = (_('Cache volume %s does not have required '
'properties') % cachevol_name)
LOG.error(exc_msg)
raise exception.VolumeBackendAPIException(data=exc_msg)
cache_snap = self.zfssa.get_lun_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cachevol_name,
cachesnap_name)
except exception.VolumeNotFound:
# There is no existing cache volume, create one:
return self._create_cache_volume(context,
img_meta,
img_service,
specs,
cachevol_props)
except exception.SnapshotNotFound:
            exception_msg = (_('Cache volume %(cache_vol)s '
'does not have snapshot %(cache_snap)s.'),
{'cache_vol': cachevol_name,
'cache_snap': cachesnap_name})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
# A cache volume does exist, check if it's updated:
if ((cache_vol['updated_at'] != updated_at) or
(cache_vol['image_id'] != img_meta['id'])):
# The cache volume is updated, but has clones:
if cache_snap['numclones'] > 0:
exception_msg = (_('Cannot delete '
'cache volume: %(cachevol_name)s. '
'It was updated at %(updated_at)s '
'and currently has %(numclones)s '
'volume instances.'),
{'cachevol_name': cachevol_name,
'updated_at': updated_at,
'numclones': cache_snap['numclones']})
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
# The cache volume is updated, but has no clone, so we delete it
# and re-create a new one:
self.zfssa.delete_lun(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cachevol_name)
return self._create_cache_volume(context,
img_meta,
img_service,
specs,
cachevol_props)
return cachevol_name, cachesnap_name
def _create_cache_volume(self, context, img_meta,
img_service, specs, cachevol_props):
"""Create a cache volume from an image.
Returns names of the cache volume and its snapshot.
"""
lcfg = self.configuration
cachevol_size = int(cachevol_props['size'])
lunsize = "%sg" % six.text_type(cachevol_size)
lun_props = {
'custom:image_id': img_meta['id'],
'custom:updated_at': (
six.text_type(img_meta['updated_at'].isoformat())),
}
lun_props.update(specs)
cache_vol = {
'name': cachevol_props['cache_name'],
'id': img_meta['id'],
'size': cachevol_size,
}
LOG.debug('Creating cache volume %s.', cache_vol['name'])
try:
self.zfssa.create_lun(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache_vol['name'],
lunsize,
lcfg.zfssa_target_group,
lun_props)
super(ZFSSAISCSIDriver, self).copy_image_to_volume(context,
cache_vol,
img_service,
img_meta['id'])
self.zfssa.create_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache_vol['name'],
cachevol_props['snap_name'])
except Exception as exc:
            exc_msg = (_('Failed to create cache volume %(volume)s. '
'Error: %(err)s'),
{'volume': cache_vol['name'],
'err': six.text_type(exc)})
LOG.error(exc_msg)
self.zfssa.delete_lun(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache_vol['name'])
raise exception.VolumeBackendAPIException(data=exc_msg)
return cachevol_props['cache_name'], cachevol_props['snap_name']
def local_path(self, volume):
"""Not implemented."""
pass
def backup_volume(self, context, backup, backup_service):
"""Not implemented."""
pass
def restore_backup(self, context, backup, volume, backup_service):
"""Not implemented."""
pass
def _verify_clone_size(self, snapshot, size):
"""Check whether the clone size is the same as the parent volume."""
lcfg = self.configuration
lun = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'])
return lun['size'] == size
def initialize_connection(self, volume, connector):
lcfg = self.configuration
init_groups = self.zfssa.get_initiator_initiatorgroup(
connector['initiator'])
if ((lcfg.zfssa_enable_local_cache is True) and
(volume['name'].startswith('os-cache-vol-'))):
project = lcfg.zfssa_cache_project
else:
project = lcfg.zfssa_project
for initiator_group in init_groups:
self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
project,
volume['name'],
initiator_group)
iscsi_properties = {}
provider = self._get_provider_info(volume)
(target_portal, iqn, lun) = provider['provider_location'].split()
iscsi_properties['target_discovered'] = False
iscsi_properties['target_portal'] = target_portal
iscsi_properties['target_iqn'] = iqn
iscsi_properties['target_lun'] = lun
iscsi_properties['volume_id'] = volume['id']
if 'provider_auth' in provider:
(auth_method, auth_username, auth_password) = provider[
'provider_auth'].split()
iscsi_properties['auth_method'] = auth_method
iscsi_properties['auth_username'] = auth_username
iscsi_properties['auth_password'] = auth_password
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
}
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to terminate a connection for a volume."""
LOG.debug('terminate_connection: volume name: %s.', volume['name'])
lcfg = self.configuration
project = lcfg.zfssa_project
if ((lcfg.zfssa_enable_local_cache is True) and
(volume['name'].startswith('os-cache-vol-'))):
project = lcfg.zfssa_cache_project
self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
project,
volume['name'],
'')
def _get_voltype_specs(self, volume):
"""Get specs suitable for volume creation."""
vtype = volume.get('volume_type_id', None)
extra_specs = None
if vtype:
extra_specs = volume_types.get_volume_type_extra_specs(vtype)
return self._get_specs(extra_specs)
def _get_specs(self, xspecs):
"""Return a dict with extra specs and/or config values."""
result = {}
for spc in ZFSSA_LUN_SPECS:
val = None
prop = spc.split(':')[1]
cfg = 'zfssa_lun_' + prop
if xspecs:
val = xspecs.pop(spc, None)
if val is None:
val = self.configuration.safe_get(cfg)
if val is not None and val != '':
result.update({prop: val})
return result
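    # Hedged illustration (defaults taken from ZFSSA_OPTS above, the extra
    # spec value is made up): _get_specs({'zfssa:compression': 'gzip'}) would
    # return {'volblocksize': '8k', 'sparse': False, 'compression': 'gzip',
    # 'logbias': 'latency'}, i.e. extra specs override the zfssa_lun_* config
    # values of the same name.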
def migrate_volume(self, ctxt, volume, host):
LOG.debug('Attempting ZFSSA enabled volume migration. volume: %(id)s, '
'host: %(host)s, status=%(status)s.',
{'id': volume['id'],
'host': host,
'status': volume['status']})
lcfg = self.configuration
default_ret = (False, None)
if volume['status'] != "available":
LOG.debug('Only available volumes can be migrated using backend '
'assisted migration. Defaulting to generic migration.')
return default_ret
if (host['capabilities']['vendor_name'] != 'Oracle' or
host['capabilities']['storage_protocol'] != self.protocol):
LOG.debug('Source and destination drivers need to be Oracle iSCSI '
'to use backend assisted migration. Defaulting to '
'generic migration.')
return default_ret
if 'location_info' not in host['capabilities']:
LOG.debug('Could not find location_info in capabilities reported '
'by the destination driver. Defaulting to generic '
'migration.')
return default_ret
loc_info = host['capabilities']['location_info']
try:
(tgt_host, auth_str, tgt_pool, tgt_project, tgt_tgtgroup,
tgt_repl_ip) = loc_info.split(':')
except ValueError:
LOG.error(_LE("Location info needed for backend enabled volume "
"migration not in correct format: %s. Continuing "
"with generic volume migration."), loc_info)
return default_ret
if tgt_repl_ip == '':
msg = _LE("zfssa_replication_ip not set in cinder.conf. "
"zfssa_replication_ip is needed for backend enabled "
"volume migration. Continuing with generic volume "
"migration.")
LOG.error(msg)
return default_ret
src_pool = lcfg.zfssa_pool
src_project = lcfg.zfssa_project
try:
LOG.info(_LI('Connecting to target host: %s for backend enabled '
'migration.'), tgt_host)
self.tgt_zfssa.set_host(tgt_host)
self.tgt_zfssa.login(auth_str)
# Verify that the replication service is online
try:
self.zfssa.verify_service('replication')
self.tgt_zfssa.verify_service('replication')
except exception.VolumeBackendAPIException:
return default_ret
# ensure that a target group by the same name exists on the target
# system also, if not, use default migration.
lun = self.zfssa.get_lun(src_pool, src_project, volume['name'])
if lun['targetgroup'] != tgt_tgtgroup:
return default_ret
tgt_asn = self.tgt_zfssa.get_asn()
src_asn = self.zfssa.get_asn()
# verify on the source system that the destination has been
# registered as a replication target
tgts = self.zfssa.get_replication_targets()
targets = []
for target in tgts['targets']:
if target['asn'] == tgt_asn:
targets.append(target)
if targets == []:
LOG.debug('Target host: %(host)s for volume migration '
'not configured as a replication target '
'for volume: %(vol)s.',
{'host': tgt_repl_ip,
'vol': volume['name']})
return default_ret
# Multiple ips from the same appliance may be configured
# as different targets
for target in targets:
if target['address'] == tgt_repl_ip + ':216':
break
if target['address'] != tgt_repl_ip + ':216':
LOG.debug('Target with replication ip: %s not configured on '
'the source appliance for backend enabled volume '
'migration. Proceeding with default migration.',
tgt_repl_ip)
return default_ret
flow = lf.Flow('zfssa_volume_migration').add(
MigrateVolumeInit(),
MigrateVolumeCreateAction(provides='action_id'),
MigrateVolumeSendReplUpdate(),
MigrateVolumeSeverRepl(),
MigrateVolumeMoveVol(),
MigrateVolumeCleanUp()
)
taskflow.engines.run(flow,
store={'driver': self,
'tgt_zfssa': self.tgt_zfssa,
'tgt_pool': tgt_pool,
'tgt_project': tgt_project,
'volume': volume, 'tgt_asn': tgt_asn,
'src_zfssa': self.zfssa,
'src_asn': src_asn,
'src_pool': src_pool,
'src_project': src_project,
'target': target})
            return (True, None)
except Exception:
LOG.error(_LE("Error migrating volume: %s"), volume['name'])
raise
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
lcfg = self.configuration
original_name = CONF.volume_name_template % volume['id']
current_name = CONF.volume_name_template % new_volume['id']
LOG.debug('Renaming migrated volume: %(cur)s to %(org)s',
{'cur': current_name,
'org': original_name})
self.zfssa.set_lun_props(lcfg.zfssa_pool, lcfg.zfssa_project,
current_name, name=original_name)
return {'_name_id': None}
@utils.synchronized('zfssaiscsi', external=True)
def _check_origin(self, lun, volname):
"""Verify the cache volume of a bootable volume.
        If the cache no longer has any clones, it will be deleted.
There is a small lag between the time a clone is deleted and the number
of clones being updated accordingly. There is also a race condition
when multiple volumes (clones of a cache volume) are deleted at once,
leading to the number of clones reported incorrectly. The file lock is
here to avoid such issues.
"""
lcfg = self.configuration
cache = lun['origin']
numclones = -1
if (cache['snapshot'].startswith('image-') and
cache['share'].startswith('os-cache-vol')):
try:
numclones = self.zfssa.num_clones(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache['share'],
cache['snapshot'])
except Exception:
LOG.debug('Cache volume is already deleted.')
return
LOG.debug('Checking cache volume %(name)s, numclones = %(clones)d',
{'name': cache['share'], 'clones': numclones})
# Sometimes numclones still hold old values even when all clones
# have been deleted. So we handle this situation separately here:
if numclones == 1:
try:
self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
volname)
# The volume does exist, so return
return
except exception.VolumeNotFound:
# The volume is already deleted
numclones = 0
if numclones == 0:
try:
self.zfssa.delete_lun(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache['share'])
except exception.VolumeBackendAPIException:
LOG.warning(_LW("Volume %s exists but can't be deleted"),
cache['share'])
class MigrateVolumeInit(task.Task):
def execute(self, src_zfssa, volume, src_pool, src_project):
LOG.debug('Setting inherit flag on source backend to False.')
src_zfssa.edit_inherit_replication_flag(src_pool, src_project,
volume['name'], set=False)
def revert(self, src_zfssa, volume, src_pool, src_project, **kwargs):
LOG.debug('Rollback: Setting inherit flag on source appliance to '
'True.')
src_zfssa.edit_inherit_replication_flag(src_pool, src_project,
volume['name'], set=True)
class MigrateVolumeCreateAction(task.Task):
def execute(self, src_zfssa, volume, src_pool, src_project, target,
tgt_pool):
LOG.debug('Creating replication action on source appliance.')
action_id = src_zfssa.create_replication_action(src_pool,
src_project,
target['label'],
tgt_pool,
volume['name'])
self._action_id = action_id
return action_id
def revert(self, src_zfssa, **kwargs):
if hasattr(self, '_action_id'):
LOG.debug('Rollback: deleting replication action on source '
'appliance.')
src_zfssa.delete_replication_action(self._action_id)
class MigrateVolumeSendReplUpdate(task.Task):
def execute(self, src_zfssa, action_id):
LOG.debug('Sending replication update from source appliance.')
src_zfssa.send_repl_update(action_id)
LOG.debug('Deleting replication action on source appliance.')
src_zfssa.delete_replication_action(action_id)
self._action_deleted = True
class MigrateVolumeSeverRepl(task.Task):
def execute(self, tgt_zfssa, src_asn, action_id, driver):
source = tgt_zfssa.get_replication_source(src_asn)
if not source:
err = (_('Source with host ip/name: %s not found on the '
'target appliance for backend enabled volume '
                   'migration, proceeding with default migration.'),
driver.configuration.san_ip)
LOG.error(err)
raise exception.VolumeBackendAPIException(data=err)
LOG.debug('Severing replication package on destination appliance.')
tgt_zfssa.sever_replication(action_id, source['name'],
project=action_id)
class MigrateVolumeMoveVol(task.Task):
def execute(self, tgt_zfssa, tgt_pool, tgt_project, action_id, volume):
LOG.debug('Moving LUN to destination project on destination '
'appliance.')
tgt_zfssa.move_volume(tgt_pool, action_id, volume['name'], tgt_project)
LOG.debug('Deleting temporary project on destination appliance.')
tgt_zfssa.delete_project(tgt_pool, action_id)
self._project_deleted = True
def revert(self, tgt_zfssa, tgt_pool, tgt_project, action_id, volume,
**kwargs):
if not hasattr(self, '_project_deleted'):
LOG.debug('Rollback: deleting temporary project on destination '
'appliance.')
tgt_zfssa.delete_project(tgt_pool, action_id)
class MigrateVolumeCleanUp(task.Task):
def execute(self, driver, volume, tgt_zfssa):
LOG.debug('Finally, delete source volume on source appliance.')
driver.delete_volume(volume)
tgt_zfssa.logout()
| apporc/cinder | cinder/volume/drivers/zfssa/zfssaiscsi.py | Python | apache-2.0 | 45,567 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Cloud'
db.create_table(u'cloudslave_cloud', (
('name', self.gf('django.db.models.fields.CharField')(max_length=200, primary_key=True)),
('endpoint', self.gf('django.db.models.fields.URLField')(max_length=200)),
('user_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('tenant_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('password', self.gf('django.db.models.fields.CharField')(max_length=200)),
('region', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('flavor_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('image_name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('floating_ip_mode', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
))
db.send_create_signal(u'cloudslave', ['Cloud'])
# Adding model 'KeyPair'
db.create_table(u'cloudslave_keypair', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('cloud', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cloudslave.Cloud'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('private_key', self.gf('django.db.models.fields.TextField')()),
('public_key', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'cloudslave', ['KeyPair'])
# Adding unique constraint on 'KeyPair', fields ['cloud', 'name']
db.create_unique(u'cloudslave_keypair', ['cloud_id', 'name'])
# Adding model 'Reservation'
db.create_table(u'cloudslave_reservation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('cloud', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cloudslave.Cloud'])),
('number_of_slaves', self.gf('django.db.models.fields.IntegerField')()),
('state', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('timeout', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'cloudslave', ['Reservation'])
# Adding model 'Slave'
db.create_table(u'cloudslave_slave', (
('name', self.gf('django.db.models.fields.CharField')(max_length=200, primary_key=True)),
('reservation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cloudslave.Reservation'])),
('cloud_node_id', self.gf('django.db.models.fields.CharField')(max_length=200)),
('state', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
))
db.send_create_signal(u'cloudslave', ['Slave'])
def backwards(self, orm):
# Removing unique constraint on 'KeyPair', fields ['cloud', 'name']
db.delete_unique(u'cloudslave_keypair', ['cloud_id', 'name'])
# Deleting model 'Cloud'
db.delete_table(u'cloudslave_cloud')
# Deleting model 'KeyPair'
db.delete_table(u'cloudslave_keypair')
# Deleting model 'Reservation'
db.delete_table(u'cloudslave_reservation')
# Deleting model 'Slave'
db.delete_table(u'cloudslave_slave')
models = {
u'cloudslave.cloud': {
'Meta': {'object_name': 'Cloud'},
'endpoint': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'flavor_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'floating_ip_mode': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'image_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'tenant_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'cloudslave.keypair': {
'Meta': {'unique_together': "(('cloud', 'name'),)", 'object_name': 'KeyPair'},
'cloud': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cloudslave.Cloud']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'private_key': ('django.db.models.fields.TextField', [], {}),
'public_key': ('django.db.models.fields.TextField', [], {})
},
u'cloudslave.reservation': {
'Meta': {'object_name': 'Reservation'},
'cloud': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cloudslave.Cloud']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_slaves': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'timeout': ('django.db.models.fields.DateTimeField', [], {})
},
u'cloudslave.slave': {
'Meta': {'object_name': 'Slave'},
'cloud_node_id': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'}),
'reservation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cloudslave.Reservation']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cloudslave'] | sorenh/python-django-cloudslave | cloudslave/migrations/0001_initial.py | Python | apache-2.0 | 6,224 |
"""Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from functools import reduce
import pytz
from six import text_type
from six.moves.urllib.parse import unquote_plus
from datetime import datetime
from django.conf import settings
from graphite.carbonlink import CarbonLink
from graphite.compat import HttpResponse, HttpResponseBadRequest
from graphite.errors import InputParameterError, handleInputParameterError
from graphite.logger import log
from graphite.render.attime import parseATTime
from graphite.storage import STORE, extractForwardHeaders
from graphite.user_util import getProfile
from graphite.util import epoch, json, pickle, msgpack
def index_json(request):
queryParams = request.GET.copy()
queryParams.update(request.POST)
try:
jsonp = queryParams.get('jsonp', False)
requestContext = {
'localOnly': int( queryParams.get('local', 0) ),
'forwardHeaders': extractForwardHeaders(request),
}
matches = STORE.get_index(requestContext)
except Exception:
log.exception()
return json_response_for(request, [], jsonp=jsonp, status=500)
return json_response_for(request, matches, jsonp=jsonp)
def queryParamAsInt(queryParams, name, default):
if name not in queryParams:
return default
try:
return int(queryParams[name])
except Exception as e:
raise InputParameterError('Invalid int value {value} for param {name}: {err}'.format(
value=repr(queryParams[name]),
name=name,
err=str(e)))
@handleInputParameterError
def find_view(request):
"View for finding metrics matching a given pattern"
queryParams = request.GET.copy()
queryParams.update(request.POST)
format = queryParams.get('format', 'treejson')
leaves_only = queryParamAsInt(queryParams, 'leavesOnly', 0)
local_only = queryParamAsInt(queryParams, 'local', 0)
wildcards = queryParamAsInt(queryParams, 'wildcards', 0)
tzinfo = pytz.timezone(settings.TIME_ZONE)
if 'tz' in queryParams:
try:
value = queryParams['tz']
tzinfo = pytz.timezone(value)
except pytz.UnknownTimeZoneError:
pass
except Exception as e:
raise InputParameterError(
'Invalid value {value} for param tz: {err}'
.format(value=repr(value), err=str(e)))
if 'now' in queryParams:
try:
value = queryParams['now']
now = parseATTime(value, tzinfo)
except Exception as e:
raise InputParameterError(
'Invalid value {value} for param now: {err}'
.format(value=repr(value), err=str(e)))
else:
now = datetime.now(tzinfo)
if 'from' in queryParams and str(queryParams['from']) != '-1':
try:
value = queryParams['from']
fromTime = int(epoch(parseATTime(value, tzinfo, now)))
except Exception as e:
raise InputParameterError(
'Invalid value {value} for param from: {err}'
.format(value=repr(value), err=str(e)))
else:
fromTime = -1
if 'until' in queryParams and str(queryParams['until']) != '-1':
try:
value = queryParams['until']
untilTime = int(epoch(parseATTime(value, tzinfo, now)))
except Exception as e:
raise InputParameterError(
'Invalid value {value} for param until: {err}'
.format(value=repr(value), err=str(e)))
else:
untilTime = -1
nodePosition = queryParamAsInt(queryParams, 'position', -1)
jsonp = queryParams.get('jsonp', False)
forward_headers = extractForwardHeaders(request)
if fromTime == -1:
fromTime = None
if untilTime == -1:
untilTime = None
automatic_variants = queryParamAsInt(queryParams, 'automatic_variants', 0)
try:
query = str(queryParams['query'])
except KeyError:
raise InputParameterError('Missing required parameter \'query\'')
if query == '':
raise InputParameterError('Required parameter \'query\' is empty')
if '.' in query:
base_path = query.rsplit('.', 1)[0] + '.'
else:
base_path = ''
if format == 'completer':
query = query.replace('..', '*.')
if not query.endswith('*'):
query += '*'
if automatic_variants:
query_parts = query.split('.')
for i,part in enumerate(query_parts):
if ',' in part and '{' not in part:
query_parts[i] = '{%s}' % part
query = '.'.join(query_parts)
try:
matches = list(STORE.find(
query, fromTime, untilTime,
local=local_only,
headers=forward_headers,
leaves_only=leaves_only,
))
except Exception:
log.exception()
raise
log.info('find_view query=%s local_only=%s matches=%d' % (query, local_only, len(matches)))
matches.sort(key=lambda node: node.name)
log.info("received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d" % (query, fromTime, untilTime, local_only, format, len(matches)))
if format == 'treejson':
profile = getProfile(request)
content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards)
response = json_response_for(request, content, jsonp=jsonp)
elif format == 'nodelist':
content = nodes_by_position(matches, nodePosition)
response = json_response_for(request, content, jsonp=jsonp)
elif format == 'pickle':
content = pickle_nodes(matches)
response = HttpResponse(content, content_type='application/pickle')
elif format == 'msgpack':
content = msgpack_nodes(matches)
response = HttpResponse(content, content_type='application/x-msgpack')
elif format == 'json':
content = json_nodes(matches)
response = json_response_for(request, content, jsonp=jsonp)
elif format == 'completer':
results = []
for node in matches:
node_info = dict(path=node.path, name=node.name, is_leaf=str(int(node.is_leaf)))
if not node.is_leaf:
node_info['path'] += '.'
results.append(node_info)
if len(results) > 1 and wildcards:
wildcardNode = {'name' : '*'}
results.append(wildcardNode)
response = json_response_for(request, { 'metrics' : results }, jsonp=jsonp)
else:
return HttpResponseBadRequest(
content="Invalid value for 'format' parameter",
content_type='text/plain')
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
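# Hedged usage sketch: this view is normally exercised over HTTP (the exact
# URL prefix depends on the deployment's urls.py), e.g.
#
#   GET /metrics/find?query=carbon.agents.*&format=treejson
#
# For format=completer the query is normalised first, so 'carbon.ag' is
# treated as 'carbon.ag*' and '..' as '*.'.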
def expand_view(request):
"View for expanding a pattern into matching metric paths"
queryParams = request.GET.copy()
queryParams.update(request.POST)
local_only = int( queryParams.get('local', 0) )
group_by_expr = int( queryParams.get('groupByExpr', 0) )
leaves_only = int( queryParams.get('leavesOnly', 0) )
jsonp = queryParams.get('jsonp', False)
forward_headers = extractForwardHeaders(request)
results = {}
for query in queryParams.getlist('query'):
results[query] = set()
for node in STORE.find(query, local=local_only, headers=forward_headers):
if node.is_leaf or not leaves_only:
results[query].add( node.path )
# Convert our results to sorted lists because sets aren't json-friendly
if group_by_expr:
for query, matches in results.items():
results[query] = sorted(matches)
else:
results = sorted( reduce(set.union, results.values(), set()) )
result = {
'results' : results
}
response = json_response_for(request, result, jsonp=jsonp)
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
def get_metadata_view(request):
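    # Look up a single metadata key for each requested metric via CarbonLink,
    # returning per-metric results (or error entries) as JSON.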
queryParams = request.GET.copy()
queryParams.update(request.POST)
key = queryParams.get('key')
metrics = queryParams.getlist('metric')
jsonp = queryParams.get('jsonp', False)
results = {}
for metric in metrics:
try:
results[metric] = CarbonLink.get_metadata(metric, key)
except Exception:
log.exception()
results[metric] = dict(error="Unexpected error occurred in CarbonLink.get_metadata(%s, %s)" % (metric, key))
return json_response_for(request, results, jsonp=jsonp)
def set_metadata_view(request):
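    # Set metadata via CarbonLink: GET handles a single metric/key/value triple,
    # while POST accepts a JSON list of operations for bulk updates.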
results = {}
if request.method == 'GET':
metric = request.GET['metric']
key = request.GET['key']
value = request.GET['value']
try:
results[metric] = CarbonLink.set_metadata(metric, key, value)
except Exception:
log.exception()
results[metric] = dict(error="Unexpected error occurred in CarbonLink.set_metadata(%s, %s)" % (metric, key))
elif request.method == 'POST':
if request.META.get('CONTENT_TYPE') == 'application/json':
operations = json.loads( request.body )
else:
operations = json.loads( request.POST['operations'] )
for op in operations:
metric = None
try:
metric, key, value = op['metric'], op['key'], op['value']
results[metric] = CarbonLink.set_metadata(metric, key, value)
except Exception:
log.exception()
if metric:
results[metric] = dict(error="Unexpected error occurred in bulk CarbonLink.set_metadata(%s)" % metric)
else:
results = dict(error='Invalid request method')
return json_response_for(request, results)
def tree_json(nodes, base_path, wildcards=False):
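    # Build the node list consumed by the tree UI: branch entries are emitted
    # before leaf entries, with an optional leading wildcard node.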
results = []
branchNode = {
'allowChildren': 1,
'expandable': 1,
'leaf': 0,
}
leafNode = {
'allowChildren': 0,
'expandable': 0,
'leaf': 1,
}
#Add a wildcard node if appropriate
if len(nodes) > 1 and wildcards:
wildcardNode = {'text' : '*', 'id' : base_path + '*'}
if any(not n.is_leaf for n in nodes):
wildcardNode.update(branchNode)
else:
wildcardNode.update(leafNode)
results.append(wildcardNode)
found = set()
results_leaf = []
results_branch = []
for node in nodes: #Now let's add the matching children
if node.name in found:
continue
found.add(node.name)
resultNode = {
'text' : unquote_plus(str(node.name)),
'id' : base_path + str(node.name),
}
if node.is_leaf:
resultNode.update(leafNode)
results_leaf.append(resultNode)
else:
resultNode.update(branchNode)
results_branch.append(resultNode)
results.extend(results_branch)
results.extend(results_leaf)
return results
def nodes_by_position(matches, position):
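    # Collect the distinct path component found at the given position across all matches.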
found = set()
for metric in matches:
nodes = metric.path.split('.')
found.add(nodes[position])
results = { 'nodes' : sorted(found) }
return results
def pickle_nodes(nodes):
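    # Serialise node info (path, leaf flag and, for leaves, intervals) for the pickle format.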
nodes_info = []
for node in nodes:
info = dict(path=node.path, is_leaf=node.is_leaf)
if node.is_leaf:
info['intervals'] = node.intervals
nodes_info.append(info)
return pickle.dumps(nodes_info, protocol=-1)
def msgpack_nodes(nodes):
nodes_info = []
# make sure everything is unicode in python 2.x and 3.x
for node in nodes:
info = {
text_type('path'): text_type(node.path),
text_type('is_leaf'): node.is_leaf,
}
if node.is_leaf:
info[text_type('intervals')] = [interval.tuple for interval in node.intervals]
nodes_info.append(info)
return msgpack.dumps(nodes_info, use_bin_type=True)
def json_nodes(nodes):
nodes_info = []
for node in nodes:
info = dict(path=node.path, is_leaf=node.is_leaf)
if node.is_leaf:
info['intervals'] = [{'start': i.start, 'end': i.end} for i in node.intervals]
nodes_info.append(info)
return sorted(nodes_info, key=lambda item: item['path'])
def json_response_for(request, data, content_type='application/json', jsonp=False, **kwargs):
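    # Render data as JSON, honouring the Accept header for ASCII/UTF-8 output,
    # an optional 'pretty' parameter, and JSONP callbacks.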
accept = request.META.get('HTTP_ACCEPT', 'application/json')
ensure_ascii = accept == 'application/json'
pretty = bool(request.POST.get('pretty', request.GET.get('pretty')))
content = json.dumps(data, ensure_ascii=ensure_ascii, indent=(2 if pretty else None))
if jsonp:
content = "%s(%s)" % (jsonp, content)
content_type = 'text/javascript'
if not ensure_ascii:
content_type += ';charset=utf-8'
return HttpResponse(content, content_type=content_type, **kwargs)
| criteo-forks/graphite-web | webapp/graphite/metrics/views.py | Python | apache-2.0 | 12,370 |
# import system modules
import arcpy
from arcpy import env
# Set environment settings
env.workspace = r"C:\Users\Ewan\Desktop\SFTPDST5\MapFiles"
try:
# Set the local variable
in_Table = "Topography.csv"
x_coords = "x"
y_coords = "y"
out_Layer = "Topography_Layer"
    saved_Layer = r"c:\Users\Ewan\Desktop\SFTPDST5\Mapfiles\Topography.lyr"
# Set the spatial reference
spRef = r"Coordinate Systems\Geographic Coordinate Systens\World\WGS 1984.prj"
# Make the XY Event Layer
arcpy.MakeXYEventLayer_management(in_Table, x_coords, y_coords, out_Layer, spRef)
# Save to a layer file
arcpy.SaveToLayerFile_management(out_Layer, saved_Layer)
except Exception as err:
print(err.args[0])
# Set local variables
inFeatures = "Topography.lyr"
valField = "Topography"
outRaster = r"C:\Users\Ewan\Desktop\SFTPDST5\Mapfiles\TopographyR"
assignmentType = "MOST_FREQUENT"
priorityField = ""
cellSize = 0.000005
# Execute PointToRaster
arcpy.PointToRaster_conversion(inFeatures, valField, outRaster, assignmentType, priorityField, cellSize)
##Assign colormap using clr file
arcpy.AddColormap_management("c:\Users\Ewan\Desktop\SFTPDST5\Mapfiles\TopographyR", "#", "c:\Users\Ewan\Desktop\SFTPDST5\Mapfiles\colormap.clr") | harryfb/DST5 | ArcPy Code/Topography.py | Python | apache-2.0 | 1,297 |
from sqlalchemy import *
from sqlalchemy import sql, schema
from sqlalchemy.sql import compiler
from test.lib import *
class QuoteTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_class(cls):
# TODO: figure out which databases/which identifiers allow special
# characters to be used, such as: spaces, quote characters,
# punctuation characters, set up tests for those as well.
global table1, table2, table3
metadata = MetaData(testing.db)
table1 = Table('WorstCase1', metadata,
Column('lowercase', Integer, primary_key=True),
Column('UPPERCASE', Integer),
Column('MixedCase', Integer),
Column('ASC', Integer, key='a123'))
table2 = Table('WorstCase2', metadata,
Column('desc', Integer, primary_key=True, key='d123'),
Column('Union', Integer, key='u123'),
Column('MixedCase', Integer))
table1.create()
table2.create()
def teardown(self):
table1.delete().execute()
table2.delete().execute()
@classmethod
def teardown_class(cls):
table1.drop()
table2.drop()
def test_basic(self):
table1.insert().execute({'lowercase':1,'UPPERCASE':2,'MixedCase':3,'a123':4},
{'lowercase':2,'UPPERCASE':2,'MixedCase':3,'a123':4},
{'lowercase':4,'UPPERCASE':3,'MixedCase':2,'a123':1})
table2.insert().execute({'d123':1,'u123':2,'MixedCase':3},
{'d123':2,'u123':2,'MixedCase':3},
{'d123':4,'u123':3,'MixedCase':2})
res1 = select([table1.c.lowercase, table1.c.UPPERCASE, table1.c.MixedCase, table1.c.a123]).execute().fetchall()
print res1
assert(res1==[(1,2,3,4),(2,2,3,4),(4,3,2,1)])
res2 = select([table2.c.d123, table2.c.u123, table2.c.MixedCase]).execute().fetchall()
print res2
assert(res2==[(1,2,3),(2,2,3),(4,3,2)])
def test_numeric(self):
metadata = MetaData()
t1 = Table('35table', metadata,
Column('25column', Integer))
self.assert_compile(schema.CreateTable(t1), 'CREATE TABLE "35table" ('
'"25column" INTEGER'
')'
)
def test_reflect(self):
meta2 = MetaData(testing.db)
t2 = Table('WorstCase2', meta2, autoload=True, quote=True)
assert 'MixedCase' in t2.c
def test_labels(self):
table1.insert().execute({'lowercase':1,'UPPERCASE':2,'MixedCase':3,'a123':4},
{'lowercase':2,'UPPERCASE':2,'MixedCase':3,'a123':4},
{'lowercase':4,'UPPERCASE':3,'MixedCase':2,'a123':1})
table2.insert().execute({'d123':1,'u123':2,'MixedCase':3},
{'d123':2,'u123':2,'MixedCase':3},
{'d123':4,'u123':3,'MixedCase':2})
res1 = select([table1.c.lowercase, table1.c.UPPERCASE, table1.c.MixedCase, table1.c.a123], use_labels=True).execute().fetchall()
print res1
assert(res1==[(1,2,3,4),(2,2,3,4),(4,3,2,1)])
res2 = select([table2.c.d123, table2.c.u123, table2.c.MixedCase], use_labels=True).execute().fetchall()
print res2
assert(res2==[(1,2,3),(2,2,3),(4,3,2)])
def test_quote_flag(self):
metadata = MetaData()
t1 = Table('TableOne', metadata,
Column('ColumnOne', Integer), schema="FooBar")
self.assert_compile(t1.select(), '''SELECT "FooBar"."TableOne"."ColumnOne" FROM "FooBar"."TableOne"''')
metadata = MetaData()
t1 = Table('t1', metadata,
Column('col1', Integer, quote=True), quote=True, schema="foo", quote_schema=True)
self.assert_compile(t1.select(), '''SELECT "foo"."t1"."col1" FROM "foo"."t1"''')
self.assert_compile(t1.select().apply_labels(), '''SELECT "foo"."t1"."col1" AS "foo_t1_col1" FROM "foo"."t1"''')
a = t1.select().alias('anon')
b = select([1], a.c.col1==2, from_obj=a)
self.assert_compile(b,
'''SELECT 1 FROM (SELECT "foo"."t1"."col1" AS "col1" FROM '''\
'''"foo"."t1") AS anon WHERE anon."col1" = :col1_1'''
)
metadata = MetaData()
t1 = Table('TableOne', metadata,
Column('ColumnOne', Integer, quote=False), quote=False, schema="FooBar", quote_schema=False)
self.assert_compile(t1.select(), "SELECT FooBar.TableOne.ColumnOne FROM FooBar.TableOne")
self.assert_compile(t1.select().apply_labels(),
"SELECT FooBar.TableOne.ColumnOne AS "\
"FooBar_TableOne_ColumnOne FROM FooBar.TableOne" # TODO: is this what we really want here ? what if table/schema
# *are* quoted?
)
a = t1.select().alias('anon')
b = select([1], a.c.ColumnOne==2, from_obj=a)
self.assert_compile(b,
"SELECT 1 FROM (SELECT FooBar.TableOne.ColumnOne AS "\
"ColumnOne FROM FooBar.TableOne) AS anon WHERE anon.ColumnOne = :ColumnOne_1"
)
def test_table_quote_flag(self):
metadata = MetaData()
t1 = Table('TableOne', metadata,
Column('id', Integer),
quote=False)
t2 = Table('TableTwo', metadata,
Column('id', Integer),
Column('t1_id', Integer, ForeignKey('TableOne.id')),
quote=False)
self.assert_compile(
t2.join(t1).select(),
"SELECT TableTwo.id, TableTwo.t1_id, TableOne.id "
"FROM TableTwo JOIN TableOne ON TableOne.id = TableTwo.t1_id")
@testing.crashes('oracle', 'FIXME: unknown, verify not fails_on')
@testing.requires.subqueries
def test_labels(self):
"""test the quoting of labels.
        if labels aren't quoted, a query in postgresql in particular will fail since it produces:
SELECT LaLa.lowercase, LaLa."UPPERCASE", LaLa."MixedCase", LaLa."ASC"
FROM (SELECT DISTINCT "WorstCase1".lowercase AS lowercase,
"WorstCase1"."UPPERCASE" AS UPPERCASE,
"WorstCase1"."MixedCase" AS MixedCase, "WorstCase1"."ASC" AS ASC \nFROM "WorstCase1") AS LaLa
where the "UPPERCASE" column of "LaLa" doesnt exist.
"""
x = table1.select(distinct=True).alias("LaLa").select().scalar()
def test_labels2(self):
metadata = MetaData()
table = Table("ImATable", metadata,
Column("col1", Integer))
x = select([table.c.col1.label("ImATable_col1")]).alias("SomeAlias")
self.assert_compile(select([x.c.ImATable_col1]),
'''SELECT "SomeAlias"."ImATable_col1" FROM (SELECT "ImATable".col1 AS "ImATable_col1" FROM "ImATable") AS "SomeAlias"''')
# note that 'foo' and 'FooCol' are literals already quoted
x = select([sql.literal_column("'foo'").label("somelabel")], from_obj=[table]).alias("AnAlias")
x = x.select()
self.assert_compile(x,
'''SELECT "AnAlias".somelabel FROM (SELECT 'foo' AS somelabel FROM "ImATable") AS "AnAlias"''')
x = select([sql.literal_column("'FooCol'").label("SomeLabel")], from_obj=[table])
x = x.select()
self.assert_compile(x,
'''SELECT "SomeLabel" FROM (SELECT 'FooCol' AS "SomeLabel" FROM "ImATable")''')
def test_reserved_words(self):
metadata = MetaData()
table = Table("ImATable", metadata,
Column("col1", Integer),
Column("from", Integer),
Column("louisville", Integer),
Column("order", Integer))
x = select([table.c.col1, table.c['from'], table.c.louisville, table.c.order])
self.assert_compile(x,
'''SELECT "ImATable".col1, "ImATable"."from", "ImATable".louisville, "ImATable"."order" FROM "ImATable"''')
class PreparerTest(fixtures.TestBase):
"""Test the db-agnostic quoting services of IdentifierPreparer."""
def test_unformat(self):
prep = compiler.IdentifierPreparer(None)
unformat = prep.unformat_identifiers
def a_eq(have, want):
if have != want:
print "Wanted %s" % want
print "Received %s" % have
self.assert_(have == want)
a_eq(unformat('foo'), ['foo'])
a_eq(unformat('"foo"'), ['foo'])
a_eq(unformat("'foo'"), ["'foo'"])
a_eq(unformat('foo.bar'), ['foo', 'bar'])
a_eq(unformat('"foo"."bar"'), ['foo', 'bar'])
a_eq(unformat('foo."bar"'), ['foo', 'bar'])
a_eq(unformat('"foo".bar'), ['foo', 'bar'])
a_eq(unformat('"foo"."b""a""r"."baz"'), ['foo', 'b"a"r', 'baz'])
def test_unformat_custom(self):
class Custom(compiler.IdentifierPreparer):
def __init__(self, dialect):
super(Custom, self).__init__(dialect, initial_quote='`',
final_quote='`')
def _escape_identifier(self, value):
return value.replace('`', '``')
def _unescape_identifier(self, value):
return value.replace('``', '`')
prep = Custom(None)
unformat = prep.unformat_identifiers
def a_eq(have, want):
if have != want:
print "Wanted %s" % want
print "Received %s" % have
self.assert_(have == want)
a_eq(unformat('foo'), ['foo'])
a_eq(unformat('`foo`'), ['foo'])
        a_eq(unformat("'foo'"), ["'foo'"])
a_eq(unformat('foo.bar'), ['foo', 'bar'])
a_eq(unformat('`foo`.`bar`'), ['foo', 'bar'])
a_eq(unformat('foo.`bar`'), ['foo', 'bar'])
a_eq(unformat('`foo`.bar'), ['foo', 'bar'])
a_eq(unformat('`foo`.`b``a``r`.`baz`'), ['foo', 'b`a`r', 'baz'])
| ioram7/keystone-federado-pgid2013 | build/sqlalchemy/test/sql/test_quote.py | Python | apache-2.0 | 9,788 |
#!/usr/bin/env python
import logging
import io
from time import sleep
import json
import requests
from . import *
logging.basicConfig(filename='terminal.log', level=logging.DEBUG)
class PyTerminal():
"""
This class allows the execution of a command in the same way as the web-cli does. The return is a
CommandOutput instance which holds the text lines generated by the command execution. The content is the same as it
would be in the web-cli.
"""
_content_type = "application/text"
_se_post_command_headers = {'X-Requested-With': 'XMLHttpRequest', 'Accept': _content_type}
_se_poll_command_headers = {'X-Requested-With': 'XMLHttpRequest', 'Accept': _content_type,
'Accept-Encoding': 'gzip, deflate, sdch'}
_content_type_octectstream = "application/octet-stream"
def __init__(self, url, session):
"""
Default
"""
self.session = session
self.url = url
self._set_se_urls(url)
self._response=""
def execute(self, command_str, file=None, timeout_seconds=600):
"""
Execute the command and return the result from Server.
:param command_str: command to be executed. For more information about a command's syntax, please
check the online help
:param file: file object to be imported - optional parameter - needed if the command requires a file for
upload
:return CommandOutput: CommandOutput instance
"""
logging.debug('Executing command...')
try:
self._response = self._se_command_post(command_str, file)
except requests.exceptions.ConnectionError as e:
if str(e) == '(\'Connection aborted.\', BadStatusLine(\"\'\'\",))':
logging.debug('Re-trying post request.' + str(e))
self._response = self._se_command_post(command_str, file)
else:
raise e
if self.is_download_present() is True:
logging.info("Download request")
bytes = self._get_bytes(self._response.text)
return bytes
        if self._response.status_code == 200:
logging.debug('Command executed successfully: ' + command_str)
return CommandOutput(self._response.text, self._response.status_code, True)
logging.debug('Command executed successfully: ' + command_str)
return CommandOutput(self._response.text, self._response.status_code, True, self)
def _header(self, content_type):
return {'X-Requested-With': 'XMLHttpRequest', 'Accept': content_type}
def _download(self, file_id, path):
logging.debug('Downloading file from Server')
response = self._se_download_result(file_id)
with io.open(path, 'wb') as handle:
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
logging.debug('Wrote file {} to disk'.format(path))
def _get_bytes(self, file_id):
logging.debug('Downloading file from Server')
response = self._se_download_result(file_id)
filename = response.headers['content-disposition'].split("\"")[1]
buf = bytearray()
for b in response.iter_content():
if not b:
break
buf.extend(b)
logging.debug('Wrote {} bytes to memory'.format(len(buf)))
return filename,buf
def _set_se_urls(self, url):
self._uri_server_command_execute = url + '/Certificate/rest/certapi/execute/'
self._uri_server_command_download = url + '/Certificate/rest/certapi/download/'
def _se_command_post(self, command, file_in=None):
logging.debug('POST command request')
file_data = {'name': 'command', 'command': command}
req_data = None
response = self.session.post(
self._uri_server_command_execute+command,
headers=self._se_post_command_headers,
files=file_data,
data=req_data,
verify=False,
allow_redirects=False)
logging.debug('POST command request executed')
return response
def _se_download_result(self, file_id):
response = self.session.get(
self._uri_server_command_download + file_id
, stream=True, headers=self._header(self._content_type_octectstream))
return response
def is_download_present(self):
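        # The server signals a downloadable result by returning a URL in the response body.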
if "http" in self._response.text:
return True
return False
class CommandOutput():
"""
Class representing the output of the command execution
"""
def __init__(self, response, http_code, success, terminal=None):
"""
:param response: command response format
:param http_code: http_response code
"""
self._http_response_code = http_code
self._success = success
if success and response is not None:
self._response = response
self._files = []
self.terminal = terminal
def is_success_sent(self):
"""
:return: boolean, true if the request was successfully sent to the server
"""
return self._success
def http_response_code(self):
"""
:return: http_response code from the underlying request
"""
return self._http_response_code
def get_output(self):
"""
:return: list of strings representing the output from the command
lines are single strings
table rows are single strings with tabs delimiting columns
"""
logging.debug('get_output')
logging.debug(self._http_response_code)
logging.debug(self._success)
if not self._success:
logging.warn('There is no output to parse, because command execution failed: raising IllegalStateException')
raise IllegalStateException('There is no output to parse, because command execution failed')
if self._response is not None:
# Result is a list of strings representing the output from the command
return self._response
else:
logging.warn('Illegal server response. Raising InternalError')
raise InternalError('Illegal server response')
logging.debug('get_output returning lines')
return ""
| sumanta23/pyscript | pyscript/terminal.py | Python | apache-2.0 | 6,496 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from mock import patch
from polyaxon.env_vars.keys import (
POLYAXON_KEYS_GIT_CREDENTIALS,
POLYAXON_KEYS_RUN_INSTANCE,
)
from polyaxon.exceptions import PolyaxonContainerException
from polyaxon.init.git import (
create_code_repo,
get_clone_url,
has_cred_access,
has_ssh_access,
)
from polyaxon.utils.test_utils import BaseTestCase
@pytest.mark.init_mark
class TestInitCode(BaseTestCase):
def test_raise_if_env_var_not_found(self):
with self.assertRaises(PolyaxonContainerException):
create_code_repo(repo_path="", url="", revision="")
def test_raise_if_env_var_not_correct(self):
os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "foo"
with self.assertRaises(PolyaxonContainerException):
create_code_repo(repo_path="", url="", revision="")
del os.environ[POLYAXON_KEYS_RUN_INSTANCE]
def test_has_cred_access(self):
assert has_cred_access() is False
os.environ[POLYAXON_KEYS_GIT_CREDENTIALS] = "foo:bar"
assert has_cred_access() is True
del os.environ[POLYAXON_KEYS_GIT_CREDENTIALS]
def test_has_ssh_access(self):
assert has_ssh_access() is False
def test_get_clone_url(self):
url = "https://foo.com/test"
assert get_clone_url(url=url) == url
os.environ[POLYAXON_KEYS_GIT_CREDENTIALS] = "foo:bar"
assert get_clone_url(url=url) == "https://foo:[email protected]/test"
del os.environ[POLYAXON_KEYS_GIT_CREDENTIALS]
with patch("polyaxon.init.git.has_ssh_access") as ssh_access_mock:
ssh_access_mock.return_value = True
assert get_clone_url(url=url) == "[email protected]:test.git"
url = "[email protected]:test.git"
with patch("polyaxon.init.git.has_ssh_access") as ssh_access_mock:
ssh_access_mock.return_value = True
assert get_clone_url(url=url) == "[email protected]:test.git"
| polyaxon/polyaxon | core/tests/test_init/test_init_code.py | Python | apache-2.0 | 2,535 |
#!/usr/bin/env python
"""
SlipStream Client
=====
Copyright (C) 2015 SixSq Sarl (sixsq.com)
=====
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from slipstream.NodeDecorator import (NodeDecorator, RUN_CATEGORY_IMAGE)
from slipstream.NodeInstance import NodeInstance
from slipstream.UserInfo import UserInfo
import slipstream.util as util
from slipstream_openstack.OpenStackClientCloud import \
OpenStackClientCloud, FLOATING_IPS_KEY
from slipstream_openstack.OpenStackClientCloud import searchInObjectList
from slipstream_openstack.TestBaseLive import TestBaseLive
CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'pyunit.credentials.properties')
# Example configuration file.
"""
[Test]
openstack.location = LVS
openstack.username = [email protected]
openstack.password = xxx
openstack.imageid = d02ee717-33f7-478b-ba14-02196978fea8
openstack.ssh.username = ubuntu
openstack.ssh.password = yyy
""" # pylint: disable=pointless-string-statement
class TestOpenStackClientCloudLive(TestBaseLive):
cin = 'openstack'
conf_keys = ['endpoint',
'tenant-name',
'username',
'password',
'domain-name',
'identityVersion',
'serviceType',
'serviceName',
'serviceRegion',
'tenant-name',
'network.type',
FLOATING_IPS_KEY,
UserInfo.NETWORK_PUBLIC_KEY,
UserInfo.NETWORK_PRIVATE_KEY]
def _update_user_info(self):
self.user_info[self.construct_key(FLOATING_IPS_KEY)] = \
util.str2bool(self.user_info.get_cloud(FLOATING_IPS_KEY, 'false'))
def setUp(self):
self._setUp(OpenStackClientCloud, CONFIG_FILE, self.conf_keys)
self._update_user_info()
security_groups = self._conf_val('security.groups')
image_id = self._conf_val('imageid')
        instance_type = self._conf_val('instance.type', 'm1.tiny')
network_type = self._conf_val('network.type')
node_name = 'test_node'
self.node_instances = {}
for i in range(1, self.multiplicity + 1):
node_instance_name = node_name + '.' + str(i)
self.node_instances[node_instance_name] = NodeInstance({
NodeDecorator.NODE_NAME_KEY: node_name,
NodeDecorator.NODE_INSTANCE_NAME_KEY: node_instance_name,
'cloudservice': self.cin,
'image.platform': 'Ubuntu',
'image.imageId': image_id,
'image.id': image_id,
self.construct_key('instance.type'): instance_type,
self.construct_key('security.groups'): security_groups,
'network': network_type
})
self.node_instance = NodeInstance({
NodeDecorator.NODE_NAME_KEY: NodeDecorator.MACHINE_NAME,
NodeDecorator.NODE_INSTANCE_NAME_KEY: NodeDecorator.MACHINE_NAME,
'cloudservice': self.cin,
'image.platform': 'Ubuntu',
'image.imageId': image_id,
'image.id': image_id,
self.construct_key('instance.type'): instance_type,
self.construct_key('security.groups'): security_groups,
'network': network_type,
'image.prerecipe':
"""#!/bin/sh
set -e
set -x
ls -l /tmp
dpkg -l | egrep "nano|lvm" || true
""",
'image.packages': ['lvm2', 'nano'],
'image.recipe':
"""#!/bin/sh
set -e
set -x
dpkg -l | egrep "nano|lvm" || true
lvs
"""
})
self.node_instance_with_additional_disk = NodeInstance({
NodeDecorator.NODE_NAME_KEY: NodeDecorator.MACHINE_NAME,
NodeDecorator.NODE_INSTANCE_NAME_KEY: NodeDecorator.MACHINE_NAME,
'cloudservice': self.cin,
'image.platform': 'Ubuntu',
'image.imageId': image_id,
'image.id': image_id,
self.construct_key('instance.type'): instance_type,
'network': network_type,
'extra.disk.volatile': '20'
})
def tearDown(self):
os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
self.client = None
self.ch = None
def xtest_1_start_stop_images(self):
self._test_start_stop_images()
def xtest_2_buildImage(self):
self.client.run_category = RUN_CATEGORY_IMAGE
self.client.start_nodes_and_clients(self.user_info, {NodeDecorator.MACHINE_NAME: self.node_instance})
instances_details = self.client.get_vms_details()
assert instances_details
assert instances_details[0][NodeDecorator.MACHINE_NAME]
new_id = self.client.build_image(self.user_info, self.node_instance)
assert new_id
def xtest_3_list_instances(self):
self.client._initialization(self.user_info)
assert isinstance(self.client.list_instances(), list)
def xtest_4_start_image_with_extra_disk(self):
self.client.run_category = RUN_CATEGORY_IMAGE
self.client.start_nodes_and_clients(self.user_info,
{NodeDecorator.MACHINE_NAME: self.node_instance_with_additional_disk})
vm_id = self.client.get_vms()[NodeDecorator.MACHINE_NAME]['id']
nodes = self.client.list_instances()
assert searchInObjectList(nodes, 'id', vm_id).extra['volumes_attached']
self.client.stop_deployment()
if __name__ == '__main__':
unittest.main()
| slipstream/SlipStreamConnectors | openstack/python/tar/test/TestOpenStackClientCloudLive.py | Python | apache-2.0 | 6,070 |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-functions documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-functions"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-functions",
"github_user": "googleapis",
"github_repo": "python-functions",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-functions-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-functions.tex",
"google-cloud-functions Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-functions",
"google-cloud-functions Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-functions",
"google-cloud-functions Documentation",
author,
"google-cloud-functions",
"google-cloud-functions Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| googleapis/python-functions | docs/conf.py | Python | apache-2.0 | 12,404 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script for deploying cloud functions."""
from __future__ import print_function
import subprocess
import sys
from turbinia import config
index_file = './index.yaml'
if len(sys.argv) > 1:
function_names = [sys.argv[1]]
else:
function_names = ['gettasks', 'closetasks']
config.LoadConfig()
for cloud_function in function_names:
print('Deploying function {0:s}'.format(cloud_function))
cmd = (
'gcloud --project {0:s} functions deploy {1:s} --stage-bucket {2:s} '
'--region {3:s} --runtime nodejs14 --trigger-http --memory 256MB '
'--timeout 60s'.format(
config.TURBINIA_PROJECT, cloud_function, config.BUCKET_NAME,
config.TURBINIA_REGION))
print(subprocess.check_call(cmd, shell=True))
print('\nCreating Datastore index from {0:s}'.format(index_file))
cmd = 'gcloud --quiet --project {0:s} datastore indexes create {1:s}'.format(
config.TURBINIA_PROJECT, index_file)
subprocess.check_call(cmd, shell=True)
| google/turbinia | tools/gcf_init/deploy_gcf.py | Python | apache-2.0 | 1,012 |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - Supporting function for Python magic
@copyright: 2002 Juergen Hermann <[email protected]>
@license: GNU GPL, see COPYING for details.
"""
#############################################################################
### Module import / Plugins
#############################################################################
def isImportable(module):
""" Check whether a certain module is available.
"""
try:
__import__(module)
return 1
except ImportError:
return 0
def getPluginModules(packagedir):
"""
Return a list of plugin modules for a given plugin package dir,
omitting any that start with an underscore.
"""
import os, re
pyre = re.compile(r"^([^_].*)\.py$")
dirlist = os.listdir(packagedir)
matches = [pyre.match(fn) for fn in dirlist]
modules = [match.group(1) for match in matches if match]
modules.sort()
return modules
def getPackageModules(packagefile):
""" Return a list of modules for a package, omitting any modules
starting with an underscore.
"""
import os, re
packagedir = os.path.dirname(packagefile)
in_plugin_dir = lambda dir, ops=os.path.split: ops(ops(dir)[0])[1] == "plugin"
moinmodule = __import__('MoinMoin')
# Is it in a .zip file?
if not in_plugin_dir(packagedir) and hasattr(moinmodule, '__loader__'):
pyre = re.compile(r"^([^_].*)\.py(?:c|o)$")
zipfiles = moinmodule.__loader__._files
dirlist = [entry[0].replace(r'/', '\\').split('\\')[-1]
for entry in zipfiles.values() if packagedir in entry[0]]
else:
pyre = re.compile(r"^([^_].*)\.py$")
dirlist = os.listdir(packagedir)
matches = [pyre.match(fn) for fn in dirlist]
modules = [match.group(1) for match in matches if match]
modules.sort()
return modules
def importName(modulename, name):
""" Import name dynamically from module
Used to do dynamic import of modules and names that you know their
names only in runtime.
Any error raised here must be handled by the caller.
@param modulename: full qualified mudule name, e.g. x.y.z
@param name: name to import from modulename
@rtype: any object
@return: name from module
"""
module = __import__(modulename, globals(), {}, [name])
return getattr(module, name)
def makeThreadSafe(function, lock=None):
""" Call with a function you want to make thread safe
Call without lock to make the function thread safe using one lock per
function. Call with existing lock object if you want to make several
functions use same lock, e.g. all functions that change same data
structure.
@param function: function to make thread safe
@param lock: threading.Lock instance or None
@rtype: function
@return: function decorated with locking
"""
if lock is None:
import threading
lock = threading.Lock()
def decorated(*args, **kw):
lock.acquire()
try:
return function(*args, **kw)
finally:
lock.release()
return decorated
| RealTimeWeb/wikisite | MoinMoin/util/pysupport.py | Python | apache-2.0 | 3,165 |
import matplotlib as mpl
mpl.use('Agg')
import survivalstan
from stancache import stancache
import numpy as np
from nose.tools import ok_
from functools import partial
num_iter = 1000
from .test_datasets import load_test_dataset
model_code = survivalstan.models.exp_survival_model
make_inits = None
def test_model():
''' Test survival model on test dataset
'''
d = load_test_dataset()
testfit = survivalstan.fit_stan_survival_model(
model_cohort = 'test model',
model_code = model_code,
df = d,
time_col = 't',
event_col = 'event',
formula = 'age + sex',
iter = num_iter,
chains = 2,
seed = 9001,
make_inits = make_inits,
FIT_FUN = stancache.cached_stan_fit,
)
ok_('fit' in testfit)
ok_('coefs' in testfit)
ok_('loo' in testfit)
survivalstan.utils.plot_coefs([testfit])
survivalstan.utils.plot_coefs([testfit], trans=np.exp)
return(testfit)
def test_null_model(**kwargs):
    ''' Test NULL survival model on test dataset
'''
d = load_test_dataset()
testfit = survivalstan.fit_stan_survival_model(
model_cohort = 'test model',
model_code = model_code,
df = d,
time_col = 't',
event_col = 'event',
formula = '~ 1',
iter = num_iter,
chains = 2,
seed = 9001,
make_inits = make_inits,
FIT_FUN = stancache.cached_stan_fit,
)
ok_('fit' in testfit)
ok_('coefs' in testfit)
ok_('loo' in testfit)
survivalstan.utils.plot_coefs([testfit])
survivalstan.utils.plot_coefs([testfit], trans=np.exp)
return(testfit)
| jburos/survivalstan | test/test_exp_survival_model.py | Python | apache-2.0 | 1,679 |
# isort:skip_file
from .data_connector import DataConnector
from .runtime_data_connector import RuntimeDataConnector
from .file_path_data_connector import FilePathDataConnector
from .configured_asset_file_path_data_connector import (
ConfiguredAssetFilePathDataConnector,
)
from .inferred_asset_file_path_data_connector import (
InferredAssetFilePathDataConnector,
)
from .configured_asset_filesystem_data_connector import (
ConfiguredAssetFilesystemDataConnector,
)
from .inferred_asset_filesystem_data_connector import (
InferredAssetFilesystemDataConnector,
)
from .configured_asset_s3_data_connector import (
ConfiguredAssetS3DataConnector,
)
from .inferred_asset_s3_data_connector import InferredAssetS3DataConnector
from .configured_asset_azure_data_connector import (
ConfiguredAssetAzureDataConnector,
)
from .inferred_asset_azure_data_connector import (
InferredAssetAzureDataConnector,
)
from .configured_asset_gcs_data_connector import (
ConfiguredAssetGCSDataConnector,
)
from .inferred_asset_gcs_data_connector import (
InferredAssetGCSDataConnector,
)
from .configured_asset_sql_data_connector import (
ConfiguredAssetSqlDataConnector,
)
from .inferred_asset_sql_data_connector import (
InferredAssetSqlDataConnector,
)
from .configured_asset_dbfs_data_connector import ConfiguredAssetDBFSDataConnector
from .inferred_asset_dbfs_data_connector import InferredAssetDBFSDataConnector
| great-expectations/great_expectations | great_expectations/datasource/data_connector/__init__.py | Python | apache-2.0 | 1,441 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial migration
Revision ID: 464e951dc3b8
Revises: None
Create Date: 2014-08-05 17:41:34.470183
"""
# revision identifiers, used by Alembic.
revision = '464e951dc3b8'
down_revision = None
from alembic import op # noqa: E402
import sqlalchemy as sa # noqa: E402
def upgrade():
op.create_table(
'states',
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('state', sa.BigInteger(), nullable=False),
sa.Column('s_metadata', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('name'))
op.create_table(
'modules_state',
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('state', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('name'))
| openstack/cloudkitty | cloudkitty/db/sqlalchemy/alembic/versions/464e951dc3b8_initial_migration.py | Python | apache-2.0 | 1,314 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the logic used by all the search_next commands."""
import re
from aquilon.utils import force_int
int_re = re.compile(r'^(\d+)')
def search_next(session, cls, attr, value, start, pack, locked=False,
**filters):
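    # Find the next free integer suffix for names beginning with ``value``,
    # optionally taking row locks so concurrent callers get distinct numbers.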
q = session.query(cls).filter(attr.startswith(value))
if filters:
q = q.filter_by(**filters)
# Doing the locking here is not the most efficient as we're potentially
# locking a lot of rows - but if there's no better object to lock, then we
# don't have much choice.
if locked and q.count() == 0:
# Nothing to lock -- so we'll crudely pick out the first and
# lock that.
q2 = session.query(cls).order_by(attr).limit(1)
if q2.count() == 1:
attrval = q2.value(attr)
# This is not particularly pleasant: Oracle won't permit a
# "FOR UPDATE" query where "ORDER BY" is given (ORA-02014);
# constructing the allowable version of the query may not be
# possible with SQLAlchemy.
q2 = session.query(cls).filter(attr == attrval)
session.execute(q2.with_for_update())
# Re-execute the original query: only 1 will get through here
q = session.query(cls).filter(attr.startswith(value))
if filters:
q = q.filter_by(**filters)
# Else (q2.count == 0): the table is empty, so we'll just head
# forwards and accept that this may break in that fairly rare
# (one-off) case; something may also have raced and removed the
# first row we picked.
elif locked:
# The FOR UPDATE query needs to be executed separately, otherwise it
# won't see allocations done in a different session
session.execute(q.with_for_update())
if start:
start = force_int("start", start)
else:
start = 1
entries = set()
for (attrvalue,) in q.values(attr):
m = int_re.match(attrvalue[len(value):])
if m:
n = int(m.group(1))
# Only remember entries that we care about...
if n >= start:
entries.add(n)
if not entries:
return start
entries = sorted(entries)
if pack:
expecting = start
for current in entries:
if current > expecting:
return expecting
expecting += 1
return entries[-1] + 1
| quattor/aquilon | lib/aquilon/worker/dbwrappers/search.py | Python | apache-2.0 | 3,147 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles tarring up documentation directories."""
import subprocess
from docuploader import shell
def compress(directory: str, destination: str) -> subprocess.CompletedProcess:
"""Compress the given directory into the tarfile at destination."""
# Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle creating tarfiles, it's not as efficient on large
    # numbers of files as the tar command is.
return shell.run(
[
"tar",
"--create",
f"--directory={directory}",
f"--file={destination}",
# Treat a colon in the filename as part of the filename,
# not an indication of a remote file. This is required in order to
# handle canonical filenames on Windows.
"--force-local",
"--gzip",
"--verbose",
".",
],
hide_output=False,
)
def decompress(archive: str, destination: str) -> subprocess.CompletedProcess:
"""Decompress the given tarfile to the destination."""
# Note: we don't use the stdlib's "tarfile" module for performance reasons.
    # While it can handle extracting tarfiles, it's not as efficient on large
    # numbers of files as the tar command is.
return shell.run(
[
"tar",
"--extract",
f"--directory={destination}",
f"--file={archive}",
"--gzip",
"--verbose",
],
hide_output=True,
)
| googleapis/docuploader | docuploader/tar.py | Python | apache-2.0 | 2,102 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Utilities used for translating operators from Onnx to Mxnet."""
# pylint: disable=protected-access
from __future__ import absolute_import as _abs
from .... import symbol
from .... import module
from .... import context
from .... import ndarray as nd
from .... import io
def _fix_attribute_names(attrs, change_map):
"""
Change attribute names as per values in change_map dictionary.
Parameters
----------
:param attrs : dict Dict of operator attributes
:param change_map : dict Dict of onnx attribute name to mxnet attribute names.
Returns
-------
:return new_attr : dict Converted dict of operator attributes.
"""
new_attr = {}
for k in attrs.keys():
if k in change_map:
new_attr[change_map[k]] = attrs[k]
else:
new_attr[k] = attrs[k]
return new_attr
def _remove_attributes(attrs, remove_list):
"""
Removes attributes in the remove list from the input attribute dict
:param attrs : Dict of operator attributes
:param remove_list : list of attributes to be removed
:return new_attr : Dict of operator attributes without the listed attributes.
"""
new_attrs = {}
for attr in attrs.keys():
if attr not in remove_list:
new_attrs[attr] = attrs[attr]
return new_attrs
def _add_extra_attributes(attrs, extra_attr_map):
"""
:param attrs: Current Attribute list
:param extraAttrMap: Additional attributes to be added
:return: new_attr
"""
for attr in extra_attr_map:
if attr not in attrs:
attrs[attr] = extra_attr_map[attr]
return attrs
def _pad_sequence_fix(attr, kernel_dim=None):
"""Changing onnx's pads sequence to match with mxnet's pad_width
mxnet: (x1_begin, x1_end, ... , xn_begin, xn_end)
    onnx: (x1_begin, x2_begin, ... , x1_end, ... , xn_end)"""
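    # Example: ONNX pads (1, 2, 3, 4) for a 2-D kernel become MXNet pad_width (1, 3, 2, 4).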
new_attr = ()
if len(attr) % 2 == 0:
for index in range(int(len(attr) / 2)):
new_attr = new_attr + attr[index::int(len(attr) / 2)]
# Making sure pad values are in the attr for all axes.
if kernel_dim is not None:
while len(new_attr) < kernel_dim*2:
new_attr = new_attr + (0, 0)
return new_attr
def _fix_pooling(pool_type, inputs, new_attr):
"""onnx pooling operator supports asymmetrical padding
Adding pad operator before pooling in mxnet to work with onnx"""
stride = new_attr.get('stride')
kernel = new_attr.get('kernel')
padding = new_attr.get('pad')
p_value = new_attr.get('p_value')
# Adding default stride.
if stride is None:
stride = (1,) * len(kernel)
# Add padding attr if not provided.
if padding is None:
padding = (0,) * len(kernel) * 2
# Mxnet Pad operator supports only 4D/5D tensors.
# For 1D case, these are the steps:
# Step 1. Add extra dummy dimension to make it 4D. Adding to axis = 2
# Step 2. Apply padding to this changed tensor
# Step 3. Remove the extra dimension added in step 1.
if len(kernel) == 1:
dummy_axis = 2
# setting 0 padding to the new dim to be added.
padding = (0, padding[0], 0, padding[1])
pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=2)
# Step 1.
curr_sym = symbol.expand_dims(inputs[0], axis=dummy_axis)
# Step 2. Common for all tensor sizes
new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width)
# Step 3: Removing extra dim added.
new_pad_op = symbol.split(new_pad_op, axis=dummy_axis, num_outputs=1, squeeze_axis=1)
else:
# For 2D/3D cases:
# Apply padding
pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=len(kernel))
curr_sym = inputs[0]
if pool_type == 'max':
# For max pool : mode = 'edge', we should replicate the
# edge values to pad, so that we only include input data values
# for calculating 'max'
new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width)
else:
# For avg pool, we should add 'zeros' for padding so mode='constant'
new_pad_op = symbol.pad(curr_sym, mode='constant', pad_width=pad_width)
# Apply pooling without pads.
if pool_type == 'lp':
new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel, p_value=p_value)
else:
new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel)
return new_pooling_op
def _fix_bias(op_name, attrs, num_inputs):
"""A workaround for 'use_bias' attribute since onnx don't provide this attribute,
we have to check the number of inputs to decide it."""
if num_inputs == 3:
attrs['no_bias'] = False
elif num_inputs == 2:
attrs['no_bias'] = True
else:
raise ValueError("Unexpected number of inputs for: {}".format(op_name))
return attrs
def _fix_broadcast(op_name, inputs, broadcast_axis, proto_obj):
"""A workaround to reshape bias term to (1, num_channel)."""
if int(len(proto_obj._params)) > 0:
assert len(list(inputs)) == 2
input0_shape = get_input_shape(inputs[0], proto_obj)
#creating reshape shape
reshape_shape = list(len(input0_shape) * (1,))
reshape_shape[broadcast_axis] = -1
reshape_shape = tuple(reshape_shape)
reshape_op_sym = symbol.reshape(inputs[1], shape=reshape_shape)
op_sym = getattr(symbol, op_name)(inputs[0], reshape_op_sym)
else:
op_sym = op_name
return op_sym
def _fix_channels(op_name, attrs, inputs, proto_obj):
"""A workaround for getting 'channels' or 'units' since onnx don't provide
these attributes. We check the shape of weights provided to get the number.
"""
weight_name = inputs[1].name
    if weight_name not in proto_obj._params:
raise ValueError("Unable to get channels/units attr from onnx graph.")
else:
wshape = proto_obj._params[weight_name].shape
assert len(wshape) >= 2, "Weights shape is invalid: {}".format(wshape)
if op_name == 'FullyConnected':
attrs['num_hidden'] = wshape[0]
else:
if op_name == 'Convolution':
# Weight shape for Conv and FC: (M x C x kH x kW) : M is number of
# feature maps/hidden and C is number of channels
attrs['num_filter'] = wshape[0]
elif op_name == 'Deconvolution':
# Weight shape for DeConv : (C x M x kH x kW) : M is number of
# feature maps/filters and C is number of channels
attrs['num_filter'] = wshape[1]
return attrs
def _fix_gemm(op_name, inputs, old_attr, proto_obj):
"""Using FullyConnected operator in place of linalg_gemm to perform same operation"""
op_sym = getattr(symbol, op_name, None)
alpha = float(old_attr.get('alpha', 1.0))
beta = float(old_attr.get('beta', 1.0))
trans_a = int(old_attr.get('transA', 0))
trans_b = int(old_attr.get('transB', 0))
if trans_a:
inputs[0] = symbol.transpose(inputs[0], axes=(1, 0))
if not trans_b:
inputs[1] = symbol.transpose(inputs[1], axes=(1, 0))
new_inputs = [alpha*inputs[0], inputs[1], beta*inputs[2]]
new_attr = {'num_hidden' : proto_obj._params[inputs[2].name].shape[0]}
return op_sym, new_attr, new_inputs
def get_input_shape(sym, proto_obj):
"""Helper function to obtain the shape of an array"""
arg_params = proto_obj.arg_dict
aux_params = proto_obj.aux_dict
model_input_shape = [data[1] for data in proto_obj.model_metadata.get('input_tensor_data')]
data_names = [data[0] for data in proto_obj.model_metadata.get('input_tensor_data')]
#creating dummy inputs
inputs = []
for in_shape in model_input_shape:
inputs.append(nd.ones(shape=in_shape))
data_shapes = []
for idx, input_name in enumerate(data_names):
data_shapes.append((input_name, inputs[idx].shape))
ctx = context.cpu()
# create a module
mod = module.Module(symbol=sym, data_names=data_names, context=ctx, label_names=None)
mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)
mod.set_params(arg_params=arg_params, aux_params=aux_params)
data_forward = []
for idx, input_name in enumerate(data_names):
val = inputs[idx]
data_forward.append(val)
mod.forward(io.DataBatch(data_forward))
result = mod.get_outputs()[0].asnumpy()
return result.shape
| ptrendx/mxnet | python/mxnet/contrib/onnx/onnx2mx/_translation_utils.py | Python | apache-2.0 | 9,404 |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^webcam/', include('webCam.urls')),
]
| EddyTheB/Warbler | Warbler/urls.py | Python | apache-2.0 | 174 |
# coding: utf-8
#
# Copyright 2010-2014 Ning, Inc.
# Copyright 2014-2020 Groupon, Inc
# Copyright 2020-2021 Equinix, Inc
# Copyright 2014-2021 The Billing Project, LLC
#
# The Billing Project, LLC licenses this file to you under the Apache License, version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Kill Bill
Kill Bill is an open-source billing and payments platform # noqa: E501
OpenAPI spec version: 0.22.22-SNAPSHOT
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from killbill.api_client import ApiClient
class SecurityApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
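    # Illustrative usage sketch (not part of the generated client; the
    # username value below is hypothetical):
    #
    #   api = SecurityApi()                      # uses a default ApiClient
    #   permissions = api.get_current_user_permissions()
    #   roles = api.get_user_roles(username='admin')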
def add_role_definition(self, body=None, created_by=None, **kwargs): # noqa: E501
"""Add a new role definition) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_role_definition(body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param RoleDefinition body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: RoleDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.add_role_definition_with_http_info(body, created_by, **kwargs) # noqa: E501
else:
(data) = self.add_role_definition_with_http_info(body, created_by, **kwargs) # noqa: E501
return data
def add_role_definition_with_http_info(self, body=None, created_by=None, **kwargs): # noqa: E501
"""Add a new role definition) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_role_definition_with_http_info(body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param RoleDefinition body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: RoleDefinition
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_role_definition" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_role_definition`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `add_role_definition`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/security/roles', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RoleDefinition', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_user_roles(self, body=None, created_by=None, **kwargs): # noqa: E501
"""Add a new user with roles (to make api requests) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_user_roles(body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param UserRoles body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: UserRoles
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.add_user_roles_with_http_info(body, created_by, **kwargs) # noqa: E501
else:
(data) = self.add_user_roles_with_http_info(body, created_by, **kwargs) # noqa: E501
return data
def add_user_roles_with_http_info(self, body=None, created_by=None, **kwargs): # noqa: E501
"""Add a new user with roles (to make api requests) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.add_user_roles_with_http_info(body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param UserRoles body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: UserRoles
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_user_roles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_user_roles`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `add_user_roles`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/security/users', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserRoles', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_current_user_permissions(self, **kwargs): # noqa: E501
"""List user permissions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_current_user_permissions(async=True)
>>> result = thread.get()
:param async bool
:return: List[Str]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_current_user_permissions_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_current_user_permissions_with_http_info(**kwargs) # noqa: E501
return data
def get_current_user_permissions_with_http_info(self, **kwargs): # noqa: E501
"""List user permissions # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_current_user_permissions_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: List[Str]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_current_user_permissions" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/security/permissions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='List[Str]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_current_user_subject(self, **kwargs): # noqa: E501
"""Get user information # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_current_user_subject(async=True)
>>> result = thread.get()
:param async bool
:return: Subject
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_current_user_subject_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_current_user_subject_with_http_info(**kwargs) # noqa: E501
return data
def get_current_user_subject_with_http_info(self, **kwargs): # noqa: E501
"""Get user information # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_current_user_subject_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: Subject
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_current_user_subject" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/security/subject', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Subject', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_role_definition(self, role=None, **kwargs): # noqa: E501
"""Get role definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_role_definition(role, async=True)
>>> result = thread.get()
:param async bool
:param Str role: (required)
:return: RoleDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_role_definition_with_http_info(role, **kwargs) # noqa: E501
else:
(data) = self.get_role_definition_with_http_info(role, **kwargs) # noqa: E501
return data
def get_role_definition_with_http_info(self, role=None, **kwargs): # noqa: E501
"""Get role definition # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_role_definition_with_http_info(role, async=True)
>>> result = thread.get()
:param async bool
:param Str role: (required)
:return: RoleDefinition
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['role'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_role_definition" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'role' is set
if ('role' not in params or
params['role'] is None):
raise ValueError("Missing the required parameter `role` when calling `get_role_definition`") # noqa: E501
if 'role' in params and not re.search('.*', params['role']): # noqa: E501
raise ValueError("Invalid value for parameter `role` when calling `get_role_definition`, must conform to the pattern `/.*/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'role' in params:
path_params['role'] = params['role'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/security/roles/{role}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RoleDefinition', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_user_roles(self, username=None, **kwargs): # noqa: E501
"""Get roles associated to a user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_user_roles(username, async=True)
>>> result = thread.get()
:param async bool
:param Str username: (required)
:return: UserRoles
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_user_roles_with_http_info(username, **kwargs) # noqa: E501
else:
(data) = self.get_user_roles_with_http_info(username, **kwargs) # noqa: E501
return data
def get_user_roles_with_http_info(self, username=None, **kwargs): # noqa: E501
"""Get roles associated to a user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_user_roles_with_http_info(username, async=True)
>>> result = thread.get()
:param async bool
:param Str username: (required)
:return: UserRoles
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user_roles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `get_user_roles`") # noqa: E501
if 'username' in params and not re.search('.*', params['username']): # noqa: E501
raise ValueError("Invalid value for parameter `username` when calling `get_user_roles`, must conform to the pattern `/.*/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'username' in params:
path_params['username'] = params['username'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/security/users/{username}/roles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserRoles', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def invalidate_user(self, username=None, created_by=None, **kwargs): # noqa: E501
"""Invalidate an existing user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.invalidate_user(username, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str username: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.invalidate_user_with_http_info(username, created_by, **kwargs) # noqa: E501
else:
(data) = self.invalidate_user_with_http_info(username, created_by, **kwargs) # noqa: E501
return data
def invalidate_user_with_http_info(self, username=None, created_by=None, **kwargs): # noqa: E501
"""Invalidate an existing user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.invalidate_user_with_http_info(username, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str username: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method invalidate_user" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `invalidate_user`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `invalidate_user`") # noqa: E501
if 'username' in params and not re.search('.*', params['username']): # noqa: E501
raise ValueError("Invalid value for parameter `username` when calling `invalidate_user`, must conform to the pattern `/.*/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'username' in params:
path_params['username'] = params['username'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/security/users/{username}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_role_definition(self, body=None, created_by=None, **kwargs): # noqa: E501
"""Update a new role definition) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_role_definition(body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param RoleDefinition body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_role_definition_with_http_info(body, created_by, **kwargs) # noqa: E501
else:
(data) = self.update_role_definition_with_http_info(body, created_by, **kwargs) # noqa: E501
return data
def update_role_definition_with_http_info(self, body=None, created_by=None, **kwargs): # noqa: E501
"""Update a new role definition) # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_role_definition_with_http_info(body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param RoleDefinition body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_role_definition" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_role_definition`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `update_role_definition`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/security/roles', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_user_password(self, username=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Update a user password # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_user_password(username, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str username: (required)
:param UserRoles body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_user_password_with_http_info(username, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.update_user_password_with_http_info(username, body, created_by, **kwargs) # noqa: E501
return data
def update_user_password_with_http_info(self, username=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Update a user password # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_user_password_with_http_info(username, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str username: (required)
:param UserRoles body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username', 'body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_user_password" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `update_user_password`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_user_password`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `update_user_password`") # noqa: E501
if 'username' in params and not re.search('.*', params['username']): # noqa: E501
raise ValueError("Invalid value for parameter `username` when calling `update_user_password`, must conform to the pattern `/.*/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'username' in params:
path_params['username'] = params['username'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/security/users/{username}/password', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_user_roles(self, username=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Update roles associated to a user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_user_roles(username, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str username: (required)
:param UserRoles body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_user_roles_with_http_info(username, body, created_by, **kwargs) # noqa: E501
else:
(data) = self.update_user_roles_with_http_info(username, body, created_by, **kwargs) # noqa: E501
return data
def update_user_roles_with_http_info(self, username=None, body=None, created_by=None, **kwargs): # noqa: E501
"""Update roles associated to a user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_user_roles_with_http_info(username, body, created_by, async=True)
>>> result = thread.get()
:param async bool
:param Str username: (required)
:param UserRoles body: (required)
:param Str created_by: (required)
:param Str reason:
:param Str comment:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username', 'body', 'created_by', 'reason', 'comment'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_user_roles" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `update_user_roles`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_user_roles`") # noqa: E501
# verify the required parameter 'created_by' is set
if ('created_by' not in params or
params['created_by'] is None):
raise ValueError("Missing the required parameter `created_by` when calling `update_user_roles`") # noqa: E501
if 'username' in params and not re.search('.*', params['username']): # noqa: E501
raise ValueError("Invalid value for parameter `username` when calling `update_user_roles`, must conform to the pattern `/.*/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'username' in params:
path_params['username'] = params['username'] # noqa: E501
query_params = []
header_params = {}
if 'created_by' in params:
header_params['X-Killbill-CreatedBy'] = params['created_by'] # noqa: E501
if 'reason' in params:
header_params['X-Killbill-Reason'] = params['reason'] # noqa: E501
if 'comment' in params:
header_params['X-Killbill-Comment'] = params['comment'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/1.0/kb/security/users/{username}/roles', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| killbill/killbill-client-python | killbill/api/security_api.py | Python | apache-2.0 | 44,678 |
from django import forms
class PartnerLogoForm(forms.Form):
partner_logo = forms.ImageField(
label='Select a file',
) | Upande/MaMaSe | apps/partners/forms.py | Python | apache-2.0 | 134 |
# Copyright 2012, Julius Seporaitis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import os
import os.path
import urllib2
import time
sys.path.append('.')
import s3iam
from urlparse import urlparse
class S3GrabberTest(unittest.TestCase):
def test_example_sign(self):
"""Test with example data"""
req = urllib2.Request("https://johnsmith.s3.amazonaws.com/photos/puppy.jpg")
grabber = s3iam.S3Grabber("http://johnsmith.s3.amazonaws.com/", iamrole="s3access")
grabber.access_key = "AKIAIOSFODNN7EXAMPLE"
grabber.secret_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
grabber.token = None
request = grabber._request("photos/puppy.jpg")
signature = grabber.sign(request, timeval=time.mktime(time.struct_time(
tm_year=2007,
tm_mon=3,
tm_mday=27,
tm_hour=19,
tm_min=36,
tm_sec=42)))
self.assertEqual(signature, "bWq2s1WEIj+Ydj0vQ697zp+IXMU=")
if __name__ == '__main__':
unittest.main()
| henrysher/yum-s3-tools | test/tests.py | Python | apache-2.0 | 1,566 |
# -*- coding:utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2012 Samsung SDS Co., LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from synaps.monitor.api import API
from synaps.utils import (validate_email, validate_international_phonenumber,
validate_instance_action,
validate_groupnotification_action)
import json
class Datapoint(object):
"""
The Datapoint data type encapsulates the statistical data that Amazon
CloudWatch computes from metric data.
Average
The average of metric values that correspond to the datapoint.
Type: Double
Maximum
The maximum of the metric value used for the datapoint.
Type: Double
Minimum
The minimum metric value used for the datapoint.
Type: Double
SampleCount
The number of metric values that contributed to the aggregate value of
this datapoint.
Type: Double
Sum
The sum of metric values used for the datapoint.
Type: Double
Timestamp
The time stamp used for the datapoint.
Type: DateTime
Unit
The standard unit used for the datapoint.
Type: String
Valid Values: Seconds | Microseconds | Milliseconds | Bytes |
Kilobytes | Megabytes | Gigabytes | Terabytes | Bits |
Kilobits | Megabits | Gigabits | Terabits | Percent |
Count | Bytes/Second | Kilobytes/Second |
Megabytes/Second | Gigabytes/Second | Terabytes/Second |
Bits/Second | Kilobits/Second | Megabits/Second |
Gigabits/Second | Terabits/Second | Count/Second | None
"""
class Dimension(object):
"""
The Dimension data type further expands on the identity of a metric using
a Name, Value pair.
For examples that use one or more dimensions, see PutMetricData.
Name
The name of the dimension.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
Value
The value representing the dimension measurement
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
"""
class DimensionFilter(object):
"""
The DimensionFilter data type is used to filter ListMetrics results.
Name
The dimension name to be matched.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
Value
The value of the dimension to be matched.
Note: Specifying a Name without specifying a Value returns all values
associated with that Name.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
"""
class GetMetricStatisticsResult(object):
"""
The output for the GetMetricStatistics action.
Datapoints
The datapoints for the specified metric.
Type: Datapoint list
Label
A label describing the specified metric.
Type: String
"""
class ListMetricsResult(object):
"""
The output for the ListMetrics action.
Metrics
A list of metrics used to generate statistics for an AWS account.
Type: Metric list
NextToken
A string that marks the start of the next batch of returned results.
Type: String
"""
class Metric(object):
"""
The Metric data type contains information about a specific metric. If you
call ListMetrics, Amazon CloudWatch returns information contained by this
data type.
The example in the Examples section publishes two metrics named buffers
and latency. Both metrics are in the examples namespace. Both metrics have
two dimensions, InstanceID and InstanceType.
Dimensions
A list of dimensions associated with the metric.
Type: Dimension list
Length constraints: Minimum of 0 item(s) in the list. Maximum of 10
item(s) in the list.
MetricName
The name of the metric.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
Namespace
The namespace of the metric.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
"""
def __init__(self, project_id=None, namespace=None, name=None,
dimensions=None):
        self.project_id = project_id
        self.namespace = namespace
        self.name = name
        self.dimensions = dimensions
class MetricAlarm(object):
OP_MAP = {'>=':'GreaterThanOrEqualToThreshold',
'>':'GreaterThanThreshold',
'<':'LessThanThreshold',
'<=':'LessThanOrEqualToThreshold'}
STATISTICS = ('SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum')
OP_VALUES = OP_MAP.values()
def __init__(self, alarm_name, comparison_operator, evaluation_periods,
metric_name, namespace, period, statistic, threshold,
actions_enabled=False, alarm_actions=[], alarm_description="",
dimensions={}, insufficient_data_actions=[], ok_actions=[],
unit=""):
def validate_actions(actions):
assert (isinstance(actions, list))
for a in actions:
assert (validate_email(a) or
validate_international_phonenumber(a) or
validate_instance_action(a) or
validate_groupnotification_action(a))
assert (isinstance(actions_enabled, bool))
self.actions_enabled = actions_enabled
validate_actions(alarm_actions)
self.alarm_actions = alarm_actions
validate_actions(insufficient_data_actions)
self.insufficient_data_actions = insufficient_data_actions
validate_actions(ok_actions)
self.ok_actions = ok_actions
assert (len(alarm_description) <= 255)
self.alarm_description = alarm_description
assert (len(alarm_name) <= 255)
self.alarm_name = alarm_name
assert (comparison_operator in self.OP_MAP.values())
self.comparison_operator = comparison_operator
assert (isinstance(dimensions, dict))
self.dimensions = dimensions
assert (isinstance(evaluation_periods, int))
self.evaluation_periods = evaluation_periods
assert (len(metric_name) <= 255)
self.metric_name = metric_name
assert (len(namespace) <= 255)
self.namespace = namespace
assert (isinstance(period, int))
self.period = period
assert (statistic in self.STATISTICS)
self.statistic = statistic
self.threshold = threshold
self.unit = unit
self.alarm_arn = None
self.alarm_configuration_updated_timestamp = None
self.state_reason = None
self.state_reason_data = None
self.state_updated_timestamp = None
self.state_value = None
def to_columns(self):
return {
'actions_enabled': self.actions_enabled,
'alarm_actions': json.dumps(self.alarm_actions),
'alarm_arn': self.alarm_arn,
'alarm_configuration_updated_timestamp':
self.alarm_configuration_updated_timestamp,
'alarm_description': self.alarm_description,
'alarm_name': self.alarm_name,
'comparison_operator': self.comparison_operator,
'dimensions':json.dumps(self.dimensions),
'evaluation_periods':self.evaluation_periods,
'insufficient_data_actions': \
json.dumps(self.insufficient_data_actions),
'metric_name':self.metric_name,
'namespace':self.namespace,
'ok_actions':json.dumps(self.ok_actions),
'period':self.period,
'statistic':self.statistic,
'threshold':self.threshold,
'unit':self.unit
}
def __repr__(self):
return "MetricAlarm:%s[%s(%s) %s %s]" % (self.alarm_name,
self.metric_name,
self.statistic,
self.comparison_operator,
self.threshold)
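# Illustrative usage sketch (added for clarity; the metric names and values
# below are hypothetical):
#
#   alarm = MetricAlarm(alarm_name="high-cpu",
#                       comparison_operator="GreaterThanOrEqualToThreshold",
#                       evaluation_periods=3, metric_name="CPUUtilization",
#                       namespace="SPCS/SYNAPS", period=60,
#                       statistic="Average", threshold=80.0)
#   columns = alarm.to_columns()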
class MetricDatum(object):
"""
The MetricDatum data type encapsulates the information sent with
PutMetricData to either create a new metric or add new values to be
aggregated into an existing metric.
Dimensions
A list of dimensions associated with the metric.
Type: Dimension list
Length constraints: Minimum of 0 item(s) in the list. Maximum of 10
item(s) in the list.
MetricName
The name of the metric.
Type: String
Length constraints: Minimum length of 1. Maximum length of 255.
StatisticValues
A set of statistical values describing the metric.
Type: StatisticSet
Timestamp
The time stamp used for the metric. If not specified, the default
value is set to the time the metric data was received.
Type: DateTime
Unit
The unit of the metric.
Type: String
Valid Values: Seconds | Microseconds | Milliseconds | Bytes |
Kilobytes | Megabytes | Gigabytes | Terabytes | Bits |
Kilobits | Megabits | Gigabits | Terabits | Percent |
Count | Bytes/Second | Kilobytes/Second |
Megabytes/Second | Gigabytes/Second | Terabytes/Second |
Bits/Second | Kilobits/Second | Megabits/Second |
Gigabits/Second | Terabits/Second | Count/Second | None
Value
The value for the metric.
Important: Although the Value parameter accepts numbers of type
Double, Amazon CloudWatch truncates values with very large exponents.
Values with base-10 exponents greater than 126 (1 x 10^126) are
truncated. Likewise, values with base-10 exponents less than -130
(1 x 10^-130) are also truncated.
Type: Double
"""
class StatisticSet(object):
"""
The StatisticSet data type describes the StatisticValues component of
MetricDatum, and represents a set of statistics that describes a specific
metric.
Maximum
The maximum value of the sample set.
Type: Double
Minimum
The minimum value of the sample set.
Type: Double
SampleCount
The number of samples used for the statistic set.
Type: Double
Sum
The sum of values for the sample set.
Type: Double
"""
| spcs/synaps | synaps/monitor/__init__.py | Python | apache-2.0 | 11,730 |
# Copyright 2013-2021 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from lib.utils import util
def parse_record(parent_field, record):
field_names = []
field_values = []
for name in record:
if isinstance(record[name], dict):
new_parent_field = parent_field.copy()
new_parent_field.append(name)
names = " ".join(new_parent_field)
if "converted" in record[name]:
field_names.append(names)
field_values.append(record[name]["converted"])
elif "raw" in record[name]:
field_names.append(names)
field_values.append(record[name]["raw"])
else:
# Must have subgroups:
sub_names, sub_values = parse_record(new_parent_field, record[name])
field_names.extend(sub_names)
field_values.extend(sub_values)
else:
raise Exception("Unhandled parsing")
return field_names, field_values
def parse_output(actual_out={}, horizontal=False, header_len=2, merge_header=True):
"""
    Common parser for all show commands. Walks the parsed JSON output of a
    show command and returns a tuple of:
    (title, description, data_names, data_values, num_records)
"""
title = actual_out["title"]
description = actual_out.get("description", "")
data_names = {}
data_values = []
num_records = 0
for group in actual_out["groups"]:
for record in group["records"]:
temp_names, temp_values = parse_record([], record)
# We assume every record has the same set of names
if len(data_names) == 0:
data_names = temp_names
data_values.append(temp_values)
num_records += 1
return title, description, data_names, data_values, num_records
def get_separate_output(in_str=""):
_regex = re.compile(r"((?<=^{).*?(?=^}))", re.MULTILINE | re.DOTALL)
out = re.findall(_regex, in_str)
ls = []
for item in out:
item = remove_escape_sequence(item)
item = "{" + item + "}"
ls.append(json.loads(item))
return ls
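# Illustrative sketch (added for clarity; the input string is hypothetical):
# get_separate_output() extracts every {...} block whose braces start a line
# in the captured terminal output and JSON-decodes each one:
#
#   >>> get_separate_output('{\n"title": "t1"\n}\n{\n"title": "t2"\n}\n')
#   [{'title': 't1'}, {'title': 't2'}]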
def capture_separate_and_parse_output(rc, commands):
actual_stdout = util.capture_stdout(rc.execute, commands)
separated_stdout = get_separate_output(actual_stdout)
result = parse_output(separated_stdout[0])
return result
def get_merged_header(*lines):
h = [[_f for _f in _h.split(" ") if _f] for _h in lines]
header = []
if len(h) == 0 or any(len(h[i]) != len(h[i + 1]) for i in range(len(h) - 1)):
return header
for idx in range(len(h[0])):
header_i = h[0][idx]
for jdx in range(len(h) - 1):
if h[jdx + 1][idx] == ".":
break
header_i += " " + h[jdx + 1][idx]
header.append(header_i)
return header
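# Illustrative sketch (added for clarity; the header strings are hypothetical):
# a "." in a later header line means "no continuation" for that column:
#
#   >>> get_merged_header("Node     IP", ".        Port")
#   ['Node', 'IP Port']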
def check_for_subset(actual_list, expected_sub_list):
if not expected_sub_list:
return True
if not actual_list:
return False
for i in expected_sub_list:
if isinstance(i, tuple):
found = False
for s_i in i:
if s_i is None:
found = True
break
if s_i in actual_list:
found = True
break
if not found:
print(i, actual_list)
return False
else:
if i not in actual_list:
print(i)
return False
return True
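# Illustrative sketch (added for clarity; the values are hypothetical): a tuple
# inside the expected sub-list means "accept any one of these (or None)":
#
#   >>> check_for_subset(['a', 'b', 'c'], ['a', ('x', 'b')])
#   True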
# Checks that a single expected list has a subset equal to actual_list.
def check_for_subset_in_list_of_lists(actual_list, list_of_expected_sub_lists):
for expected_list in list_of_expected_sub_lists:
if check_for_subset(actual_list, expected_list):
return True
return False
def remove_escape_sequence(line):
ansi_escape = re.compile(r"(\x9b|\x1b\[)[0-?]*[ -\/]*[@-~]")
return ansi_escape.sub("", line)
def check_for_types(actual_lists, expected_types):
def is_float(x):
try:
float(x)
if "." in x:
return True
return False
except ValueError:
return False
def is_int(x):
try:
int(x)
if "." in x:
return False
return True
except ValueError:
return False
def is_bool(x):
if x in ("True", "true", "False", "false"):
return True
return False
def check_list_against_types(a_list):
if a_list is None or expected_types is None:
return False
if len(a_list) == len(expected_types):
for idx in range(len(a_list)):
typ = expected_types[idx]
val = a_list[idx]
if typ == int:
if not is_int(val):
return False
elif typ == float:
if not is_float(val):
return False
elif typ == bool:
if not is_bool(val):
return False
elif typ == str:
if any([is_bool(val), is_int(val), is_float(val)]):
return False
else:
raise Exception("Type is not yet handles in test_util.py", typ)
return True
return False
for actual_list in actual_lists:
if not check_list_against_types(actual_list):
return False
return True
| aerospike/aerospike-admin | test/e2e/util.py | Python | apache-2.0 | 6,156 |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates budget delivery method for a given campaign. To get
campaigns, run get_campaigns.py.
Tags: CampaignService.mutate
"""
__author__ = '[email protected] (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
campaign_id = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
# Initialize appropriate service.
campaign_service = client.GetCampaignService(
'https://adwords-sandbox.google.com', 'v201109')
# Construct operations and update campaign.
operations = [{
'operator': 'SET',
'operand': {
'id': campaign_id,
'budget': {
'deliveryMethod': 'ACCELERATED'
}
}
}]
campaigns = campaign_service.Mutate(operations)[0]
# Display results.
for campaign in campaigns['value']:
print ('Campaign with name \'%s\' and id \'%s\' was updated.'
% (campaign['name'], campaign['id']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, campaign_id)
| nearlyfreeapps/python-googleadwords | examples/adspygoogle/adwords/v201109/basic_operations/update_campaign.py | Python | apache-2.0 | 1,970 |
# Copyright (c) 2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import socket
import netaddr
from netaddr import AddrFormatError, IPAddress
from pycalico.datastore_datatypes import IPPool
from pycalico.ipam import IPAMClient
from pycalico.util import get_host_ips, validate_asn
DEFAULT_IPV4_POOL = IPPool("192.168.0.0/16")
DEFAULT_IPV6_POOL = IPPool("fd80:24e2:f998:72d6::/64")
def _find_pool(ip_addr, ipv4_pools):
"""
Find the pool containing the given IP.
:param ip_addr: IP address to find.
:param ipv4_pools: iterable containing IPPools.
:return: The pool, or None if not found
"""
for pool in ipv4_pools:
if ip_addr in pool.cidr:
return pool
else:
return None
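# --- Illustrative sketch (added; not part of the original file) -----------------
# _find_pool() simply returns the first configured pool whose CIDR contains the
# address, or None when no pool matches. The uncalled helper below illustrates
# that behaviour using the default pool defined above; it is a sketch for
# documentation purposes only.
def _example_find_pool_usage():
    assert _find_pool(IPAddress("192.168.1.7"), [DEFAULT_IPV4_POOL]) is DEFAULT_IPV4_POOL
    assert _find_pool(IPAddress("10.0.0.1"), [DEFAULT_IPV4_POOL]) is None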
def _ensure_host_tunnel_addr(ipv4_pools, ipip_pools):
"""
Ensure the host has a valid IP address for its IPIP tunnel device.
This must be an IP address claimed from one of the IPIP pools.
Handles re-allocating the address if it finds an existing address
that is not from an IPIP pool.
:param ipv4_pools: List of all IPv4 pools.
:param ipip_pools: List of IPIP-enabled pools.
"""
ip_addr = _get_host_tunnel_ip()
if ip_addr:
# Host already has a tunnel IP assigned, verify that it's still valid.
pool = _find_pool(ip_addr, ipv4_pools)
if pool and not pool.ipip:
# No longer an IPIP pool. Release the IP, it's no good to us.
client.release_ips({ip_addr})
ip_addr = None
elif not pool:
# Not in any IPIP pool. IP must be stale. Since it's not in any
# pool, we can't release it.
ip_addr = None
if not ip_addr:
# Either there was no IP or the IP needs to be replaced. Try to
# get an IP from one of the IPIP-enabled pools.
_assign_host_tunnel_addr(ipip_pools)
def _assign_host_tunnel_addr(ipip_pools):
"""
Claims an IPIP-enabled IP address from the first pool with some
space.
Stores the result in the host's config as its tunnel address.
Exits on failure.
:param ipip_pools: List of IPPools to search for an address.
"""
for ipip_pool in ipip_pools:
v4_addrs, _ = client.auto_assign_ips(
num_v4=1, num_v6=0,
handle_id=None,
attributes={},
pool=(ipip_pool, None),
host=hostname
)
if v4_addrs:
# Successfully allocated an address. Unpack the list.
[ip_addr] = v4_addrs
break
else:
# Failed to allocate an address, the pools must be full.
print "Failed to allocate an IP address from an IPIP-enabled pool " \
"for the host's IPIP tunnel device. Pools are likely " \
"exhausted."
sys.exit(1)
# If we get here, we've allocated a new IPIP-enabled address,
# Store it in etcd so that Felix will pick it up.
client.set_per_host_config(hostname, "IpInIpTunnelAddr",
str(ip_addr))
def _remove_host_tunnel_addr():
"""
Remove any existing IP address for this host's IPIP tunnel device.
Idempotent; does nothing if there is no IP assigned. Releases the
IP from IPAM.
"""
ip_addr = _get_host_tunnel_ip()
if ip_addr:
client.release_ips({ip_addr})
client.remove_per_host_config(hostname, "IpInIpTunnelAddr")
def _get_host_tunnel_ip():
"""
:return: The IPAddress of the host's IPIP tunnel or None if not
present/invalid.
"""
raw_addr = client.get_per_host_config(hostname, "IpInIpTunnelAddr")
try:
ip_addr = IPAddress(raw_addr)
except (AddrFormatError, ValueError, TypeError):
# Either there's no address or the data is bad. Treat as missing.
ip_addr = None
return ip_addr
def error_if_bgp_ip_conflict(ip, ip6):
"""
Prints an error message and exits if either of the IPv4 or IPv6 addresses
is already in use by another calico BGP host.
:param ip: User-provided IPv4 address to start this node with.
:param ip6: User-provided IPv6 address to start this node with.
:return: Nothing
"""
ip_list = []
if ip:
ip_list.append(ip)
if ip6:
ip_list.append(ip6)
try:
# Get hostname of host that already uses the given IP, if it exists
ip_conflicts = client.get_hostnames_from_ips(ip_list)
except KeyError:
# No hosts have been configured in etcd, so there cannot be a conflict
return
if ip_conflicts.keys():
ip_error = "ERROR: IP address %s is already in use by host %s. " \
"Calico requires each compute host to have a unique IP. " \
"If this is your first time running the Calico node on " \
"this host, ensure that another host is not already using " \
"the same IP address."
try:
if ip_conflicts[ip] != hostname:
ip_error = ip_error % (ip, str(ip_conflicts[ip]))
print ip_error
sys.exit(1)
except KeyError:
# IP address was not found in ip-host dictionary
pass
try:
if ip6 and ip_conflicts[ip6] != hostname:
ip_error = ip_error % (ip6, str(ip_conflicts[ip6]))
print ip_error
sys.exit(1)
except KeyError:
# IP address was not found in ip-host dictionary
pass
def warn_if_unknown_ip(ip, ip6):
"""
Prints a warning message if the IP addresses are not assigned to interfaces
on the current host.
:param ip: IPv4 address which should be present on the host.
:param ip6: IPv6 address which should be present on the host.
:return: None
"""
if ip and IPAddress(ip) not in get_host_ips(version=4, exclude=["docker0"]):
print "WARNING: Could not confirm that the provided IPv4 address is" \
" assigned to this host."
if ip6 and IPAddress(ip6) not in get_host_ips(version=6,
exclude=["docker0"]):
print "WARNING: Could not confirm that the provided IPv6 address is" \
" assigned to this host."
def warn_if_hostname_conflict(ip):
"""
Prints a warning message if it seems like an existing host is already running
calico using this hostname.
:param ip: User-provided or detected IP address to start this node with.
:return: Nothing
"""
try:
current_ipv4, _ = client.get_host_bgp_ips(hostname)
except KeyError:
# No other machine has registered configuration under this hostname.
# This must be a new host with a unique hostname, which is the
# expected behavior.
pass
else:
if current_ipv4 != "" and current_ipv4 != ip:
hostname_warning = "WARNING: Hostname '%s' is already in use " \
"with IP address %s. Calico requires each " \
"compute host to have a unique hostname. " \
"If this is your first time running " \
"the Calico node on this host, ensure " \
"that another host is not already using the " \
"same hostname." % (hostname, current_ipv4)
print hostname_warning
def main():
ip = os.getenv("IP")
ip = ip or None
if ip and not netaddr.valid_ipv4(ip):
print "IP environment (%s) is not a valid IPv4 address." % ip
sys.exit(1)
ip6 = os.getenv("IP6")
ip6 = ip6 or None
if ip6 and not netaddr.valid_ipv6(ip6):
print "IP6 environment (%s) is not a valid IPv6 address." % ip6
sys.exit(1)
as_num = os.getenv("AS")
as_num = as_num or None
if as_num and not validate_asn(as_num):
print "AS environment (%s) is not a AS number." % as_num
sys.exit(1)
# Get IP address of host, if none was specified
if not ip:
ips = get_host_ips(exclude=["^docker.*", "^cbr.*",
"virbr.*", "lxcbr.*", "veth.*",
"cali.*", "tunl.*", "flannel.*"])
try:
ip = str(ips.pop())
except IndexError:
print "Couldn't autodetect a management IP address. Please " \
"provide an IP address by rerunning the container with the" \
" IP environment variable set."
sys.exit(1)
else:
print "No IP provided. Using detected IP: %s" % ip
# Write a startup environment file containing the IP address that may have
# just been detected.
# This is required because the confd templates expect to be able to fill in
# some templates by fetching them from the environment.
with open('startup.env', 'w') as f:
f.write("IP=%s\n" % ip)
f.write("HOSTNAME=%s\n" % hostname)
warn_if_hostname_conflict(ip)
# Verify that IPs are not already in use by another host.
error_if_bgp_ip_conflict(ip, ip6)
# Verify that the chosen IP exists on the current host
warn_if_unknown_ip(ip, ip6)
if os.getenv("NO_DEFAULT_POOLS", "").lower() != "true":
# Set up etcd
ipv4_pools = client.get_ip_pools(4)
ipv6_pools = client.get_ip_pools(6)
# Create default pools if required
if not ipv4_pools:
client.add_ip_pool(4, DEFAULT_IPV4_POOL)
# If the OS has not been built with IPv6 then the /proc config for IPv6
# will not be present.
if not ipv6_pools and os.path.exists('/proc/sys/net/ipv6'):
client.add_ip_pool(6, DEFAULT_IPV6_POOL)
client.ensure_global_config()
client.create_host(hostname, ip, ip6, as_num)
# If IPIP is enabled, the host requires an IP address for its tunnel
# device, which is in an IPIP pool. Without this, a host can't originate
# traffic to a pool address because the response traffic would not be
# routed via the tunnel (likely being dropped by RPF checks in the fabric).
ipv4_pools = client.get_ip_pools(4)
ipip_pools = [p for p in ipv4_pools if p.ipip]
if ipip_pools:
# IPIP is enabled, make sure the host has an address for its tunnel.
_ensure_host_tunnel_addr(ipv4_pools, ipip_pools)
else:
# No IPIP pools, clean up any old address.
_remove_host_tunnel_addr()
# Try the HOSTNAME environment variable, but default to
# the socket.gethostname() value if unset.
hostname = os.getenv("HOSTNAME")
if not hostname:
hostname = socket.gethostname()
client = IPAMClient()
if __name__ == "__main__":
main()
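# --- Illustrative note (added; not part of the original file) -------------------
# Based on main() above, this startup script is configured entirely through
# environment variables: IP / IP6 (BGP addresses; IP is autodetected when
# unset), AS (BGP AS number), NO_DEFAULT_POOLS ("true" skips creating the
# default pools) and HOSTNAME (falls back to socket.gethostname()).
# The startup.env file written by main() then contains exactly two lines that
# the confd templates read back, for example (values below are made up):
#     IP=172.17.8.101
#     HOSTNAME=calico-node-1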
| quater/calico-containers | calico_node/filesystem/startup.py | Python | apache-2.0 | 11,293 |
from ex import *
from ex.alg.common import svdex
def RPCA(D, lam = None, tol = 1e-7, maxIter = 500):
'''Yi Ma's robust pca
return (L, SingularValues(L))
'''
m, n = D.shape
maxmn, minmn = (max(m, n), min(m, n))
lam = float(lam) if lam is not None else 1.0
log.info('RPCA for %dx%d matrix. lambda = %f.' % (m, n, lam))
lam = lam/sqrt(maxmn)
Y = D.copy()
norm_two = svdex(Y, 1); norm_two = norm_two[1][0]
norm_inf = norm(Y.ravel(), inf) / lam
dual_norm = max(norm_two, norm_inf)
Y = Y / dual_norm
A_hat = zeros((m, n))
E_hat = zeros((m, n))
mu = 1.25/norm_two
mu_bar = mu*1e7
rho = 1.5
d_norm = norm(D, 'fro')
sv = 10
for it in range(maxIter):
temp_T = D - A_hat + (1/mu)*Y;
E_hat[:] = 0;
filt = temp_T > lam/mu;
E_hat[filt] = temp_T[filt] - lam/mu
filt = temp_T < -lam/mu;
E_hat[filt] = temp_T[filt] + lam/mu
U, diagS, Vh = svdex(D - E_hat + (1/mu)*Y, sv)
svp = sum(diagS > 1/mu);
if svp < sv:
sv = min(svp + 1, minmn)
else:
sv = min(svp + round(0.05*minmn), minmn)
A_hat = mul(U[:,:svp]*(diagS[:svp] - 1/mu), Vh[:svp])
Z = D - A_hat - E_hat
Y = Y + mu*Z
mu = min(mu*rho, mu_bar)
# stop criterion
stop = norm(Z, 'fro') / d_norm
converged = stop < tol
log.info('Iter=%d, rank=%d, |E|_0=%d, Stop=%g' % (
it, svp, sum(fabs(E_hat.ravel()) > 1e-10), stop))
if converged: break
return (A_hat, diagS[:svp])
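# --- Explanatory note (added; not part of the original module) ------------------
# The loop above appears to implement the inexact augmented Lagrange multiplier
# scheme for Robust PCA, i.e. it approximately solves
#     minimize    ||A||_*  +  (lam / sqrt(max(m, n))) * ||E||_1
#     subject to  D = A + E
# by alternating a soft-threshold update of the sparse part E, a singular-value
# threshold update of the low-rank part A, and the dual update
# Y <- Y + mu*(D - A - E). The uncalled helper below is a minimal usage sketch
# (sizes and rank are arbitrary); the __main__ block remains the real test.
def _example_rpca_usage():
    '''Recover a rank-5 matrix; returns (low-rank part, its singular values).'''
    D = mul(randn(5, 100).T, randn(5, 100)) / 5
    return RPCA(D)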
if __name__ == '__main__':
InitLog()
rk = 10
m = 500
num_ol = int(round(m*m*0.01))
BG = mul(randn(rk, m).T, randn(rk, m))/rk
OL = zeros((m, m))
for ind in range(num_ol):
ij = random.randint(0, m, 2)
OL[ij[0], ij[1]] = 5 + rand(1)*10
A = BG + OL
A_hat = RPCA(A)[0]
A_svd = svdex(A, rk)
A_svd = mul(A_svd[0]*A_svd[1], A_svd[2])
log.info('RPCA RMSE = %f, SVD RMSE = %f' % (
rmse(BG-A_hat), rmse(BG-A_svd)))
test(rmse(BG-A_hat) < 1e-5, 'RPCA recovering L')
| excelly/xpy-ml | ex/ml/rpca.py | Python | apache-2.0 | 2,134 |