Dataset schema (one row per column: dtype and observed min..max values; ⌀ marks columns that contain nulls):

| column | dtype | values | nulls |
|---|---|---|---|
| hexsha | string | lengths 40..40 | |
| size | int64 | 1..1.03M | |
| ext | string | 10 classes | |
| lang | string | 1 class | |
| max_stars_repo_path | string | lengths 3..239 | |
| max_stars_repo_name | string | lengths 5..130 | |
| max_stars_repo_head_hexsha | string | lengths 40..78 | |
| max_stars_repo_licenses | sequence | lengths 1..10 | |
| max_stars_count | int64 | 1..191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24..24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24..24 | ⌀ |
| max_issues_repo_path | string | lengths 3..239 | |
| max_issues_repo_name | string | lengths 5..130 | |
| max_issues_repo_head_hexsha | string | lengths 40..78 | |
| max_issues_repo_licenses | sequence | lengths 1..10 | |
| max_issues_count | int64 | 1..67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24..24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24..24 | ⌀ |
| max_forks_repo_path | string | lengths 3..239 | |
| max_forks_repo_name | string | lengths 5..130 | |
| max_forks_repo_head_hexsha | string | lengths 40..78 | |
| max_forks_repo_licenses | sequence | lengths 1..10 | |
| max_forks_count | int64 | 1..105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24..24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24..24 | ⌀ |
| content | string | lengths 1..1.03M | |
| avg_line_length | float64 | 1..958k | |
| max_line_length | int64 | 1..1.03M | |
| alphanum_fraction | float64 | 0..1 | |
4a24e5af148e6e7349744b7b9f2538aef9d3388a | 326 | py | Python | JellyBot/components/navbar/header.py | RaenonX/Jelly-Bot-API | c7da1e91783dce3a2b71b955b3a22b68db9056cf | [
"MIT"
] | 5 | 2020-08-26T20:12:00.000Z | 2020-12-11T16:39:22.000Z | JellyBot/components/navbar/header.py | RaenonX/Jelly-Bot | c7da1e91783dce3a2b71b955b3a22b68db9056cf | [
"MIT"
] | 234 | 2019-12-14T03:45:19.000Z | 2020-08-26T18:55:19.000Z | JellyBot/components/navbar/header.py | RaenonX/Jelly-Bot-API | c7da1e91783dce3a2b71b955b3a22b68db9056cf | [
"MIT"
] | 2 | 2019-10-23T15:21:15.000Z | 2020-05-22T09:35:55.000Z | from ._base import NavBaseItem
class NavHeader(NavBaseItem):
def __init__(self, parent, label: str):
super().__init__(parent)
self._label = label
@property
def label(self) -> str:
return self._label
def to_html(self):
return f'<h6 class="dropdown-header">{self.label}</h6>'
| 21.733333 | 63 | 0.631902 |
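A minimal usage sketch for the `NavHeader` component above. Passing `parent=None` is an assumption made purely for illustration; in the real JellyBot navbar the parent would be an actual container item.

```python
# Hypothetical usage of NavHeader; parent=None is assumed to be acceptable
# to NavBaseItem purely for illustration.
from JellyBot.components.navbar.header import NavHeader

header = NavHeader(parent=None, label="Admin Tools")
print(header.label)      # Admin Tools
print(header.to_html())  # <h6 class="dropdown-header">Admin Tools</h6>
```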
4a24e65d9678a98521ca8c45f547ed3d92c6eceb | 3,035 | py | Python | parsyfiles/plugins_optional/support_for_attrs.py | smarie/python-simple-file-collection-parsing-framework | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | [
"BSD-3-Clause"
] | null | null | null | parsyfiles/plugins_optional/support_for_attrs.py | smarie/python-simple-file-collection-parsing-framework | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | [
"BSD-3-Clause"
] | null | null | null | parsyfiles/plugins_optional/support_for_attrs.py | smarie/python-simple-file-collection-parsing-framework | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | [
"BSD-3-Clause"
] | null | null | null | import attr
from attr import fields, NOTHING
from attr.validators import _OptionalValidator, _InstanceOfValidator
def _guess_type_from_validator(validator):
"""
Utility method to return the declared type of an attribute or None. It handles _OptionalValidator and _AndValidator
in order to unpack the validators.
:param validator:
:return: the type of attribute declared in an inner 'instance_of' validator (if any is found, the first one is used)
or None if no inner 'instance_of' validator is found
"""
if isinstance(validator, _OptionalValidator):
# Optional : look inside
return _guess_type_from_validator(validator.validator)
elif isinstance(validator, _AndValidator):
# Sequence : try each of them
for v in validator.validators:
typ = _guess_type_from_validator(v)
if typ is not None:
return typ
return None
elif isinstance(validator, _InstanceOfValidator):
# InstanceOf validator : found it !
return validator.type
else:
# we could not find the type
return None
def guess_type_from_validators(attr):
"""
Utility method to return the declared type of an attribute or None. It handles _OptionalValidator and _AndValidator
in order to unpack the validators.
:param attr:
:return: the type of attribute declared in an inner 'instance_of' validator (if any is found, the first one is used)
or None if no inner 'instance_of' validator is found
"""
return _guess_type_from_validator(attr.validator)
def is_optional(attr):
"""
    Helper method to find whether an attribute is optional (i.e. not mandatory)
:param attr:
:return:
"""
return isinstance(attr.validator, _OptionalValidator) or (attr.default is not None and attr.default is not NOTHING)
def get_attrs_declarations(item_type):
"""
Helper method to return a dictionary of tuples. Each key is attr_name, and value is (attr_type, attr_is_optional)
:param item_type:
:return:
"""
# this will raise an error if the type is not an attr-created type
attribs = fields(item_type)
res = dict()
for attr in attribs:
attr_name = attr.name
# -- is the attribute mandatory ?
optional = is_optional(attr)
# -- get and check the attribute type
typ = guess_type_from_validators(attr)
# -- store both info in result dict
res[attr_name] = (typ, optional)
return res
@attr.s(repr=False, slots=True)
class _AndValidator(object):
validators = attr.ib()
def __call__(self, inst, attr, value):
for v in self.validators:
v(inst, attr, value)
return
def __repr__(self):
return (
"<validator sequence : {seq}>".format(seq=repr(self.validators))
)
def chain(*validators):
"""
A validator that applies several validators in order
:param validators: A sequence of validators
"""
return _AndValidator(validators) | 28.632075 | 120 | 0.674794 |
4a24e69fcecf0973b361e7354cb5f16c5e86e0b9 | 16,468 | py | Python | venv/lib/python3.5/site-packages/flask_testing/utils.py | JonathanVaughan/project | 8f0a84345588017b96b6724efb07af7441cf6844 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/flask_testing/utils.py | JonathanVaughan/project | 8f0a84345588017b96b6724efb07af7441cf6844 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/flask_testing/utils.py | JonathanVaughan/project | 8f0a84345588017b96b6724efb07af7441cf6844 | [
"MIT"
] | 1 | 2021-04-11T05:23:48.000Z | 2021-04-11T05:23:48.000Z | # -*- coding: utf-8 -*-
"""
flask_testing.utils
~~~~~~~~~~~~~~~~~~~
Flask unittest integration.
:copyright: (c) 2010 by Dan Jacob.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, with_statement
import gc
import multiprocessing
import socket
import time
try:
import socketserver
except ImportError:
# Python 2 SocketServer fallback
import SocketServer as socketserver
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from urllib.parse import urlparse, urljoin
except ImportError:
# Python 2 urlparse fallback
from urlparse import urlparse, urljoin
from werkzeug.utils import cached_property
# Use Flask's preferred JSON module so that our runtime behavior matches.
from flask import json_available, templating, template_rendered
try:
from flask import message_flashed
_is_message_flashed = True
except ImportError:
message_flashed = None
_is_message_flashed = False
if json_available:
from flask import json
# we'll use signals for template-related tests if
# available in this version of Flask
try:
import blinker
_is_signals = True
except ImportError: # pragma: no cover
_is_signals = False
__all__ = ["TestCase"]
class ContextVariableDoesNotExist(Exception):
pass
class JsonResponseMixin(object):
"""
Mixin with testing helper methods
"""
@cached_property
def json(self):
if not json_available: # pragma: no cover
raise NotImplementedError
return json.loads(self.data)
def _make_test_response(response_class):
class TestResponse(response_class, JsonResponseMixin):
pass
return TestResponse
def _empty_render(template, context, app):
"""
Used to monkey patch the render_template flask method when
the render_templates property is set to False in the TestCase
"""
if _is_signals:
template_rendered.send(app, template=template, context=context)
return ""
def _check_for_message_flashed_support():
if not _is_signals or not _is_message_flashed:
raise RuntimeError(
"Your version of Flask doesn't support message_flashed. "
"This requires Flask 0.10+ with the blinker module installed."
)
def _check_for_signals_support():
if not _is_signals:
raise RuntimeError(
"Your version of Flask doesn't support signals. "
"This requires Flask 0.6+ with the blinker module installed."
)
class TestCase(unittest.TestCase):
render_templates = True
run_gc_after_test = False
def create_app(self):
"""
Create your Flask app here, with any
configuration you need.
"""
raise NotImplementedError
def __call__(self, result=None):
"""
Does the required setup, doing it here
means you don't have to call super.setUp
in subclasses.
"""
try:
self._pre_setup()
super(TestCase, self).__call__(result)
finally:
self._post_teardown()
def debug(self):
try:
self._pre_setup()
super(TestCase, self).debug()
finally:
self._post_teardown()
def _pre_setup(self):
self.app = self.create_app()
self._orig_response_class = self.app.response_class
self.app.response_class = _make_test_response(self.app.response_class)
self.client = self.app.test_client()
self._ctx = self.app.test_request_context()
self._ctx.push()
if not self.render_templates:
# Monkey patch the original template render with a empty render
self._original_template_render = templating._render
templating._render = _empty_render
self.templates = []
self.flashed_messages = []
if _is_signals:
template_rendered.connect(self._add_template)
if _is_message_flashed:
message_flashed.connect(self._add_flash_message)
def _add_flash_message(self, app, message, category):
self.flashed_messages.append((message, category))
def _add_template(self, app, template, context):
if len(self.templates) > 0:
self.templates = []
self.templates.append((template, context))
def _post_teardown(self):
if getattr(self, '_ctx', None) is not None:
self._ctx.pop()
del self._ctx
if getattr(self, 'app', None) is not None:
if getattr(self, '_orig_response_class', None) is not None:
self.app.response_class = self._orig_response_class
del self.app
if hasattr(self, 'client'):
del self.client
if hasattr(self, 'templates'):
del self.templates
if hasattr(self, 'flashed_messages'):
del self.flashed_messages
if _is_signals:
template_rendered.disconnect(self._add_template)
if _is_message_flashed:
message_flashed.disconnect(self._add_flash_message)
if hasattr(self, '_original_template_render'):
templating._render = self._original_template_render
if self.run_gc_after_test:
gc.collect()
def assertMessageFlashed(self, message, category='message'):
"""
Checks if a given message was flashed.
Only works if your version of Flask has message_flashed
signal support (0.10+) and blinker is installed.
:param message: expected message
:param category: expected message category
"""
_check_for_message_flashed_support()
for _message, _category in self.flashed_messages:
if _message == message and _category == category:
return True
raise AssertionError("Message '%s' in category '%s' wasn't flashed" % (message, category))
assert_message_flashed = assertMessageFlashed
def assertTemplateUsed(self, name, tmpl_name_attribute='name'):
"""
Checks if a given template is used in the request.
Only works if your version of Flask has signals
support (0.6+) and blinker is installed.
If the template engine used is not Jinja2, provide
``tmpl_name_attribute`` with a value of its `Template`
class attribute name which contains the provided ``name`` value.
:versionadded: 0.2
:param name: template name
:param tmpl_name_attribute: template engine specific attribute name
"""
_check_for_signals_support()
used_templates = []
for template, context in self.templates:
if getattr(template, tmpl_name_attribute) == name:
return True
used_templates.append(template)
raise AssertionError("Template %s not used. Templates were used: %s" % (name, ' '.join(repr(used_templates))))
assert_template_used = assertTemplateUsed
def get_context_variable(self, name):
"""
Returns a variable from the context passed to the
template. Only works if your version of Flask
has signals support (0.6+) and blinker is installed.
Raises a ContextVariableDoesNotExist exception if does
not exist in context.
:versionadded: 0.2
:param name: name of variable
"""
_check_for_signals_support()
for template, context in self.templates:
if name in context:
return context[name]
raise ContextVariableDoesNotExist
def assertContext(self, name, value, message=None):
"""
Checks if given name exists in the template context
and equals the given value.
:versionadded: 0.2
:param name: name of context variable
:param value: value to check against
"""
try:
self.assertEqual(self.get_context_variable(name), value, message)
except ContextVariableDoesNotExist:
self.fail(message or "Context variable does not exist: %s" % name)
assert_context = assertContext
def assertRedirects(self, response, location, message=None):
"""
Checks if response is an HTTP redirect to the
given location.
:param response: Flask response
:param location: relative URL path to SERVER_NAME or an absolute URL
"""
parts = urlparse(location)
if parts.netloc:
expected_location = location
else:
server_name = self.app.config.get('SERVER_NAME') or 'localhost'
expected_location = urljoin("http://%s" % server_name, location)
valid_status_codes = (301, 302, 303, 305, 307)
valid_status_code_str = ', '.join(str(code) for code in valid_status_codes)
not_redirect = "HTTP Status %s expected but got %d" % (valid_status_code_str, response.status_code)
self.assertTrue(response.status_code in valid_status_codes, message or not_redirect)
self.assertEqual(response.location, expected_location, message)
assert_redirects = assertRedirects
def assertStatus(self, response, status_code, message=None):
"""
Helper method to check matching response status.
:param response: Flask response
:param status_code: response status code (e.g. 200)
:param message: Message to display on test failure
"""
message = message or 'HTTP Status %s expected but got %s' \
% (status_code, response.status_code)
self.assertEqual(response.status_code, status_code, message)
assert_status = assertStatus
def assert200(self, response, message=None):
"""
Checks if response status code is 200
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 200, message)
assert_200 = assert200
def assert400(self, response, message=None):
"""
Checks if response status code is 400
:versionadded: 0.2.5
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 400, message)
assert_400 = assert400
def assert401(self, response, message=None):
"""
Checks if response status code is 401
:versionadded: 0.2.1
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 401, message)
assert_401 = assert401
def assert403(self, response, message=None):
"""
Checks if response status code is 403
:versionadded: 0.2
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 403, message)
assert_403 = assert403
def assert404(self, response, message=None):
"""
Checks if response status code is 404
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 404, message)
assert_404 = assert404
def assert405(self, response, message=None):
"""
Checks if response status code is 405
:versionadded: 0.2
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 405, message)
assert_405 = assert405
def assert500(self, response, message=None):
"""
Checks if response status code is 500
:versionadded: 0.4.1
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 500, message)
assert_500 = assert500
# A LiveServerTestCase useful with Selenium or headless browsers
# Inspired by https://docs.djangoproject.com/en/dev/topics/testing/#django.test.LiveServerTestCase
class LiveServerTestCase(unittest.TestCase):
def create_app(self):
"""
Create your Flask app here, with any
configuration you need.
"""
raise NotImplementedError
def __call__(self, result=None):
"""
Does the required setup, doing it here means you don't have to
call super.setUp in subclasses.
"""
# Get the app
self.app = self.create_app()
self._configured_port = self.app.config.get('LIVESERVER_PORT', 5000)
self._port_value = multiprocessing.Value('i', self._configured_port)
# We need to create a context in order for extensions to catch up
self._ctx = self.app.test_request_context()
self._ctx.push()
try:
self._spawn_live_server()
super(LiveServerTestCase, self).__call__(result)
finally:
self._post_teardown()
self._terminate_live_server()
def get_server_url(self):
"""
Return the url of the test server
"""
return 'http://localhost:%s' % self._port_value.value
def _spawn_live_server(self):
self._process = None
port_value = self._port_value
def worker(app, port):
# Based on solution: http://stackoverflow.com/a/27598916
# Monkey-patch the server_bind so we can determine the port bound by Flask.
# This handles the case where the port specified is `0`, which means that
# the OS chooses the port. This is the only known way (currently) of getting
# the port out of Flask once we call `run`.
original_socket_bind = socketserver.TCPServer.server_bind
def socket_bind_wrapper(self):
ret = original_socket_bind(self)
# Get the port and save it into the port_value, so the parent process
# can read it.
(_, port) = self.socket.getsockname()
port_value.value = port
socketserver.TCPServer.server_bind = original_socket_bind
return ret
socketserver.TCPServer.server_bind = socket_bind_wrapper
app.run(port=port, use_reloader=False)
self._process = multiprocessing.Process(
target=worker, args=(self.app, self._configured_port)
)
self._process.start()
# We must wait for the server to start listening, but give up
# after a specified maximum timeout
timeout = self.app.config.get('LIVESERVER_TIMEOUT', 5)
start_time = time.time()
while True:
elapsed_time = (time.time() - start_time)
if elapsed_time > timeout:
raise RuntimeError(
"Failed to start the server after %d seconds. " % timeout
)
if self._can_ping_server():
break
def _can_ping_server(self):
host, port = self._get_server_address()
if port == 0:
# Port specified by the user was 0, and the OS has not yet assigned
# the proper port.
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((host, port))
except socket.error as e:
success = False
else:
success = True
finally:
sock.close()
return success
def _get_server_address(self):
"""
Gets the server address used to test the connection with a socket.
Respects both the LIVESERVER_PORT config value and overriding
get_server_url()
"""
parts = urlparse(self.get_server_url())
host = parts.hostname
port = parts.port
if port is None:
if parts.scheme == 'http':
port = 80
elif parts.scheme == 'https':
port = 443
else:
raise RuntimeError(
"Unsupported server url scheme: %s" % parts.scheme
)
return host, port
def _post_teardown(self):
if getattr(self, '_ctx', None) is not None:
self._ctx.pop()
del self._ctx
def _terminate_live_server(self):
if self._process:
self._process.terminate()
| 29.779385 | 118 | 0.628249 |
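A minimal sketch of a test module built on the `TestCase` helpers above. The Flask app, routes, and template name are invented, and it assumes a `templates/index.html` exists next to the test module.

```python
# Hypothetical tests using flask_testing.TestCase; the app, routes and the
# templates/index.html file are illustrative assumptions.
from flask import Flask, redirect, render_template
from flask_testing import TestCase


class MyViewTests(TestCase):

    def create_app(self):
        app = Flask(__name__)
        app.config["TESTING"] = True

        @app.route("/")
        def index():
            return render_template("index.html")

        @app.route("/old")
        def old():
            return redirect("/")

        return app

    def test_index_renders_template(self):
        self.client.get("/")
        self.assert_template_used("index.html")

    def test_old_url_redirects(self):
        response = self.client.get("/old")
        self.assert_redirects(response, "/")
```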
4a24e78d17cb717b05364ce2c66c4fcd0efca07d | 2,937 | py | Python | examples/archive/max_distance_from_time.py | BillMakwae/Simulation | 8d0ec274643f23bc0e78c96e50508b60791c11d2 | [
"MIT"
] | 8 | 2020-03-29T01:44:16.000Z | 2022-03-26T23:15:34.000Z | examples/archive/max_distance_from_time.py | BillMakwae/Simulation | 8d0ec274643f23bc0e78c96e50508b60791c11d2 | [
"MIT"
] | 60 | 2020-02-08T22:07:16.000Z | 2022-03-26T23:51:55.000Z | examples/archive/max_distance_from_time.py | BillMakwae/Simulation | 8d0ec274643f23bc0e78c96e50508b60791c11d2 | [
"MIT"
] | 1 | 2021-10-20T20:07:06.000Z | 2021-10-20T20:07:06.000Z | import simulation
import datetime
"""
Description: Given an input time, determine the largest distance the car can travel in that time. [time -> distance]
Note: this example assumes constant speed throughout
"""
# Time parameters
tick = 1
# Inputs
simulation_duration = int(60 * 60 * 9) # 9 hours
# Simulation constants
incident_sunlight = 1000
initial_battery_charge = 0.9
battery_charge = initial_battery_charge
lvs_power_loss = 0
speed_increment = 1
max_speed_kmh = 50
# Outputs
final_parameters = dict()
for speed_kmh in range(1, max_speed_kmh + 1, speed_increment):
distance_travelled = 0
basic_array = simulation.BasicArray(incident_sunlight)
basic_array.set_produced_energy(0)
basic_battery = simulation.BasicBattery(initial_battery_charge)
basic_lvs = simulation.BasicLVS(lvs_power_loss * tick)
basic_motor = simulation.BasicMotor()
# Run the simulation at a specific speed
for time in range(tick, simulation_duration + tick, tick):
basic_array.update(tick)
produced_energy = basic_array.get_produced_energy()
basic_lvs.update(tick)
lvs_consumed_energy = basic_lvs.get_consumed_energy()
basic_motor.update(tick)
basic_motor.calculate_power_in(speed_kmh)
motor_consumed_energy = basic_motor.get_consumed_energy()
basic_battery.charge(produced_energy)
# tries to remove some energy from the battery
try:
basic_battery.discharge(lvs_consumed_energy)
basic_battery.discharge(motor_consumed_energy)
basic_battery.update(tick)
# Battery is empty
except simulation.BatteryEmptyError:
break
# Battery still has some charge in it
else:
distance_travelled += speed_kmh * (tick / 3600)
# Ensures that the simulation doesn't run completely when the battery charge reaches equilibrium
if battery_charge == basic_battery.get_state_of_charge() and basic_battery.is_empty() is not True:
print(f"Equilibrium reached at speed {speed_kmh}km/h.\n")
distance_travelled = speed_kmh * (simulation_duration / 3600)
break
finally:
battery_charge = basic_battery.get_state_of_charge()
if time % 60 == 0:
print(f"Time: {time} sec / {datetime.timedelta(seconds=time)}")
print(f"Car speed: {speed_kmh:.2f}km/h")
print(f"Distance travelled: {distance_travelled:.2f}km")
print(f"Battery SOC: {float(battery_charge) * 100:.3f}%\n")
final_parameters[speed_kmh] = distance_travelled
max_distance = round(max(final_parameters.values()), 2)
optimal_speed = round(max(final_parameters, key=final_parameters.get), 2)
print(f"Simulation complete! Maximum traversable distance in {datetime.timedelta(seconds=simulation_duration)} is "
f"{max_distance}km at speed {optimal_speed}km/h.")
| 34.964286 | 117 | 0.701396 |
4a24e7c39d2d3cf6ec0555ca66a8fa00e0faceb5 | 1,620 | py | Python | pymatgen/core/helper.py | materialsproject/workshop-2017 | 893da316ecb77fece2a9937ffa604b57c0938e0a | [
"BSD-3-Clause-LBNL"
] | 21 | 2017-07-25T17:38:55.000Z | 2022-01-17T09:20:26.000Z | pymatgen/core/helper.py | materialsproject/workshop-2017 | 893da316ecb77fece2a9937ffa604b57c0938e0a | [
"BSD-3-Clause-LBNL"
] | 1 | 2017-07-25T23:29:35.000Z | 2017-07-25T23:29:35.000Z | pymatgen/core/helper.py | materialsproject/workshop-2017 | 893da316ecb77fece2a9937ffa604b57c0938e0a | [
"BSD-3-Clause-LBNL"
] | 22 | 2017-07-25T22:42:33.000Z | 2022-02-24T12:41:18.000Z | from pymatgen import vis
from monty.serialization import loadfn
import matplotlib as mpl
mpl.use("TkAgg")
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os, itertools, re
colors = loadfn(os.path.join(os.path.dirname(vis.__file__), "ElementColorSchemes.yaml"))
color_dict = {el:[j / 256. for j in colors["Jmol"][el]] for el in colors["Jmol"].keys()}
def vis_struct(structure, res=100, show_unit_cell=True):
"""
Visualizes structure using 3d matplotlib plot
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect('equal')
#plot unit cell
if show_unit_cell:
vecs = structure.lattice.matrix
for vec in vecs:
ax.plot(*zip([0, 0, 0], vec), 'k-', zorder=0)
for (vec1, vec2) in itertools.permutations(vecs, 2):
ax.plot(*zip(vec1, vec1+vec2), 'k-', zorder=0)
for (vec1, vec2, vec3) in itertools.permutations(vecs, 3):
ax.plot(*zip(vec1+vec2, vec1+vec2+vec3), 'k-', zorder=0)
phi = np.linspace(0, 2*np.pi, res)
theta = np.linspace(0, np.pi, res)
regex = re.compile('[^a-zA-Z]')
for site in structure.sites:
rad = float(site.specie.atomic_radius)
loc = site.coords
xm = rad * np.outer(np.cos(phi), np.sin(theta)) + loc[0]
ym = rad * np.outer(np.sin(phi), np.sin(theta)) + loc[1]
zm = rad * np.outer(np.ones(np.size(phi)), np.cos(theta)) + loc[2]
color = color_dict[regex.sub('', site.species_string)]
ax.plot_surface(xm, ym, zm, color=color, linewidth=0, zorder=1)
return fig
| 38.571429 | 88 | 0.633333 |
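A small usage sketch for `vis_struct`. The rock-salt cell is illustrative, the old-style top-level `pymatgen` imports are assumed to match the 2017-era version this workshop helper targets, and the import of the helper module itself is an assumption about how it sits on the path.

```python
# Hypothetical use of vis_struct on a simple rock-salt cell; imports assume the
# 2017-era pymatgen API and that the module above is importable as helper.py.
import matplotlib.pyplot as plt
from pymatgen import Lattice, Structure

from helper import vis_struct

lattice = Lattice.cubic(5.64)
nacl = Structure(lattice, ["Na", "Cl"], [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])

fig = vis_struct(nacl, res=50)
plt.show()
```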
4a24e809049aebb4595d4d4e2e79b92b47f0932e | 3,005 | py | Python | uob_utils.py | TeamworkNUS/uob_voice_to_text_proj | dce143572b041d54463a04a19fa0fda97eff1361 | [
"Unlicense"
] | null | null | null | uob_utils.py | TeamworkNUS/uob_voice_to_text_proj | dce143572b041d54463a04a19fa0fda97eff1361 | [
"Unlicense"
] | null | null | null | uob_utils.py | TeamworkNUS/uob_voice_to_text_proj | dce143572b041d54463a04a19fa0fda97eff1361 | [
"Unlicense"
] | 4 | 2022-03-07T09:49:49.000Z | 2022-03-09T06:59:19.000Z | import json
import os
from typing import Any, Optional
from pydub import AudioSegment
import wave
import filetype # to check file type
def check_file_type(audioname, audiopath):
audiofile = os.path.join(audiopath, audioname)
kind = filetype.guess(audiofile)
if kind is None:
        print('Cannot guess file type!')
        return None, None
    print('File extension: %s' % kind.extension)
print('File MIME type: %s' % kind.mime)
return kind.extension, kind.mime
def audio2wav(from_audioname, from_audiopath, to_audioname, to_audiopath):
from_audiofile = os.path.join(from_audiopath,from_audioname)
to_audiofile = os.path.join(to_audiopath,to_audioname)
AudioSegment.from_file(from_audiofile).export(to_audiofile,format='wav')
def get_audio_params(audioname, audiopath):
'''
Note: only works on .wav file
'''
audiofile = os.path.join(audiopath, audioname)
f=wave.open(audiofile)
params = f.getparams()
channels = f.getnchannels()
samplerate = f.getframerate()
sampwidth = f.getsampwidth()
nframes = f.getnframes()
f.close()
print('params: ',params)
# print('channels: ',channels)
# print('sample rate: ', samplerate)
# print('sample width: ', sampwidth)
# print('nframes: ', nframes)
return params, channels, samplerate, sampwidth, nframes
def standardize_audio(from_audioname, from_audiopath, to_audioname, to_audiopath, sample_rate, no_of_channel):
'''
requirements:
ffmpeg
$ ffmpeg {global param} {input file param} -i {input file} {output file param} {output file}
'''
from_audiofile = os.path.join(from_audiopath,from_audioname)
to_audiofile = os.path.join(to_audiopath,to_audioname)
# cmd = "ffmpeg -i " + str(from_audiofile) + " -hide_banner " +" -ac " + str(no_of_channel) + " -ar " + str(sample_rate) +' ' + to_audiofile
# print(cmd)
# os.system(cmd)
AudioSegment.from_wav(from_audiofile).set_frame_rate(sample_rate).set_channels(no_of_channel).export(to_audiofile,format='wav')
def get_audio_meta(audioname,audiopath):
audiofile = os.path.join(audiopath, audioname)
f=wave.open(audiofile)
nframes = f.getnframes()
rate = f.getframerate()
duration = nframes / float(rate)
bytes_size=os.path.getsize(os.path.join(audiopath, audioname))
params = f.getparams()
nchannels = f.getnchannels()
samplerate = f.getframerate()
sampwidth = f.getsampwidth()
bit_type = f.getsampwidth() * 8
bit_rate=samplerate * bit_type * nchannels
f.close()
# print('params: ',params)
# print('channels: ',channels)
# print('sample rate: ', samplerate)
# print('sample width: ', sampwidth)
# print('nframes: ', nframes)
output_obj = {"nchannels":nchannels,"samplerate":samplerate,"sampwidth":sampwidth,"nframes":nframes, "duration":duration, "bytes_size":bytes_size, "bit_type":bit_type,"bit_rate":round(bit_rate)}
output = json.dumps(output_obj)
return output
| 31.968085 | 198 | 0.686522 |
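A sketch of chaining the helpers above into a small pre-processing pipeline: detect the container, convert to WAV, standardize to 16 kHz mono, then inspect the result. The file names, directory, and 16 kHz mono target are illustrative choices.

```python
# Hypothetical pre-processing pipeline built from the helpers above;
# file names, directory and the 16 kHz mono target are illustrative.
import uob_utils

AUDIO_DIR = "./audio"

ext, mime = uob_utils.check_file_type("call.mp3", AUDIO_DIR)

uob_utils.audio2wav("call.mp3", AUDIO_DIR, "call.wav", AUDIO_DIR)
uob_utils.standardize_audio("call.wav", AUDIO_DIR, "call_16k.wav", AUDIO_DIR,
                            sample_rate=16000, no_of_channel=1)

print(uob_utils.get_audio_params("call_16k.wav", AUDIO_DIR))
print(uob_utils.get_audio_meta("call_16k.wav", AUDIO_DIR))
```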
4a24e891e0c49cb6fe6b24b3bb8a34afbdf3838c | 25,982 | py | Python | stencilflow/stencil/intel_fpga.py | spcl/stencilflow | 28bb88e7f4251f29aecc266663bc780023ed2549 | [
"BSD-3-Clause"
] | 12 | 2020-06-05T09:27:41.000Z | 2021-11-22T04:29:55.000Z | stencilflow/stencil/intel_fpga.py | spcl/stencilflow | 28bb88e7f4251f29aecc266663bc780023ed2549 | [
"BSD-3-Clause"
] | 1 | 2021-09-11T14:42:50.000Z | 2021-09-15T12:33:34.000Z | stencilflow/stencil/intel_fpga.py | spcl/stencilflow | 28bb88e7f4251f29aecc266663bc780023ed2549 | [
"BSD-3-Clause"
] | 5 | 2020-09-29T11:35:32.000Z | 2021-09-26T03:31:25.000Z | import ast
import astunparse
import collections
import functools
import itertools
import operator
import re
import dace
import numpy as np
from .subscript_converter import SubscriptConverter
from ._common import JUNK_VAL, dim_to_abs_val, make_iterators
@dace.library.expansion
class ExpandStencilIntelFPGA(dace.library.ExpandTransformation):
environments = []
@staticmethod
def expansion(node, parent_state, parent_sdfg):
sdfg = dace.SDFG(node.label + "_outer")
state = sdfg.add_state(node.label + "_outer")
shape = np.array(node.shape)
# Extract which fields to read from streams and what to buffer
field_to_memlet = {} # Maps fields to which memlet carries it
field_to_data = {} # Maps fields to which data object represents it
buffer_sizes = collections.OrderedDict()
buffer_accesses = collections.OrderedDict()
in_edges = parent_state.in_edges(node)
scalars = {} # {name: type}
vector_lengths = {} # {name: vector length}
for field_name, (dim_mask, relative) in node.accesses.items():
relative = dace.dtypes.deduplicate(
[tuple(int(x) for x in r) for r in relative])
if not any(dim_mask):
# This is a scalar, no buffer needed. Instead, the SDFG must
# take this as a symbol
scalars[field_name] = parent_sdfg.symbols[field_name]
vector_lengths[field_name] = 1
sdfg.add_symbol(field_name, parent_sdfg.symbols[field_name])
continue
# Find the corresponding input memlets
for e in in_edges:
if e.dst_conn == field_name:
field_to_memlet[field_name] = e
data = dace.sdfg.find_input_arraynode(parent_state, e).data
field_to_data[field_name] = data
vector_lengths[field_name] = parent_sdfg.data(data).veclen
break
else:
raise KeyError("Input connector {} was not found for {}".format(
                    field_name, node.label))
# Deduplicate, as we can have multiple accesses to the same index
abs_indices = (
[dim_to_abs_val(i, shape[dim_mask]) for i in relative] +
([0] if node.boundary_conditions[field_name]["btype"] == "copy"
else []))
max_access = max(abs_indices)
min_access = min(abs_indices)
buffer_size = max_access - min_access + vector_lengths[field_name]
buffer_sizes[field_name] = buffer_size
# (indices relative to center, buffer indices, buffer center index)
buffer_accesses[field_name] = ([tuple(r) for r in relative], [
i - min_access for i in abs_indices
], -min_access)
# Find output connectors
for field_name, offset in node.output_fields.items():
for e in parent_state.out_edges(node):
if e.src_conn == field_name:
data = dace.sdfg.find_output_arraynode(parent_state, e).data
field_to_data[field_name] = data
vector_lengths[field_name] = parent_sdfg.data(data).veclen
break
else:
raise KeyError(
"Output connector {} was not found for {}".format(
field_name, node.label))
# Assert that we support the given vectorization widths
vector_length = max(vector_lengths.values())
for field_name in node.output_fields:
if vector_lengths[field_name] != vector_length:
raise ValueError("{} has vector length {}, should be {}".format(
field_name, vector_lengths[field_name], vector_length))
# All inputs must be vectorized if they read the innermost dimension,
# and cannot be vectorized if they don't
for field_name, (dim_mask, _) in node.accesses.items():
if dim_mask[-1] == True:
if vector_lengths[field_name] != vector_length:
raise ValueError("Input {} has vectorization width, "
"expected {}.".format(
field_name, vector_lengths[field_name],
vector_length))
else:
if vector_lengths[field_name] != 1:
raise ValueError(
"Input {} cannot be vectorized, "
"because it doesn't read the innermost dimension.".
format(field_name))
# Create a initialization phase corresponding to the highest distance
# to the center
init_sizes = [
(buffer_sizes[key] - vector_lengths[key] - val[2]) // vector_length
for key, val in buffer_accesses.items()
]
init_size_max = int(np.max(init_sizes))
parameters = np.array(["i", "j", "k"])[:len(shape)]
iterator_mask = shape > 1 # Dimensions we need to iterate over
iterators = make_iterators(shape[iterator_mask],
parameters=parameters[iterator_mask])
if vector_length > 1:
iterators[parameters[-1]] += "/{}".format(vector_length)
# Manually add pipeline entry and exit nodes
pipeline_range = dace.properties.SubsetProperty.from_string(', '.join(
iterators.values()))
pipeline = dace.sdfg.nodes.Pipeline(
"compute_" + node.label,
list(iterators.keys()),
pipeline_range,
dace.dtypes.ScheduleType.FPGA_Device,
False,
init_size=init_size_max,
init_overlap=False,
drain_size=init_size_max,
drain_overlap=True)
entry = dace.sdfg.nodes.PipelineEntry(pipeline)
exit = dace.sdfg.nodes.PipelineExit(pipeline)
state.add_nodes_from([entry, exit])
# Add nested SDFG to do 1) shift buffers 2) read from input 3) compute
nested_sdfg = dace.SDFG(node.label + "_inner", parent=state)
nested_sdfg._parent_sdfg = sdfg # TODO: This should not be necessary
nested_sdfg_tasklet = dace.sdfg.nodes.NestedSDFG(
nested_sdfg.label,
nested_sdfg,
# Input connectors
[k + "_in" for k, v in node.accesses.items() if any(v[0])] +
[name + "_buffer_in" for name, _ in buffer_sizes.items()],
# Output connectors
[k + "_out" for k in node.output_fields.keys()] +
[name + "_buffer_out" for name, _ in buffer_sizes.items()],
schedule=dace.ScheduleType.FPGA_Device)
nested_sdfg.parent_nsdfg_node = nested_sdfg_tasklet
# Map all symbols
for f, (dim_mask, _) in node.accesses.items():
if not any(dim_mask):
nested_sdfg_tasklet.symbol_mapping[f] = f
# Map iterators
for p in parameters:
nested_sdfg.add_symbol(p, dace.int64)
nested_sdfg_tasklet.symbol_mapping[p] = p
state.add_node(nested_sdfg_tasklet)
# Shift state, which shifts all buffers by one
shift_state = nested_sdfg.add_state(node.label + "_shift")
# Update state, which reads new values from memory
update_state = nested_sdfg.add_state(node.label + "_update")
#######################################################################
# Tasklet code generation
#######################################################################
code = node.code.as_string
# Replace relative indices with memlet names
converter = SubscriptConverter()
# Add copy boundary conditions
for field in node.boundary_conditions:
btype = node.boundary_conditions[field]["btype"]
if btype == "copy":
                center_index = tuple(
                    0 for _ in range(sum(node.accesses[field][0], 0)))
# This will register the renaming
converter.convert(field, center_index)
new_ast = converter.visit(ast.parse(code))
code = astunparse.unparse(new_ast)
code_memlet_names = converter.names
#######################################################################
# Implement boundary conditions
#######################################################################
boundary_code = ""
# Loop over each input
for (field_name, (accesses, accesses_buffer,
center)) in buffer_accesses.items():
# Loop over each access to this data
for indices in accesses:
# Loop over each index of this access
try:
memlet_name = code_memlet_names[field_name][indices]
except KeyError:
raise KeyError("Missing access in code: {}[{}]".format(
field_name, ", ".join(map(str, indices))))
cond = []
for i, offset in enumerate(indices):
if vector_length > 1 and i == len(indices) - 1:
par = "{}*{} + i_unroll".format(vector_length,
parameters[i])
else:
par = parameters[i]
if offset < 0:
cond.append(par + " < " + str(-offset))
elif offset > 0:
cond.append(par + " >= " + str(shape[i] - offset))
ctype = parent_sdfg.data(field_to_data[field_name]).dtype.ctype
if len(cond) == 0:
boundary_code += "{} = _{}\n".format(
memlet_name, memlet_name)
else:
bc = node.boundary_conditions[field_name]
btype = bc["btype"]
if btype == "copy":
center_memlet = code_memlet_names[field_name][center]
boundary_val = "_{}".format(center_memlet)
elif btype == "constant":
boundary_val = bc["value"]
elif btype == "shrink":
# We don't need to do anything here, it's up to the
# user to not use the junk output
boundary_val = JUNK_VAL
pass
else:
raise ValueError(
"Unsupported boundary condition type: {}".format(
node.boundary_conditions[field_name]["btype"]))
boundary_code += ("{} = {} if {} else _{}\n".format(
memlet_name, boundary_val, " or ".join(cond),
memlet_name))
#######################################################################
# Only write if we're in bounds
#######################################################################
write_code = ("\n".join([
"{}_inner_out = {}\n".format(
output,
code_memlet_names[output][tuple(0 for _ in range(len(shape)))])
for output in node.output_fields
]))
if init_size_max > 0:
init_cond = pipeline.init_condition()
write_condition = f"if not {init_cond}:\n\t"
nested_sdfg_tasklet.symbol_mapping[init_cond] = (init_cond)
nested_sdfg.add_symbol(init_cond, dace.bool)
else:
write_condition = ""
code = boundary_code + "\n" + code + "\n" + write_code
#######################################################################
# Create DaCe compute state
#######################################################################
# Compute state, which reads from input channels, performs the compute,
# and writes to the output channel(s)
compute_state = nested_sdfg.add_state(node.label + "_compute")
compute_inputs = list(
itertools.chain.from_iterable(
[["_" + v for v in code_memlet_names[f].values()]
for f, a in node.accesses.items() if any(a[0])]))
compute_tasklet = compute_state.add_tasklet(
node.label + "_compute",
compute_inputs,
{name + "_inner_out"
for name in node.output_fields},
code,
language=dace.dtypes.Language.Python)
if vector_length > 1:
compute_unroll_entry, compute_unroll_exit = compute_state.add_map(
compute_state.label + "_unroll",
{"i_unroll": "0:{}".format(vector_length)},
schedule=dace.ScheduleType.FPGA_Device,
unroll=True)
# Connect the three nested states
nested_sdfg.add_edge(shift_state, update_state,
dace.sdfg.InterstateEdge())
nested_sdfg.add_edge(update_state, compute_state,
dace.sdfg.InterstateEdge())
# First, grab scalar variables
for scalar, scalar_type in scalars.items():
nested_sdfg.add_symbol(scalar, scalar_type)
for (field_name, size), init_size in zip(buffer_sizes.items(),
init_sizes):
data_name = field_to_data[field_name]
connector = field_to_memlet[field_name].dst_conn
# Outer memory read
stream_name_outer = connector
stream_name_inner = field_name + "_in"
stream_outer = parent_sdfg.arrays[data_name].clone()
stream_outer.transient = False
sdfg.add_datadesc(stream_name_outer, stream_outer)
read_node_outer = state.add_read(stream_name_outer)
state.add_memlet_path(read_node_outer,
entry,
nested_sdfg_tasklet,
dst_conn=stream_name_inner,
memlet=dace.memlet.Memlet.simple(
stream_name_outer, "0", num_accesses=-1))
# Create inner memory pipe
stream_inner = stream_outer.clone()
nested_sdfg.add_datadesc(stream_name_inner, stream_inner)
buffer_name_outer = "{}_{}_buffer".format(node.label, field_name)
buffer_name_inner_read = "{}_buffer_in".format(field_name)
buffer_name_inner_write = "{}_buffer_out".format(field_name)
# Create buffer transient in outer SDFG
field_dtype = parent_sdfg.data(data_name).dtype
_, desc_outer = sdfg.add_array(
buffer_name_outer, (size, ),
field_dtype.base_type,
storage=dace.dtypes.StorageType.FPGA_Local,
transient=True)
# Create read and write nodes
read_node_outer = state.add_read(buffer_name_outer)
write_node_outer = state.add_write(buffer_name_outer)
# Outer buffer read
state.add_memlet_path(read_node_outer,
entry,
nested_sdfg_tasklet,
dst_conn=buffer_name_inner_read,
memlet=dace.memlet.Memlet.simple(
buffer_name_outer,
"0:{}".format(size),
num_accesses=-1))
# Outer buffer write
state.add_memlet_path(nested_sdfg_tasklet,
exit,
write_node_outer,
src_conn=buffer_name_inner_write,
memlet=dace.memlet.Memlet.simple(
write_node_outer.data,
"0:{}".format(size),
num_accesses=-1))
# Inner copy
desc_inner_read = desc_outer.clone()
desc_inner_read.transient = False
desc_inner_read.name = buffer_name_inner_read
desc_inner_write = desc_inner_read.clone()
desc_inner_write.name = buffer_name_inner_write
nested_sdfg.add_datadesc(buffer_name_inner_read, desc_inner_read)
nested_sdfg.add_datadesc(buffer_name_inner_write, desc_inner_write)
# Make shift state if necessary
if size > 1:
shift_read = shift_state.add_read(buffer_name_inner_read)
shift_write = shift_state.add_write(buffer_name_inner_write)
shift_entry, shift_exit = shift_state.add_map(
"shift_{}".format(field_name), {
"i_shift":
"0:{} - {}".format(size, vector_lengths[field_name])
},
schedule=dace.dtypes.ScheduleType.FPGA_Device,
unroll=True)
shift_tasklet = shift_state.add_tasklet(
"shift_{}".format(field_name),
{"{}_shift_in".format(field_name)},
{"{}_shift_out".format(field_name)},
"{field}_shift_out = {field}_shift_in".format(
field=field_name))
shift_state.add_memlet_path(shift_read,
shift_entry,
shift_tasklet,
dst_conn=field_name + "_shift_in",
memlet=dace.memlet.Memlet.simple(
shift_read.data,
"i_shift + {}".format(
vector_lengths[field_name]),
num_accesses=1))
shift_state.add_memlet_path(shift_tasklet,
shift_exit,
shift_write,
src_conn=field_name + "_shift_out",
memlet=dace.memlet.Memlet.simple(
shift_write.data,
"i_shift",
num_accesses=1))
# Begin reading according to this field's own buffer size, which is
# translated to an index by subtracting it from the maximum buffer
# size
begin_reading = (init_size_max - init_size)
end_reading = (
functools.reduce(operator.mul, shape, 1) / vector_length +
init_size_max - init_size)
update_read = update_state.add_read(stream_name_inner)
update_write = update_state.add_write(buffer_name_inner_write)
update_tasklet = update_state.add_tasklet(
"read_wavefront", {"wavefront_in"}, {"buffer_out"},
"if {it} >= {begin} and {it} < {end}:\n"
"\tbuffer_out = wavefront_in\n".format(
it=pipeline.iterator_str(),
begin=begin_reading,
end=end_reading),
language=dace.dtypes.Language.Python)
nested_sdfg_tasklet.symbol_mapping[pipeline.iterator_str()] = (
pipeline.iterator_str())
iterator_str = pipeline.iterator_str()
if iterator_str not in nested_sdfg.symbols:
nested_sdfg.add_symbol(iterator_str, dace.int64)
update_state.add_memlet_path(update_read,
update_tasklet,
memlet=dace.memlet.Memlet.simple(
update_read.data,
"0",
num_accesses=-1),
dst_conn="wavefront_in")
update_state.add_memlet_path(
update_tasklet,
update_write,
memlet=dace.memlet.Memlet.simple(
update_write.data, "{size} - {veclen}:{size}".format(
size=size,
veclen=vector_lengths[field_name],
dynamic=True) if size > 1 else "0"),
src_conn="buffer_out")
# Make compute state
compute_read = compute_state.add_read(buffer_name_inner_read)
for relative, offset in zip(buffer_accesses[field_name][0],
buffer_accesses[field_name][1]):
memlet_name = code_memlet_names[field_name][tuple(relative)]
if vector_length > 1:
if vector_lengths[field_name] > 1:
offset = "{} + i_unroll".format(offset)
else:
offset = str(offset)
path = [compute_read, compute_unroll_entry, compute_tasklet]
else:
offset = str(offset)
path = [compute_read, compute_tasklet]
compute_state.add_memlet_path(*path,
dst_conn="_" + memlet_name,
memlet=dace.memlet.Memlet.simple(
compute_read.data,
offset,
num_accesses=1))
for field_name, offset in node.output_fields.items():
if offset is not None and list(offset) != [0] * len(offset):
raise NotImplementedError("Output offsets not implemented")
data_name = field_to_data[field_name]
# Outer write
stream_name_outer = field_name
stream_name_inner = field_name + "_out"
stream_outer = parent_sdfg.arrays[data_name].clone()
stream_outer.transient = False
try:
sdfg.add_datadesc(stream_name_outer, stream_outer)
except NameError: # Already an input
parent_sdfg.arrays[data_name].access = (
dace.AccessType.ReadWrite)
write_node_outer = state.add_write(stream_name_outer)
state.add_memlet_path(nested_sdfg_tasklet,
exit,
write_node_outer,
src_conn=stream_name_inner,
memlet=dace.memlet.Memlet.simple(
stream_name_outer, "0", num_accesses=-1))
# Create inner stream
stream_inner = stream_outer.clone()
nested_sdfg.add_datadesc(stream_name_inner, stream_inner)
# Inner write
write_node_inner = compute_state.add_write(stream_name_inner)
# Intermediate buffer, mostly relevant for vectorization
output_buffer_name = field_name + "_output_buffer"
nested_sdfg.add_array(output_buffer_name, (vector_length, ),
stream_inner.dtype.base_type,
storage=dace.StorageType.FPGA_Registers,
transient=True)
output_buffer = compute_state.add_access(output_buffer_name)
# Condition write tasklet
output_tasklet = compute_state.add_tasklet(
field_name + "_conditional_write",
{"_{}".format(output_buffer_name)},
{"_{}".format(stream_name_inner)},
(write_condition +
"_{} = _{}".format(stream_name_inner, output_buffer_name)))
# If vectorized, we need to pass through the unrolled scope
if vector_length > 1:
compute_state.add_memlet_path(
compute_tasklet,
compute_unroll_exit,
output_buffer,
src_conn=field_name + "_inner_out",
memlet=dace.memlet.Memlet.simple(output_buffer_name,
"i_unroll",
num_accesses=1))
else:
compute_state.add_memlet_path(
compute_tasklet,
output_buffer,
src_conn=field_name + "_inner_out",
memlet=dace.memlet.Memlet.simple(output_buffer_name,
"0",
num_accesses=1))
# Final memlet to the output
compute_state.add_memlet_path(
output_buffer,
output_tasklet,
dst_conn="_{}".format(output_buffer_name),
memlet=dace.Memlet.simple(output_buffer.data,
"0:{}".format(vector_length)))
compute_state.add_memlet_path(
output_tasklet,
write_node_inner,
src_conn="_{}".format(stream_name_inner),
memlet=dace.memlet.Memlet.simple(write_node_inner.data,
"0",
num_accesses=-1))
sdfg.parent = parent_state
sdfg._parent_sdfg = parent_sdfg # TODO: this should not be necessary
return sdfg
| 47.326047 | 80 | 0.50739 |
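The expansion above branches on `node.boundary_conditions[field]["btype"]`, so each input field is expected to carry one of the three handled boundary types. A sketch of that mapping, inferred from the branches in the code; the field names are hypothetical.

```python
# Shape of the boundary-condition mapping the expansion expects, inferred from
# the "copy"/"constant"/"shrink" branches above; field names are hypothetical.
boundary_conditions = {
    "a": {"btype": "constant", "value": 0.0},  # out-of-bounds reads become 0.0
    "b": {"btype": "copy"},                    # out-of-bounds reads copy the center value
    "c": {"btype": "shrink"},                  # out-of-bounds output is junk; caller must ignore it
}
```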
4a24e979607bb713cd05fc1b119ce53eef18f604 | 4,682 | py | Python | homeassistant/components/deluge/sensor.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 6 | 2020-07-18T16:33:25.000Z | 2021-09-26T09:52:04.000Z | homeassistant/components/deluge/sensor.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 47 | 2020-07-23T07:13:11.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/deluge/sensor.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """Support for monitoring the Deluge BitTorrent client API."""
import logging
from deluge_client import DelugeRPCClient, FailedToReconnectException
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_VARIABLES,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
DATA_RATE_KILOBYTES_PER_SECOND,
STATE_IDLE,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
_THROTTLED_REFRESH = None
DEFAULT_NAME = "Deluge"
DEFAULT_PORT = 58846
DHT_UPLOAD = 1000
DHT_DOWNLOAD = 1000
SENSOR_TYPES = {
"current_status": ["Status", None],
"download_speed": ["Down Speed", DATA_RATE_KILOBYTES_PER_SECOND],
"upload_speed": ["Up Speed", DATA_RATE_KILOBYTES_PER_SECOND],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MONITORED_VARIABLES, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Deluge sensors."""
name = config[CONF_NAME]
host = config[CONF_HOST]
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
port = config[CONF_PORT]
deluge_api = DelugeRPCClient(host, port, username, password)
try:
deluge_api.connect()
except ConnectionRefusedError:
_LOGGER.error("Connection to Deluge Daemon failed")
raise PlatformNotReady
dev = []
for variable in config[CONF_MONITORED_VARIABLES]:
dev.append(DelugeSensor(variable, deluge_api, name))
add_entities(dev)
class DelugeSensor(Entity):
"""Representation of a Deluge sensor."""
def __init__(self, sensor_type, deluge_client, client_name):
"""Initialize the sensor."""
self._name = SENSOR_TYPES[sensor_type][0]
self.client = deluge_client
self.type = sensor_type
self.client_name = client_name
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.data = None
self._available = False
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return true if device is available."""
return self._available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from Deluge and updates the state."""
try:
self.data = self.client.call(
"core.get_session_status",
[
"upload_rate",
"download_rate",
"dht_upload_rate",
"dht_download_rate",
],
)
self._available = True
except FailedToReconnectException:
_LOGGER.error("Connection to Deluge Daemon Lost")
self._available = False
return
upload = self.data[b"upload_rate"] - self.data[b"dht_upload_rate"]
download = self.data[b"download_rate"] - self.data[b"dht_download_rate"]
if self.type == "current_status":
if self.data:
if upload > 0 and download > 0:
self._state = "Up/Down"
elif upload > 0 and download == 0:
self._state = "Seeding"
elif upload == 0 and download > 0:
self._state = "Downloading"
else:
self._state = STATE_IDLE
else:
self._state = None
if self.data:
if self.type == "download_speed":
kb_spd = float(download)
kb_spd = kb_spd / 1024
self._state = round(kb_spd, 2 if kb_spd < 0.1 else 1)
elif self.type == "upload_speed":
kb_spd = float(upload)
kb_spd = kb_spd / 1024
self._state = round(kb_spd, 2 if kb_spd < 0.1 else 1)
| 31.422819 | 80 | 0.618966 |
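A minimal sketch of exercising `DelugeSensor` outside Home Assistant, which can help verify credentials and the `core.get_session_status` call. Host, port, and credentials are placeholders, and `DelugeSensor` refers to the class defined in the module above.

```python
# Hypothetical standalone check of the sensor logic above; host, port and
# credentials are placeholders and no Home Assistant core is involved.
from deluge_client import DelugeRPCClient

client = DelugeRPCClient("192.0.2.10", 58846, "localclient", "secret")
client.connect()

# DelugeSensor is the class defined in the module above.
down_sensor = DelugeSensor("download_speed", client, "Deluge")
down_sensor.update()
print(down_sensor.name, down_sensor.state, down_sensor.unit_of_measurement)
```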
4a24ea186e9967618d4d5543ceeb82aa5e761f9f | 2,265 | py | Python | ginga/misc/Future.py | kyraikeda/ginga | e0ce979de4a87e12ba7a90eec0517a0be05d14bc | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 76 | 2015-01-05T14:46:14.000Z | 2022-03-23T04:10:54.000Z | ginga/misc/Future.py | kyraikeda/ginga | e0ce979de4a87e12ba7a90eec0517a0be05d14bc | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 858 | 2015-01-17T01:55:12.000Z | 2022-03-08T20:20:31.000Z | ginga/misc/Future.py | kyraikeda/ginga | e0ce979de4a87e12ba7a90eec0517a0be05d14bc | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 60 | 2015-01-14T21:59:07.000Z | 2022-02-13T03:38:49.000Z | #
# Future.py -- implementation of a computation placeholder object
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
# TODO: python finally has something like this. Use it instead?
#
import threading
from . import Callback
class TimeoutError(Exception):
pass
class Future(Callback.Callbacks):
def __init__(self, data=None, priority=0):
Callback.Callbacks.__init__(self)
self.evt = threading.Event()
self.res = None
# User can attach some arbitrary data if desired
self.data = data
self.priority = priority
self.enable_callback('resolved')
# for sorting in PriorityQueues
def __lt__(self, other):
return self.priority < other.priority
def get_data(self):
return self.data
# TODO: Could add some args type/value, return value validation here
def freeze(self, method, *args, **kwdargs):
self.method = method
self.args = args
self.kwdargs = kwdargs
def thaw(self, suppress_exception=True):
self.evt.clear()
if not suppress_exception:
res = self.method(*self.args, **self.kwdargs)
else:
try:
res = self.method(*self.args, **self.kwdargs)
except Exception as e:
res = e
self.resolve(res)
return res
def has_value(self):
return self.evt.is_set()
def resolve(self, value):
self.res = value
self.evt.set()
# TODO: need to change callbacks on some custom plugins first
#self.make_callback('resolved', value)
self.make_callback('resolved')
def get_value(self, block=True, timeout=None, suppress_exception=False):
if block:
self.evt.wait(timeout=timeout)
if not self.has_value():
raise TimeoutError("Timed out waiting for value!")
if isinstance(self.res, Exception) and (not suppress_exception):
raise self.res
return self.res
def wait(self, timeout=None):
self.evt.wait(timeout=timeout)
if not self.has_value():
raise TimeoutError("Timed out waiting for value!")
return self.res
# END
| 25.738636 | 76 | 0.623841 |
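A short sketch of the freeze/thaw pattern `Future` supports: the computation is captured up front, resolved later on a worker thread, and the consumer blocks on `get_value()`. The `add()` workload and the explicit thread are illustrative.

```python
# Hypothetical producer/consumer use of Future; the add() workload and the
# explicit worker thread are illustrative.
import threading

from ginga.misc.Future import Future


def add(a, b):
    return a + b


future = Future()
future.freeze(add, 2, 3)

# Resolve the future on a worker thread while the main thread waits.
worker = threading.Thread(target=future.thaw)
worker.start()

result = future.get_value(block=True, timeout=5.0)
print(result)  # 5
worker.join()
```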
4a24ebc9ca59f2c6871eceff2454a8931d5214f2 | 15,147 | py | Python | lib/datasets/json_dataset_evaluator.py | hp-zhengzy/detectron | b6cc9e2d243f43dacfa7329fb8920076727d9ece | [
"Apache-2.0"
] | null | null | null | lib/datasets/json_dataset_evaluator.py | hp-zhengzy/detectron | b6cc9e2d243f43dacfa7329fb8920076727d9ece | [
"Apache-2.0"
] | null | null | null | lib/datasets/json_dataset_evaluator.py | hp-zhengzy/detectron | b6cc9e2d243f43dacfa7329fb8920076727d9ece | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Functions for evaluating results computed for a json dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import numpy as np
import os
import uuid
from pycocotools.cocoeval import COCOeval
from core.config import cfg
from utils.io import save_object
import utils.boxes as box_utils
logger = logging.getLogger(__name__)
def evaluate_masks(
json_dataset,
all_boxes,
all_segms,
output_dir,
use_salt=True,
cleanup=False
):
res_file = os.path.join(
output_dir, 'segmentations_' + json_dataset.name + '_results'
)
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
_write_coco_segms_results_file(
json_dataset, all_boxes, all_segms, res_file)
# Only do evaluation on non-test sets (annotations are undisclosed on test)
if json_dataset.name.find('test') == -1:
coco_eval = _do_segmentation_eval(json_dataset, res_file, output_dir)
else:
coco_eval = None
# Optionally cleanup results json file
if cleanup:
os.remove(res_file)
return coco_eval
def _write_coco_segms_results_file(
json_dataset, all_boxes, all_segms, res_file
):
# [{"image_id": 42,
# "category_id": 18,
# "segmentation": [...],
# "score": 0.236}, ...]
results = []
for cls_ind, cls in enumerate(json_dataset.classes):
if cls == '__background__':
continue
if cls_ind >= len(all_boxes):
break
cat_id = json_dataset.category_to_id_map[cls]
results.extend(_coco_segms_results_one_category(
json_dataset, all_boxes[cls_ind], all_segms[cls_ind], cat_id))
print(results)
logger.info(
'Writing segmentation results json to: {}'.format(
os.path.abspath(res_file)))
with open(res_file, 'w') as fid:
json.dump(results, fid)
def _coco_segms_results_one_category(json_dataset, boxes, segms, cat_id):
results = []
image_ids = json_dataset.COCO.getImgIds()
image_ids.sort()
assert len(boxes) == len(image_ids)
assert len(segms) == len(image_ids)
for i, image_id in enumerate(image_ids):
dets = boxes[i]
rles = segms[i]
if isinstance(dets, list) and len(dets) == 0:
continue
dets = dets.astype(np.float)
scores = dets[:, -1]
results.extend(
[{'image_id': image_id,
'category_id': cat_id,
'segmentation': rles[k],
'score': scores[k]}
for k in range(dets.shape[0])])
return results
def _do_segmentation_eval(json_dataset, res_file, output_dir):
coco_dt = json_dataset.COCO.loadRes(str(res_file))
coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'segm')
coco_eval.evaluate()
coco_eval.accumulate()
_log_detection_eval_metrics(json_dataset, coco_eval)
eval_file = os.path.join(output_dir, 'segmentation_results.pkl')
save_object(coco_eval, eval_file)
logger.info('Wrote json eval results to: {}'.format(eval_file))
return coco_eval
def evaluate_boxes(
json_dataset, all_boxes, output_dir, use_salt=True, cleanup=False
):
res_file = os.path.join(
output_dir, 'bbox_' + json_dataset.name + '_results'
)
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
_write_coco_bbox_results_file(json_dataset, all_boxes, res_file)
# Only do evaluation on non-test sets (annotations are undisclosed on test)
if json_dataset.name.find('test') == -1:
coco_eval = _do_detection_eval(json_dataset, res_file, output_dir)
else:
coco_eval = None
# Optionally cleanup results json file
if cleanup:
os.remove(res_file)
return coco_eval
def _write_coco_bbox_results_file(json_dataset, all_boxes, res_file):
# [{"image_id": 42,
# "category_id": 18,
# "bbox": [258.15,41.29,348.26,243.78],
# "score": 0.236}, ...]
results = []
for cls_ind, cls in enumerate(json_dataset.classes):
if cls == '__background__':
continue
if cls_ind >= len(all_boxes):
break
cat_id = json_dataset.category_to_id_map[cls]
results.extend(_coco_bbox_results_one_category(
json_dataset, all_boxes[cls_ind], cat_id))
logger.info(
'Writing bbox results json to: {}'.format(os.path.abspath(res_file)))
with open(res_file, 'w') as fid:
json.dump(results, fid)
def _coco_bbox_results_one_category(json_dataset, boxes, cat_id):
results = []
image_ids = json_dataset.COCO.getImgIds()
image_ids.sort()
assert len(boxes) == len(image_ids)
for i, image_id in enumerate(image_ids):
dets = boxes[i]
if isinstance(dets, list) and len(dets) == 0:
continue
dets = dets.astype(np.float)
scores = dets[:, -1]
xywh_dets = box_utils.xyxy_to_xywh(dets[:, 0:4])
xs = xywh_dets[:, 0]
ys = xywh_dets[:, 1]
ws = xywh_dets[:, 2]
hs = xywh_dets[:, 3]
results.extend(
[{'image_id': image_id,
'category_id': cat_id,
'bbox': [xs[k], ys[k], ws[k], hs[k]],
'score': scores[k]} for k in range(dets.shape[0])])
return results
def _do_detection_eval(json_dataset, res_file, output_dir):
coco_dt = json_dataset.COCO.loadRes(str(res_file))
coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
_log_detection_eval_metrics(json_dataset, coco_eval)
eval_file = os.path.join(output_dir, 'detection_results.pkl')
save_object(coco_eval, eval_file)
logger.info('Wrote json eval results to: {}'.format(eval_file))
return coco_eval
def _log_detection_eval_metrics(json_dataset, coco_eval):
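    # Logs COCO-style AP: mean precision over the IoU thresholds between
    # IoU_lo_thresh and IoU_hi_thresh, reported overall and per category.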
def _get_thr_ind(coco_eval, thr):
ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
(coco_eval.params.iouThrs < thr + 1e-5))[0][0]
iou_thr = coco_eval.params.iouThrs[ind]
assert np.isclose(iou_thr, thr)
return ind
IoU_lo_thresh = 0.5
IoU_hi_thresh = 0.95
ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
# precision has dims (iou, recall, cls, area range, max dets)
# area range index 0: all area ranges
# max dets index 2: 100 per image
precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
ap_default = np.mean(precision[precision > -1])
logger.info(
'~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] ~~~~'.format(
IoU_lo_thresh, IoU_hi_thresh))
logger.info('{:.1f}'.format(100 * ap_default))
for cls_ind, cls in enumerate(json_dataset.classes):
if cls == '__background__':
continue
# minus 1 because of __background__
precision = coco_eval.eval['precision'][
ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
ap = np.mean(precision[precision > -1])
logger.info('{:.1f}'.format(100 * ap))
logger.info('~~~~ Summary metrics ~~~~')
coco_eval.summarize()
def evaluate_box_proposals(
json_dataset, roidb, thresholds=None, area='all', limit=None
):
"""Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
'all': 0,
'small': 1,
'medium': 2,
'large': 3,
'96-128': 4,
'128-256': 5,
'256-512': 6,
'512-inf': 7}
area_ranges = [
[0**2, 1e5**2], # all
[0**2, 32**2], # small
[32**2, 96**2], # medium
[96**2, 1e5**2], # large
[96**2, 128**2], # 96-128
[128**2, 256**2], # 128-256
[256**2, 512**2], # 256-512
[512**2, 1e5**2]] # 512-inf
assert area in areas, 'Unknown area range: {}'.format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = np.zeros(0)
num_pos = 0
for entry in roidb:
gt_inds = np.where(
(entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
gt_boxes = entry['boxes'][gt_inds, :]
gt_areas = entry['seg_areas'][gt_inds]
valid_gt_inds = np.where(
(gt_areas >= area_range[0]) & (gt_areas <= area_range[1]))[0]
gt_boxes = gt_boxes[valid_gt_inds, :]
num_pos += len(valid_gt_inds)
non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
boxes = entry['boxes'][non_gt_inds, :]
if boxes.shape[0] == 0:
continue
if limit is not None and boxes.shape[0] > limit:
boxes = boxes[:limit, :]
overlaps = box_utils.bbox_overlaps(
boxes.astype(dtype=np.float32, copy=False),
gt_boxes.astype(dtype=np.float32, copy=False))
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
for j in range(min(boxes.shape[0], gt_boxes.shape[0])):
# find which proposal box maximally covers each gt box
argmax_overlaps = overlaps.argmax(axis=0)
# and get the iou amount of coverage for each gt box
max_overlaps = overlaps.max(axis=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ind = max_overlaps.argmax()
gt_ovr = max_overlaps.max()
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
gt_overlaps = np.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = np.arange(0.5, 0.95 + 1e-5, step)
recalls = np.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,
'gt_overlaps': gt_overlaps, 'num_pos': num_pos}
def evaluate_keypoints(
json_dataset,
all_boxes,
all_keypoints,
output_dir,
use_salt=True,
cleanup=False
):
res_file = os.path.join(
output_dir, 'keypoints_' + json_dataset.name + '_results'
)
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
_write_coco_keypoint_results_file(
json_dataset, all_boxes, all_keypoints, res_file)
# Only do evaluation on non-test sets (annotations are undisclosed on test)
if json_dataset.name.find('test') == -1:
coco_eval = _do_keypoint_eval(json_dataset, res_file, output_dir)
else:
coco_eval = None
# Optionally cleanup results json file
if cleanup:
os.remove(res_file)
return coco_eval
def _write_coco_keypoint_results_file(
json_dataset, all_boxes, all_keypoints, res_file
):
results = []
for cls_ind, cls in enumerate(json_dataset.classes):
if cls == '__background__':
continue
if cls_ind >= len(all_keypoints):
break
logger.info(
'Collecting {} results ({:d}/{:d})'.format(
cls, cls_ind, len(all_keypoints) - 1))
cat_id = json_dataset.category_to_id_map[cls]
results.extend(_coco_kp_results_one_category(
json_dataset, all_boxes[cls_ind], all_keypoints[cls_ind], cat_id))
logger.info(
'Writing keypoint results json to: {}'.format(
os.path.abspath(res_file)))
with open(res_file, 'w') as fid:
json.dump(results, fid)
def _coco_kp_results_one_category(json_dataset, boxes, kps, cat_id):
results = []
image_ids = json_dataset.COCO.getImgIds()
image_ids.sort()
assert len(kps) == len(image_ids)
assert len(boxes) == len(image_ids)
use_box_score = False
if cfg.KRCNN.KEYPOINT_CONFIDENCE == 'logit':
# This is ugly; see utils.keypoints.heatmap_to_keypoints for the magic
# indexes
score_index = 2
elif cfg.KRCNN.KEYPOINT_CONFIDENCE == 'prob':
score_index = 3
elif cfg.KRCNN.KEYPOINT_CONFIDENCE == 'bbox':
use_box_score = True
else:
raise ValueError(
'KRCNN.KEYPOINT_CONFIDENCE must be "logit", "prob", or "bbox"')
for i, image_id in enumerate(image_ids):
if len(boxes[i]) == 0:
continue
kps_dets = kps[i]
scores = boxes[i][:, -1].astype(np.float)
if len(kps_dets) == 0:
continue
for j in range(len(kps_dets)):
xy = []
kps_score = 0
for k in range(kps_dets[j].shape[1]):
xy.append(float(kps_dets[j][0, k]))
xy.append(float(kps_dets[j][1, k]))
xy.append(1)
if not use_box_score:
kps_score += kps_dets[j][score_index, k]
if use_box_score:
kps_score = scores[j]
else:
kps_score /= kps_dets[j].shape[1]
results.extend([{'image_id': image_id,
'category_id': cat_id,
'keypoints': xy,
'score': kps_score}])
return results
def _do_keypoint_eval(json_dataset, res_file, output_dir):
ann_type = 'keypoints'
imgIds = json_dataset.COCO.getImgIds()
imgIds.sort()
coco_dt = json_dataset.COCO.loadRes(res_file)
coco_eval = COCOeval(json_dataset.COCO, coco_dt, ann_type)
coco_eval.params.imgIds = imgIds
coco_eval.evaluate()
coco_eval.accumulate()
eval_file = os.path.join(output_dir, 'keypoint_results.pkl')
save_object(coco_eval, eval_file)
logger.info('Wrote json eval results to: {}'.format(eval_file))
coco_eval.summarize()
return coco_eval
| 35.0625 | 80 | 0.618076 |
4a24eea48c207ee6db0142a26cbf6b883121d36b | 4,770 | py | Python | tests.py | nicorellius/password-generator | 15fff8583f070f1d5ef41524f8776c8c97bf99db | [
"MIT"
] | 1 | 2017-06-11T14:29:54.000Z | 2017-06-11T14:29:54.000Z | tests.py | nicorellius/password-generator | 15fff8583f070f1d5ef41524f8776c8c97bf99db | [
"MIT"
] | null | null | null | tests.py | nicorellius/password-generator | 15fff8583f070f1d5ef41524f8776c8c97bf99db | [
"MIT"
] | null | null | null | """
Write some tests!
"""
import pytest
from click.testing import CliRunner
from scripts.generate import generate_secret
from scripts.generate import (
_validate_count, _roll_dice, _concatenate_remainder,
_prepare_chunks, _chunks
)
import config
from utils import get_roc
# All tests use these...
runner = CliRunner()
roc = get_roc(config.API_KEY)
chars = config.CHARACTERS
# ========== simple add function for sanity check ==========
def add_function(x):
return x + 1
def test_add_function_pass():
assert add_function(4) == 5
# ==========================================================
def test__concatenate_remainder_default():
tmp_pw = _concatenate_remainder(roc, chars, 20)
assert tmp_pw is not None
assert len(tmp_pw) == 20
def test__concatenate_remainder_thirty_chars():
tmp_pw = _concatenate_remainder(roc, chars, 30)
assert tmp_pw is not None
assert len(tmp_pw) == 30
def test__generate_secret_mixed_default():
result = runner.invoke(generate_secret, ['mixed'])
tmp_pw = result.output
assert result.exit_code == 0
assert tmp_pw is not None
assert len(tmp_pw) == 40
assert len(tmp_pw.split()) == 4
def test__generate_secret_numbers_default():
result = runner.invoke(generate_secret, ['numbers'])
tmp_pw = result.output
assert result.exit_code == 0
assert tmp_pw is not None
assert len(tmp_pw) == 40
assert len(tmp_pw.split()) == 4
def test__generate_secret_default():
result = runner.invoke(generate_secret, ['words'])
assert result.exit_code == 0
assert result.output is not None
assert len(result.output.split()) == 8
def test__generate_secret_short_list_four_words():
result = runner.invoke(generate_secret, ['words',
'--how-many', 1,
'--number-rolls', 4,
'--number-dice', 4])
assert result.exit_code == 0
assert result.output is not None
assert len(result.output.split()) == 7
def test__generate_secret_long_list_five_words():
result = runner.invoke(generate_secret, ['words',
'--how-many', 1,
'--number-rolls', 5,
'--number-dice', 5])
assert result.exit_code == 0
assert result.output is not None
assert len(result.output.split()) == 8
def test__generate_secret_short_list_five_words():
result = runner.invoke(generate_secret, ['words',
'--how-many', 1,
'--number-rolls', 4,
'--number-dice', 5])
assert result.exit_code == 0
assert result.output is not None
assert len(result.output.split()) == 7
def test__generate_secret_long_list_four_words():
result = runner.invoke(generate_secret, ['words',
'--how-many', 1,
'--number-rolls', 5,
'--number-dice', 4])
assert result.exit_code == 0
assert result.output is not None
assert len(result.output.split()) == 8
def test__roll_dice_is_list():
r5 = _roll_dice()
r4 = _roll_dice()
# Test if roll result type is a list
assert type(r5) is list
assert type(r4) is list
# Test for emptiness of various lists
assert not [] is True # This one is weird and confusing
assert not []
assert [1, 2, 3, 4, 5]
assert r5, r4
# @pytest.mark.parametrize('execution_number', range(10))
# def test__roll_dice(execution_number):
#
# r = _roll_dice()
# total = sum(r)
#
# assert total >= 25
#
# # This test will fail ~7% of the time, so it's considered brittle
# for i in {1, 2, 3, 4, 5, 6}:
# assert i in r
def test__roll_dice():
r = _roll_dice()
total = sum(r)
assert total >= 25
    assert any(face in r for face in (1, 2, 3, 4, 5, 6))
def test__chunks():
inlist = [1, 2, 3, 4, 5]
results = _chunks(inlist, 1)
assert results is not None
def test__prepare_chunks_four():
result = _prepare_chunks(4, 4)
for i in result:
assert len(i) == 4
def test__prepare_chunks_five():
result = _prepare_chunks(5, 5)
for i in result:
assert len(i) == 5
def test__validate_count():
v1 = 4
v2 = 5
c1 = _validate_count(v1)
c2 = _validate_count(v2)
assert c1 == 4
assert c2 == 5
def test__validate_count_throws_correct_exception():
with pytest.raises(Exception):
v3 = 6
_validate_count(v3)
| 23.15534 | 71 | 0.578826 |
4a24eee501abc6d0e1697fdbaf9bb26cb1d15319 | 5,420 | py | Python | tensorflow/contrib/model_pruning/python/layers/layers_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/model_pruning/python/layers/layers_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/model_pruning/python/layers/layers_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for imagingvision.intelligence.tensorflow.model_pruning.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.model_pruning.python.layers import core_layers
from tensorflow.contrib.model_pruning.python.layers import layers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class MaskedConvolutionLayerTest(test.TestCase):
def setUp(self):
super(MaskedConvolutionLayerTest, self).setUp()
self.height, self.width = 7, 9
def testInvalidRank3(self):
input_tensor = array_ops.ones((self.height, self.width, 3))
with self.assertRaisesRegexp(ValueError, 'rank'):
layers.masked_conv2d(input_tensor, 32, 3)
def testInvalidRank5(self):
input_tensor = array_ops.ones((8, 8, self.height, self.width, 3))
with self.assertRaisesRegexp(ValueError, 'rank'):
layers.masked_conv2d(input_tensor, 32, 3)
def testSingleConvMaskAdded(self):
kernel_size = 3
input_depth, output_depth = 8, 32
input_tensor = array_ops.ones((8, self.height, self.width, input_depth))
layers.masked_conv2d(input_tensor, output_depth, kernel_size)
masks = ops.get_collection(core_layers.MASK_COLLECTION)
self.assertEqual(len(masks), 1)
self.assertListEqual(masks[0].get_shape().as_list(),
[kernel_size, kernel_size, input_depth, output_depth])
masked_weight = ops.get_collection(core_layers.MASKED_WEIGHT_COLLECTION)
self.assertEqual(len(masked_weight), 1)
self.assertListEqual(masked_weight[0].get_shape().as_list(),
[kernel_size, kernel_size, input_depth, output_depth])
def testMultipleConvMaskAdded(self):
number_of_layers = 5
kernel_size = 3
base_depth = 4
depth_step = 7
input_tensor = array_ops.ones((8, self.height, self.width, base_depth))
top_layer = input_tensor
for ix in range(number_of_layers):
top_layer = layers.masked_conv2d(top_layer, base_depth +
(ix + 1) * depth_step, kernel_size)
masks = ops.get_collection(core_layers.MASK_COLLECTION)
self.assertEqual(len(masks), number_of_layers)
for ix in range(number_of_layers):
self.assertListEqual(masks[ix].get_shape().as_list(), [
kernel_size, kernel_size, base_depth + ix * depth_step,
base_depth + (ix + 1) * depth_step
])
masked_weight = ops.get_collection(core_layers.MASKED_WEIGHT_COLLECTION)
self.assertEqual(len(masked_weight), number_of_layers)
for ix in range(number_of_layers):
self.assertListEqual(masked_weight[ix].get_shape().as_list(), [
kernel_size, kernel_size, base_depth + ix * depth_step,
base_depth + (ix + 1) * depth_step
])
class MaskedFullyConnectedLayerTest(test.TestCase):
def testSingleFCMaskAdded(self):
input_depth, output_depth = 8, 32
input_tensor = array_ops.ones((5, input_depth))
layers.masked_fully_connected(input_tensor, output_depth)
masks = ops.get_collection(core_layers.MASK_COLLECTION)
self.assertEqual(len(masks), 1)
self.assertListEqual(masks[0].get_shape().as_list(),
[input_depth, output_depth])
masked_weight = ops.get_collection(core_layers.MASKED_WEIGHT_COLLECTION)
self.assertEqual(len(masked_weight), 1)
self.assertListEqual(masked_weight[0].get_shape().as_list(),
[input_depth, output_depth])
def testMultipleConvMaskAdded(self):
number_of_layers = 5
base_depth = 4
depth_step = 7
input_tensor = array_ops.ones((8, base_depth))
top_layer = input_tensor
for ix in range(number_of_layers):
top_layer = layers.masked_fully_connected(top_layer, base_depth +
(ix + 1) * depth_step)
masks = ops.get_collection(core_layers.MASK_COLLECTION)
self.assertEqual(len(masks), number_of_layers)
for ix in range(number_of_layers):
self.assertListEqual(masks[ix].get_shape().as_list(), [
base_depth + ix * depth_step, base_depth + (ix + 1) * depth_step
])
masked_weight = ops.get_collection(core_layers.MASKED_WEIGHT_COLLECTION)
self.assertEqual(len(masked_weight), number_of_layers)
for ix in range(number_of_layers):
self.assertListEqual(masked_weight[ix].get_shape().as_list(), [
base_depth + ix * depth_step, base_depth + (ix + 1) * depth_step
])
if __name__ == '__main__':
test.main()
| 38.714286 | 81 | 0.684502 |
4a24ef2e858b4c420e0ab165bdcf7645e7a1697f | 2,175 | py | Python | tests/entry_point_test.py | elias-1/NiftyNet | 05cd2ffbff5043d9a40b524a6d72f6bd5cd072d2 | [
"Apache-2.0"
] | 1 | 2018-02-24T12:43:53.000Z | 2018-02-24T12:43:53.000Z | tests/entry_point_test.py | elias-1/NiftyNet | 05cd2ffbff5043d9a40b524a6d72f6bd5cd072d2 | [
"Apache-2.0"
] | null | null | null | tests/entry_point_test.py | elias-1/NiftyNet | 05cd2ffbff5043d9a40b524a6d72f6bd5cd072d2 | [
"Apache-2.0"
] | 1 | 2018-02-24T12:43:40.000Z | 2018-02-24T12:43:40.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import sys
import tensorflow as tf
import net_autoencoder
import net_gan
import net_regress
import net_run
import net_segment
class EntryPointTest(tf.test.TestCase):
def test_wrong_app(self):
sys.argv = ['', 'train',
'-a', 'foo',
'-c', os.path.join('config', 'default_segmentation.ini')]
with self.assertRaisesRegexp(ValueError, 'module'):
net_run.main()
sys.argv = ['', 'train',
'-c', os.path.join('config', 'default_segmentation.ini')]
with self.assertRaisesRegexp(ValueError, 'module'):
net_run.main()
def test_wrong_config(self):
sys.argv = ['', 'train',
'-a', 'net_segment',
'-c', os.path.join('foo', 'default_segmentation.ini')]
with self.assertRaisesRegexp(IOError, ''):
net_run.main()
sys.argv = ['', 'train',
'-a', 'net_segment']
with self.assertRaisesRegexp(IOError, ''):
net_run.main()
def test_no_action(self):
sys.argv = ['',
'-a', 'net_segment',
'-c', os.path.join('config', 'default_segmentation.ini')]
with self.assertRaisesRegexp(SystemExit, ''):
net_run.main()
def test_wrong_param(self):
sys.argv = ['',
'-a', 'net_segment',
'-c', os.path.join('config', 'default_segmentation.ini'),
'--foo=bar']
with self.assertRaisesRegexp(SystemExit, ''):
net_run.main()
def test_empty(self):
with self.assertRaisesRegexp(SystemExit, ''):
net_run.main()
with self.assertRaisesRegexp(SystemExit, ''):
net_gan.main()
with self.assertRaisesRegexp(SystemExit, ''):
net_segment.main()
with self.assertRaisesRegexp(SystemExit, ''):
net_regress.main()
with self.assertRaisesRegexp(SystemExit, ''):
net_autoencoder.main()
if __name__ == "__main__":
tf.test.main()
| 30.633803 | 77 | 0.551724 |
4a24f0c63e218f563222e8f8a9b5c7688f728ba8 | 1,209 | py | Python | code/run_models.py | mcallaghan/cc-topography | 0a48cc7ef19013076aceba7195a9375e499cbf6e | [
"MIT"
] | 5 | 2020-01-28T13:09:21.000Z | 2022-01-12T15:43:02.000Z | code/run_models.py | xueke-li/cc-topography | 0a48cc7ef19013076aceba7195a9375e499cbf6e | [
"MIT"
] | null | null | null | code/run_models.py | xueke-li/cc-topography | 0a48cc7ef19013076aceba7195a9375e499cbf6e | [
"MIT"
] | 7 | 2020-01-27T17:43:33.000Z | 2022-02-20T11:46:16.000Z | import sys, os, django
sys.path.append('/home/galm/software/django/tmv/BasicBrowser/')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BasicBrowser.settings")
django.setup()
from scoping.models import *
from tmv_app.models import *
from tmv_app.tasks import *
sw = set([])
sw.add('elsevier')
sw.add('rights')
sw.add('reserved')
sw.add('john')
sw.add('wiley')
sw.add('sons')
sw.add('copyright')
q = Query.objects.get(pk=6187)
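# Hyperparameter sweep: for each method, alpha (scaled x10 for "LD") and topic
# count K, get or create a RunStats entry and run the topic model for it.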
for m in ["NM","LD"]:
for a in [0.01,0.05,0.1]:
for k in [80,90,100,110,120,130,140,150]:
if m=="LD":
alpha=a*10
else:
alpha=a
try:
stat, created = RunStats.objects.get_or_create(
K=k,
alpha=alpha,
fancy_tokenization=True,
max_df=0.9,
max_iter=500,
method=m,
min_freq=50,
ngram=1,
query=q,
)
except:
continue
if created or stat.status==0:
stat.extra_stopwords=list(sw)
stat.save()
do_nmf(stat.pk)
| 25.1875 | 72 | 0.490488 |
4a24f214d80f0b166d48e52b9f23a5187b33e525 | 228 | py | Python | accounts/apps.py | ccollado12/Price-Comparison-Group1-BE-1 | b257e963ce946303efe6a640fd6d23039d39e34f | [
"MIT"
] | 1 | 2021-11-09T11:20:45.000Z | 2021-11-09T11:20:45.000Z | accounts/apps.py | ccollado12/Price-Comparison-Group1-BE-1 | b257e963ce946303efe6a640fd6d23039d39e34f | [
"MIT"
] | 1 | 2021-07-22T02:30:03.000Z | 2021-07-22T02:30:03.000Z | accounts/apps.py | ccollado12/Price-Comparison-Group1-BE-1 | b257e963ce946303efe6a640fd6d23039d39e34f | [
"MIT"
] | 4 | 2021-07-15T14:35:47.000Z | 2021-08-14T18:07:14.000Z | from django.apps import AppConfig
class AccountsConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'accounts'
# Use created signals
def ready(self):
import accounts.signals
| 20.727273 | 56 | 0.714912 |
4a24f41c6440042630371ecb46583ce87fe572bc | 197 | py | Python | car/factories/brand.py | stefangeorg/town-car | 81ca81eed8912911527c3cb6dc521d000482b0fd | [
"Apache-2.0"
] | null | null | null | car/factories/brand.py | stefangeorg/town-car | 81ca81eed8912911527c3cb6dc521d000482b0fd | [
"Apache-2.0"
] | null | null | null | car/factories/brand.py | stefangeorg/town-car | 81ca81eed8912911527c3cb6dc521d000482b0fd | [
"Apache-2.0"
] | null | null | null | import factory
from car.models import Brand
class BrandFactory(factory.django.DjangoModelFactory):
name = factory.Sequence(lambda n: "Model %03d" % n)
class Meta:
model = Brand
| 17.909091 | 55 | 0.705584 |
4a24f4279472d48c30df8429861ca338b4fa36ca | 15,094 | py | Python | src/main/python/lib/default/reconnect.py | emilybache/texttest-runner | 2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a | [
"MIT"
] | null | null | null | src/main/python/lib/default/reconnect.py | emilybache/texttest-runner | 2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a | [
"MIT"
] | null | null | null | src/main/python/lib/default/reconnect.py | emilybache/texttest-runner | 2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a | [
"MIT"
] | null | null | null |
import os, shutil, plugins, operator, logging, time, datetime
from glob import glob
from itertools import groupby
# Trawl around for a suitable dir to reconnect to if we haven't been told one
# A tangle of side-effects: we find the run directory when asked for the extra versions,
# (so we can provide further ones accordingly), find the application directory when asked to check sanity
# (so we can bail if it's not there) and store in self.reconnDir, ready to provide to the ReconnectTest action
class ReconnectConfig:
runDirCache = {}
datedVersions = set()
def __init__(self, optionMap):
self.fullRecalculate = optionMap.has_key("reconnfull")
self.diag = logging.getLogger("Reconnection")
self.reconnectTmpInfo = optionMap.get("reconnect")
self.reconnDir = None
self.errorMessage = ""
def getReconnectAction(self):
return ReconnectTest(self.reconnDir, self.fullRecalculate)
def cacheRunDir(self, app, runDir, version=""):
if version:
keys = [ app.fullName() + "." + version ]
else:
keys = [ app.fullName() ] + app.versions
for i in range(len(keys)):
subKey = ".".join(keys[:i+1])
if i == len(keys) - 1 or not self.runDirCache.has_key(subKey):
self.runDirCache[subKey] = runDir
self.diag.info("Caching " + subKey + " = " + runDir)
def findRunDir(self, app):
return self._findRunDir(repr(app))
def _findRunDir(self, searchKey):
self.diag.info("Searching for run directory for " + searchKey)
entry = self.runDirCache.get(searchKey)
if entry:
return entry
parts = searchKey.split(".")
if len(parts) > 1:
return self._findRunDir(".".join(parts[:-1]))
def getExtraVersions(self, app, givenExtras):
self.diag = logging.getLogger("Reconnection")
self.diag.info("Finding reconnect 'extra versions' for " + repr(app) + " given tmp info '" + repr(self.reconnectTmpInfo) + "'")
if self.reconnectTmpInfo and os.path.isdir(self.reconnectTmpInfo):
# See if this is an explicitly provided run directory
dirName = os.path.normpath(self.reconnectTmpInfo)
versionLists = self.getVersionListsTopDir(dirName)
self.diag.info("Directory has version lists " + repr(versionLists))
if versionLists is not None:
return self.getVersionsFromDirs(app, [ dirName ], givenExtras)
fetchDir = app.getPreviousWriteDirInfo(self.reconnectTmpInfo)
if not os.path.isdir(fetchDir):
if fetchDir == self.reconnectTmpInfo or not self.reconnectTmpInfo:
self.errorMessage = "Could not find TextTest temporary directory at " + fetchDir
else:
self.errorMessage = "Could not find TextTest temporary directory for " + \
self.reconnectTmpInfo + " at " + fetchDir
return []
self.diag.info("Looking for run directories under " + fetchDir)
runDirs = self.getReconnectRunDirs(app, fetchDir)
self.diag.info("Found run directories " + repr(runDirs))
if len(runDirs) == 0:
self.errorMessage = "Could not find any runs matching " + app.description() + " under " + fetchDir
return []
else:
return self.getVersionsFromDirs(app, runDirs, givenExtras)
def findAppDirUnder(self, app, runDir):
# Don't pay attention to dated versions here...
appVersions = frozenset(app.versions).difference(self.datedVersions)
self.diag.info("Looking for directory with versions " + repr(appVersions))
for f in os.listdir(runDir):
versionSet = self.getVersionSetSubDir(f, app.name)
if versionSet == appVersions:
return os.path.join(runDir, f)
def getReconnectRunDirs(self, app, fetchDir):
correctNames = sorted(os.listdir(fetchDir))
fullPaths = [ os.path.join(fetchDir, d) for d in correctNames ]
return filter(lambda d: self.isRunDirectoryFor(app, d), fullPaths)
def getFilter(self):
return ReconnectFilter(self.reconnDir)
@classmethod
def all_perms(cls, items):
# Lifted from a standard recipe
if len(items) <= 1:
yield items
else:
for perm in cls.all_perms(items[1:]):
for i in range(len(perm)+1):
yield perm[:i] + items[0:1] + perm[i:]
def versionSuffix(self, parts):
fullVersion = ".".join(parts)
if len(fullVersion) == 0:
return ""
return "." + fullVersion
def isRunDirectoryFor(self, app, d):
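        # A directory counts as a run directory for this app if it contains an
        # app subdirectory named app.name plus some permutation of its versions
        # (either an exact match or one carrying further version suffixes).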
for permutation in self.all_perms(app.versions):
appDirRoot = os.path.join(d, app.name + self.versionSuffix(permutation))
if os.path.isdir(appDirRoot) or len(glob(appDirRoot + ".*")) > 0:
return True
return False
def getVersionListsTopDir(self, fileName):
# Show the framework how to find the version list given a file name
# If it doesn't match, return None
parts = os.path.basename(fileName).split(".")
if len(parts) > 2 and parts[0] != "static_gui":
# drop the run descriptor at the start and the date/time and pid at the end
versionParts = ".".join(parts[1:-2]).split("++")
return [ part.split(".") for part in versionParts ]
def getVersionListSubDir(self, fileName, stem):
# Show the framework how to find the version list given a file name
# If it doesn't match, return None
parts = fileName.split(".")
if stem == parts[0]:
# drop the application at the start
return parts[1:]
def getVersionSetSubDir(self, fileName, stem):
vlist = self.getVersionListSubDir(fileName, stem)
if vlist is not None:
return frozenset(vlist)
def getAllVersionLists(self, app, givenExtras, groupDirs):
vlists = []
for groupDir in groupDirs:
for path in os.listdir(groupDir):
fullPath = os.path.join(groupDir, path)
if os.path.isdir(fullPath):
vlist = self.getVersionListSubDir(path, app.name)
if vlist is None:
continue
self.diag.info("Found list " + repr(vlist))
if givenExtras:
vset = frozenset(vlist).difference(givenExtras)
vlist = filter(lambda v: v in vset, vlist)
if vlist not in vlists:
vlists.append(vlist)
return vlists
def expandExtraVersions(self, extras):
expanded = set()
for extra in extras:
expanded.add(extra)
expanded.update(extra.split("."))
return expanded
def getVersionsFromDirs(self, app, dirs, givenExtras):
versions = []
allGivenExtras = self.expandExtraVersions(givenExtras)
self.diag.info("Getting extra versions from directories, versions from config = " + repr(allGivenExtras))
appVersions = frozenset(app.versions)
for versionLists, groupDirIter in groupby(dirs, self.getVersionListsTopDir):
groupDirs = list(groupDirIter)
self.diag.info("Considering version lists " + repr(versionLists) + " with dirs " + repr(groupDirs))
for versionList in self.getAllVersionLists(app, allGivenExtras, groupDirs):
version = ".".join(versionList)
self.diag.info("Considering version list " + repr(versionList))
versionSet = frozenset(versionList)
if len(appVersions.difference(versionSet)) > 0:
continue # If the given version isn't included, ignore it
extraVersionSet = versionSet.difference(appVersions)
# Important to preserve the order of the versions as received
extraVersionList = filter(lambda v: v in extraVersionSet, versionList)
extraVersion = ".".join(extraVersionList)
if len(groupDirs) == 1:
if extraVersion:
versions.append(extraVersion)
self.cacheRunDir(app, groupDirs[0], version)
else:
self.cacheRunDir(app, groupDirs[0])
else:
datedVersionMap = {}
for dir in groupDirs:
datedVersionMap[os.path.basename(dir).split(".")[-2]] = dir
datedVersions = sorted(datedVersionMap.keys(), key=self.dateValue, reverse=True)
self.datedVersions.update(datedVersions)
self.diag.info("Found candidate dated versions: " + repr(datedVersions))
if not extraVersion: # one of them has to be the main version...
mainVersion = datedVersions.pop(0)
self.cacheRunDir(app, datedVersionMap.get(mainVersion))
for datedVersion in datedVersions:
dir = datedVersionMap.get(datedVersion)
if extraVersion:
versions.append(extraVersion + "." + datedVersion)
self.cacheRunDir(app, dir, version + "." + datedVersion)
else:
versions.append(datedVersion)
if version:
self.cacheRunDir(app, dir, version + "." + datedVersion)
else:
self.cacheRunDir(app, dir, datedVersion)
self.diag.info("Extra versions found as " + repr(versions))
return versions
@staticmethod
def dateValue(version):
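        # The dated version has no year ("%d%b%H%M%S"), so assume the current
        # year and roll back one year if that would put the date in the future.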
yearlessDatetime = datetime.datetime.strptime(version, "%d%b%H%M%S")
now = datetime.datetime.now()
currYearDatetime = yearlessDatetime.replace(year=now.year)
if currYearDatetime > now:
return currYearDatetime.replace(year=now.year - 1)
else:
return currYearDatetime
def checkSanity(self, app):
if self.errorMessage: # We failed already, basically
raise plugins.TextTestError, self.errorMessage
runDir = self.findRunDir(app)
if not runDir:
raise plugins.TextTestWarning, "Could not find any runs matching " + app.description()
self.diag.info("Found run directory " + repr(runDir))
self.reconnDir = self.findAppDirUnder(app, runDir)
self.diag.info("Found application directory " + repr(self.reconnDir))
if not self.reconnDir:
raise plugins.TextTestWarning, "Could not find an application directory matching " + app.description() + \
" for the run directory found at " + runDir
for datedVersion in self.datedVersions:
app.addConfigEntry("unsaveable_version", datedVersion)
class ReconnectFilter(plugins.TextFilter):
def __init__(self, rootDir):
self.rootDir = rootDir
def acceptsTestCase(self, test):
return os.path.exists(os.path.join(self.rootDir, test.getRelPath()))
def acceptsTestSuite(self, suite):
return os.path.exists(os.path.join(self.rootDir, suite.getRelPath()))
class ReconnectTest(plugins.Action):
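    """Restores a test's previously saved state from the reconnection
    directory, copying the old result files across when the state is missing
    or a full recalculation was requested."""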
def __init__(self, rootDirToCopy, fullRecalculate):
self.rootDirToCopy = rootDirToCopy
self.fullRecalculate = fullRecalculate
self.diag = logging.getLogger("Reconnection")
def __repr__(self):
return "Reconnecting to"
def __call__(self, test):
newState = self.getReconnectState(test)
self.describe(test, self.getStateText(newState))
if newState:
test.changeState(newState)
def getReconnectState(self, test):
reconnLocation = os.path.join(self.rootDirToCopy, test.getRelPath())
self.diag.info("Reconnecting to test at " + reconnLocation)
if os.path.isdir(reconnLocation):
return self.getReconnectStateFrom(test, reconnLocation)
else:
return plugins.Unrunnable(briefText="no results", \
freeText="No file found to load results from under " + reconnLocation)
def getStateText(self, state):
if state:
return " (state " + state.category + ")"
else:
return " (recomputing)"
def getReconnectStateFrom(self, test, location):
stateToUse = None
stateFile = os.path.join(location, "framework_tmp", "teststate")
if os.path.isfile(stateFile):
newTmpPath = os.path.dirname(self.rootDirToCopy)
loaded, newState = test.getNewState(open(stateFile, "rU"), updatePaths=True, newTmpPath=newTmpPath)
self.diag.info("Loaded state file at " + stateFile + " - " + repr(loaded))
if loaded and self.modifyState(test, newState): # if we can't read it, recompute it
stateToUse = newState
if self.fullRecalculate or not stateToUse:
self.copyFiles(test, location)
return stateToUse
def copyFiles(self, test, reconnLocation):
test.makeWriteDirectory()
tmpDir = test.getDirectory(temporary=1)
self.diag.info("Copying files from " + reconnLocation + " to " + tmpDir)
for file in os.listdir(reconnLocation):
fullPath = os.path.join(reconnLocation, file)
if os.path.isfile(fullPath):
targetPath = os.path.join(tmpDir, os.path.basename(fullPath))
try:
shutil.copyfile(fullPath, targetPath)
except EnvironmentError, e:
# File could not be copied, may not have been readable
# Write the exception to it instead
targetFile = open(targetPath, "w")
targetFile.write("Failed to copy file - exception info follows :\n" + str(e) + "\n")
targetFile.close()
def modifyState(self, test, newState):
if self.fullRecalculate:
# Only pick up errors here, recalculate the rest. Don't notify until
# we're done with recalculation.
if newState.hasResults():
# Also pick up execution machines, we can't get them otherwise...
test.state.executionHosts = newState.executionHosts
return False # don't actually change the state
else:
newState.lifecycleChange = "" # otherwise it's regarded as complete
return True
else:
return True
def setUpApplication(self, app):
plugins.log.info("Reconnecting to test results in directory " + self.rootDirToCopy)
def setUpSuite(self, suite):
self.describe(suite)
| 46.443077 | 135 | 0.59792 |
4a24f56e1fb3731a1be2f5e878c6b82a8cddc520 | 2,799 | py | Python | lale/lib/rasl/_folds_for_monoid.py | Ingkarat/lale | faded6f80790629cfe216f72f1ceb1fb4f6c70bb | [
"Apache-2.0"
] | null | null | null | lale/lib/rasl/_folds_for_monoid.py | Ingkarat/lale | faded6f80790629cfe216f72f1ceb1fb4f6c70bb | [
"Apache-2.0"
] | null | null | null | lale/lib/rasl/_folds_for_monoid.py | Ingkarat/lale | faded6f80790629cfe216f72f1ceb1fb4f6c70bb | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.metrics
import sklearn.model_selection
from sklearn.utils.metaestimators import _safe_split
from lale.datasets.data_schemas import add_folds_for_monoid
class FoldsForMonoid:
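    """Precomputes the train/test folds for cross-validation and tags each
    fold's data so per-split lifted (monoid) results can be cached and reused
    via get_lifted/set_lifted."""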
def __init__(self, estimator, X, y, cv):
self.train_Xs = []
self.train_ys = []
self.test_Xs = []
self.test_ys = []
for split_id, (train, test) in enumerate(cv.split(X, y)):
train_X, train_y = _safe_split(estimator, X, y, train)
train_X = add_folds_for_monoid(train_X, (split_id, self))
train_y = add_folds_for_monoid(train_y, (split_id, self))
self.train_Xs.append(train_X)
self.train_ys.append(train_y)
test_X, test_y = _safe_split(estimator, X, y, test, train)
test_X = add_folds_for_monoid(test_X, (split_id, self))
test_y = add_folds_for_monoid(test_y, (split_id, self))
self.test_Xs.append(test_X)
self.test_ys.append(test_y)
self._split2op2lifted = [{} for _ in range(cv.get_n_splits())]
def get_lifted(self, split_id, op_id):
return self._split2op2lifted[split_id][op_id]
def has_lifted(self, split_id, op_id):
return op_id in self._split2op2lifted[split_id]
def set_lifted(self, split_id, op_id, lifted):
self._split2op2lifted[split_id][op_id] = lifted
def get_n_splits(self):
return len(self.train_Xs)
def cross_val_score_for_monoid(
estimator,
X,
y=None,
scoring=sklearn.metrics.accuracy_score,
cv=5,
return_estimators=False,
):
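    # Plain cross-validation loop over the precomputed folds: fit the estimator
    # on each training fold and score its predictions on the matching test fold.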
if isinstance(cv, int):
cv = sklearn.model_selection.StratifiedKFold(cv)
folds = FoldsForMonoid(estimator, X, y, cv)
estimators = []
cv_results = []
for i in range(folds.get_n_splits()):
trained_estimator = estimator.fit(folds.train_Xs[i], folds.train_ys[i])
if return_estimators:
estimators.append(trained_estimator)
predicted_values = trained_estimator.predict(folds.test_Xs[i])
scoring_result = scoring(folds.test_ys[i], predicted_values)
cv_results.append(scoring_result)
if return_estimators:
return estimators, cv_results
return cv_results
| 36.350649 | 79 | 0.689532 |
4a24f716918cb6215cfc9b7bd23b6225776eedac | 38 | py | Python | test/movie_test.py | cck325/free-style-project | f09073b0e15bad4e7326803e73306698e09e6fa8 | [
"MIT"
] | null | null | null | test/movie_test.py | cck325/free-style-project | f09073b0e15bad4e7326803e73306698e09e6fa8 | [
"MIT"
] | null | null | null | test/movie_test.py | cck325/free-style-project | f09073b0e15bad4e7326803e73306698e09e6fa8 | [
"MIT"
] | null | null | null | from move import now
def test_now()
| 7.6 | 20 | 0.736842 |
4a24f86cdc2e6e90fedd411d159a3556cef0d220 | 3,191 | py | Python | tests/contrib/decorators/test_decorators.py | yhzqb/kedro | 619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab | [
"Apache-2.0"
] | 1 | 2021-11-19T05:36:47.000Z | 2021-11-19T05:36:47.000Z | tests/contrib/decorators/test_decorators.py | yhzqb/kedro | 619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab | [
"Apache-2.0"
] | null | null | null | tests/contrib/decorators/test_decorators.py | yhzqb/kedro | 619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab | [
"Apache-2.0"
] | 1 | 2021-11-19T05:36:49.000Z | 2021-11-19T05:36:49.000Z | # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytest
from kedro.contrib.decorators import pandas_to_spark, retry, spark_to_pandas
from kedro.pipeline import node
@pytest.fixture()
def pandas_df():
return pd.DataFrame(
{
"Name": ["Alex", "Bob", "Clarke", "Dave"],
"Age": [31, 12, 65, 29],
"member": ["y", "n", "y", "n"],
}
)
@pytest.fixture()
def spark_df(pandas_df, spark_session):
return spark_session.createDataFrame(pandas_df)
@pytest.fixture()
def three_arg_node():
return node(
lambda arg1, arg2, arg3: [arg1, arg2, arg3],
["input1", "input2", "input3"],
["output1", "output2", "output3"],
)
@pytest.fixture()
def inputs(pandas_df, spark_df):
return {"input1": pandas_df, "input2": spark_df, "input3": pandas_df}
def test_pandas_to_spark(three_arg_node, spark_session, pandas_df, inputs):
res = three_arg_node.decorate(pandas_to_spark(spark_session)).run(inputs)
for output in ["output1", "output2", "output3"]:
assert res[output].toPandas().equals(pandas_df)
def test_spark_to_pandas(three_arg_node, pandas_df, inputs):
res = three_arg_node.decorate(spark_to_pandas()).run(inputs)
for output in ["output1", "output2", "output3"]:
assert res[output].equals(pandas_df)
def test_retry():
def _bigger(obj):
obj["value"] += 1
if obj["value"] >= 0:
return True
raise ValueError("Value less than 0")
decorated = node(_bigger, "in", "out").decorate(retry())
with pytest.raises(ValueError, match=r"Value less than 0"):
decorated.run({"in": {"value": -3}})
decorated2 = node(_bigger, "in", "out").decorate(retry(n_times=2))
assert decorated2.run({"in": {"value": -3}})
| 35.065934 | 77 | 0.699154 |
4a24f8cd6acb87c053dffb04f638b8ce5b85d92c | 7,334 | py | Python | tests/upgrades/test_upgrades.py | martriay/cairo-contracts | 6835b57a599f9dc1c2b9648a6ae4c6041bc4a5cc | [
"MIT"
] | 12 | 2022-03-23T18:47:13.000Z | 2022-03-26T15:14:36.000Z | tests/upgrades/test_upgrades.py | martriay/cairo-contracts | 6835b57a599f9dc1c2b9648a6ae4c6041bc4a5cc | [
"MIT"
] | null | null | null | tests/upgrades/test_upgrades.py | martriay/cairo-contracts | 6835b57a599f9dc1c2b9648a6ae4c6041bc4a5cc | [
"MIT"
] | null | null | null | import pytest
import asyncio
from starkware.starknet.testing.starknet import Starknet
from utils import (
Signer, assert_revert, assert_event_emitted, get_contract_def, cached_contract
)
# random test values
VALUE_1 = 123
VALUE_2 = 987
signer = Signer(123456789987654321)
@pytest.fixture(scope='module')
def event_loop():
return asyncio.new_event_loop()
@pytest.fixture(scope='module')
def contract_defs():
account_def = get_contract_def('openzeppelin/account/Account.cairo')
v1_def = get_contract_def('tests/mocks/upgrades_v1_mock.cairo')
v2_def = get_contract_def('tests/mocks/upgrades_v2_mock.cairo')
proxy_def = get_contract_def('openzeppelin/upgrades/Proxy.cairo')
return account_def, v1_def, v2_def, proxy_def
@pytest.fixture(scope='module')
async def proxy_init(contract_defs):
account_def, dummy_v1_def, dummy_v2_def, proxy_def = contract_defs
starknet = await Starknet.empty()
account1 = await starknet.deploy(
contract_def=account_def,
constructor_calldata=[signer.public_key]
)
account2 = await starknet.deploy(
contract_def=account_def,
constructor_calldata=[signer.public_key]
)
v1 = await starknet.deploy(
contract_def=dummy_v1_def,
constructor_calldata=[]
)
v2 = await starknet.deploy(
contract_def=dummy_v2_def,
constructor_calldata=[]
)
proxy = await starknet.deploy(
contract_def=proxy_def,
constructor_calldata=[v1.contract_address]
)
return (
starknet.state,
account1,
account2,
v1,
v2,
proxy
)
@pytest.fixture
def proxy_factory(contract_defs, proxy_init):
account_def, dummy_v1_def, dummy_v2_def, proxy_def = contract_defs
state, account1, account2, v1, v2, proxy = proxy_init
_state = state.copy()
account1 = cached_contract(_state, account_def, account1)
account2 = cached_contract(_state, account_def, account2)
v1 = cached_contract(_state, dummy_v1_def, v1)
v2 = cached_contract(_state, dummy_v2_def, v2)
proxy = cached_contract(_state, proxy_def, proxy)
return account1, account2, v1, v2, proxy
@pytest.fixture
async def after_upgrade(proxy_factory):
admin, other, v1, v2, proxy = proxy_factory
# initialize
await signer.send_transaction(
admin, proxy.contract_address, 'initializer', [
admin.contract_address
]
)
# set value
await signer.send_transaction(
admin, proxy.contract_address, 'set_value_1', [
VALUE_1
]
)
# upgrade
await signer.send_transaction(
admin, proxy.contract_address, 'upgrade', [
v2.contract_address
]
)
return admin, other, v1, v2, proxy
@pytest.mark.asyncio
async def test_initializer(proxy_factory):
admin, _, _, _, proxy = proxy_factory
await signer.send_transaction(
admin, proxy.contract_address, 'initializer', [
admin.contract_address
]
)
@pytest.mark.asyncio
async def test_initializer_already_initialized(proxy_factory):
admin, _, _, _, proxy = proxy_factory
await signer.send_transaction(
admin, proxy.contract_address, 'initializer', [
admin.contract_address
]
)
await assert_revert(
signer.send_transaction(
admin, proxy.contract_address, 'initializer', [
admin.contract_address
]
),
reverted_with='Proxy: contract already initialized'
)
@pytest.mark.asyncio
async def test_upgrade(proxy_factory):
admin, _, _, v2, proxy = proxy_factory
# initialize implementation
await signer.send_transaction(
admin, proxy.contract_address, 'initializer', [
admin.contract_address
]
)
# set value
await signer.send_transaction(
admin, proxy.contract_address, 'set_value_1', [
VALUE_1
]
)
# check value
execution_info = await signer.send_transaction(
admin, proxy.contract_address, 'get_value_1', []
)
assert execution_info.result.response == [VALUE_1, ]
# upgrade
await signer.send_transaction(
admin, proxy.contract_address, 'upgrade', [
v2.contract_address
]
)
# check value
execution_info = await signer.send_transaction(
admin, proxy.contract_address, 'get_value_1', []
)
assert execution_info.result.response == [VALUE_1, ]
@pytest.mark.asyncio
async def test_upgrade_event(proxy_factory):
admin, _, _, v2, proxy = proxy_factory
# initialize implementation
await signer.send_transaction(
admin, proxy.contract_address, 'initializer', [
admin.contract_address
]
)
# upgrade
tx_exec_info = await signer.send_transaction(
admin, proxy.contract_address, 'upgrade', [
v2.contract_address
]
)
# check event
assert_event_emitted(
tx_exec_info,
from_address=proxy.contract_address,
name='Upgraded',
data=[
v2.contract_address
]
)
@pytest.mark.asyncio
async def test_upgrade_from_non_admin(proxy_factory):
admin, non_admin, _, v2, proxy = proxy_factory
# initialize implementation
await signer.send_transaction(
admin, proxy.contract_address, 'initializer', [
admin.contract_address
]
)
# upgrade should revert
await assert_revert(
signer.send_transaction(
non_admin, proxy.contract_address, 'upgrade', [
v2.contract_address
]
),
reverted_with="Proxy: caller is not admin"
)
# Using `after_upgrade` fixture henceforth
@pytest.mark.asyncio
async def test_implementation_v2(after_upgrade):
admin, _, _, v2, proxy = after_upgrade
# check implementation address
execution_info = await signer.send_transaction(
admin, proxy.contract_address, 'get_implementation', []
)
assert execution_info.result.response == [v2.contract_address]
# check admin
execution_info = await signer.send_transaction(
admin, proxy.contract_address, 'get_admin', []
)
assert execution_info.result.response == [admin.contract_address]
# check value
execution_info = await signer.send_transaction(
admin, proxy.contract_address, 'get_value_1', []
)
assert execution_info.result.response == [VALUE_1, ]
@pytest.mark.asyncio
async def test_set_admin(after_upgrade):
admin, new_admin, _, _, proxy = after_upgrade
# change admin
await signer.send_transaction(
admin, proxy.contract_address, 'set_admin', [
new_admin.contract_address
]
)
# check admin
execution_info = await signer.send_transaction(
admin, proxy.contract_address, 'get_admin', []
)
assert execution_info.result.response == [new_admin.contract_address]
@pytest.mark.asyncio
async def test_set_admin_from_non_admin(after_upgrade):
_, non_admin, _, _, proxy = after_upgrade
# change admin should revert
await assert_revert(
signer.send_transaction(
non_admin, proxy.contract_address, 'set_admin', [
non_admin.contract_address
]
)
)
| 26.099644 | 82 | 0.665394 |
4a24f9a1563d6252902ec3c44c24d90d680eed64 | 8,345 | py | Python | test/functional/feature_proxy.py | merelcoin/merelcoin | 088b1f9f653363238aafc41d4218a3cd0d7ad968 | [
"MIT"
] | null | null | null | test/functional/feature_proxy.py | merelcoin/merelcoin | 088b1f9f653363238aafc41d4218a3cd0d7ad968 | [
"MIT"
] | null | null | null | test/functional/feature_proxy.py | merelcoin/merelcoin | 088b1f9f653363238aafc41d4218a3cd0d7ad968 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test merelcoind with different proxy configuration.
Test plan:
- Start merelcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on merelcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create merelcoinds that connect to them
- Manipulate the merelcoinds using addnode (onetry) and observe the effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:9333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:9333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
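# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original test): the RFC 1928
# byte layout of the SOCKS5 CONNECT request that the checks above decode into
# cmd.atyp / cmd.addr / cmd.port.  The helper name build_socks5_connect is
# hypothetical and exists only for illustration.
def build_socks5_connect(host, port):
    # VER=0x05, CMD=0x01 (CONNECT), RSV=0x00, ATYP=0x03 (DOMAINNAME),
    # then a one-byte length-prefixed hostname and a big-endian 16-bit port.
    return b"\x05\x01\x00\x03" + bytes([len(host)]) + host + port.to_bytes(2, "big")
# For example, build_socks5_connect(b"15.61.23.23", 1234) is the request the
# embedded Socks5Server parses in the IPv4 case of node_test() above.
# ---------------------------------------------------------------------------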
if __name__ == '__main__':
ProxyTest().main()
| 41.311881 | 121 | 0.626123 |
4a24fbe2dd700a9378c18f31d3c8ca69a55b7ca0 | 1,911 | py | Python | pdfmajor/interpreter/commands/state/PDFGraphicState/__init__.py | asosnovsky/pdfmajor | 7e24c64b5b4fdc84c12b2f78dcaab0e1aa07f4ad | ["MIT"] | 23 | 2019-01-13T23:32:24.000Z | 2021-07-08T04:29:15.000Z | pdfmajor/interpreter/commands/state/PDFGraphicState/__init__.py | asosnovsky/pdfmajor | 7e24c64b5b4fdc84c12b2f78dcaab0e1aa07f4ad | ["MIT"] | 3 | 2019-08-09T18:42:01.000Z | 2019-12-13T15:43:24.000Z | pdfmajor/interpreter/commands/state/PDFGraphicState/__init__.py | asosnovsky/pdfmajor | 7e24c64b5b4fdc84c12b2f78dcaab0e1aa07f4ad | ["MIT"] | 2 | 2020-01-09T11:18:20.000Z | 2020-03-24T06:02:30.000Z |
from .PDFColor import PDFColor, PDFColorSpace
from .PDFColorSpace import PREDEFINED_COLORSPACE
class PDFGraphicState(object):
def __init__(self):
self.linewidth = 0
self.linecap = None
self.linejoin = None
self.miterlimit = None
self.dash = None
self.intent = None
self.flatness = None
self.scolor:PDFColor = PDFColor(None)
self.ncolor:PDFColor = PDFColor(None)
self.scolspace:PDFColorSpace = None
self.ncolspace:PDFColorSpace = None
def set_stroke_color(self, colspace: PDFColorSpace, *values):
if colspace is None:
colspace = self.scolspace
self.scolor = PDFColor(colspace, *values)
def set_nostroke_color(self, colspace: PDFColorSpace, *values):
if colspace is None:
colspace = self.ncolspace
self.ncolor = PDFColor(colspace, *values)
def copy(self):
obj = PDFGraphicState()
obj.linewidth = self.linewidth
obj.linecap = self.linecap
obj.linejoin = self.linejoin
obj.miterlimit = self.miterlimit
obj.dash = self.dash
obj.intent = self.intent
obj.flatness = self.flatness
obj.scolor = self.scolor.copy()
obj.ncolor = self.ncolor.copy()
obj.scolspace = self.scolspace
obj.ncolspace = self.ncolspace
return obj
def __repr__(self):
return ('<PDFGraphicState linewidth=%r, linecap=%r, linejoin=%r, '
' miterlimit=%r, dash=%r, intent=%r, flatness=%r, '
' stroking color=%r, non stroking color=%r'
'>' %
(
self.linewidth, self.linecap, self.linejoin,
self.miterlimit, self.dash, self.intent, self.flatness,
str(self.scolor), str(self.ncolor)
)
)
| 34.745455 | 75 | 0.579801 |
4a24fc8cc508928081a834186aa743c80cfd6c9f | 864 | py | Python | bluesky_queueserver/__init__.py | dmgav/bluesky-queueserver | 75b941bb1764f135ac492985b5bb79f673121dec | ["BSD-3-Clause"] | 8 | 2020-01-29T23:11:09.000Z | 2022-03-31T14:14:21.000Z | bluesky_queueserver/__init__.py | dmgav/bluesky-queueserver | 75b941bb1764f135ac492985b5bb79f673121dec | ["BSD-3-Clause"] | 134 | 2020-09-01T22:02:39.000Z | 2022-03-30T17:56:59.000Z | bluesky_queueserver/__init__.py | dmgav/bluesky-queueserver | 75b941bb1764f135ac492985b5bb79f673121dec | ["BSD-3-Clause"] | 14 | 2020-09-02T20:36:36.000Z | 2021-08-06T16:59:08.000Z |
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
from .manager.comms import ZMQCommSendAsync, ZMQCommSendThreads, CommTimeoutError # noqa: E402, F401
from .manager.annotation_decorator import parameter_annotation_decorator # noqa: E402, F401
from .manager.output_streaming import ReceiveConsoleOutput, ReceiveConsoleOutputAsync # noqa: E402, F401
from .manager.profile_ops import validate_plan # noqa: E402, F401
from .manager.profile_ops import bind_plan_arguments # noqa: E402, F401
from .manager.profile_ops import construct_parameters # noqa: E402, F401
from .manager.profile_ops import format_text_descriptions # noqa: E402, F401
from .manager.profile_tools import is_re_worker_active # noqa: E402, F401
from .manager.profile_tools import set_re_worker_active, clear_re_worker_active # noqa: E402, F401
| 50.823529 | 105 | 0.820602 |
4a24fcbb76135a2179b0b04df9a43deb48a09ac2 | 22,729 | py | Python | code/client/munkilib/keychain.py | backwardn/munki | f1b5162841475a9486983faabff22504e14c06ae | ["Apache-2.0"] | 1 | 2020-12-17T19:52:42.000Z | 2020-12-17T19:52:42.000Z | code/client/munkilib/keychain.py | backwardn/munki | f1b5162841475a9486983faabff22504e14c06ae | ["Apache-2.0"] | null | null | null | code/client/munkilib/keychain.py | backwardn/munki | f1b5162841475a9486983faabff22504e14c06ae | ["Apache-2.0"] | null | null | null |
# encoding: utf-8
#
# Copyright 2014-2020 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
keychain
Created by Greg Neagle on 2014-06-09.
Incorporating work and ideas from Michael Lynn here:
https://gist.github.com/pudquick/7704254
and here:
https://gist.github.com/pudquick/836a19b5ff17c5b7640d#file-cert_tricks-py
"""
from __future__ import absolute_import, print_function
import base64
import hashlib
import os
import subprocess
from . import display
from . import osutils
from . import prefs
from .wrappers import unicode_or_str
DEFAULT_KEYCHAIN_NAME = 'munki.keychain'
DEFAULT_KEYCHAIN_PASSWORD = 'munki'
KEYCHAIN_DIRECTORY = os.path.join(
prefs.pref('ManagedInstallDir'), 'Keychains')
def read_file(pathname):
'''Return the contents of pathname as a string'''
try:
fileobj = open(pathname, mode='r')
data = fileobj.read()
fileobj.close()
return data
except (OSError, IOError) as err:
display.display_error(
'Could not read %s: %s', pathname, err)
return ''
def write_file(stringdata, pathname):
'''Writes stringdata to pathname.
Returns the pathname on success, empty string on failure.'''
try:
fileobject = open(pathname, mode='w')
fileobject.write(stringdata)
fileobject.close()
return pathname
except (OSError, IOError) as err:
display.display_error(
'Couldn\'t write %s to %s: %s', stringdata, pathname, err)
return ''
def pem_cert_bytes(cert_path):
'''Read in a base64 pem file, return raw bytestring of embedded
certificate'''
cert_data = read_file(cert_path)
if not (('-----BEGIN CERTIFICATE-----' in cert_data) and
('-----END CERTIFICATE-----' in cert_data)):
raise Exception('Certificate does not appear to be .pem file')
core_data = cert_data.split(
'-----BEGIN CERTIFICATE', 1)[-1].replace('\r', '\n').split(
'-----\n', 1)[-1].split('\n-----END CERTIFICATE-----', 1)[0]
return base64.b64decode(''.join(core_data.split('\n')))
def pem_cert_sha1_digest(cert_path):
'''Return SHA1 digest for pem certificate at path'''
try:
raw_bytes = pem_cert_bytes(cert_path)
return hashlib.sha1(raw_bytes).hexdigest().upper()
except BaseException as err:
display.display_error('Error reading %s: %s' % (cert_path, err))
return None
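# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): how the two
# helpers above fit together.  The PEM armor is stripped, the base64 body is
# decoded, and the resulting DER bytes are hashed with SHA-1.  The payload
# below is arbitrary bytes rather than a real certificate, which is enough
# here because pem_cert_bytes() only base64-decodes the body.
def _demo_pem_digest():
    import base64
    import hashlib
    der = b'not-a-real-certificate'
    pem = ('-----BEGIN CERTIFICATE-----\n'
           + base64.b64encode(der).decode('ascii')
           + '\n-----END CERTIFICATE-----\n')
    body = pem.split('-----BEGIN CERTIFICATE', 1)[-1].split(
        '-----\n', 1)[-1].split('\n-----END CERTIFICATE-----', 1)[0]
    assert base64.b64decode(''.join(body.split('\n'))) == der
    return hashlib.sha1(der).hexdigest().upper()
# ---------------------------------------------------------------------------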
def get_munki_server_cert_info():
'''Attempt to get information we need from Munki's preferences or
defaults. Returns a dictionary.'''
cert_info = {}
# get server CA cert if it exists so we can verify the Munki server
cert_info['ca_cert_path'] = None
cert_info['ca_dir_path'] = None
if prefs.pref('SoftwareRepoCAPath'):
ca_path = prefs.pref('SoftwareRepoCAPath')
if os.path.isfile(ca_path):
cert_info['ca_cert_path'] = ca_path
elif os.path.isdir(ca_path):
cert_info['ca_dir_path'] = ca_path
if prefs.pref('SoftwareRepoCACertificate'):
cert_info['ca_cert_path'] = prefs.pref(
'SoftwareRepoCACertificate')
if cert_info['ca_cert_path'] is None:
ca_cert_path = os.path.join(
prefs.pref('ManagedInstallDir'), 'certs', 'ca.pem')
if os.path.exists(ca_cert_path):
cert_info['ca_cert_path'] = ca_cert_path
return cert_info
def get_munki_client_cert_info():
'''Attempt to get information we need from Munki's preferences or
defaults. Returns a dictionary.'''
cert_info = {}
cert_info['client_cert_path'] = None
cert_info['client_key_path'] = None
cert_info['site_urls'] = []
# get client cert if it exists
if prefs.pref('UseClientCertificate'):
cert_info['client_cert_path'] = (
prefs.pref('ClientCertificatePath') or None)
cert_info['client_key_path'] = prefs.pref('ClientKeyPath') or None
if not cert_info['client_cert_path']:
for name in ['cert.pem', 'client.pem', 'munki.pem']:
client_cert_path = os.path.join(
prefs.pref('ManagedInstallDir'), 'certs', name)
if os.path.exists(client_cert_path):
cert_info['client_cert_path'] = client_cert_path
break
site_urls = []
for key in ['SoftwareRepoURL', 'PackageURL', 'CatalogURL',
'ManifestURL', 'IconURL', 'ClientResourceURL']:
url = prefs.pref(key)
if url:
site_urls.append(url.rstrip('/') + '/')
cert_info['site_urls'] = site_urls
return cert_info
def get_client_cert_common_name():
'''Returns the common name for the client cert, if any'''
common_name = None
cert_info = get_munki_client_cert_info()
client_cert_path = cert_info['client_cert_path']
if client_cert_path and os.path.exists(client_cert_path):
cmd = ['/usr/bin/openssl', 'x509', '-noout', '-subject', '-in',
client_cert_path]
proc = subprocess.Popen(cmd,
bufsize=-1, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out = proc.communicate()[0].decode("UTF-8")
if out:
for i in out.split('/'):
if i.startswith('CN='):
common_name = i[3:].rstrip()
return common_name
def add_ca_certs_to_system_keychain(cert_info=None):
'''Adds any CA certs as trusted root certs to System.keychain'''
if not cert_info:
cert_info = get_munki_server_cert_info()
ca_cert_path = cert_info['ca_cert_path']
ca_dir_path = cert_info['ca_dir_path']
if not ca_cert_path and not ca_dir_path:
# no CA certs, so nothing to do
display.display_debug2(
'No CA cert info provided, so nothing to add to System keychain.')
return
else:
display.display_debug2('CA cert path: %s', ca_cert_path)
display.display_debug2('CA dir path: %s', ca_dir_path)
system_keychain = "/Library/Keychains/System.keychain"
if not os.path.exists(system_keychain):
display.display_warning('%s not found.', system_keychain)
return
# Add CA certs. security add-trusted-cert does the right thing even if
# the cert is already added, so we just do it; checking to see if the
# cert is already added is hard.
certs_to_add = []
if ca_cert_path:
certs_to_add.append(ca_cert_path)
if ca_dir_path:
# add any pem files in the ca_dir_path directory
for item in os.listdir(ca_dir_path):
if item.endswith('.pem'):
certs_to_add.append(os.path.join(ca_dir_path, item))
for cert in certs_to_add:
display.display_debug1('Adding CA cert %s...', cert)
try:
output = security('add-trusted-cert', '-d',
'-k', system_keychain, cert)
if output:
display.display_debug2(output)
except SecurityError as err:
display.display_error(
'Could not add CA cert %s into System keychain: %s', cert, err)
display.display_detail('System.keychain updated.')
def make_client_keychain(cert_info=None):
'''Builds a client cert keychain from existing client certs'''
if not cert_info:
# just grab data from Munki's preferences/defaults
cert_info = get_munki_client_cert_info()
client_cert_path = cert_info['client_cert_path']
client_key_path = cert_info['client_key_path']
site_urls = cert_info['site_urls']
if not client_cert_path:
# no client, so nothing to do
display.display_debug1(
'No client cert info provided, '
'so no client keychain will be created.')
return
else:
display.display_debug1('Client cert path: %s', client_cert_path)
display.display_debug1('Client key path: %s', client_key_path)
# to do some of the following options correctly, we need to be root
# and have root's home.
# check to see if we're root
if os.geteuid() != 0:
display.display_error(
'Can\'t make our client keychain unless we are root!')
return
# switch HOME if needed to root's home
original_home = os.environ.get('HOME')
if original_home:
os.environ['HOME'] = os.path.expanduser('~root')
keychain_pass = (
prefs.pref('KeychainPassword') or DEFAULT_KEYCHAIN_PASSWORD)
abs_keychain_path = get_keychain_path()
if os.path.exists(abs_keychain_path):
os.unlink(abs_keychain_path)
if not os.path.exists(os.path.dirname(abs_keychain_path)):
os.makedirs(os.path.dirname(abs_keychain_path))
# create a new keychain
display.display_debug1('Creating client keychain...')
try:
output = security('create-keychain',
'-p', keychain_pass, abs_keychain_path)
if output:
display.display_debug2(output)
except SecurityError as err:
display.display_error(
'Could not create keychain %s: %s', abs_keychain_path, err)
if original_home:
# switch it back
os.environ['HOME'] = original_home
return
# Ensure the keychain is in the search path and unlocked
added_keychain = add_to_keychain_list(abs_keychain_path)
unlock_and_set_nonlocking(abs_keychain_path)
# Add client cert (and optionally key)
client_cert_file = None
combined_pem = None
if client_key_path:
# combine client cert and private key before we import
cert_data = read_file(client_cert_path)
key_data = read_file(client_key_path)
# write the combined data
combined_pem = os.path.join(osutils.tmpdir(), 'combined.pem')
if write_file(cert_data + key_data, combined_pem):
client_cert_file = combined_pem
else:
display.display_error(
'Could not combine client cert and key for import!')
else:
client_cert_file = client_cert_path
if client_cert_file:
# client_cert_file is combined_pem or client_cert_file
display.display_debug2('Importing client cert and key...')
try:
output = security(
'import', client_cert_file, '-A', '-k', abs_keychain_path)
if output:
display.display_debug2(output)
except SecurityError as err:
display.display_error(
'Could not import %s: %s', client_cert_file, err)
if combined_pem:
# we created this; we should clean it up
try:
os.unlink(combined_pem)
except (OSError, IOError):
pass
id_hash = pem_cert_sha1_digest(client_cert_path)
if not id_hash:
display.display_error(
'Cannot create keychain identity preference.')
else:
# set up identity preference(s) linking the identity (cert and key)
# to the various urls
display.display_debug1('Creating identity preferences...')
try:
output = security('default-keychain')
if output:
display.display_debug2('Default keychain is %s', output)
# One is defined, remember the path
default_keychain = [
x.strip().strip('"')
for x in output.split('\n') if x.strip()][0]
except SecurityError as err:
# error raised if there is no default
default_keychain = None
# Temporarily assign the default keychain to ours
try:
output = security(
'default-keychain', '-s', abs_keychain_path)
if output:
display.display_debug2(
'Attempting to set default keychain to %s resulted in: %s',
abs_keychain_path, output)
except SecurityError as err:
display.display_error(
                'Could not set default keychain to %s: %s'
% (abs_keychain_path, err))
default_keychain = None
# Create the identity preferences
for url in site_urls:
try:
display.display_debug2(
'Adding identity preference for %s...' % url)
output = security(
'set-identity-preference',
'-s', url, '-Z', id_hash, abs_keychain_path)
if output:
display.display_debug2(
'security set-identity-preference output: ' + output)
except SecurityError as err:
display.display_error(
'Setting identity preference for %s failed: %s'
% (url, err))
if default_keychain:
# We originally had a different default, set it back
output = security(
'default-keychain', '-s', default_keychain)
if output:
display.display_debug2(
'Attempting to set default keychain to %s resulted in: %s',
default_keychain, output)
# we're done, clean up.
if added_keychain:
remove_from_keychain_list(abs_keychain_path)
if original_home:
# switch it back
os.environ['HOME'] = original_home
display.display_info(
'Completed creation of client keychain at %s' % abs_keychain_path)
def add_to_keychain_list(keychain_path):
'''Ensure the keychain is in the search path. Returns True if we
added the keychain to the list.'''
added_keychain = False
output = security('list-keychains', '-d', 'user')
# Split the output and strip it of whitespace and leading/trailing
# quotes, the result are absolute paths to keychains
# Preserve the order in case we need to append to them
search_keychains = [x.strip().strip('"')
for x in output.split('\n') if x.strip()]
if not keychain_path in search_keychains:
# Keychain is not in the search paths
display.display_debug2('Adding client keychain to search path...')
search_keychains.append(keychain_path)
try:
output = security(
'list-keychains', '-d', 'user', '-s', *search_keychains)
if output:
display.display_debug2(output)
added_keychain = True
except SecurityError as err:
display.display_error(
'Could not add keychain %s to keychain list: %s',
keychain_path, err)
added_keychain = False
return added_keychain
def remove_from_keychain_list(keychain_path):
'''Remove keychain from the list of keychains'''
output = security('list-keychains', '-d', 'user')
# Split the output and strip it of whitespace and leading/trailing
# quotes, the result are absolute paths to keychains
# Preserve the order in case we need to append to them
search_keychains = [x.strip().strip('"')
for x in output.split('\n') if x.strip()]
if keychain_path in search_keychains:
# Keychain is in the search path
display.display_debug1(
'Removing %s from search path...', keychain_path)
filtered_keychains = [keychain for keychain in search_keychains
if keychain != keychain_path]
try:
output = security(
'list-keychains', '-d', 'user', '-s', *filtered_keychains)
if output:
display.display_debug2(output)
except SecurityError as err:
display.display_error(
'Could not set new keychain list: %s', err)
def unlock_and_set_nonlocking(keychain_path):
'''Unlocks the keychain and sets it to non-locking'''
keychain_pass = (
prefs.pref('KeychainPassword') or DEFAULT_KEYCHAIN_PASSWORD)
try:
output = security(
'unlock-keychain', '-p', keychain_pass, keychain_path)
if output:
display.display_debug2(output)
except SecurityError as err:
# some problem unlocking the keychain.
display.display_error(
'Could not unlock %s: %s.', keychain_path, err)
# delete it
try:
os.unlink(keychain_path)
except OSError as err:
display.display_error(
'Could not remove %s: %s.', keychain_path, err)
return
try:
output = security('set-keychain-settings', keychain_path)
if output:
display.display_debug2(output)
except SecurityError as err:
display.display_error(
'Could not set keychain settings for %s: %s',
keychain_path, err)
def client_certs_exist():
    '''Returns True if a client cert exists
that we need to import into a keychain'''
cert_info = get_munki_client_cert_info()
client_cert_path = cert_info['client_cert_path']
# we must have a client cert; we don't at this stage need
# to check for a key
return client_cert_path and os.path.exists(client_cert_path)
def client_certs_newer_than_keychain():
'''Returns True if we have client certs that are newer than our
client keychain, False otherwise'''
# pylint: disable=invalid-name
cert_info = get_munki_client_cert_info()
client_cert_path = cert_info['client_cert_path']
client_key_path = cert_info['client_key_path']
keychain_path = get_keychain_path()
if not client_cert_path or not os.path.exists(client_cert_path):
return False
if not os.path.exists(keychain_path):
return False
keychain_mod_time = os.stat(keychain_path).st_mtime
if os.stat(client_cert_path).st_mtime > keychain_mod_time:
return True
if client_key_path and os.path.exists(client_key_path):
if os.stat(client_key_path).st_mtime > keychain_mod_time:
return True
return False
def debug_output():
'''Debugging output for keychain'''
try:
display.display_debug1('***Keychain list***')
display.display_debug1(security('list-keychains', '-d', 'user'))
display.display_debug1('***Default keychain info***')
display.display_debug1(security('default-keychain', '-d', 'user'))
keychainfile = get_keychain_path()
if os.path.exists(keychainfile):
display.display_debug1('***Info for %s***' % keychainfile)
display.display_debug1(
security('show-keychain-info', keychainfile))
except SecurityError as err:
display.display_error(unicode_or_str(err))
class SecurityError(Exception):
'''An exception class to raise if there is an error running
/usr/bin/security'''
pass
def security(verb_name, *args):
'''Runs the security binary with args. Returns stdout.
Raises SecurityError for a non-zero return code'''
cmd = ['/usr/bin/security', verb_name] + list(args)
proc = subprocess.Popen(
cmd, shell=False, bufsize=-1,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output, err) = proc.communicate()
if proc.returncode:
raise SecurityError('%s: %s' % (proc.returncode, err.decode("UTF-8")))
return (output or err).decode("UTF-8")
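# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): typical use
# of the security() wrapper above.  /usr/bin/security only exists on macOS,
# so the call is guarded; error handling mirrors how the rest of this module
# uses the wrapper.
def _demo_list_keychains():
    import sys
    if sys.platform != 'darwin':
        return None
    try:
        return security('list-keychains', '-d', 'user')
    except SecurityError as err:
        display.display_error(unicode_or_str(err))
        return None
# ---------------------------------------------------------------------------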
def get_keychain_path():
'''Returns an absolute path for our keychain'''
keychain_name = (
prefs.pref('KeychainName') or DEFAULT_KEYCHAIN_NAME)
# If we have an odd path that appears to be all directory and no
# file name, revert to default filename
if not os.path.basename(keychain_name):
keychain_name = DEFAULT_KEYCHAIN_NAME
# Check to make sure it's just a simple file name, no directory
# information
if os.path.dirname(keychain_name):
# keychain name should be just the filename,
# so we'll drop down to the base name
keychain_name = os.path.basename(
keychain_name).strip() or DEFAULT_KEYCHAIN_NAME
# Correct the filename to include '.keychain' if not already present
if not keychain_name.lower().endswith('.keychain'):
keychain_name += '.keychain'
keychain_path = os.path.realpath(
os.path.join(KEYCHAIN_DIRECTORY, keychain_name))
return keychain_path
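# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the
# filename-sanitizing rules applied by get_keychain_path() above, restated on
# their own so they can be exercised without touching Munki preferences.
def _demo_sanitize_keychain_name(name):
    import os
    if not os.path.basename(name):
        name = DEFAULT_KEYCHAIN_NAME
    if os.path.dirname(name):
        name = os.path.basename(name).strip() or DEFAULT_KEYCHAIN_NAME
    if not name.lower().endswith('.keychain'):
        name += '.keychain'
    return name
# For example, '../tmp/evil' becomes 'evil.keychain' and 'munki' becomes
# 'munki.keychain'; the result is then joined onto KEYCHAIN_DIRECTORY.
# ---------------------------------------------------------------------------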
class MunkiKeychain(object):
'''Wrapper class for handling the client keychain'''
# pylint: disable=too-few-public-methods
keychain_path = None
added_keychain = False
def __init__(self):
'''Adds CA certs as trusted to System keychain.
Unlocks the munki.keychain if it exists.
Makes sure the munki.keychain is in the search list.
Creates a new client keychain if needed.'''
add_ca_certs_to_system_keychain()
self.keychain_path = get_keychain_path()
if client_certs_exist() and os.path.exists(self.keychain_path):
# we have client certs; we should build a keychain using them
try:
os.unlink(self.keychain_path)
except (OSError, IOError) as err:
display.display_error(
'Could not remove pre-existing %s: %s'
% (self.keychain_path, err))
if os.path.exists(self.keychain_path):
# ensure existing keychain is available for use
self.added_keychain = add_to_keychain_list(self.keychain_path)
unlock_and_set_nonlocking(self.keychain_path)
if not os.path.exists(self.keychain_path):
# try making a new keychain
make_client_keychain()
if os.path.exists(self.keychain_path):
self.added_keychain = add_to_keychain_list(self.keychain_path)
unlock_and_set_nonlocking(self.keychain_path)
if not os.path.exists(self.keychain_path):
# give up
self.keychain_path = None
self.added_keychain = False
def __del__(self):
'''Remove our keychain from the keychain list if we added it'''
if self.added_keychain:
remove_from_keychain_list(self.keychain_path)
if __name__ == '__main__':
print('This is a library of support tools for the Munki Suite.')
| 38.393581 | 79 | 0.63276 |
4a24fd88ed68405ef7c2fcbe7ba28fab7905b762 | 9,234 | py | Python | mitmproxy/proxy/layer.py | timgates42/mitmproxy | b0a258f236d495efa8f874374dabdb60d5ee3a62 | ["MIT"] | null | null | null | mitmproxy/proxy/layer.py | timgates42/mitmproxy | b0a258f236d495efa8f874374dabdb60d5ee3a62 | ["MIT"] | 18 | 2020-12-28T20:12:26.000Z | 2022-03-15T20:44:40.000Z | mitmproxy/proxy/layer.py | timgates42/mitmproxy | b0a258f236d495efa8f874374dabdb60d5ee3a62 | ["MIT"] | null | null | null |
"""
Base class for protocol layers.
"""
import collections
import textwrap
from abc import abstractmethod
from typing import Optional, List, ClassVar, Deque, NamedTuple, Generator, Any, TypeVar
from mitmproxy.proxy import commands, events
from mitmproxy.proxy.commands import Command, Hook
from mitmproxy.proxy.context import Connection, Context
T = TypeVar('T')
CommandGenerator = Generator[Command, Any, T]
"""
A function annotated with CommandGenerator[bool] may yield commands and ultimately return a boolean value.
"""
class Paused(NamedTuple):
"""
State of a layer that's paused because it is waiting for a command reply.
"""
command: commands.Command
generator: CommandGenerator
class Layer:
"""
The base class for all protocol layers.
Layers interface with their child layer(s) by calling .handle_event(event),
which returns a list (more precisely: a generator) of commands.
Most layers only implement ._handle_event, which is called by the default implementation of .handle_event.
The default implementation allows layers to emulate blocking code:
When ._handle_event yields a command that has its blocking attribute set to True, .handle_event pauses
the execution of ._handle_event and waits until it is called with the corresponding CommandReply. All events
encountered in the meantime are buffered and replayed after execution is resumed.
The result is code that looks like blocking code, but is not blocking:
def _handle_event(self, event):
err = yield OpenConnection(server) # execution continues here after a connection has been established.
"""
__last_debug_message: ClassVar[str] = ""
context: Context
_paused: Optional[Paused]
_paused_event_queue: Deque[events.Event]
debug: Optional[str] = None
"""
Enable debug logging by assigning a prefix string for log messages.
Different amounts of whitespace for different layers work well.
"""
def __init__(self, context: Context) -> None:
self.context = context
self.context.layers.append(self)
self._paused = None
self._paused_event_queue = collections.deque()
show_debug_output = getattr(context.options, "proxy_debug", False)
if show_debug_output: # pragma: no cover
self.debug = " " * len(context.layers)
def __repr__(self):
statefun = getattr(self, "state", self._handle_event)
state = getattr(statefun, "__name__", "")
state = state.replace("state_", "")
if state == "_handle_event":
state = ""
else:
state = f"state: {state}"
return f"{type(self).__name__}({state})"
def __debug(self, message):
if len(message) > 512:
message = message[:512] + "…"
if Layer.__last_debug_message == message:
message = message.split("\n", 1)[0].strip()
if len(message) > 256:
message = message[:256] + "…"
else:
Layer.__last_debug_message = message
return commands.Log(
textwrap.indent(message, self.debug),
"debug"
)
@abstractmethod
def _handle_event(self, event: events.Event) -> CommandGenerator[None]:
"""Handle a proxy server event"""
yield from () # pragma: no cover
def handle_event(self, event: events.Event) -> CommandGenerator[None]:
if self._paused:
# did we just receive the reply we were waiting for?
pause_finished = (
isinstance(event, events.CommandReply) and
event.command is self._paused.command
)
if self.debug is not None:
yield self.__debug(f"{'>>' if pause_finished else '>!'} {event}")
if pause_finished:
assert isinstance(event, events.CommandReply)
yield from self.__continue(event)
else:
self._paused_event_queue.append(event)
else:
if self.debug is not None:
yield self.__debug(f">> {event}")
command_generator = self._handle_event(event)
yield from self.__process(command_generator)
def __process(self, command_generator: CommandGenerator, send=None):
"""
yield all commands from a generator.
if a command is blocking, the layer is paused and this function returns before
processing any other commands.
"""
try:
command = command_generator.send(send)
except StopIteration:
return
while True:
if self.debug is not None:
if not isinstance(command, commands.Log):
yield self.__debug(f"<< {command}")
if command.blocking is True:
command.blocking = self # assign to our layer so that higher layers don't block.
self._paused = Paused(
command,
command_generator,
)
yield command
return
else:
yield command
try:
command = next(command_generator)
except StopIteration:
return
def __continue(self, event: events.CommandReply):
"""continue processing events after being paused"""
assert self._paused is not None
command_generator = self._paused.generator
self._paused = None
yield from self.__process(command_generator, event.reply)
while not self._paused and self._paused_event_queue:
ev = self._paused_event_queue.popleft()
if self.debug is not None:
yield self.__debug(f"!> {ev}")
command_generator = self._handle_event(ev)
yield from self.__process(command_generator)
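# -----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of mitmproxy): a stripped-down model of
# the pause/buffer/resume behaviour documented on Layer above, using a plain
# generator so the mechanism can be read without the commands/events machinery.
def _demo_pause_resume():
    def handler():
        reply = yield "blocking-command"  # the layer pauses here
        return f"resumed with {reply}"
    gen = handler()
    paused_on = next(gen)              # the blocking command is emitted
    buffered = ["event-1", "event-2"]  # events arriving while paused are queued
    result = None
    try:
        gen.send("command-reply")      # the CommandReply resumes the generator
    except StopIteration as done:
        result = done.value
    return paused_on, buffered, result
# -----------------------------------------------------------------------------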
mevents = events # alias here because autocomplete above should not have aliased version.
class NextLayerHook(Hook):
data: "NextLayer"
class NextLayer(Layer):
layer: Optional[Layer]
"""The next layer. To be set by an addon."""
events: List[mevents.Event]
"""All events that happened before a decision was made."""
_ask_on_start: bool
def __init__(self, context: Context, ask_on_start: bool = False) -> None:
super().__init__(context)
self.context.layers.remove(self)
self.layer = None
self.events = []
self._ask_on_start = ask_on_start
self._handle = None
def __repr__(self):
return f"NextLayer:{repr(self.layer)}"
def handle_event(self, event: mevents.Event):
if self._handle is not None:
yield from self._handle(event)
else:
yield from super().handle_event(event)
def _handle_event(self, event: mevents.Event):
self.events.append(event)
# We receive new data. Let's find out if we can determine the next layer now?
if self._ask_on_start and isinstance(event, events.Start):
yield from self._ask()
elif isinstance(event, mevents.ConnectionClosed) and event.connection == self.context.client:
# If we have not determined the next protocol yet and the client already closes the connection,
# we abort everything.
yield commands.CloseConnection(self.context.client)
elif isinstance(event, mevents.DataReceived):
# For now, we only ask if we have received new data to reduce hook noise.
yield from self._ask()
def _ask(self):
"""
Manually trigger a next_layer hook.
The only use at the moment is to make sure that the top layer is initialized.
"""
yield NextLayerHook(self)
# Has an addon decided on the next layer yet?
if self.layer:
if self.debug:
yield commands.Log(f"{self.debug}[nextlayer] {self.layer!r}", "debug")
for e in self.events:
yield from self.layer.handle_event(e)
self.events.clear()
# Why do we need three assignments here?
# 1. When this function here is invoked we may have paused events. Those should be
# forwarded to the sublayer right away, so we reassign ._handle_event.
# 2. This layer is not needed anymore, so we directly reassign .handle_event.
# 3. Some layers may however still have a reference to the old .handle_event.
# ._handle is just an optimization to reduce the callstack in these cases.
self.handle_event = self.layer.handle_event
self._handle_event = self.layer._handle_event
self._handle = self.layer.handle_event
# Utility methods for whoever decides what the next layer is going to be.
def data_client(self):
return self._data(self.context.client)
def data_server(self):
return self._data(self.context.server)
def _data(self, connection: Connection):
data = (
e.data for e in self.events
if isinstance(e, mevents.DataReceived) and e.connection == connection
)
return b"".join(data)
| 37.844262 | 115 | 0.627897 |
4a24fe5ce29ebcfb7242f3f39f17e2a209a2ec74 | 19,475 | py | Python | detectron2/layers/deform_conv.py | sm047/detectron2 | 1036cce320ce0f2adbce7f143566462d3222bd5a | [
"Apache-2.0"
] | 5 | 2020-06-16T11:31:22.000Z | 2021-11-08T03:07:47.000Z | detectron2/layers/deform_conv.py | fangchengji/detectron2 | 1036cce320ce0f2adbce7f143566462d3222bd5a | [
"Apache-2.0"
] | null | null | null | detectron2/layers/deform_conv.py | fangchengji/detectron2 | 1036cce320ce0f2adbce7f143566462d3222bd5a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
from functools import lru_cache
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from detectron2 import _C
from .wrappers import _NewEmptyTensorOp
from detectron2.layers import Conv2d
class _DeformConv(Function):
@staticmethod
def forward(
ctx,
input,
offset,
weight,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
im2col_step=64,
):
if input is not None and input.dim() != 4:
raise ValueError(
"Expected 4D tensor as input, got {}D tensor instead.".format(input.dim())
)
ctx.stride = _pair(stride)
ctx.padding = _pair(padding)
ctx.dilation = _pair(dilation)
ctx.groups = groups
ctx.deformable_groups = deformable_groups
ctx.im2col_step = im2col_step
ctx.save_for_backward(input, offset, weight)
output = input.new_empty(
_DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride)
)
ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones
if not input.is_cuda:
raise NotImplementedError("Deformable Conv is not supported on CPUs!")
else:
cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"
_C.deform_conv_forward(
input,
weight,
offset,
output,
ctx.bufs_[0],
ctx.bufs_[1],
weight.size(3),
weight.size(2),
ctx.stride[1],
ctx.stride[0],
ctx.padding[1],
ctx.padding[0],
ctx.dilation[1],
ctx.dilation[0],
ctx.groups,
ctx.deformable_groups,
cur_im2col_step,
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, offset, weight = ctx.saved_tensors
grad_input = grad_offset = grad_weight = None
if not grad_output.is_cuda:
raise NotImplementedError("Deformable Conv is not supported on CPUs!")
else:
cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"
if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
grad_input = torch.zeros_like(input)
grad_offset = torch.zeros_like(offset)
_C.deform_conv_backward_input(
input,
offset,
grad_output,
grad_input,
grad_offset,
weight,
ctx.bufs_[0],
weight.size(3),
weight.size(2),
ctx.stride[1],
ctx.stride[0],
ctx.padding[1],
ctx.padding[0],
ctx.dilation[1],
ctx.dilation[0],
ctx.groups,
ctx.deformable_groups,
cur_im2col_step,
)
if ctx.needs_input_grad[2]:
grad_weight = torch.zeros_like(weight)
_C.deform_conv_backward_filter(
input,
offset,
grad_output,
grad_weight,
ctx.bufs_[0],
ctx.bufs_[1],
weight.size(3),
weight.size(2),
ctx.stride[1],
ctx.stride[0],
ctx.padding[1],
ctx.padding[0],
ctx.dilation[1],
ctx.dilation[0],
ctx.groups,
ctx.deformable_groups,
1,
cur_im2col_step,
)
return grad_input, grad_offset, grad_weight, None, None, None, None, None, None
@staticmethod
def _output_size(input, weight, padding, dilation, stride):
channels = weight.size(0)
output_size = (input.size(0), channels)
for d in range(input.dim() - 2):
in_size = input.size(d + 2)
pad = padding[d]
kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
stride_ = stride[d]
output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,)
if not all(map(lambda s: s > 0, output_size)):
raise ValueError(
"convolution input is too small (output would be {})".format(
"x".join(map(str, output_size))
)
)
return output_size
@staticmethod
@lru_cache(maxsize=128)
def _cal_im2col_step(input_size, default_size):
"""
        Calculate a proper im2col step size, which should evenly divide input_size and not be
        larger than default_size. Meanwhile the step size should be as large as possible to be
        more efficient. So we choose the largest divisor of input_size that is not larger than
        default_size.
:param input_size: input batch size .
:param default_size: default preferred im2col step size.
:return: the largest proper step size.
"""
if input_size <= default_size:
return input_size
best_step = 1
for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)):
if input_size % step == 0:
if input_size // step <= default_size:
return input_size // step
best_step = step
return best_step
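# -----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file): how
# _DeformConv._cal_im2col_step picks a step size.  The method is pure Python,
# so these checks do not require the CUDA extension to be built.
def _demo_im2col_step():
    assert _DeformConv._cal_im2col_step(32, 64) == 32   # batch fits, used directly
    assert _DeformConv._cal_im2col_step(128, 64) == 64  # largest divisor of 128 that is <= 64
    assert _DeformConv._cal_im2col_step(96, 64) == 48   # 96 = 2 * 48 and 48 <= 64
# -----------------------------------------------------------------------------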
class _ModulatedDeformConv(Function):
@staticmethod
def forward(
ctx,
input,
offset,
mask,
weight,
bias=None,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
):
ctx.stride = stride
ctx.padding = padding
ctx.dilation = dilation
ctx.groups = groups
ctx.deformable_groups = deformable_groups
ctx.with_bias = bias is not None
if not ctx.with_bias:
bias = input.new_empty(1) # fake tensor
if not input.is_cuda:
raise NotImplementedError("Deformable Conv is not supported on CPUs!")
if (
weight.requires_grad
or mask.requires_grad
or offset.requires_grad
or input.requires_grad
):
ctx.save_for_backward(input, offset, mask, weight, bias)
output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight))
ctx._bufs = [input.new_empty(0), input.new_empty(0)]
_C.modulated_deform_conv_forward(
input,
weight,
bias,
ctx._bufs[0],
offset,
mask,
output,
ctx._bufs[1],
weight.shape[2],
weight.shape[3],
ctx.stride,
ctx.stride,
ctx.padding,
ctx.padding,
ctx.dilation,
ctx.dilation,
ctx.groups,
ctx.deformable_groups,
ctx.with_bias,
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
if not grad_output.is_cuda:
raise NotImplementedError("Deformable Conv is not supported on CPUs!")
input, offset, mask, weight, bias = ctx.saved_tensors
grad_input = torch.zeros_like(input)
grad_offset = torch.zeros_like(offset)
grad_mask = torch.zeros_like(mask)
grad_weight = torch.zeros_like(weight)
grad_bias = torch.zeros_like(bias)
_C.modulated_deform_conv_backward(
input,
weight,
bias,
ctx._bufs[0],
offset,
mask,
ctx._bufs[1],
grad_input,
grad_weight,
grad_bias,
grad_offset,
grad_mask,
grad_output,
weight.shape[2],
weight.shape[3],
ctx.stride,
ctx.stride,
ctx.padding,
ctx.padding,
ctx.dilation,
ctx.dilation,
ctx.groups,
ctx.deformable_groups,
ctx.with_bias,
)
if not ctx.with_bias:
grad_bias = None
return (
grad_input,
grad_offset,
grad_mask,
grad_weight,
grad_bias,
None,
None,
None,
None,
None,
)
@staticmethod
def _infer_shape(ctx, input, weight):
n = input.size(0)
channels_out = weight.size(0)
height, width = input.shape[2:4]
kernel_h, kernel_w = weight.shape[2:4]
height_out = (
height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1)
) // ctx.stride + 1
width_out = (
width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1)
) // ctx.stride + 1
return n, channels_out, height_out, width_out
deform_conv = _DeformConv.apply
modulated_deform_conv = _ModulatedDeformConv.apply
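# -----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file): the output
# spatial size formula used by _ModulatedDeformConv._infer_shape above, i.e.
# out = (in + 2 * pad - (dilation * (kernel - 1) + 1)) // stride + 1.
def _demo_infer_spatial_size(size=64, kernel=3, stride=1, padding=1, dilation=1):
    return (size + 2 * padding - (dilation * (kernel - 1) + 1)) // stride + 1
# For the defaults (3x3 kernel, stride 1, padding 1) the spatial size is
# preserved: _demo_infer_spatial_size(64) == 64.
# -----------------------------------------------------------------------------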
class DeformConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
bias=False,
norm=None,
activation=None,
):
"""
Deformable convolution from :paper:`deformconv`.
Arguments are similar to :class:`Conv2D`. Extra arguments:
Args:
deformable_groups (int): number of groups used in deformable convolution.
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
"""
super(DeformConv, self).__init__()
assert not bias
        assert in_channels % groups == 0, "in_channels {} is not divisible by groups {}".format(
in_channels, groups
)
assert (
out_channels % groups == 0
), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.deformable_groups = deformable_groups
self.norm = norm
self.activation = activation
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size)
)
self.bias = None
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
def forward(self, x, offset):
if x.numel() == 0:
            # When input is empty, we want to return an empty tensor with "correct" shape,
# So that the following operations will not panic
# if they check for the shape of the tensor.
# This computes the height and width of the output tensor
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
for i, p, di, k, s in zip(
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
x = deform_conv(
x,
offset,
self.weight,
self.stride,
self.padding,
self.dilation,
self.groups,
self.deformable_groups,
)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
def extra_repr(self):
tmpstr = "in_channels=" + str(self.in_channels)
tmpstr += ", out_channels=" + str(self.out_channels)
tmpstr += ", kernel_size=" + str(self.kernel_size)
tmpstr += ", stride=" + str(self.stride)
tmpstr += ", padding=" + str(self.padding)
tmpstr += ", dilation=" + str(self.dilation)
tmpstr += ", groups=" + str(self.groups)
tmpstr += ", deformable_groups=" + str(self.deformable_groups)
tmpstr += ", bias=False"
return tmpstr
class ModulatedDeformConv(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
bias=True,
norm=None,
activation=None,
):
"""
Modulated deformable convolution from :paper:`deformconv2`.
Arguments are similar to :class:`Conv2D`. Extra arguments:
Args:
deformable_groups (int): number of groups used in deformable convolution.
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
"""
super(ModulatedDeformConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.deformable_groups = deformable_groups
self.with_bias = bias
self.norm = norm
self.activation = activation
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)
)
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.bias = None
nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x, offset, mask):
if x.numel() == 0:
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
for i, p, di, k, s in zip(
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
x = modulated_deform_conv(
x,
offset,
mask,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
self.deformable_groups,
)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
def extra_repr(self):
tmpstr = "in_channels=" + str(self.in_channels)
tmpstr += ", out_channels=" + str(self.out_channels)
tmpstr += ", kernel_size=" + str(self.kernel_size)
tmpstr += ", stride=" + str(self.stride)
tmpstr += ", padding=" + str(self.padding)
tmpstr += ", dilation=" + str(self.dilation)
tmpstr += ", groups=" + str(self.groups)
tmpstr += ", deformable_groups=" + str(self.deformable_groups)
tmpstr += ", bias=" + str(self.with_bias)
return tmpstr
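# -----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file): the channel
# bookkeeping used by DFConv2d below.  For a k x k kernel, each deformable
# group needs 2 * k * k offset channels (a (dy, dx) pair per sampling
# location); the modulated variant adds k * k mask channels, i.e. 3 * k * k.
def _demo_offset_channels(kernel_size=3, deformable_groups=1, with_modulated_dcn=True):
    offset_base_channels = kernel_size * kernel_size
    per_group = offset_base_channels * (3 if with_modulated_dcn else 2)
    return deformable_groups * per_group
# e.g. _demo_offset_channels(3, 1, True) == 27 and _demo_offset_channels(3, 1, False) == 18,
# matching the "default: 27" / "default: 18" comments in DFConv2d.__init__.
# -----------------------------------------------------------------------------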
class DFConv2d(nn.Module):
"""Deformable convolutional layer"""
def __init__(
self,
in_channels,
out_channels,
with_modulated_dcn=True,
kernel_size=3,
stride=1,
groups=1,
dilation=1,
deformable_groups=1,
bias=False,
padding=None
):
super(DFConv2d, self).__init__()
if isinstance(kernel_size, (list, tuple)):
assert isinstance(stride, (list, tuple))
assert isinstance(dilation, (list, tuple))
assert len(kernel_size) == 2
assert len(stride) == 2
assert len(dilation) == 2
padding = (
dilation[0] * (kernel_size[0] - 1) // 2,
dilation[1] * (kernel_size[1] - 1) // 2
)
offset_base_channels = kernel_size[0] * kernel_size[1]
else:
padding = dilation * (kernel_size - 1) // 2
offset_base_channels = kernel_size * kernel_size
if with_modulated_dcn:
from .deform_conv import ModulatedDeformConv
offset_channels = offset_base_channels * 3 # default: 27
conv_block = ModulatedDeformConv
else:
from .deform_conv import DeformConv
offset_channels = offset_base_channels * 2 # default: 18
conv_block = DeformConv
self.offset = Conv2d(
in_channels,
deformable_groups * offset_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=1,
dilation=dilation
)
for l in [self.offset, ]:
nn.init.kaiming_uniform_(l.weight, a=1)
torch.nn.init.constant_(l.bias, 0.)
self.conv = conv_block(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
deformable_groups=deformable_groups,
bias=bias
)
self.with_modulated_dcn = with_modulated_dcn
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.offset_split = offset_base_channels * deformable_groups * 2
def forward(self, x, return_offset=False):
if x.numel() > 0:
if not self.with_modulated_dcn:
offset_mask = self.offset(x)
x = self.conv(x, offset_mask)
else:
offset_mask = self.offset(x)
offset = offset_mask[:, :self.offset_split, :, :]
mask = offset_mask[:, self.offset_split:, :, :].sigmoid()
x = self.conv(x, offset, mask)
if return_offset:
return x, offset_mask
return x
# get output shape
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // d + 1
for i, p, di, k, d in zip(
x.shape[-2:],
self.padding,
self.dilation,
self.kernel_size,
self.stride
)
]
output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape
        return _NewEmptyTensorOp.apply(x, output_shape)
| 32.952623 | 99 | 0.533248 |
4a24fedd3f7b0acd3a9708b2fcb6f8b8fba6c72d | 81,609 | py | Python | src/transformers/modeling_utils.py | katarinaslama/transformers-1 | a5a8eeb772b185b0746f3ce9be6ae43181d2ca71 | [
"Apache-2.0"
] | 3 | 2020-11-11T14:29:18.000Z | 2021-03-07T14:42:33.000Z | src/transformers/modeling_utils.py | katarinaslama/transformers-1 | a5a8eeb772b185b0746f3ce9be6ae43181d2ca71 | [
"Apache-2.0"
] | 1 | 2021-09-15T09:20:01.000Z | 2022-03-02T17:16:01.000Z | src/transformers/modeling_utils.py | katarinaslama/transformers-1 | a5a8eeb772b185b0746f3ce9be6ae43181d2ca71 | [
"Apache-2.0"
] | 1 | 2020-11-11T14:29:22.000Z | 2020-11-11T14:29:22.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import torch
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
ModelOutput,
cached_path,
hf_bucket_url,
is_remote_url,
is_torch_tpu_available,
replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .utils import logging
logger = logging.get_logger(__name__)
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive."""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
def find_pruneable_heads_and_indices(
heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: torch.LongTensor = torch.arange(len(mask))[mask].long()
return heads, index
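# -----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file): pruning head 1
# of a 4-head attention block with head_size 2 keeps the flattened positions
# belonging to the remaining heads 0, 2 and 3.
def _demo_find_pruneable_heads():
    heads, index = find_pruneable_heads_and_indices(
        heads=[1], n_heads=4, head_size=2, already_pruned_heads=set()
    )
    assert heads == {1}
    assert index.tolist() == [0, 1, 4, 5, 6, 7]
    return heads, index
# -----------------------------------------------------------------------------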
class ModuleUtilsMixin:
"""
A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin.
"""
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_pre_forward = mem.rss
return None
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_post_forward = mem.rss
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
return None
def add_memory_hooks(self):
"""
Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.
Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
zero with :obj:`model.reset_memory_hooks_state()`.
"""
for module in self.modules():
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
module.register_forward_hook(self._hook_rss_memory_post_forward)
self.reset_memory_hooks_state()
def reset_memory_hooks_state(self):
"""
Reset the :obj:`mem_rss_diff` attribute of each module (see
:func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
"""
for module in self.modules():
module.mem_rss_diff = 0
module.mem_rss_post_forward = 0
module.mem_rss_pre_forward = 0
@property
def device(self) -> device:
"""
:obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
device).
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def dtype(self) -> dtype:
"""
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.
Returns:
:obj:`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
if self.dtype == torch.float16:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
elif self.dtype == torch.float32:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
else:
raise ValueError(
"{} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`".format(
self.dtype
)
)
return encoder_extended_attention_mask
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def get_head_mask(
self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
) -> Tensor:
"""
Prepare the head mask if needed.
Args:
head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
num_hidden_layers (:obj:`int`):
The number of hidden layers in the model.
is_attention_chunked: (:obj:`bool`, `optional, defaults to :obj:`False`):
Whether or not the attentions scores are computed by chunks or not.
Returns:
:obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]`
or list with :obj:`[None]` for each layer.
"""
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
if is_attention_chunked is True:
head_mask = head_mask.unsqueeze(-1)
else:
head_mask = [None] * num_hidden_layers
return head_mask
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
"""-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility
return head_mask
def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
"""
Get number of (optionally, trainable or non-embeddings) parameters in the module.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of non-embeddings parameters
Returns:
:obj:`int`: The number of parameters.
"""
        # Note: `self.parameters()` yields nn.Parameter objects, never nn.Embedding modules, so an
        # isinstance filter on the parameters cannot exclude embeddings; resolve them by name instead.
        if exclude_embeddings:
            embedding_names = {
                f"{n}.weight" for n, m in self.named_modules() if isinstance(m, nn.Embedding)
            }
            params = [p for n, p in self.named_parameters() if n not in embedding_names]
        else:
            params = list(self.parameters())
        return sum(p.numel() for p in params if p.requires_grad or not only_trainable)
def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
"""
Helper function to estimate the total number of tokens from the model inputs.
Args:
            input_dict (:obj:`dict`): The model inputs.
Returns:
:obj:`int`: The total number of tokens.
"""
token_inputs = [tensor for key, tensor in input_dict.items() if "input" in key]
if token_inputs:
return sum([token_input.numel() for token_input in token_inputs])
else:
warnings.warn(
"Could not estimate the number of tokens of the input, floating-point operations will not be computed"
)
return 0
def floating_point_ops(
self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True
) -> int:
"""
Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a
batch with this transformer model. Default approximation neglects the quadratic dependency on the number of
tokens (valid if :obj:`12 * d_model << sequence_length`) as laid out in `this paper <https://arxiv.org/pdf/2001.08361.pdf>`__ section
        2.1. Should be overridden for transformers with parameter re-use, e.g., Albert or Universal Transformers, or
if doing long-range modeling with very high sequence lengths.
Args:
            input_dict (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The model inputs; the token count is estimated from them via :obj:`estimate_tokens`.
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to count embedding and softmax operations.
Returns:
:obj:`int`: The number of floating-point operations.
"""
return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)
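# --- Illustrative sketch (editorial addition, not part of the original module) ---
# Shows how the ModuleUtilsMixin helpers above compose: `floating_point_ops` is roughly
# 6 * tokens * parameters. `_TinyModel` is a hypothetical stand-in; real models inherit
# the mixin through PreTrainedModel.
def _example_module_utils_mixin():
    class _TinyModel(nn.Module, ModuleUtilsMixin):
        def __init__(self):
            super().__init__()
            self.embed = nn.Embedding(10, 4)
            self.proj = nn.Linear(4, 4)

    model = _TinyModel()
    inputs = {"input_ids": torch.ones(2, 8, dtype=torch.long)}
    print(model.num_parameters())  # 10*4 + 4*4 + 4 = 60 parameters
    print(model.estimate_tokens(inputs))  # 2 * 8 = 16 tokens
    print(model.floating_point_ops(inputs))  # 6 * tokens * non-embedding parameters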
class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin):
r"""
Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a
PyTorch model, taking as arguments:
- **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
TensorFlow checkpoint.
- **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated
to the model.
- **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
        - **authorized_missing_keys** (:obj:`Optional[List[str]]`) -- A list of re patterns of tensor names to ignore
          when loading the model (and avoid unnecessary warnings).
        - **keys_to_never_save** (:obj:`Optional[List[str]]`) -- A list of tensor names to ignore
          when saving the model (useful for keys that aren't trained, but which are deterministic).
"""
config_class = None
base_model_prefix = ""
authorized_missing_keys = None
authorized_unexpected_keys = None
keys_to_never_save = None
@property
def dummy_inputs(self) -> Dict[str, torch.Tensor]:
"""
:obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config in model
self.config = config
@property
def base_model(self) -> nn.Module:
"""
:obj:`torch.nn.Module`: The main body of the model.
"""
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self) -> nn.Module:
"""
Returns the model's input embeddings.
Returns:
:obj:`nn.Module`: A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value: nn.Module):
"""
Set model's input embeddings.
Args:
value (:obj:`nn.Module`): A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self) -> nn.Module:
"""
Returns the model's output embeddings.
Returns:
:obj:`nn.Module`: A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
If the :obj:`torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning
the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None and self.config.tie_word_embeddings:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)
@staticmethod
def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str):
uninitialized_encoder_weights: List[str] = []
assert decoder.__class__ == encoder.__class__, f"{decoder.__class__} and {encoder.__class__} have to be equal."
def tie_encoder_to_decoder_recursively(
decoder_pointer: nn.Module,
encoder_pointer: nn.Module,
module_name: str,
uninitialized_encoder_weights: List[str],
depth=0,
):
assert isinstance(decoder_pointer, nn.Module) and isinstance(
encoder_pointer, nn.Module
), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
if hasattr(decoder_pointer, "weight"):
assert hasattr(encoder_pointer, "weight")
encoder_pointer.weight = decoder_pointer.weight
if hasattr(decoder_pointer, "bias"):
assert hasattr(encoder_pointer, "bias")
encoder_pointer.bias = decoder_pointer.bias
return
encoder_modules = encoder_pointer._modules
decoder_modules = decoder_pointer._modules
if len(decoder_modules) > 0:
assert (
len(encoder_modules) > 0
), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
encoder_layer_pos = 0
for name, module in decoder_modules.items():
if name.isdigit():
encoder_name = str(int(name) + encoder_layer_pos)
decoder_name = name
if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])):
                            # this can happen if the name corresponds to the position in a list of layers (a module list);
                            # in this case the decoder has added a cross-attention block that the encoder does not have,
                            # so skip this step and subtract one layer position from the encoder
encoder_layer_pos -= 1
continue
elif name not in encoder_modules:
continue
elif depth > 500:
raise ValueError(
"Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
)
else:
decoder_name = encoder_name = name
tie_encoder_to_decoder_recursively(
decoder_modules[decoder_name],
encoder_modules[encoder_name],
module_name + "/" + name,
uninitialized_encoder_weights,
depth=depth + 1,
)
all_encoder_weights.remove(module_name + "/" + encoder_name)
uninitialized_encoder_weights += list(all_encoder_weights)
# tie weights recursively
tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
if len(uninitialized_encoder_weights) > 0:
logger.warning(
f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
)
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
"""Tie or clone module weights depending of whether we are using TorchScript or not"""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(
0,
output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding:
"""
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.
Arguments:
new_num_tokens (:obj:`int`, `optional`):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
                just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(
self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None
) -> torch.nn.Embedding:
"""
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (:obj:`torch.nn.Embedding`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`torch.nn.Embedding` module of the model without doing anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy token embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
"""
Initializes and prunes weights if needed.
"""
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`Dict[int, List[int]]`):
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list
of heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will
prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
        :func:`~transformers.PreTrainedModel.from_pretrained` class method.
Arguments:
save_directory (:obj:`str`):
Directory to which to save. Will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
state_dict = model_to_save.state_dict()
# Handle the case where some state_dict keys shouldn't be saved
if self.keys_to_never_save is not None:
state_dict = {k: v for k, v in state_dict.items() if k not in self.keys_to_never_save}
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
if getattr(self.config, "xla_device", False) and is_torch_tpu_available():
import torch_xla.core.xla_model as xm
if xm.is_master_ordinal():
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# xm.save takes care of saving only from master
xm.save(state_dict, output_model_file)
else:
model_to_save.config.save_pretrained(save_directory)
torch.save(state_dict, output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated).
To train the model, you should first set it back in training mode with ``model.train()``.
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (:obj:`str`, `optional`):
Can be either:
- A string with the `shortcut name` of a pretrained model to load from cache or download, e.g.,
``bert-base-uncased``.
- A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g.,
``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
model_args (sequence of positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`Union[PretrainedConfig, str]`, `optional`):
Can be either:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`,
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `shortcut name` string of a
pretrained model).
- The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
                      by supplying the save directory.
                    - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each
request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error
messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
use_cdn(:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use Cloudfront (a Content Delivery Network, or CDN) when searching for the model on
our S3 (faster). Should be set to :obj:`False` for checkpoints larger than 20GB.
mirror(:obj:`str`, `optional`, defaults to :obj:`None`):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem,
you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please
refer to the mirror site for more information.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
>>> from transformers import BertConfig, BertModel
>>> # Download model and configuration from S3 and cache.
>>> model = BertModel.from_pretrained('bert-base-uncased')
>>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
>>> model = BertModel.from_pretrained('./test/saved_model/')
>>> # Update configuration during loading.
>>> model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True)
>>> assert model.config.output_attentions == True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
>>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_cdn = kwargs.pop("use_cdn", True)
mirror = kwargs.pop("mirror", None)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
pretrained_model_name_or_path,
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
use_cdn=use_cdn,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
if resolved_archive_file is None:
raise EnvironmentError
except EnvironmentError:
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
"Unable to load weights from pytorch checkpoint file. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith(".index"):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
else:
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict,
prefix,
local_metadata,
True,
missing_keys,
unexpected_keys,
error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys())
if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
start_prefix = cls.base_model_prefix + "."
if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
# Some models may have keys that are not in the state by design, removing them before needlessly warning
# the user.
if cls.authorized_missing_keys is not None:
for pat in cls.authorized_missing_keys:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls.authorized_unexpected_keys is not None:
for pat in cls.authorized_unexpected_keys:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.info(
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
)
# make sure token embedding weights are still tied if needed
model.tie_weights()
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
if hasattr(config, "xla_device") and config.xla_device and is_torch_tpu_available():
import torch_xla.core.xla_model as xm
model = xm.send_cpu_data_to_device(model, xm.xla_device())
model.to(xm.xla_device())
return model
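# --- Illustrative sketch (editorial addition, not part of the original module) ---
# A typical save/load round trip with the PreTrainedModel API above. The directory name is an
# arbitrary assumption; any concrete subclass (e.g. a BERT-style model) behaves the same way.
def _example_save_and_reload(model: PreTrainedModel, save_directory: str = "./my_model_directory"):
    model.save_pretrained(save_directory)  # writes the weights file (WEIGHTS_NAME) and config.json
    reloaded = model.__class__.from_pretrained(save_directory)
    return reloaded  # already in eval mode; call .train() before fine-tuning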
class Conv1D(nn.Module):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (:obj:`int`): The number of output features.
nx (:obj:`int`): The number of input features.
"""
def __init__(self, nf, nx):
super().__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
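# --- Illustrative sketch (editorial addition, not part of the original module) ---
# Conv1D behaves like nn.Linear with a transposed weight: an input of shape (batch, seq, nx)
# maps to (batch, seq, nf). The sizes below are arbitrary.
def _example_conv1d():
    layer = Conv1D(nf=12, nx=4)
    x = torch.randn(2, 5, 4)
    y = layer(x)
    assert y.shape == (2, 5, 12)
    # Same computation expressed as a plain matmul on the stored (nx, nf) weight:
    assert torch.allclose(y, x @ layer.weight + layer.bias, atol=1e-6)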
class PoolerStartLogits(nn.Module):
"""
Compute SQuAD start logits from sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, 1)
def forward(
self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS).
1.0 means token should be masked.
Returns:
:obj:`torch.FloatTensor`: The start logits for SQuAD.
"""
x = self.dense(hidden_states).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
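# --- Illustrative sketch (editorial addition, not part of the original module) ---
# PoolerStartLogits reduces the hidden dimension to one logit per token; positions flagged in
# p_mask get a large negative value so they cannot win the softmax. `_cfg` is a hypothetical
# minimal config carrying only the attribute the module reads.
def _example_pooler_start_logits():
    _cfg = type("_Cfg", (), {"hidden_size": 8})()
    pooler = PoolerStartLogits(_cfg)
    hidden = torch.randn(2, 5, 8)
    p_mask = torch.zeros(2, 5)
    p_mask[:, 0] = 1.0  # e.g. mask the query/special-token position
    start_logits = pooler(hidden, p_mask=p_mask)
    assert start_logits.shape == (2, 5)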
class PoolerEndLogits(nn.Module):
"""
Compute SQuAD end logits from sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
:obj:`layer_norm_eps` to use.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
p_mask: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
The hidden states of the first tokens for the labeled span.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
The position of the first token for the labeled span.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS).
1.0 means token should be masked.
.. note::
            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
``start_positions`` overrides ``start_states``.
Returns:
:obj:`torch.FloatTensor`: The end logits for SQuAD.
"""
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
if next(self.parameters()).dtype == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
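# --- Illustrative sketch (editorial addition, not part of the original module) ---
# During training PoolerEndLogits conditions on the ground-truth start positions; `_cfg` is a
# hypothetical minimal config with the two attributes the module reads.
def _example_pooler_end_logits():
    _cfg = type("_Cfg", (), {"hidden_size": 8, "layer_norm_eps": 1e-12})()
    pooler = PoolerEndLogits(_cfg)
    hidden = torch.randn(2, 5, 8)
    end_logits = pooler(hidden, start_positions=torch.tensor([1, 3]))
    assert end_logits.shape == (2, 5)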
class PoolerAnswerClass(nn.Module):
"""
Compute SQuAD 2.0 answer class from classification and start tokens hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
"""
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
cls_index: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
The hidden states of the first tokens for the labeled span.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
The position of the first token for the labeled span.
cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
.. note::
            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
``start_positions`` overrides ``start_states``.
Returns:
:obj:`torch.FloatTensor`: The SQuAD 2.0 answer class.
"""
# No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
hsz = hidden_states.shape[-1]
assert (
start_states is not None or start_positions is not None
), "One of start_states, start_positions should be not None"
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
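# --- Illustrative sketch (editorial addition, not part of the original module) ---
# PoolerAnswerClass emits one answerability logit per example from the start and classification
# token states; when cls_index is omitted the last token is used. `_cfg` is hypothetical.
def _example_pooler_answer_class():
    _cfg = type("_Cfg", (), {"hidden_size": 8})()
    pooler = PoolerAnswerClass(_cfg)
    hidden = torch.randn(2, 5, 8)
    cls_logits = pooler(hidden, start_positions=torch.tensor([1, 3]))
    assert cls_logits.shape == (2,)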
@dataclass
class SquadHeadOutput(ModelOutput):
"""
Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the ``is_impossible`` label of the answers.
"""
loss: Optional[torch.FloatTensor] = None
start_top_log_probs: Optional[torch.FloatTensor] = None
start_top_index: Optional[torch.LongTensor] = None
end_top_log_probs: Optional[torch.FloatTensor] = None
end_top_index: Optional[torch.LongTensor] = None
cls_logits: Optional[torch.FloatTensor] = None
class SQuADHead(nn.Module):
r"""
A SQuAD head inspired by XLNet.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
:obj:`layer_norm_eps` to use.
"""
def __init__(self, config):
super().__init__()
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
@replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
def forward(
self,
hidden_states: torch.FloatTensor,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
cls_index: Optional[torch.LongTensor] = None,
is_impossible: Optional[torch.LongTensor] = None,
p_mask: Optional[torch.FloatTensor] = None,
return_dict: bool = False,
) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
Final hidden states of the model on the sequence tokens.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Positions of the first token for the labeled span.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Positions of the last token for the labeled span.
cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Whether the question has a possible answer in the paragraph or not.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS).
1.0 means token should be masked.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
"""
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
if not return_dict:
return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
else:
return SquadHeadOutput(
start_top_log_probs=start_top_log_probs,
start_top_index=start_top_index,
end_top_log_probs=end_top_log_probs,
end_top_index=end_top_index,
cls_logits=cls_logits,
)
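# --- Illustrative sketch (editorial addition, not part of the original module) ---
# SQuADHead returns a single loss when ground-truth spans are supplied and beam-search style
# top-k logits otherwise. `_cfg` is a hypothetical minimal config with the attributes read by
# the head and its poolers.
def _example_squad_head():
    _cfg = type("_Cfg", (), {"hidden_size": 8, "layer_norm_eps": 1e-12, "start_n_top": 2, "end_n_top": 2})()
    head = SQuADHead(_cfg)
    hidden = torch.randn(2, 5, 8)
    # Training mode: labels provided -> (total_loss,)
    loss = head(hidden, start_positions=torch.tensor([1, 2]), end_positions=torch.tensor([2, 4]))[0]
    # Inference mode: no labels -> top-k start/end log-probs, indices and answerability logits
    start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = head(hidden)
    assert start_top_index.shape == (2, _cfg.start_n_top)
    return loss, cls_logits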
class SequenceSummary(nn.Module):
r"""
Compute a single vector summary of a sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model. Relevant arguments in the config class of the model are (refer to the
actual config class of your model for the default values it uses):
- **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:
- :obj:`"last"` -- Take the last token hidden state (like XLNet)
- :obj:`"first"` -- Take the first token hidden state (like Bert)
- :obj:`"mean"` -- Take the mean of all tokens hidden states
- :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- :obj:`"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
:obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
- **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
output, another string or :obj:`None` will add no activation.
- **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
activation.
- **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and
activation.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.summary_type = getattr(config, "summary_type", "last")
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
activation_string = getattr(config, "summary_activation", None)
self.activation: Callable = get_activation(activation_string) if activation_string else Identity()
self.first_dropout = Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(
self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
) -> torch.FloatTensor:
"""
Compute a single vector summary of a sequence hidden states.
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`):
The hidden states of the last layer.
cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`):
Used if :obj:`summary_type == "cls_index"` and takes the last token of the sequence as classification
token.
Returns:
:obj:`torch.FloatTensor`: The summary of the sequence hidden states.
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(
hidden_states[..., :1, :],
hidden_states.shape[-2] - 1,
dtype=torch.long,
)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
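# --- Illustrative sketch (editorial addition, not part of the original module) ---
# SequenceSummary collapses (batch, seq_len, hidden) into (batch, num_classes). The hypothetical
# config below selects the GPT-style "cls_index" summary with a projection down to two labels.
def _example_sequence_summary():
    _cfg = type(
        "_Cfg",
        (),
        {
            "summary_type": "cls_index",
            "summary_use_proj": True,
            "summary_proj_to_labels": True,
            "num_labels": 2,
            "hidden_size": 8,
            "summary_activation": "tanh",
            "summary_first_dropout": 0.0,
            "summary_last_dropout": 0.0,
        },
    )()
    summary = SequenceSummary(_cfg)
    hidden = torch.randn(2, 5, 8)
    cls_index = torch.tensor([4, 2])  # position of the classification token in each sequence
    output = summary(hidden, cls_index=cls_index)
    assert output.shape == (2, 2)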
def prune_linear_layer(layer: torch.nn.Linear, index: torch.LongTensor, dim: int = 0) -> torch.nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
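# --- Illustrative sketch (editorial addition, not part of the original module) ---
# Pruning a linear layer down to a subset of its output features: the kept rows are copied
# verbatim, so the retained outputs match the original layer exactly.
def _example_prune_linear_layer():
    layer = nn.Linear(4, 6)
    index = torch.tensor([0, 2, 5])  # keep 3 of the 6 output features
    pruned = prune_linear_layer(layer, index, dim=0)
    x = torch.randn(1, 4)
    assert torch.allclose(pruned(x), layer(x)[:, index], atol=1e-6)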
def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
"""
    Prune a Conv1D layer to keep only entries in index. A Conv1D works like a linear layer (see e.g. BERT) but the weights
are transposed.
Used to remove heads.
Args:
layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices.
Returns:
:class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_layer(
layer: Union[torch.nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[torch.nn.Linear, Conv1D]:
"""
Prune a Conv1D or linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`:
The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError("Can't prune layer of class {}".format(layer.__class__))
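# --- Illustrative sketch (editorial addition, not part of the original module) ---
# prune_layer dispatches on the layer type, defaulting to dim=0 for nn.Linear and dim=1 for
# Conv1D -- both correspond to output features, since Conv1D stores the transposed weight.
def _example_prune_layer():
    index = torch.tensor([0, 2])
    linear_pruned = prune_layer(nn.Linear(4, 6), index)  # weight: (6, 4) -> (2, 4)
    conv1d_pruned = prune_layer(Conv1D(nf=6, nx=4), index)  # weight: (4, 6) -> (4, 2)
    assert linear_pruned.weight.shape == (2, 4)
    assert conv1d_pruned.weight.shape == (4, 2)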
def apply_chunking_to_forward(
forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
"""
This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.
If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
directly applying :obj:`forward_fn` to :obj:`input_tensors`.
Args:
forward_fn (:obj:`Callable[..., torch.Tensor]`):
The forward function of the model.
chunk_size (:obj:`int`):
The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
chunk_dim (:obj:`int`):
The dimension over which the :obj:`input_tensors` should be chunked.
input_tensors (:obj:`Tuple[torch.Tensor]`):
The input tensors of ``forward_fn`` which will be chunked.
Returns:
        :obj:`torch.Tensor`: A tensor with the same shape as the one :obj:`forward_fn` would have given if applied directly.
Examples::
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
# implement a chunked forward function
def forward(self, hidden_states):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
"""
assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors)
tensor_shape = input_tensors[0].shape
assert all(
input_tensor.shape == tensor_shape for input_tensor in input_tensors
), "All input tenors have to be of the same shape"
    # inspect.signature exists since Python 3.5 and is a Python function -> no problem with backward compatibility
num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
assert num_args_in_forward_chunk_fn == len(
input_tensors
), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format(
num_args_in_forward_chunk_fn, len(input_tensors)
)
if chunk_size > 0:
assert (
input_tensors[0].shape[chunk_dim] % chunk_size == 0
), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format(
input_tensors[0].shape[chunk_dim], chunk_size
)
num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
# chunk input tensor into tuples
input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
# apply forward fn to every tuple
output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
# concatenate output at same dimension
return torch.cat(output_chunks, dim=chunk_dim)
return forward_fn(*input_tensors)
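# --- Illustrative sketch (editorial addition, not part of the original module) ---
# Chunked application is numerically equivalent to a direct call whenever the forward function
# is independent across the chunked dimension, as for a position-wise feed-forward layer.
def _example_apply_chunking_to_forward():
    dense = nn.Linear(8, 8)

    def feed_forward(hidden_states):
        return dense(hidden_states)

    hidden = torch.randn(2, 6, 8)
    chunked = apply_chunking_to_forward(feed_forward, 2, 1, hidden)  # chunk_size=2 over the seq dim
    assert torch.allclose(chunked, feed_forward(hidden), atol=1e-6)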
| 48.375222 | 197 | 0.628105 |
4a2500eda82325c1ec5344b9d75fa4b96252becd | 1,306 | py | Python | jorldy/config/icm_ppo/super_mario_bros.py | Kyushik/JORLDY | 6a24a2195e5e87ade157ee53f631af2221f0a188 | [
"Apache-2.0"
] | 300 | 2021-11-03T07:06:34.000Z | 2022-03-24T02:23:56.000Z | jorldy/config/icm_ppo/super_mario_bros.py | Kyushik/JORLDY | 6a24a2195e5e87ade157ee53f631af2221f0a188 | [
"Apache-2.0"
] | 37 | 2021-11-04T04:31:07.000Z | 2022-03-30T01:40:49.000Z | jorldy/config/icm_ppo/super_mario_bros.py | Kyushik/JORLDY | 6a24a2195e5e87ade157ee53f631af2221f0a188 | [
"Apache-2.0"
] | 45 | 2021-11-03T08:05:56.000Z | 2022-03-24T08:35:05.000Z | ### ICM PPO Super Mario Bros Config ###
env = {
"name": "super_mario_bros",
"render": False,
"gray_img": True,
"img_width": 84,
"img_height": 84,
"stack_frame": 4,
"no_op": True,
"skip_frame": 4,
"reward_clip": True,
"episodic_life": True,
}
agent = {
"name": "icm_ppo",
"network": "discrete_policy_value",
"head": "cnn",
"gamma": 0.99,
"batch_size": 16,
"n_step": 128,
"n_epoch": 3,
"_lambda": 0.95,
"epsilon_clip": 0.1,
"vf_coef": 1.0,
"ent_coef": 0.01,
"clip_grad_norm": 1.0,
"use_standardization": True,
"lr_decay": True,
# Parameters for Curiosity-driven Exploration
"icm_network": "icm_cnn", # icm_mlp, icm_cnn, icm_multi
"beta": 0.2,
"lamb": 1.0,
"eta": 0.1,
"extrinsic_coeff": 1.0,
"intrinsic_coeff": 1.0,
"obs_normalize": True,
"ri_normalize": True,
"batch_norm": True,
}
optim = {
"name": "adam",
"lr": 2.5e-4,
}
train = {
"training": True,
"load_path": None,
"run_step": 30000000,
"print_period": 10000,
"save_period": 500000,
"eval_iteration": 1,
"record": True,
"record_period": 500000,
# distributed setting
"distributed_batch_size": 1024,
"update_period": agent["n_step"],
"num_workers": 64,
}
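# --- Illustrative sketch (editorial addition, not part of the original config) ---
# The blocks above are plain dicts, so a launcher can aggregate them directly. The helper below
# is only for demonstration and is not JORLDY's actual entry point.
def _example_settings_summary():
    settings = {"env": env, "agent": agent, "optim": optim, "train": train}
    env_steps_per_update = agent["n_step"] * train["num_workers"]  # 128 * 64 = 8192
    rough_update_count = train["run_step"] // env_steps_per_update  # ~3662 distributed updates
    return settings, rough_update_count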
| 21.064516 | 60 | 0.569678 |
4a2500fc7f57081d8b12a4f49d5fff1d33311e02 | 7,202 | py | Python | dao/ERC20Initializer.py | caleb-berry/dao | 8898e85eca5e052eb7545b9a41862048c1d9932f | [
"Apache-2.0"
] | 9 | 2021-12-18T16:48:36.000Z | 2022-02-15T17:54:07.000Z | dao/ERC20Initializer.py | caleb-berry/dao | 8898e85eca5e052eb7545b9a41862048c1d9932f | [
"Apache-2.0"
] | 20 | 2021-12-16T13:47:42.000Z | 2022-03-17T17:39:49.000Z | dao/ERC20Initializer.py | bugout-dev/dao | d6e089d32ecd54a5bfd3b31f98e582528b201f15 | [
"Apache-2.0"
] | null | null | null | # Code generated by moonworm : https://github.com/bugout-dev/moonworm
# Moonworm version : 0.1.14
import argparse
import json
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from brownie import Contract, network, project
from brownie.network.contract import ContractContainer
from eth_typing.evm import ChecksumAddress
PROJECT_DIRECTORY = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
BUILD_DIRECTORY = os.path.join(PROJECT_DIRECTORY, "build", "contracts")
def boolean_argument_type(raw_value: str) -> bool:
TRUE_VALUES = ["1", "t", "y", "true", "yes"]
FALSE_VALUES = ["0", "f", "n", "false", "no"]
if raw_value.lower() in TRUE_VALUES:
return True
elif raw_value.lower() in FALSE_VALUES:
return False
raise ValueError(
f"Invalid boolean argument: {raw_value}. Value must be one of: {','.join(TRUE_VALUES + FALSE_VALUES)}"
)
def bytes_argument_type(raw_value: str) -> bytes:
return raw_value.encode()
def get_abi_json(abi_name: str) -> List[Dict[str, Any]]:
abi_full_path = os.path.join(BUILD_DIRECTORY, f"{abi_name}.json")
if not os.path.isfile(abi_full_path):
raise IOError(
f"File does not exist: {abi_full_path}. Maybe you have to compile the smart contracts?"
)
with open(abi_full_path, "r") as ifp:
build = json.load(ifp)
abi_json = build.get("abi")
if abi_json is None:
raise ValueError(f"Could not find ABI definition in: {abi_full_path}")
return abi_json
def contract_from_build(abi_name: str) -> ContractContainer:
# This is workaround because brownie currently doesn't support loading the same project multiple
# times. This causes problems when using multiple contracts from the same project in the same
# python project.
PROJECT = project.main.Project("moonworm", Path(PROJECT_DIRECTORY))
abi_full_path = os.path.join(BUILD_DIRECTORY, f"{abi_name}.json")
if not os.path.isfile(abi_full_path):
raise IOError(
f"File does not exist: {abi_full_path}. Maybe you have to compile the smart contracts?"
)
with open(abi_full_path, "r") as ifp:
build = json.load(ifp)
return ContractContainer(PROJECT, build)
class ERC20Initializer:
def __init__(self, contract_address: Optional[ChecksumAddress]):
self.contract_name = "ERC20Initializer"
self.address = contract_address
self.contract = None
self.abi = get_abi_json("ERC20Initializer")
if self.address is not None:
self.contract: Optional[Contract] = Contract.from_abi(
self.contract_name, self.address, self.abi
)
def deploy(self, transaction_config):
contract_class = contract_from_build(self.contract_name)
deployed_contract = contract_class.deploy(transaction_config)
self.address = deployed_contract.address
self.contract = deployed_contract
def assert_contract_is_instantiated(self) -> None:
if self.contract is None:
raise Exception("contract has not been instantiated")
def verify_contract(self):
self.assert_contract_is_instantiated()
contract_class = contract_from_build(self.contract_name)
contract_class.publish_source(self.contract)
def init(self, transaction_config) -> Any:
self.assert_contract_is_instantiated()
return self.contract.init(transaction_config)
def get_transaction_config(args: argparse.Namespace) -> Dict[str, Any]:
signer = network.accounts.load(args.sender, args.password)
transaction_config: Dict[str, Any] = {"from": signer}
if args.gas_price is not None:
transaction_config["gas_price"] = args.gas_price
if args.max_fee_per_gas is not None:
transaction_config["max_fee"] = args.max_fee_per_gas
if args.max_priority_fee_per_gas is not None:
transaction_config["priority_fee"] = args.max_priority_fee_per_gas
if args.confirmations is not None:
transaction_config["required_confs"] = args.confirmations
if args.nonce is not None:
transaction_config["nonce"] = args.nonce
return transaction_config
def add_default_arguments(parser: argparse.ArgumentParser, transact: bool) -> None:
parser.add_argument(
"--network", required=True, help="Name of brownie network to connect to"
)
parser.add_argument(
"--address", required=False, help="Address of deployed contract to connect to"
)
if not transact:
return
parser.add_argument(
"--sender", required=True, help="Path to keystore file for transaction sender"
)
parser.add_argument(
"--password",
required=False,
help="Password to keystore file (if you do not provide it, you will be prompted for it)",
)
parser.add_argument(
"--gas-price", default=None, help="Gas price at which to submit transaction"
)
parser.add_argument(
"--max-fee-per-gas",
default=None,
help="Max fee per gas for EIP1559 transactions",
)
parser.add_argument(
"--max-priority-fee-per-gas",
default=None,
help="Max priority fee per gas for EIP1559 transactions",
)
parser.add_argument(
"--confirmations",
type=int,
default=None,
help="Number of confirmations to await before considering a transaction completed",
)
parser.add_argument(
"--nonce", type=int, default=None, help="Nonce for the transaction (optional)"
)
def handle_deploy(args: argparse.Namespace) -> None:
network.connect(args.network)
transaction_config = get_transaction_config(args)
contract = ERC20Initializer(None)
result = contract.deploy(transaction_config=transaction_config)
print(result)
def handle_verify_contract(args: argparse.Namespace) -> None:
network.connect(args.network)
contract = ERC20Initializer(args.address)
result = contract.verify_contract()
print(result)
def handle_init(args: argparse.Namespace) -> None:
network.connect(args.network)
contract = ERC20Initializer(args.address)
transaction_config = get_transaction_config(args)
result = contract.init(transaction_config=transaction_config)
print(result)
def generate_cli() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="CLI for ERC20Initializer")
parser.set_defaults(func=lambda _: parser.print_help())
subcommands = parser.add_subparsers()
deploy_parser = subcommands.add_parser("deploy")
add_default_arguments(deploy_parser, True)
deploy_parser.set_defaults(func=handle_deploy)
verify_contract_parser = subcommands.add_parser("verify-contract")
add_default_arguments(verify_contract_parser, False)
verify_contract_parser.set_defaults(func=handle_verify_contract)
init_parser = subcommands.add_parser("init")
add_default_arguments(init_parser, True)
init_parser.set_defaults(func=handle_init)
return parser
def main() -> None:
parser = generate_cli()
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
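# ---------------------------------------------------------------------------
# Editor's sketch (hedged): example invocations assembled from the argparse
# definitions above. Network name, keystore path and contract address are
# placeholders.
#   python ERC20Initializer.py deploy --network mainnet --sender path/to/keystore
#   python ERC20Initializer.py init --network mainnet --address 0xYourContract --sender path/to/keystore
#   python ERC20Initializer.py verify-contract --network mainnet --address 0xYourContract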
| 34.132701 | 110 | 0.701611 |
4a250214e0a1d9d5bdec4bf3d75be27517fa9e33 | 530 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/kikter-34297 | c00388be0a9a1fa83c441d3dc94b4c9f88eaeb88 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/kikter-34297 | c00388be0a9a1fa83c441d3dc94b4c9f88eaeb88 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/kikter-34297 | c00388be0a9a1fa83c441d3dc94b4c9f88eaeb88 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "kikter-34297.botics.co"
site_params = {
"name": "kikter",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 20.384615 | 61 | 0.65283 |
4a25026959c078c3ac209b1a8245de320efc3aea | 7,456 | py | Python | tests/components/motion_blinds/test_config_flow.py | dlintott/core | a6c83cc46a34084fdc4c0e7221b6ba493f82cbac | [
"Apache-2.0"
] | 1 | 2021-01-27T08:47:19.000Z | 2021-01-27T08:47:19.000Z | tests/components/motion_blinds/test_config_flow.py | dlintott/core | a6c83cc46a34084fdc4c0e7221b6ba493f82cbac | [
"Apache-2.0"
] | 36 | 2021-02-08T08:20:27.000Z | 2022-03-31T06:06:05.000Z | tests/components/motion_blinds/test_config_flow.py | dlintott/core | a6c83cc46a34084fdc4c0e7221b6ba493f82cbac | [
"Apache-2.0"
] | 1 | 2021-01-01T02:35:17.000Z | 2021-01-01T02:35:17.000Z | """Test the Motion Blinds config flow."""
import socket
import pytest
from homeassistant import config_entries
from homeassistant.components.motion_blinds.config_flow import DEFAULT_GATEWAY_NAME
from homeassistant.components.motion_blinds.const import DOMAIN
from homeassistant.const import CONF_API_KEY, CONF_HOST
from tests.async_mock import Mock, patch
TEST_HOST = "1.2.3.4"
TEST_HOST2 = "5.6.7.8"
TEST_API_KEY = "12ab345c-d67e-8f"
TEST_MAC = "ab:cd:ef:gh"
TEST_MAC2 = "ij:kl:mn:op"
TEST_DEVICE_LIST = {TEST_MAC: Mock()}
TEST_DISCOVERY_1 = {
TEST_HOST: {
"msgType": "GetDeviceListAck",
"mac": TEST_MAC,
"deviceType": "02000002",
"ProtocolVersion": "0.9",
"token": "12345A678B9CDEFG",
"data": [
{"mac": "abcdefghujkl", "deviceType": "02000002"},
{"mac": "abcdefghujkl0001", "deviceType": "10000000"},
{"mac": "abcdefghujkl0002", "deviceType": "10000000"},
],
}
}
TEST_DISCOVERY_2 = {
TEST_HOST: {
"msgType": "GetDeviceListAck",
"mac": TEST_MAC,
"deviceType": "02000002",
"ProtocolVersion": "0.9",
"token": "12345A678B9CDEFG",
"data": [
{"mac": "abcdefghujkl", "deviceType": "02000002"},
{"mac": "abcdefghujkl0001", "deviceType": "10000000"},
],
},
TEST_HOST2: {
"msgType": "GetDeviceListAck",
"mac": TEST_MAC2,
"deviceType": "02000002",
"ProtocolVersion": "0.9",
"token": "12345A678B9CDEFG",
"data": [
{"mac": "abcdefghujkl", "deviceType": "02000002"},
{"mac": "abcdefghujkl0001", "deviceType": "10000000"},
],
},
}
@pytest.fixture(name="motion_blinds_connect", autouse=True)
def motion_blinds_connect_fixture():
"""Mock motion blinds connection and entry setup."""
with patch(
"homeassistant.components.motion_blinds.gateway.MotionGateway.GetDeviceList",
return_value=True,
), patch(
"homeassistant.components.motion_blinds.gateway.MotionGateway.Update",
return_value=True,
), patch(
"homeassistant.components.motion_blinds.gateway.MotionGateway.device_list",
TEST_DEVICE_LIST,
), patch(
"homeassistant.components.motion_blinds.config_flow.MotionDiscovery.discover",
return_value=TEST_DISCOVERY_1,
), patch(
"homeassistant.components.motion_blinds.async_setup_entry", return_value=True
):
yield
async def test_config_flow_manual_host_success(hass):
"""Successful flow manually initialized by the user."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST},
)
assert result["type"] == "form"
assert result["step_id"] == "connect"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_API_KEY: TEST_API_KEY},
)
assert result["type"] == "create_entry"
assert result["title"] == DEFAULT_GATEWAY_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_API_KEY: TEST_API_KEY,
}
async def test_config_flow_discovery_1_success(hass):
"""Successful flow with 1 gateway discovered."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result["type"] == "form"
assert result["step_id"] == "connect"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_API_KEY: TEST_API_KEY},
)
assert result["type"] == "create_entry"
assert result["title"] == DEFAULT_GATEWAY_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_API_KEY: TEST_API_KEY,
}
async def test_config_flow_discovery_2_success(hass):
"""Successful flow with 2 gateway discovered."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch(
"homeassistant.components.motion_blinds.config_flow.MotionDiscovery.discover",
return_value=TEST_DISCOVERY_2,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result["type"] == "form"
assert result["step_id"] == "select"
assert result["data_schema"].schema["select_ip"].container == [
TEST_HOST,
TEST_HOST2,
]
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"select_ip": TEST_HOST2},
)
assert result["type"] == "form"
assert result["step_id"] == "connect"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_API_KEY: TEST_API_KEY},
)
assert result["type"] == "create_entry"
assert result["title"] == DEFAULT_GATEWAY_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST2,
CONF_API_KEY: TEST_API_KEY,
}
async def test_config_flow_connection_error(hass):
"""Failed flow manually initialized by the user with connection timeout."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: TEST_HOST},
)
assert result["type"] == "form"
assert result["step_id"] == "connect"
assert result["errors"] is None
with patch(
"homeassistant.components.motion_blinds.gateway.MotionGateway.GetDeviceList",
side_effect=socket.timeout,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_API_KEY: TEST_API_KEY},
)
assert result["type"] == "abort"
assert result["reason"] == "connection_error"
async def test_config_flow_discovery_fail(hass):
"""Failed flow with no gateways discovered."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
with patch(
"homeassistant.components.motion_blinds.config_flow.MotionDiscovery.discover",
return_value={},
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "discovery_error"}
| 29.943775 | 86 | 0.635461 |
4a2502ab4483fab98f449c34a0ee6f02b9ba8ab1 | 3,534 | py | Python | pyvizio/api/pair.py | jezzab/pyvizio | 8086f9e5aac49d1d99ade02684ca35c05e03a7eb | [
"MIT"
] | 72 | 2017-08-08T19:32:12.000Z | 2022-03-18T03:18:41.000Z | pyvizio/api/pair.py | raman325/pyvizio | 9cf45fcc9b409caf223a38d8f79c775742ab4127 | [
"MIT"
] | 48 | 2017-09-16T16:37:54.000Z | 2022-01-23T20:43:42.000Z | pyvizio/api/pair.py | ConnectionMaster/pyvizio | 0fe4558557917509d3da3bb24f9221f15ba901ce | [
"MIT"
] | 42 | 2017-09-04T22:59:21.000Z | 2022-03-18T03:18:30.000Z | """Vizio SmartCast API commands and class for pairing."""
from typing import Any, Dict, Union
from pyvizio.api._protocol import ENDPOINT, PairingResponseKey, ResponseKey
from pyvizio.api.base import CommandBase
from pyvizio.helpers import dict_get_case_insensitive
class PairCommandBase(CommandBase):
"""Base pairing command."""
def __init__(self, device_id: str, device_type: str, endpoint: str) -> None:
"""Initialize base pairing command."""
super(PairCommandBase, self).__init__(ENDPOINT[device_type][endpoint])
self.DEVICE_ID: str = device_id
class BeginPairResponse(object):
"""Response from command to begin pairing process."""
def __init__(self, ch_type: str, token: str) -> None:
"""Initialize response from command to begin pairing process."""
self.ch_type: str = ch_type
self.token: str = token
def __repr__(self) -> str:
return f"{type(self).__name__}({self.__dict__})"
def __eq__(self, other) -> bool:
return self is other or self.__dict__ == other.__dict__
class BeginPairCommand(PairCommandBase):
"""Command to begin pairing process."""
def __init__(self, device_id: str, device_name: str, device_type: str) -> None:
"""Initialize command to begin pairing process."""
super().__init__(device_id, device_type, "BEGIN_PAIR")
self.DEVICE_NAME: str = str(device_name)
def process_response(self, json_obj: Dict[str, Any]) -> BeginPairResponse:
"""Return response to command to begin pairing process."""
item = dict_get_case_insensitive(json_obj, ResponseKey.ITEM)
return BeginPairResponse(
dict_get_case_insensitive(item, PairingResponseKey.CHALLENGE_TYPE),
dict_get_case_insensitive(item, PairingResponseKey.PAIRING_REQ_TOKEN),
)
class PairChallengeResponse(object):
"""Response from command to complete pairing process."""
def __init__(self, auth_token: str) -> None:
"""Initialize response from command to complete pairing process."""
self.auth_token = auth_token
def __repr__(self) -> str:
return f"{type(self).__name__}({self.__dict__})"
def __eq__(self, other) -> bool:
return self is other or self.__dict__ == other.__dict__
class PairChallengeCommand(PairCommandBase):
"""Command to complete pairing process."""
def __init__(
self,
device_id: str,
challenge_type: Union[int, str],
pairing_token: Union[int, str],
pin: str,
device_type: str,
) -> None:
"""Initialize command to complete pairing process."""
super().__init__(device_id, device_type, "FINISH_PAIR")
self.CHALLENGE_TYPE = int(challenge_type)
self.PAIRING_REQ_TOKEN = int(pairing_token)
self.RESPONSE_VALUE = str(pin)
def process_response(self, json_obj: Dict[str, Any]) -> PairChallengeResponse:
"""Return response to command to complete pairing process."""
item = dict_get_case_insensitive(json_obj, ResponseKey.ITEM)
return PairChallengeResponse(
dict_get_case_insensitive(item, PairingResponseKey.AUTH_TOKEN)
)
class CancelPairCommand(PairCommandBase):
"""Command to cancel pairing process."""
def __init__(self, device_id, device_name: str, device_type: str) -> None:
"""Initialize command to cancel pairing process."""
super().__init__(device_id, device_type, "CANCEL_PAIR")
self.DEVICE_NAME = str(device_name)
| 34.990099 | 83 | 0.685342 |
4a2503be6992977277d49b6e3836794b00eff7b1 | 10,438 | py | Python | comicapi/comicinfoxml.py | OzzieIsaacs/comicapi | 59e7830f62c5f6e42aea16d171d336814dca826f | [
"Apache-2.0"
] | 2 | 2022-01-22T19:17:11.000Z | 2022-03-16T20:19:20.000Z | comicapi/comicinfoxml.py | OzzieIsaacs/comicapi | 59e7830f62c5f6e42aea16d171d336814dca826f | [
"Apache-2.0"
] | null | null | null | comicapi/comicinfoxml.py | OzzieIsaacs/comicapi | 59e7830f62c5f6e42aea16d171d336814dca826f | [
"Apache-2.0"
] | 1 | 2021-08-02T07:07:01.000Z | 2021-08-02T07:07:01.000Z | """
A python class to encapsulate ComicRack's ComicInfo.xml data
Copyright 2012-2014 Anthony Beville
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import xml.etree.ElementTree as ET
from comicapi.genericmetadata import GenericMetadata
import comicapi.utils
class ComicInfoXml:
writer_synonyms = ['writer', 'plotter', 'scripter']
penciller_synonyms = ['artist', 'penciller', 'penciler', 'breakdowns']
inker_synonyms = ['inker', 'artist', 'finishes']
colorist_synonyms = ['colorist', 'colourist', 'colorer', 'colourer']
letterer_synonyms = ['letterer']
cover_synonyms = ['cover', 'covers', 'coverartist', 'cover artist']
editor_synonyms = ['editor']
def getParseableCredits(self):
parsable_credits = []
parsable_credits.extend(self.writer_synonyms)
parsable_credits.extend(self.penciller_synonyms)
parsable_credits.extend(self.inker_synonyms)
parsable_credits.extend(self.colorist_synonyms)
parsable_credits.extend(self.letterer_synonyms)
parsable_credits.extend(self.cover_synonyms)
parsable_credits.extend(self.editor_synonyms)
return parsable_credits
def metadataFromString(self, string):
tree = ET.ElementTree(ET.fromstring(string))
return self.convertXMLToMetadata(tree)
def stringFromMetadata(self, metadata):
header = '<?xml version="1.0"?>\n'
tree = self.convertMetadataToXML(self, metadata)
        # ET.tostring() returns bytes by default on Python 3; ask for a str so it
        # can be concatenated with the XML declaration header above.
        return header + ET.tostring(tree.getroot(), encoding="unicode")
def indent(self, elem, level=0):
# for making the XML output readable
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def convertMetadataToXML(self, filename, metadata):
# shorthand for the metadata
md = metadata
# build a tree structure
root = ET.Element("ComicInfo")
root.attrib['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
root.attrib['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema"
# helper func
def assign(cix_entry, md_entry):
if md_entry is not None:
ET.SubElement(root, cix_entry).text = u"{0}".format(md_entry)
assign('Title', md.title)
assign('Series', md.series)
assign('Number', md.issue)
assign('Count', md.issueCount)
assign('Volume', md.volume)
assign('AlternateSeries', md.alternateSeries)
assign('AlternateNumber', md.alternateNumber)
assign('StoryArc', md.storyArc)
assign('SeriesGroup', md.seriesGroup)
assign('AlternateCount', md.alternateCount)
assign('Summary', md.comments)
assign('Notes', md.notes)
assign('Year', md.year)
assign('Month', md.month)
assign('Day', md.day)
# need to specially process the credits, since they are structured differently than CIX
credit_writer_list = list()
credit_penciller_list = list()
credit_inker_list = list()
credit_colorist_list = list()
credit_letterer_list = list()
credit_cover_list = list()
credit_editor_list = list()
# first, loop thru credits, and build a list for each role that CIX supports
for credit in metadata.credits:
if credit['role'].lower() in set(self.writer_synonyms):
credit_writer_list.append(credit['person'].replace(",", ""))
if credit['role'].lower() in set(self.penciller_synonyms):
credit_penciller_list.append(credit['person'].replace(",", ""))
if credit['role'].lower() in set(self.inker_synonyms):
credit_inker_list.append(credit['person'].replace(",", ""))
if credit['role'].lower() in set(self.colorist_synonyms):
credit_colorist_list.append(credit['person'].replace(",", ""))
if credit['role'].lower() in set(self.letterer_synonyms):
credit_letterer_list.append(credit['person'].replace(",", ""))
if credit['role'].lower() in set(self.cover_synonyms):
credit_cover_list.append(credit['person'].replace(",", ""))
if credit['role'].lower() in set(self.editor_synonyms):
credit_editor_list.append(credit['person'].replace(",", ""))
# second, convert each list to string, and add to XML struct
if len(credit_writer_list) > 0:
node = ET.SubElement(root, 'Writer')
node.text = comicapi.utils.listToString(credit_writer_list)
if len(credit_penciller_list) > 0:
node = ET.SubElement(root, 'Penciller')
node.text = comicapi.utils.listToString(credit_penciller_list)
if len(credit_inker_list) > 0:
node = ET.SubElement(root, 'Inker')
node.text = comicapi.utils.listToString(credit_inker_list)
if len(credit_colorist_list) > 0:
node = ET.SubElement(root, 'Colorist')
node.text = comicapi.utils.listToString(credit_colorist_list)
if len(credit_letterer_list) > 0:
node = ET.SubElement(root, 'Letterer')
node.text = comicapi.utils.listToString(credit_letterer_list)
if len(credit_cover_list) > 0:
node = ET.SubElement(root, 'CoverArtist')
node.text = comicapi.utils.listToString(credit_cover_list)
if len(credit_editor_list) > 0:
node = ET.SubElement(root, 'Editor')
node.text = comicapi.utils.listToString(credit_editor_list)
assign('Publisher', md.publisher)
assign('Imprint', md.imprint)
assign('Genre', md.genre)
assign('Web', md.webLink)
assign('PageCount', md.pageCount)
assign('LanguageISO', md.language)
assign('Format', md.format)
assign('AgeRating', md.maturityRating)
if md.blackAndWhite is not None and md.blackAndWhite:
ET.SubElement(root, 'BlackAndWhite').text = "Yes"
assign('Manga', md.manga)
assign('Characters', md.characters)
assign('Teams', md.teams)
assign('Locations', md.locations)
assign('ScanInformation', md.scanInfo)
# loop and add the page entries under pages node
if len(md.pages) > 0:
pages_node = ET.SubElement(root, 'Pages')
for page_dict in md.pages:
page_node = ET.SubElement(pages_node, 'Page')
page_node.attrib = page_dict
# self pretty-print
self.indent(root)
# wrap it in an ElementTree instance, and save as XML
tree = ET.ElementTree(root)
return tree
def convertXMLToMetadata(self, tree):
root = tree.getroot()
if root.tag != 'ComicInfo':
raise KeyError("Not a ComicInfo XML!")
# return None
metadata = GenericMetadata()
md = metadata
# Helper function
def xlate(tag):
node = root.find(tag)
if node is not None:
return node.text
else:
return None
md.series = xlate('Series')
md.title = xlate('Title')
md.issue = xlate('Number')
md.issueCount = xlate('Count')
md.volume = xlate('Volume')
md.alternateSeries = xlate('AlternateSeries')
md.alternateNumber = xlate('AlternateNumber')
md.alternateCount = xlate('AlternateCount')
md.comments = xlate('Summary')
md.notes = xlate('Notes')
md.year = xlate('Year')
md.month = xlate('Month')
md.day = xlate('Day')
md.publisher = xlate('Publisher')
md.imprint = xlate('Imprint')
md.genre = xlate('Genre')
md.webLink = xlate('Web')
md.language = xlate('LanguageISO')
md.format = xlate('Format')
md.manga = xlate('Manga')
md.characters = xlate('Characters')
md.teams = xlate('Teams')
md.locations = xlate('Locations')
md.pageCount = xlate('PageCount')
md.scanInfo = xlate('ScanInformation')
md.storyArc = xlate('StoryArc')
md.seriesGroup = xlate('SeriesGroup')
md.maturityRating = xlate('AgeRating')
tmp = xlate('BlackAndWhite')
md.blackAndWhite = False
if tmp is not None and tmp.lower() in ["yes", "true", "1"]:
md.blackAndWhite = True
# Now extract the credit info
for n in root:
if (n.tag == 'Writer' or n.tag == 'Penciller' or n.tag == 'Inker'
or n.tag == 'Colorist' or n.tag == 'Letterer'
or n.tag == 'Editor'):
if n.text is not None:
for name in n.text.split(','):
metadata.addCredit(name.strip(), n.tag)
if n.tag == 'CoverArtist':
if n.text is not None:
for name in n.text.split(','):
metadata.addCredit(name.strip(), "Cover")
# parse page data now
pages_node = root.find("Pages")
if pages_node is not None:
for page in pages_node:
metadata.pages.append(page.attrib)
# print page.attrib
metadata.isEmpty = False
return metadata
def writeToExternalFile(self, filename, metadata):
tree = self.convertMetadataToXML(self, metadata)
tree.write(filename, encoding='utf-8')
def readFromExternalFile(self, filename):
tree = ET.parse(filename)
return self.convertXMLToMetadata(tree)
| 37.014184 | 95 | 0.603372 |
4a2503e4ce2f4db46b32a5ac3f91beb31c971221 | 6,261 | py | Python | clean_fac_staff_data.py | code-ape/SocialJusticeDataProcessing | 7535fe9d510e44b0eb08067b6c526f987917a0f8 | [
"Apache-2.0"
] | null | null | null | clean_fac_staff_data.py | code-ape/SocialJusticeDataProcessing | 7535fe9d510e44b0eb08067b6c526f987917a0f8 | [
"Apache-2.0"
] | null | null | null | clean_fac_staff_data.py | code-ape/SocialJusticeDataProcessing | 7535fe9d510e44b0eb08067b6c526f987917a0f8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import csv, json
import os
import traceback
import settings
import tools
diverse_matcher = {
"Very Diverse": 3, "Moderately Diverse": 2,
"Slightly Diverse": 1, "Not Diverse": 0
}
importance_matcher = {
"Very Important": 3,
"Moderately Important": 2,
"Slightly Important": 1,
"Slightly Unimportant": -1, #contains special ascii character
"Moderately Unimportant": -2,
"Very Unimporant": -3
}
agreement_matcher = {
"Strongly Agree": 3,
"Moderately Agree": 2,
"Slightly Agree": 1,
"Slightly Disagree": -1,
"Moderately Disagree": -2,
"Strongly Disagree": -3
}
def clean_fac_staff_data(fac_staff_data):
fac_staff_clean_key = []
fac_staff_clean_data = []
# strip initial entry that is the question each answer coorelates to
for i in xrange(1, len(fac_staff_data)):
try:
print('.'),
raw_entry = fac_staff_data[i]
clean_entry = []
# ignore timestamp
# position
position_matcher = {"Staff": "staff",
"Non-Tenured Faculty": "nontenured faculty",
"Tenured Faculty": "tenured faculty"}
position = position_matcher[raw_entry[1]]
clean_entry.append(('position', raw_entry[1], position))
# race
race_matcher = {"African American / Black": "black",
"Asian": "asian", "Hispanic / Latino / Latina": "hispanic",
"Non-Hispanic White": "white",
"Pacific Islander / Hawaiian" : "hawaiian", "Not Listed": "not listed"
}
race = race_matcher[raw_entry[2]]
clean_entry.append(('race', raw_entry[2], race))
# bio gender
bio_gender_matcher = {"Female": "female", "Male": "male"}
bio_gender = bio_gender_matcher[raw_entry[3]]
clean_entry.append(('bio_gender', raw_entry[3], bio_gender))
# id gender
id_gender_matcher = {"Man": "male", "Woman": "female",
"Intersexual": "intersexual", "Transgender": "transgender",
"Not Listed": "not listed"}
id_gender = id_gender_matcher[raw_entry[4]]
clean_entry.append(('id_gender', raw_entry[4], id_gender))
# sexuality
sexuality_matcher = {"Asexual": "asexual", "Bisexual": "bisexual",
"Gay": "gay", "Heterosexual": "heterosexual", "Lesbian": "lesbian",
"Questioning": "questioning", "Not Listed": "not listed"
}
sexuality = sexuality_matcher[raw_entry[5]]
clean_entry.append(('sexuality', raw_entry[5], sexuality))
# years at E&H
years_working_matcher = {"1st year": 1, "2-5": 3.5, "6-10": 8,
"11-19": 15, "20+": 25
}
years_working = years_working_matcher[raw_entry[6]]
clean_entry.append(('years_working', raw_entry[6], years_working))
# division
division_matcher = {"Humanities": "humanities",
"Life Sciences": "life sciences",
"Social Sciences": "social sciences", "": None
}
division = division_matcher[raw_entry[7]]
clean_entry.append(('division', raw_entry[7], division))
# student body diversity perception
student_body_diversity_perception = diverse_matcher[raw_entry[8]]
clean_entry.append(("student_body_diversity_perception", raw_entry[8],
student_body_diversity_perception))
# student faculty staff diversity perception
student_fac_staff_diversity_perception = diverse_matcher[raw_entry[9]]
clean_entry.append(('student_fac_staff_diversity_perception', raw_entry[9],
student_fac_staff_diversity_perception))
# diversity importance
diversity_importance = importance_matcher[raw_entry[10]]
clean_entry.append(('diversity_importance', raw_entry[10], diversity_importance))
# experience loop
categories = (
'diversity_emphesis', 'race_experience', 'financial_experience',
'religion_experience', 'gender_experience', 'sexuality_experience',
'safe_in_buildings', 'safe_walking', 'asking_me_for_help',
'help_availability', 'student_of_diff_race_seek_help',
'greek_life_discriminiation', 'non_greek_life_discriminiation',
'athletics_discrimination', 'non_athletics_discrimination',
'prc_access'
)
number = 11
for cat in categories:
raw_val = raw_entry[number]
clean_val = agreement_matcher[raw_val]
clean_entry.append((cat, raw_val, clean_val))
number += 1
fac_staff_clean_data.append(clean_entry)
except Exception as e:
print("\nProcessing failed for entry {}".format(i))
traceback.print_exc()
raise(e)
return fac_staff_clean_data
def main():
print("Loading fac_staff raw csv.")
fac_staff_data = []
with open(settings.fac_staff_raw_path, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
fac_staff_data.append(row)
fac_staff_clean_data = clean_fac_staff_data(fac_staff_data)
print('\nFinished processing {} fac_staff responses.'.format(len(fac_staff_clean_data)))
# tools.print_first(3, fac_staff_clean_data)
# deleting old clean data
if os.path.exists(settings.fac_staff_clean_path):
print("Deleting old clean data.")
os.remove(settings.fac_staff_clean_path)
else:
print("No old clean data to delete.")
print("Writing data to: {}".format(settings.fac_staff_clean_path))
try:
with open(settings.fac_staff_clean_path, "w") as f:
f.write(json.dumps(fac_staff_clean_data))
except Exception as e:
print("Failed to write clean fac_staff data!")
raise e
if __name__ == "__main__":
print("Starting clean_data.py\n")
main()
print("\nExiting clean_data.py")
| 35.174157 | 93 | 0.598467 |
4a2503e61a636e8f8bcb04a5ab77cb9812bff9ca | 6,689 | py | Python | huskar_api/models/auth/team.py | mowangdk/huskar | 7692fbc5672a5ae6e2a33616c493466a7137f8cd | [
"MIT"
] | 59 | 2019-10-31T10:50:10.000Z | 2021-11-26T04:32:25.000Z | huskar_api/models/auth/team.py | mowangdk/huskar | 7692fbc5672a5ae6e2a33616c493466a7137f8cd | [
"MIT"
] | 5 | 2019-10-31T10:37:30.000Z | 2020-03-02T06:45:46.000Z | huskar_api/models/auth/team.py | mowangdk/huskar | 7692fbc5672a5ae6e2a33616c493466a7137f8cd | [
"MIT"
] | 9 | 2019-10-31T10:35:00.000Z | 2019-12-01T14:13:58.000Z | from __future__ import absolute_import
import logging
from sqlalchemy import Column, Integer, Unicode, UniqueConstraint
from sqlalchemy.dialects.mysql import TINYINT
from sqlalchemy.exc import IntegrityError
# TODO Do not use this base exception in future
from huskar_api.service.exc import HuskarApiException
from huskar_api.models.db import UpsertMixin
from huskar_api.models.signals import (
team_will_be_archived, team_will_be_deleted)
from huskar_api.models import (
DeclarativeBase, CacheMixin, DBSession, cache_on_arguments)
from huskar_api.models.exceptions import NameOccupiedError
from .user import User
from .role import Authority
logger = logging.getLogger(__name__)
class Team(CacheMixin, DeclarativeBase):
"""The team which organized applications of Huskar."""
__tablename__ = 'team'
#: The team name which serves minimal mode
DEFAULT_NAME = 'default'
STATUS_ACTIVE = 0
STATUS_ARCHIVED = 1
id = Column(Integer, primary_key=True)
team_name = Column(Unicode(32, collation='utf8mb4_bin'),
nullable=False, unique=True)
team_desc = Column(Unicode(128, collation='utf8mb4_bin'))
status = Column(TINYINT, nullable=False, default=STATUS_ACTIVE)
@classmethod
def get_by_name(cls, name):
team_id = cls.get_id_by_name(name)
if team_id is not None:
return cls.get(team_id)
@classmethod
@cache_on_arguments(5 * 60)
def get_id_by_name(cls, name):
cond = (
(cls.team_name == name) &
(cls.status == cls.STATUS_ACTIVE)
)
return DBSession().query(cls.id).filter(cond).scalar()
@classmethod
@cache_on_arguments(5 * 60)
def get_all_ids(cls):
rs = DBSession().query(cls.id) \
.filter_by(status=cls.STATUS_ACTIVE).all()
return sorted(r[0] for r in rs)
@classmethod
def get_multi_by_admin(cls, user_id):
ids = TeamAdmin.get_team_ids(user_id)
return cls.mget(ids)
@classmethod
def get_all(cls):
ids = cls.get_all_ids()
return cls.mget(ids)
@classmethod
def create(cls, name, desc=None):
"""Creates a new team.
:param name: The unique name of team.
:param desc: The readable name of team.
:returns: The instance of :class:`Team`.
"""
try:
with DBSession().close_on_exit(False) as db:
instance = cls(team_name=name, team_desc=desc or name)
db.add(instance)
except IntegrityError:
raise NameOccupiedError
cls.flush([instance.id])
cls.get_id_by_name.flush(instance.team_name)
cls.get_all_ids.flush()
return instance
def _set_status(self, status):
with DBSession().close_on_exit(False):
self.status = status
self.__class__.flush([self.id])
self.__class__.get_all_ids.flush()
self.__class__.get_id_by_name.flush(self.team_name)
@classmethod
def delete(cls, team_id):
team = cls.get(team_id)
with DBSession().close_on_exit(False) as db:
team_will_be_deleted.send(cls, db=db, team_id=team_id)
for user_id in TeamAdmin.get_user_ids(team_id):
TeamAdmin.discard(team_id, user_id)
db.query(cls).filter_by(id=team_id).delete()
        if team is not None:  # can be skipped safely (instance will be erased)
cls.get_id_by_name.flush(team.team_name)
cls.get_all_ids.flush()
cls.flush([team_id])
def rename_desc(self, new_desc):
with DBSession().close_on_exit(False):
self.team_desc = new_desc
self.__class__.flush([self.id])
def archive(self):
team_will_be_archived.send(
self.__class__, db=DBSession(), team_id=self.id)
self._set_status(self.STATUS_ARCHIVED)
def unarchive(self):
self._set_status(self.STATUS_ACTIVE)
@property
def is_active(self):
return self.status == self.STATUS_ACTIVE
def check_is_admin(self, user_id):
"""Checks a user is admin of this team or not."""
ids = TeamAdmin.get_user_ids(self.id)
return int(user_id) in ids
def list_admin(self):
"""Get the list of admin users for this team.
:returns: The list of :class:`User`.
"""
ids = TeamAdmin.get_user_ids(self.id)
return User.mget(ids)
def grant_admin(self, user_id):
"""Grants user as an admin for this team.
:param user_id: The id of user.
"""
TeamAdmin.ensure(self.id, user_id)
def dismiss_admin(self, user_id):
"""Dismisses user's admin role for this team.
:param user_id: The id of user.
"""
TeamAdmin.discard(self.id, user_id)
def check_auth(self, authority, user_id):
assert authority in Authority
return self.check_is_admin(user_id)
class TeamAdmin(CacheMixin, UpsertMixin, DeclarativeBase):
__tablename__ = 'team_admin'
__table_args__ = (
UniqueConstraint(
'user_id', 'team_id', name='uq_team_admin',
),
DeclarativeBase.__table_args__,
)
id = Column(Integer, primary_key=True)
team_id = Column(Integer, nullable=False, index=True)
user_id = Column(Integer, nullable=False)
@classmethod
@cache_on_arguments(5 * 60)
def get_user_ids(cls, team_id):
rs = DBSession().query(cls.user_id) \
.filter_by(team_id=team_id) \
.order_by(cls.id.asc()) \
.all()
return [r[0] for r in rs]
@classmethod
@cache_on_arguments(5 * 60)
def get_team_ids(cls, user_id):
rs = DBSession().query(cls.team_id) \
.filter_by(user_id=user_id) \
.order_by(cls.id.asc()) \
.all()
return [r[0] for r in rs]
@classmethod
def flush_by(cls, team_id, user_id):
cls.get_user_ids.flush(team_id)
cls.get_team_ids.flush(user_id)
@classmethod
def ensure(cls, team_id, user_id):
stmt = cls.upsert().values(team_id=team_id, user_id=user_id)
with DBSession().close_on_exit(False) as db:
db.execute(stmt)
cls.flush_by(team_id, user_id)
@classmethod
def discard(cls, team_id, user_id):
stmt = cls.__table__.delete().where(
(cls.team_id == team_id) & (cls.user_id == user_id))
with DBSession().close_on_exit(False) as db:
db.execute(stmt)
cls.flush_by(team_id, user_id)
class TeamNotEmptyError(HuskarApiException):
pass
| 31.257009 | 76 | 0.630588 |
4a2504187fce9e06f3d156d71a6a17ce9a0c4ea0 | 5,808 | py | Python | cool_netstat.py | kilbouri/cool-netstat | bf35ffc8d343cbc88fed834d856fed6beb66600b | [
"Unlicense"
] | null | null | null | cool_netstat.py | kilbouri/cool-netstat | bf35ffc8d343cbc88fed834d856fed6beb66600b | [
"Unlicense"
] | null | null | null | cool_netstat.py | kilbouri/cool-netstat | bf35ffc8d343cbc88fed834d856fed6beb66600b | [
"Unlicense"
] | null | null | null | ''' Configuration '''
interval = 10 # number of minutes between completion of a test and starting a new one
outfile = "results.txt" # the path/file to output results in
show_res = False # show result after each test?
''' Dependencies:
speedtest pip3 install speedtest-cli
'''
''' Program '''
from time import localtime, sleep
import json
import threading
try:
# import the speedtest module from speedtest-cli
import speedtest as st
except ImportError:
print("\nFailed to import module \"speedtest\". You can install it with:")
print("\tpip3 install speedtest-cli")
exit()
results = list()
num_to_day = { # will be used to translate a day (from time.localtime)
0: "Monday",
1: "Tuesday",
2: "Wednesday",
3: "Thursday",
4: "Friday",
5: "Saturday",
6: "Sunday"
}
def bits_to_megabits(bits) -> float:
return bits/1000/1000
def run_speedtest(printRes) -> dict:
'''
Runs a speed test and returns a dictionary result
'''
t = st.Speedtest()
start = localtime()
ping = t.results.ping
up = bits_to_megabits(t.upload())
dn = bits_to_megabits(t.download())
end = localtime()
# assemble tuple
result = {
"ping": ping,
"upload": up,
"download": dn,
"t_started": {
"weekday": start[6],
"hour": start[3],
"minute": start[4],
"second": start[5]
},
"t_finished": {
"weekday": end[6],
"hour": end[3],
"minute": end[4],
"second": end[5]
}
}
if printRes:
# print the results of the most recent test
print(f"\tPing: %d ms" % ping)
print(f"\tDown: %.2f Mbps" % dn)
print(f"\t Up: %.2f Mbps" % up)
return result
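# Editor's note: shape of the dict returned by run_speedtest(); the numbers are
# illustrative only.
#   {"ping": 14.2, "upload": 11.83, "download": 94.07,
#    "t_started": {"weekday": 2, "hour": 13, "minute": 5, "second": 41},
#    "t_finished": {"weekday": 2, "hour": 13, "minute": 6, "second": 20}}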
def make_time_string(result) -> str:
'''
Creates a 24-hour time string (hh:mm) from a given speedtest result
'''
hour = result["t_started"]["hour"]
minute = result["t_started"]["minute"]
if hour < 10:
string = "0" + str(hour)
else:
string = str(hour)
string += ":"
if minute < 10:
string += "0" + str(minute)
else:
string += str(minute)
return string
def create_stat_string(name, average, best_result, worst_result, key, unit="") -> str:
	'''
	Creates a string to be printed that displays the best, worst, and average of
	a given statistic (looked up in the result dicts via `key`) in a
	human-readable format
	'''
	best = best_result[key]
	worst = worst_result[key]
	t_best = make_time_string(best_result)
	t_worst = make_time_string(worst_result)
	string = "\t" + name + "\n"
	string += ("\t\tAverage: %.2f" % average) + f"{unit}\n"
	string += ("\t\t   Best: %.2f" % best) + f"{unit} (Time: " + t_best + ")\n"
	string += ("\t\t  Worst: %.2f" % worst) + f"{unit} (Time: " + t_worst + ")\n"
	return string
if __name__ == "__main__":
completed = 0
print("Running network speed tests. Use [Ctrl/Cmd + C] to stop data collection.")
while (True):
try:
results.append(run_speedtest(show_res))
print(f"Completed tests: {completed + 1}", end="\n")
completed += 1
sleep(60 * interval)
except KeyboardInterrupt:
print("Stopping data collection. Please wait for data compilation.")
break
res_by_day = dict()
for result in results:
if result["t_started"]["weekday"] not in res_by_day.keys():
res_by_day[result["t_started"]["weekday"]] = list()
res_by_day[result["t_started"]["weekday"]].append(result)
stats_by_day = dict()
for day in range(0, 7):
if day not in res_by_day.keys():
continue
# stats for each weekday
count = len(res_by_day[day])
worst_ping_result = res_by_day[day][0]
best_ping_result = res_by_day[day][0]
avg_ping = 0
best_up_result = res_by_day[day][0]
worst_up_result = res_by_day[day][0]
avg_up = 0
best_down_result = res_by_day[day][0]
worst_down_result = res_by_day[day][0]
avg_down = 0
# determine the day's stats
for result in res_by_day[day]:
ping = result["ping"]
up = result["upload"]
down = result["download"]
# ping -> lower is better
if (worst_down_result["ping"] < ping):
worst_ping_result = result
if (ping < best_ping_result["ping"]):
best_ping_result = result
avg_ping += ping
# upload -> higher is better
if (up < worst_up_result["upload"]):
worst_up_result = result
if (best_up_result["upload"] < up):
best_up_result = result
avg_up += up
# download -> higher is better
if (down < worst_down_result["download"]):
worst_down_result = result
if (best_down_result["download"] < down):
best_down_result = result
avg_down += down
avg_ping = float(avg_ping) / count
avg_up = float(avg_up) / count
avg_down = float(avg_down) / count
# store the day's stats
stats_by_day[num_to_day[day]] = {
"ping": {
"best": best_ping_result,
"worst": worst_ping_result,
"average": avg_ping
},
"upload": {
"best": best_up_result,
"worst": worst_up_result,
"average": avg_up
},
"download": {
"best": best_down_result,
"worst": worst_down_result,
"average": avg_down
}
}
# output the stats of the weekday
print(f"Stats for {num_to_day[day]}:")
best_ping = best_ping_result["ping"]
worst_ping = worst_ping_result["ping"]
print(create_stat_string("Ping", avg_ping, best_ping, worst_ping, unit="ms"))
best_up = best_up_result["upload"]
worst_up = worst_up_result["upload"]
print(create_stat_string("Upload", avg_up, best_up, worst_up, unit="Mbps"))
best_down = best_down_result["download"]
worst_down = worst_down_result["download"]
print(create_stat_string("Download", avg_down, best_down, worst_down, unit="Mbps"))
with(open(outfile, "w")) as out:
final_results = {
"stats": stats_by_day,
"datapoints": results
}
out.write(json.dumps(final_results, indent=4))
print("Data has been logged in " + outfile) | 25.928571 | 96 | 0.634298 |
4a25049fb5b6031c0db1d5cdf12f97fb219dae07 | 2,937 | py | Python | check_jenkins_job_buildable.py | stdevel/nagios-plugins | 5ea0e186fa6fdd0e70681c7fed02c6d46d50bbb5 | [
"IBM-pibs",
"Apache-1.1"
] | null | null | null | check_jenkins_job_buildable.py | stdevel/nagios-plugins | 5ea0e186fa6fdd0e70681c7fed02c6d46d50bbb5 | [
"IBM-pibs",
"Apache-1.1"
] | null | null | null | check_jenkins_job_buildable.py | stdevel/nagios-plugins | 5ea0e186fa6fdd0e70681c7fed02c6d46d50bbb5 | [
"IBM-pibs",
"Apache-1.1"
] | null | null | null | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2017-06-22 22:29:37 +0200 (Thu, 22 Jun 2017)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Nagios Plugin to check if a Jenkins job is set to buildable via the Rest API
The --password switch accepts either a password or an API token
Tested on Jenkins 2.60.1
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import traceback
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon import RestNagiosPlugin
from harisekhon.utils import validate_chars, ERRORS
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.1'
class CheckJenkinsJob(RestNagiosPlugin):
def __init__(self):
# Python 2.x
super(CheckJenkinsJob, self).__init__()
# Python 3.x
# super().__init__()
self.name = 'Jenkins'
self.default_port = 8080
self.json = True
self.msg = self.name + ' job '
self.path = None
self.job = None
self.list_jobs = False
self.age = None
def add_options(self):
super(CheckJenkinsJob, self).add_options()
self.add_opt('-j', '--job', help='Job name to check')
self.add_opt('-l', '--list', action='store_true', help='List jobs and exit')
self.add_thresholds(default_warning=3600, default_critical=7200)
def process_options(self):
super(CheckJenkinsJob, self).process_options()
self.job = self.get_opt('job')
self.list_jobs = self.get_opt('list')
if self.list_jobs:
self.path = '/api/json'
else:
validate_chars(self.job, 'job', r'A-Za-z0-9\s\._-')
self.path = '/job/{job}/api/json'.format(job=self.job)
self.msg += "'{job}' is ".format(job=self.job)
self.validate_thresholds(integer=False, optional=True)
def parse_json(self, json_data):
if self.list_jobs:
print('Jenkins Jobs:\n')
for job in json_data['jobs']:
print(job['name'])
sys.exit(ERRORS['UNKNOWN'])
displayname = json_data['displayName']
assert displayname == self.job
buildable = json_data['buildable']
if not buildable:
self.critical()
self.msg += 'not '
self.msg += 'buildable'
if __name__ == '__main__':
CheckJenkinsJob().main()
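# ---------------------------------------------------------------------------
# Editor's sketch (hedged): example invocations. --job/--list are defined
# above; the host/port switches are assumed to come from the shared
# harisekhon pylib base class, so treat them as illustrative.
#   ./check_jenkins_job_buildable.py --list -H jenkins.example.com -P 8080
#   ./check_jenkins_job_buildable.py -j 'my-build-job' -H jenkins.example.com -P 8080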
| 29.079208 | 84 | 0.642492 |
4a25049ff96c819bf75953ad0505149aa6263666 | 2,705 | py | Python | malaya_speech/utils/char.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | null | null | null | malaya_speech/utils/char.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | null | null | null | malaya_speech/utils/char.py | dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | [
"MIT"
] | 1 | 2021-08-19T02:34:41.000Z | 2021-08-19T02:34:41.000Z | import six
from typing import List
PAD = '<PAD>'
EOS = '<EOS>'
RESERVED_TOKENS = [PAD, EOS]
NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
PAD_ID = RESERVED_TOKENS.index(PAD)
EOS_ID = RESERVED_TOKENS.index(EOS)
VOCAB_SIZE = 256
BLANK = 0
def strip_ids(ids, ids_to_strip):
"""Strip ids_to_strip from the end ids."""
ids = list(ids)
while ids and ids[-1] in ids_to_strip:
ids.pop()
return ids
def generate_vocab(strings: List[str]):
"""
Generate character vocab sorted based on frequency.
Parameters
-----------
strings: List[str]
Returns
--------
result: List[str]
"""
joined = ' '.join(strings)
unique_chars = set(joined)
unique_chars = [(c, joined.count(c)) for c in unique_chars]
unique_chars = sorted(
unique_chars, key = lambda element: element[1], reverse = True
)
unique_chars, _ = zip(*unique_chars)
unique_chars = list(unique_chars)
return RESERVED_TOKENS + unique_chars
def encode(
string: str,
add_eos: bool = True,
add_blank: bool = False,
lookup: List[str] = None,
):
"""
Encode string to integer representation based on ascii table or lookup variable.
Parameters
-----------
string: str
add_eos: bool, optional (default=True)
add EOS token at the end of encoded.
add_blank: bool, optional (default=False)
add BLANK token at the starting of encoded, this is for transducer / transformer based.
lookup: List[str], optional (default=None)
list of unique strings.
Returns
--------
result: List[int]
"""
if lookup:
if len(lookup) != len(set(lookup)):
raise ValueError('lookup must be a list of unique strings')
r = [lookup.index(c) for c in string]
else:
r = [c + NUM_RESERVED_TOKENS for c in string.encode('utf-8')]
if add_eos:
r = r + [1]
if add_blank:
r = [BLANK] + r
return r
def decode(ids, lookup: List[str] = None):
"""
Decode integer representation to string based on ascii table or lookup variable.
Parameters
-----------
ids: List[int]
lookup: List[str], optional (default=None)
list of unique strings.
Returns
--------
result: str
"""
decoded_ids = []
int2byte = six.int2byte
for id_ in ids:
if 0 <= id_ < NUM_RESERVED_TOKENS:
decoded_ids.append(RESERVED_TOKENS[int(id_)])
else:
if lookup:
decoded_ids.append(lookup[id_])
else:
decoded_ids.append(
int2byte(id_ - NUM_RESERVED_TOKENS).decode('utf-8')
)
return ''.join(decoded_ids)
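# ---------------------------------------------------------------------------
# Editor's sketch (hedged): round-trip examples for the helpers above; the
# input strings are illustrative.
#   encode('hi', add_eos=False)          -> [106, 107]  (ord(c) + NUM_RESERVED_TOKENS)
#   decode(encode('hi', add_eos=False))  -> 'hi'
#   vocab = generate_vocab(['hi there'])  # ['<PAD>', '<EOS>'] + chars by frequency
#   decode(encode('hi', add_eos=False, lookup=vocab), lookup=vocab) -> 'hi'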
| 24.816514 | 95 | 0.6 |
4a2504dcc154ee18bdb32d4b4601266999cb63b0 | 17,798 | py | Python | redtubeapi/utils.py | Mattlk13/RedTubeAPI | 842c2251188c165a47026d169aa7d3e808666fcf | [
"MIT"
] | null | null | null | redtubeapi/utils.py | Mattlk13/RedTubeAPI | 842c2251188c165a47026d169aa7d3e808666fcf | [
"MIT"
] | null | null | null | redtubeapi/utils.py | Mattlk13/RedTubeAPI | 842c2251188c165a47026d169aa7d3e808666fcf | [
"MIT"
] | null | null | null | #!/usr/bin/env
# -*- coding: utf-8 -*-
# author=Kristen
# date = 8/8/16
import sys, os
from collections import Counter, OrderedDict, MutableSet, namedtuple
import logging
import datetime
import re
from logging import FileHandler as _FileHandler
from requests import get
import feedparser
from kgerringrc import *
from operator import attrgetter, itemgetter
__all__ = ['Cache', 'Entry', 'FeedParser', 'OrderedSet', 'RecordExistsError', 'RedException', 'RSS_FEED', '_parse_date_w3dtf', 'all_deleted',
'change_page', 'getCategoriesList', 'getDeletedVideos', 'getRedtubeVideos', 'getStarDetailedList', 'getStarList',
'getTagList', 'get_datetime', 'get_deleted_videos', 'get_entry', 'get_feed', 'get_info', 'odict', 'parse_datestr',
'prune_dict', 'redtuple', 'request', 'row_is_expanded', 'searchVideos', 'username']
startTime = datetime.datetime.fromtimestamp(logging._startTime)
RSS_FEED = RSSFEED = "http://feeds.feedburner.com/redtube/videos"
def get_feed(url=RSS_FEED):
feed = feedparser.parse(RSS_FEED).get('entries')
return feed
def prune_dict(d):
	"""Return a copy of `d` with all None values removed."""
	return {k: v for k, v in d.items() if v is not None}
#def _parse_date_w3dtf(datestr, timetuple=True):
# timezonenames = {
# 'ut' : 0, 'gmt': 0, 'z': 0,
# 'adt': -3, 'ast': -4, 'at': -4,
# 'edt': -4, 'est': -5, 'et': -5,
# 'cdt': -5, 'cst': -6, 'ct': -6,
# 'mdt': -6, 'mst': -7, 'mt': -7,
# 'pdt': -7, 'pst': -8, 'pt': -8,
# 'a' : -1, 'n': 1,
# 'm' : -12, 'y': 12,
# }
# if not datestr.strip():
# return None
# parts = datestr.lower().split('t')
# if len(parts) == 1:
# # This may be a date only, or may be an MSSQL-style date
# parts = parts[0].split()
# if len(parts) == 1:
# # Treat this as a date only
# parts.append('00:00:00z')
# elif len(parts) > 2:
# return None
# date = parts[0].split('-', 2)
# if not date or len(date[0]) != 4:
# return None
# # Ensure that `date` has 3 elements. Using '1' sets the default
# # month to January and the default day to the 1st of the month.
# date.extend(['1'] * (3 - len(date)))
# try:
# year, month, day = [int(i) for i in date]
# except ValueError:
# # `date` may have more than 3 elements or may contain
# # non-integer strings.
# return None
# if parts[1].endswith('z'):
# parts[1] = parts[1][:-1]
# parts.append('z')
# # Append the numeric timezone offset, if any, to parts.
# # If this is an MSSQL-style date then parts[2] already contains
# # the timezone information, so `append()` will not affect it.
# # Add 1 to each value so that if `find()` returns -1 it will be
# # treated as False.
# loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
# loc = loc - 1
# parts.append(parts[1][loc:])
# parts[1] = parts[1][:loc]
# time = parts[1].split(':', 2)
# # Ensure that time has 3 elements. Using '0' means that the
# # minutes and seconds, if missing, will default to 0.
# time.extend(['0'] * (3 - len(time)))
# tzhour = 0
# tzmin = 0
# if parts[2][:1] in ('-', '+'):
# try:
# tzhour = int(parts[2][1:3])
# tzmin = int(parts[2][4:])
# except ValueError:
# return None
# if parts[2].startswith('-'):
# tzhour = tzhour * -1
# tzmin = tzmin * -1
# else:
# tzhour = timezonenames.get(parts[2], 0)
# try:
# hour, minute, second = [int(float(i)) for i in time]
# except ValueError:
# return None
# # Create the datetime object and timezone delta objects
# try:
# stamp = datetime(year, month, day, hour, minute, second)
# except ValueError:
# return None
# delta = timedelta(0, 0, 0, 0, tzmin, tzhour)
# # Return the date and timestamp in a UTC 9-tuple
# if timetuple:
# try:
# return (stamp - delta).utctimetuple()
# except (OverflowError, ValueError):
# return None
# if not timetuple:
# try:
# return (stamp - delta)
# except (OverflowError, ValueError):
# return None
#
#def parse_datestr(datestr):
# return _parse_date_w3dtf(datestr, timetuple=False)
#
class odict(OrderedDict):
def __init__(self, *args, **kwargs):
super(odict, self).__init__(*args, **kwargs)
def __getattr__(self, attr):
return self[attr]
def __repr__(self):
results = []
for k, v in self.items():
item = '"{}": {}'.format(k, v)
results.append(item)
display = ', '.join(results)
final = '{' + display + '}'
return final
class OrderedSet(MutableSet):
SLICE_ALL = slice(None)
"""
An OrderedSet is a custom MutableSet that remembers its order, so that
every entry has an index that can be looked up.
"""
import collections
def __init__(self, iterable=None):
self.items = []
self.map = {}
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.items)
def __getitem__(self, index):
"""
Get the item at a given index.
If `index` is a slice, you will get back that slice of items. If it's
the slice [:], exactly the same object is returned. (If you want an
independent copy of an OrderedSet, use `OrderedSet.copy()`.)
If `index` is an iterable, you'll get the OrderedSet of items
corresponding to those indices. This is similar to NumPy's
"fancy indexing".
"""
if index == self.SLICE_ALL:
return self
elif hasattr(index, '__index__') or isinstance(index, slice):
result = self.items[index]
if isinstance(result, list):
return OrderedSet(result)
else:
return result
elif self.is_iterable(index):
return OrderedSet([self.items[i] for i in index])
else:
raise TypeError("Don't know how to index an OrderedSet by %r" % index)
	@staticmethod
	def is_iterable(obj):
"""
Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables. The same goes for tuples, since they are immutable and therefore
valid entries.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway.
"""
return hasattr(obj, '__iter__') and not isinstance(obj, str) and not isinstance(obj, tuple)
def copy(self):
return OrderedSet(self)
def __getstate__(self):
if len(self) == 0:
# The state can't be an empty list.
# We need to return a truthy value, or else __setstate__ won't be run.
#
# This could have been done more gracefully by always putting the state
# in a tuple, but this way is backwards- and forwards- compatible with
# previous versions of OrderedSet.
return (None,)
else:
return list(self)
def __setstate__(self, state):
if state == (None,):
self.__init__([])
else:
self.__init__(state)
def __contains__(self, key):
return key in self.map
    def find_similar(self, term, max_dist=0.8, exact=False):
        from jellyfish import jaro_distance
        if exact:
            found = [(item, self.index(item)) for item in self.items
                     if term in item]
        else:
            # Treat max_dist as the minimum Jaro similarity required for a match.
            found = [(item, self.index(item)) for item in self.items
                     if jaro_distance(term, item) >= max_dist]
        if not found:
            return None
        return dict(found)
def add(self, key):
"""
Add `key` as an item to this OrderedSet, then return its index.
If `key` is already in the OrderedSet, return the index it already
had.
"""
if key not in self.map:
self.map[key] = len(self.items)
self.items.append(key)
return self.map[key]
append = add
def update(self, sequence):
"""
Update the set with the given iterable sequence, then return the index
of the last element inserted.
"""
item_index = None
try:
for item in sequence:
item_index = self.add(item)
except TypeError:
raise ValueError('Argument needs to be an iterable, got %s' % type(sequence))
return item_index
def index(self, key):
"""
Get the index of a given entry, raising an IndexError if it's not
present.
`key` can be an iterable of entries that is not a string, in which case
this returns a list of indices.
"""
if self.is_iterable(key):
return [self.index(subkey) for subkey in key]
return self.map[key]
def pop(self):
"""
Remove and return the last element from the set.
Raises KeyError if the set is empty.
"""
if not self.items:
raise KeyError('Set is empty')
elem = self.items[-1]
del self.items[-1]
del self.map[elem]
return elem
def discard(self, key):
"""
Remove an element. Do not raise an exception if absent.
The MutableSet mixin uses this to implement the .remove() method, which
*does* raise an error when asked to remove a non-existent item.
"""
if key in self:
i = self.items.index(key)
del self.items[i]
del self.map[key]
for k, v in self.map.items():
if v >= i:
self.map[k] = v - 1
def clear(self):
"""
Remove all items from this OrderedSet.
"""
del self.items[:]
self.map.clear()
def __iter__(self):
return iter(self.items)
def __reversed__(self):
return reversed(self.items)
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and self.items == other.items
try:
other_as_set = set(other)
except TypeError:
# If `other` can't be converted into a set, it's not equal.
return False
else:
return set(self) == other_as_set
@property
def _as_dict(self):
"""The reverse of map"""
return dict(enumerate(self.items))
def __repr__(self):
if len(self.items) < 10:
return 'OrderedSet(%r)' % self.items
else:
return 'OrderedSet of %d items like %r' % (len(self.items), self[0])
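# Illustrative usage sketch for OrderedSet (the values below are arbitrary examples,
# not part of the original API):
def _ordered_set_demo():
    s = OrderedSet(['red', 'green', 'blue'])
    s.add('red')                      # duplicates keep their first index
    assert s.index('green') == 1
    s.discard('green')
    assert list(s) == ['red', 'blue']
    return s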
class Cache(object):
def __init__(self, *args, **kwargs):
self.words = OrderedSet(kwargs.get('replace').words.items) if 'replace' in kwargs else OrderedSet()
self.results = odict(kwargs.get('replace').results.items()) if 'replace' in kwargs else odict()
def __setitem__(self, k, v):
if k not in self.words:
self.words.add(k)
self.results[k] = v
def __getitem__(self, word):
if word in self.words.items:
answer = self.results.get(word)
if answer is None:
self.words.remove(word)
return None
return answer
elif word in self.results.keys() and self.results.get(word) is not None and word not in self.words.items:
self.words.add(word)
self.results.move_to_end(word)
return {word: self.results.get(word)}
elif word not in self.words.items and word not in self.results.keys():
return None
return None
class RedException(Exception): pass
class RecordExistsError(Exception):
def __init__(self, record='', table=''):
self.record = record
self.table = table
self.msg = 'Record "{}" already exists for table "{}"'.format(self.record, self.table)
def __str__(self):
return repr(self.msg)
def get_datetime(s):
try:
return datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
except AttributeError:
return datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
def searchVideos(self, **data):
client = self or RedClient()
query_type = 'redtube.Videos.searchVideos'
data = data
result = client._request(query_type, **data)
result.update(data)
return result
def change_page(url=None, page=None):
    url = url or 'http://api.redtube.com/?data=redtube.Videos.searchVideos&page=1'
    pattern = r'page=(\d+)'
    current_page = re.search(pattern, url).group(1)
    # If no explicit page is given, advance to the next page.
    if page is None:
        page = int(current_page) + 1
    pagestr = 'page={}'.format(page)
    newstr = re.sub(pattern, pagestr, url)
    return newstr
redtuple = namedtuple('RedTube', (
'id', 'url', 'title', 'uploader', 'views', 'rating', 'votes', 'comments', 'upload_date', 'duration', 'tags',
'categories', 'stars'))
def getCategoriesList():
raw_categories = get('http://api.redtube.com/?data=redtube.Categories.getCategoriesList&output=json').json().get(
'categories')
categories = [entry['category'] for entry in raw_categories]
return categories
def getTagList():
raw_tags = get('http://api.redtube.com/?data=redtube.Tags.getTagList&output=json').json().get('tags')
tags = [entry['tag']['tag_name'] for entry in raw_tags]
return tags
def getStarList():
raw_stars = get('http://api.redtube.com/?data=redtube.Stars.getStarList&output=json').json().get('stars')
stars = [entry['star']['star_name'] for entry in raw_stars]
return stars
def getStarDetailedList():
raw_stars = get('http://api.redtube.com/?data=redtube.Stars.getStarDetailedList&output=json').json().get('stars')
stars = [(entry['star']['star_name'], entry['star']['star_url']) for entry in raw_stars]
return stars
def username(url):
text = get(url).text
username = re.compile('<td class="withbadge">\\s*<a href="/(?P<username>\\w+)"')
# likes = re.compile('<span class="percent-likes">(\d\d\d|\d\d|\d)%</span>')
# views = '<td>102,501\s?<span class="added-time">'
if username.search(text):
return username.search(text).group('username')
return None
class FeedParser(object):
RSS_FEED = "http://feeds.feedburner.com/redtube/videos"
TM = attrgetter('tm_year', 'tm_mon', 'tm_mday', 'tm_hour', 'tm_min', 'tm_sec')
def __init__(self):
self.feed = feedparser.parse(self.RSS_FEED)
self.etag = self.feed.get('etag', None)
self.updated_parsed = self.TM(self.feed.get('updated_parsed')) or None
self.entries = self.feed.get('entries')
def get_ids(self):
ids = [entry.get('id').rsplit('/', 1)[-1] for entry in self.entries]
return ids
def get_entry(i):
FEED = feedparser.parse("http://feeds.feedburner.com/redtube/videos").get('entries')
entry = FEED[i]
id = int(entry.get('id').rsplit('/', 1)[-1])
url = entry.get('feedburner_origlink')
info = get_info(url)
info['id'] = id
info['url'] = url
title = entry.get('title')
info['title'] = title
updated_parsed = entry.get('updated_parsed')
tm = attrgetter('tm_year', 'tm_mon', 'tm_mday', 'tm_hour', 'tm_min', 'tm_sec')(updated_parsed)
upload_date = datetime(*tm)
info['upload_date'] = upload_date
content = entry.get('content')
summary = entry.get('summary_detail', {}).get('value')
other = re.compile(
'\s?Added:\s?(?P<added>\d\d\d\d-\d\d-\d\d),\s?Duration:\s?(?P<duration>\d\d:\d\d|\d:\d\d|:\d\d),\s?Rating:\s(?P<rating>\d.\d\d|\d.\d|\d),\s?Views:\s(?P<views>\d+)')
gd = other.search(summary).groupdict() if other.search(summary) else {}
info.update(gd)
views = int(gd.get('views', '0'))
info['views'] = views
rating = gd.get('rating', '0.00')
info['rating'] = rating
timing = gd.get('duration', '0:00')
info['timing'] = timing
duration = int(sum([x[1] * pow(60, x[0]) for x in enumerate(map(int, timing.split(':')[::-1]))]))
info['duration'] = duration
return info
added_at = isodate.parse_date(gd.get('added', '1900-01-01'))
link = entry.get('link')
updated = entry.get('updated')
updated = isodate.parse_datetime(updated)
updated_at = attrgetter('year', 'month', 'day', 'hour', 'minute', 'second')(updated)
updated_at = datetime(*updated_at)
return info
class Entry(feedparser.FeedParserDict):
def __init__(self, *args, **kwargs):
super(Entry, self).__init__(*args, **kwargs)
self.url = self.get('feedburner_origlink') or self.get('id', '')
self.title = self.get('title').lower()
def parse_summary(self):
from toolz import partition
self.summary = self.get('summary', ' ').lower()
title = self.title
        self.unparsed = self.summary.split(title)[-1].strip('"').strip().replace(',', '')
unparsed_split = self.unparsed.split()
if len(unparsed_split) / 2 != 1:
return self.unparsed
self._summary = dict(partition(2, unparsed_split))
return self._summary # dict(partition(2, unparsed_split))
    def duration(self):
        summary = self.parse_summary()
        timing = summary.get('duration', '0:00') if isinstance(summary, dict) else '0:00'
        return int(sum([x[1] * pow(60, x[0]) for x in enumerate(map(int, timing.split(':')[::-1]))]))
############
def row_is_expanded(self):
try:
return bool(self.info)
except AttributeError:
return False
def request(data, **kwargs): #todo
from urllib.parse import urlencode
ordering = set(['newest', 'mostviewed', 'rating'])
period = set(['weekly', 'monthly', 'alltime'])
kwargs.update({'output': 'json', 'data': data})
url = '%s?%s' % ('http://api.redtube.com/', urlencode(kwargs))
print(url)
return get(url).json()
def get_info(url):
u = G.request(url=url)
likes = u.select('//span[@class="percent-likes"]').text('0%')
likes = re.search('(\d\d\d|\d\d|\d)%', likes).group(1)
tags = '/'.join(u.select('//td[preceding-sibling::td="TAGS"]/a').text_list([])).lower()
categories = '/'.join(u.select('//td[preceding-sibling::td="FROM"][3]/*').text_list([])).lower()
stars = '/'.join(u.select('//td[preceding-sibling::td="PORNSTARS"]').text_list([])).lower()
comments = u.select('//a[@class="comments-btn"]').text('comments (0)')
comments = re.search('\s?comments\s?\((\d\d\d\d|\d\d\d|\d\d|\d)\)', comments).group(1)
username = u.select('//td[@class="withbadge"]/a').text('')
return dict(likes=likes,
tags=tags,
categories=categories,
comments=comments,
uploader=username,
stars=stars)
#todo
def get_deleted_videos(page=0):
req = request(data='redtube.Videos.getDeletedVideos', ordering='newest', period='all', page=page)
return [Row(**entry.get('video')) for entry in req.get('videos')]
def getRedtubeVideos(page=0, category=None):
http = 'http://api.redtube.com/'
url = 'http://api.redtube.com/?data=redtube.Videos.searchVideos&output=json&ordering=newest&page={}'.format(page)
count = get(url).json().get('count')
videos = get(url).json().get('videos')
return [Row(RedClient(), **entry.get('video')) for entry in videos]
def getDeletedVideos(page=0):
page = page
url = 'http://api.redtube.com/?data=redtube.Videos.getDeletedVideos&ordering=newest&period=alltime&output=json&page={}'.format(
page)
json = get(url).json()
count = json.get('count')
videos = json.get('videos')
return dict(query=url, total=count, videos=videos, page=page)
def all_deleted(page=0):
    url = 'http://api.redtube.com/?data=redtube.Videos.getDeletedVideos&ordering=newest&period=alltime&output=json&page={}'.format(
        page)
json = get(url).json()
    count = json.get('count')
    videos = json.get('videos')
    # Assumed completion of the truncated body, mirroring getDeletedVideos above.
    return dict(query=url, total=count, videos=videos, page=page)
| 32.537477 | 166 | 0.670356 |
4a2505434fb9ec672dc75d7f5328d5d0141ca5b9 | 1,851 | py | Python | src/deployer/plugins/echo.py | jbenden/deployer | b036fa3030f99ed0730bb3770cf7e01c58c257f1 | [
"Apache-2.0"
] | 2 | 2018-08-30T14:14:13.000Z | 2022-03-24T15:19:29.000Z | src/deployer/plugins/echo.py | jbenden/deployer | b036fa3030f99ed0730bb3770cf7e01c58c257f1 | [
"Apache-2.0"
] | null | null | null | src/deployer/plugins/echo.py | jbenden/deployer | b036fa3030f99ed0730bb3770cf7e01c58c257f1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 Joseph Benden <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The module plug-in providing the ```echo``` command.
.. moduleauthor:: Joseph Benden <[email protected]>
:copyright: (c) Copyright 2018 by Joseph Benden.
:license: Apache License 2.0, see LICENSE.txt for full details.
"""
import logging
from collections import OrderedDict
from deployer.rendering import render
from deployer.result import Result
from .plugin import Plugin
LOGGER = logging.getLogger(__name__)
class Echo(Plugin):
"""Print data to console."""
TAG = 'echo'
def __init__(self, msg):
"""Ctor."""
self.msg = msg['echo']
@staticmethod
def valid(node):
"""Ensure node structure is valid."""
if type(node) is not OrderedDict:
return False
if Echo.TAG not in node:
return False
return True
@staticmethod
def build(node):
"""Build an `Echo` node."""
yield Echo(node)
def execute(self, context):
"""Perform the plugin's task purpose."""
if context:
msg = render(self.msg, **context.variables.last())
else:
msg = self.msg
for line in msg.splitlines(False):
LOGGER.info("| %s", line)
return Result(result='success')
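# Illustrative sketch of building and running an ``echo`` step outside the normal
# deployer pipeline; the node value and the empty context are assumptions:
def _echo_example():  # pragma: no cover - illustrative only
    node = OrderedDict(echo="Hello\nWorld")
    assert Echo.valid(node)
    step = next(Echo.build(node))
    return step.execute(None)   # logs each line, returns Result(result='success')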
| 25.708333 | 74 | 0.653701 |
4a250616ae55bbb9cadc79db09f445fc985389d4 | 8,446 | py | Python | docs/conf.py | AjanShrestha/rentomatic | 30f42a4f349bd1a2d4303dbc4eee4f4066b9c634 | [
"MIT"
] | null | null | null | docs/conf.py | AjanShrestha/rentomatic | 30f42a4f349bd1a2d4303dbc4eee4f4066b9c634 | [
"MIT"
] | null | null | null | docs/conf.py | AjanShrestha/rentomatic | 30f42a4f349bd1a2d4303dbc4eee4f4066b9c634 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# rentomatic documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import rentomatic
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Rentomatic'
copyright = u"2020, Ajan Lal Shrestha"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = rentomatic.__version__
# The full version, including alpha/beta/rc tags.
release = rentomatic.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rentomaticdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'rentomatic.tex',
u'Rentomatic Documentation',
u'Ajan Lal Shrestha', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rentomatic',
u'Rentomatic Documentation',
[u'Ajan Lal Shrestha'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'rentomatic',
u'Rentomatic Documentation',
u'Ajan Lal Shrestha',
'rentomatic',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.601449 | 76 | 0.716552 |
4a2506c873018a3f4274f4d860b8cda748ec373c | 158 | py | Python | scripts/portal/gold_boss_gate.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | 9 | 2021-04-26T11:59:29.000Z | 2021-12-20T13:15:27.000Z | scripts/portal/gold_boss_gate.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | scripts/portal/gold_boss_gate.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | 6 | 2021-07-14T06:32:05.000Z | 2022-02-06T02:32:56.000Z | # 252020000 - to gold boss
# Not sure this is the correct portal progression, but it was the only way found to link everything together
sm.warp(252030000, 5)
| 31.6 | 107 | 0.772152 |
4a2506db2c71f143d623b0584d66060be9f68943 | 1,806 | py | Python | ace/label.py | jflournoy/ACE | b28a4eddfb31cd758df38f48d170e970618e16da | [
"MIT"
] | 20 | 2015-01-16T19:39:51.000Z | 2022-02-22T19:50:46.000Z | ace/label.py | jflournoy/ACE | b28a4eddfb31cd758df38f48d170e970618e16da | [
"MIT"
] | 2 | 2015-10-15T19:34:57.000Z | 2017-02-21T22:11:21.000Z | ace/label.py | jflournoy/ACE | b28a4eddfb31cd758df38f48d170e970618e16da | [
"MIT"
] | 12 | 2015-05-08T20:06:26.000Z | 2020-05-05T09:34:00.000Z | # from nltk import *
import re
from collections import Counter
from database import Article
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import pandas as pd
def extract_ngram_features(db, tfidf=True, save=None, vocabulary=None, require_activations=True, **kwargs):
''' Takes text from an article as input and returns a matrix of document -->
ngram weights. At the moment, only extracts terms from abstracts.
Args:
db: A database instance
tfidf: If True, uses a tf-idf tokenizer; otherwise uses raw counts
save: an optional path to save a CSV to; if None, returns the resulting data
vocabulary: an optional list of ngrams to restrict extraction to
require_activations: When True, only articles containing at least one fMRI activation
table will be included. When False, use all articles in DB.
kwargs: Optional keywords passed onto the scikit-learn vectorizer. Common args are
ngram_range, min_df, max_df, stop_words, and vocabulary.
'''
# Extract article texts--for now, uses abstracts
articles = db.session.query(Article.id, Article.abstract)
if require_activations:
articles = articles.filter(Article.tables.any())
pmids, corpus = zip(*articles.all())
# Instantiate vectorizer--either simple counts, or tf-idf
vectorizer = TfidfVectorizer if tfidf else CountVectorizer
vectorizer = vectorizer(vocabulary=vocabulary, **kwargs)
# Transform texts
weights = vectorizer.fit_transform(corpus).toarray()
names = vectorizer.get_feature_names()
data = pd.DataFrame(weights, columns=names, index=pmids)
if save is not None:
data.to_csv(save, sep='\t', index_label='pmid', encoding='utf-8')
else:
return data
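# Minimal usage sketch (illustrative only): `db` is assumed to be an ACE database
# instance exposing a SQLAlchemy `session`, as expected by extract_ngram_features;
# the vectorizer keyword values are arbitrary examples.
def _example_ngram_extraction(db):  # pragma: no cover
    # Unigrams and bigrams, tf-idf weighted, dropping very rare terms.
    return extract_ngram_features(db, tfidf=True, ngram_range=(1, 2),
                                  min_df=5, stop_words='english')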
| 41.045455 | 107 | 0.717608 |
4a2506ef05b670b2319cbdba9aa52f26f52decd6 | 1,856 | py | Python | tts/espeak.py | javatechs/controller | caf99feb15b85f369f3d929847563be3f391757f | [
"Apache-2.0"
] | null | null | null | tts/espeak.py | javatechs/controller | caf99feb15b85f369f3d929847563be3f391757f | [
"Apache-2.0"
] | null | null | null | tts/espeak.py | javatechs/controller | caf99feb15b85f369f3d929847563be3f391757f | [
"Apache-2.0"
] | 1 | 2020-09-07T22:04:17.000Z | 2020-09-07T22:04:17.000Z | import os
import tempfile
import uuid
import logging
log = logging.getLogger('RemoTV.tts.espeak')
tempDir = None
male = None
voice_number = None
hw_num = None
espeak_path = None
def setup(robot_config):
global tempDir
global male
global voice_number
global hw_num
global espeak_path
male = robot_config.getboolean('espeak', 'male')
voice_number = robot_config.getint('espeak', 'voice_number')
if robot_config.has_option('tts', 'speaker_num'):
hw_num = robot_config.get('tts', 'speaker_num')
else:
hw_num = robot_config.get('tts', 'hw_num')
if robot_config.has_option('espeak', 'espeak_path'):
espeak_path = robot_config.get('espeak', 'espeak_path')
else:
if os.name == 'nt':
espeak_path = '"c:\\Program Files (x86)\\eSpeak\\command_line\\espeak.exe"'
else:
espeak_path = '/usr/bin/espeak'
#set the location to write the temp file to
tempDir = tempfile.gettempdir()
log.info("TTS temporary directory : %s", tempDir)
def say(*args):
message = args[0]
message = message.encode('ascii', 'ignore')
tempFilePath = os.path.join(tempDir, "text_" + str(uuid.uuid4()))
f = open(tempFilePath, "wb")
f.write(message)
f.close()
if male:
espeak_command = espeak_path + ' -v en-us+m{} -s 170'.format(voice_number)
else:
espeak_command = espeak_path + ' -v en-us+f{} -s 170'.format(voice_number)
if os.name == 'nt':
os.system('type ' + tempFilePath + ' | ' + espeak_command )
else:
# os.system('cat ' + tempFilePath + ' | ' + espeak_command + ' --stdout | aplay -q -D plughw:{}'.format(hw_num) )
mw_str = 'cat ' + tempFilePath + ' | ' + espeak_command + ' --stdout | aplay -q'
log.debug(mw_str)
os.system(mw_str)
os.remove(tempFilePath)
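# Illustrative usage sketch. The config values below are assumptions; RemoTV normally
# supplies robot_config from its own controller configuration. Requires espeak and
# aplay to be installed.
def _example_usage():  # pragma: no cover
    import configparser
    cfg = configparser.ConfigParser()
    cfg.read_dict({
        'espeak': {'male': 'no', 'voice_number': '1'},
        'tts': {'hw_num': '0'},
    })
    setup(cfg)
    say("Hello from the text to speech module")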
| 29.460317 | 121 | 0.626078 |
4a25082298efebb554c3b878070d1dbf07ee8b41 | 5,100 | py | Python | FreeCAD_geometry_generation/racetrack_tapered_cavity.py | alunmorgan/EM_CAD_frontend | 9bed7842444129b2c962d2870b9e06782a8999c4 | [
"Apache-2.0"
] | 2 | 2020-08-09T19:43:08.000Z | 2020-10-10T01:31:30.000Z | FreeCAD_geometry_generation/racetrack_tapered_cavity.py | alunmorgan/EM_CAD_frontend | 9bed7842444129b2c962d2870b9e06782a8999c4 | [
"Apache-2.0"
] | 3 | 2019-08-15T19:36:21.000Z | 2022-01-19T03:28:57.000Z | FreeCAD_geometry_generation/racetrack_tapered_cavity.py | alunmorgan/EM_CAD_frontend | 9bed7842444129b2c962d2870b9e06782a8999c4 | [
"Apache-2.0"
] | null | null | null | from freecad_elements import make_beampipe, make_racetrack_aperture, make_circular_aperture,\
make_taper, ModelException, parameter_sweep, base_model
from sys import argv
import os
# baseline model parameters
INPUT_PARAMETERS = {'racetrack_height': 10e-3, 'racetrack_width': 40e-3, 'racetrack_length': 80e-3,
'cavity_radius': 20e-3, 'cavity_length': 20e-3, 'taper_length': 30e-3, 'pipe_thickness': 2e-3}
MODEL_NAME, OUTPUT_PATH = argv
def racetrack_to_octagonal_cavity_model(input_parameters):
"""Generates the geometry for a circular cavity with tapers to a racetrack pipe in FreeCAD.
Also writes out the geometry as STL files
and writes a "sidecar" text file containing the input parameters used.
Args:
input_parameters (dict): Dictionary of input parameter names and values.
"""
try:
wire1, face1 = make_racetrack_aperture(input_parameters['racetrack_height'],
input_parameters['racetrack_width'])
wire2, face2 = make_circular_aperture(input_parameters['cavity_radius'])
wire3, face3 = make_racetrack_aperture(input_parameters['racetrack_height'] + input_parameters['pipe_thickness'],
input_parameters['racetrack_width'] + input_parameters['pipe_thickness'])
wire4, face4 = make_circular_aperture(input_parameters['cavity_radius'] + input_parameters['pipe_thickness'])
beampipe_vac1 = make_beampipe(face1, input_parameters['racetrack_length'],
(-input_parameters['racetrack_length'] / 2. -
input_parameters['taper_length'] -
input_parameters['cavity_length'] / 2., 0, 0))
taper_vac1 = make_taper(wire2, wire1, input_parameters['taper_length'],
(-input_parameters['cavity_length'] / 2., 0, 0), (0, 180, 0))
beampipe_vac2 = make_beampipe(face2, input_parameters['cavity_length'])
taper_vac2 = make_taper(wire2, wire1, input_parameters['taper_length'],
(input_parameters['cavity_length'] / 2., 0, 0))
beampipe_vac3 = make_beampipe(face1, input_parameters['racetrack_length'],
(input_parameters['racetrack_length'] / 2. +
input_parameters['taper_length'] +
input_parameters['cavity_length'] / 2., 0, 0))
beampipe1 = make_beampipe(face3, input_parameters['racetrack_length'],
(-input_parameters['racetrack_length'] / 2. -
input_parameters['taper_length'] -
input_parameters['cavity_length'] / 2., 0, 0))
taper1 = make_taper(wire4, wire3, input_parameters['taper_length'],
(-input_parameters['cavity_length'] / 2., 0, 0), (0, 180, 0))
beampipe2 = make_beampipe(face4, input_parameters['cavity_length'])
taper2 = make_taper(wire4, wire3, input_parameters['taper_length'],
(input_parameters['cavity_length'] / 2., 0, 0))
beampipe3 = make_beampipe(face3, input_parameters['racetrack_length'],
(input_parameters['racetrack_length'] / 2. +
input_parameters['taper_length'] +
input_parameters['cavity_length'] / 2., 0, 0))
fin1 = beampipe1.fuse(taper1)
fin2 = fin1.fuse(beampipe2)
fin3 = fin2.fuse(taper2)
fin4 = fin3.fuse(beampipe3)
vac1 = beampipe_vac1.fuse(taper_vac1)
vac2 = vac1.fuse(beampipe_vac2)
vac3 = vac2.fuse(taper_vac2)
vac4 = vac3.fuse(beampipe_vac3)
full_pipe = fin4.cut(vac4)
except Exception as e:
raise ModelException(e)
# An entry in the parts dictionary corresponds to an STL file. This is useful for parts of differing materials.
parts = {'pipe': full_pipe, 'vac': vac4}
return parts, os.path.splitext(os.path.basename(MODEL_NAME))[0]
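# Example of a one-off build with modified parameters (the values here are
# illustrative, not recommended settings):
#
#     custom_parameters = dict(INPUT_PARAMETERS, cavity_radius=25e-3, taper_length=45e-3)
#     base_model(racetrack_to_octagonal_cavity_model, custom_parameters, OUTPUT_PATH, accuracy=10)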
base_model(racetrack_to_octagonal_cavity_model, INPUT_PARAMETERS, OUTPUT_PATH, accuracy=10)
parameter_sweep(racetrack_to_octagonal_cavity_model, INPUT_PARAMETERS, OUTPUT_PATH, 'cavity_radius', [5e-3, 10e-3, 15e-3, 25e-3, 30e-3])
parameter_sweep(racetrack_to_octagonal_cavity_model, INPUT_PARAMETERS, OUTPUT_PATH, 'taper_length', [10e-3, 20e-3, 40e-3, 50e-3, 60e-3])
parameter_sweep(racetrack_to_octagonal_cavity_model, INPUT_PARAMETERS, OUTPUT_PATH, 'racetrack_height', [15e-3, 20e-3, 25e-3, 30e-3, 35e-3, 40e-3, 45e-3, 50e-3])
parameter_sweep(racetrack_to_octagonal_cavity_model, INPUT_PARAMETERS, OUTPUT_PATH, 'racetrack_width', [20e-3, 30e-3, 50e-3, 60e-3, 70e-3])
parameter_sweep(racetrack_to_octagonal_cavity_model, INPUT_PARAMETERS, OUTPUT_PATH, 'racetrack_length', [50e-3, 100e-3, 150e-3, 200e-3, 250e-3, 300e-3])
parameter_sweep(racetrack_to_octagonal_cavity_model, INPUT_PARAMETERS, OUTPUT_PATH, 'cavity_length', [10e-3, 30e-3])
| 65.384615 | 161 | 0.646275 |
4a250860c2bee2736db0d8cbb33a31524eb3c35f | 3,965 | py | Python | tests/test_clblas.py | SarckFour/purecl | 72429ff03d224ea8a2665610e1c579cf59d1af53 | [
"BSD-2-Clause-FreeBSD"
] | 9 | 2016-07-22T13:33:30.000Z | 2022-03-18T11:31:19.000Z | tests/test_clblas.py | SarckFour/purecl | 72429ff03d224ea8a2665610e1c579cf59d1af53 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2016-01-03T06:50:23.000Z | 2017-07-28T02:57:36.000Z | tests/test_clblas.py | SarckFour/purecl | 72429ff03d224ea8a2665610e1c579cf59d1af53 | [
"BSD-2-Clause-FreeBSD"
] | 9 | 2015-08-05T12:13:46.000Z | 2021-04-15T01:03:40.000Z | """
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
opencl4py - OpenCL cffi bindings and helper classes.
URL: https://github.com/Samsung/opencl4py
Original author: Alexey Kazantsev <[email protected]>
"""
"""
Tests some of the api in opencl4py.blas._clBlas module.
"""
import unittest
import logging
import numpy
import opencl4py as cl
import opencl4py.blas as blas
import os
class Test(unittest.TestCase):
def setUp(self):
self.old_env = os.environ.get("PYOPENCL_CTX")
if self.old_env is None:
os.environ["PYOPENCL_CTX"] = "0:0"
self.blas = blas.CLBLAS()
def tearDown(self):
if self.old_env is None:
del os.environ["PYOPENCL_CTX"]
else:
os.environ["PYOPENCL_CTX"] = self.old_env
del self.old_env
def _test_gemm(self, gemm, dtype):
ctx = cl.Platforms().create_some_context()
queue = ctx.create_queue(ctx.devices[0])
a = numpy.zeros([127, 353], dtype=dtype)
b = numpy.zeros([135, a.shape[1]], dtype=dtype)
c = numpy.zeros([a.shape[0], b.shape[0]], dtype=dtype)
numpy.random.seed(numpy.array([123], dtype=numpy.int32)[0])
a[:] = numpy.random.rand(a.size).astype(dtype).reshape(a.shape)
b[:] = numpy.random.rand(b.size).astype(dtype).reshape(b.shape)
gold_c = numpy.dot(a, b.transpose())
a_buf = ctx.create_buffer(
cl.CL_MEM_READ_WRITE | cl.CL_MEM_COPY_HOST_PTR, a)
b_buf = ctx.create_buffer(
cl.CL_MEM_READ_WRITE | cl.CL_MEM_COPY_HOST_PTR, b)
c_buf = ctx.create_buffer(
cl.CL_MEM_READ_WRITE | cl.CL_MEM_COPY_HOST_PTR, c)
gemm([queue], blas.clblasRowMajor, blas.clblasNoTrans,
blas.clblasTrans, a.shape[0], b.shape[0], a.shape[1],
1.0, a_buf, b_buf, 0.0, c_buf)
queue.flush()
queue.read_buffer(c_buf, c)
max_diff = numpy.fabs(c - gold_c).max()
self.assertLess(max_diff, 0.00001 if dtype == numpy.float64
else 0.00015)
def test_sgemm(self):
logging.debug("ENTER: test_sgemm")
self._test_gemm(self.blas.sgemm, numpy.float32)
logging.debug("EXIT: test_sgemm")
def test_dgemm(self):
logging.debug("ENTER: test_dgemm")
self._test_gemm(self.blas.dgemm, numpy.float64)
logging.debug("EXIT: test_dgemm")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| 38.872549 | 79 | 0.700631 |
4a250966e8b5afb68d4c15439c392e443329d1c4 | 328 | py | Python | dms_importer/config/docs.py | deepeshgarg007/dms_importer | bc21d2e89b80214a62d2b80ed613965b1260840a | [
"MIT"
] | null | null | null | dms_importer/config/docs.py | deepeshgarg007/dms_importer | bc21d2e89b80214a62d2b80ed613965b1260840a | [
"MIT"
] | null | null | null | dms_importer/config/docs.py | deepeshgarg007/dms_importer | bc21d2e89b80214a62d2b80ed613965b1260840a | [
"MIT"
] | null | null | null | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/dms_importer"
# docs_base_url = "https://[org_name].github.io/dms_importer"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Dms Importer"
| 27.333333 | 68 | 0.731707 |
4a2509da2465fb6d4450ffb2f6598fb16874eba1 | 16,184 | py | Python | sqlite3ct/test/regression.py | bhuztez/sqlite3ct | df8888bdbff0ee5c6830e2ed9681c5f70245ed51 | [
"Zlib"
] | 1 | 2020-07-02T14:47:46.000Z | 2020-07-02T14:47:46.000Z | sqlite3ct/test/regression.py | bhuztez/sqlite3ct | df8888bdbff0ee5c6830e2ed9681c5f70245ed51 | [
"Zlib"
] | null | null | null | sqlite3ct/test/regression.py | bhuztez/sqlite3ct | df8888bdbff0ee5c6830e2ed9681c5f70245ed51 | [
"Zlib"
] | null | null | null | # pysqlite2/test/regression.py: pysqlite regression tests
#
# Copyright (C) 2006-2010 Gerhard Häring <[email protected]>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import unittest
import sqlite3ct as sqlite
import weakref
from test import support
class RegressionTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
def tearDown(self):
self.con.close()
def test_PragmaUserVersion(self):
# This used to crash pysqlite because this pragma command returns NULL for the column name
cur = self.con.cursor()
cur.execute("pragma user_version")
def test_PragmaSchemaVersion(self):
# This still crashed pysqlite <= 2.2.1
con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES)
try:
cur = self.con.cursor()
cur.execute("pragma schema_version")
finally:
cur.close()
con.close()
def test_StatementReset(self):
# pysqlite 2.1.0 to 2.2.0 have the problem that not all statements are
# reset before a rollback, but only those that are still in the
# statement cache. The others are not accessible from the connection object.
con = sqlite.connect(":memory:", cached_statements=5)
cursors = [con.cursor() for x in range(5)]
cursors[0].execute("create table test(x)")
for i in range(10):
cursors[0].executemany("insert into test(x) values (?)", [(x,) for x in range(10)])
for i in range(5):
cursors[i].execute(" " * i + "select x from test")
con.rollback()
def test_ColumnNameWithSpaces(self):
cur = self.con.cursor()
cur.execute('select 1 as "foo bar [datetime]"')
self.assertEqual(cur.description[0][0], "foo bar")
cur.execute('select 1 as "foo baz"')
self.assertEqual(cur.description[0][0], "foo baz")
def test_StatementFinalizationOnCloseDb(self):
# pysqlite versions <= 2.3.3 only finalized statements in the statement
# cache when closing the database. statements that were still
# referenced in cursors weren't closed and could provoke "
# "OperationalError: Unable to close due to unfinalised statements".
con = sqlite.connect(":memory:")
cursors = []
# default statement cache size is 100
for i in range(105):
cur = con.cursor()
cursors.append(cur)
cur.execute("select 1 x union select " + str(i))
con.close()
@unittest.skipIf(sqlite.sqlite_version_info < (3, 2, 2), 'needs sqlite 3.2.2 or newer')
def test_OnConflictRollback(self):
con = sqlite.connect(":memory:")
con.execute("create table foo(x, unique(x) on conflict rollback)")
con.execute("insert into foo(x) values (1)")
try:
con.execute("insert into foo(x) values (1)")
except sqlite.DatabaseError:
pass
con.execute("insert into foo(x) values (2)")
try:
con.commit()
except sqlite.OperationalError:
self.fail("pysqlite knew nothing about the implicit ROLLBACK")
def test_WorkaroundForBuggySqliteTransferBindings(self):
"""
pysqlite would crash with older SQLite versions unless
a workaround is implemented.
"""
self.con.execute("create table foo(bar)")
self.con.execute("drop table foo")
self.con.execute("create table foo(bar)")
def test_EmptyStatement(self):
"""
pysqlite used to segfault with SQLite versions 3.5.x. These return NULL
for "no-operation" statements
"""
self.con.execute("")
def test_TypeMapUsage(self):
"""
pysqlite until 2.4.1 did not rebuild the row_cast_map when recompiling
a statement. This test exhibits the problem.
"""
SELECT = "select * from foo"
con = sqlite.connect(":memory:",detect_types=sqlite.PARSE_DECLTYPES)
con.execute("create table foo(bar timestamp)")
con.execute("insert into foo(bar) values (?)", (datetime.datetime.now(),))
con.execute(SELECT)
con.execute("drop table foo")
con.execute("create table foo(bar integer)")
con.execute("insert into foo(bar) values (5)")
con.execute(SELECT)
def test_ErrorMsgDecodeError(self):
# When porting the module to Python 3.0, the error message about
# decoding errors disappeared. This verifies they're back again.
with self.assertRaises(sqlite.OperationalError) as cm:
self.con.execute("select 'xxx' || ? || 'yyy' colname",
(bytes(bytearray([250])),)).fetchone()
msg = "Could not decode to UTF-8 column 'colname' with text 'xxx"
self.assertIn(msg, str(cm.exception))
def test_RegisterAdapter(self):
"""
See issue 3312.
"""
self.assertRaises(TypeError, sqlite.register_adapter, {}, None)
def test_SetIsolationLevel(self):
# See issue 27881.
class CustomStr(str):
def upper(self):
return None
def __del__(self):
con.isolation_level = ""
con = sqlite.connect(":memory:")
con.isolation_level = None
for level in "", "DEFERRED", "IMMEDIATE", "EXCLUSIVE":
with self.subTest(level=level):
con.isolation_level = level
con.isolation_level = level.lower()
con.isolation_level = level.capitalize()
con.isolation_level = CustomStr(level)
# setting isolation_level failure should not alter previous state
con.isolation_level = None
con.isolation_level = "DEFERRED"
pairs = [
(1, TypeError), (b'', TypeError), ("abc", ValueError),
("IMMEDIATE\0EXCLUSIVE", ValueError), ("\xe9", ValueError),
]
for value, exc in pairs:
with self.subTest(level=value):
with self.assertRaises(exc):
con.isolation_level = value
self.assertEqual(con.isolation_level, "DEFERRED")
def test_CursorConstructorCallCheck(self):
"""
Verifies that cursor methods check whether base class __init__ was
called.
"""
class Cursor(sqlite.Cursor):
def __init__(self, con):
pass
con = sqlite.connect(":memory:")
cur = Cursor(con)
with self.assertRaises(sqlite.ProgrammingError):
cur.execute("select 4+5").fetchall()
with self.assertRaisesRegex(sqlite.ProgrammingError,
r'^Base Cursor\.__init__ not called\.$'):
cur.close()
def test_StrSubclass(self):
"""
The Python 3.0 port of the module didn't cope with values of subclasses of str.
"""
class MyStr(str): pass
self.con.execute("select ?", (MyStr("abc"),))
def test_ConnectionConstructorCallCheck(self):
"""
Verifies that connection methods check whether base class __init__ was
called.
"""
class Connection(sqlite.Connection):
def __init__(self, name):
pass
con = Connection(":memory:")
with self.assertRaises(sqlite.ProgrammingError):
cur = con.cursor()
def test_CursorRegistration(self):
"""
Verifies that subclassed cursor classes are correctly registered with
the connection object, too. (fetch-across-rollback problem)
"""
class Connection(sqlite.Connection):
def cursor(self):
return Cursor(self)
class Cursor(sqlite.Cursor):
def __init__(self, con):
sqlite.Cursor.__init__(self, con)
con = Connection(":memory:")
cur = con.cursor()
cur.execute("create table foo(x)")
cur.executemany("insert into foo(x) values (?)", [(3,), (4,), (5,)])
cur.execute("select x from foo")
con.rollback()
with self.assertRaises(sqlite.InterfaceError):
cur.fetchall()
def test_AutoCommit(self):
"""
Verifies that creating a connection in autocommit mode works.
2.5.3 introduced a regression so that these could no longer
be created.
"""
con = sqlite.connect(":memory:", isolation_level=None)
def test_PragmaAutocommit(self):
"""
Verifies that running a PRAGMA statement that does an autocommit does
work. This did not work in 2.5.3/2.5.4.
"""
cur = self.con.cursor()
cur.execute("create table foo(bar)")
cur.execute("insert into foo(bar) values (5)")
cur.execute("pragma page_size")
row = cur.fetchone()
def test_ConnectionCall(self):
"""
Call a connection with a non-string SQL request: check error handling
of the statement constructor.
"""
self.assertRaises(TypeError, self.con, 1)
def test_Collation(self):
def collation_cb(a, b):
return 1
self.assertRaises(sqlite.ProgrammingError, self.con.create_collation,
# Lone surrogate cannot be encoded to the default encoding (utf8)
"\uDC80", collation_cb)
def test_RecursiveCursorUse(self):
"""
http://bugs.python.org/issue10811
Recursively using a cursor, such as when reusing it from a generator led to segfaults.
Now we catch recursive cursor usage and raise a ProgrammingError.
"""
con = sqlite.connect(":memory:")
cur = con.cursor()
cur.execute("create table a (bar)")
cur.execute("create table b (baz)")
def foo():
cur.execute("insert into a (bar) values (?)", (1,))
yield 1
with self.assertRaises(sqlite.ProgrammingError):
cur.executemany("insert into b (baz) values (?)",
((i,) for i in foo()))
def test_ConvertTimestampMicrosecondPadding(self):
"""
http://bugs.python.org/issue14720
The microsecond parsing of convert_timestamp() should pad with zeros,
since the microsecond string "456" actually represents "456000".
"""
con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES)
cur = con.cursor()
cur.execute("CREATE TABLE t (x TIMESTAMP)")
# Microseconds should be 456000
cur.execute("INSERT INTO t (x) VALUES ('2012-04-04 15:06:00.456')")
# Microseconds should be truncated to 123456
cur.execute("INSERT INTO t (x) VALUES ('2012-04-04 15:06:00.123456789')")
cur.execute("SELECT * FROM t")
values = [x[0] for x in cur.fetchall()]
self.assertEqual(values, [
datetime.datetime(2012, 4, 4, 15, 6, 0, 456000),
datetime.datetime(2012, 4, 4, 15, 6, 0, 123456),
])
def test_InvalidIsolationLevelType(self):
# isolation level is a string, not an integer
self.assertRaises(TypeError,
sqlite.connect, ":memory:", isolation_level=123)
def test_NullCharacter(self):
# Issue #21147
con = sqlite.connect(":memory:")
self.assertRaises(ValueError, con, "\0select 1")
self.assertRaises(ValueError, con, "select 1\0")
cur = con.cursor()
self.assertRaises(ValueError, cur.execute, " \0select 2")
self.assertRaises(ValueError, cur.execute, "select 2\0")
def test_CommitCursorReset(self):
"""
Connection.commit() did reset cursors, which made sqlite3
to return rows multiple times when fetched from cursors
after commit. See issues 10513 and 23129 for details.
"""
con = sqlite.connect(":memory:")
con.executescript("""
create table t(c);
create table t2(c);
insert into t values(0);
insert into t values(1);
insert into t values(2);
""")
self.assertEqual(con.isolation_level, "")
counter = 0
for i, row in enumerate(con.execute("select c from t")):
with self.subTest(i=i, row=row):
con.execute("insert into t2(c) values (?)", (i,))
con.commit()
if counter == 0:
self.assertEqual(row[0], 0)
elif counter == 1:
self.assertEqual(row[0], 1)
elif counter == 2:
self.assertEqual(row[0], 2)
counter += 1
self.assertEqual(counter, 3, "should have returned exactly three rows")
def test_Bpo31770(self):
"""
The interpreter shouldn't crash in case Cursor.__init__() is called
more than once.
"""
def callback(*args):
pass
con = sqlite.connect(":memory:")
cur = sqlite.Cursor(con)
ref = weakref.ref(cur, callback)
cur.__init__(con)
del cur
# The interpreter shouldn't crash when ref is collected.
del ref
support.gc_collect()
def test_DelIsolation_levelSegfault(self):
with self.assertRaises(AttributeError):
del self.con.isolation_level
class UnhashableFunc:
__hash__ = None
def __init__(self, return_value=None):
self.calls = 0
self.return_value = return_value
def __call__(self, *args, **kwargs):
self.calls += 1
return self.return_value
class UnhashableCallbacksTestCase(unittest.TestCase):
"""
https://bugs.python.org/issue34052
Registering unhashable callbacks raises TypeError, callbacks are not
registered in SQLite after such registration attempt.
"""
def setUp(self):
self.con = sqlite.connect(':memory:')
def tearDown(self):
self.con.close()
def test_progress_handler(self):
f = UnhashableFunc(return_value=0)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
self.con.set_progress_handler(f, 1)
self.con.execute('SELECT 1')
self.assertFalse(f.calls)
def test_func(self):
func_name = 'func_name'
f = UnhashableFunc()
with self.assertRaisesRegex(TypeError, 'unhashable type'):
self.con.create_function(func_name, 0, f)
msg = 'no such function: %s' % func_name
with self.assertRaisesRegex(sqlite.OperationalError, msg):
self.con.execute('SELECT %s()' % func_name)
self.assertFalse(f.calls)
def test_authorizer(self):
f = UnhashableFunc(return_value=sqlite.SQLITE_DENY)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
self.con.set_authorizer(f)
self.con.execute('SELECT 1')
self.assertFalse(f.calls)
def test_aggr(self):
class UnhashableType(type):
__hash__ = None
aggr_name = 'aggr_name'
with self.assertRaisesRegex(TypeError, 'unhashable type'):
self.con.create_aggregate(aggr_name, 0, UnhashableType('Aggr', (), {}))
msg = 'no such function: %s' % aggr_name
with self.assertRaisesRegex(sqlite.OperationalError, msg):
self.con.execute('SELECT %s()' % aggr_name)
| 36.698413 | 98 | 0.608873 |
4a250a1924e20872b7aae983ee58db03555f8dd1 | 3,740 | py | Python | subset.py | ajyl/KEMP | d71d34e3fb1d636db7f2cf40f6a3aa0040681389 | [
"MIT"
] | null | null | null | subset.py | ajyl/KEMP | d71d34e3fb1d636db7f2cf40f6a3aa0040681389 | [
"MIT"
] | null | null | null | subset.py | ajyl/KEMP | d71d34e3fb1d636db7f2cf40f6a3aa0040681389 | [
"MIT"
] | null | null | null | """
Subset
"""
import os
import ast
from collections import defaultdict
from constants import KEMP_HOME
KEMP_FILEPATH = os.path.join(KEMP_HOME, "result/KEMP/KEMP.txt")
def load_data(filepath):
"""
Load data
"""
with open(filepath, "r") as file_p:
data = file_p.readlines()
emotion = None
context = None
concept = None
preds = None
ref = None
formatted = {}
curr_id = 0
for idx, line in enumerate(data):
if line.startswith("Emotion:"):
if emotion is not None:
breakpoint()
raise RuntimeError("Unexpected emotion")
emotion = line.replace("Emotion:", "").strip()
elif line.startswith("Context:"):
if context is not None:
breakpoint()
raise RuntimeError("Unexpected context")
context = line.replace("Context:", "").strip()
context = ast.literal_eval(context)
elif line.startswith("Concept:"):
if concept is not None:
breakpoint()
raise RuntimeError("Unexpected concept")
concept = line.replace("Concept:", "").strip()
header = "[defaultdict(<class \"list\">, "
concept = concept[len(header):-2]
concept = ast.literal_eval(concept)
elif line.startswith("Pred:"):
if preds is not None:
breakpoint()
raise RuntimeError("Unexpected pref.")
pred = line.replace("Pred:", "").strip()
sents = pred.replace("! ", ". ").split(".")
sents = [
sent.replace("?", "").replace("!", "").strip()
for sent in sents
if sent != ""
]
preds = [x for x in sents if x != ""]
elif line.startswith("Ref:"):
if ref is not None:
breakpoint()
raise RuntimeError("Unexpected ref")
ref = line.replace("Ref:", "").strip()
formatted[idx] = {
"emotion": emotion,
"context": context,
"concept": concept,
"preds": preds,
"ref": ref,
}
emotion = None
context = None
concept = None
preds = None
ref = None
return formatted
def reformat(data):
"""
Reformat data into mapping of emotions to list of ids
"""
reformatted = defaultdict(list)
for _id, obj in data.items():
reformatted[obj["emotion"]].append(obj["preds"])
return reformatted
def reformat_sentiment(data):
"""
Reformat into positive, negative sentiments.
"""
sentiments = {
# Positive
"joyful": "positive",
"excited": "positive",
"hopeful": "positive",
"faithful": "positive",
"content": "positive",
"nostalgic": "positive",
"grateful": "positive",
"trusting": "positive",
"impressed": "positive",
"proud": "positive",
# Negative
"guilty": "negative",
"lonely": "negative",
"sad": "negative",
"angry": "negative",
"disappointed": "negative",
"annoyed": "negative",
"terrified": "negative",
"ashamed": "negative",
"furious": "negative",
}
reformatted = defaultdict(list)
for _id, obj in data.items():
emotion = obj["emotion"]
if emotion not in sentiments:
continue
reformatted[sentiments[emotion]].append(obj["preds"])
return reformatted
if __name__ == "__main__":
_data = load_data(KEMP_FILEPATH)
split = reformat(_data)
breakpoint()
| 26.714286 | 63 | 0.517647 |
4a250b363f8e5f5bb61e0480c78ab305ef88b28f | 1,499 | py | Python | library/setup.py | philwil/ads1015-python | 47c0effd311f1a9f2aff9b282bc8ca60e237e53a | [
"MIT"
] | 5 | 2020-01-28T15:09:49.000Z | 2022-02-07T12:44:06.000Z | library/setup.py | philwil/ads1015-python | 47c0effd311f1a9f2aff9b282bc8ca60e237e53a | [
"MIT"
] | 14 | 2019-07-07T17:36:18.000Z | 2022-01-13T12:51:52.000Z | library/setup.py | philwil/ads1015-python | 47c0effd311f1a9f2aff9b282bc8ca60e237e53a | [
"MIT"
] | 4 | 2020-02-24T14:58:22.000Z | 2021-10-31T02:01:47.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2016 Pimoroni
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import setup, __version__
from pkg_resources import parse_version
minimum_version = parse_version('30.4.0')
if parse_version(__version__) < minimum_version:
raise RuntimeError("Package setuptools must be at least version {}".format(minimum_version))
setup(
packages=['ads1015'],
install_requires=['i2cdevice>=0.0.6', 'setuptools>={}'.format(minimum_version)]
)
| 40.513514 | 96 | 0.781855 |
4a250b8d1295dfd5b7f8b99a0521da59c2bbfe0d | 233 | py | Python | Greedy/1758.py | esdx245/algorithms | 39dd06c9277f30bfdce594a86ce71f52e28e5dc8 | [
"MIT"
] | null | null | null | Greedy/1758.py | esdx245/algorithms | 39dd06c9277f30bfdce594a86ce71f52e28e5dc8 | [
"MIT"
] | null | null | null | Greedy/1758.py | esdx245/algorithms | 39dd06c9277f30bfdce594a86ce71f52e28e5dc8 | [
"MIT"
] | null | null | null | n = int(input())
lista = []
result = 0
for _ in range(n):
lista.append(int(input()))
lista.sort(reverse = 1)
for i in range(n):
temp = lista[i] - i
if temp > 0:
result += temp
temp = 0
else:
temp = 0
print(result) | 16.642857 | 28 | 0.575107 |
4a250cd84636f48e240f1e71686132eb9ec8eda0 | 10,473 | py | Python | geemap/utils.py | XianranZ/geemap | d2c5489b24bbb9aa1e4238a3700c0411d75cb27c | [
"MIT"
] | null | null | null | geemap/utils.py | XianranZ/geemap | d2c5489b24bbb9aa1e4238a3700c0411d75cb27c | [
"MIT"
] | null | null | null | geemap/utils.py | XianranZ/geemap | d2c5489b24bbb9aa1e4238a3700c0411d75cb27c | [
"MIT"
] | null | null | null | import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# Compute area in square meters
def vec_area(f):
# Compute area in square meters. Convert to hectares.
areaSqm = f.area()
# A new property called 'area' will be set on each feature.
return f.set({'area': areaSqm})
def vec_area_sqkm(f):
areaSqkm = f.area().divide(1000 * 1000)
return f.set({'area': areaSqkm})
def vec_area_ha(f):
# Compute area in square meters. Convert to hectares.
areaHa = f.area(1).divide(100 * 100)
# A new property called 'area' will be set on each feature.
return f.set({'area': areaHa})
def get_year(date):
return ee.Date(date).get('year')
# Convert string to number
def str_to_number(text):
    return ee.Number.parse(text)
# Calculate array sum
def array_sum(arr):
return ee.Array(arr).accum(0).get([-1])
# Calculate array mean
def array_mean(arr):
    # avoid shadowing the built-in sum()
    total = ee.Array(arr).accum(0).get([-1])
    size = arr.length()
    return ee.Number(total.divide(size))
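# --- Illustrative usage (added; not part of the original module): a hypothetical
# sketch, assuming the Earth Engine session initialized above.
#     arr = ee.List([1, 2, 3, 4])
#     print(array_sum(arr).getInfo())   # expected 10
#     print(array_mean(arr).getInfo())  # expected 2.5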
def get_annual_NAIP(year):
try:
collection = ee.ImageCollection('USDA/NAIP/DOQQ')
start_date = str(year) + '-01-01'
end_date = str(year) + '-12-31'
naip = collection.filterDate(start_date, end_date) \
.filter(ee.Filter.listContains("system:band_names", "N"))
return naip
except Exception as e:
print(e)
def get_all_NAIP(start_year=2009, end_year=2018):
try:
def get_annual_NAIP(year):
try:
collection = ee.ImageCollection('USDA/NAIP/DOQQ')
start_date = ee.Date.fromYMD(year, 1, 1)
end_date = ee.Date.fromYMD(year, 12, 31)
naip = collection.filterDate(start_date, end_date) \
.filter(ee.Filter.listContains("system:band_names", "N"))
return ee.ImageCollection(naip)
except Exception as e:
print(e)
years = ee.List.sequence(start_year, end_year)
collection = years.map(get_annual_NAIP)
return collection
except Exception as e:
print(e)
# Create NAIP mosaic for a specified year
def annual_NAIP(year, geometry):
start_date = ee.Date.fromYMD(year, 1, 1)
end_date = ee.Date.fromYMD(year, 12, 31)
collection = ee.ImageCollection('USDA/NAIP/DOQQ') \
.filterDate(start_date, end_date) \
.filterBounds(geometry)
time_start = ee.Date(
ee.List(collection.aggregate_array('system:time_start')).sort().get(0))
time_end = ee.Date(
ee.List(collection.aggregate_array('system:time_end')).sort().get(-1))
image = ee.Image(collection.mosaic().clip(geometry))
NDWI = ee.Image(image).normalizedDifference(
['G', 'N']).select(['nd'], ['ndwi'])
NDVI = ee.Image(image).normalizedDifference(
['N', 'R']).select(['nd'], ['ndvi'])
image = image.addBands(NDWI)
image = image.addBands(NDVI)
return image.set({'system:time_start': time_start, 'system:time_end': time_end})
# Find all available NAIP images for a geometry
def find_NAIP(geometry, add_NDVI=True, add_NDWI=True):
init_collection = ee.ImageCollection('USDA/NAIP/DOQQ') \
.filterBounds(geometry) \
.filterDate('2009-01-01', '2018-12-31') \
.filter(ee.Filter.listContains("system:band_names", "N"))
yearList = ee.List(init_collection.distinct(
['system:time_start']).aggregate_array('system:time_start'))
init_years = yearList.map(lambda y: ee.Date(y).get('year'))
# remove duplicates
init_years = ee.Dictionary(init_years.reduce(
ee.Reducer.frequencyHistogram())).keys()
years = init_years.map(lambda x: ee.Number.parse(x))
# years = init_years.map(lambda x: x)
# Available NAIP years with NIR band
def NAIPAnnual(year):
start_date = ee.Date.fromYMD(year, 1, 1)
end_date = ee.Date.fromYMD(year, 12, 31)
collection = init_collection.filterDate(start_date, end_date)
# .filterBounds(geometry)
# .filter(ee.Filter.listContains("system:band_names", "N"))
time_start = ee.Date(
ee.List(collection.aggregate_array('system:time_start')).sort().get(0))
time_end = ee.Date(
ee.List(collection.aggregate_array('system:time_end')).sort().get(-1))
col_size = collection.size()
image = ee.Image(collection.mosaic().clip(geometry))
if add_NDVI:
NDVI = ee.Image(image).normalizedDifference(
['N', 'R']).select(['nd'], ['ndvi'])
image = image.addBands(NDVI)
if add_NDWI:
NDWI = ee.Image(image).normalizedDifference(
['G', 'N']).select(['nd'], ['ndwi'])
image = image.addBands(NDWI)
return image.set({'system:time_start': time_start, 'system:time_end': time_end, 'tiles': col_size})
# remove years with incomplete coverage
naip = ee.ImageCollection(years.map(NAIPAnnual))
mean_size = ee.Number(naip.aggregate_mean('tiles'))
total_sd = ee.Number(naip.aggregate_total_sd('tiles'))
threshold = mean_size.subtract(total_sd.multiply(1))
naip = naip.filter(ee.Filter.Or(ee.Filter.gte(
'tiles', threshold), ee.Filter.gte('tiles', 15)))
naip = naip.filter(ee.Filter.gte('tiles', 7))
naip_count = naip.size()
naip_seq = ee.List.sequence(0, naip_count.subtract(1))
def set_index(index):
img = ee.Image(naip.toList(naip_count).get(index))
return img.set({'system:uid': ee.Number(index).toUint8()})
naip = naip_seq.map(set_index)
return ee.ImageCollection(naip)
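# --- Illustrative usage (added; not part of the original module): the point
# coordinates and buffer below are placeholders.
#     roi = ee.Geometry.Point([-99.22, 46.78]).buffer(1000)
#     naip = find_NAIP(roi)
#     print('NAIP mosaics found:', naip.size().getInfo())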
# Get NWI by HUC
def filter_NWI(HUC08_Id, geometry):
nwi_asset_prefix = 'users/wqs/NWI-HU8/HU8_'
nwi_asset_suffix = '_Wetlands'
nwi_asset_path = nwi_asset_prefix + HUC08_Id + nwi_asset_suffix
nwi_huc = ee.FeatureCollection(nwi_asset_path).filterBounds(geometry) \
.filter(ee.Filter.notEquals(**{'leftField': 'WETLAND_TY', 'rightValue': 'Riverine'}))
return nwi_huc
# Find HUC08 intersecting a geometry
def filter_HUC08(geometry):
USGS_HUC08 = ee.FeatureCollection('USGS/WBD/2017/HUC08') # Subbasins
HUC08 = USGS_HUC08.filterBounds(geometry)
return HUC08
# Find HUC10 intersecting a geometry
def filter_HUC10(geometry):
USGS_HUC10 = ee.FeatureCollection('USGS/WBD/2017/HUC10') # Watersheds
HUC10 = USGS_HUC10.filterBounds(geometry)
return HUC10
# Find HUC08 by HUC ID
def find_HUC08(HUC08_Id):
USGS_HUC08 = ee.FeatureCollection('USGS/WBD/2017/HUC08') # Subbasins
HUC08 = USGS_HUC08.filter(ee.Filter.eq('huc8', HUC08_Id))
return HUC08
# Find HUC10 by HUC ID
def find_HUC10(HUC10_Id):
USGS_HUC10 = ee.FeatureCollection('USGS/WBD/2017/HUC10') # Watersheds
HUC10 = USGS_HUC10.filter(ee.Filter.eq('huc10', HUC10_Id))
return HUC10
# find NWI by HUC08
def find_NWI(HUC08_Id):
nwi_asset_prefix = 'users/wqs/NWI-HU8/HU8_'
nwi_asset_suffix = '_Wetlands'
nwi_asset_path = nwi_asset_prefix + HUC08_Id + nwi_asset_suffix
nwi_huc = ee.FeatureCollection(nwi_asset_path) \
.filter(ee.Filter.notEquals(**{'leftField': 'WETLAND_TY', 'rightValue': 'Riverine'}))
return nwi_huc
# # Extract NWI by providing a geometry
# def extractNWI(geometry):
# HUC08 = filterHUC08(geometry)
# HUC_list = ee.List(HUC08.aggregate_array('huc8')).getInfo()
# # print('Intersecting HUC08 IDs:', HUC_list)
# nwi = ee.FeatureCollection(HUC_list.map(findNWI)).flatten()
# return nwi.filterBounds(geometry)
# NWI legend: https://www.fws.gov/wetlands/Data/Mapper-Wetlands-Legend.html
def nwi_add_color(fc):
emergent = ee.FeatureCollection(
fc.filter(ee.Filter.eq('WETLAND_TY', 'Freshwater Emergent Wetland')))
emergent = emergent.map(lambda f: f.set(
'R', 127).set('G', 195).set('B', 28))
# print(emergent.first())
forested = fc.filter(ee.Filter.eq(
'WETLAND_TY', 'Freshwater Forested/Shrub Wetland'))
forested = forested.map(lambda f: f.set('R', 0).set('G', 136).set('B', 55))
pond = fc.filter(ee.Filter.eq('WETLAND_TY', 'Freshwater Pond'))
pond = pond.map(lambda f: f.set('R', 104).set('G', 140).set('B', 192))
lake = fc.filter(ee.Filter.eq('WETLAND_TY', 'Lake'))
lake = lake.map(lambda f: f.set('R', 19).set('G', 0).set('B', 124))
riverine = fc.filter(ee.Filter.eq('WETLAND_TY', 'Riverine'))
riverine = riverine.map(lambda f: f.set(
'R', 1).set('G', 144).set('B', 191))
fc = ee.FeatureCollection(emergent.merge(
forested).merge(pond).merge(lake).merge(riverine))
# base = ee.Image(0).mask(0).toInt8()
base = ee.Image().byte()
img = base.paint(fc, 'R') \
.addBands(base.paint(fc, 'G')
.addBands(base.paint(fc, 'B')))
return img
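# --- Illustrative usage (added; not part of the original module): assumes an
# interactive geemap/ipyleaflet map object named `Map`; the HUC08 id is a placeholder.
#     nwi = find_NWI('04050001')
#     Map.addLayer(nwi_add_color(nwi), {}, 'NWI wetlands')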
# calculate total image area (unit: m2)
def image_area(img, geometry, scale):
pixelArea = img.Add(ee.Image(1)).multiply(
ee.Image.pixelArea())
imgArea = pixelArea.reduceRegion(**{
'geometry': geometry,
'reducer': ee.Reducer.sum(),
'scale': scale,
'maxPixels': 1e9
})
return imgArea
# calculate total image area (unit: ha)
def image_area_ha(img, geometry, scale):
pixelArea = img.Add(ee.Image(1)).multiply(
ee.Image.pixelArea()).divide(10000)
imgArea = pixelArea.reduceRegion(**{
'geometry': geometry,
'reducer': ee.Reducer.sum(),
'scale': scale,
'maxPixels': 1e9
})
return imgArea
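# --- Illustrative usage (added; not part of the original module): both helpers
# return an ee.Dictionary of per-band totals; `water_mask` and `roi` below are
# placeholders for a binary image and a geometry.
#     stats = image_area_ha(water_mask, roi, 30)
#     print(stats.getInfo())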
# get highest value
def max_value(img, scale=30):
max_value = img.reduceRegion(**{
'reducer': ee.Reducer.max(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return max_value
# get lowest value
def min_value(img, scale=30):
min_value = img.reduceRegion(**{
'reducer': ee.Reducer.min(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return min_value
# get mean value
def mean_value(img, scale=30):
mean_value = img.reduceRegion(**{
'reducer': ee.Reducer.mean(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return mean_value
# get standard deviation
def std_value(img, scale=30):
std_value = img.reduceRegion(**{
'reducer': ee.Reducer.stdDev(),
'geometry': img.geometry(),
'scale': scale,
'maxPixels': 1e9
})
return std_value
| 31.262687 | 107 | 0.636589 |
4a250f96a14ce5573c44cfb00553490a6b521924 | 9,818 | py | Python | plugins/login.py | Mehmetbaba06/Instagram-Bot | 09eb2ff455ae5378d8ec41f6706e12915a05f94e | [
"MIT"
] | null | null | null | plugins/login.py | Mehmetbaba06/Instagram-Bot | 09eb2ff455ae5378d8ec41f6706e12915a05f94e | [
"MIT"
] | null | null | null | plugins/login.py | Mehmetbaba06/Instagram-Bot | 09eb2ff455ae5378d8ec41f6706e12915a05f94e | [
"MIT"
] | null | null | null | #MIT License
#Copyright (c) 2021 subinps
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from pyrogram import Client, filters
from config import Config
from utils import *
import os
from instaloader import Profile, TwoFactorAuthRequiredException, BadCredentialsException
from asyncio.exceptions import TimeoutError
USER=Config.USER
STATUS=Config.STATUS
OWNER=Config.OWNER
HOME_TEXT=Config.HOME_TEXT
insta = Config.L
@Client.on_message(filters.command("login") & filters.private)
async def login(bot, message):
if str(message.from_user.id) != OWNER:
await message.reply_text(
HOME_TEXT.format(message.from_user.first_name, message.from_user.id, USER, USER, USER, OWNER),
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("👨🏼💻Developer", url='https://t.me/Mahoaga'),
InlineKeyboardButton("👨🏼💻Developer", url="https://t.me/Mahoaga")
],
[
InlineKeyboardButton("👨🏼💻Developer", url="https://t.me/Mahoaga"),
InlineKeyboardButton("👨🏼💻Developer", url="https://t.me/Mahoaga")
],
[
InlineKeyboardButton("👨🏼🦯Nasıl Kullanılır?", callback_data="help#subin")
]
]
)
)
return
username=USER
if 1 in STATUS:
m=await bot.send_message(message.from_user.id, "Instagram'dan ayrıntılar getir")
profile = Profile.own_profile(insta.context)
mediacount = profile.mediacount
name = profile.full_name
bio = profile.biography
profilepic = profile.profile_pic_url
igtvcount = profile.igtvcount
followers = profile.followers
following = profile.followees
await m.delete()
await bot.send_photo(
chat_id=message.from_user.id,
caption=f"Zaten Oturum Açmış durumdasınız {name}\n\n**Hesap Bilgileriniz**\n\n🏷 **İsim**: {name}\n🔖 **Kullanıcı adı**: {profile.username}\n📝 **Biyo**: {bio}\n📍 **Hesap Türü**: {acc_type(profile.is_private)}\n🏭 **İşletme Hesabı mı?**: {yes_or_no(profile.is_business_account)}\n👥 **Toplam Takipçi Sayısı**: {followers}\n👥 **Toplam Takip**: {following}\n📸 **Toplam Gönderi Sayısı**: {mediacount}\n📺 **IGTV Videolar**: {igtvcount}",
photo=profilepic
)
return
while True:
try:
password = await bot.ask(text = f"Merhaba {USER} Hesabınıza giriş yapmak için Instagram Şifrenizi girin 🙈", chat_id = message.from_user.id, filters=filters.text, timeout=30)
except TimeoutError:
await bot.send_message(message.from_user.id, "Hata!!\n\nİstek zamanlanmış.\nKullanarak yeniden başlatma /login")
return
passw=password.text
break
try:
insta.login(username, passw)
insta.save_session_to_file(filename=f"./{username}")
f=await bot.send_document(
chat_id=message.from_user.id,
document=f"./{username}",
file_name=str(message.from_user.id),
caption="⚠️ BU OTURUM DOSYASINI GÜVENDE TUTUN VE KIMSEYLE PAYLAŞMAYIN"
)
file_id=f.document.file_id
await bot.send_message(message.from_user.id, f"Şimdi git: [Heroku](https://dashboard.heroku.com/apps) ve Ortam değişkenini ayarlayın.\n\n\n**KEY**: <code>INSTA_SESSIONFILE_ID</code>\n\n**VALUE**: <code>{file_id}</code>\n\nBunu ayarlamazsanız Heroku yeniden başlatıldığında tekrar giriş yapmanız gerekebilir.", disable_web_page_preview=True)
STATUS.add(1)
m=await bot.send_message(message.from_user.id, "Instagram'dan ayrıntılar getir")
profile = Profile.from_username(insta.context, username)
mediacount = profile.mediacount
name = profile.full_name
bio = profile.biography
profilepic = profile.profile_pic_url
igtvcount = profile.igtvcount
followers = profile.followers
following = profile.followees
await m.delete()
await bot.send_photo(
chat_id=message.from_user.id,
caption=f"🔓Başarıyla Oturum Açtı {name}\n\n**Hesap Bilgileriniz**\n\n🏷 **İsim**: {name}\n🔖 **Kullanıcı adı**: {profile.username}\n📝 **Biyo**: {bio}\n📍 **Hesap Türü**: {acc_type(profile.is_private)}\n🏭 **İşletme Hesabı mı?**: {yes_or_no(profile.is_business_account)}\n👥 **Toplam Takipçi Sayısı**: {followers}\n👥 **Toplam Takip**: {following}\n📸 **Toplam Gönderi SayısıToplam Gönderi Sayısı**: {mediacount}\n📺 **IGTV Videolar**: {igtvcount}",
photo=profilepic
)
except TwoFactorAuthRequiredException:
while True:
try:
code = await bot.ask(text = "Oh!!\nInstagram hesabınızda İki Faktörlü Kimlik Doğrulama etkin🔐\n\nTelefonunuza bir OTP gönderildi\nEnter OTP", chat_id = message.from_user.id, filters=filters.text, timeout=30)
except TimeoutError:
await bot.send_message(message.from_user.id, "Hata!!\n\nİstek zamanlanmış.\nKullanarak yeniden başlatma /login")
return
codei=code.text
try:
codei=int(codei)
break
except:
await bot.send_message(message.from_user.id, "OTP Tamsayı Olmalıdır")
continue
try:
insta.two_factor_login(codei)
insta.save_session_to_file(filename=f"./{username}")
f=await bot.send_document(
chat_id=message.from_user.id,
document=f"./{username}",
file_name=str(message.from_user.id),
caption="⚠️ BU OTURUM DOSYASINI GÜVENDE TUTUN VE HERHANGI BIR YER ILE PAYLAŞMAYIN"
)
file_id=f.document.file_id
await bot.send_message(message.from_user.id, f"Now go to [Heroku](https://dashboard.heroku.com/apps) and set Environment variable.\n\n\n**KEY**: <code>INSTA_SESSIONFILE_ID</code>\n\n**VALUE**: <code>{file_id}</code>\n\nIf you do not set this you may need to Login again When Heroku restarts.", disable_web_page_preview=True)
STATUS.add(1)
m=await bot.send_message(message.from_user.id, "Instagram'dan ayrıntılar getir")
profile = Profile.from_username(insta.context, username)
mediacount = profile.mediacount
name = profile.full_name
bio = profile.biography
profilepic = profile.profile_pic_url
igtvcount = profile.igtvcount
followers = profile.followers
following = profile.followees
await m.delete()
await bot.send_photo(
chat_id=message.from_user.id,
caption=f"🔓Başarıyla Oturum Açtı {name}\n\n**Hesap Bilgileriniz**\n\n🏷 **İsim**: {name}\n🔖 **Kullanıcı adı**: {profile.username}\n📝**Biyo**: {bio}\n📍**Hesap Türü**: {acc_type(profile.is_private)}\n🏭**İşletme Hesabı mı?**: {yes_or_no(profile.is_business_account)}\n👥**Toplam Takipçi Sayısı**: {followers}\n👥**Toplam Takip**: {following}\n📸**Toplam Gönderi Sayısı**: {mediacount}\n📺**IGTV Videolar**: {igtvcount}",
photo=profilepic
)
except BadCredentialsException:
await bot.send_message(message.from_user.id, "Yanlış Kimlik Bilgileri\n\n/login yine")
pass
except Exception as e:
await bot.send_message(message.from_user.id, f"{e}\nTry /login again")
print("Logged in")
except Exception as e:
await bot.send_message(message.from_user.id, f"{e}\nYeniden deneyin veya Bu Sorunu Bildir [Developer](tg://user?id=1957316197)")
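# --- Added note (not part of the original plugin): the session file saved above is a
# standard instaloader session; on a later start it can (hypothetically) be restored with
#     insta.load_session_from_file(USER, filename=f"./{USER}")
# so the bot avoids a fresh password/OTP login until Instagram invalidates the session.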
@Client.on_message(filters.command("logout") & filters.private)
async def logout(bot, message):
if str(message.from_user.id) != OWNER:
await message.reply_text(
HOME_TEXT.format(message.from_user.first_name, message.from_user.id, USER, USER, USER, OWNER),
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("👨🏼💻Developer", url='https://t.me/Mahoaga'),
InlineKeyboardButton("👨🏼💻Developer", url="https://t.me/Mahoaga")
],
[
InlineKeyboardButton("👨🏼💻Developer", url="https://t.me/Anshu888o"),
InlineKeyboardButton("👨🏼💻Developer", url="https://t.me/Mahoaga")
],
[
InlineKeyboardButton("👨🏼🦯Nasıl Kullanılır?", callback_data="help#subin")
]
]
)
)
return
if 1 in STATUS:
await message.reply_text("Başarıyla Oturumu Kapat")
STATUS.remove(1)
os.remove(f"./{USER}")
else:
await message.reply_text("Oturum Açmadınız\nKullanmak /login açınız")
| 49.585859 | 452 | 0.650336 |
4a250fe633e2420d23eb46a6ad48858b05e613c2 | 77 | py | Python | torch_rl/torch_rl/algos/__init__.py | RobertSamoilescu/RL_CarRacing | c1b4c3bed00fd1630e68f32a02798034df57c73a | [
"MIT"
] | 39 | 2019-10-30T08:59:35.000Z | 2022-03-18T01:28:01.000Z | torch_rl/torch_rl/algos/__init__.py | RobertSamoilescu/RL_CarRacing | c1b4c3bed00fd1630e68f32a02798034df57c73a | [
"MIT"
] | 10 | 2019-12-29T17:10:40.000Z | 2021-07-03T09:32:22.000Z | torch_rl/torch_rl/algos/__init__.py | RobertSamoilescu/RL_CarRacing | c1b4c3bed00fd1630e68f32a02798034df57c73a | [
"MIT"
] | 12 | 2020-01-19T09:31:29.000Z | 2022-02-08T01:30:08.000Z | from torch_rl.algos.a2c import A2CAlgo
from torch_rl.algos.ppo import PPOAlgo | 38.5 | 38 | 0.857143 |
4a2510e4b7eedd3aa5f3ae3d52c040474e7df9fb | 11,649 | py | Python | angr/storage/memory_mixins/__init__.py | BA7JCM/angr | 187a713c35759d998d93dfc5280630976d42d717 | [
"BSD-2-Clause"
] | null | null | null | angr/storage/memory_mixins/__init__.py | BA7JCM/angr | 187a713c35759d998d93dfc5280630976d42d717 | [
"BSD-2-Clause"
] | null | null | null | angr/storage/memory_mixins/__init__.py | BA7JCM/angr | 187a713c35759d998d93dfc5280630976d42d717 | [
"BSD-2-Clause"
] | null | null | null | # pylint:disable=abstract-method
from typing import Iterable, Tuple, Dict, Any, Optional
import claripy
from ...state_plugins.plugin import SimStatePlugin
from ...errors import SimMemoryError
class MemoryMixin(SimStatePlugin):
SUPPORTS_CONCRETE_LOAD = False
def __init__(self, memory_id=None, endness='Iend_BE'):
super().__init__()
self.id = memory_id
self.endness = endness
def copy(self, memo):
o = type(self).__new__(type(self))
o.id = self.id
o.endness = self.endness
return o
@property
def category(self):
"""
Return the category of this SimMemory instance. It can be one of the three following categories: reg, mem,
or file.
"""
if self.id in ('reg', 'mem'):
return self.id
elif self.id.startswith('file'):
return 'file'
elif '_' in self.id:
return self.id.split('_')[0]
else:
raise SimMemoryError('Unknown SimMemory category for memory_id "%s"' % self.id)
@property
def variable_key_prefix(self):
s = self.category
if s == 'file':
return (s, self.id)
return (s,)
def find(self, addr, data, max_search, **kwargs):
pass
def _add_constraints(self, c, add_constraints=True, condition=None, **kwargs):
if add_constraints:
if condition is not None:
to_add = (c & condition) | ~condition
else:
to_add = c
self.state.add_constraints(to_add)
def load(self, addr, **kwargs):
pass
def store(self, addr, data, **kwargs):
pass
def merge(self, others, merge_conditions, common_ancestor=None) -> bool:
pass
def widen(self, others):
pass
def permissions(self, addr, permissions=None, **kwargs):
pass
def map_region(self, addr, length, permissions, init_zero=False, **kwargs):
pass
def unmap_region(self, addr, length, **kwargs):
pass
# Optional interface:
def concrete_load(self, addr, size, writing=False, **kwargs) -> memoryview:
"""
Set SUPPORTS_CONCRETE_LOAD to True and implement concrete_load if reading concrete bytes is faster in this
memory model.
:param addr: The address to load from.
:param size: Size of the memory read.
:param writing:
:return: A memoryview into the loaded bytes.
"""
raise NotImplementedError()
def erase(self, addr, size=None, **kwargs) -> None:
"""
Set [addr:addr+size) to uninitialized. In many cases this will be faster than overwriting those locations with
new values. This is commonly used during static data flow analysis.
:param addr: The address to start erasing.
:param size: The number of bytes for erasing.
:return: None
"""
raise NotImplementedError()
def _default_value(self, addr, size, name=None, inspect=True, events=True, key=None, **kwargs):
"""
Override this method to provide default values for a variety of edge cases and base cases.
:param addr: If this value is being filled to provide a default memory value, this will be its address.
Otherwise, None.
:param size: The size in bytes of the value to return
:param name: A descriptive identifier for the value, for if a symbol is created.
The ``inspect``, ``events``, and ``key`` parameters are for ``state.solver.Unconstrained``, if it is used.
"""
pass
def _merge_values(self, values: Iterable[Tuple[Any,Any]], merged_size: int, **kwargs) -> Optional[Any]:
"""
Override this method to provide value merging support.
:param values: A collection of values with their merge conditions.
:param merged_size: The size (in bytes) of the merged value.
:return: The merged value, or None to skip merging of the current value.
"""
raise NotImplementedError()
def _merge_labels(self, labels: Iterable[Dict], **kwargs) -> Optional[Dict]:
"""
Override this method to provide label merging support.
:param labels: A collection of labels.
:return: The merged label, or None to skip merging of the current label.
"""
raise NotImplementedError()
def replace_all(self, old: claripy.ast.BV, new: claripy.ast.BV):
raise NotImplementedError()
def _replace_all(self, addrs: Iterable[int], old: claripy.ast.BV, new: claripy.ast.BV):
raise NotImplementedError()
def copy_contents(self, dst, src, size, condition=None, **kwargs):
"""
Override this method to provide faster copying of large chunks of data.
:param dst: The destination of copying.
:param src: The source of copying.
:param size: The size of copying.
:param condition: The storing condition.
:param kwargs: Other parameters.
:return: None
"""
raise NotImplementedError()
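# --- Illustrative usage (added; not part of the original module): memory mixins are
# normally reached through a SimState rather than instantiated directly; a minimal,
# hypothetical sketch (the binary path is a placeholder):
#     proj = angr.Project('/bin/true', auto_load_libs=False)
#     state = proj.factory.entry_state()
#     state.memory.store(0x100000, claripy.BVV(0x41, 8))
#     byte = state.memory.load(0x100000, 1)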
from .actions_mixin import ActionsMixinHigh, ActionsMixinLow
from .address_concretization_mixin import AddressConcretizationMixin
from .bvv_conversion_mixin import DataNormalizationMixin
from .clouseau_mixin import InspectMixinHigh
from .conditional_store_mixin import ConditionalMixin
from .convenient_mappings_mixin import ConvenientMappingsMixin
from .default_filler_mixin import DefaultFillerMixin, SpecialFillerMixin, ExplicitFillerMixin
from .dirty_addrs_mixin import DirtyAddrsMixin
from .hex_dumper_mixin import HexDumperMixin
from .label_merger_mixin import LabelMergerMixin
from .multi_value_merger_mixin import MultiValueMergerMixin
from .name_resolution_mixin import NameResolutionMixin
from .simplification_mixin import SimplificationMixin
from .simple_interface_mixin import SimpleInterfaceMixin
from .size_resolution_mixin import SizeNormalizationMixin, SizeConcretizationMixin
from .smart_find_mixin import SmartFindMixin
from .symbolic_merger_mixin import SymbolicMergerMixin
from .top_merger_mixin import TopMergerMixin
from .underconstrained_mixin import UnderconstrainedMixin
from .unwrapper_mixin import UnwrapperMixin
from .paged_memory.page_backer_mixins import ClemoryBackerMixin, ConcreteBackerMixin, DictBackerMixin
from .paged_memory.paged_memory_mixin import PagedMemoryMixin, ListPagesMixin, UltraPagesMixin, \
ListPagesWithLabelsMixin, MVListPagesMixin, MVListPagesWithLabelsMixin
from .paged_memory.privileged_mixin import PrivilegedPagingMixin
from .paged_memory.stack_allocation_mixin import StackAllocationMixin
from .paged_memory.pages import *
from .slotted_memory import SlottedMemoryMixin
from .regioned_memory import RegionedMemoryMixin, RegionCategoryMixin, StaticFindMixin, AbstractMergerMixin, \
MemoryRegionMetaMixin, RegionedAddressConcretizationMixin
from .keyvalue_memory import KeyValueMemoryMixin
from .javavm_memory import JavaVmMemoryMixin
class DefaultMemory(
HexDumperMixin,
SmartFindMixin,
UnwrapperMixin,
NameResolutionMixin,
DataNormalizationMixin,
SimplificationMixin,
InspectMixinHigh,
ActionsMixinHigh,
UnderconstrainedMixin,
SizeConcretizationMixin,
SizeNormalizationMixin,
AddressConcretizationMixin,
#InspectMixinLow,
ActionsMixinLow,
ConditionalMixin,
ConvenientMappingsMixin,
DirtyAddrsMixin,
# -----
StackAllocationMixin,
ConcreteBackerMixin,
ClemoryBackerMixin,
DictBackerMixin,
PrivilegedPagingMixin,
UltraPagesMixin,
DefaultFillerMixin,
SymbolicMergerMixin,
PagedMemoryMixin,
):
pass
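# --- Added note (not part of the original module): custom models are assembled the
# same way, stacking only the behaviours needed on top of PagedMemoryMixin; a
# hypothetical minimal stack could look like
#     class MinimalMemory(
#         SizeNormalizationMixin,   # normalize load/store sizes
#         UltraPagesMixin,          # page storage implementation
#         DefaultFillerMixin,       # synthesize values for missing reads
#         PagedMemoryMixin,         # paged base model
#     ):
#         pass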
class DefaultListPagesMemory(
HexDumperMixin,
SmartFindMixin,
UnwrapperMixin,
NameResolutionMixin,
DataNormalizationMixin,
SimplificationMixin,
ActionsMixinHigh,
UnderconstrainedMixin,
SizeConcretizationMixin,
SizeNormalizationMixin,
InspectMixinHigh,
AddressConcretizationMixin,
#InspectMixinLow,
ActionsMixinLow,
ConditionalMixin,
ConvenientMappingsMixin,
DirtyAddrsMixin,
# -----
StackAllocationMixin,
ClemoryBackerMixin,
DictBackerMixin,
PrivilegedPagingMixin,
ListPagesMixin,
DefaultFillerMixin,
SymbolicMergerMixin,
PagedMemoryMixin,
):
pass
class FastMemory(
NameResolutionMixin,
SimpleInterfaceMixin,
SimplificationMixin,
InspectMixinHigh,
ConditionalMixin,
ExplicitFillerMixin,
DefaultFillerMixin,
SlottedMemoryMixin,
):
pass
class AbstractMemory(
UnwrapperMixin,
NameResolutionMixin,
DataNormalizationMixin,
SimplificationMixin,
InspectMixinHigh,
ActionsMixinHigh,
UnderconstrainedMixin,
SizeConcretizationMixin,
SizeNormalizationMixin,
#InspectMixinLow,
ActionsMixinLow,
ConditionalMixin,
RegionedAddressConcretizationMixin,
# -----
RegionedMemoryMixin,
):
pass
class RegionedMemory(
RegionCategoryMixin,
MemoryRegionMetaMixin,
StaticFindMixin,
UnwrapperMixin,
NameResolutionMixin,
DataNormalizationMixin,
SimplificationMixin,
SizeConcretizationMixin,
SizeNormalizationMixin,
AddressConcretizationMixin,
ConvenientMappingsMixin,
DirtyAddrsMixin,
# -----
ClemoryBackerMixin,
DictBackerMixin,
UltraPagesMixin,
DefaultFillerMixin,
AbstractMergerMixin,
PagedMemoryMixin,
):
pass
class LabeledMemory(
SizeNormalizationMixin,
ListPagesWithLabelsMixin,
DefaultFillerMixin,
TopMergerMixin,
LabelMergerMixin,
PagedMemoryMixin,
):
"""
LabeledMemory is used in static analysis. It allows storing values with labels, such as `Definition`.
"""
def _default_value(self, addr, size, **kwargs):
# TODO: Make _default_value() a separate Mixin
if kwargs.get("name", "").startswith("merge_uc_"):
# this is a hack. when this condition is satisfied, _default_value() is called inside Listpage.merge() to
# create temporary values. we simply return a TOP value here.
return self.state.top(size * self.state.arch.byte_width)
# we never fill default values for non-existent loads
kwargs['fill_missing'] = False
return super()._default_value(addr, size, **kwargs)
class MultiValuedMemory(
SizeNormalizationMixin,
MVListPagesMixin,
DefaultFillerMixin,
MultiValueMergerMixin,
PagedMemoryMixin,
):
def _default_value(self, addr, size, **kwargs):
# TODO: Make _default_value() a separate Mixin
if kwargs.get("name", "").startswith("merge_uc_"):
# this is a hack. when this condition is satisfied, _default_value() is called inside Listpage.merge() to
# create temporary values. we simply return a TOP value here.
return self.state.top(size * self.state.arch.byte_width)
# we never fill default values for non-existent loads
kwargs['fill_missing'] = False
return super()._default_value(addr, size, **kwargs)
class KeyValueMemory(
KeyValueMemoryMixin,
):
pass
class JavaVmMemory(
JavaVmMemoryMixin,
):
pass
from angr.sim_state import SimState
SimState.register_default('sym_memory', DefaultMemory)
SimState.register_default('fast_memory', FastMemory)
SimState.register_default('abs_memory', AbstractMemory)
SimState.register_default('keyvalue_memory', KeyValueMemory)
SimState.register_default('javavm_memory', JavaVmMemory)
| 30.981383 | 118 | 0.697227 |
4a2511be2292a757afbc1029baeffad1ffe2f3f9 | 174 | py | Python | prob16.py | davidkartchner/project_euler | 1f24bf115c460e00319da5f80777cd054d003165 | [
"MIT"
] | null | null | null | prob16.py | davidkartchner/project_euler | 1f24bf115c460e00319da5f80777cd054d003165 | [
"MIT"
] | null | null | null | prob16.py | davidkartchner/project_euler | 1f24bf115c460e00319da5f80777cd054d003165 | [
"MIT"
] | null | null | null | """
David Kartchner
Project Euler Problem 16
May 11, 2016
"""
big_string = str(2**1000)
digit_list = [int(i) for i in big_string]
digit_sum = sum(digit_list)
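# Added note: equivalently, the whole computation is sum(map(int, str(2**1000))).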
print(digit_sum) | 17.4 | 41 | 0.735632 |
4a2514dd02fdcbf50f7d010b2ddebdb7f26ae8ca | 30,649 | py | Python | sdk/python/pulumi_azure_native/web/get_web_app_slot.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/get_web_app_slot.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/get_web_app_slot.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetWebAppSlotResult',
'AwaitableGetWebAppSlotResult',
'get_web_app_slot',
]
@pulumi.output_type
class GetWebAppSlotResult:
"""
A web app, a mobile app backend, or an API app.
"""
def __init__(__self__, availability_state=None, client_affinity_enabled=None, client_cert_enabled=None, client_cert_exclusion_paths=None, client_cert_mode=None, container_size=None, custom_domain_verification_id=None, daily_memory_time_quota=None, default_host_name=None, enabled=None, enabled_host_names=None, host_name_ssl_states=None, host_names=None, host_names_disabled=None, hosting_environment_profile=None, https_only=None, hyper_v=None, id=None, identity=None, in_progress_operation_id=None, is_default_container=None, is_xenon=None, key_vault_reference_identity=None, kind=None, last_modified_time_utc=None, location=None, max_number_of_workers=None, name=None, outbound_ip_addresses=None, possible_outbound_ip_addresses=None, redundancy_mode=None, repository_site_name=None, reserved=None, resource_group=None, scm_site_also_stopped=None, server_farm_id=None, site_config=None, slot_swap_status=None, state=None, storage_account_required=None, suspended_till=None, tags=None, target_swap_slot=None, traffic_manager_host_names=None, type=None, usage_state=None, virtual_network_subnet_id=None):
if availability_state and not isinstance(availability_state, str):
raise TypeError("Expected argument 'availability_state' to be a str")
pulumi.set(__self__, "availability_state", availability_state)
if client_affinity_enabled and not isinstance(client_affinity_enabled, bool):
raise TypeError("Expected argument 'client_affinity_enabled' to be a bool")
pulumi.set(__self__, "client_affinity_enabled", client_affinity_enabled)
if client_cert_enabled and not isinstance(client_cert_enabled, bool):
raise TypeError("Expected argument 'client_cert_enabled' to be a bool")
pulumi.set(__self__, "client_cert_enabled", client_cert_enabled)
if client_cert_exclusion_paths and not isinstance(client_cert_exclusion_paths, str):
raise TypeError("Expected argument 'client_cert_exclusion_paths' to be a str")
pulumi.set(__self__, "client_cert_exclusion_paths", client_cert_exclusion_paths)
if client_cert_mode and not isinstance(client_cert_mode, str):
raise TypeError("Expected argument 'client_cert_mode' to be a str")
pulumi.set(__self__, "client_cert_mode", client_cert_mode)
if container_size and not isinstance(container_size, int):
raise TypeError("Expected argument 'container_size' to be a int")
pulumi.set(__self__, "container_size", container_size)
if custom_domain_verification_id and not isinstance(custom_domain_verification_id, str):
raise TypeError("Expected argument 'custom_domain_verification_id' to be a str")
pulumi.set(__self__, "custom_domain_verification_id", custom_domain_verification_id)
if daily_memory_time_quota and not isinstance(daily_memory_time_quota, int):
raise TypeError("Expected argument 'daily_memory_time_quota' to be a int")
pulumi.set(__self__, "daily_memory_time_quota", daily_memory_time_quota)
if default_host_name and not isinstance(default_host_name, str):
raise TypeError("Expected argument 'default_host_name' to be a str")
pulumi.set(__self__, "default_host_name", default_host_name)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if enabled_host_names and not isinstance(enabled_host_names, list):
raise TypeError("Expected argument 'enabled_host_names' to be a list")
pulumi.set(__self__, "enabled_host_names", enabled_host_names)
if host_name_ssl_states and not isinstance(host_name_ssl_states, list):
raise TypeError("Expected argument 'host_name_ssl_states' to be a list")
pulumi.set(__self__, "host_name_ssl_states", host_name_ssl_states)
if host_names and not isinstance(host_names, list):
raise TypeError("Expected argument 'host_names' to be a list")
pulumi.set(__self__, "host_names", host_names)
if host_names_disabled and not isinstance(host_names_disabled, bool):
raise TypeError("Expected argument 'host_names_disabled' to be a bool")
pulumi.set(__self__, "host_names_disabled", host_names_disabled)
if hosting_environment_profile and not isinstance(hosting_environment_profile, dict):
raise TypeError("Expected argument 'hosting_environment_profile' to be a dict")
pulumi.set(__self__, "hosting_environment_profile", hosting_environment_profile)
if https_only and not isinstance(https_only, bool):
raise TypeError("Expected argument 'https_only' to be a bool")
pulumi.set(__self__, "https_only", https_only)
if hyper_v and not isinstance(hyper_v, bool):
raise TypeError("Expected argument 'hyper_v' to be a bool")
pulumi.set(__self__, "hyper_v", hyper_v)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if in_progress_operation_id and not isinstance(in_progress_operation_id, str):
raise TypeError("Expected argument 'in_progress_operation_id' to be a str")
pulumi.set(__self__, "in_progress_operation_id", in_progress_operation_id)
if is_default_container and not isinstance(is_default_container, bool):
raise TypeError("Expected argument 'is_default_container' to be a bool")
pulumi.set(__self__, "is_default_container", is_default_container)
if is_xenon and not isinstance(is_xenon, bool):
raise TypeError("Expected argument 'is_xenon' to be a bool")
pulumi.set(__self__, "is_xenon", is_xenon)
if key_vault_reference_identity and not isinstance(key_vault_reference_identity, str):
raise TypeError("Expected argument 'key_vault_reference_identity' to be a str")
pulumi.set(__self__, "key_vault_reference_identity", key_vault_reference_identity)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if last_modified_time_utc and not isinstance(last_modified_time_utc, str):
raise TypeError("Expected argument 'last_modified_time_utc' to be a str")
pulumi.set(__self__, "last_modified_time_utc", last_modified_time_utc)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if max_number_of_workers and not isinstance(max_number_of_workers, int):
raise TypeError("Expected argument 'max_number_of_workers' to be a int")
pulumi.set(__self__, "max_number_of_workers", max_number_of_workers)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if outbound_ip_addresses and not isinstance(outbound_ip_addresses, str):
raise TypeError("Expected argument 'outbound_ip_addresses' to be a str")
pulumi.set(__self__, "outbound_ip_addresses", outbound_ip_addresses)
if possible_outbound_ip_addresses and not isinstance(possible_outbound_ip_addresses, str):
raise TypeError("Expected argument 'possible_outbound_ip_addresses' to be a str")
pulumi.set(__self__, "possible_outbound_ip_addresses", possible_outbound_ip_addresses)
if redundancy_mode and not isinstance(redundancy_mode, str):
raise TypeError("Expected argument 'redundancy_mode' to be a str")
pulumi.set(__self__, "redundancy_mode", redundancy_mode)
if repository_site_name and not isinstance(repository_site_name, str):
raise TypeError("Expected argument 'repository_site_name' to be a str")
pulumi.set(__self__, "repository_site_name", repository_site_name)
if reserved and not isinstance(reserved, bool):
raise TypeError("Expected argument 'reserved' to be a bool")
pulumi.set(__self__, "reserved", reserved)
if resource_group and not isinstance(resource_group, str):
raise TypeError("Expected argument 'resource_group' to be a str")
pulumi.set(__self__, "resource_group", resource_group)
if scm_site_also_stopped and not isinstance(scm_site_also_stopped, bool):
raise TypeError("Expected argument 'scm_site_also_stopped' to be a bool")
pulumi.set(__self__, "scm_site_also_stopped", scm_site_also_stopped)
if server_farm_id and not isinstance(server_farm_id, str):
raise TypeError("Expected argument 'server_farm_id' to be a str")
pulumi.set(__self__, "server_farm_id", server_farm_id)
if site_config and not isinstance(site_config, dict):
raise TypeError("Expected argument 'site_config' to be a dict")
pulumi.set(__self__, "site_config", site_config)
if slot_swap_status and not isinstance(slot_swap_status, dict):
raise TypeError("Expected argument 'slot_swap_status' to be a dict")
pulumi.set(__self__, "slot_swap_status", slot_swap_status)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if storage_account_required and not isinstance(storage_account_required, bool):
raise TypeError("Expected argument 'storage_account_required' to be a bool")
pulumi.set(__self__, "storage_account_required", storage_account_required)
if suspended_till and not isinstance(suspended_till, str):
raise TypeError("Expected argument 'suspended_till' to be a str")
pulumi.set(__self__, "suspended_till", suspended_till)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if target_swap_slot and not isinstance(target_swap_slot, str):
raise TypeError("Expected argument 'target_swap_slot' to be a str")
pulumi.set(__self__, "target_swap_slot", target_swap_slot)
if traffic_manager_host_names and not isinstance(traffic_manager_host_names, list):
raise TypeError("Expected argument 'traffic_manager_host_names' to be a list")
pulumi.set(__self__, "traffic_manager_host_names", traffic_manager_host_names)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if usage_state and not isinstance(usage_state, str):
raise TypeError("Expected argument 'usage_state' to be a str")
pulumi.set(__self__, "usage_state", usage_state)
if virtual_network_subnet_id and not isinstance(virtual_network_subnet_id, str):
raise TypeError("Expected argument 'virtual_network_subnet_id' to be a str")
pulumi.set(__self__, "virtual_network_subnet_id", virtual_network_subnet_id)
@property
@pulumi.getter(name="availabilityState")
def availability_state(self) -> str:
"""
Management information availability state for the app.
"""
return pulumi.get(self, "availability_state")
@property
@pulumi.getter(name="clientAffinityEnabled")
def client_affinity_enabled(self) -> Optional[bool]:
"""
<code>true</code> to enable client affinity; <code>false</code> to stop sending session affinity cookies, which route client requests in the same session to the same instance. Default is <code>true</code>.
"""
return pulumi.get(self, "client_affinity_enabled")
@property
@pulumi.getter(name="clientCertEnabled")
def client_cert_enabled(self) -> Optional[bool]:
"""
<code>true</code> to enable client certificate authentication (TLS mutual authentication); otherwise, <code>false</code>. Default is <code>false</code>.
"""
return pulumi.get(self, "client_cert_enabled")
@property
@pulumi.getter(name="clientCertExclusionPaths")
def client_cert_exclusion_paths(self) -> Optional[str]:
"""
client certificate authentication comma-separated exclusion paths
"""
return pulumi.get(self, "client_cert_exclusion_paths")
@property
@pulumi.getter(name="clientCertMode")
def client_cert_mode(self) -> Optional[str]:
"""
This composes with ClientCertEnabled setting.
- ClientCertEnabled: false means ClientCert is ignored.
- ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
- ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or accepted.
"""
return pulumi.get(self, "client_cert_mode")
@property
@pulumi.getter(name="containerSize")
def container_size(self) -> Optional[int]:
"""
Size of the function container.
"""
return pulumi.get(self, "container_size")
@property
@pulumi.getter(name="customDomainVerificationId")
def custom_domain_verification_id(self) -> Optional[str]:
"""
Unique identifier that verifies the custom domains assigned to the app. Customer will add this id to a txt record for verification.
"""
return pulumi.get(self, "custom_domain_verification_id")
@property
@pulumi.getter(name="dailyMemoryTimeQuota")
def daily_memory_time_quota(self) -> Optional[int]:
"""
Maximum allowed daily memory-time quota (applicable on dynamic apps only).
"""
return pulumi.get(self, "daily_memory_time_quota")
@property
@pulumi.getter(name="defaultHostName")
def default_host_name(self) -> str:
"""
Default hostname of the app. Read-only.
"""
return pulumi.get(self, "default_host_name")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
<code>true</code> if the app is enabled; otherwise, <code>false</code>. Setting this value to false disables the app (takes the app offline).
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="enabledHostNames")
def enabled_host_names(self) -> Sequence[str]:
"""
        Enabled hostnames for the app. Hostnames need to be assigned (see HostNames) AND enabled. Otherwise,
the app is not served on those hostnames.
"""
return pulumi.get(self, "enabled_host_names")
@property
@pulumi.getter(name="hostNameSslStates")
def host_name_ssl_states(self) -> Optional[Sequence['outputs.HostNameSslStateResponse']]:
"""
Hostname SSL states are used to manage the SSL bindings for app's hostnames.
"""
return pulumi.get(self, "host_name_ssl_states")
@property
@pulumi.getter(name="hostNames")
def host_names(self) -> Sequence[str]:
"""
Hostnames associated with the app.
"""
return pulumi.get(self, "host_names")
@property
@pulumi.getter(name="hostNamesDisabled")
def host_names_disabled(self) -> Optional[bool]:
"""
<code>true</code> to disable the public hostnames of the app; otherwise, <code>false</code>.
If <code>true</code>, the app is only accessible via API management process.
"""
return pulumi.get(self, "host_names_disabled")
@property
@pulumi.getter(name="hostingEnvironmentProfile")
def hosting_environment_profile(self) -> Optional['outputs.HostingEnvironmentProfileResponse']:
"""
App Service Environment to use for the app.
"""
return pulumi.get(self, "hosting_environment_profile")
@property
@pulumi.getter(name="httpsOnly")
def https_only(self) -> Optional[bool]:
"""
HttpsOnly: configures a web site to accept only https requests. Issues redirect for
http requests
"""
return pulumi.get(self, "https_only")
@property
@pulumi.getter(name="hyperV")
def hyper_v(self) -> Optional[bool]:
"""
Hyper-V sandbox.
"""
return pulumi.get(self, "hyper_v")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
Managed service identity.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="inProgressOperationId")
def in_progress_operation_id(self) -> str:
"""
Specifies an operation id if this site has a pending operation.
"""
return pulumi.get(self, "in_progress_operation_id")
@property
@pulumi.getter(name="isDefaultContainer")
def is_default_container(self) -> bool:
"""
<code>true</code> if the app is a default container; otherwise, <code>false</code>.
"""
return pulumi.get(self, "is_default_container")
@property
@pulumi.getter(name="isXenon")
def is_xenon(self) -> Optional[bool]:
"""
Obsolete: Hyper-V sandbox.
"""
return pulumi.get(self, "is_xenon")
@property
@pulumi.getter(name="keyVaultReferenceIdentity")
def key_vault_reference_identity(self) -> Optional[str]:
"""
Identity to use for Key Vault Reference authentication.
"""
return pulumi.get(self, "key_vault_reference_identity")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="lastModifiedTimeUtc")
def last_modified_time_utc(self) -> str:
"""
Last time the app was modified, in UTC. Read-only.
"""
return pulumi.get(self, "last_modified_time_utc")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource Location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxNumberOfWorkers")
def max_number_of_workers(self) -> int:
"""
Maximum number of workers.
This only applies to Functions container.
"""
return pulumi.get(self, "max_number_of_workers")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outboundIpAddresses")
def outbound_ip_addresses(self) -> str:
"""
List of IP addresses that the app uses for outbound connections (e.g. database access). Includes VIPs from tenants that site can be hosted with current settings. Read-only.
"""
return pulumi.get(self, "outbound_ip_addresses")
@property
@pulumi.getter(name="possibleOutboundIpAddresses")
def possible_outbound_ip_addresses(self) -> str:
"""
List of IP addresses that the app uses for outbound connections (e.g. database access). Includes VIPs from all tenants except dataComponent. Read-only.
"""
return pulumi.get(self, "possible_outbound_ip_addresses")
@property
@pulumi.getter(name="redundancyMode")
def redundancy_mode(self) -> Optional[str]:
"""
Site redundancy mode
"""
return pulumi.get(self, "redundancy_mode")
@property
@pulumi.getter(name="repositorySiteName")
def repository_site_name(self) -> str:
"""
Name of the repository site.
"""
return pulumi.get(self, "repository_site_name")
@property
@pulumi.getter
def reserved(self) -> Optional[bool]:
"""
<code>true</code> if reserved; otherwise, <code>false</code>.
"""
return pulumi.get(self, "reserved")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> str:
"""
Name of the resource group the app belongs to. Read-only.
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="scmSiteAlsoStopped")
def scm_site_also_stopped(self) -> Optional[bool]:
"""
<code>true</code> to stop SCM (KUDU) site when the app is stopped; otherwise, <code>false</code>. The default is <code>false</code>.
"""
return pulumi.get(self, "scm_site_also_stopped")
@property
@pulumi.getter(name="serverFarmId")
def server_farm_id(self) -> Optional[str]:
"""
Resource ID of the associated App Service plan, formatted as: "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
"""
return pulumi.get(self, "server_farm_id")
@property
@pulumi.getter(name="siteConfig")
def site_config(self) -> Optional['outputs.SiteConfigResponse']:
"""
Configuration of the app.
"""
return pulumi.get(self, "site_config")
@property
@pulumi.getter(name="slotSwapStatus")
def slot_swap_status(self) -> 'outputs.SlotSwapStatusResponse':
"""
Status of the last deployment slot swap operation.
"""
return pulumi.get(self, "slot_swap_status")
@property
@pulumi.getter
def state(self) -> str:
"""
Current state of the app.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="storageAccountRequired")
def storage_account_required(self) -> Optional[bool]:
"""
Checks if Customer provided storage account is required
"""
return pulumi.get(self, "storage_account_required")
@property
@pulumi.getter(name="suspendedTill")
def suspended_till(self) -> str:
"""
App suspended till in case memory-time quota is exceeded.
"""
return pulumi.get(self, "suspended_till")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetSwapSlot")
def target_swap_slot(self) -> str:
"""
Specifies which deployment slot this app will swap into. Read-only.
"""
return pulumi.get(self, "target_swap_slot")
@property
@pulumi.getter(name="trafficManagerHostNames")
def traffic_manager_host_names(self) -> Sequence[str]:
"""
Azure Traffic Manager hostnames associated with the app. Read-only.
"""
return pulumi.get(self, "traffic_manager_host_names")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="usageState")
def usage_state(self) -> str:
"""
State indicating whether the app has exceeded its quota usage. Read-only.
"""
return pulumi.get(self, "usage_state")
@property
@pulumi.getter(name="virtualNetworkSubnetId")
def virtual_network_subnet_id(self) -> Optional[str]:
"""
Azure Resource Manager ID of the Virtual network and subnet to be joined by Regional VNET Integration.
This must be of the form /subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}
"""
return pulumi.get(self, "virtual_network_subnet_id")
class AwaitableGetWebAppSlotResult(GetWebAppSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppSlotResult(
availability_state=self.availability_state,
client_affinity_enabled=self.client_affinity_enabled,
client_cert_enabled=self.client_cert_enabled,
client_cert_exclusion_paths=self.client_cert_exclusion_paths,
client_cert_mode=self.client_cert_mode,
container_size=self.container_size,
custom_domain_verification_id=self.custom_domain_verification_id,
daily_memory_time_quota=self.daily_memory_time_quota,
default_host_name=self.default_host_name,
enabled=self.enabled,
enabled_host_names=self.enabled_host_names,
host_name_ssl_states=self.host_name_ssl_states,
host_names=self.host_names,
host_names_disabled=self.host_names_disabled,
hosting_environment_profile=self.hosting_environment_profile,
https_only=self.https_only,
hyper_v=self.hyper_v,
id=self.id,
identity=self.identity,
in_progress_operation_id=self.in_progress_operation_id,
is_default_container=self.is_default_container,
is_xenon=self.is_xenon,
key_vault_reference_identity=self.key_vault_reference_identity,
kind=self.kind,
last_modified_time_utc=self.last_modified_time_utc,
location=self.location,
max_number_of_workers=self.max_number_of_workers,
name=self.name,
outbound_ip_addresses=self.outbound_ip_addresses,
possible_outbound_ip_addresses=self.possible_outbound_ip_addresses,
redundancy_mode=self.redundancy_mode,
repository_site_name=self.repository_site_name,
reserved=self.reserved,
resource_group=self.resource_group,
scm_site_also_stopped=self.scm_site_also_stopped,
server_farm_id=self.server_farm_id,
site_config=self.site_config,
slot_swap_status=self.slot_swap_status,
state=self.state,
storage_account_required=self.storage_account_required,
suspended_till=self.suspended_till,
tags=self.tags,
target_swap_slot=self.target_swap_slot,
traffic_manager_host_names=self.traffic_manager_host_names,
type=self.type,
usage_state=self.usage_state,
virtual_network_subnet_id=self.virtual_network_subnet_id)
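# --- Illustrative usage (added; not part of the generated SDK): a minimal, hypothetical
# lookup from a Pulumi program; the app, resource group and slot names are placeholders.
#     import pulumi
#     import pulumi_azure_native as azure_native
#
#     slot = azure_native.web.get_web_app_slot(
#         name="my-app",
#         resource_group_name="my-rg",
#         slot="staging")
#     pulumi.export("defaultHostName", slot.default_host_name)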
def get_web_app_slot(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppSlotResult:
"""
A web app, a mobile app backend, or an API app.
API Version: 2020-12-01.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. By default, this API returns the production slot.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web:getWebAppSlot', __args__, opts=opts, typ=GetWebAppSlotResult).value
return AwaitableGetWebAppSlotResult(
availability_state=__ret__.availability_state,
client_affinity_enabled=__ret__.client_affinity_enabled,
client_cert_enabled=__ret__.client_cert_enabled,
client_cert_exclusion_paths=__ret__.client_cert_exclusion_paths,
client_cert_mode=__ret__.client_cert_mode,
container_size=__ret__.container_size,
custom_domain_verification_id=__ret__.custom_domain_verification_id,
daily_memory_time_quota=__ret__.daily_memory_time_quota,
default_host_name=__ret__.default_host_name,
enabled=__ret__.enabled,
enabled_host_names=__ret__.enabled_host_names,
host_name_ssl_states=__ret__.host_name_ssl_states,
host_names=__ret__.host_names,
host_names_disabled=__ret__.host_names_disabled,
hosting_environment_profile=__ret__.hosting_environment_profile,
https_only=__ret__.https_only,
hyper_v=__ret__.hyper_v,
id=__ret__.id,
identity=__ret__.identity,
in_progress_operation_id=__ret__.in_progress_operation_id,
is_default_container=__ret__.is_default_container,
is_xenon=__ret__.is_xenon,
key_vault_reference_identity=__ret__.key_vault_reference_identity,
kind=__ret__.kind,
last_modified_time_utc=__ret__.last_modified_time_utc,
location=__ret__.location,
max_number_of_workers=__ret__.max_number_of_workers,
name=__ret__.name,
outbound_ip_addresses=__ret__.outbound_ip_addresses,
possible_outbound_ip_addresses=__ret__.possible_outbound_ip_addresses,
redundancy_mode=__ret__.redundancy_mode,
repository_site_name=__ret__.repository_site_name,
reserved=__ret__.reserved,
resource_group=__ret__.resource_group,
scm_site_also_stopped=__ret__.scm_site_also_stopped,
server_farm_id=__ret__.server_farm_id,
site_config=__ret__.site_config,
slot_swap_status=__ret__.slot_swap_status,
state=__ret__.state,
storage_account_required=__ret__.storage_account_required,
suspended_till=__ret__.suspended_till,
tags=__ret__.tags,
target_swap_slot=__ret__.target_swap_slot,
traffic_manager_host_names=__ret__.traffic_manager_host_names,
type=__ret__.type,
usage_state=__ret__.usage_state,
virtual_network_subnet_id=__ret__.virtual_network_subnet_id)
| 45.271787 | 1,108 | 0.687004 |
4a2514edd295622b075522bd9581988c6f41697b | 13,682 | py | Python | yolov5/models/yolo.py | Jeensh/adaptive-cruise-control | 911b55a0c83d47c6eba557a662d5513b89206d91 | [
"MIT"
] | 12 | 2021-06-15T01:45:07.000Z | 2022-03-29T12:01:37.000Z | models/yolo.py | shashank524/patent_analysis | bd3fde329797ea8b823749ea556b222fb7594fd3 | [
"MIT"
] | null | null | null | models/yolo.py | shashank524/patent_analysis | bd3fde329797ea8b823749ea556b222fb7594fd3 | [
"MIT"
] | 6 | 2021-07-12T08:27:47.000Z | 2022-03-19T14:45:29.000Z | """YOLOv5-specific modules
Usage:
$ python path/to/models/yolo.py --cfg yolov5s.yaml
"""
import argparse
import logging
import sys
from copy import deepcopy
from pathlib import Path
FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop # for FLOPs computation
except ImportError:
thop = None
logger = logging.getLogger(__name__)
class Detect(nn.Module):
stride = None # strides computed during build
onnx_dynamic = False # ONNX export parameter
def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
super(Detect, self).__init__()
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.zeros(1)] * self.nl # init grid
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer('anchors', a) # shape(nl,na,2)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
self.inplace = inplace # use in-place ops (e.g. slice assignment)
def forward(self, x):
# x = x.copy() # for profiling
z = [] # inference output
for i in range(self.nl):
x[i] = self.m[i](x[i]) # conv
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
if not self.training: # inference
if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
y = x[i].sigmoid()
if self.inplace:
y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh
y = torch.cat((xy, wh, y[..., 4:]), -1)
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1), x)
@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
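# Illustrative sketch (not part of the original YOLOv5 code): the decode used in
# Detect.forward() above maps sigmoided network outputs into pixel units. The helper
# below repeats that arithmetic for a single anchor at a single grid cell so the
# formula is easier to follow; all numeric defaults are made-up example values.
def _decode_single_cell_example(y_sigmoid=(0.6, 0.4, 0.7, 0.5), grid_xy=(3.0, 5.0),
                                stride=8.0, anchor_wh=(10.0, 13.0)):
    sx, sy, sw, sh = y_sigmoid
    cx = (sx * 2. - 0.5 + grid_xy[0]) * stride  # same as the y[..., 0:2] branch above
    cy = (sy * 2. - 0.5 + grid_xy[1]) * stride
    w = (sw * 2.) ** 2 * anchor_wh[0]           # same as the y[..., 2:4] branch above
    h = (sh * 2.) ** 2 * anchor_wh[1]
    return cx, cy, w, h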
class Model(nn.Module):
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super(Model, self).__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
import yaml # for torch hub
self.yaml_file = Path(cfg).name
with open(cfg) as f:
self.yaml = yaml.safe_load(f) # model dict
# Define model
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
if nc and nc != self.yaml['nc']:
logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
if anchors:
logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
self.yaml['anchors'] = round(anchors) # override yaml value
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
self.inplace = self.yaml.get('inplace', True)
# logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
# Build strides, anchors
m = self.model[-1] # Detect()
if isinstance(m, Detect):
s = 256 # 2x min stride
m.inplace = self.inplace
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
m.anchors /= m.stride.view(-1, 1, 1)
check_anchor_order(m)
self.stride = m.stride
self._initialize_biases() # only run once
# logger.info('Strides: %s' % m.stride.tolist())
# Init weights, biases
initialize_weights(self)
self.info()
logger.info('')
def forward(self, x, augment=False, profile=False):
if augment:
return self.forward_augment(x) # augmented inference, None
else:
return self.forward_once(x, profile) # single-scale inference, train
def forward_augment(self, x):
img_size = x.shape[-2:] # height, width
s = [1, 0.83, 0.67] # scales
f = [None, 3, None] # flips (2-ud, 3-lr)
y = [] # outputs
for si, fi in zip(s, f):
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
yi = self.forward_once(xi)[0] # forward
# cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
yi = self._descale_pred(yi, fi, si, img_size)
y.append(yi)
return torch.cat(y, 1), None # augmented inference, train
def forward_once(self, x, profile=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
t = time_synchronized()
for _ in range(10):
_ = m(x)
dt.append((time_synchronized() - t) * 100)
if m == self.model[0]:
logger.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}")
logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if profile:
logger.info('%.1fms total' % sum(dt))
return x
def _descale_pred(self, p, flips, scale, img_size):
# de-scale predictions following augmented inference (inverse operation)
if self.inplace:
p[..., :4] /= scale # de-scale
if flips == 2:
p[..., 1] = img_size[0] - p[..., 1] # de-flip ud
elif flips == 3:
p[..., 0] = img_size[1] - p[..., 0] # de-flip lr
else:
x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale
if flips == 2:
y = img_size[0] - y # de-flip ud
elif flips == 3:
x = img_size[1] - x # de-flip lr
p = torch.cat((x, y, wh, p[..., 4:]), -1)
return p
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
m = self.model[-1] # Detect() module
for mi, s in zip(m.m, m.stride): # from
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def _print_biases(self):
m = self.model[-1] # Detect() module
for mi in m.m: # from
b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
logger.info(
('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
# def _print_weights(self):
# for m in self.model.modules():
# if type(m) is Bottleneck:
# logger.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
logger.info('Fusing layers... ')
for m in self.model.modules():
if type(m) is Conv and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, 'bn') # remove batchnorm
m.forward = m.fuseforward # update forward
self.info()
return self
def nms(self, mode=True): # add or remove NMS module
present = type(self.model[-1]) is NMS # last layer is NMS
if mode and not present:
logger.info('Adding NMS... ')
m = NMS() # module
m.f = -1 # from
m.i = self.model[-1].i + 1 # index
self.model.add_module(name='%s' % m.i, module=m) # add
self.eval()
elif not mode and present:
logger.info('Removing NMS... ')
self.model = self.model[:-1] # remove
return self
def autoshape(self): # add AutoShape module
logger.info('Adding AutoShape... ')
m = AutoShape(self) # wrap model
copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
return m
def info(self, verbose=False, img_size=640): # print model information
model_info(self, verbose, img_size)
def parse_model(d, ch): # model_dict, input_channels(3)
logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass
n = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
C3, C3TR]:
c1, c2 = ch[f], args[0]
if c2 != no: # if not output
c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3, C3TR]:
args.insert(2, n) # number of repeats
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[x] for x in f])
elif m is Detect:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
c2 = ch[f] // args[0] ** 2
else:
c2 = ch[f]
m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum([x.numel() for x in m_.parameters()]) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
ch = []
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
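# Illustrative sketch (not part of the original file): a toy dictionary in the shape that
# parse_model() consumes. The layer list and multipliers below are made-up examples; see the
# official yolov5*.yaml files for real configurations.
_EXAMPLE_MODEL_DICT = {
    'nc': 80,                # number of classes
    'depth_multiple': 0.33,  # scales the repeat count of each block
    'width_multiple': 0.50,  # scales the channel count of each block
    'anchors': [[10, 13, 16, 30, 33, 23]],
    'backbone': [[-1, 1, 'Focus', [64, 3]],
                 [-1, 1, 'Conv', [128, 3, 2]]],
    'head': [[[-1], 1, 'Detect', ['nc', 'anchors']]],
}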
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
opt = parser.parse_args()
opt.cfg = check_file(opt.cfg) # check file
set_logging()
device = select_device(opt.device)
# Create model
model = Model(opt.cfg).to(device)
model.train()
# Profile
# img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device)
# y = model(img, profile=True)
# Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898)
# from torch.utils.tensorboard import SummaryWriter
# tb_writer = SummaryWriter('.')
# logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/")
# tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph
# tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
| 43.993569 | 119 | 0.542538 |
4a2515ea951a190080b861d68cea716b6885cbeb | 19,093 | py | Python | code/dataset/transform.py | luozm/frog | ce29cdaddba5ecf78cc66bce1bfc64b301b706ca | [
"MIT"
] | null | null | null | code/dataset/transform.py | luozm/frog | ce29cdaddba5ecf78cc66bce1bfc64b301b706ca | [
"MIT"
] | null | null | null | code/dataset/transform.py | luozm/frog | ce29cdaddba5ecf78cc66bce1bfc64b301b706ca | [
"MIT"
] | null | null | null | """
Transformations for both images and masks.
"""
import os
import cv2
import math
import random
import skimage
import skimage.morphology
from PIL import Image
import numpy as np
# for debug
def dummy_transform(image):
print('\tdummy_transform')
return image
# ------------------------------------------------------------------------------------
# transform both for images & masks
# ------------------------------------------------------------------------------------
def resize_to_factor2(image, mask, factor=16):
H,W = image.shape[:2]
h = (H//factor)*factor
w = (W //factor)*factor
return fix_resize_transform2(image, mask, w, h)
def fix_resize_transform2(image, mask, w, h):
H,W = image.shape[:2]
if (H,W) != (h,w):
image = cv2.resize(image,(w,h))
mask = mask.astype(np.float32)
mask = cv2.resize(mask,(w,h),cv2.INTER_NEAREST)
mask = mask.astype(np.int32)
return image, mask
def fix_crop_transform2(image, mask, x,y,w,h):
H,W = image.shape[:2]
assert(H>=h)
assert(W >=w)
if (x==-1 & y==-1):
x=(W-w)//2
y=(H-h)//2
if (x,y,w,h) != (0,0,W,H):
image = image[y:y+h, x:x+w]
mask = mask[y:y+h, x:x+w]
return image, mask
def random_crop_transform2(image, mask, w,h, u=0.5):
x,y = -1,-1
if random.random() < u:
H,W = image.shape[:2]
if H!=h:
y = np.random.choice(H-h)
else:
y=0
if W!=w:
x = np.random.choice(W-w)
else:
x=0
return fix_crop_transform2(image, mask, x,y,w,h)
def random_horizontal_flip_transform2(image, mask, u=0.5):
if random.random() < u:
image = cv2.flip(image,1) #np.fliplr(img) ##left-right
mask = cv2.flip(mask,1)
return image, mask
def random_vertical_flip_transform2(image, mask, u=0.5):
if random.random() < u:
image = cv2.flip(image,0)
mask = cv2.flip(mask,0)
return image, mask
def random_rotate90_transform2(image, mask, u=0.5):
if random.random() < u:
angle=random.randint(1,3)*90
if angle == 90:
image = image.transpose(1,0,2) #cv2.transpose(img)
image = cv2.flip(image,1)
mask = mask.transpose(1,0)
mask = cv2.flip(mask,1)
elif angle == 180:
image = cv2.flip(image,-1)
mask = cv2.flip(mask,-1)
elif angle == 270:
image = image.transpose(1,0,2) #cv2.transpose(img)
image = cv2.flip(image,0)
mask = mask.transpose(1,0)
mask = cv2.flip(mask,0)
return image, mask
def relabel_multi_mask(multi_mask):
data = multi_mask
data = data[:,:,np.newaxis]
unique_color = set( tuple(v) for m in data for v in m )
#print(len(unique_color))
H, W = data.shape[:2]
multi_mask = np.zeros((H,W),np.int32)
for color in unique_color:
#print(color)
if color == (0,): continue
mask = (data==color).all(axis=2)
label = skimage.morphology.label(mask)
index = [label!=0]
multi_mask[index] = label[index]+multi_mask.max()
return multi_mask
def random_shift_scale_rotate_transform2( image, mask,
shift_limit=[-0.0625,0.0625], scale_limit=[1/1.2,1.2],
rotate_limit=[-15,15], borderMode=cv2.BORDER_REFLECT_101 , u=0.5):
if random.random() < u:
height, width, channel = image.shape
angle = random.uniform(rotate_limit[0], rotate_limit[1]) #degree
scale = random.uniform(scale_limit[0], scale_limit[1])
sx = scale
sy = scale
dx = round(random.uniform(shift_limit[0], shift_limit[1])*width)
dy = round(random.uniform(shift_limit[0], shift_limit[1])*height)
cc = math.cos(angle/180*math.pi)*(sx)
ss = math.sin(angle/180*math.pi)*(sy)
rotate_matrix = np.array([ [cc,-ss], [ss,cc] ])
box0 = np.array([ [0,0], [width,0], [width,height], [0,height], ])
box1 = box0 - np.array([width/2,height/2])
box1 = np.dot(box1,rotate_matrix.T) + np.array([width/2+dx,height/2+dy])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
image = cv2.warpPerspective(image, mat, (width,height),flags=cv2.INTER_LINEAR,
borderMode=borderMode,borderValue=(0,0,0,)) #cv2.BORDER_CONSTANT, borderValue = (0, 0, 0)) #cv2.BORDER_REFLECT_101
mask = mask.astype(np.float32)
mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,#cv2.INTER_LINEAR
borderMode=borderMode, borderValue=(0,0,0,)) #cv2.BORDER_CONSTANT, borderValue = (0, 0, 0)) #cv2.BORDER_REFLECT_101
mask = mask.astype(np.int32)
mask = relabel_multi_mask(mask)
return image, mask
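# Illustrative sketch (not part of the original file): the paired image/mask transforms above
# are typically chained so that the image and its multi-mask stay aligned. The probabilities,
# limits and crop size below are arbitrary example values, and the input is assumed to be at
# least crop_h x crop_w pixels.
def example_paired_augment(image, multi_mask, crop_w=256, crop_h=256):
    image, multi_mask = random_shift_scale_rotate_transform2(
        image, multi_mask, shift_limit=[-0.0625, 0.0625], scale_limit=[1 / 1.2, 1.2],
        rotate_limit=[-15, 15], u=0.5)
    image, multi_mask = random_crop_transform2(image, multi_mask, crop_w, crop_h, u=0.5)
    image, multi_mask = random_horizontal_flip_transform2(image, multi_mask, u=0.5)
    image, multi_mask = random_vertical_flip_transform2(image, multi_mask, u=0.5)
    image, multi_mask = random_rotate90_transform2(image, multi_mask, u=0.5)
    return image, multi_mask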
# ------------------------------------------------------------------------------------
# transform for images
# ------------------------------------------------------------------------------------
# --------------------------------
# photometric transform
# --------------------------------
def random_brightness_shift_transform(image, limit=[16,64], u=0.5):
if np.random.random() < u:
alpha = np.random.uniform(limit[0], limit[1])
image = image + alpha*255
image = np.clip(image, 0, 255).astype(np.uint8)
return image
def random_brightness_transform(image, limit=[0.5,1.5], u=0.5):
if np.random.random() < u:
alpha = np.random.uniform(limit[0], limit[1])
image = alpha*image
image = np.clip(image, 0, 255).astype(np.uint8)
return image
def random_contrast_transform(image, limit=[0.5,1.5], u=0.5):
if np.random.random() < u:
alpha = np.random.uniform(limit[0], limit[1])
coef = np.array([[[0.114, 0.587, 0.299]]]) #rgb to gray (YCbCr)
gray = image * coef
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
image = alpha*image + gray
image = np.clip(image, 0, 255).astype(np.uint8)
return image
def random_saturation_transform(image, limit=[0.5,1.5], u=0.5):
if np.random.random() < u:
alpha = np.random.uniform(limit[0], limit[1])
coef = np.array([[[0.114, 0.587, 0.299]]])
gray = image * coef
gray = np.sum(gray,axis=2, keepdims=True)
image = alpha*image + (1.0 - alpha)*gray
image = np.clip(image, 0, 255).astype(np.uint8)
return image
# https://github.com/chainer/chainercv/blob/master/chainercv/links/model/ssd/transforms.py
# https://github.com/fchollet/keras/pull/4806/files
# https://zhuanlan.zhihu.com/p/24425116
# http://lamda.nju.edu.cn/weixs/project/CNNTricks/CNNTricks.html
def random_hue_transform(image, limit=[-0.1,0.1], u=0.5):
if random.random() < u:
h = int(np.random.uniform(limit[0], limit[1])*180)
#print(h)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + h) % 180
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return image
def random_noise_transform(image, limit=[0, 0.5], u=0.5):
if random.random() < u:
H,W = image.shape[:2]
noise = np.random.uniform(limit[0],limit[1],size=(H,W))*255
image = image + noise[:,:,np.newaxis]*np.array([1,1,1])
image = np.clip(image, 0, 255).astype(np.uint8)
return image
# --------------------------------
# geometric transform
# --------------------------------
def resize_to_factor(image, factor=16):
height, width = image.shape[:2]
h = (height//factor)*factor
w = (width //factor)*factor
return fix_resize_transform(image, w, h)
def fix_resize_transform(image, w, h):
height,width = image.shape[:2]
if (height,width) != (h,w):
image = cv2.resize(image,(w,h))
return image
def pad_to_factor(image, factor=16):
height, width = image.shape[:2]
h = math.ceil(height/factor)*factor
w = math.ceil(width/factor)*factor
image = cv2.copyMakeBorder(image, top=0, bottom=h-height, left=0, right=w-width,
borderType=cv2.BORDER_REFLECT101, value=[0, 0, 0])
return image
class GaussianDistortion:
"""
This class performs randomised, elastic gaussian distortions on images.
"""
def __init__(self, probability, grid_width, grid_height, magnitude, corner='bell', method='in', mex=0.5, mey=0.5, sdx=0.05, sdy=0.05):
"""
As well as the probability, the granularity of the distortions
produced by this class can be controlled using the width and
height of the overlaying distortion grid. The larger the height
and width of the grid, the smaller the distortions. This means
that larger grid sizes can result in finer, less severe distortions.
As well as this, the magnitude of the distortions vectors can
also be adjusted.
:param probability: Controls the probability that the operation is
performed when it is invoked in the pipeline.
        :param grid_width: The width of the grid overlay, which is used
by the class to apply the transformations to the image.
        :param grid_height: The height of the grid overlay, which is used
by the class to apply the transformations to the image.
:param magnitude: Controls the degree to which each distortion is
applied to the overlaying distortion grid.
:param corner: which corner of picture to distort.
Possible values: "bell"(circular surface applied), "ul"(upper left),
"ur"(upper right), "dl"(down left), "dr"(down right).
:param method: possible values: "in"(apply max magnitude to the chosen
corner), "out"(inverse of method in).
:param mex: used to generate 3d surface for similar distortions.
Surface is based on normal distribution.
:param mey: used to generate 3d surface for similar distortions.
Surface is based on normal distribution.
:param sdx: used to generate 3d surface for similar distortions.
Surface is based on normal distribution.
:param sdy: used to generate 3d surface for similar distortions.
Surface is based on normal distribution.
:type probability: Float
:type grid_width: Integer
:type grid_height: Integer
:type magnitude: Integer
:type corner: String
:type method: String
:type mex: Float
:type mey: Float
:type sdx: Float
:type sdy: Float
For values :attr:`mex`, :attr:`mey`, :attr:`sdx`, and :attr:`sdy` the
surface is based on the normal distribution:
.. math::
e^{- \Big( \\frac{(x-\\text{mex})^2}{\\text{sdx}} + \\frac{(y-\\text{mey})^2}{\\text{sdy}} \Big) }
"""
self.probability = probability
self.grid_width = grid_width
self.grid_height = grid_height
self.magnitude = abs(magnitude)
self.randomise_magnitude = True
self.corner = corner
self.method = method
self.mex = mex
self.mey = mey
self.sdx = sdx
self.sdy = sdy
def perform_operation(self, images):
"""
Distorts the passed image(s) according to the parameters supplied
during instantiation, returning the newly distorted image.
:param images: The image(s) to be distorted.
:type images: List containing PIL.Image object(s).
:return: The transformed image(s) as a list of object(s) of type
PIL.Image.
"""
w, h = images[0].size
horizontal_tiles = self.grid_width
vertical_tiles = self.grid_height
width_of_square = int(math.floor(w / float(horizontal_tiles)))
height_of_square = int(math.floor(h / float(vertical_tiles)))
width_of_last_square = w - (width_of_square * (horizontal_tiles - 1))
height_of_last_square = h - (height_of_square * (vertical_tiles - 1))
dimensions = []
for vertical_tile in range(vertical_tiles):
for horizontal_tile in range(horizontal_tiles):
if vertical_tile == (vertical_tiles - 1) and horizontal_tile == (horizontal_tiles - 1):
dimensions.append([horizontal_tile * width_of_square,
vertical_tile * height_of_square,
width_of_last_square + (horizontal_tile * width_of_square),
height_of_last_square + (height_of_square * vertical_tile)])
elif vertical_tile == (vertical_tiles - 1):
dimensions.append([horizontal_tile * width_of_square,
vertical_tile * height_of_square,
width_of_square + (horizontal_tile * width_of_square),
height_of_last_square + (height_of_square * vertical_tile)])
elif horizontal_tile == (horizontal_tiles - 1):
dimensions.append([horizontal_tile * width_of_square,
vertical_tile * height_of_square,
width_of_last_square + (horizontal_tile * width_of_square),
height_of_square + (height_of_square * vertical_tile)])
else:
dimensions.append([horizontal_tile * width_of_square,
vertical_tile * height_of_square,
width_of_square + (horizontal_tile * width_of_square),
height_of_square + (height_of_square * vertical_tile)])
last_column = []
for i in range(vertical_tiles):
last_column.append((horizontal_tiles-1)+horizontal_tiles*i)
last_row = range((horizontal_tiles * vertical_tiles) - horizontal_tiles, horizontal_tiles * vertical_tiles)
polygons = []
for x1, y1, x2, y2 in dimensions:
polygons.append([x1, y1, x1, y2, x2, y2, x2, y1])
polygon_indices = []
for i in range((vertical_tiles * horizontal_tiles) - 1):
if i not in last_row and i not in last_column:
polygon_indices.append([i, i + 1, i + horizontal_tiles, i + 1 + horizontal_tiles])
def sigmoidf(x, y, sdx=0.05, sdy=0.05, mex=0.5, mey=0.5, const=1):
sigmoid = lambda x1, y1: (const * (math.exp(-(((x1-mex)**2)/sdx + ((y1-mey)**2)/sdy) )) + max(0,-const) - max(0, const))
xl = np.linspace(0,1)
yl = np.linspace(0, 1)
X, Y = np.meshgrid(xl, yl)
Z = np.vectorize(sigmoid)(X, Y)
mino = np.amin(Z)
maxo = np.amax(Z)
res = sigmoid(x, y)
res = max(((((res - mino) * (1 - 0)) / (maxo - mino)) + 0), 0.01)*self.magnitude
return res
def corner(x, y, corner="ul", method="out", sdx=0.05, sdy=0.05, mex=0.5, mey=0.5):
ll = {'dr': (0, 0.5, 0, 0.5), 'dl': (0.5, 1, 0, 0.5), 'ur': (0, 0.5, 0.5, 1), 'ul': (0.5, 1, 0.5, 1), 'bell': (0, 1, 0, 1)}
new_c = ll[corner]
new_x = (((x - 0) * (new_c[1] - new_c[0])) / (1 - 0)) + new_c[0]
new_y = (((y - 0) * (new_c[3] - new_c[2])) / (1 - 0)) + new_c[2]
if method == "in":
const = 1
else:
if method == "out":
const =- 1
else:
const = 1
res = sigmoidf(x=new_x, y=new_y,sdx=sdx, sdy=sdy, mex=mex, mey=mey, const=const)
return res
def do(image):
for a, b, c, d in polygon_indices:
x1, y1, x2, y2, x3, y3, x4, y4 = polygons[a]
sigmax = corner(x=x3/w, y=y3/h, corner=self.corner, method=self.method, sdx=self.sdx, sdy=self.sdy, mex=self.mex, mey=self.mey)
dx = np.random.normal(0, sigmax, 1)[0]
dy = np.random.normal(0, sigmax, 1)[0]
polygons[a] = [x1, y1,
x2, y2,
x3 + dx, y3 + dy,
x4, y4]
x1, y1, x2, y2, x3, y3, x4, y4 = polygons[b]
polygons[b] = [x1, y1,
x2 + dx, y2 + dy,
x3, y3,
x4, y4]
x1, y1, x2, y2, x3, y3, x4, y4 = polygons[c]
polygons[c] = [x1, y1,
x2, y2,
x3, y3,
x4 + dx, y4 + dy]
x1, y1, x2, y2, x3, y3, x4, y4 = polygons[d]
polygons[d] = [x1 + dx, y1 + dy,
x2, y2,
x3, y3,
x4, y4]
generated_mesh = []
for i in range(len(dimensions)):
generated_mesh.append([dimensions[i], polygons[i]])
return image.transform(image.size, Image.MESH, generated_mesh, resample=Image.BICUBIC)
augmented_images = []
for image in images:
augmented_images.append(do(image))
return augmented_images
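# Illustrative usage sketch (not part of the original file): GaussianDistortion operates on
# PIL images, so a numpy array has to be wrapped first. The probability, grid and magnitude
# values below are arbitrary examples, not tuned settings.
def example_gaussian_distortion(image_array):
    distorter = GaussianDistortion(probability=1.0, grid_width=8, grid_height=8, magnitude=4,
                                   corner='bell', method='in')
    pil_image = Image.fromarray(image_array.astype(np.uint8))
    distorted = distorter.perform_operation([pil_image])[0]
    return np.array(distorted)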
def normalize_transform(image):
# image = image[:,:,0]
# image = image.reshape(image.shape+(1,))
std = np.std(image)
if std <1e-10 or std>1e10:
return None
mean = np.mean(image)
image_trans = (image-mean)/std
return image_trans
if __name__ == '__main__':
print('%s: calling main function ... ' % os.path.basename(__file__))
from dataset.reader import ScienceDataset
import matplotlib.pyplot as plt
from skimage import exposure
from common import IMAGE_DIR
# import Augmentor
# p = Augmentor.Pipeline(IMAGE_DIR + 'train1_norm/images')
# p.random_distortion(0.5,16,16,5)
# img = p.sample(10)
# plt.imshow(img)
# plt.show()
dataset = ScienceDataset('train1_train_603', img_folder='train1_norm', mask_folder='stage1_train', mode='train')
img = dataset[572]
for img, mask, meta, idx in dataset:
if img.shape[0]<513 and img.shape[1]<513:
print(idx)
'f4c4db3df4ff0de90f44b027fc2e28c16bf7e5c75ea75b0a9762bbb7ac86e7a3'
# mask_re = mask.reshape(mask.shape + (1,))
# img_concat = np.stack((img[:, :, 0], mask), axis=2)
# result = random_contrast_transform(img, u=1)
# result2 = random_brightness_transform(img, u=1)
# img_norm = normalize_transform(img)
# result = elastic_transform_2(img[:,:,0],img.shape[1]*2, img.shape[1]*0.08)
# plt.imshow(img)
# plt.figure()
# plt.imshow(img_adapteq)
# plt.figure()
# plt.imshow(result2)
# elastic_transform(img_concat, img_concat.shape[1]*2, img_concat.shape[1]*0.08, img_concat.shape[1]*0.08)
print()
    print('\nsuccess!')
| 36.788054 | 152 | 0.555596 |
4a251719926ebad3e6c081d334b27dba4c23974f | 4,803 | py | Python | tools/download_fuchsia_sdk.py | kofj/engine | e17ee4a5fff8a13626a9a7786b7e2534aba51df0 | [
"BSD-3-Clause"
] | null | null | null | tools/download_fuchsia_sdk.py | kofj/engine | e17ee4a5fff8a13626a9a7786b7e2534aba51df0 | [
"BSD-3-Clause"
] | null | null | null | tools/download_fuchsia_sdk.py | kofj/engine | e17ee4a5fff8a13626a9a7786b7e2534aba51df0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# The return code of this script will always be 0, even if there is an error,
# unless the --fail-loudly flag is passed.
import argparse
import tarfile
import json
import os
import shutil
import subprocess
import sys
SRC_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
FUCHSIA_SDK_DIR = os.path.join(SRC_ROOT, 'fuchsia', 'sdk')
FLUTTER_DIR = os.path.join(SRC_ROOT, 'flutter')
SDK_VERSION_INFO_FILE = os.path.join(FLUTTER_DIR, '.fuchsia_sdk_version')
# Prints to stderr.
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def FileNameForBucket(bucket):
return bucket.split('/')[-1]
def DownloadFuchsiaSDKFromGCS(bucket, verbose):
file = FileNameForBucket(bucket)
url = 'https://storage.googleapis.com/{}'.format(bucket)
dest = os.path.join(FUCHSIA_SDK_DIR, file)
if verbose:
print('Fuchsia SDK url: "%s"' % url)
print('Fuchsia SDK destination path: "%s"' % dest)
if os.path.isfile(dest):
os.unlink(dest)
curl_command = [
'curl',
'--retry', '3',
'--continue-at', '-', '--location',
'--output', dest,
url,
]
if verbose:
print('Running: "%s"' % (' '.join(curl_command)))
curl_result = subprocess.run(
curl_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
if curl_result.returncode == 0 and verbose:
print('curl output:stdout:\n{}\nstderr:\n{}'.format(
curl_result.stdout, curl_result.stderr,
))
elif curl_result.returncode != 0:
eprint('Failed to download: stdout:\n{}\nstderr:\n{}'.format(
curl_result.stdout, curl_result.stderr,
))
return None
return dest
def OnErrorRmTree(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
  Usage: ``shutil.rmtree(path, onerror=OnErrorRmTree)``
"""
import stat
# Is the error an access error?
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def ExtractGzipArchive(archive, host_os, verbose):
sdk_dest = os.path.join(FUCHSIA_SDK_DIR, host_os)
if os.path.isdir(sdk_dest):
shutil.rmtree(sdk_dest, onerror=OnErrorRmTree)
extract_dest = os.path.join(FUCHSIA_SDK_DIR, 'temp')
if os.path.isdir(extract_dest):
shutil.rmtree(extract_dest, onerror=OnErrorRmTree)
os.makedirs(extract_dest, exist_ok=True)
if verbose:
print('Extracting "%s" to "%s"' % (archive, extract_dest))
with tarfile.open(archive, 'r') as z:
z.extractall(extract_dest)
shutil.move(extract_dest, sdk_dest)
# Reads the version file and returns the bucket to download
# The file is expected to live at the flutter directory and be named .fuchsia_sdk_version.
#
# The file is a JSON file which contains a single object with the following schema:
# ```
# {
# "protocol": "gcs",
# "identifiers": [
# {
# "host_os": "linux",
# "bucket": "fuchsia-artifacts/development/8824687191341324145/sdk/linux-amd64/core.tar.gz"
# }
# ]
# }
# ```
def ReadVersionFile(host_os):
with open(SDK_VERSION_INFO_FILE) as f:
try:
version_obj = json.loads(f.read())
if version_obj['protocol'] != 'gcs':
        eprint('The gcs protocol is the only supported protocol at this time')
return None
for id_obj in version_obj['identifiers']:
if id_obj['host_os'] == host_os:
return id_obj['bucket']
except:
eprint('Could not read JSON version file')
return None
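# Illustrative sketch (not part of the build script): a minimal version file that
# ReadVersionFile() above would accept. The bucket path is a made-up example, not a real
# artifact location.
def _write_example_version_file(path):
  example = {
      'protocol': 'gcs',
      'identifiers': [{
          'host_os': 'linux',
          'bucket': 'fuchsia-artifacts/development/1234567890/sdk/linux-amd64/core.tar.gz',
      }],
  }
  with open(path, 'w') as f:
    json.dump(example, f, indent=2)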
def Main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--fail-loudly',
action='store_true',
default=False,
help="Return an error code if a prebuilt couldn't be fetched and extracted")
parser.add_argument(
'--verbose',
action='store_true',
default='LUCI_CONTEXT' in os.environ,
help='Emit verbose output')
parser.add_argument(
'--host-os',
help='The host os')
args = parser.parse_args()
fail_loudly = 1 if args.fail_loudly else 0
verbose = args.verbose
host_os = args.host_os
bucket = ReadVersionFile(host_os)
if bucket is None:
eprint('Unable to find bucket in version file')
return fail_loudly
archive = DownloadFuchsiaSDKFromGCS(bucket, verbose)
if archive is None:
eprint('Failed to download SDK from %s' % bucket)
return fail_loudly
ExtractGzipArchive(archive, host_os, verbose)
success = True
return 0 if success else fail_loudly
if __name__ == '__main__':
sys.exit(Main())
| 26.245902 | 98 | 0.684989 |
4a2518ce4917592c21b3be2f9d2140f32bc2fa13 | 2,859 | py | Python | migrations/versions/08e57c6490ee_.py | amoskipz/pitch | 477599a56958bc677e22764d7e0cc14d34510e8c | [
"Unlicense",
"MIT"
] | null | null | null | migrations/versions/08e57c6490ee_.py | amoskipz/pitch | 477599a56958bc677e22764d7e0cc14d34510e8c | [
"Unlicense",
"MIT"
] | null | null | null | migrations/versions/08e57c6490ee_.py | amoskipz/pitch | 477599a56958bc677e22764d7e0cc14d34510e8c | [
"Unlicense",
"MIT"
] | null | null | null | """empty message
Revision ID: 08e57c6490ee
Revises:
Create Date: 2021-03-08 12:35:15.305897
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '08e57c6490ee'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('categories',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('password_hash', sa.String(length=255), nullable=True),
sa.Column('pass_secure', sa.String(length=255), nullable=True),
sa.Column('bio', sa.String(length=255), nullable=True),
sa.Column('profile_pic_path', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_table('pitches',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pitch', sa.String(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('comment', sa.String(length=255), nullable=True),
sa.Column('time_posted', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitches_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitches_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('votes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('vote', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitches_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitches_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('votes')
op.drop_table('comments')
op.drop_table('pitches')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_table('categories')
# ### end Alembic commands ###
| 36.653846 | 76 | 0.663519 |
4a251a114c81a5c658ae15c6d7aaba91f6981539 | 10,524 | py | Python | qrf/qrf_cdf.py | RLstat/deep-conditional-distribution-regression | 66e56bd90225f9959d00b633837285cb8b7cbf35 | [
"Apache-2.0"
] | 6 | 2020-09-13T20:16:35.000Z | 2021-09-15T06:13:31.000Z | qrf/qrf_cdf.py | RLstat/deep-conditional-distribution-regression | 66e56bd90225f9959d00b633837285cb8b7cbf35 | [
"Apache-2.0"
] | null | null | null | qrf/qrf_cdf.py | RLstat/deep-conditional-distribution-regression | 66e56bd90225f9959d00b633837285cb8b7cbf35 | [
"Apache-2.0"
] | 3 | 2019-05-06T06:45:04.000Z | 2021-04-26T03:52:12.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 9 11:36:03 2019
@author: RLstat
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skgarden import RandomForestQuantileRegressor
from dcdr.utils import (evaluate_crps, evaluate_monotonicity,
evaluate_quantile_loss, evaluate_rmse, evaluate_coverage, quantile_to_cdf)
import gc
class QRFCDF(RandomForestQuantileRegressor):
def __init__(self, **kwargs):
super(QRFCDF, self).__init__(**kwargs)
def fit_cdf(self, train_x, train_y):
train_x = np.array(train_x)
train_y = np.array(train_y).flatten()
if train_x.ndim < 2:
train_x = train_x.reshape(-1, 1)
self.p = train_x.shape[1]
self.y_min = np.min(train_y)
self.y_max = np.max(train_y)
self.y_range = self.y_max - self.y_min
self.y_lim = [self.y_min, self.y_max]
self.fit(train_x, train_y)
def predict_cdf(self, test_x, quantiles_grid=None, quantile_lim=[0.00001, 0.99999],
n_quantiles=500, y_grid=None, pred_lim=None, pred_margin=0.1, ngrid=1000,
keep_cdf_matrix=True, overwrite_y_grid=True, keep_test_x=True):
if y_grid is None:
if pred_lim is None:
if pred_margin is None:
                    pred_lim = self.y_lim
else:
pred_lim = [self.y_min - pred_margin*self.y_range, self.y_max + pred_margin*self.y_range]
y_grid = np.linspace(pred_lim[0], pred_lim[1], num=ngrid)
self.pred_lim = pred_lim
else:
self.pred_lim = [np.min(y_grid), np.max(y_grid)]
if not isinstance(test_x, np.ndarray):
test_x = np.array(test_x)
if test_x.ndim <2:
test_x = test_x.reshape(-1, self.p)
y_grid = y_grid.flatten()
if quantiles_grid is None:
quantiles_grid = np.linspace(quantile_lim[0], quantile_lim[1], num=n_quantiles)
if keep_test_x:
self.test_x = test_x
if isinstance(quantiles_grid, list):
rescaled_qt = [qt*100 for qt in quantiles_grid]
else:
rescaled_qt = quantiles_grid*100
quantile_output = self.predict(test_x, quantile=rescaled_qt)
TestX_CDF_matrix = quantile_to_cdf(quantile_output, quantiles_grid, y_grid)
if keep_cdf_matrix:
self.TestX_CDF_matrix = TestX_CDF_matrix
if overwrite_y_grid:
self.y_grid = y_grid
cdf_df = pd.DataFrame(TestX_CDF_matrix, columns=y_grid)
return cdf_df
def plot_PIT(self, test_x, test_y, density=True, return_cdf_value=False, block_size=None,
**kwargs):
if block_size is None:
cdf_df = self.predict_cdf(test_x, y_grid=test_y, keep_cdf_matrix=False,
overwrite_y_grid=False)
cdf_values = [cdf_df.iloc[i,i] for i in range(cdf_df.shape[0])]
else:
cdf_values = []
if test_x.shape[0] % block_size == 0:
nblocks = test_x.shape[0]//block_size
else:
nblocks = test_x.shape[0]//block_size + 1
for b in range(nblocks):
cdf_df = self.predict_cdf(test_x[b*block_size : (b+1)*block_size],
y_grid=test_y[b*block_size : (b+1)*block_size],
keep_cdf_matrix=False, overwrite_y_grid=False)
cdf_values.extend([cdf_df.iloc[i,i] for i in range(cdf_df.shape[0])])
del cdf_df
gc.collect()
fig, ax = plt.subplots(1, 1)
ax.hist(cdf_values, density=density, **kwargs)
if density:
ax.axhline(y=1, color='red')
if return_cdf_value:
return ax, cdf_values
else:
return ax
def plot_cdf(self, index=0, test_x=None, test_y=None, grid=None, pred_lim=None,
pred_margin=0.1, true_cdf_func=None, figsize=(12, 8), title=None):
if test_x is None:
cdf = self.TestX_CDF_matrix[index, :].copy()
xval = self.test_x[index, :]
grid = self.y_grid.copy()
else:
cdf = self.predict_cdf(test_x, y_grid=grid, pred_lim=pred_lim,
pred_margin=pred_margin,
keep_cdf_matrix=False,
overwrite_y_grid=True,
keep_test_x=False).values.flatten()
xval = test_x
grid = self.y_grid.copy()
cdf = cdf[grid.argsort()]
grid.sort()
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.plot(grid, cdf, label='predicted cdf', lw=3)
if true_cdf_func is not None:
true_cdf = true_cdf_func(xval, grid)
ax.plot(grid, true_cdf, label='true cdf', lw=3)
ax.legend(loc='best', prop={'size':16})
if test_y is not None:
if test_x is None:
ax.axvline(x=test_y[index], color='black', lw=3)
else:
ax.axvline(x=test_y, color='black', lw=3)
if title:
ax.set_title(title, fontsize=20)
tlt = ax.title
tlt.set_position([0.5, 1.02])
ax.get_xaxis().set_tick_params(direction='out', labelsize=16)
ax.get_yaxis().set_tick_params(direction='out', labelsize=16)
ax.set_xlim(self.pred_lim)
return ax
def plot_density(self, index=0, test_x=None, test_y=None, grid=None, pred_lim=None,
pred_margin=0.1, window=1, true_density_func=None,
figsize=(12, 8), title=None, label=None, xlabel=None,
ylabel=None, figure=None):
if test_x is None:
cdf = self.TestX_CDF_matrix[index, :].copy()
xval = self.test_x[index, :]
grid = self.y_grid.copy()
else:
cdf = self.predict_cdf(test_x, y_grid=grid, pred_lim=pred_lim,
pred_margin=pred_margin,
keep_cdf_matrix=False,
overwrite_y_grid=True,
keep_test_x=False).values.flatten()
xval = test_x
grid = self.y_grid.copy()
if len(grid) < 2*window + 1:
            raise ValueError('''The density of the leftmost {0} and the rightmost {1}
                             grid points won't be plotted, so at least
                             {2} grid points are required to make a density plot'''.format(window, window, 2*window + 1))
cdf = cdf[grid.argsort()]
grid.sort()
density_binwidth = grid[(2*window):] - grid[:-(2*window)]
cdf_diff = cdf[(2*window):] - cdf[:-(2*window)]
density = cdf_diff/density_binwidth
if figure is not None:
fig, ax = figure
else:
fig, ax = plt.subplots(1, 1, figsize=figsize)
if label is None:
label = 'predicted density'
ax.plot(grid[window:-window], density, label=label, lw=3)
if true_density_func is not None:
true_density = true_density_func(xval, grid[window:-window])
ax.plot(grid[window:-window], true_density, label='true density', lw=3)
ax.legend(loc='best', prop={'size':16})
if title:
ax.set_title(title, fontsize=20)
tlt = ax.title
tlt.set_position([0.5, 1.02])
if test_y is not None:
if test_x is None:
ax.axvline(x=test_y[index], color='black', lw=3)
else:
ax.axvline(x=test_y, color='black', lw=3)
ax.get_xaxis().set_tick_params(direction='out', labelsize=16)
ax.get_yaxis().set_tick_params(direction='out', labelsize=16)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=18)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=18)
ax.set_xlim(self.pred_lim)
return (fig, ax)
def predict_quantile(self, test_x, quantiles):
if isinstance(quantiles, list):
rescaled_qt = [qt*100 for qt in quantiles]
else:
rescaled_qt = quantiles*100
quantile_output = self.predict(test_x, quantile=rescaled_qt)
test_qt_df = pd.DataFrame(quantile_output, columns=quantiles)
return test_qt_df
def predict_mean(self, test_x):
test_mean = self.predict(test_x)
return test_mean
def evaluate(self, test_x, test_y, y_grid=None, pred_margin=0.1,
ngrid=1000, quantiles=None, interval=None, mode='CRPS'):
if mode == 'QuantileLoss' and quantiles is not None:
quantile_matrix = self.predict_quantile(test_x, quantiles).values
test_score = evaluate_quantile_loss(quantile_matrix, test_y, quantiles)
else:
cdf_matrix = self.predict_cdf(test_x, y_grid=y_grid,
pred_margin=pred_margin,
ngrid=ngrid).values
if mode == 'CRPS':
test_score = evaluate_crps(cdf_matrix, test_y, self.y_grid)
elif mode == 'RMSE':
test_score = evaluate_rmse(cdf_matrix, test_y, self.y_grid)
elif mode == 'Coverage' and interval is not None:
test_score = evaluate_coverage(cdf_matrix, test_y, interval, self.y_grid)
elif mode == 'Monotonicity':
test_score = evaluate_monotonicity(cdf_matrix, self.y_grid)
elif mode == 'Crossing':
test_score = evaluate_monotonicity(cdf_matrix, self.y_grid, return_crossing_freq=True)
return test_score | 37.319149 | 125 | 0.528981 |
4a251a73f29ae5e8da6221d9d91e6789969e9dcf | 253 | py | Python | lisa/features/__init__.py | KsenijaS/lisa | f09291a088c81de40e57bc4e37e9348220a87417 | [
"MIT"
] | 1 | 2021-06-17T13:02:44.000Z | 2021-06-17T13:02:44.000Z | lisa/features/__init__.py | KsenijaS/lisa | f09291a088c81de40e57bc4e37e9348220a87417 | [
"MIT"
] | null | null | null | lisa/features/__init__.py | KsenijaS/lisa | f09291a088c81de40e57bc4e37e9348220a87417 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .gpu import Gpu
from .serial_console import SerialConsole
from .sriov import Sriov
from .startstop import StartStop
__all__ = ["Gpu", "SerialConsole", "Sriov", "StartStop"]
| 25.3 | 56 | 0.766798 |
4a251be34511c1b4f2e8866f78b9e10c6c3cadb0 | 3,990 | py | Python | tools/pytorch2onnx.py | dyhan0920/mmdetection | adc65ffc3e78c4dfd5f9c693210b69140c3f9a56 | [
"Apache-2.0"
] | 38 | 2020-11-03T14:45:49.000Z | 2022-03-19T01:14:49.000Z | tools/pytorch2onnx.py | 13952522076/mmdet0 | 7c914eceda1e61bd7aea78d9db924ead1c582daa | [
"Apache-2.0"
] | 8 | 2021-02-05T07:30:39.000Z | 2022-03-16T06:45:46.000Z | tools/pytorch2onnx.py | 13952522076/mmdet0 | 7c914eceda1e61bd7aea78d9db924ead1c582daa | [
"Apache-2.0"
] | 9 | 2020-11-10T16:29:15.000Z | 2021-06-07T08:00:38.000Z | import argparse
import io
import mmcv
import onnx
import torch
from mmcv.ops import RoIAlign, RoIPool
from mmcv.runner import load_checkpoint
from onnx import optimizer
from torch.onnx import OperatorExportTypes
from mmdet.models import build_detector
def export_onnx_model(model, inputs, passes):
"""Trace and export a model to onnx format. Modified from
https://github.com/facebookresearch/detectron2/
Args:
model (nn.Module):
inputs (tuple[args]): the model will be called by `model(*inputs)`
passes (None or list[str]): the optimization passed for ONNX model
Returns:
an onnx model
"""
assert isinstance(model, torch.nn.Module)
# make sure all modules are in eval mode, onnx may change the training
# state of the module if the states are not consistent
def _check_eval(module):
assert not module.training
model.apply(_check_eval)
# Export the model to ONNX
with torch.no_grad():
with io.BytesIO() as f:
torch.onnx.export(
model,
inputs,
f,
operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
# verbose=True, # NOTE: uncomment this for debugging
# export_params=True,
)
onnx_model = onnx.load_from_string(f.getvalue())
# Apply ONNX's Optimization
if passes is not None:
all_passes = optimizer.get_available_passes()
assert all(p in all_passes for p in passes), \
f'Only {all_passes} are supported'
onnx_model = optimizer.optimize(onnx_model, passes)
return onnx_model
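# Minimal illustrative sketch (not part of the original tool): export_onnx_model() only needs a
# torch module in eval mode plus a tuple of example inputs. The toy network and input shape
# below are arbitrary; main() further down shows the full detector workflow.
def _export_toy_model_example():
    toy = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3)).eval()
    return export_onnx_model(toy, (torch.randn(1, 3, 64, 64),), passes=None)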
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet pytorch model conversion to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--out', type=str, required=True, help='output ONNX filename')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--passes', type=str, nargs='+', help='ONNX optimization passes')
args = parser.parse_args()
return args
def main():
args = parse_args()
if not args.out.endswith('.onnx'):
raise ValueError('The output file must be a onnx file.')
if len(args.shape) == 1:
input_shape = (3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (3, ) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = mmcv.Config.fromfile(args.config)
cfg.model.pretrained = None
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
load_checkpoint(model, args.checkpoint, map_location='cpu')
# Only support CPU mode for now
model.cpu().eval()
# Customized ops are not supported, use torchvision ops instead.
for m in model.modules():
if isinstance(m, (RoIPool, RoIAlign)):
# set use_torchvision on-the-fly
m.use_torchvision = True
# TODO: a better way to override forward function
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'ONNX conversion is not currently supported with '
f'{model.__class__.__name__}')
input_data = torch.empty((1, *input_shape),
dtype=next(model.parameters()).dtype,
device=next(model.parameters()).device)
onnx_model = export_onnx_model(model, (input_data, ), args.passes)
# Print a human readable representation of the graph
onnx.helper.printable_graph(onnx_model.graph)
print(f'saving model in {args.out}')
onnx.save(onnx_model, args.out)
if __name__ == '__main__':
main()
| 31.92 | 76 | 0.646115 |
4a251cb5868c668b353aed5a150dcca6742005f7 | 460 | py | Python | user.py | fuaad001/password-locker | 4cae9f14f4ca646898b4cc76690b3500750de5e2 | [
"MIT"
] | null | null | null | user.py | fuaad001/password-locker | 4cae9f14f4ca646898b4cc76690b3500750de5e2 | [
"MIT"
] | null | null | null | user.py | fuaad001/password-locker | 4cae9f14f4ca646898b4cc76690b3500750de5e2 | [
"MIT"
] | null | null | null | class User:
"""
Class that generates new instances of users
"""
user_list = []
def __init__(self, user_name, password):
'''
__init__ method that defines properties for our users objects.
'''
self.user_name = user_name
self.password = password
def create_user(self):
'''
        create_user method adds new user objects into user_list
'''
User.user_list.append(self)
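# Illustrative usage sketch (not part of the original application): create a user and store it
# in the class-level list. The credentials below are made-up example values.
if __name__ == '__main__':
    demo_user = User("demo_name", "demo_password")
    demo_user.create_user()
    print(len(User.user_list))  # prints 1 when this file is run directly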
| 20.909091 | 70 | 0.591304 |
4a251e70b17c01a72e1dd1bf5fa3daa003b2fd91 | 561 | py | Python | Anil/sixth.py | tf153/Connect-Bud | 99489f0494c7a7eb7a4586a8d33f9e261f93419e | [
"MIT"
] | null | null | null | Anil/sixth.py | tf153/Connect-Bud | 99489f0494c7a7eb7a4586a8d33f9e261f93419e | [
"MIT"
] | null | null | null | Anil/sixth.py | tf153/Connect-Bud | 99489f0494c7a7eb7a4586a8d33f9e261f93419e | [
"MIT"
] | null | null | null | c1=50
c2=20
print(c1+c2)
print(c1-c2)
print(c1*c2)
print(c1/c2)
print(c1%c2)
print("---------------")
c1=c1+30
c2+=40
print(c1)
print(c2)
print("-------------")
c1-=10
c2*=20
print(c1)
print(c2)
print("-------------")
c1/=7
print(c1)
c1%=3
print(c1)
print("-------------")
c3=50
print(c3//5)
c3//=10
print(c3)
print("---------------")
print(10==20)
print(25==25)
print(10<25)
print("--------------")
if(10==20):
    print("equal")
else:
    print("not equal")
print("-------------")
operator=input("Operator = ")
if operator=="+":
print("30+70")
| 11 | 29 | 0.493761 |
4a251e94b4f0952acb6ae0d030351cdaeb53b9e8 | 1,097 | py | Python | sofi/ui/basicblock.py | screamingskulls/sofi | 1d75bc97683151864f8a4cafb59ef8e50de63ee4 | [
"MIT"
] | 402 | 2016-04-05T23:11:07.000Z | 2022-02-22T21:10:49.000Z | sofi/ui/basicblock.py | screamingskulls/sofi | 1d75bc97683151864f8a4cafb59ef8e50de63ee4 | [
"MIT"
] | 87 | 2016-03-31T00:09:39.000Z | 2021-02-22T04:49:25.000Z | sofi/ui/basicblock.py | screamingskulls/sofi | 1d75bc97683151864f8a4cafb59ef8e50de63ee4 | [
"MIT"
] | 54 | 2016-03-31T00:10:33.000Z | 2021-06-23T21:38:36.000Z | from .element import Element
class BasicBlock(Element):
"""Implements <pre> tag"""
def __init__(self, text=None, cl=None, ident=None, style=None, attrs=None):
super().__init__(cl=cl, ident=ident, style=style, attrs=attrs)
if text:
self._children.append(text)
def __repr__(self):
return "<BasicBlock>"
def __str__(self):
output = [ "<pre" ]
if self.ident:
output.append(" id=\"")
output.append(self.ident)
output.append("\"")
if self.cl:
output.append(" class=\"")
output.append(self.cl)
output.append("\"")
if self.style:
output.append(" style=\"")
output.append(self.style)
output.append("\"")
if self.attrs:
for k in self.attrs.keys():
output.append(' ' + k + '="' + self.attrs[k] + '"')
output.append(">")
for child in self._children:
output.append(str(child))
output.append("</pre>")
return "".join(output)
| 23.847826 | 79 | 0.511395 |
4a251eb35e43cc2ca3a014243ba4202984d870d1 | 2,960 | py | Python | pokedex.py | trlucasr/pokedex | 9afb6d6ee413d9840aa0e43a8685bfbdafcd4020 | [
"MIT"
] | null | null | null | pokedex.py | trlucasr/pokedex | 9afb6d6ee413d9840aa0e43a8685bfbdafcd4020 | [
"MIT"
] | null | null | null | pokedex.py | trlucasr/pokedex | 9afb6d6ee413d9840aa0e43a8685bfbdafcd4020 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, redirect, session, flash, url_for
app = Flask(__name__)
app.secret_key = 'flask'
class Pokemon:
def __init__(self, nome, especie, tipo):
self.nome = nome
self.especie = especie
self.tipo = tipo
class Treinadora:
def __init__(self, id, nome, senha):
self.id = id
self.nome = nome
self.senha = senha
# create the trainers
treinadora1 = Treinadora('Mary', 'Mary Jackson ', '1234')
treinadora2 = Treinadora('Ada', 'Ada Lovelace', '1234')
treinadora3 = Treinadora('Katherine', 'Katherine Johnson', '1234')
treinadoras = {treinadora1.id: treinadora1,
treinadora2.id: treinadora2,
treinadora3.id: treinadora3}
# pokemon database
pokemon1 = Pokemon('Meowth', 'Arranha Gato', 'Normal')
pokemon2 = Pokemon('Charmander', 'Lagarto', 'Fogo')
pokemon3 = Pokemon('Clefairy', 'Fada', 'Fada')
pokemon4 = Pokemon('Machop', 'Superpoder', 'Lutador')
pokemon5 = Pokemon('Rhyhorn', 'Espigão', 'Terrestre/pedra')
pokemon6 = Pokemon('Cyndaquil', 'Rato de fogo', 'Fogo')
pokemon7 = Pokemon('Shuckle', 'Mofo', 'Pedra')
pokemon8 = Pokemon('Whismur', 'Sussuro', 'Normal')
pokemon9 = Pokemon('Swablu', 'Pássaro de algodão', 'Voador')
pokemon10 = Pokemon('Bidoof', 'Rato Gorducho', 'Normal')
lista = [pokemon1, pokemon2,pokemon3,pokemon4,pokemon5,pokemon6,pokemon7,pokemon8,pokemon9,pokemon10]
# index route configuration.
@app.route('/')
def index():
return render_template('lista.html', titulo='Pokedex', pokemons=lista)
@app.route('/novo')
def novo():
if 'usuario_logado' not in session or session['usuario_logado'] == None:
return redirect(url_for('login', proxima=url_for('novo')))
return render_template('novo.html', titulo='Novo Pokemon')
@app.route('/criar', methods=['POST',])
def criar():
    nome = request.form['nome']
    especie = request.form['especie']
    tipo = request.form['tipo']
pokemon = Pokemon(nome, especie, tipo)
lista.append(pokemon)
return redirect(url_for('index'))
@app.route('/login')
def login():
proxima = request.args.get('proxima')
return render_template('login.html', proxima=proxima)
@app.route('/autenticar', methods=['POST', ])
def autenticar():
if request.form['treinadora'] in treinadoras:
treinadora = treinadoras[request.form['treinadora']]
if treinadora.senha == request.form['senha']:
session['usuario_logado'] = treinadora.id
flash(treinadora.nome + ' acesso permitido!')
proxima_pagina = request.form['proxima']
return redirect(proxima_pagina)
else:
flash('Acesso negado, digite novamente!')
return redirect(url_for('login'))
@app.route('/logout')
def logout():
session['usuario_logado'] = None
flash('Treinadora, logue novamente para cadastrar os pokemons que encontrar!')
return redirect(url_for('index'))
app.run(debug=True) | 34.418605 | 101 | 0.671959 |
4a251f3038bd15ebaf503f0cc92bbdb5567bc535 | 737 | py | Python | agsconfig/services/jpip_server_extension.py | DavidWhittingham/agsconfig | c0ac6c37e5e49f87d2812220d756aef118c08024 | [
"BSD-3-Clause"
] | 1 | 2019-05-17T01:44:41.000Z | 2019-05-17T01:44:41.000Z | agsconfig/services/jpip_server_extension.py | DavidWhittingham/agsconfig | c0ac6c37e5e49f87d2812220d756aef118c08024 | [
"BSD-3-Clause"
] | 2 | 2019-04-09T02:01:26.000Z | 2019-06-25T05:27:11.000Z | agsconfig/services/jpip_server_extension.py | DavidWhittingham/agsconfig | c0ac6c37e5e49f87d2812220d756aef118c08024 | [
"BSD-3-Clause"
] | 2 | 2019-03-21T04:58:18.000Z | 2019-09-09T23:00:48.000Z | """This module contains the jpip Server extension class"""
# Python 2/3 compatibility
# pylint: disable=wildcard-import,unused-wildcard-import,wrong-import-order,wrong-import-position
from __future__ import (absolute_import, division, print_function, unicode_literals)
from future.builtins.disabled import *
from future.builtins import *
from future.standard_library import install_aliases
install_aliases()
# pylint: enable=wildcard-import,unused-wildcard-import,wrong-import-order,wrong-import-position
from .extension_base import ExtensionBase
class JPIPServerExtension(ExtensionBase):
""" jpip server extension properties for arcGIS services """
def __init__(self, editor):
super().__init__(editor, "JPIPServer")
| 38.789474 | 97 | 0.800543 |
4a251f7c95a035b4bc1bce0fc0a2b5387d19b634 | 16,145 | py | Python | test/functional/mempool_accept.py | Zachinquarantine/bitcoindx | 1a106a5ed6e6bfd0432368e71bdd2ca342afe6f9 | [
"MIT"
] | 1 | 2022-01-25T15:50:11.000Z | 2022-01-25T15:50:11.000Z | test/functional/mempool_accept.py | Zachinquarantine/bitcoindx | 1a106a5ed6e6bfd0432368e71bdd2ca342afe6f9 | [
"MIT"
] | 3 | 2021-12-31T16:41:15.000Z | 2022-01-23T12:22:46.000Z | test/functional/mempool_accept.py | Zachinquarantine/bitcoindx | 1a106a5ed6e6bfd0432368e71bdd2ca342afe6f9 | [
"MIT"
] | 2 | 2022-01-15T15:52:28.000Z | 2022-01-22T00:37:46.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The BitcoinDX Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool acceptance of raw transactions."""
from decimal import Decimal
import math
from test_framework.test_framework import BitcoinDXTestFramework
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
MAX_MONEY,
tx_from_hex,
)
from test_framework.script import (
CScript,
OP_0,
OP_2,
OP_3,
OP_CHECKMULTISIG,
OP_HASH160,
OP_RETURN,
)
from test_framework.script_util import (
script_to_p2sh_script,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class MempoolAcceptanceTest(BitcoinDXTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-txindex','-permitbaremultisig=0',
]] * self.num_nodes
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def check_mempool_result(self, result_expected, *args, **kwargs):
"""Wrapper to check result of testmempoolaccept on node_0's mempool"""
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
for r in result_test:
r.pop('wtxid') # Skip check for now
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) # Must not change mempool state
def run_test(self):
node = self.nodes[0]
self.log.info('Start with empty mempool, and 200 blocks')
self.mempool_size = 0
assert_equal(node.getblockcount(), 200)
assert_equal(node.getmempoolinfo()['size'], self.mempool_size)
coins = node.listunspent()
self.log.info('Should not accept garbage to testmempoolaccept')
assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar'))
assert_raises_rpc_error(-8, 'Array must contain between 1 and 25 transactions.', lambda: node.testmempoolaccept(rawtxs=['ff22']*26))
assert_raises_rpc_error(-8, 'Array must contain between 1 and 25 transactions.', lambda: node.testmempoolaccept(rawtxs=[]))
assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar']))
self.log.info('A transaction already in the blockchain')
coin = coins.pop() # Pick a random coin(base) to spend
raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout']}],
outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}],
))['hex']
txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, maxfeerate=0)
node.generate(1)
self.mempool_size = 0
self.check_mempool_result(
result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': 'txn-already-known'}],
rawtxs=[raw_tx_in_block],
)
self.log.info('A transaction not in the mempool')
fee = Decimal('0.000007')
raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later
outputs=[{node.getnewaddress(): Decimal('0.3') - fee}],
))['hex']
tx = tx_from_hex(raw_tx_0)
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee}}],
rawtxs=[raw_tx_0],
)
self.log.info('A final transaction not in the mempool')
coin = coins.pop() # Pick a random coin(base) to spend
output_amount = Decimal('0.025')
raw_tx_final = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout'], "sequence": 0xffffffff}], # SEQUENCE_FINAL
outputs=[{node.getnewaddress(): output_amount}],
locktime=node.getblockcount() + 2000, # Can be anything
))['hex']
tx = tx_from_hex(raw_tx_final)
fee_expected = coin['amount'] - output_amount
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee_expected}}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
node.sendrawtransaction(hexstring=raw_tx_final, maxfeerate=0)
self.mempool_size += 1
self.log.info('A transaction in the mempool')
node.sendrawtransaction(hexstring=raw_tx_0)
self.mempool_size += 1
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'txn-already-in-mempool'}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that replaces a mempool transaction')
tx = tx_from_hex(raw_tx_0)
tx.vout[0].nValue -= int(fee * COIN) # Double the fee
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1 # Now, opt out of RBF
raw_tx_0 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
tx = tx_from_hex(raw_tx_0)
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': (2 * fee)}}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that conflicts with an unconfirmed tx')
# Send the transaction that replaces the mempool transaction and opts out of replaceability
node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)
# take original raw_tx_0
tx = tx_from_hex(raw_tx_0)
tx.vout[0].nValue -= int(4 * fee * COIN) # Set more fee
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'txn-mempool-conflict'}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
self.log.info('A transaction with missing inputs, that never existed')
tx = tx_from_hex(raw_tx_0)
tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14)
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with missing inputs, that existed once in the past')
tx = tx_from_hex(raw_tx_0)
tx.vin[0].prevout.n = 1 # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
raw_tx_1 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex']
txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, maxfeerate=0)
# Now spend both to "clearly hide" the outputs, ie. remove the coins from the utxo set by spending them
raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[
{'txid': txid_0, 'vout': 0},
{'txid': txid_1, 'vout': 0},
],
outputs=[{node.getnewaddress(): 0.1}]
))['hex']
txid_spend_both = node.sendrawtransaction(hexstring=raw_tx_spend_both, maxfeerate=0)
node.generate(1)
self.mempool_size = 0
# Now see if we can add the coins back to the utxo set by sending the exact txs again
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_0],
)
self.check_mempool_result(
result_expected=[{'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_1],
)
self.log.info('Create a signed "reference" tx for later use')
raw_tx_reference = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': txid_spend_both, 'vout': 0}],
outputs=[{node.getnewaddress(): 0.05}],
))['hex']
tx = tx_from_hex(raw_tx_reference)
# Reference tx should be valid on itself
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': { 'base': Decimal('0.1') - Decimal('0.05')}}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
self.log.info('A transaction with no outputs')
tx = tx_from_hex(raw_tx_reference)
tx.vout = []
# Skip re-signing the transaction for context independent checks from now on
# tx = tx_from_hex(node.signrawtransactionwithwallet(tx.serialize().hex())['hex'])
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-empty'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A really large transaction')
tx = tx_from_hex(raw_tx_reference)
tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_BASE_SIZE / len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-oversize'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with negative output value')
tx = tx_from_hex(raw_tx_reference)
tx.vout[0].nValue *= -1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-negative'}],
rawtxs=[tx.serialize().hex()],
)
# The following two validations prevent overflow of the output amounts (see CVE-2010-5139).
self.log.info('A transaction with too large output value')
tx = tx_from_hex(raw_tx_reference)
tx.vout[0].nValue = MAX_MONEY + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-toolarge'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with too large sum of output values')
tx = tx_from_hex(raw_tx_reference)
tx.vout = [tx.vout[0]] * 2
tx.vout[0].nValue = MAX_MONEY
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-txouttotal-toolarge'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction with duplicate inputs')
tx = tx_from_hex(raw_tx_reference)
tx.vin = [tx.vin[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-inputs-duplicate'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A non-coinbase transaction with coinbase-like outpoint')
tx = tx_from_hex(raw_tx_reference)
tx.vin.append(CTxIn(COutPoint(hash=0, n=0xffffffff)))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-prevout-null'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A coinbase transaction')
# Pick the input of the first tx we signed, so it has to be a coinbase tx
raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
tx = tx_from_hex(raw_tx_coinbase_spent)
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'coinbase'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('Some nonstandard transactions')
tx = tx_from_hex(raw_tx_reference)
tx.nVersion = 3 # A version currently non-standard
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'version'}],
rawtxs=[tx.serialize().hex()],
)
tx = tx_from_hex(raw_tx_reference)
tx.vout[0].scriptPubKey = CScript([OP_0]) # Some non-standard script
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptpubkey'}],
rawtxs=[tx.serialize().hex()],
)
tx = tx_from_hex(raw_tx_reference)
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
tx.vout[0].scriptPubKey = CScript([OP_2, pubkey, pubkey, pubkey, OP_3, OP_CHECKMULTISIG]) # Some bare multisig script (2-of-3)
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bare-multisig'}],
rawtxs=[tx.serialize().hex()],
)
tx = tx_from_hex(raw_tx_reference)
tx.vin[0].scriptSig = CScript([OP_HASH160]) # Some not-pushonly scriptSig
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-not-pushonly'}],
rawtxs=[tx.serialize().hex()],
)
tx = tx_from_hex(raw_tx_reference)
tx.vin[0].scriptSig = CScript([b'a' * 1648]) # Some too large scriptSig (>1650 bytes)
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-size'}],
rawtxs=[tx.serialize().hex()],
)
tx = tx_from_hex(raw_tx_reference)
output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=script_to_p2sh_script(b'burn'))
num_scripts = 100000 // len(output_p2sh_burn.serialize()) # Use enough outputs to make the tx too large for our policy
tx.vout = [output_p2sh_burn] * num_scripts
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'tx-size'}],
rawtxs=[tx.serialize().hex()],
)
tx = tx_from_hex(raw_tx_reference)
tx.vout[0] = output_p2sh_burn
tx.vout[0].nValue -= 1 # Make output smaller, such that it is dust for our policy
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'dust'}],
rawtxs=[tx.serialize().hex()],
)
tx = tx_from_hex(raw_tx_reference)
tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
tx.vout = [tx.vout[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'multi-op-return'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A timelocked transaction')
tx = tx_from_hex(raw_tx_reference)
tx.vin[0].nSequence -= 1 # Should be non-max, so locktime is not ignored
tx.nLockTime = node.getblockcount() + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'non-final'}],
rawtxs=[tx.serialize().hex()],
)
self.log.info('A transaction that is locked by BIP68 sequence logic')
tx = tx_from_hex(raw_tx_reference)
tx.vin[0].nSequence = 2 # We could include it in the second block mined from now, but not the very next one
# Can skip re-signing the tx because of early rejection
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'non-BIP68-final'}],
rawtxs=[tx.serialize().hex()],
maxfeerate=0,
)
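# Illustrative helpers only (not used by the test above): check_mempool_result()
# compares the raw `testmempoolaccept` output against hand-built dicts, and the
# two sketches below just show the shape of those expected entries.  The key
# names ('txid', 'allowed', 'reject-reason', 'vsize', 'fees') mirror the ones
# used throughout run_test().
def expect_rejected(txid, reason):
    """Expected-result entry for a transaction that must be rejected."""
    return {'txid': txid, 'allowed': False, 'reject-reason': reason}
def expect_accepted(txid, vsize, fee):
    """Expected-result entry for a transaction that must be accepted."""
    return {'txid': txid, 'allowed': True, 'vsize': vsize, 'fees': {'base': fee}}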
if __name__ == '__main__':
MempoolAcceptanceTest().main()
| 46.393678 | 147 | 0.623599 |
4a2521866f35e621ccfdd067a898cbce77063711 | 9,377 | py | Python | debug_tools/test_volsdf_algo.py | YangChenye/neurecon | 972e810ec252cfd16f630b1de6d2802d1b8de59a | [
"MIT"
] | 432 | 2021-09-08T07:38:36.000Z | 2022-03-30T08:35:14.000Z | debug_tools/test_volsdf_algo.py | ashawkey/neurecon | 972e810ec252cfd16f630b1de6d2802d1b8de59a | [
"MIT"
] | 7 | 2021-09-16T03:08:58.000Z | 2022-03-08T01:46:50.000Z | debug_tools/test_volsdf_algo.py | ashawkey/neurecon | 972e810ec252cfd16f630b1de6d2802d1b8de59a | [
"MIT"
] | 43 | 2021-09-08T07:43:39.000Z | 2022-03-28T12:18:31.000Z | from models.frameworks import get_model
from models.frameworks.volsdf import error_bound, sdf_to_sigma
from utils import io_util, rend_util
import torch
import numpy as np
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--load_pt", type=str, default=None)
parser.add_argument("--config", type=str, default=None)
# play yourself!
parser.add_argument("--beta_net", type=float, default=0.003)
parser.add_argument("--init_num", type=int, default=128)
parser.add_argument("--eps", type=float, default=0.1)
parser.add_argument("--far", type=float, default=6.0)
parser.add_argument("--max_iter", type=int, default=5)
args = parser.parse_args()
args, unknown = parser.parse_known_args()
if args.config is not None:
config = io_util.load_yaml(args.config)
other_dict = vars(args)
other_dict.pop("config")
config.update(other_dict)
args = config
model, trainer, render_kwargs_train, render_kwargs_test, volume_render_fn = get_model(args)
model.cuda()
if args.load_pt is not None:
state_dict = torch.load(args.load_pt, map_location='cuda')
model.load_state_dict(state_dict['model'])
sdf_model = model.forward_surface
# # NOTE: you can also try this out: on real sdf model.
def sdf1d(x: torch.Tensor, netchunk=1024):
global sdf_model
device = x.device
x = x.cuda()
        # # some test rays @ scene radius=3.0
rays_o = torch.tensor([ 0.8598, 1.0232, -1.4689]).float().cuda().reshape(1, 3)
rays_d = torch.tensor([-0.4857, -0.4841, 0.7386]).float().cuda().reshape(1, 3)
pts = rays_o + rays_d * x[..., None]
with torch.no_grad():
sdf = []
for i in range(0, pts.shape[0], netchunk):
pts_i = pts[i:i+netchunk]
sdf_i = sdf_model(pts_i)
sdf.append(sdf_i)
sdf = torch.cat(sdf, dim=0)
return sdf.to(device)
else:
# def sdf1d(x: torch.Tensor):
# # deviding point: 1.6, 1.8
# # (0, 1.8)
# # (1.6, 0.2)
# # (1.8, 0.4)
# # (2.2, 0.)
# y_cond1 = -x + 1.8
# y_cond2 = x - 1.4
# y_cond3 = -x + 2.2
# cond12 = x < 1.8
# y = torch.zeros_like(x)
# y[cond12] = torch.where(x[cond12] < 1.6, y_cond1[cond12], y_cond2[cond12])
# y[~cond12] = y_cond3[~cond12]
# return y
# NOTE: you can also try this out
def sdf1d(x: torch.Tensor):
# deviding point: 1.6, 1.8
# (0, 1.65)
# (1.6, 0.05)
# (1.8, 0.25)
# (2.05, 0.)
y_cond1 = -x + 1.65
y_cond2 = x - 1.55
y_cond3 = -x + 2.05
cond12 = x < 1.8
y = torch.zeros_like(x)
y[cond12] = torch.where(x[cond12] < 1.6, y_cond1[cond12], y_cond2[cond12])
y[~cond12] = y_cond3[~cond12]
return y
# def sdf1d(x: torch.Tensor):
# return torch.ones_like(x)
def plot(x, sdf, sigma, bounds, alpha, beta, upsampled_x=None):
device = sdf.device
# [N-1]
delta_i = x[..., 1:] - x[..., :-1]
# [N]
R_t = torch.cat(
[
torch.zeros([*sdf.shape[:-1], 1], device=device),
torch.cumsum(sigma[..., :-1] * delta_i, dim=-1)
], dim=-1)
opacity_approx = 1 - torch.exp(-R_t[..., :-1])
# -------------- ground truth compare
# ground truth data
dense_sigma = sdf_to_sigma(dense_sdf, alpha, beta)
# [N-1]
dense_delta_i = dense_x[..., 1:] - dense_x[..., :-1]
# [N]
dense_R_t = torch.cat(
[
torch.zeros([*dense_sdf.shape[:-1], 1]),
torch.cumsum(dense_sigma[..., :-1] * dense_delta_i, dim=-1)
], dim=-1)
dense_opacity_current_beta = 1 - torch.exp(-dense_R_t[..., :-1])
# get nearest neibor
dis = torch.abs(x[..., :-1, None] - dense_x[..., None, :-1])
ind = torch.argmin(dis, dim=-1)
opaticy_real = dense_opacity_current_beta[ind]
error = torch.abs(opacity_approx - opaticy_real)
# -------------- try inverse cdf sampling
d_fine = rend_util.sample_cdf(x, opacity_approx, 32)
# plot
# x_np = x.data.cpu().numpy()
# sdf_np = sdf.data.cpu().numpy()
# sigma_np = sigma.data.cpu().numpy()
# bounds_np = bounds.data.cpu().numpy()
fig, (ax1, ax2) = plt.subplots(2,1, figsize=(15, 15))
ax1.plot(x, sdf, label='sdf')
ax1.plot(x, sigma / sigma.max(), label='normalized sigma')
ax1.plot(x[..., :-1], opacity_approx, label='opacity')
ax1.plot(x[..., :-1], opaticy_real, label='opacity gt for current beta')
ax1.plot(dense_x, opaticy_oracal, label='oracal opacity')
ax1.scatter(d_fine, np.zeros_like(d_fine), s=20.0, label='try O^{-1} sampling')
ax1.legend()
ax2.step(x[..., :-1], bounds, label='error bounds')
ax2.step(x[..., :-1], error, label='error')
if upsampled_x is not None:
ax2.scatter(upsampled_x, np.zeros_like(upsampled_x), label='upsampled points')
ax2.legend()
plt.show()
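# Minimal standalone sketch of the opacity step used in plot() above: the
# discretized volume-rendering relation O_i = 1 - exp(-sum_{j<i} sigma_j * delta_j).
# Relies on the `torch` import at the top of this script; the function name is
# illustrative only and nothing below calls it.
def opacity_from_sigma(d_vals, sigma):
    delta = d_vals[..., 1:] - d_vals[..., :-1]                # sample spacing
    accum = torch.cumsum(sigma[..., :-1] * delta, dim=-1)     # running optical depth
    accum = torch.cat([torch.zeros_like(accum[..., :1]), accum], dim=-1)
    return 1.0 - torch.exp(-accum[..., :-1])                  # same as opacity_approx in plot()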
# settings or oracal data
eps = args.eps
init_num = args.init_num
beta_net = args.beta_net
alpha_net = 1./beta_net
M = args.far
max_iter = args.max_iter
x = torch.linspace(0, M, init_num)
dense_x = torch.linspace(0, M, 100001)
dense_sdf = sdf1d(dense_x)
opaticy_oracal = torch.where(dense_sdf > 0, torch.zeros_like(dense_sdf), torch.ones_like(dense_sdf))
# init
beta = np.sqrt((M**2) / (4 * (init_num-1) * np.log(1+eps)))
# beta = alpha_net * (M**2) / (4 * (init_num-1) * np.log(1+eps))
# algorithm
alpha = 1./beta
# alpha = alpha_net
# ------------- calculating
sdf = sdf1d(x)
sigma = sdf_to_sigma(sdf, alpha, beta)
bounds = error_bound(x, sdf, alpha, beta)
bounds_net = error_bound(x, sdf, alpha_net, beta_net)
print("init beta+ = {:.3f}".format(beta))
is_end_with_matching = False
it_algo = 0
while it_algo < max_iter and (net_bound_max := bounds_net.max()) > eps:
print("it =", it_algo)
print("net_bound_max = {:.6f}".format(net_bound_max.item()))
it_algo += 1
#------------- update: upsample
upsampled_x = rend_util.sample_pdf(x, bounds, init_num, det=True)
plot(x, sdf, sigma, bounds, alpha, beta, upsampled_x=upsampled_x)
x = torch.cat([x, upsampled_x], dim=-1)
# x, _ = torch.sort(x, dim=-1)
# sdf = sdf1d(x)
x, sort_indices = torch.sort(x, dim=-1)
sdf = torch.cat([sdf, sdf1d(upsampled_x)], dim=-1)
sdf = torch.gather(sdf, dim=-1, index=sort_indices)
print("more samples:", x.shape[-1])
bounds_net = error_bound(x, sdf, alpha_net, beta_net)
if bounds_net.max() > eps:
#-------------- find beta using bisection methods
# left: > eps
# right: < eps
beta_left = beta_net
beta_right = beta
for _ in range(10):
beta_tmp = 0.5 * (beta_left + beta_right)
alpha_tmp = 1./beta_tmp
# alpha_tmp = alpha_net
bounds_tmp = error_bound(x, sdf, alpha_tmp, beta_tmp)
bounds_max_tmp = bounds_tmp.max()
if bounds_max_tmp < eps:
beta_right = beta_tmp
elif bounds_max_tmp == eps:
beta_right = beta_tmp
break
else:
beta_left = beta_tmp
beta = beta_right
alpha = 1./beta
# alpha = alpha_net
sigma = sdf_to_sigma(sdf, alpha, beta)
bounds = error_bound(x, sdf, alpha, beta)
else:
is_end_with_matching = True
break
print("new beta+ = {:.3f}".format(beta))
if (not is_end_with_matching) and (it_algo != 0):
beta_net = beta_right
alpha_net = 1./beta_net
print("it=", it_algo)
print("final beta:", beta_net)
sigma = sdf_to_sigma(sdf, alpha_net, beta_net)
bounds = error_bound(x, sdf, alpha_net, beta_net)
print("final error bound max:", bounds.max())
plot(x, sdf, sigma, bounds, alpha_net, beta_net)
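# Sketch of the bisection step used inside the while-loop above, factored out
# for clarity (nothing calls this function).  It assumes, as the loop does,
# that the left endpoint violates the error bound (max > eps) while the right
# endpoint satisfies it, so the returned value is the tightest beta found that
# keeps the bound below eps.
def bisect_beta(x_pts, sdf_vals, beta_left, beta_right, eps_target, n_steps=10):
    for _ in range(n_steps):
        beta_mid = 0.5 * (beta_left + beta_right)
        if error_bound(x_pts, sdf_vals, 1. / beta_mid, beta_mid).max() < eps_target:
            beta_right = beta_mid   # bound satisfied: tighten from the right
        else:
            beta_left = beta_mid    # bound violated: move the left endpoint up
    return beta_right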
## ---------------------- backup
# def sdf_to_sigma(sdf: torch.Tensor, alpha, beta):
# # sdf *= -1 # NOTE: this will cause inplace opt!
# sdf = -sdf
# expsbeta = torch.exp(sdf / beta)
# psi = torch.where(sdf <= 0, 0.5 * expsbeta, 1 - 0.5 / expsbeta)
# return alpha * psi
# def error_bound(d_vals, sdf, alpha, beta):
# """
# d_vals: [(B), N_rays, N_pts]
# sdf: [(B), N_rays, N_pts]
# """
# device = sdf.device
# sigma = sdf_to_sigma(sdf, alpha, beta)
# # [(B), N_rays, N_pts]
# sdf_abs_i = torch.abs(sdf)
# # [(B), N_rays, N_pts-1]
# delta_i = d_vals[..., 1:] - d_vals[..., :-1]
# # [(B), N_rays, N_pts]
# R_t = torch.cat(
# [
# torch.zeros([*sdf.shape[:-1], 1], device=device),
# torch.cumsum(sigma[..., :-1] * delta_i, dim=-1)
# ], dim=-1)
# # [(B), N_rays, N_pts-1]
# d_i_star = torch.clamp_min(0.5 * (sdf_abs_i[..., :-1] + sdf_abs_i[..., 1:] - delta_i), 0.)
# # [(B), N_rays, N_pts-1]
# errors = alpha/(4*beta) * (delta_i**2) * torch.exp(-d_i_star / beta)
# # [(B), N_rays, N_pts-1]
# errors_t = torch.cumsum(errors, dim=-1)
# # [(B), N_rays, N_pts-1]
# bounds = torch.exp(-R_t[..., :-1]) * (torch.exp(errors_t) - 1.)
# # TODO: better solution
# # NOTE: nan comes from 0 * inf
# # NOTE: every situation where nan appears will also appears c * inf = "true" inf, so below solution is acceptable
# bounds[torch.isnan(bounds)] = np.inf
# return bounds
| 34.222628 | 119 | 0.580783 |
4a2522001b01921f369b2d45c3b3ffc6715c8d9c | 7,713 | py | Python | custom_components/shelly/binary_sensor.py | cetex/ShellyForHASS | 7d722e738e7b283a0944c6f64bf598518112533f | [
"MIT"
] | 541 | 2019-05-25T10:48:19.000Z | 2022-03-25T07:38:09.000Z | custom_components/shelly/binary_sensor.py | cetex/ShellyForHASS | 7d722e738e7b283a0944c6f64bf598518112533f | [
"MIT"
] | 579 | 2019-05-25T15:37:48.000Z | 2022-03-31T19:37:41.000Z | custom_components/shelly/binary_sensor.py | cetex/ShellyForHASS | 7d722e738e7b283a0944c6f64bf598518112533f | [
"MIT"
] | 125 | 2019-05-29T15:00:50.000Z | 2022-03-31T02:50:08.000Z | """
Shelly platform for the binary sensor component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/shelly/
"""
import logging
import time
from threading import Timer
from homeassistant.util import slugify
from homeassistant.helpers.dispatcher import async_dispatcher_connect
try:
from homeassistant.components.binary_sensor import BinarySensorEntity
except:
from homeassistant.components.binary_sensor import \
BinarySensorDevice as BinarySensorEntity
from homeassistant.helpers.restore_state import RestoreStateData
from . import (CONF_OBJECT_ID_PREFIX)
from .device import ShellyDevice
from .block import ShellyBlock
from .const import *
_LOGGER = logging.getLogger(__name__)
CLICK_EVENTS = {
'S' : 'single',
'SS' : 'double',
'SSS': 'triple',
'L': 'long',
'SL': 'short-long',
'LS': 'long-short'
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Shelly sensor dynamically."""
async def async_discover_sensor(dev, instance):
"""Discover and add a discovered sensor."""
if isinstance(dev, dict):
if 'sensor_type' in dev:
sensor_type = dev['sensor_type']
async_add_entities([ShellyBinaryInfoSensor(dev['itm'], instance,
sensor_type, sensor_type)])
return
if dev.device_type == "SWITCH":
async_add_entities([ShellySwitch(dev, instance)])
elif dev.device_type == "BINARY_SENSOR":
async_add_entities([
ShellyBinarySensor(dev, instance, dev.sensor_type,
dev.sensor_type)
])
async_dispatcher_connect(
hass,
"shelly_new_binary_sensor",
async_discover_sensor
)
class ShellySwitch(ShellyDevice, BinarySensorEntity):
"""Representation of a Shelly Switch state."""
def __init__(self, dev, instance):
"""Initialize an ShellySwitch."""
ShellyDevice.__init__(self, dev, instance)
self._unique_id += "_switch"
self.entity_id += "_switch"
self._state = None
self._click_delay = 700
self._last_state_change = 0
self._click_cnt = 0
self._click_timer = None
self._name_ext = "Switch"
self._last_event = None
self._event_cnt = None
self.update()
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
@property
def icon(self):
"""Return the button icon."""
return "mdi:light-switch"
def _millis(self):
return int(round(time.time() * 1000))
def _click_timeout(self):
self._send_click_event()
self._click_cnt = 0
self._click_timer = None
def _send_click_event(self):
self.hass.bus.fire('shelly_switch_click', \
{'entity_id' : self.entity_id,
'click_cnt': self._click_cnt,
'state' : self._state})
def _send_event(self, type):
self.hass.bus.fire('shellyforhass.click', \
{'entity_id' : self.entity_id,
'click_type' : type})
def update(self):
"""Fetch new state data for this switch."""
millis = self._millis()
new_state = None if self._dev.state is None else self._dev.state != 0
if self._state is not None and new_state != self._state:
if self._click_timer is not None:
self._click_timer.cancel()
diff = millis - self._last_state_change
if diff < self._click_delay or self._click_cnt == 0:
self._click_cnt += 1
else:
self._click_cnt = 1
self._last_state_change = millis
self._click_timer = Timer(self._click_delay/1000,
self._click_timeout)
self._click_timer.start()
self._state = new_state
if self._dev.event_cnt != self._event_cnt:
event = CLICK_EVENTS.get(self._dev.last_event, None)
if not self._event_cnt is None:
self._send_event(event)
self._event_cnt = self._dev.event_cnt
self._last_event = event
@property
def device_state_attributes(self):
attrs = super().device_state_attributes
if self._last_event:
attrs[ATTRIBUTE_CLICK_TYPE] = self._last_event
attrs[ATTRIBUTE_CLICK_CNT] = self._event_cnt
return attrs
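# Standalone sketch (not used by this integration): ShellySwitch.update() above
# does multi-click detection by bumping a counter on every state change inside
# the _click_delay window and letting a threading Timer report the final count
# once the presses stop.  The class below shows the same pattern without any
# Home Assistant dependencies; the names are hypothetical.
import threading
class _ClickCounter:
    def __init__(self, delay_ms=700, on_done=print):
        self._delay = delay_ms / 1000.0
        self._on_done = on_done
        self._count = 0
        self._timer = None
    def press(self):
        if self._timer is not None:
            self._timer.cancel()          # still inside the click window: keep counting
        self._count += 1
        self._timer = threading.Timer(self._delay, self._finish)
        self._timer.start()
    def _finish(self):
        self._on_done(self._count)        # e.g. 1 = single, 2 = double, 3 = triple
        self._count = 0
        self._timer = None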
class ShellyBinarySensor(ShellyDevice, BinarySensorEntity):
"""Representation of a Shelly Sensor."""
def __init__(self, dev, instance, sensor_type, sensor_name):
"""Initialize an ShellySensor."""
self._sensor_cfg = SENSOR_TYPES_CFG[SENSOR_TYPE_DEFAULT]
ShellyDevice.__init__(self, dev, instance)
self._unique_id += "_" + sensor_name
self.entity_id += "_" + sensor_name
self._sensor_type = sensor_type
self._sensor_name = sensor_name
#self._battery = None
self._config = instance.conf
self._state = None
if self._sensor_type in SENSOR_TYPES_CFG:
self._sensor_cfg = SENSOR_TYPES_CFG[self._sensor_type]
self._master_unit = True
self.update()
@property
def is_on(self):
"""State"""
return self._state
@property
def quantity_name(self):
"""Name of quantity."""
return self._sensor_cfg[0]
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._sensor_cfg[1]
@property
def icon(self):
"""Return the icon."""
return self._sensor_cfg[2]
@property
def device_class(self):
"""Return the device class."""
return self._sensor_cfg[3]
def update(self):
"""Fetch new state data for this sensor."""
self._state = self._dev.state
class ShellyBinaryInfoSensor(ShellyBlock, BinarySensorEntity):
"""Representation of a Shelly Info Sensor."""
def __init__(self, block, instance, sensor_type, sensor_name):
self._sensor_cfg = SENSOR_TYPES_CFG[SENSOR_TYPE_DEFAULT]
ShellyBlock.__init__(self, block, instance, "_" + sensor_name)
self.entity_id = "sensor" + self.entity_id
self._sensor_name = sensor_name
self._sensor_type = sensor_type
if self._sensor_type in SENSOR_TYPES_CFG:
self._sensor_cfg = SENSOR_TYPES_CFG[self._sensor_type]
self._state = None
self._name_ext = self.quantity_name()
self.update()
def update(self):
"""Fetch new state data for this sensor."""
if self._block.info_values is not None:
self._state = self._block.info_values.get(self._sensor_name, None)
@property
def is_on(self):
"""Return the state of the sensor."""
return self._state
def quantity_name(self):
"""Name of quantity."""
return self._sensor_cfg[0]
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._sensor_cfg[1]
@property
def icon(self):
"""Return the icon."""
return self._sensor_cfg[2]
@property
def device_class(self):
"""Return the device class."""
return self._sensor_cfg[3]
| 33.38961 | 81 | 0.599767 |
4a252244456ae8a40bacff74c63ab9f432e78833 | 823 | py | Python | functional_tests/test_layout_and_styling.py | gajimenezmaggiora/gus-django | cbfcd3109c1b8b3c9192020968e0e42e18364ebe | [
"MIT"
] | null | null | null | functional_tests/test_layout_and_styling.py | gajimenezmaggiora/gus-django | cbfcd3109c1b8b3c9192020968e0e42e18364ebe | [
"MIT"
] | null | null | null | functional_tests/test_layout_and_styling.py | gajimenezmaggiora/gus-django | cbfcd3109c1b8b3c9192020968e0e42e18364ebe | [
"MIT"
] | null | null | null | from .base import FunctionalTest
class LayoutAndStylingTest(FunctionalTest):
def test_layout_and_styling(self):
# Edith goes to the home page
self.browser.get(self.server_url)
self.browser.set_window_size(1024, 768)
# She notices the input box is nicely centered
inputbox = self.get_item_input_box()
self.assertAlmostEqual(
inputbox.location['x'] + inputbox.size['width'] / 2,
512,
delta=5
)
# She starts a new list and sees the input is nicely
# centered there too
inputbox.send_keys('testing\n')
inputbox = self.get_item_input_box()
self.assertAlmostEqual(
inputbox.location['x'] + inputbox.size['width'] / 2,
512,
delta=5
)
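# Illustrative helper (not part of the original test): both assertions above
# check horizontal centering the same way, comparing the element's midpoint
# (x position plus half its width) against half of the 1024 px window, within
# a small tolerance.  The name below is hypothetical.
def element_center_x(element):
    """Horizontal midpoint of a Selenium element, in page pixels."""
    return element.location['x'] + element.size['width'] / 2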
| 30.481481 | 64 | 0.592953 |
4a2522f8d2d12d0295749f0b52e2c6f1291959c7 | 1,742 | py | Python | desktop/core/ext-py/django-nose-1.4.5/testapp/tests.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/django-nose-1.4.5/testapp/tests.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/django-nose-1.4.5/testapp/tests.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | """Django model tests."""
from datetime import datetime
from django.test import TestCase
from testapp.models import Question, Choice
class NoDatabaseTestCase(TestCase):
"""Tests that don't read or write to the database."""
def test_question_str(self):
"""Test Question.__str__ method."""
question = Question(question_text="What is your name?")
self.assertEqual("What is your name?", str(question))
def test_choice_str(self):
"""Test Choice.__str__ method."""
choice = Choice(choice_text='My name is Sir Lancelot of Camelot.')
self.assertEqual('My name is Sir Lancelot of Camelot.', str(choice))
class UsesDatabaseTestCase(TestCase):
"""Tests that read and write to the database."""
def test_question(self):
"""Test that votes is initialized to 0."""
question = Question.objects.create(
question_text="What is your quest?", pub_date=datetime(1975, 4, 9))
Choice.objects.create(
question=question, choice_text="To seek the Holy Grail.")
self.assertTrue(question.choice_set.exists())
the_choice = question.choice_set.get()
self.assertEqual(0, the_choice.votes)
class UsesFixtureTestCase(TestCase):
"""Tests that use a test fixture."""
fixtures = ["testdata.json"]
def test_fixture_loaded(self):
"""Test that fixture was loaded."""
question = Question.objects.get()
self.assertEqual(
'What is your favorite color?', question.question_text)
self.assertEqual(datetime(1975, 4, 9), question.pub_date)
choice = question.choice_set.get()
self.assertEqual("Blue.", choice.choice_text)
self.assertEqual(3, choice.votes)
| 34.156863 | 79 | 0.665901 |
4a252330bcb4745491c4f46df7a32afdb94b7928 | 24,492 | py | Python | electrum_dsv/plugins/coldcard/coldcard.py | mboyd1/electrum-dsv | 1f8e26e6f6a50827fd83dfe018c5916fadde10c1 | [
"MIT"
] | null | null | null | electrum_dsv/plugins/coldcard/coldcard.py | mboyd1/electrum-dsv | 1f8e26e6f6a50827fd83dfe018c5916fadde10c1 | [
"MIT"
] | null | null | null | electrum_dsv/plugins/coldcard/coldcard.py | mboyd1/electrum-dsv | 1f8e26e6f6a50827fd83dfe018c5916fadde10c1 | [
"MIT"
] | null | null | null | #
# Coldcard Electrum plugin main code.
#
#
import os, time, io
import traceback
from typing import TYPE_CHECKING, Optional
import struct
from electrum_dsv import bip32
from electrum_dsv.bip32 import BIP32Node, InvalidMasterKeyVersionBytes
from electrum_dsv.i18n import _
from electrum_dsv.plugin import Device, hook, runs_in_hwd_thread
from electrum_dsv.keystore import Hardware_KeyStore, KeyStoreWithMPK
from electrum_dsv.transaction import PartialTransaction
from electrum_dsv.wallet import Standard_Wallet, Multisig_Wallet, Abstract_Wallet
from electrum_dsv.util import bfh, bh2u, versiontuple, UserFacingException
from electrum_dsv.base_wizard import ScriptTypeNotSupported
from electrum_dsv.logging import get_logger
from ..hw_wallet import HW_PluginBase, HardwareClientBase
from ..hw_wallet.plugin import LibraryFoundButUnusable, only_hook_if_libraries_available
_logger = get_logger(__name__)
try:
import hid
from ckcc.protocol import CCProtocolPacker, CCProtocolUnpacker
from ckcc.protocol import CCProtoError, CCUserRefused, CCBusyError
from ckcc.constants import (MAX_MSG_LEN, MAX_BLK_LEN, MSG_SIGNING_MAX_LENGTH, MAX_TXN_LEN,
AF_CLASSIC, AF_P2SH, AF_P2WPKH, AF_P2WSH, AF_P2WPKH_P2SH, AF_P2WSH_P2SH)
from ckcc.client import ColdcardDevice, COINKITE_VID, CKCC_PID, CKCC_SIMULATOR_PATH
requirements_ok = True
class ElectrumColdcardDevice(ColdcardDevice):
# avoid use of pycoin for MiTM message signature test
def mitm_verify(self, sig, expect_xpub):
# verify a signature (65 bytes) over the session key, using the master bip32 node
# - customized to use specific EC library of Electrum.
pubkey = BIP32Node.from_xkey(expect_xpub).eckey
try:
pubkey.verify_message_hash(sig[1:65], self.session_key)
return True
except:
return False
except ImportError:
requirements_ok = False
COINKITE_VID = 0xd13e
CKCC_PID = 0xcc10
CKCC_SIMULATED_PID = CKCC_PID ^ 0x55aa
class CKCCClient(HardwareClientBase):
def __init__(self, plugin, handler, dev_path, *, is_simulator=False):
HardwareClientBase.__init__(self, plugin=plugin)
self.device = plugin.device
self.handler = handler
# if we know what the (xfp, xpub) "should be" then track it here
self._expected_device = None
if is_simulator:
self.dev = ElectrumColdcardDevice(dev_path, encrypt=True)
else:
# open the real HID device
hd = hid.device(path=dev_path)
hd.open_path(dev_path)
self.dev = ElectrumColdcardDevice(dev=hd, encrypt=True)
# NOTE: MiTM test is delayed until we have a hint as to what XPUB we
# should expect. It's also kinda slow.
def __repr__(self):
return '<CKCCClient: xfp=%s label=%r>' % (xfp2str(self.dev.master_fingerprint),
self.label())
@runs_in_hwd_thread
def verify_connection(self, expected_xfp: int, expected_xpub=None):
ex = (expected_xfp, expected_xpub)
if self._expected_device == ex:
# all is as expected
return
if expected_xpub is None:
expected_xpub = self.dev.master_xpub
if ( (self._expected_device is not None)
or (self.dev.master_fingerprint != expected_xfp)
or (self.dev.master_xpub != expected_xpub)):
# probably indicating programing error, not hacking
_logger.info(f"xpubs. reported by device: {self.dev.master_xpub}. "
f"stored in file: {expected_xpub}")
raise RuntimeError("Expecting %s but that's not what's connected?!" %
xfp2str(expected_xfp))
# check signature over session key
# - mitm might have lied about xfp and xpub up to here
# - important that we use value capture at wallet creation time, not some value
# we read over USB today
self.dev.check_mitm(expected_xpub=expected_xpub)
self._expected_device = ex
if not getattr(self, 'ckcc_xpub', None):
self.ckcc_xpub = expected_xpub
_logger.info("Successfully verified against MiTM")
def is_pairable(self):
# can't do anything w/ devices that aren't setup (this code not normally reachable)
return bool(self.dev.master_xpub)
@runs_in_hwd_thread
def close(self):
# close the HID device (so can be reused)
self.dev.close()
self.dev = None
def is_initialized(self):
return bool(self.dev.master_xpub)
def label(self):
# 'label' of this Coldcard. Warning: gets saved into wallet file, which might
# not be encrypted, so better for privacy if based on xpub/fingerprint rather than
# USB serial number.
if self.dev.is_simulator:
lab = 'Coldcard Simulator ' + xfp2str(self.dev.master_fingerprint)
elif not self.dev.master_fingerprint:
            # fallback; not expected
lab = 'Coldcard #' + self.dev.serial
else:
lab = 'Coldcard ' + xfp2str(self.dev.master_fingerprint)
return lab
def manipulate_keystore_dict_during_wizard_setup(self, d: dict):
master_xpub = self.dev.master_xpub
if master_xpub is not None:
try:
node = BIP32Node.from_xkey(master_xpub)
except InvalidMasterKeyVersionBytes:
raise UserFacingException(
_('Invalid xpub magic. Make sure your {} device is set to the correct chain.').format(self.device) + ' ' +
_('You might have to unplug and plug it in again.')
) from None
d['ckcc_xpub'] = master_xpub
@runs_in_hwd_thread
def has_usable_connection_with_device(self):
# Do end-to-end ping test
try:
self.ping_check()
return True
except:
return False
@runs_in_hwd_thread
def get_xpub(self, bip32_path, xtype):
assert xtype in ColdcardPlugin.SUPPORTED_XTYPES
_logger.info('Derive xtype = %r' % xtype)
xpub = self.dev.send_recv(CCProtocolPacker.get_xpub(bip32_path), timeout=5000)
# TODO handle timeout?
# change type of xpub to the requested type
try:
node = BIP32Node.from_xkey(xpub)
except InvalidMasterKeyVersionBytes:
raise UserFacingException(_('Invalid xpub magic. Make sure your {} device is set to the correct chain.')
.format(self.device)) from None
if xtype != 'standard':
xpub = node._replace(xtype=xtype).to_xpub()
return xpub
@runs_in_hwd_thread
def ping_check(self):
# check connection is working
assert self.dev.session_key, 'not encrypted?'
req = b'1234 Electrum Plugin 4321' # free up to 59 bytes
try:
echo = self.dev.send_recv(CCProtocolPacker.ping(req))
assert echo == req
except:
raise RuntimeError("Communication trouble with Coldcard")
@runs_in_hwd_thread
def show_address(self, path, addr_fmt):
# prompt user w/ address, also returns it immediately.
return self.dev.send_recv(CCProtocolPacker.show_address(path, addr_fmt), timeout=None)
@runs_in_hwd_thread
def show_p2sh_address(self, *args, **kws):
# prompt user w/ p2sh address, also returns it immediately.
return self.dev.send_recv(CCProtocolPacker.show_p2sh_address(*args, **kws), timeout=None)
@runs_in_hwd_thread
def get_version(self):
# gives list of strings
return self.dev.send_recv(CCProtocolPacker.version(), timeout=1000).split('\n')
@runs_in_hwd_thread
def sign_message_start(self, path, msg):
# this starts the UX experience.
self.dev.send_recv(CCProtocolPacker.sign_message(msg, path), timeout=None)
@runs_in_hwd_thread
def sign_message_poll(self):
# poll device... if user has approved, will get tuple: (addr, sig) else None
return self.dev.send_recv(CCProtocolPacker.get_signed_msg(), timeout=None)
@runs_in_hwd_thread
def sign_transaction_start(self, raw_psbt: bytes, *, finalize: bool = False):
# Multiple steps to sign:
# - upload binary
# - start signing UX
# - wait for coldcard to complete process, or have it refused.
# - download resulting txn
assert 20 <= len(raw_psbt) < MAX_TXN_LEN, 'PSBT is too big'
dlen, chk = self.dev.upload_file(raw_psbt)
resp = self.dev.send_recv(CCProtocolPacker.sign_transaction(dlen, chk, finalize=finalize),
timeout=None)
if resp != None:
raise ValueError(resp)
@runs_in_hwd_thread
def sign_transaction_poll(self):
        # poll device... if user has approved, will get tuple: (length, checksum) else None
return self.dev.send_recv(CCProtocolPacker.get_signed_txn(), timeout=None)
@runs_in_hwd_thread
def download_file(self, length, checksum, file_number=1):
# get a file
return self.dev.download_file(length, checksum, file_number=file_number)
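# Sketch only (not part of the plugin): CKCCClient exposes start/poll pairs
# (sign_message_start/_poll and sign_transaction_start/_poll), where the host
# starts an operation on the Coldcard and then polls until the user approves
# or refuses it on the device.  The keystore methods below open-code that
# loop; this helper just shows its shape, with an illustrative name and the
# same 250 ms pacing.
def _poll_until_done(poll, interval=0.250):
    """Call `poll()` repeatedly until it returns something other than None."""
    while True:
        resp = poll()
        if resp is not None:
            return resp
        time.sleep(interval)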
class Coldcard_KeyStore(Hardware_KeyStore):
hw_type = 'coldcard'
device = 'Coldcard'
plugin: 'ColdcardPlugin'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.ux_busy = False
# we need to know at least the fingerprint of the master xpub to verify against MiTM
# - device reports these value during encryption setup process
# - full xpub value now optional
self.ckcc_xpub = d.get('ckcc_xpub', None)
def dump(self):
# our additions to the stored data about keystore -- only during creation?
d = Hardware_KeyStore.dump(self)
d['ckcc_xpub'] = self.ckcc_xpub
return d
def get_xfp_int(self) -> int:
xfp = self.get_root_fingerprint()
assert xfp is not None
return xfp_int_from_xfp_bytes(bfh(xfp))
def get_client(self):
        # called when user tries to do something like view address, sign something.
# - not called during probing/setup
# - will fail if indicated device can't produce the xpub (at derivation) expected
rv = self.plugin.get_client(self)
if rv:
xfp_int = self.get_xfp_int()
rv.verify_connection(xfp_int, self.ckcc_xpub)
return rv
def give_error(self, message, clear_client=False):
self.logger.info(message)
if not self.ux_busy:
self.handler.show_error(message)
else:
self.ux_busy = False
if clear_client:
self.client = None
raise UserFacingException(message)
def wrap_busy(func):
# decorator: function takes over the UX on the device.
def wrapper(self, *args, **kwargs):
try:
self.ux_busy = True
return func(self, *args, **kwargs)
finally:
self.ux_busy = False
return wrapper
def decrypt_message(self, pubkey, message, password):
raise UserFacingException(_('Encryption and decryption are currently not supported for {}').format(self.device))
@wrap_busy
def sign_message(self, sequence, message, password):
# Sign a message on device. Since we have big screen, of course we
        # have to show the message unambiguously there first!
try:
msg = message.encode('ascii', errors='strict')
assert 1 <= len(msg) <= MSG_SIGNING_MAX_LENGTH
except (UnicodeError, AssertionError):
# there are other restrictions on message content,
# but let the device enforce and report those
self.handler.show_error('Only short (%d max) ASCII messages can be signed.'
% MSG_SIGNING_MAX_LENGTH)
return b''
path = self.get_derivation_prefix() + ("/%d/%d" % sequence)
try:
cl = self.get_client()
try:
self.handler.show_message("Signing message (using %s)..." % path)
cl.sign_message_start(path, msg)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = cl.sign_message_poll()
if resp is not None:
break
finally:
self.handler.finished()
assert len(resp) == 2
addr, raw_sig = resp
# already encoded in Bitcoin fashion, binary.
assert 40 < len(raw_sig) <= 65
return raw_sig
except (CCUserRefused, CCBusyError) as exc:
self.handler.show_error(str(exc))
except CCProtoError as exc:
            self.logger.exception('Error signing message')
            self.handler.show_error('{}\n\n{}'.format(
                _('Error signing message') + ':', str(exc)))
except Exception as e:
self.give_error(e, True)
# give empty bytes for error cases; it seems to clear the old signature box
return b''
@wrap_busy
def sign_transaction(self, tx, password):
# Upload PSBT for signing.
# - we can also work offline (without paired device present)
if tx.is_complete():
return
client = self.get_client()
assert client.dev.master_fingerprint == self.get_xfp_int()
raw_psbt = tx.serialize_as_bytes()
try:
try:
self.handler.show_message("Authorize Transaction...")
client.sign_transaction_start(raw_psbt)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = client.sign_transaction_poll()
if resp is not None:
break
rlen, rsha = resp
# download the resulting txn.
raw_resp = client.download_file(rlen, rsha)
finally:
self.handler.finished()
except (CCUserRefused, CCBusyError) as exc:
self.logger.info(f'Did not sign: {exc}')
self.handler.show_error(str(exc))
return
except BaseException as e:
self.logger.exception('')
self.give_error(e, True)
return
tx2 = PartialTransaction.from_raw_psbt(raw_resp)
# apply partial signatures back into txn
tx.combine_with_other_psbt(tx2)
# caller's logic looks at tx now and if it's sufficiently signed,
# will send it if that's the user's intent.
@staticmethod
def _encode_txin_type(txin_type):
# Map from Electrum code names to our code numbers.
return {'standard': AF_CLASSIC, 'p2pkh': AF_CLASSIC,
'p2sh': AF_P2SH,
'p2wpkh-p2sh': AF_P2WPKH_P2SH,
'p2wpkh': AF_P2WPKH,
'p2wsh-p2sh': AF_P2WSH_P2SH,
'p2wsh': AF_P2WSH,
}[txin_type]
@wrap_busy
def show_address(self, sequence, txin_type):
client = self.get_client()
address_path = self.get_derivation_prefix()[2:] + "/%d/%d"%sequence
addr_fmt = self._encode_txin_type(txin_type)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_address(address_path, addr_fmt)
# we could double check address here
finally:
self.handler.finished()
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}\n\n{}'.format(
_('Error showing address') + ':', str(exc)))
except BaseException as exc:
self.logger.exception('')
self.handler.show_error(exc)
@wrap_busy
def show_p2sh_address(self, M, script, xfp_paths, txin_type):
client = self.get_client()
addr_fmt = self._encode_txin_type(txin_type)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_p2sh_address(M, xfp_paths, script, addr_fmt=addr_fmt)
# we could double check address here
finally:
self.handler.finished()
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}.\n{}\n\n{}'.format(
_('Error showing address'),
_('Make sure you have imported the correct wallet description '
'file on the device for this multisig wallet.'),
str(exc)))
except BaseException as exc:
self.logger.exception('')
self.handler.show_error(exc)
class ColdcardPlugin(HW_PluginBase):
keystore_class = Coldcard_KeyStore
minimum_library = (0, 7, 7)
DEVICE_IDS = [
(COINKITE_VID, CKCC_PID),
(COINKITE_VID, CKCC_SIMULATED_PID)
]
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_devices(self.DEVICE_IDS, plugin=self)
self.device_manager().register_enumerate_func(self.detect_simulator)
def get_library_version(self):
import ckcc
try:
version = ckcc.__version__
except AttributeError:
version = 'unknown'
if requirements_ok:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def detect_simulator(self):
# if there is a simulator running on this machine,
# return details about it so it's offered as a pairing choice
fn = CKCC_SIMULATOR_PATH
if os.path.exists(fn):
return [Device(path=fn,
interface_number=-1,
id_=fn,
product_key=(COINKITE_VID, CKCC_SIMULATED_PID),
usage_page=0,
transport_ui_string='simulator')]
return []
@runs_in_hwd_thread
def create_client(self, device, handler):
if handler:
self.handler = handler
# We are given a HID device, or at least some details about it.
        # Not sure why we aren't just given a HID library handle, but
        # the 'path' is unambiguous, so we'll use that.
try:
rv = CKCCClient(self, handler, device.path,
is_simulator=(device.product_key[1] == CKCC_SIMULATED_PID))
return rv
except Exception as e:
self.logger.exception('late failure connecting to device?')
return None
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
# this seems to be part of the pairing process only, not during normal ops?
# base_wizard:on_hw_derivation
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
client.ping_check()
xpub = client.get_xpub(derivation, xtype)
return xpub
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['CKCCClient']:
# Acquire a connection to the hardware device (via USB)
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
if client is not None:
client.ping_check()
return client
@staticmethod
def export_ms_wallet(wallet: Multisig_Wallet, fp, name):
# Build the text file Coldcard needs to understand the multisig wallet
# it is participating in. All involved Coldcards can share same file.
assert isinstance(wallet, Multisig_Wallet)
print('# Exported from Electrum', file=fp)
print(f'Name: {name:.20s}', file=fp)
print(f'Policy: {wallet.m} of {wallet.n}', file=fp)
print(f'Format: {wallet.txin_type.upper()}' , file=fp)
xpubs = []
derivs = set()
for xpub, ks in zip(wallet.get_master_public_keys(), wallet.get_keystores()): # type: str, KeyStoreWithMPK
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix=[], only_der_suffix=False)
fp_hex = fp_bytes.hex().upper()
der_prefix_str = bip32.convert_bip32_intpath_to_strpath(der_full)
xpubs.append( (fp_hex, xpub, der_prefix_str) )
derivs.add(der_prefix_str)
# Derivation doesn't matter too much to the Coldcard, since it
# uses key path data from PSBT or USB request as needed. However,
# if there is a clear value, provide it.
if len(derivs) == 1:
print("Derivation: " + derivs.pop(), file=fp)
print('', file=fp)
assert len(xpubs) == wallet.n
for xfp, xpub, der_prefix in xpubs:
if derivs:
# show as a comment if unclear
print(f'# derivation: {der_prefix}', file=fp)
print(f'{xfp}: {xpub}\n', file=fp)
def show_address(self, wallet, address, keystore: 'Coldcard_KeyStore' = None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
txin_type = wallet.get_txin_type(address)
# Standard_Wallet => not multisig, must be bip32
if type(wallet) is Standard_Wallet:
sequence = wallet.get_address_index(address)
keystore.show_address(sequence, txin_type)
elif type(wallet) is Multisig_Wallet:
assert isinstance(wallet, Multisig_Wallet) # only here for type-hints in IDE
# More involved for P2SH/P2WSH addresses: need M, and all public keys, and their
# derivation paths. Must construct script, and track fingerprints+paths for
# all those keys
pubkey_deriv_info = wallet.get_public_keys_with_deriv_info(address)
pubkey_hexes = sorted([pk.hex() for pk in list(pubkey_deriv_info)])
xfp_paths = []
for pubkey_hex in pubkey_hexes:
pubkey = bytes.fromhex(pubkey_hex)
ks, der_suffix = pubkey_deriv_info[pubkey]
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix, only_der_suffix=False)
xfp_int = xfp_int_from_xfp_bytes(fp_bytes)
xfp_paths.append([xfp_int] + list(der_full))
script = bfh(wallet.pubkeys_to_scriptcode(pubkey_hexes))
keystore.show_p2sh_address(wallet.m, script, xfp_paths, txin_type)
else:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
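# Hypothetical example of the text file produced by export_ms_wallet() above,
# for a 2-of-3 P2WSH wallet.  Every fingerprint and xpub below is a made-up
# placeholder; the real file is generated from the wallet's keystores.
_EXAMPLE_MS_EXPORT = "\n".join([
    "# Exported from Electrum",
    "Name: my-2of3",
    "Policy: 2 of 3",
    "Format: P2WSH",
    "Derivation: m/48'/0'/0'/2'",
    "",
    "0F056943: xpub6E...placeholder1",
    "6BA6CFD0: xpub6F...placeholder2",
    "747B698E: xpub6G...placeholder3",
])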
def xfp_int_from_xfp_bytes(fp_bytes: bytes) -> int:
return int.from_bytes(fp_bytes, byteorder="little", signed=False)
def xfp2str(xfp: int) -> str:
# Standardized way to show an xpub's fingerprint... it's a 4-byte string
# and not really an integer. Used to show as '0x%08x' but that's wrong endian.
return struct.pack('<I', xfp).hex().lower()
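# Quick illustration of the two helpers above (not called anywhere): a BIP32
# fingerprint is really a 4-byte string, read little-endian by
# xfp_int_from_xfp_bytes() and rendered back as lowercase hex by xfp2str().
# The value used here is arbitrary.
def _xfp_roundtrip_demo():
    fp_bytes = bytes.fromhex('0f056943')
    xfp = xfp_int_from_xfp_bytes(fp_bytes)   # int.from_bytes(..., 'little')
    assert xfp2str(xfp) == '0f056943'        # same 4 bytes back as hex
    return xfp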
# EOF
| 38.031056 | 133 | 0.621591 |
4a2524653fa4c95c4b1c1d98c2b2572aa54c6668 | 69,092 | py | Python | reflred/steps.py | krzywon/reductus | cc5c42ac619bf6150b7f07bde02a689affa910a1 | [
"Unlicense"
] | null | null | null | reflred/steps.py | krzywon/reductus | cc5c42ac619bf6150b7f07bde02a689affa910a1 | [
"Unlicense"
] | null | null | null | reflred/steps.py | krzywon/reductus | cc5c42ac619bf6150b7f07bde02a689affa910a1 | [
"Unlicense"
] | null | null | null | # This program is public domain
import os
import numpy as np
from copy import copy
from dataflow.automod import cache, nocache, module
# TODO: maybe bring back formula to show the math of each step
# TODO: what about polarized data?
@module
def nop(data):
"""
No operation.
**Inputs**
data (refldata[]): Input data
**Returns**
output (refldata[]): Unaltered data
2015-12-31 Paul Kienzle
"""
return data
@module
def ncnr_load(filelist=None, check_timestamps=True):
"""
Load a list of nexus files from the NCNR data server.
**Inputs**
filelist (fileinfo[]): List of files to open.
check_timestamps (bool): verify that timestamps on file match request
**Returns**
output (refldata[]): All entries of all files in the list.
2016-06-29 Brian Maranville
| 2017-08-21 Brian Maranville Change to refldata, force cache invalidate
| 2018-06-18 Brian Maranville Change to nexusref to ignore areaDetector
| 2018-12-10 Brian Maranville get_plottable routines moved to python data container from js
| 2020-03-03 Paul Kienzle Just load. Don't even compute divergence
"""
# NB: used mainly to set metadata for processing, so keep it minimal
# TODO: make a metadata loader that does not send all data to browser
# NB: Fileinfo is a structure with
# { path: "location/on/server", mtime: timestamp }
from .load import url_load_list
datasets = []
for data in url_load_list(filelist, check_timestamps=check_timestamps):
datasets.append(data)
return datasets
@module
def dark_current(data, poly_coeff=[0], poly_cov=None):
r"""
Correct for the dark current, which is the average number of
spurious counts per minute of measurement on each detector channel.
**Inputs**
data (refldata[]) : data to scale
poly_coeff {Polynomial coefficients of dark current vs slit1} (float[])
: Polynomial coefficients (highest order to lowest) representing the dark current as a function of slit 1 opening. Units in counts/(minute . mm ^ N), for the coefficient of (slit1 ^ N). Default is [0].
poly_cov {Polynomial covariance matrix} (float[])
: Flattened covariance matrix for polynomial coefficients if error propagation is desired. For an order N polynomial, must have size N^2. If left blank, no error propagation will occur.
**Returns**
output (refldata[]): Dark current subtracted data.
darkcurrent (refldata[]): Dark current that was subtracted (for plotting)
| 2020-03-04 Paul Kienzle
| 2020-03-12 Paul Kienzle Add slit 1 dependence for DC rate
| 2021-06-11 David Hoogerheide generalize to refldata, prevent either adding counts or oversubtracting
| 2021-06-13 David Hoogerheide add dark current output
| 2021-06-14 David Hoogerheide change to polynomial input and add error propagation
"""
# TODO: datatype hierarchy: accepts any kind of refldata
from dataflow.lib.uncertainty import Uncertainty as U
datasets = list()
dcs = list()
order = len(poly_coeff)
for d in data:
dcdata = copy(d) # hackish way to get dark current counts
dcdata.detector = copy(d.detector)
# calculate rate and Jacobian at each point
rate = np.polyval(poly_coeff, dcdata.slit1.x)
# error propagation
rate_var = np.zeros_like(rate)
if poly_cov is not None:
poly_cov = np.array(poly_cov).reshape((order, order))
for i, s1 in enumerate(dcdata.slit1.x):
J = np.array([s1**float(i) for i in range(0, order)[::-1]])
rate_var[i] = np.dot(J.T, np.dot(poly_cov, J))
dc = dcdata.monitor.count_time*(rate/60.)
dc[dc < 0] = 0.0 # do not allow addition of dark counts from negative rates
dc_var = rate_var * (dcdata.monitor.count_time/60.)**2
# condition dark counts to the correct dimensionality
ndetectordims = np.ndim(d.detector.counts)
dc = np.expand_dims(dc, tuple(range(1, ndetectordims)))
dc_var = np.expand_dims(dc_var, tuple(range(1, ndetectordims)))
dcdata.detector.counts = np.ones_like(dcdata.detector.counts) * dc # should preserve dimensionality correctly
dcdata.detector.counts_variance = np.ones_like(dcdata.detector.counts_variance) * dc_var # should preserve dimensionality correctly
detcounts = U(d.detector.counts, d.detector.counts_variance)
darkcounts = U(dcdata.detector.counts, dcdata.detector.counts_variance)
detdarkdiff = detcounts - darkcounts
d.detector.counts, d.detector.counts_variance = detdarkdiff.x, detdarkdiff.variance
d.detector.counts[d.detector.counts < 0] = 0.0
# only renormalize if apply_norm has already populated d.normbase, i.e. if it's a standalone module
if d.normbase is not None:
d = normalize(d, d.normbase)
dcdata = normalize(dcdata, dcdata.normbase)
# create outputs
datasets.append(d)
dcs.append(dcdata)
return datasets, dcs
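# Standalone sketch (not called by the reduction chain): dark_current() above
# evaluates rate = polyval(poly_coeff, slit1) and, when a covariance matrix C
# for the polynomial coefficients is supplied, propagates it through the
# Jacobian J = [s1**(N-1), ..., s1, 1] as var = J^T C J.  The helper spells
# that out for a single slit-1 opening; the name is illustrative only.
def _dark_rate_with_variance(poly_coeff, poly_cov, s1):
    order = len(poly_coeff)
    rate = np.polyval(poly_coeff, s1)
    J = np.array([s1**float(i) for i in range(order)[::-1]])
    var = float(J @ np.asarray(poly_cov).reshape(order, order) @ J)
    return rate, var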
@module
def fit_dead_time(data, source='detector', mode='auto'):
"""
Fit detector dead time constants (paralyzing and non-paralyzing) from
measurement of attenuated and unattenuated data for a range of count rates.
**Inputs**
data (refldata[]): Data sets with different attenuation levels
source (opt:detector|monitor): Tube that is being measured
mode (opt:P|NP|mixed|auto): Dead-time mode
**Returns**
dead_time (deadtime): Dead time constants, attenuator estimate and beam rate
2015-12-17 Paul Kienzle
"""
from .deadtime import fit_dead_time
dead_time = fit_dead_time(data, source=source, mode=mode)
return dead_time
@module
def monitor_dead_time(data, dead_time, nonparalyzing=0.0, paralyzing=0.0):
"""
Correct the monitor dead time from the fitted dead time.
The deadtime constants are chosen as follows:
#. If either *tau_NP* and *tau_P* are non-zero, then use them.
#. If the dead time terminal is attached to a dead time fit, use it.
#. If the dead time constants are given in the data file, then use them.
#. Otherwise don't do any dead time correction.
**Inputs**
data (refldata) : Uncorrected data
dead_time (deadtime?) : Output of dead time estimator
nonparalyzing (float:us<0,inf>) : non-paralyzing dead time constant
paralyzing (float:us<0,inf>) : paralyzing dead time constant
**Returns**
output (refldata): Dead-time corrected data
2015-12-17 Paul Kienzle
"""
from .deadtime import apply_monitor_dead_time
data = copy(data)
data.monitor = copy(data.monitor)
if nonparalyzing != 0.0 or paralyzing != 0.0:
apply_monitor_dead_time(data, tau_NP=nonparalyzing,
tau_P=paralyzing)
elif dead_time is not None:
apply_monitor_dead_time(data, tau_NP=dead_time.tau_NP,
tau_P=dead_time.tau_P)
elif data.monitor.deadtime is not None and np.isfinite(data.monitor.deadtime).all():
try:
tau_NP, tau_P = data.monitor.deadtime
except Exception:
tau_NP, tau_P = data.monitor.deadtime, 0.0
apply_monitor_dead_time(data, tau_NP=tau_NP, tau_P=tau_P)
else:
pass # no deadtime correction parameters available.
return data
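# --- Editor's illustrative sketch (not part of the original module) ---
# The real corrections live in .deadtime; this helper only shows the standard
# non-paralyzing counting model that such a correction inverts, so the meaning
# of a dead-time constant is concrete. Note the module inputs above are in
# microseconds, while tau_NP here is in seconds; the values are hypothetical.
def _example_nonparalyzing_deadtime(observed_rate, tau_NP=5e-6):
    """Recover the true rate from an observed rate assuming a purely
    non-paralyzing detector: observed = true / (1 + true*tau)."""
    import numpy as np
    observed_rate = np.asarray(observed_rate, dtype=float)
    # invert observed = true/(1 + true*tau)  =>  true = observed/(1 - observed*tau)
    return observed_rate / (1.0 - observed_rate * tau_NP)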
@module
def detector_dead_time(data, dead_time, nonparalyzing=0.0, paralyzing=0.0):
"""
Correct the detector dead time from the fitted dead time.
If *tau_NP* and *tau_P* are non-zero, then use them. If a dead_time
fit result is supplied, then use it. If the dead time constants are
given in the data file, then use them. Otherwise don't do any
dead time correction.
**Inputs**
data (refldata) : Uncorrected data
dead_time (deadtime?) : Output from dead time estimator
nonparalyzing (float:us<0,inf>) : non-paralyzing dead time constant
paralyzing (float:us<0,inf>) : paralyzing dead time constant
**Returns**
output (refldata): Dead-time corrected data
2016-03-21 Paul Kienzle
"""
from .deadtime import apply_detector_dead_time
data = copy(data)
if nonparalyzing != 0.0 or paralyzing != 0.0:
data.detector = copy(data.detector)
apply_detector_dead_time(data, tau_NP=nonparalyzing,
tau_P=paralyzing)
elif dead_time is not None:
data.detector = copy(data.detector)
apply_detector_dead_time(data, tau_NP=dead_time.tau_NP,
tau_P=dead_time.tau_P)
elif data.detector.deadtime is not None and not np.all(np.isnan(data.detector.deadtime)):
try:
tau_NP, tau_P = data.detector.deadtime
except Exception:
tau_NP, tau_P = data.detector.deadtime, 0.0
data.detector = copy(data.detector)
apply_detector_dead_time(data, tau_NP=tau_NP, tau_P=tau_P)
else:
raise ValueError("no valid deadtime provided in file or parameter")
return data
@module
def monitor_saturation(data):
"""
Correct the monitor dead time from stored saturation curve.
**Inputs**
data (refldata): Uncorrected data
**Returns**
output (refldata): Dead-time corrected data
2017-02-22 Paul Kienzle
"""
from .deadtime import apply_monitor_saturation
data = copy(data)
if getattr(data.monitor, 'saturation', None) is not None:
data.monitor = copy(data.monitor)
apply_monitor_saturation(data)
else:
data.warn("no monitor saturation for %r"%data.name)
return data
@module
def detector_saturation(data):
"""
Correct the detector dead time from stored saturation curve.
**Inputs**
data (refldata): Uncorrected data
**Returns**
output (refldata): Dead-time corrected data
2015-12-17 Paul Kienzle
"""
from .deadtime import apply_detector_saturation
data = copy(data)
if getattr(data.detector, 'saturation', None) is not None:
#print("detector "+str(data.detector.__dict__))
data.detector = copy(data.detector)
apply_detector_saturation(data)
else:
data.warn("no detector saturation for %r"%data.name)
return data
@module
def theta_offset(data, offset=0.0):
"""
Correct the theta offset of the data for a misaligned sample, shifting
sample and detector angle and updating $q_x$ and $q_z$.
**Inputs**
data (refldata) : Uncorrected data
offset (float:degree) : amount of shift to add to sample angle and subtract
from detector angle
**Returns**
output (refldata): Offset corrected data
2015-12-17 Paul Kienzle
"""
from .angles import apply_theta_offset
data = copy(data)
data.sample = copy(data.sample)
data.detector = copy(data.detector)
data.sample.angle_x = copy(data.sample.angle_x)
data.detector.angle_x = copy(data.detector.angle_x)
apply_theta_offset(data, offset)
return data
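# --- Editor's illustrative sketch (not part of the original module) ---
# apply_theta_offset is implemented in .angles; the helper below only shows why
# a small misalignment matters: Qz = (4*pi/wavelength)*sin(theta), so an offset
# dtheta shifts Qz by roughly (4*pi/wavelength)*cos(theta)*dtheta. The default
# wavelength and angles are hypothetical.
def _example_qz_shift(theta_deg=1.0, offset_deg=0.01, wavelength=4.75):
    import numpy as np
    qz_raw = 4 * np.pi / wavelength * np.sin(np.radians(theta_deg))
    qz_corrected = 4 * np.pi / wavelength * np.sin(np.radians(theta_deg + offset_deg))
    return qz_raw, qz_corrected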
@module
def back_reflection(data):
"""
Reverse the sense of the reflection angles, making positive angles
negative and vice versa.
**Inputs**
data (refldata): Uncorrected data
**Returns**
output (refldata): Angle corrected data
2015-12-17 Paul Kienzle
"""
from .angles import apply_back_reflection
data = copy(data)
data.sample = copy(data.sample)
data.detector = copy(data.detector)
data.sample.angle_x = copy(data.sample.angle_x)
data.detector.angle_x = copy(data.detector.angle_x)
apply_back_reflection(data)
return data
@module
def absolute_angle(data):
"""
Assume all reflection is off the top surface, reversing the sense
of negative angles.
**Inputs**
data (refldata): Uncorrected data
**Returns**
output (refldata): Angle corrected data
2015-12-17 Paul Kienzle
"""
from .angles import apply_absolute_angle
data = copy(data)
data.sample = copy(data.sample)
data.detector = copy(data.detector)
data.sample.angle_x = copy(data.sample.angle_x)
data.detector.angle_x = copy(data.detector.angle_x)
apply_absolute_angle(data)
return data
@module
def sample_broadening(data, width=0):
r"""
Increase (or decrease) nominal divergence due to the effects of sample
broadening (or focussing) if this is not supported by the reflectivity
analysis program.
**Inputs**
data (refldata): data without resolution estimate
width (float) : amount of increased divergence in degrees, using
1-\ $\sigma$ change in width. This can be estimated from the FWHM of the
rocking curve relative to the expected value with no broadening, divided
by 2.35 to convert FWHM to 1-\ $\sigma$.
**Returns**
output (refldata): data with resolution estimate
2020-05-05 Paul Kienzle
"""
from .angles import apply_sample_broadening
if width != 0:
data = copy(data)
apply_sample_broadening(data, width)
return data
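# --- Editor's illustrative sketch (not part of the original module) ---
# The *width* input above is a 1-sigma increase in divergence estimated from a
# rocking curve, as described in the docstring. This helper only spells out that
# arithmetic; it is not the library implementation (apply_sample_broadening).
def _example_broadening_width(fwhm_measured, fwhm_expected):
    """Convert excess rocking-curve FWHM (degrees) to a 1-sigma width."""
    return (fwhm_measured - fwhm_expected) / 2.35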
@module
def divergence_fb(data, sample_width=None):
r"""
Estimate divergence from slit openings. Does nothing if divergence
is already defined by the instrument.
**Inputs**
data (refldata): data without resolution estimate
sample_width (float?:<0,inf>) : width of the sample in mm if it acts like a slit.
By default, this uses the value in the file.
**Returns**
output (refldata): data with resolution estimate
2020-05-05 Paul Kienzle
"""
from .angles import apply_divergence_front_back
if data.angular_resolution is None:
data = copy(data)
apply_divergence_front_back(data, sample_width)
return data
@module
def divergence(data, sample_width=None, sample_broadening=0):
r"""
Estimate divergence from slit openings. Does nothing if divergence
is already defined by the instrument.
**DEPRECATED** use divergence_fb instead
**Inputs**
data (refldata): data without resolution estimate
sample_width (float?:<0,inf>) : width of the sample in mm if it acts like a slit.
By default, this uses the value in the file.
sample_broadening (float) : amount of increased divergence in degrees, using
1-\ $\sigma$ change in width. This can be estimated from the FWHM of the
rocking curve relative to the expected value with no broadening, divided
by 2.35 to convert FWHM to 1-\ $\sigma$.
**Returns**
output (refldata): data with resolution estimate
2016-06-15 Paul Kienzle
2020-05-05 Paul Kienzle
"""
from .angles import apply_divergence_simple, apply_sample_broadening
if data.angular_resolution is None:
data = copy(data)
apply_divergence_simple(data, sample_width)
apply_sample_broadening(data, sample_broadening)
return data
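# --- Editor's illustrative sketch (not part of the original module) ---
# The library computes divergence in .angles (apply_divergence_simple or
# apply_divergence_front_back). The helper below shows one common small-angle
# approximation for the divergence defined by two slits; the slit openings and
# slit-to-sample distances are hypothetical and must share the same length unit.
def _example_slit_divergence(s1, s2, d1=1500.0, d2=300.0):
    """Approximate FWHM angular divergence (degrees) set by slits s1 and s2
    located d1 and d2 upstream of the sample."""
    import numpy as np
    return np.degrees(0.5 * (s1 + s2) / abs(d1 - d2))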
#@module
def mask_specular(data):
"""
Identify and mask out specular points.
This defines the *mask* attribute of *data* as including all points that
are not specular or not previously masked. The points are not actually
removed from the data, since this operation is done by *join*.
**Inputs**
data (refldata) : background data which may contain specular point
**Returns**
output (refldata) : masked data
2015-12-17 Paul Kienzle
"""
from .background import apply_specular_mask
data = copy(data)
apply_specular_mask(data)
return data
def mask_action(data=None, mask_indices=None, **kwargs):
"""
Remove data at the indicated indices
"""
if mask_indices:
data = copy(data)
data.apply_mask(mask_indices)
return data
@module
def mask_points(data, mask_indices=None):
"""
Identify and mask out user-specified points.
This defines the *mask* attribute of *data* to include all data
except those indicated in *mask_indices*. Any previous mask is cleared.
The masked data are not actually removed from the data, since this
operation is done by *join*.
**Inputs**
data (refldata) : background data which may contain specular point
mask_indices (index[]*) : 0-origin data point indices to mask. For example,
*mask_indices=[1,4,6]* masks the 2nd, 5th and 7th point respectively. Each
dataset should have its own mask.
**Returns**
output (refldata) : masked data
| 2018-04-30 Brian Maranville
| 2019-07-02 Brian Maranville: change self.points after mask
"""
data = copy(data)
output = mask_action(data=data, mask_indices=mask_indices)
return output
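# --- Editor's illustrative usage (not part of the original module) ---
# Each dataset receives its own 0-origin index list. The wrapper below drops the
# first and eleventh points of a single dataset; the indices are only an example.
def _example_mask_first_and_eleventh(dataset):
    return mask_points(dataset, mask_indices=[0, 10])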
@module
def mark_intent(data, intent='auto'):
r"""
Mark the file type based on the contents of the file, or override.
*intent* can be 'infer', to guess the intent from the measurement geometry,
'auto' to use the recorded value for the intent if present, otherwise
infer it from the geometry, or the name of the intent.
For inferred intent, it is 'specular' if incident angle matches detector
angle within 0.1*angular divergence, 'background+' if incident angle is
greater than detector angle, 'background-' if incident angle is less
than detector angle, 'slit' if incident and detector angles are zero,
'rock sample' if only the incident angle changes, 'rock detector' if
only the detector angle changes, or 'rock qx' if only $Q_x$ is changing
throughout the scan.
**Inputs**
data (refldata) : data file which may or may not have intent marked
intent (opt:auto|infer|specular|background+\|background-\|slit
\|rock sample|rock detector|rock qx) : intent to register with the
datafile, or auto/infer to guess
**Returns**
output (refldata) : marked data
2016-03-20 Paul Kienzle
"""
from .intent import apply_intent
data = copy(data)
apply_intent(data, intent)
return data
@module
def group_by_intent(data):
"""
Split a bundle into multiple bundles using intent.
**Inputs**
data (refldata[]) : data files with intent marked
**Returns**
specular (refldata[]) : specular measurements
backp {Background+} (refldata[]) : positive offset background measurements
backm {Background-} (refldata[]) : negative offset background measurements
intensity (refldata[]) : beam intensity measurements
rock {Rocking curve} (refldata[]) : rocking curve measurements
other (refldata[]) : everything else
2016-07-20 Brian Maranville
"""
map_intent = {
'specular': 'specular',
'intensity': 'intensity',
'background+': 'backp',
'background-': 'backm',
'rock sample': 'rock',
'rock detector': 'rock',
'rock qx': 'rock',
}
groups = {}
for intent in set(map_intent.values()):
groups[intent] = []
groups['other'] = []
for d in data:
#print("intent %s %s"%(d.intent, d.path))
groups[map_intent.get(d.intent, 'other')].append(d)
return [groups[intent]
for intent in 'specular backp backm intensity rock other'.split()]
@module
def extract_xs(data, xs="++"):
r"""
Get a polarization cross-section from a bundle
**Inputs**
data (refldata[]): data files in of all cross sections
xs {Cross-section} (opt:++\|--\|+-\|-+\|+\|-\|unpolarized): cross-section to extract
**Returns**
output (refldata[]): data matching just that cross-section
| 2016-05-05 Brian Maranville
| 2020-03-24 Brian Maranville: added half-pol cross-sections
"""
# Note: no need to copy data since it is not being modified
if xs == 'unpolarized':
xs = ''
output = [d for d in data if d.polarization == xs]
return output
@module
def filter(data, key="", comparator="eq", value=None):
r"""
Get a subset of the dataset bundle based on the given test
**Inputs**
data (refldata[]): data files in
key (str): name to test in the dataset
value (str?): value to compare
comparator {Compare operator} (opt:eq|ne|lt|le|gt|ge): comparison operator
**Returns**
output (refldata[]): data matching the comparison
2017-02-24 Brian Maranville
"""
import operator
compare_lookup = {
"==": operator.eq,
"!=": operator.ne,
"<": operator.lt,
"<=": operator.le,
">=": operator.ge,
">": operator.gt,
"eq": operator.eq,
"ne": operator.ne,
"lt": operator.lt,
"le": operator.le,
"ge": operator.ge,
"gt": operator.gt,
}
compare_op = compare_lookup[comparator]
return [d for d in data if hasattr(d, key) and compare_op(getattr(d, key), value)]
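# --- Editor's illustrative usage (not part of the original module) ---
# The comparator strings map onto the operator module as shown above. For
# example, keeping only the "++" cross-section of a bundle could be written as
# below (the attribute name and value are just an example):
def _example_keep_uu(bundle):
    return filter(bundle, key="polarization", comparator="eq", value="++")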
@module
def normalize(data, base='auto'):
"""
Estimate the detector count rate.
*base* can be monitor, time, roi, power, or none for no normalization.
For example, if base='monitor' then the count rate will be counts
per monitor count. Note that operations that combine datasets require
the same normalization on the points.
If *base* is auto then the default will be chosen, which is 'monitor'
if the monitor exists, otherwise it is 'time'. If neither exists
(not sure that can happen) then the data will be unnormalized.
The detector region of interest (*roi*) and reactor *power* have not been
tested and should not be used. The detector efficiency, the dead time
corrections and attenuator scaling have not been applied to the roi
measurement. Since the measurement is only useful if a portion of the
detector is exposed to the incident beam, these corrections will be
especially important. In the case where the monitor is unreliable and
reactor power has been fluctuating, you may be able to estimate the
incident intensity based on the integrated reactor power. This uses
a simple average of the reactor power measurements multiplied by the
measurement time.
**Inputs**
data (refldata) : data to normalize
base {Normalize by} (opt:auto|monitor|time|roi|power|none)
: how to convert from counts to count rates
**Returns**
output (refldata) : data with count rate rather than counts
2015-12-17 Paul Kienzle
2020-03-10 Paul Kienzle auto almost always equals monitor
"""
# Note: reflpak supported visualization like "counts per 10000 monitor"
# so that the displayed data looked roughly like the measured data, except
# all scaled to a common monitor. This is not available in reductus.
# TODO: consistent use of data.detector.counts vs. data.v
# see in particular the detector/monitor dead time, spectral efficiency,
# dark current, etc.
from .scale import apply_norm
data = copy(data)
apply_norm(data, base)
return data
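# --- Editor's illustrative sketch (not part of the original module) ---
# apply_norm in .scale does the real work. Conceptually, normalizing by monitor
# divides detector counts by monitor counts so that points measured for
# different lengths of time become comparable. A bare-bones version of that
# idea, with Poisson uncertainty propagation, is sketched here (not the library
# implementation):
def _example_counts_per_monitor(counts, monitor):
    import numpy as np
    counts = np.asarray(counts, dtype=float)
    monitor = np.asarray(monitor, dtype=float)
    rate = counts / monitor
    # var(c/m) ~= c/m**2 + c**2/m**3 for independent Poisson counts c and m
    rate_variance = counts / monitor**2 + counts**2 / monitor**3
    return rate, rate_variance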
@module
def psd_center(data, center=128):
"""
Set center pixel for the detector.
**Inputs**
data (psddata) : data to scale
center (float) : beam center pixel (should be the same for all datasets)
**Returns**
output (psddata) : scaled data
2020-02-04 Paul Kienzle
"""
data = copy(data)
data.detector = copy(data.detector)
data.detector.center = (center, 0)
return data
@module
def psd_integrate(
data, spec_scale=1, spec_pixel=5.,
left_scale=1., left_pixel=5., right_scale=1., right_pixel=5.,
min_pixel=5., max_pixel=251., degree=1., mc_samples=1000,
slices=None, #(0.01, 0.05, 0.10, 0.15),
):
r"""
Integrate specular and background from psd.
Specular and background regions are computed from beam divergence and
pixel width using the following:
spec = spec scale * divergence + spec pixels
left = left scale * divergence + left pixels
right = right scale * divergence + right pixels
The beam divergence used in the equations above is estimated from
the slit openings. The specular signal is the sum over pixels
in [-spec, +spec]. The background signal is determined by fitting
a polynomial of degree n to the pixels in [-spec - left, -spec)
and (spec, spec + right), then integrating that polynomial over
the specular pixels.
Specular uncertainty comes from simply integrating over the pixels.
Background uncertainty comes from the uncertainty in the polynomial
fitting parameters. It can be estimated using Monte Carlo sampling,
or by simple Gaussian propagation of uncertainty if mc samples is 0.
MC estimates are stochastic, so rerunning with a different random
number sequence will give a different result. To make the reduction
reproducible, the number of samples is used as the seed value for the
random number generator. To assess the variation in the background
estimate, try slightly longer sequence lengths. We have found 100
samples gives a background estimate that is approximately stable
(variation in estimate is well within uncertainty). A default value
of 1000 was chosen because it is reasonably fast and reasonably stable.
Test on your own data by comparing mc_samples to mc_samples+1.
The residual after subtracting the background estimate is also
returned. Use this to verify that the integration ranges are
chosen appropriately. There is an additional output to show slices
for selected frames---this will show the quality of the background estimate.
**Inputs**
data (refldata) : data to integrate
spec_scale {Spec scale}(float:<0,inf>) : Specular width as a divergence multiple, or zero for fixed width.
spec_pixel {Spec offset}(float:pixel) : Fixed broadening in pixels (or narrowing if negative)
left_scale {Back- scale}(float:<0,inf>) : Left background width as a divergence multiple, or zero for fixed width.
left_pixel {Back- offset}(float:pixel) : Left background shift in pixels.
right_scale {Back+ scale}(float:<0,inf>) : Right background width as a divergence multiple, or zero for fixed width.
right_pixel {Back+ offset}(float:pixel) : Right background shift in pixels.
min_pixel {Left edge}(float:pixel<1,256>) : Left background cutoff pixel.
max_pixel {Right edge}(float:pixel<1,256>) : Right background cutoff pixel.
degree {Polynomial degree}(int:<0,9>) : Background polynomial degree.
mc_samples {MC samples}(int:<0,inf>) : Number of MC samples for uncertainty analysis, or zero for simple gaussian.
slices {Slice value}(float) : Display data cross-sections at the given values.
**Returns**
specular (refldata) : integrated specular
background (refldata) : integrated background
residual (psddata) : background subtracted psd data
sliceplot (plot) : slices plot
| 2020-02-03 Paul Kienzle
"""
from .ng7psd import apply_integration
from dataflow.data import Plottable
mc_seed = mc_samples if mc_samples > 0 else None
#print("slices", slices)
spec, back, resid, sliceplot = apply_integration(
data, spec=(spec_scale, spec_pixel),
left=(left_scale, left_pixel), right=(right_scale, right_pixel),
pixel_range=(min_pixel, max_pixel),
degree=degree, mc_samples=mc_samples, seed=mc_seed,
slices=[slices] if slices is not None else [],
)
return spec, back, resid, Plottable(sliceplot)
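# --- Editor's illustrative sketch (not part of the original module) ---
# The integration windows described in the docstring are linear in the beam
# divergence expressed in pixels: width = scale*divergence + offset. This helper
# only evaluates those expressions to make the parameters concrete; the actual
# region selection (and the angle-to-pixel conversion) happens inside
# .ng7psd.apply_integration.
def _example_integration_windows(divergence_pixels, spec=(1.0, 5.0),
                                 left=(1.0, 5.0), right=(1.0, 5.0)):
    spec_half_width = spec[0] * divergence_pixels + spec[1]
    left_width = left[0] * divergence_pixels + left[1]
    right_width = right[0] * divergence_pixels + right[1]
    return spec_half_width, left_width, right_width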
@module
def rescale(data, scale=1.0, dscale=0.0):
"""
Rescale the count rate by some scale and uncertainty.
**Inputs**
data (refldata) : data to scale
scale (scale*) : amount to scale, one for each dataset
dscale {Scale err} (float*:<0,inf>) : scale uncertainty for gaussian error propagation
**Returns**
output (refldata) : scaled data
2015-12-17 Paul Kienzle
"""
from .scale import apply_rescale
data = copy(data)
apply_rescale(data, scale, dscale)
return data
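# --- Editor's illustrative sketch (not part of the original module) ---
# apply_rescale is defined in .scale. For a scalar scale s with uncertainty ds,
# Gaussian propagation on v' = s*v gives the expression below; this is only the
# central arithmetic, not the library implementation.
def _example_rescale_uncertainty(v, dv, s, ds):
    import numpy as np
    v = np.asarray(v, dtype=float)
    dv = np.asarray(dv, dtype=float)
    scaled = s * v
    scaled_err = np.sqrt((s * dv)**2 + (v * ds)**2)
    return scaled, scaled_err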
#@nocache
@module
def join(data, Q_tolerance=0.5, dQ_tolerance=0.002, order='file',
group_by="polarization", tolerance=None):
r"""
Join operates on a list of datasets, returning a list with one dataset,
or one dataset per polarization state. When operating on a single
dataset, it joins repeated points into single points.
*Qtol* and *dQtol* are scale factors on $\Delta \theta$ used to
determine whether two angles are equivalent. For a given tolerance
$\epsilon_Q, \epsilon_{\Delta Q}$, a point at incident angle
$\theta_1$ can be joined with one with incident angle $\theta_2$ when
$|\theta_1 - \theta_2| < \epsilon_Q \cdot \Delta\theta$ and
$|\Delta\theta_1 - \Delta\theta_2| < \epsilon_{\Delta Q} \cdot \Delta\theta$.
Values of $\epsilon_Q=0.5$ and $\epsilon_{\Delta Q}=0.002$ work well in
practice. If the tolerances are both 0 then join is performed against
the desired positions rather than the actual positions; this more
closely corresponds with user intent.
The join algorithm is greedy, so if you have a sequence of points with
individual separation less than $\epsilon\cdot\Delta\theta$ but total
spread greater than $\epsilon\cdot\Delta\theta$, they will be joined
into multiple points, with the final point having worse
statistics than the prior points. Join operates on one dimension at
a time, first grouping points with common $\Delta\theta$, then joining
points within each $\Delta\theta$ by common $\theta_\text{incident}$,
then by common $\theta_\text{detector}$. This algorithm should work
well enough on the common reflectometry scans, but it may fail for example
if applied to a set of $Q_x$ scans with different $Q_z$ values.
*order* is the sort order of the files that are joined. The first
file in the sorted list determines the metadata such as the base
file name for the joined file.
The joined datasets will be sorted as appropriate for the
measurement intent. Masked points will be removed.
Data must be normalized before join.
**Inputs**
data (refldata[]) : data to join
Q_tolerance (float:1-sigma<0,inf>) : allowed separation between points
while still joining them to a single point; this is relative to the angular
resolution and wavelength dispersion of each point
dQ_tolerance (float:1-sigma<0,inf>) : allowed difference in resolution
between combined points; this is relative to the angular resolution and
wavelength dispersion of each point
order (opt:file|time|theta|slit|none) : order determines which file is the
base file, supplying the metadata for the joined set
group_by (opt:polarization|probe|entry|filenumber|instrument|intent|sample.name|sample.description) : key by which the files are grouped prior to join
tolerance(float?:1-sigma<0,inf>) : **deprecated** value for Qtol and dQtol;
ignored if the value is None or not specified.
**Returns**
output (refldata[]) : joined data
| 2017-02-09 Paul Kienzle: split tolerance into Qtol and dQtol
| 2017-05-05 Paul Kienzle: consistent sort order for outputs
| 2017-07-03 Brian Maranville: rearrange to group by Ti, Td before dT, dQ
| 2018-05-14 Brian Maranville: group by Qx first for all rocking curves
| 2020-10-14 Paul Kienzle fixed uncertainty for time normalized data
| 2020-12-15 Brian Maranville added roi_counts and source_power to columns
"""
from .joindata import sort_files, join_datasets
from .util import group_by_key
# No copy necessary; join is never in-place.
# TODO: parse **deprecated** in automod and hide deprecated inputs on ui
if tolerance is not None:
Q_tolerance = dQ_tolerance = tolerance
datasets = [v for k, v in sorted(group_by_key(group_by, data).items())]
output = []
for group in datasets:
group = sort_files(group, order)
result = join_datasets(group, Q_tolerance, dQ_tolerance)
output.append(result)
return output
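# --- Editor's illustrative sketch (not part of the original module) ---
# The equivalence test described in the docstring compares angular separations
# against the angular resolution. A minimal version of that single-point test
# (the greedy grouping itself lives in .joindata) looks like this:
def _example_can_join(theta1, theta2, dtheta, q_tol=0.5):
    """True if two points are close enough (relative to resolution) to merge."""
    return abs(theta1 - theta2) < q_tol * dtheta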
@module
def mix_cross_sections(data, mix_sf=False, mix_nsf=False):
"""
Mix (combine) cross-sections, usually to improve statistics when cross-sections
are expected to be indistinguishable in the model (e.g. spin-flip when no chirality)
Typically this is done after load and before "join"
All inputs are passed to the output, and in addition:
When *mix_sf* is enabled, all input datasets with polarization "-+" will be copied and
added to the output with polarization = "+-", and vice-versa for "+-" inputs.
When *mix_nsf* is enabled, all input datasets with polarization "++" will be copied and
added to the output with polarization = "--", and similarly "--" inputs sent to "++"
**Inputs**
data (refldata[]) : datasets in
mix_sf {Mix Spin-Flip?} (bool) : Perform mixing on spin-flip cross-sections,
i.e. "+-" and "-+"
mix_nsf {Mix Non-Spin-Flip?} (bool) : Perform mixing on non-spin-flip cross-sections,
i.e. "++" and "--" or "+" and "-"
**Returns**
output (refldata[]) : relabeled and copied datasets (around twice as many as in the input)
2021-11-17 Brian Maranville
"""
output = copy(data)
mappings = {
"sf": {
"+-": "-+",
"-+": "+-"
},
"nsf": {
"++": "--",
"--": "++",
"+": "-",
"-": "+"
}
}
def duplicate_and_remap_items(xs_type):
mapping = mappings[xs_type]
items = [d for d in data if d.polarization in mapping]
for item in items:
new_item = copy(item)
new_item.polarization = mapping[item.polarization]
output.append(new_item)
if mix_sf:
duplicate_and_remap_items("sf")
if mix_nsf:
duplicate_and_remap_items("nsf")
return output
#@module
def align_background(data, align='auto'):
"""
Determine the Qz value associated with the background measurement.
The *align* flag determines which background points are matched
to the sample points. It can be 'sample' if background is
measured using an offset from the sample angle, or 'detector'
if it is offset from detector angle. If *align* is 'auto', then
use 'Qz_target' to align the background scan.
For 'auto' alignment without Qz_target set, we can only distinguish
relative and constant offsets, and cannot determine which of sample
and detector is offset from the specular condition, so we must rely
on convention. If the offset is constant for each angle, then it is
assumed to be a sample offset. If the offset is proportional to
the angle (and therefore offset divided by angle is constant), then
it is assumed to be a detector offset. If neither condition is met,
it is assumed to be a sample offset.
The 'auto' test is robust: 90% of the points should be within 5% of the
median value of the vector for the offset to be considered a constant.
**Inputs**
data (refldata) : background data with unknown $q$
align (opt:auto|sample|detector) : angle which determines $q_z$
**Returns**
output (refldata) : background with known $q$
2015-12-17 Paul Kienzle
2020-10-16 Paul Kienzle rename 'offset' to 'align'
"""
from .background import set_background_alignment
data = copy(data)
set_background_alignment(data, align)
return data
@module
def subtract_background(data, backp, backm, align="none"):
"""
Subtract the background datasets from the specular dataset.
The specular, background+ and background- signals should already be
joined into single datasets. For each, the background is interpolated
onto the specular Q values, extending above and below with the final
background measurement. If there are no backgrounds, then data is
sent through unchanged.
Background subtraction is applied independently to the different
polarization cross sections.
The *align* flag determines which background points are matched
to the sample points. It can be 'sample' if background is
measured using an offset from the sample angle, or 'detector'
if it is offset from detector angle. If it is 'none' then use
the 'Qz_basis' value set in the loader. The 'auto' option uses
'Qz_target' if it exists, or tries to guess from the measured angles.
**Inputs**
data (refldata) : specular data
backp {Background+} (refldata?) : plus-offset background data
backm {Background-} (refldata?) : minus-offset background data
align (opt:none|sample|detector|auto) : apply align_background to
background inputs using the given alignment
**Returns**
output (refldata) : background subtracted specular data
2016-03-23 Paul Kienzle
"""
from .background import apply_background_subtraction
# Note: This changes backp and backm, so copy first.
if align != "none":
if backp is not None:
backp = copy(backp)
align_background(backp, align=align)
if backm is not None:
backm = copy(backm)
align_background(backm, align=align)
#print "%s - (%s+%s)/2"%(data.name, (backp.name if backp else "none"), (backm.name if backm else "none"))
data = copy(data)
apply_background_subtraction(data, backp, backm)
return data
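# --- Editor's illustrative sketch (not part of the original module) ---
# After the backgrounds are interpolated onto the specular Q grid, the core
# arithmetic is specular - average(back+, back-), with variances combined.
# The real implementation (interpolation, single-sided backgrounds, etc.) is
# .background.apply_background_subtraction; this sketch shows only that step.
def _example_subtract(spec, spec_var, backp, backp_var, backm, backm_var):
    import numpy as np
    back = 0.5 * (np.asarray(backp, dtype=float) + np.asarray(backm, dtype=float))
    back_var = 0.25 * (np.asarray(backp_var, dtype=float) + np.asarray(backm_var, dtype=float))
    return spec - back, np.asarray(spec_var, dtype=float) + back_var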
@module
def interpolate_background(data, backp, backm, align='auto'):
"""
Interpolate background data onto specular angles.
The *align* flag determines which background points are matched
to the specular points. The 'auto' option uses the Qz_basis set
in the file loader, otherwise align the sample angle or the detector
angle. This sets Qz_basis to align so that a subsequent subtraction
operation will use the same interpolation.
Masking of background or specular should occur before interpolation
or after subtraction.
**Inputs**
data (refldata) : specular data
backp {Background+} (refldata?) : plus-offset background data
backm {Background-} (refldata?) : minus-offset background data
align (opt:auto|sample|detector|sample_target|detector_target)
: angle which determines $q_z$
**Returns**
output (refldata) : unchanged specular
outp (refldata) : interpolated plus-offset background data
outm (refldata) : interpolated minus-offset background data
2020-11-20 Paul Kienzle new module
"""
from .background import apply_interpolation
if backp is not None:
backp = copy(backp)
backp.sample = copy(backp.sample)
backp.detector = copy(backp.detector)
apply_interpolation(data=backp, base=data, align=align)
if backm is not None:
backm = copy(backm)
backm.sample = copy(backm.sample)
backm.detector = copy(backm.detector)
apply_interpolation(data=backm, base=data, align=align)
return data, backp, backm
@module
def fit_background_field(back, fit_scale=True, scale=1.0, epsD0=0.01, epssi=1.109e-4, LS3=380, LS4=1269, LSD=1675, HD=150, maxF=76.2, Qcutoff=0.05):
"""
Fit the background field from a thin liquid reservoir to background
datasets. Background datasets:
o Can be taken at any (non-specular) condition
o Should already be normalized by incident intensity
o Can involve any number of scans
The background datasets are fit using a Levenberg-Marquardt algorithm to a model
involving two parameters: epsD, the product of the incoherent attenuation coefficient
of the reservoir (eps) and the reservoir thickness D, and a scale factor that accounts for
uncertainty in the instrument geometry, i.e. the post-sample slit distances and/or the
solid angle subtended by the detector.
The uncertainties in the optimized parameters are estimated from the covariance matrix,
and the chi-squared value of the fit (typically 1.5 or less) is also calculated and available
in the parameter output. Note that the covariance matrix itself is passed to subtract_background_field
because the parameters are correlated.
If the scale factor is not included in the fit, the accuracy of the calculation depends critically on
correct values for the instrument geometry. The geometry is inferred from the data files;
if it is missing, certain values can be overridden using the data entry boxes. These include the
slit4-detector distance (0 for instruments with no slit4) and the detector height (for horizontal
reflectometers, the width).
The calculation assumes a geometry like that of the NIST liquid flow cell, in which the liquid reservoir
is encased in silicon. For a different cell material (e.g. sapphire), the appropriate incoherent
extinction coefficient for the material should replace the default Si value.
**Inputs**
back (refldata[]) : group of background datasets
epsD0 {Reservoir extinction coefficient guess (mm^-1)} (float:mm^-1) : initial guess
for product of incoherent extinction coefficient of reservoir and reservoir thickness
epssi {Extinction coefficient of Si (mm^-1)} (float:mm^-1) : incoherent
extinction coefficient of Si or cell materials
fit_scale {Include scale factor in fit?} (bool) : True if scale factor on detector solid angle
should be included in fit (use if uncertain of the instrument geometry)
scale {Scale factor value} (float) : Value of scale factor. Initial guess if scale
factor is included in fit; otherwise fixed scale factor value.
Qcutoff {Target Qz cutoff (Ang^-1)} (float:Ang^-1) : Cutoff target Q_z value below which background data
are excluded from background fit
LS3 {sample-slit3 distance (mm)} (float:mm) : Distance from sample to slit3
LS4 {sample-slit4 distance (mm)} (float:mm) : Distance from sample to slit4
LSD {sample-detector distance (mm)} (float:mm) : Distance from sample to detector
HD {detector height (mm)} (float:mm) : Height of detector
maxF {maximum beam footprint (mm)} (float:mm) : Sample dimension in beam direction
**Returns**
fitparams (ncnr.refl.backgroundfield.params) : fit parameters, covariances, and chi-squared
fit (refldata[]) : ReflData structure containing fit outputs (for plotting against
background inputs to inspect background field fit)
2018-06-12 David P. Hoogerheide; last updated 2018-10-12
"""
from .backgroundfield import fit_background_field
bff, outfitdata = fit_background_field(back, epsD0, epssi, fit_scale, scale, LS3, LS4, LSD, HD, Qcutoff, maxF)
return bff, outfitdata
@module
def subtract_background_field(data, bfparams, epsD=None, epsD_var=None, scale=None, scale_var=None, scale_epsD_covar=None):
"""
Subtract the background field arising from a thin liquid reservoir from
a specular dataset, which should already be normalized by the incident intensity.
Applies the background field fit with the "Fit Background Field" module. See that
module's description for more details.
**Inputs**
data (refldata[]) : specular data
bfparams (ncnr.refl.backgroundfield.params) : background field parameters
epsD {epsD} (float) : p
epsD_var {epsD variance} (float) : dp2
scale {scale} (float) : s
scale_var {scale factor variance} (float) : ds2
scale_epsD_covar {covariance of scale factor and epsD} (float) : dsp
**Returns**
output (refldata[]) : background subtracted specular data
2018-06-12 David P. Hoogerheide; last updated 2018-10-12
"""
from .backgroundfield import apply_background_field_subtraction
data = [copy(d) for d in data]
if epsD is None: epsD = bfparams.p
if epsD_var is None: epsD_var = bfparams.dp2
if scale is None: scale = bfparams.s
if scale_var is None: scale_var = bfparams.ds2
if scale_epsD_covar is None: scale_epsD_covar = bfparams.dsp
pcov = np.array([[scale_var, scale_epsD_covar],
[scale_epsD_covar, epsD_var]])
apply_background_field_subtraction(data, epsD, bfparams.epssi, bfparams.LS3, bfparams.LS4, bfparams.L4D, bfparams.HD, bfparams.maxF, scale, pcov)
return data
@module
def divide_intensity(data, base):
"""
Scale data by incident intensity.
Data is matched to the incident scan according to the measurement type:
By default it is aligned by the angular resolution of both scans,
assuming all data with the same angular resolution was subject to the
same incident intensity.
For Candor data it is aligned by the slit 1 opening (slit.x),
and for MAGIK horizontal mode it is aligned by the incident angle (sample.angle_x)
**Inputs**
data (refldata) : specular, background or subtracted data
base (refldata) : intensity data
**Returns**
output (refldata) : reflected intensity
| 2015-12-17 Paul Kienzle
| 2020-07-23 Brian Maranville add align_intensity flag to data to match incident and reflected points
"""
if base is not None:
from .scale import apply_intensity_norm
data = copy(data)
apply_intensity_norm(data, base)
return data
@module
def smooth_slits(datasets, degree=1, span=2, dx=0.01):
"""
Align slits with a moving window 1-D polynomial least squares filter.
Updates *slit1.x*, *slit2.x* and *angular_resolution* attributes of the
slit measurements so they all use a common set of points.
Updates divergence automatically after smoothing.
**Inputs**
datasets (refldata[]) : slits to align and smooth
degree (int) : polynomial degree on smoothing filter
span (int) : number of consecutive points to use in the fit. Odd
sized *span* is preferred. *span* must be larger than *degree*.
*degree=1* and *span=2* is equivalent to linear interpolation.
dx (float:mm<0,>) : size within which slits can be merged.
**Returns**
outputs (refldata[]) : aligned and smoothed slits.
2015-12-17 Paul Kienzle
"""
from .smoothslits import apply_smoothing
datasets = [copy(d) for d in datasets]
for d in datasets:
d.slit1, d.slit2 = copy(d.slit1), copy(d.slit2)
apply_smoothing(datasets, dx=dx, degree=degree, span=span)
return datasets
@module
def abinitio_footprint(data, Io=1., width=None, offset=0.):
"""
Apply an *ab initio* footprint correction to the data.
Footprint is computed from the slits and the sample angle so those must
be available in the data. If the data has been stitched to common Q
from different theta, lambda combinations, then footprint will no
longer be available.
Footprint is computed using sample angle. If background is measured using
sample angle offset, then footprint should be applied before background
subtraction. For detector angle offset the correction is the same for
specular and background, so it can be applied before or after subtraction.
**Inputs**
data (refldata) : uncorrected measurement
Io (float): scale factor to account for vertical beam spill.
width (float:mm) : sample width along the beam. If not provided, use the
value stored in the file.
offset (float:mm) : offset of the center of rotation of the sample in
the direction of the beam, toward the detector.
**Returns**
outputs (refldata): footprint-corrected data
2016-09-02 Paul Kienzle
"""
from .footprint import apply_abinitio_footprint
data = copy(data)
apply_abinitio_footprint(data, Io, width, offset)
return data
@module
def fit_footprint(data, fit_range=[None, None], origin=False):
"""
Fit a footprint using a range of data below the critical edge.
If a range is not provided, then no footprint is fitted and instead the
footprint slope and intercept from the *correct_footprint* component are
used.
**Inputs**
data (refldata[]) : uncorrected measurement
fit_range (range?:x): x-region over which to fit
origin (bool) : True if data should go through the origin
**Returns**
fitted_footprint (ncnr.refl.footprint.params?) : slope and intercept
2016-04-30 Paul Kienzle
"""
from .footprint import fit_footprint
if fit_range is None:
fit_range = [None, None]
footprint = fit_footprint(data, fit_range[0], fit_range[1], kind='slope' if origin else 'line')
return footprint
@module
def correct_footprint(data, fitted_footprint, correction_range=[None, None],
slope=None, slope_error=0.0, intercept=None,
intercept_error=0.0):
"""
Apply fitted footprint correction to each data set.
If no footprint is fitted, then values must be entered for *slope* and
*intercept*.
**Inputs**
data (refldata) : uncorrected measurement
fitted_footprint (ncnr.refl.footprint.params?) : fitted footprint
correction_range {Correction Application Range} (range?:x) : Q region over which to apply the footprint correction
slope (float) : footprint slope
slope_error {Error on slope} (float): and uncertainty
intercept (float) : footprint intercept
intercept_error {Error on intercept} (float): and uncertainty
**Returns**
outputs (refldata): footprint-corrected data
2017-06-29 Paul Kienzle
"""
from .footprint import apply_fitted_footprint, FootprintData
if correction_range is None:
correction_range = [None, None]
# always use manually-provided error on slope and intercept (not fitted)
dp = np.array([slope_error, intercept_error])
if fitted_footprint is None:
# make empty footprint data object
p = np.array([None, None])
fitted_footprint = FootprintData(p, None)
if slope is not None:
fitted_footprint.p[0] = slope
if intercept is not None:
fitted_footprint.p[1] = intercept
# in all cases, overwrite the error in fitted_footprint with specified
# values:
fitted_footprint.dp = dp
data = copy(data)
apply_fitted_footprint(data, fitted_footprint, correction_range)
return data
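# --- Editor's illustrative sketch (not part of the original module) ---
# A fitted footprint is a line in Q (slope, intercept); inside the correction
# range the measured reflectivity is divided by that line. This sketch applies
# the division only within [q_min, q_max] and leaves other points unchanged;
# apply_fitted_footprint in .footprint handles the full details (uncertainty
# propagation and behavior outside the range).
def _example_footprint_divide(q, r, slope, intercept, q_min, q_max):
    import numpy as np
    q = np.asarray(q, dtype=float)
    r = np.asarray(r, dtype=float)
    correction = np.ones_like(q)
    inside = (q >= q_min) & (q <= q_max)
    correction[inside] = slope * q[inside] + intercept
    return r / correction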
@nocache
@module
def estimate_polarization(data, FRbalance=50.0, Emin=0.0, Imin=0.0, clip=False):
"""
Compute polarizer and flipper efficiencies from the intensity data.
If clip is true, reject points above or below particular efficiencies.
The minimum intensity is 1e-10. The minimum efficiency is 0.9.
The computed values are systematically related to the efficiencies:
beta: intensity/2
fp: front polarizer efficiency is F
rp: rear polarizer efficiency is R
ff: front flipper efficiency is (1-x)/2
rf: rear flipper efficiency is (1-y)/2
reject is the indices of points which are clipped because they
are below the minimum efficiency or intensity.
See PolarizationEfficiency.pdf for details on the calculation.
**Inputs**
data (refldata[]) : direct beam measurement to determine polarization
FRbalance (float:%<0,100>) : front/rear balance to use for efficiency loss
Emin (float:%<0,100>) : minimum efficiency cutoff
Imin (float:counts/s<0,>) : minimum intensity cutoff
clip {Clip efficiency} (bool) : clip efficiency between Emin and one
**Returns**
polarization (poldata) : estimated polarization correction factors
2015-12-18 Paul Kienzle
"""
from .polarization import PolarizationData
poldata = PolarizationData(data, FRbal=0.01*FRbalance,
Emin=0.01*Emin, Imin=Imin, clip=clip)
return poldata
@nocache
@module
def correct_polarization(data, polarization, spinflip=True):
"""
Correct data for polarizer and flipper efficiencies.
**Inputs**
data (refldata[]) : polarized data to be corrected
polarization (poldata) : estimated polarization efficiency
spinflip {Correct spinflip data} (bool) : correct spinflip data if available
**Returns**
output (refldata[]) : polarization corrected data
2015-12-18 Paul Kienzle
| 2017-08-22 Brian Maranville interpolate back to Qz-basis for that cross-section
"""
from .polarization import apply_polarization_correction
data = copy(data)
apply_polarization_correction(data, polarization, spinflip)
return data
@module
def save(data, name='auto', ext='auto', path='auto'):
"""
Save data to a particular file
**Inputs**
data (refldata) : data to save
name (opt:auto|...) : name of the file, or 'auto' to use the basename
ext {Extension} (opt:auto|...) : file extension, or 'auto' to use the
id of the last step
path (opt:auto|...) : data path, or 'auto' to use the current directory
2015-12-17 Paul Kienzle
"""
if path == 'auto':
path = '.'
if ext == 'auto':
ext = '.dat'
elif not ext.startswith('.'):
ext = '.' + ext
if name == 'auto':
name = data.name
filename = os.path.join(path, name+ext)
data.save(filename)
@cache
@module
def ng7_psd(
filelist=None,
detector_correction=False,
monitor_correction=False,
center=None,
intent='auto',
sample_width=None,
base='none'):
r"""
Load a list of NG7 PSD files from the NCNR data server.
**Inputs**
filelist (fileinfo[]): List of files to open.
detector_correction {Apply detector deadtime correction} (bool)
: If True, use deadtime constants in file to correct detector counts.
monitor_correction {Apply monitor saturation correction} (bool)
: If True, use the measured saturation curve in file to correct
: the monitor counts.
center {Beam center} (int)
: Detector pixel containing the beam center. This is needed for
: plotting Qx-Qz, etc., and for setting the specular integration region.
intent (opt:auto|specular|intensity|scan)
: Measurement intent (specular, slit, or some other scan), auto or infer.
: If intent is 'scan', then use the first scanned variable.
sample_width {Sample width (mm)} (float?)
: Width of the sample along the beam direction in mm, used for
calculating the effective resolution when the sample is smaller
than the beam. Leave blank to use value from data file.
base {Normalize by} (opt:auto|monitor|time|roi|power|none)
: How to convert from counts to count rates.
: Leave this as none if your template does normalization after integration.
**Returns**
output (refldata[]): All entries of all files in the list.
| 2020-02-05 Paul Kienzle
| 2020-02-11 Paul Kienzle include divergence estimate in startup
"""
from .load import url_load_list
from .ng7psd import load_entries
# Note: divergence is required for join, so always calculate it. If you
# really want it optional then use:
#
# auto_divergence {Calculate dQ} (bool)
# : Automatically calculate the angular divergence of the beam.
#
auto_divergence = True
datasets = []
for data in url_load_list(filelist, loader=load_entries):
data.Qz_basis = 'target'
if intent not in [None, 'auto']:
data.intent = intent
if center is not None:
data = psd_center(data, center)
if auto_divergence:
data = divergence(data, sample_width)
if detector_correction:
data = detector_dead_time(data, None)
if monitor_correction:
data = monitor_saturation(data)
data = normalize(data, base=base)
#print "data loaded and normalized"
datasets.append(data)
return datasets
@cache
@module
def super_load(filelist=None,
detector_correction=False,
monitor_correction=False,
intent='auto',
Qz_basis='actual',
sample_width=None,
base='auto'):
r"""
Load a list of nexus files from the NCNR data server.
*Qz_basis* uses one of the following values:
**actual**
calculates Qx and Qz as (x,z)-components of
$(\vec k_{\text{out}} - \vec k_\text{in})$ in sample coordinates,
**detector**
ignores the sample angle and calculates Qz
as $(4\pi/\lambda \sin(\theta_\text{detector}/2))$,
**sample**
ignores the detector angle and calculates Qz
as $(4\pi/\lambda \sin(\theta_\text{sample}))$
**target**
uses the user-supplied Qz_target values
**Inputs**
filelist (fileinfo[]): List of files to open.
detector_correction {Apply detector deadtime correction} (bool)
: Which deadtime constant to use for detector deadtime.
monitor_correction {Apply monitor deadtime correction} (bool)
: Which deadtime constant to use for monitor deadtime.
intent (opt:auto|specular|background+\|background-\|intensity|rock sample|rock detector|rock qx|scan)
: Measurement intent (specular, background+, background-, slit, rock),
auto or infer. If intent is 'scan', then use the first scanned variable.
Qz_basis (opt:actual|detector|sample|target)
: How to calculate Qz from instrument angles.
sample_width {Sample width (mm)} (float?)
: Width of the sample along the beam direction in mm, used for
calculating the effective resolution when the sample is smaller
than the beam. Leave blank to use value from data file.
base {Normalize by} (opt:auto|monitor|time|roi|power|none)
: how to convert from counts to count rates
**Returns**
output (refldata[]): All entries of all files in the list.
| 2017-01-13 Brian Maranville
| 2017-02-15 Paul Kienzle normalize by time if monitor is not present
| 2017-08-21 Brian Maranville use fileName from trajectory
| 2018-05-01 Brian Maranville import temperature metadata
| 2018-05-07 Brian Maranville detector deadtime correction defaults to True
| 2018-05-10 Brian Maranville export all columns if intent is scan
| 2018-05-11 Brian Maranville detector deadtime correction defaults to False
| 2018-06-18 Brian Maranville change to nexusref to ignore areaDetector
| 2018-06-20 Brian Maranville promote detector.wavelength to column (and resolution)
| 2018-08-29 Paul Kienzle ignore sampleTilt field for NG7
| 2018-12-10 Brian Maranville get_plottable routines moved to python data container from js
| 2020-01-21 Brian Maranville updated loader to handle hdf-nexus
| 2020-12-18 Brian Maranville adding source_power column to monitor
"""
from .load import url_load_list
#from .intent import apply_intent
#from .angles import apply_divergence
# Note: Fileinfo is a structure with
# { path: "location/on/server", mtime: timestamp }
# Note: divergence is required for join, so always calculate it. If you
# really want it optional then use:
#
# auto_divergence {Calculate dQ} (bool)
# : Automatically calculate the angular divergence of the beam.
#
auto_divergence = True
# TODO: sample_width is ignored if datafile defines angular_divergence
datasets = []
for data in url_load_list(filelist):
data.Qz_basis = Qz_basis
if intent not in [None, 'auto']:
data.intent = intent
if auto_divergence:
data = divergence(data, sample_width)
if detector_correction:
data = detector_dead_time(data, None)
if monitor_correction:
data = monitor_dead_time(data, None)
data = normalize(data, base=base)
#print "data loaded and normalized"
datasets.append(data)
return datasets
@cache
@module
def magik_horizontal(filelist=None,
detector_correction=False,
monitor_correction=False,
intent='auto',
Qz_basis='actual',
sample_width=None,
base='auto'):
r"""
Load a list of nexus files from the NCNR data server.
*Qz_basis* uses one of the following values:
**actual**
calculates Qx and Qz as (x,z)-components of
$(\vec k_{\text{out}} - \vec k_\text{in})$ in sample coordinates,
**detector**
ignores the sample angle and calculates Qz
as $(4\pi/\lambda \sin(\theta_\text{detector}/2))$,
**sample**
ignores the detector angle and calculates Qz
as $(4\pi/\lambda \sin(\theta_\text{sample}))$
**target**
uses the user-supplied Qz_target values
**Inputs**
filelist (fileinfo[]): List of files to open.
detector_correction {Apply detector deadtime correction} (bool)
: Which deadtime constant to use for detector deadtime.
monitor_correction {Apply monitor deadtime correction} (bool)
: Which deadtime constant to use for monitor deadtime.
intent (opt:auto|specular|background+\|background-\|intensity|rock sample|rock detector|rock qx|scan)
: Measurement intent (specular, background+, background-, slit, rock),
auto or infer. If intent is 'scan', then use the first scanned variable.
Qz_basis (opt:actual|detector|sample|target)
: How to calculate Qz from instrument angles.
sample_width {Sample width (mm)} (float?)
: Width of the sample along the beam direction in mm, used for
calculating the effective resolution when the sample is smaller
than the beam. Leave blank to use value from data file.
base {Normalize by} (opt:auto|monitor|time|roi|power|none)
: how to convert from counts to count rates
**Returns**
output (refldata[]): All entries of all files in the list.
| 2020-07-21 Brian Maranville
| 2020-07-23 Brian Maranville Added a flag to the loader, to control divide_intensity align_by
| 2020-09-03 Brian Maranville Vertical slit readout changed
| 2021-09-20 Brian Maranville use horizontalGeom.angle for sample.angle_x (ignore tilt except in ROCK)
"""
from .load import url_load_list
from .magik_horizontal import load_entries
# Note: Fileinfo is a structure with
# { path: "location/on/server", mtime: timestamp }
# Note: divergence is required for join, so always calculate it. If you
# really want it optional then use:
#
# auto_divergence {Calculate dQ} (bool)
# : Automatically calculate the angular divergence of the beam.
#
auto_divergence = True
# TODO: sample_width is ignored if datafile defines angular_divergence
datasets = []
for data in url_load_list(filelist, loader=load_entries):
data.Qz_basis = Qz_basis
if intent not in [None, 'auto']:
data.intent = intent
if auto_divergence:
data = divergence(data, sample_width)
if detector_correction:
data = detector_dead_time(data, None)
if monitor_correction:
data = monitor_dead_time(data, None)
data = normalize(data, base=base)
#print "data loaded and normalized"
datasets.append(data)
return datasets
@cache
@module
def super_load_sorted(filelist=None,
detector_correction=False,
monitor_correction=False,
sample_width=None,
base='auto'):
"""
Load a list of nexus files from the NCNR data server, to be sorted by
the intent stored in the file. If intent does not match
'specular', 'background+', 'background-' or 'intensity', it is not returned.
**Inputs**
filelist (fileinfo[]): List of files to open.
detector_correction {Apply detector deadtime correction} (bool)
: Which deadtime constant to use for detector deadtime.
monitor_correction {Apply monitor deadtime correction} (bool)
: Which deadtime constant to use for monitor deadtime.
sample_width {Sample width (mm)} (float): Width of the sample along the
beam direction in mm, used for calculating the effective resolution when
the sample is smaller than the beam. Leave blank to use value from data file.
base {Normalize by} (opt:auto|monitor|time|roi|power|none)
: how to convert from counts to count rates
**Returns**
spec (refldata[]): All entries of all spec files in the list.
bgp (refldata[]): All entries of all bg+ files in the list.
bgm (refldata[]): All entries of all bg- files in the list.
slit (refldata[]): All entries of all slit files in the list.
2016-06-30 Brian Maranville
"""
from .load import url_load_list
auto_divergence = True
sorting_key = "intent"
sort_values = ["specular", "background+", "background-", "intensity"]
outputs = dict([(key, []) for key in sort_values])
for data in url_load_list(filelist):
if auto_divergence:
data = divergence(data, sample_width)
if detector_correction:
data = detector_dead_time(data, None)
if monitor_correction:
data = monitor_dead_time(data, None)
data = normalize(data, base=base)
intent = getattr(data, sorting_key, None)
if intent in outputs:
outputs[intent].append(data)
return tuple([outputs[k] for k in sort_values])
@module
def spin_asymmetry(data):
"""
Do the calculation (up-up - down-down) / (up-up + down-down) and
return a single dataset.
**Inputs**
data (refldata[]): input data; must contain up-up and down-down polarizations
**Returns**
output (refldata): calculated spin asymmetry.
2016-04-04 Brian Maranville
"""
mm = [d for d in data if d.polarization in ('-', '--')][0]
pp = [d for d in data if d.polarization in ('+', '++')][0]
output = copy(mm)
output.vscale = "linear"
output.vlabel = "Spin asymmetry (pp-mm)/(pp+mm) "
output.vunits = "unitless"
shortest = min(mm.v.shape[0], pp.v.shape[0])
mmv = mm.v[:shortest]
mmdv = mm.dv[:shortest]
ppv = pp.v[:shortest]
ppdv = pp.dv[:shortest]
denom = (mmv + ppv)
output.v = (ppv - mmv) / denom
# for sa = (pp-mm)/(pp+mm): d(sa)/d(pp) = 2*mm/(pp+mm)**2, d(sa)/d(mm) = -2*pp/(pp+mm)**2
output.dv = np.sqrt(((2.0*mmv*ppdv)/(denom**2))**2 + ((2.0*ppv*mmdv)/(denom**2))**2)
return output
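# --- Editor's illustrative usage (not part of the original module) ---
# The same arithmetic as above for a single point: pp = 0.9 and mm = 0.3 give
# SA = (0.9 - 0.3)/(0.9 + 0.3) = 0.5. Values are arbitrary toy numbers.
def _example_spin_asymmetry_point(pp=0.9, mm=0.3):
    return (pp - mm) / (pp + mm)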
@module
def average_flux(data, base, beam_height=25):
"""
Calculate time-averaged flux on the sample
Data is matched according to angular resolution, assuming all data with
the same angular resolution was subject to the same incident intensity.
Does not work on polarized beam data with multiple slit scans
Beam area is taken to be beam_height * slit2 aperture (from file)
**Inputs**
data (refldata[]) : specular, background or subtracted data
base (refldata) : intensity data
beam_height (float:mm): height of the beam at the sample position
**Returns**
flux (ncnr.refl.flux.params?) : integrated flux data
2018-03-01 Brian Maranville
"""
from dataflow.modules import refl
TIME_RESOLUTION = 1e-6 # 1 microsecond for NCNR timers.
if base is not None:
from .scale import calculate_number
from dataflow.lib import err1d
fluxes = []
total_number = 0.0
total_number_variance = 0.0
total_time = 0.0
total_time_variance = 0.0
sum_weighted_flux = 0.0
sum_weighted_flux_variance = 0.0
for datum in data:
datum = copy(datum)
beam_area = datum.slit2.x * beam_height / 100.0 # both in mm; mm^2 / 100 = cm^2
N, varN = calculate_number(datum, base, time_uncertainty=TIME_RESOLUTION)
S, varS = err1d.sum(N, varN)
P, varP = err1d.div(N, varN, beam_area, 0.0)
A, varA = err1d.sum(P, varP) # time-weighted average of Flux/Area
T, varT = err1d.sum(datum.monitor.count_time, TIME_RESOLUTION**2)
F, varF = err1d.div(A, varA, T, varT) # average Flux/(Area * Time)
fluxes.append({
"name": datum.name,
"number_incident": S,
"number_incident_error": np.sqrt(varS),
"number_incident_units": "neutrons",
"average_flux": F,
"average_flux_error": np.sqrt(varF),
"average_flux_units": "neutrons/(second * cm^2)",
"total_time": float(T),
"total_time_error": float(np.sqrt(varT))
})
total_number += S
total_number_variance += varS
total_time += T
total_time_variance += varT
sum_weighted_flux += A
sum_weighted_flux_variance += varA
aggregated_flux, aggregated_flux_variance = err1d.div(sum_weighted_flux, sum_weighted_flux_variance, total_time, total_time_variance)
output = refl.FluxData(fluxes, {
"aggregated_average_flux": aggregated_flux,
"aggregated_average_flux_error": np.sqrt(aggregated_flux_variance),
"aggregated_time": total_time,
"aggregated_time_error": np.sqrt(total_time_variance)
})
else:
output = refl.FluxData([], None)
return output
| 33.736328 | 205 | 0.678776 |
4a2525ae8706f36fdc033866a349e07912666da3 | 1,281 | py | Python | preprocess/load_data/data_loader.py | Doarakko/awesomebook | 650aa3833fb3110ad1c1ed804530617196e4989a | ["BSD-3-Clause"] | 123 | 2018-04-08T10:35:22.000Z | 2022-02-17T14:13:43.000Z | preprocess/load_data/data_loader.py | Doarakko/awesomebook | 650aa3833fb3110ad1c1ed804530617196e4989a | ["BSD-3-Clause"] | null | null | null | preprocess/load_data/data_loader.py | Doarakko/awesomebook | 650aa3833fb3110ad1c1ed804530617196e4989a | ["BSD-3-Clause"] | 116 | 2018-04-17T06:31:16.000Z | 2022-03-18T12:08:15.000Z |
import pandas as pd
import os
def load_hotel_reserve():
customer_tb = pd.read_csv(os.path.dirname(__file__)+'/../../data/customer.csv')
hotel_tb = pd.read_csv(os.path.dirname(__file__) + '/../../data/hotel.csv')
reserve_tb = pd.read_csv(os.path.dirname(__file__) + '/../../data/reserve.csv')
return customer_tb, hotel_tb, reserve_tb
def load_holiday_mst():
holiday_tb = pd.read_csv(os.path.dirname(__file__)+'/../../data/holiday_mst.csv',
index_col=False)
return holiday_tb
def load_production():
production_tb = pd.read_csv(os.path.dirname(__file__)+'/../../data/production.csv')
return production_tb
def load_production_missing_num():
production_tb = pd.read_csv(os.path.dirname(__file__)+'/../../data/production_missing_num.csv')
return production_tb
def load_production_missing_category():
production_tb = pd.read_csv(os.path.dirname(__file__)+'/../../data/production_missing_category.csv')
return production_tb
def load_monthly_index():
monthly_index_tb = \
pd.read_csv(os.path.dirname(__file__)+'/../../data/monthly_index.csv')
return monthly_index_tb
def load_meros_txt():
with open(os.path.dirname(__file__)+'/../../data/txt/meros.txt', 'r') as f:
meros = f.read()
f.close()
return meros
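# --- Editor's illustrative usage (not part of the original file) ---
# The loaders above read the CSV files bundled with the book, relative to this
# module. Typical use looks like the following (variable names are an example):
def _example_load():
    customer_tb, hotel_tb, reserve_tb = load_hotel_reserve()
    return reserve_tb.head()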
| 29.113636 | 102 | 0.706479 |
4a2527d480a35b364d2acdfe133f4bbf72eea545 | 18,097 | py | Python | demeter/model.py | shemic/demeter | 01f91aac43c325c48001dda86af17da43fb8d6fe | ["MIT"] | 1 | 2017-12-05T08:17:53.000Z | 2017-12-05T08:17:53.000Z | demeter/model.py | shemic/demeter | 01f91aac43c325c48001dda86af17da43fb8d6fe | ["MIT"] | null | null | null | demeter/model.py | shemic/demeter | 01f91aac43c325c48001dda86af17da43fb8d6fe | ["MIT"] | 1 | 2017-12-05T08:17:53.000Z | 2017-12-05T08:17:53.000Z |
# -*- coding: utf-8 -*-
"""
demeter
name:model.py
author:rabin
"""
import os
import uuid
import short_url
import json
import traceback
import re
import math
import datetime
from demeter.core import *
class Model(object):
__table__ = ''
__comment__ = ''
def __init__(self, type, db, config):
self.db = db
self._type = type
self._config = config
self._set = ''
self._bind = {}
self._attr = {}
self._key = {}
self.call = False
self.log = []
self.sql = False
self.bind = False
self.place = '%s'
if self._type == 'sqlite':
self.place = '?'
self.setTable(self.__table__)
self.create()
def setTable(self, name):
if 'prefix' in self._config and self._config['prefix']:
self._table = self._config['prefix'] + '_' + name
else:
self._table = name
def setCall(self, call):
self.call = call
return self
def addLog(self, value):
self.log.append(value)
def getLog(self):
return self.log
def cur(self):
return self.db.cursor()
def commit(self):
return self.db.commit()
def lastId(self, cur):
if hasattr(cur, 'lastrowid'):
id = cur.lastrowid
if not id:
id = cur.fetchone()[0]
else:
id = cur.fetchone()[0]
return id
def query(self, sql, bind=[], fetch='fetchone', method='', cur=False, call=False):
if call:
self.setCall(call)
if not cur:
cur = self.cur()
if not method:
if 'select' in sql:
method = 'select'
if 'insert' in sql:
method = 'insert'
self.sql = sql
self.bind = bind
self.addLog((sql, bind))
try:
cur.execute(sql, bind)
if method == 'select':
return self.fetch(cur, fetch)
id = True
if method == 'insert':
id = self.lastId(cur)
self.commit()
except Exception as e:
self.addLog(str(e))
return False
self._set = {}
return id
def execute(self, sql, method='select', fetch='fetchall'):
cur = self.cur()
bind = []
state_true = True
state_false = False
if self._type == 'mysql':
state_true = '1'
state_false = '2'
if self._set:
for key in self._set:
self.check(key, self._set[key], self._attr[key])
if self._set[key] == 'time':
self._set[key] = self.time()
elif self._set[key] == 'True':
self._set[key] = state_true
elif self._set[key] == 'False':
self._set[key] = state_false
elif 'date' in key and type(self._set[key]) != int:
self._set[key] = self.mktime(self._set[key])
elif self._attr[key].md5:
self._set[key] = self.createMd5(self._set[key])
bind.append(self._set[key])
for value in self._key:
if value[0] in self._bind and self._bind[value[0]] != None:
val = self._bind[value[0]]
if method == 'insert':
self.check(value[0], val, self._attr[value[0]])
self._attr[value[0]].unset()
if type(val) == list and val:
for i in val:
bind.append(i)
else:
bind.append(val)
if method == 'select' and ';' in sql:
temp = sql.split(';')
sql = temp[1]
totalSql = temp[0]
cur.execute(totalSql, bind)
Demeter.config['page']['totalNum'] = self.fetch(cur, 'fetchone', 'count')
Demeter.config['page']['total'] = int(math.ceil(round(float(Demeter.config['page']['totalNum'])/float(Demeter.config['page']['num']),2)))
elif ';' in sql:
temp = sql.split(';')
result = []
for v in temp:
result.append(self.query(v, (), fetch=fetch, method=method, cur=cur))
return result
return self.query(sql, bind, fetch=fetch, method=method, cur=cur)
def fetchAll(self):
return self.fetch(self.cur(), type='fetchall')
def fetch(self, cur, type='fetchall', method = ''):
load = getattr(cur, type)
rows = load()
desc = self._key
desc = cur.description
if type == 'fetchall':
result = []
if rows:
for key in rows:
row = {}
i = 0
for v in key:
row[desc[i][0]] = self.data(desc[i][0], v)
i = i + 1
result.append(row)
elif method == 'count':
return rows[0]
else:
result = {}
i = 0
if rows:
for v in rows:
if not v:
v = ''
result[desc[i][0]] = self.data(desc[i][0], v)
i = i + 1
return result
def data(self, key, value):
if type(value) == datetime.datetime:
value = str(value)
if self.call:
value = self.call(key, value)
if value == None or not value:
value = ''
return value
def attr(self, method):
fields = vars(self.__class__)
self._attr = {}
self._bind = {}
self._key = {}
if Demeter.checkPy3():
col = (int, str, float, bool, uuid.UUID)
code = (str,)
else:
col = (int, str, long, float, unicode, bool, uuid.UUID)
code = (str, unicode)
for field in fields:
if isinstance(fields[field], Fields):
self._attr[field] = fields[field]
self._key[field] = self._attr[field].getKey()
insert = (method == 'insert')
update = (insert or method == 'update')
if insert and self._attr[field].uuid:
self.setUuid(field, col)
bind = False
val = self._attr[field].getArgv()
if val or val == False:
bind = True
else:
val = getattr(self, field)
if isinstance(val, col):
setattr(self, field, self._attr[field])
bind = True
elif insert and self._attr[field].default:
val = self._attr[field].default
bind = True
if val == 'time':
val = self.time()
elif '.' in val:
temp = val.split('.')
val = Demeter.config[temp[0]][temp[1]]
elif method == 'select' and self._attr[field].default and field == 'state':
val = self._attr[field].default
bind = True
if bind and val != None:
if type(val) == list:
length = len(val)
if length <= 1:
val = val[0]
if insert and self._attr[field].md5:
val = self.createMd5(val)
if self._attr[field].type == 'boolean' and isinstance(val, code):
val = Demeter.bool(val, self._type)
if type(val) == list:
val = tuple(val)
self._bind[field] = val
self._attr[field].val(self._bind[field])
self._attr[field].bind(self.place)
self._key = sorted(self._key.items(), key=lambda d:d[1], reverse = False)
Counter().unset()
def check(self, field, val, attr):
if val == 'undefined':
            self.error(field + ' is undefined')
if attr.match == 'not':
if not val:
self.error(field + ' not exists')
elif attr.match:
if '|' in attr.match:
temp = attr.match.split('|')
match = temp[0]
error = temp[1]
else:
match = attr.match
error = field + ' not match:' + match
if hasattr(Check, match):
method = getattr(Check, match)
result = method(val)
else:
result = re.search(match, val)
if not result:
self.error(error)
def error(self, msg):
for value in self._key:
if value[0] in self._bind and self._bind[value[0]] != None:
self._attr[value[0]].unset()
self._set = {}
Demeter.error(msg)
def time(self):
return Demeter.time()
def mktime(self, value):
return Demeter.mktime(value)
def setUuid(self, field, col):
id = getattr(self, self._attr[field].uuid)
if isinstance(id, col):
system = short_url.encode_url(id)
else:
system = self._attr[field].uuid
name = system + '.' + self.__table__
result = Demeter.uuid(name)
setattr(self, field, result)
def createMd5(self, value):
return Demeter.md5(value, salt=True)
def createState(self):
if 'create' in self._config:
create = Demeter.bool(self._config['create'])
if create:
return Demeter.runtime(self._type, self.__table__, json.dumps(self._key))
return False
def drop(self):
return self.handle('drop')
def create(self):
return self.handle('create')
def insert(self):
return self.handle('insert')
def update(self, *args, **kwargs):
if args:
self._set = args[0]
else:
self._set = kwargs
return self.handle('update', set=self._set)
def delete(self):
return self.handle('delete')
def select(self, type='fetchall',col = '*', order = 'cdate desc', group = '', limit = '0,100', page=False, call=False):
if call:
self.setCall(call)
pageConfig = {}
if page and 'page' in Demeter.config:
pageConfig['current'] = Demeter.config['page']['current']
if page == True:
pageConfig['num'] = 15
elif 'num' in page:
pageConfig['num'] = page['num']
Demeter.config['page']['num'] = pageConfig['num']
return self.handle('select', type=type, col=col, order=order, group=group, limit=limit, page=pageConfig)
    def manage(self, method='select'):
        self.attr(method)
return
def handle(self, method='select', type='fetchall', col = '*', order = '', group = '', limit = '0,100', page=False, set = ''):
self.attr(method)
if method == 'create':
create = self.createState()
if create == False:
return False
if type == 'fetchone':
limit = '0,1'
load = getattr(Sql(self._type, self.place), method)
return self.execute(load(self._table, {'key':self._key, 'fields':self._attr, 'col':col, 'order':order, 'group':group, 'limit':limit, 'page':page, 'set':set, 'table_comment':self.__comment__}), method, type)
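    # Usage sketch for the CRUD helpers above (illustrative only; the UserInfo
    # model, its columns, and the db/config objects are assumptions, not part
    # of demeter itself):
    #
    #   class UserInfo(Model):
    #       __table__ = 'user_info'
    #       id = Fields(type='int', primaryKey=True, autoIncrement=True)
    #       name = Fields(type='varchar(50)', match='not')
    #       cdate = Fields(type='int', default='time')
    #
    #   user = UserInfo('postgresql', db, config)
    #   user.name = 'demo'
    #   user.insert()                         # INSERT INTO <prefix>_user_info ...
    #   rows = user.select(type='fetchall')   # SELECT ... FROM <prefix>_user_info ...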
class Fields(object):
def __init__(self, type='', default='', primaryKey=False, autoIncrement=False, null=True, unique=False, check='', constraint='', comment='', uuid='', index=False, indexs=False, md5=False, match='', manage=''):
self.type = type
self.default = default
self.primaryKey = primaryKey
self.autoIncrement = autoIncrement
self.null = null
self.unique = unique
self.check = check
self.constraint = constraint
self.comment = comment
self.uuid = uuid
self.index = index
self.indexs = indexs
self.md5 = md5
self.key = Counter().inc()
self.match = match
self.value = ''
self.argv = ''
self.bindValue = ''
self.expValue = '='
self.logicValue = 'and'
self.manage = manage
# set value
def assign(self, value, exp='=', logic='and'):
self.add(value)
self.exp(exp)
self.logic(logic)
return self
def ins(self, value):
self.argv = value
self.exp('in')
return self
def nq(self, value):
self.argv = value
self.exp('!=')
return self
def like(self, value):
self.argv = '%' + value + '%'
self.exp('like')
return self
def mlike(self, value):
self.argv = value
self.exp('~')
self.logic('and')
return self
def time(self, value):
self.add(Demeter.mktime(value))
return self
def start(self, value):
self.time(value)
self.exp('>=')
self.logic('and')
return self
def end(self, value):
self.time(value)
self.exp('<=')
self.logic('and')
return self
def bind(self, value):
self.bindValue = value
return self
def exp(self, value):
"""
if type(self.expValue) != list:
self.expValue = []
self.expValue.append(value)
"""
self.expValue = value
return self
def logic(self, value):
"""
if type(self.logicValue) != list:
self.logicValue = []
self.logicValue.append(value)
"""
self.logicValue = value
return self
def val(self, value, exp='=', logic='and'):
if type(value) == list:
length = len(value)
if length <= 1:
value = value[0]
self.value = value
if not self.expValue:
self.exp(exp)
if not self.logicValue:
self.logic(logic)
return self
def getArgv(self):
return self.argv
def getVal(self):
return self.value
def getBind(self):
return self.bindValue
def getExp(self):
if not self.expValue:
return ''
if type(self.expValue) == list:
length = len(self.expValue)
if length <= 1:
result = self.expValue[0]
else:
result = self.expValue
else:
result = self.expValue
return result
def getKey(self):
return self.key
def getLogic(self):
if not self.logicValue:
return ''
if type(self.logicValue) == list:
length = len(self.logicValue)
if length <= 1:
result = self.logicValue[0]
else:
result = self.logicValue
else:
result = self.logicValue
return result
def unset(self):
self.argv = None
self.value = None
self.bindValue = None
self.expValue = '='
self.logicValue = 'and'
return self
def add(self, value):
"""
if not self.argv:
self.argv = []
self.argv.append(value)
"""
self.argv = value
return self
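    # Illustrative sketch of how these chainable helpers are used when building
    # a WHERE clause (the model/field names are assumptions):
    #
    #   model.name.like('rabin')            # name LIKE '%rabin%'
    #   model.cdate.start('2020-01-01')     # cdate >= mktime('2020-01-01')
    #   model.cdate.end('2020-12-31')       # cdate <= mktime('2020-12-31')
    #   model.state.assign(True, exp='=')   # state = True
    #
    # Each call records value/operator/logic on the Fields instance; Model.attr()
    # later reads them back through getArgv()/getExp()/getLogic().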
class Counter(object):
num = 0
"""
instance = None
def __new__(cls, *args, **kwd):
if Counter.instance is None:
Counter.instance = object.__new__(cls, *args, **kwd)
return Counter.instance
"""
def inc(self):
self.num = self.num + 1
return self.num
def dec(self):
self.num = self.num - 1
return self.num
def unset(self):
self.num = 0
return self.num
class Sql(object):
"""
instance = None
def __new__(cls, *args, **kwd):
if Sql.instance is None:
Sql.instance = object.__new__(cls, *args, **kwd)
return Sql.instance
"""
def __init__(self, type, place):
self.type = type
self.place = place
self.prefix = '`'
if self.type == 'postgresql':
self.prefix = ''
def drop(self, table, args):
sql = 'DROP TABLE IF EXISTS ' + table
return sql
def alter(self, table, args):
sql = 'ALTER TABLE ' + table + ' ADD COLUMN '
return sql
def create(self, table, args):
create = []
primary = []
unique = []
indexs = []
index = []
comment = {}
for value in args['key']:
key = value[0]
val = args['fields'][key]
if val.primaryKey:
primary.append(key)
if val.unique:
unique.append(key)
if val.index:
index.append((key, val.index))
if val.indexs:
indexs.append(key)
fields = []
fields.append(self.prefix + key + self.prefix)
if val.autoIncrement and self.type == 'postgresql':
fields.append('SERIAL')
elif self.type == 'mysql' and val.type == 'boolean':
fields.append('int')
elif self.type == 'sqlite' and val.type == 'int':
if val.autoIncrement and val.primaryKey:
fields.append('integer PRIMARY KEY autoincrement')
else:
fields.append('integer')
else:
fields.append(val.type)
if not val.null:
fields.append('NOT NULL')
if val.autoIncrement and self.type == 'mysql':
fields.append('AUTO_INCREMENT')
#约束
if val.constraint:
fields.append('CONSTRAINT ' + val.constraint)
if val.check:
fields.append('CHECK ' + val.check)
if val.default:
default = val.default
if val.default == 'time':
default = '0'
if '.' in val.default:
temp = val.default.split('.')
default = Demeter.config[temp[0]][temp[1]]
if self.type == 'mysql' and val.type == 'boolean':
default = Demeter.bool(default, self.type)
fields.append('DEFAULT \'' + str(default) + '\'')
if val.comment:
if self.type == 'mysql':
fields.append('COMMENT \'' + val.comment + '\'')
else:
comment[key] = val.comment
fields = ' '.join(fields)
create.append(fields)
if primary and self.type != 'sqlite':
create.append('PRIMARY KEY (' + ','.join(primary) + ')')
if unique:
create.append('UNIQUE (' + ','.join(unique) + ')')
create = ','.join(create)
sql = 'CREATE TABLE ' + table + '(' + create + ')'
sql = self.drop(table, args) + ';' + sql
if indexs:
name = '_'.join(indexs)
value = ','.join(indexs)
sql = sql + ';' + 'CREATE INDEX ' + table + '_' + name +' ON ' + table + '(' + value + ')'
if index:
for value in index:
sql = sql + ';' + 'CREATE INDEX ' + table + '_' + value[0] +' ON ' + table + value[1]
if comment and self.type != 'sqlite':
if args['table_comment']:
sql = sql + ';' + 'COMMENT ON TABLE ' + table + ' IS \''+args['table_comment']+'\''
for key in comment:
sql = sql + ';' + 'COMMENT ON COLUMN ' + table + '.'+key+' IS \''+comment[key]+'\''
return sql
def insert(self, table, args):
fields = []
values = []
for value in args['key']:
key = value[0]
val = args['fields'][key].getBind()
if val:
values.append(val)
fields.append(self.prefix + key + self.prefix)
fields = ','.join(fields)
values = ','.join(values)
sql = 'INSERT INTO ' + table + ' (' + fields + ') VALUES (' + values + ')'
if self.type == 'postgresql':
sql = sql + ' RETURNING id'
return sql
def update(self, table, args):
fields = []
for key in args['set']:
fields.append(self.prefix + key + self.prefix + ' = ' + self.place)
fields = ','.join(fields)
sql = 'UPDATE ' + table + ' SET ' + fields + self.where(args['key'], args['fields'])
return sql
def delete(self, table, args):
sql = 'DELETE FROM ' + table + self.where(args['key'], args['fields'])
return sql
def select(self, table, args):
string = ' FROM ' + table + self.where(args['key'], args['fields']) + ' ' + self.group(args['group'])
sql = ''
if args['page']:
sql = 'SELECT count(1) as total' + string + ';'
sql = sql + 'SELECT ' + args['col'] + string + ' ' + self.order(args['order']) + ' ' + self.limit(args['limit'], args['page'])
return sql
def where(self, key, fields):
fields = self.fields(key, fields)
if fields:
return ' WHERE ' + fields
return ''
def fields(self, key, fields):
result = ''
k = 0
for value in key:
key = value[0]
field = fields[key]
bind = field.getBind()
val = field.getVal()
logic = field.getLogic()
exp = field.getExp()
if type(val) == list and val:
n = 0
for i in val:
data = self.field(field, bind, key, k, logic[n], exp[n])
n = n + 1
if data:
result = result + data
k = 1
else:
data = self.field(field, bind, key, k, logic, exp)
if data:
result = result + data
k = 1
return result
def field(self, field, val, key, k, logic, exp):
result = ''
if val:
if k == 0:
logic = ''
else:
logic = ' ' + logic
result = logic + ' ' + self.prefix + key + self.prefix + ' ' + exp + ' ' + str(val)
return result
def order(self, value):
result = ''
if value:
result = ' ORDER BY ' + value
return result
def group(self, value):
result = ''
if value:
result = ' GROUP BY ' + value
return result
def limit(self, value, page):
result = ''
if page:
value = str((int(page['current'])-1) * page['num']) + ',' + str(page['num'])
if value:
value = value.split(',')
if self.type == 'mysql':
result = ' LIMIT ' + value[0] + ',' + value[1]
elif self.type == 'postgresql':
result = ' LIMIT ' + value[1] + ' OFFSET ' + value[0]
        return result
| 24.323925 | 210 | 0.608333 |
4a2527e4c3f4f06e72c2d3b3d594040739f717c7 | 1,932 | py | Python | bin/abi2fasta.py | dytk2134/genomic_data_processing | c22ad51f67fd6563d40ec749a95725fc11bcf19e | [
"MIT"
] | null | null | null | bin/abi2fasta.py | dytk2134/genomic_data_processing | c22ad51f67fd6563d40ec749a95725fc11bcf19e | [
"MIT"
] | null | null | null | bin/abi2fasta.py | dytk2134/genomic_data_processing | c22ad51f67fd6563d40ec749a95725fc11bcf19e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Contributed by Li-Mei Chiang <dytk2134 [at] gmail [dot] com> (2020)
import os
import sys
import logging
from Bio import SeqIO
__version__ = '1.0.0'
# logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
if not logger.handlers:
lh = logging.StreamHandler()
lh.setFormatter(logging.Formatter('%(levelname)-8s %(message)s'))
logger.addHandler(lh)
def abi2fasta(input_files):
fasta_dict = dict()
for input_file in input_files:
record = SeqIO.read(input_file, 'abi')
if record.id in fasta_dict:
logger.error('Duplicate Seq ID: %s' % (record.id))
sys.exit(1)
fasta_dict[record.id] = record.seq
return fasta_dict
def write_fasta(fasta_dict, output_prefix):
output_file = output_prefix + '.fasta'
with open(output_file, 'w') as out_f:
for seq_id in fasta_dict:
out_f.write('>' + seq_id + '\n')
seqs = [str(fasta_dict[seq_id])[i:i+80] for i in range(0, len(str(fasta_dict[seq_id])), 80)]
for seq in seqs:
out_f.write(seq + '\n')
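# Example invocation (illustrative; the .ab1 file names are assumptions):
#   fasta_dict = abi2fasta(['sample1.ab1', 'sample2.ab1'])
#   write_fasta(fasta_dict, 'merged')   # writes merged.fasta, wrapped at 80 bases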
def main():
import argparse
from textwrap import dedent
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=dedent("""\
This script is for converting ABI files to FASTA files.
Quick start:
%(prog)s
"""))
# argument
parser.add_argument('-i', '--input_files', nargs='+', help='Input ABI files', required=True)
parser.add_argument('-o', '--output_prefix', type=str, help='Specify the output prefix', default='output')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
fasta_dict = abi2fasta(args.input_files)
write_fasta(fasta_dict, args.output_prefix)
if __name__ == '__main__':
main() | 33.310345 | 115 | 0.641304 |
4a25281ed2190927b0a11504bc18ceb15dee38c0 | 300 | py | Python | isimip_data/download/viewsets.py | ISI-MIP/isimip-data | a0e4772362cc60db91e7689ec397840dcaaacddb | [
"MIT"
] | 3 | 2020-02-10T10:13:17.000Z | 2021-12-21T09:10:50.000Z | isimip_data/download/viewsets.py | ISI-MIP/isimip-data | a0e4772362cc60db91e7689ec397840dcaaacddb | [
"MIT"
] | 17 | 2020-02-10T16:09:12.000Z | 2021-07-02T09:03:37.000Z | isimip_data/download/viewsets.py | ISI-MIP/isimip-data | a0e4772362cc60db91e7689ec397840dcaaacddb | [
"MIT"
] | null | null | null | from rest_framework.mixins import ListModelMixin
from rest_framework.viewsets import GenericViewSet
from .constants import COUNTRIES
from .serializers import CountrySerializer
class CountryViewSet(ListModelMixin, GenericViewSet):
serializer_class = CountrySerializer
queryset = COUNTRIES
| 25 | 53 | 0.843333 |
4a25286a91f3878ec1fbe919ad84dcca09ee49e9 | 594 | py | Python | tests/test_open.py | GitBib/asstosrt | 1e671a9d7339dba3567ceeba37239469775f68d3 | [
"Apache-2.0"
] | null | null | null | tests/test_open.py | GitBib/asstosrt | 1e671a9d7339dba3567ceeba37239469775f68d3 | [
"Apache-2.0"
] | null | null | null | tests/test_open.py | GitBib/asstosrt | 1e671a9d7339dba3567ceeba37239469775f68d3 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
import pytest
from pyasstosrt import Subtitle
def test_open():
path = 'tests/sub.ass'
sub = Subtitle(path)
assert sub
def test_open_use_pathlib():
path = Path('tests/sub.ass')
sub = Subtitle(path)
assert sub
def test_open_use_object():
file = open('tests/sub.ass', 'r')
with pytest.raises(TypeError):
Subtitle(file)
def test_open_folder():
with pytest.raises(FileNotFoundError):
Subtitle('tests/')
def test_open_broken_file():
with pytest.raises(FileNotFoundError):
Subtitle('tests/sub1.ass')
| 17.470588 | 42 | 0.675084 |
4a2528abdbbd943137e5fd547b3bbbe0474d4310 | 3,384 | py | Python | fabric_mb/message_bus/messages/extend_reservation_avro.py | fabric-testbed/MessageBus | 2f44f1adca9cfe1551116d4b2d9484c4aa94a2aa | [
"MIT"
] | null | null | null | fabric_mb/message_bus/messages/extend_reservation_avro.py | fabric-testbed/MessageBus | 2f44f1adca9cfe1551116d4b2d9484c4aa94a2aa | [
"MIT"
] | null | null | null | fabric_mb/message_bus/messages/extend_reservation_avro.py | fabric-testbed/MessageBus | 2f44f1adca9cfe1551116d4b2d9484c4aa94a2aa | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Komal Thareja ([email protected])
from fabric_mb.message_bus.message_bus_exception import MessageBusException
from fabric_mb.message_bus.messages.auth_avro import AuthAvro
from fabric_mb.message_bus.messages.abc_message_avro import AbcMessageAvro
from fabric_mb.message_bus.messages.constants import Constants
class ExtendReservationAvro(AbcMessageAvro):
"""
Implements Avro representation of an Extend Reservation Message
"""
def __init__(self):
super(ExtendReservationAvro, self).__init__()
self.name = AbcMessageAvro.extend_reservation
self.guid = None
self.auth = None
self.reservation_id = None
self.end_time = None
self.new_units = -1
self.new_resource_type = None
self.request_properties = None
self.config_properties = None
self.id_token = None
def from_dict(self, value: dict):
"""
The Avro Python library does not support code generation.
For this reason we must provide conversion from dict to our class for de-serialization
:param value: incoming message dictionary
"""
if value[Constants.NAME] != AbcMessageAvro.extend_reservation:
raise MessageBusException(Constants.ERROR_INVALID_MESSAGE)
for k, v in value.items():
if k in self.__dict__ and v is not None:
if k == Constants.AUTH:
self.__dict__[k] = AuthAvro()
self.__dict__[k].from_dict(value=v)
else:
self.__dict__[k] = v
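    # Illustrative round trip (field values below are made-up examples):
    #   incoming = {'name': AbcMessageAvro.extend_reservation,
    #               'guid': 'broker-guid', 'reservation_id': 'rid-42',
    #               'auth': {'name': 'fabric', 'guid': 'actor-guid'}}
    #   msg = ExtendReservationAvro()
    #   msg.from_dict(value=incoming)   # 'auth' is rebuilt as an AuthAvro instance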
def get_id_token(self) -> str:
"""
Return identity token
"""
return self.id_token
def validate(self) -> bool:
"""
Check if the object is valid and contains all mandatory fields
:return True on success; False on failure
"""
ret_val = super().validate()
if self.guid is None or self.auth is None or self.callback_topic is None or self.reservation_id is None or \
self.new_units is None or self.new_resource_type is None or self.request_properties is None or \
self.config_properties is None:
ret_val = False
return ret_val
| 40.285714 | 116 | 0.690307 |
4a2528c993c6e1826fed9a7a1940a9ea576d4a85 | 409 | py | Python | mongo/venv/Scripts/pip3.7-script.py | AnastasiaYiChen/Python | 13b1ce38b6f0e7b83c72c825ca24523c08d4e489 | [
"CNRI-Python"
] | null | null | null | mongo/venv/Scripts/pip3.7-script.py | AnastasiaYiChen/Python | 13b1ce38b6f0e7b83c72c825ca24523c08d4e489 | [
"CNRI-Python"
] | null | null | null | mongo/venv/Scripts/pip3.7-script.py | AnastasiaYiChen/Python | 13b1ce38b6f0e7b83c72c825ca24523c08d4e489 | [
"CNRI-Python"
] | null | null | null | #!C:\git\w0443276\PROG1700\mongo\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| 31.461538 | 70 | 0.662592 |
4a2529040553172fb55e66794275d0e43e7cc416 | 5,745 | py | Python | Q Learning Discretization Tile Coding/Q_Learning_with_Tile_Coding_for_Continuous_RL.py | eayvali/DeepRL | 4722af0f75487dd3167faafd4eabe8f01aea4305 | [
"MIT"
] | 2 | 2020-01-29T20:49:29.000Z | 2020-03-27T21:45:12.000Z | Q Learning Discretization Tile Coding/Q_Learning_with_Tile_Coding_for_Continuous_RL.py | eayvali/DeepRL | 4722af0f75487dd3167faafd4eabe8f01aea4305 | [
"MIT"
] | null | null | null | Q Learning Discretization Tile Coding/Q_Learning_with_Tile_Coding_for_Continuous_RL.py | eayvali/DeepRL | 4722af0f75487dd3167faafd4eabe8f01aea4305 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 24 20:43:09 2019
@author: elif.ayvali
"""
import sys
import gym
import pandas as pd
import numpy as np
import matplotlib.collections as mc
import matplotlib.pyplot as plt
from helpers import create_uniform_grid,discretize,visualize_samples,\
create_tilings,create_tiling_grid,TiledQTable
class QLearningAgent:
"""Q-Learning agent that can act on a continuous state space by discretizing it."""
def __init__(self, env, tq, alpha=0.02, gamma=0.99, epsilon=1.0, epsilon_decay_rate=0.9995, min_epsilon=.01, seed=505):
"""Initialize variables, create grid for discretization."""
# Environment info
self.env = env
self.tq = tq
self.state_sizes = tq.state_sizes # list of state sizes for each tiling
self.action_size = self.env.action_space.n # 1-dimensional discrete action space
self.seed = np.random.seed(seed)
print("Environment:", self.env)
print("State space size:", self.state_sizes)
print("Action space size:", self.action_size)
# Learning parameters
self.alpha = alpha # learning rate
self.gamma = gamma # discount factor
self.epsilon = self.initial_epsilon = epsilon # initial exploration rate
self.epsilon_decay_rate = epsilon_decay_rate # how quickly should we decrease epsilon
self.min_epsilon = min_epsilon
def reset_episode(self, state):
"""Reset variables for a new episode."""
# Gradually decrease exploration rate
self.epsilon *= self.epsilon_decay_rate
self.epsilon = max(self.epsilon, self.min_epsilon)
self.last_state = state
Q_s = [self.tq.get(state, action) for action in range(self.action_size)]
self.last_action = np.argmax(Q_s)
return self.last_action
def reset_exploration(self, epsilon=None):
"""Reset exploration rate used when training."""
self.epsilon = epsilon if epsilon is not None else self.initial_epsilon
def act(self, state, reward=None, done=None, mode='train'):
"""Pick next action and update internal Q table (when mode != 'test')."""
Q_s = [self.tq.get(state, action) for action in range(self.action_size)]
# Pick the best action from Q table
greedy_action = np.argmax(Q_s)
if mode == 'test':
# Test mode: Simply produce an action
action =greedy_action
else:
# Train mode (default): Update Q table, pick next action
# Note: We update the Q table entry for the *last* (state, action) pair with current state, reward
#Instead of overwriting Q(state, action) with value, perform soft-update:
#Q(state, action) = alpha * value + (1.0 - alpha) * Q(state, action)
value = reward + self.gamma * max(Q_s)
self.tq.update(self.last_state, self.last_action, value, self.alpha)
# Exploration vs. exploitation
do_exploration = np.random.uniform(0, 1) < self.epsilon
if do_exploration:
# Pick a random action
action = np.random.randint(0, self.action_size)
else:
# Pick the best action from Q table
action = greedy_action
# Roll over current state, action for next step
self.last_state = state
self.last_action = action
return action
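    # Worked sketch of the soft update performed in train mode (numbers made up):
    # with alpha=0.02, gamma=0.99, reward=-1 and max(Q_s)=0.5, the target is
    # value = -1 + 0.99 * 0.5 = -0.505, and the tiled Q-table entry for the
    # previous (state, action) pair moves as
    #   Q(s, a) <- 0.02 * (-0.505) + 0.98 * Q(s, a)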
def run(agent, env, num_episodes=10000, mode='train'):
"""Run agent in given reinforcement learning environment and return scores."""
scores = []
max_avg_score = -np.inf
for i_episode in range(1, num_episodes+1):
# Initialize episode
state = env.reset()
action = agent.reset_episode(state)
total_reward = 0
done = False
# Roll out steps until done
while not done:
state, reward, done, info = env.step(action)
total_reward += reward
action = agent.act(state, reward, done, mode)
# Save final score
scores.append(total_reward)
# Print episode stats
if mode == 'train':
if len(scores) > 100:
avg_score = np.mean(scores[-100:])
if avg_score > max_avg_score:
max_avg_score = avg_score
if i_episode % 100 == 0:
print("\rEpisode {}/{} | Max Average Score: {}".format(i_episode, num_episodes, max_avg_score), end="")
sys.stdout.flush()
return scores
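# Evaluation sketch (assumes the agent has already been trained above):
#   test_scores = run(agent, env, num_episodes=100, mode='test')
#   print("Mean test score:", np.mean(test_scores))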
def plot_scores(scores, rolling_window=100):
"""Plot scores and optional rolling mean using specified window."""
plt.plot(scores); plt.title("Scores");
rolling_mean = pd.Series(scores).rolling(rolling_window).mean()
plt.plot(rolling_mean);
return rolling_mean
# Create an environment
env = gym.make('Acrobot-v1')
env.seed(505);
# Explore state (observation) space
print("State space:", env.observation_space)
print("- low:", env.observation_space.low)
print("- high:", env.observation_space.high)
# Explore action space
print("Action space:", env.action_space)
n_bins = 5
low = [-1.0, -5.0]
high = [1.0, 5.0]
bins = tuple([n_bins]*env.observation_space.shape[0])
offset_pos = (env.observation_space.high - env.observation_space.low)/(3*n_bins)
tiling_specs = [(bins, -offset_pos),
(bins, tuple([0.0]*env.observation_space.shape[0])),
(bins, offset_pos)]
tq = TiledQTable(env.observation_space.low,
env.observation_space.high,
tiling_specs,
env.action_space.n)
agent = QLearningAgent(env, tq)
scores = run(agent, env)
rolling_mean = plot_scores(scores)
| 38.046358 | 123 | 0.63168 |
4a2529b225600e591cf6c8b93048d658f5d5fa6b | 14,752 | py | Python | dvc/state.py | kss682/dvc | 550cd28028cb872f83c5482d7bffbc9852e33da6 | [
"Apache-2.0"
] | 1 | 2019-09-02T00:28:11.000Z | 2019-09-02T00:28:11.000Z | dvc/state.py | kss682/dvc | 550cd28028cb872f83c5482d7bffbc9852e33da6 | [
"Apache-2.0"
] | null | null | null | dvc/state.py | kss682/dvc | 550cd28028cb872f83c5482d7bffbc9852e33da6 | [
"Apache-2.0"
] | 1 | 2019-09-02T00:29:40.000Z | 2019-09-02T00:29:40.000Z | """Manages state database used for checksum caching."""
from __future__ import unicode_literals
import os
import sqlite3
import logging
from dvc.config import Config
from dvc.utils import remove, current_timestamp, relpath, to_chunks
from dvc.exceptions import DvcException
from dvc.utils.fs import get_mtime_and_size, get_inode
from dvc.utils.compat import fspath_py35
SQLITE_MAX_VARIABLES_NUMBER = 999
logger = logging.getLogger(__name__)
class StateVersionTooNewError(DvcException):
"""Thrown when dvc version is older than the state database version."""
def __init__(self, dvc_version, expected, actual):
super(StateVersionTooNewError, self).__init__(
"you are using an old version '{dvc_version}' of dvc that is "
"using state file version '{expected}' which is not compatible "
"with the state file version '{actual}' that is used in this "
"repo. Please upgrade right now!".format(
dvc_version=dvc_version, expected=expected, actual=actual
)
)
class StateNoop(object):
files = []
def save(self, path_info, checksum):
pass
def get(self, path_info):
return None
def save_link(self, path_info):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class State(object): # pylint: disable=too-many-instance-attributes
"""Class for the state database.
Args:
repo (dvc.repo.Repo): repo instance that this state belongs to.
config (configobj.ConfigObj): config for the state.
Raises:
StateVersionTooNewError: thrown when dvc version is older than the
state database version.
"""
VERSION = 3
STATE_FILE = "state"
STATE_TABLE = "state"
STATE_TABLE_LAYOUT = (
"inode INTEGER PRIMARY KEY, "
"mtime TEXT NOT NULL, "
"size TEXT NOT NULL, "
"md5 TEXT NOT NULL, "
"timestamp TEXT NOT NULL"
)
STATE_INFO_TABLE = "state_info"
STATE_INFO_TABLE_LAYOUT = "count INTEGER"
STATE_INFO_ROW = 1
LINK_STATE_TABLE = "link_state"
LINK_STATE_TABLE_LAYOUT = (
"path TEXT PRIMARY KEY, "
"inode INTEGER NOT NULL, "
"mtime TEXT NOT NULL"
)
STATE_ROW_LIMIT = 100000000
STATE_ROW_CLEANUP_QUOTA = 50
MAX_INT = 2 ** 63 - 1
MAX_UINT = 2 ** 64 - 2
def __init__(self, repo, config):
self.repo = repo
self.dvc_dir = repo.dvc_dir
self.root_dir = repo.root_dir
state_config = config.get(Config.SECTION_STATE, {})
self.row_limit = state_config.get(
Config.SECTION_STATE_ROW_LIMIT, self.STATE_ROW_LIMIT
)
self.row_cleanup_quota = state_config.get(
Config.SECTION_STATE_ROW_CLEANUP_QUOTA,
self.STATE_ROW_CLEANUP_QUOTA,
)
if not self.dvc_dir:
self.state_file = None
return
self.state_file = os.path.join(self.dvc_dir, self.STATE_FILE)
# https://www.sqlite.org/tempfiles.html
self.temp_files = [
self.state_file + "-journal",
self.state_file + "-wal",
]
self.database = None
self.cursor = None
self.inserts = 0
@property
def files(self):
return self.temp_files + [self.state_file]
def __enter__(self):
self.load()
def __exit__(self, typ, value, tbck):
self.dump()
def _execute(self, cmd, parameters=()):
logger.debug(cmd)
return self.cursor.execute(cmd, parameters)
def _fetchall(self):
ret = self.cursor.fetchall()
logger.debug("fetched: {}".format(ret))
return ret
def _to_sqlite(self, num):
assert num >= 0
assert num < self.MAX_UINT
        # NOTE: sqlite stores uints as signed ints, so maximum uint is 2^63-1
# see http://jakegoulding.com/blog/2011/02/06/sqlite-64-bit-integers/
if num > self.MAX_INT:
ret = -(num - self.MAX_INT)
else:
ret = num
assert self._from_sqlite(ret) == num
return ret
def _from_sqlite(self, num):
assert abs(num) <= self.MAX_INT
if num < 0:
return abs(num) + self.MAX_INT
assert num < self.MAX_UINT
assert num >= 0
return num
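    # Worked example of the signed/unsigned mapping: an inode of 2**63 + 5 does
    # not fit in sqlite's signed 64-bit integer, so _to_sqlite() stores it as
    # -((2**63 + 5) - (2**63 - 1)) == -6, and _from_sqlite(-6) recovers
    # 6 + (2**63 - 1) == 2**63 + 5.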
def _prepare_db(self, empty=False):
from dvc import __version__
if not empty:
cmd = "PRAGMA user_version;"
self._execute(cmd)
ret = self._fetchall()
assert len(ret) == 1
assert len(ret[0]) == 1
assert isinstance(ret[0][0], int)
version = ret[0][0]
if version > self.VERSION:
raise StateVersionTooNewError(
__version__, self.VERSION, version
)
elif version < self.VERSION:
msg = (
"State file version '{}' is too old. "
"Reformatting to the current version '{}'."
)
logger.warning(msg.format(version, self.VERSION))
cmd = "DROP TABLE IF EXISTS {};"
self._execute(cmd.format(self.STATE_TABLE))
self._execute(cmd.format(self.STATE_INFO_TABLE))
self._execute(cmd.format(self.LINK_STATE_TABLE))
# Check that the state file is indeed a database
cmd = "CREATE TABLE IF NOT EXISTS {} ({})"
self._execute(cmd.format(self.STATE_TABLE, self.STATE_TABLE_LAYOUT))
self._execute(
cmd.format(self.STATE_INFO_TABLE, self.STATE_INFO_TABLE_LAYOUT)
)
self._execute(
cmd.format(self.LINK_STATE_TABLE, self.LINK_STATE_TABLE_LAYOUT)
)
cmd = (
"INSERT OR IGNORE INTO {} (count) SELECT 0 "
"WHERE NOT EXISTS (SELECT * FROM {})"
)
self._execute(cmd.format(self.STATE_INFO_TABLE, self.STATE_INFO_TABLE))
cmd = "PRAGMA user_version = {};"
self._execute(cmd.format(self.VERSION))
def load(self):
"""Loads state database."""
retries = 1
while True:
assert self.database is None
assert self.cursor is None
assert self.inserts == 0
empty = not os.path.exists(self.state_file)
self.database = sqlite3.connect(self.state_file)
self.cursor = self.database.cursor()
# Try loading once to check that the file is indeed a database
# and reformat it if it is not.
try:
self._prepare_db(empty=empty)
return
except sqlite3.DatabaseError:
self.cursor.close()
self.database.close()
self.database = None
self.cursor = None
self.inserts = 0
if retries > 0:
os.unlink(self.state_file)
retries -= 1
else:
raise
def _vacuum(self):
# NOTE: see https://bugs.python.org/issue28518
self.database.isolation_level = None
self._execute("VACUUM")
self.database.isolation_level = ""
def dump(self):
"""Saves state database."""
assert self.database is not None
cmd = "SELECT count from {} WHERE rowid=?".format(
self.STATE_INFO_TABLE
)
self._execute(cmd, (self.STATE_INFO_ROW,))
ret = self._fetchall()
assert len(ret) == 1
assert len(ret[0]) == 1
count = self._from_sqlite(ret[0][0]) + self.inserts
if count > self.row_limit:
msg = "cleaning up state, this might take a while."
logger.warning(msg)
delete = count - self.row_limit
delete += int(self.row_limit * (self.row_cleanup_quota / 100.0))
cmd = (
"DELETE FROM {} WHERE timestamp IN ("
"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});"
)
self._execute(
cmd.format(self.STATE_TABLE, self.STATE_TABLE, delete)
)
self._vacuum()
cmd = "SELECT COUNT(*) FROM {}"
self._execute(cmd.format(self.STATE_TABLE))
ret = self._fetchall()
assert len(ret) == 1
assert len(ret[0]) == 1
count = ret[0][0]
cmd = "UPDATE {} SET count = ? WHERE rowid = ?".format(
self.STATE_INFO_TABLE
)
self._execute(cmd, (self._to_sqlite(count), self.STATE_INFO_ROW))
self.database.commit()
self.cursor.close()
self.database.close()
self.database = None
self.cursor = None
self.inserts = 0
@staticmethod
def _file_metadata_changed(actual_mtime, mtime, actual_size, size):
return actual_mtime != mtime or actual_size != size
def _update_state_record_timestamp_for_inode(self, actual_inode):
cmd = "UPDATE {} SET timestamp = ? WHERE inode = ?".format(
self.STATE_TABLE
)
self._execute(
cmd, (current_timestamp(), self._to_sqlite(actual_inode))
)
def _update_state_for_path_changed(
self, actual_inode, actual_mtime, actual_size, checksum
):
cmd = (
"UPDATE {} SET "
"mtime = ?, size = ?, "
"md5 = ?, timestamp = ? "
"WHERE inode = ?"
).format(self.STATE_TABLE)
self._execute(
cmd,
(
actual_mtime,
actual_size,
checksum,
current_timestamp(),
self._to_sqlite(actual_inode),
),
)
def _insert_new_state_record(
self, actual_inode, actual_mtime, actual_size, checksum
):
assert checksum is not None
cmd = (
"INSERT INTO {}(inode, mtime, size, md5, timestamp) "
"VALUES (?, ?, ?, ?, ?)"
).format(self.STATE_TABLE)
self._execute(
cmd,
(
self._to_sqlite(actual_inode),
actual_mtime,
actual_size,
checksum,
current_timestamp(),
),
)
self.inserts += 1
def get_state_record_for_inode(self, inode):
cmd = (
"SELECT mtime, size, md5, timestamp from {} WHERE "
"inode=?".format(self.STATE_TABLE)
)
self._execute(cmd, (self._to_sqlite(inode),))
results = self._fetchall()
if results:
            # uniqueness constraint on inode
assert len(results) == 1
return results[0]
return None
def save(self, path_info, checksum):
"""Save checksum for the specified path info.
Args:
path_info (dict): path_info to save checksum for.
checksum (str): checksum to save.
"""
assert path_info.scheme == "local"
assert checksum is not None
path = fspath_py35(path_info)
assert os.path.exists(path)
actual_mtime, actual_size = get_mtime_and_size(
path, self.repo.dvcignore
)
actual_inode = get_inode(path)
existing_record = self.get_state_record_for_inode(actual_inode)
if not existing_record:
self._insert_new_state_record(
actual_inode, actual_mtime, actual_size, checksum
)
return
self._update_state_for_path_changed(
actual_inode, actual_mtime, actual_size, checksum
)
def get(self, path_info):
"""Gets the checksum for the specified path info. Checksum will be
retrieved from the state database if available.
Args:
path_info (dict): path info to get the checksum for.
Returns:
str or None: checksum for the specified path info or None if it
doesn't exist in the state database.
"""
assert path_info.scheme == "local"
path = fspath_py35(path_info)
if not os.path.exists(path):
return None
actual_mtime, actual_size = get_mtime_and_size(
path, self.repo.dvcignore
)
actual_inode = get_inode(path)
existing_record = self.get_state_record_for_inode(actual_inode)
if not existing_record:
return None
mtime, size, checksum, _ = existing_record
if self._file_metadata_changed(actual_mtime, mtime, actual_size, size):
return None
self._update_state_record_timestamp_for_inode(actual_inode)
return checksum
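    # Typical caller pattern (illustrative sketch; compute_md5 stands in for
    # whatever produces the checksum):
    #   checksum = state.get(path_info)
    #   if checksum is None:
    #       checksum = compute_md5(path_info)
    #       state.save(path_info, checksum)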
def save_link(self, path_info):
"""Adds the specified path to the list of links created by dvc. This
list is later used on `dvc checkout` to cleanup old links.
Args:
path_info (dict): path info to add to the list of links.
"""
assert path_info.scheme == "local"
path = fspath_py35(path_info)
if not os.path.exists(path):
return
mtime, _ = get_mtime_and_size(path, self.repo.dvcignore)
inode = get_inode(path)
relative_path = relpath(path, self.root_dir)
cmd = "REPLACE INTO {}(path, inode, mtime) " "VALUES (?, ?, ?)".format(
self.LINK_STATE_TABLE
)
self._execute(cmd, (relative_path, self._to_sqlite(inode), mtime))
def remove_unused_links(self, used):
"""Removes all saved links except the ones that are used.
Args:
used (list): list of used links that should not be removed.
"""
unused = []
self._execute("SELECT * FROM {}".format(self.LINK_STATE_TABLE))
for row in self.cursor:
relpath, inode, mtime = row
inode = self._from_sqlite(inode)
path = os.path.join(self.root_dir, relpath)
if path in used:
continue
if not os.path.exists(path):
continue
actual_inode = get_inode(path)
actual_mtime, _ = get_mtime_and_size(path, self.repo.dvcignore)
if inode == actual_inode and mtime == actual_mtime:
logger.debug("Removing '{}' as unused link.".format(path))
remove(path)
unused.append(relpath)
for chunk_unused in to_chunks(
unused, chunk_size=SQLITE_MAX_VARIABLES_NUMBER
):
cmd = "DELETE FROM {} WHERE path IN ({})".format(
self.LINK_STATE_TABLE, ",".join(["?"] * len(chunk_unused))
)
self._execute(cmd, tuple(chunk_unused))
| 30.991597 | 79 | 0.57016 |
4a2529c3d2076f65ff853b389c32c1f2201cf644 | 4,571 | py | Python | test/Fortran/FORTRAN.py | bdbaddog/scons-gh-migrate | c76589c83ec00650a2d07dce79fc6dc5ca6465fb | [
"MIT"
] | null | null | null | test/Fortran/FORTRAN.py | bdbaddog/scons-gh-migrate | c76589c83ec00650a2d07dce79fc6dc5ca6465fb | [
"MIT"
] | null | null | null | test/Fortran/FORTRAN.py | bdbaddog/scons-gh-migrate | c76589c83ec00650a2d07dce79fc6dc5ca6465fb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
from common import write_fake_link
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
write_fake_link(test)
test.write('myg77.py', r"""
import getopt
import sys
opts, args = getopt.getopt(sys.argv[1:], 'co:')
for opt, arg in opts:
if opt == '-o': out = arg
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
for l in infile.readlines():
if l[:4] != '#g77':
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(LINK = r'%(_python_)s mylink.py',
LINKFLAGS = [],
FORTRAN = r'%(_python_)s myg77.py')
env.Program(target = 'test01', source = 'test01.f')
env.Program(target = 'test02', source = 'test02.F')
env.Program(target = 'test03', source = 'test03.for')
env.Program(target = 'test04', source = 'test04.FOR')
env.Program(target = 'test05', source = 'test05.ftn')
env.Program(target = 'test06', source = 'test06.FTN')
env.Program(target = 'test07', source = 'test07.fpp')
env.Program(target = 'test08', source = 'test08.FPP')
""" % locals())
test.write('test01.f', "This is a .f file.\n#link\n#g77\n")
test.write('test02.F', "This is a .F file.\n#link\n#g77\n")
test.write('test03.for', "This is a .for file.\n#link\n#g77\n")
test.write('test04.FOR', "This is a .FOR file.\n#link\n#g77\n")
test.write('test05.ftn', "This is a .ftn file.\n#link\n#g77\n")
test.write('test06.FTN', "This is a .FTN file.\n#link\n#g77\n")
test.write('test07.fpp', "This is a .fpp file.\n#link\n#g77\n")
test.write('test08.FPP', "This is a .FPP file.\n#link\n#g77\n")
test.run(arguments = '.', stderr = None)
test.must_match('test01' + _exe, "This is a .f file.\n")
test.must_match('test02' + _exe, "This is a .F file.\n")
test.must_match('test03' + _exe, "This is a .for file.\n")
test.must_match('test04' + _exe, "This is a .FOR file.\n")
test.must_match('test05' + _exe, "This is a .ftn file.\n")
test.must_match('test06' + _exe, "This is a .FTN file.\n")
test.must_match('test07' + _exe, "This is a .fpp file.\n")
test.must_match('test08' + _exe, "This is a .FPP file.\n")
fc = 'f77'
f77 = test.detect_tool(fc)
FTN_LIB = test.gccFortranLibs()
if f77:
test.write("wrapper.py",
"""import os
import sys
open('%s', 'wb').write("wrapper.py\\n")
os.system(" ".join(sys.argv[1:]))
""" % test.workpath('wrapper.out').replace('\\', '\\\\'))
test.write('SConstruct', """
foo = Environment(FORTRAN = '%(fc)s')
f77 = foo.Dictionary('FORTRAN')
bar = foo.Clone(FORTRAN = r'%(_python_)s wrapper.py ' + f77)
foo.Program(target = 'foo', source = 'foo.f')
bar.Program(target = 'bar', source = 'bar.f')
""" % locals())
test.write('foo.f', r"""
PROGRAM FOO
PRINT *,'foo.f'
STOP
END
""")
test.write('bar.f', r"""
PROGRAM BAR
PRINT *,'bar.f'
STOP
END
""")
test.run(arguments = 'foo' + _exe, stderr = None)
test.run(program = test.workpath('foo'), stdout = " foo.f\n")
test.must_not_exist('wrapper.out')
import sys
if sys.platform[:5] == 'sunos':
test.run(arguments = 'bar' + _exe, stderr = None)
else:
test.run(arguments = 'bar' + _exe)
test.run(program = test.workpath('bar'), stdout = " bar.f\n")
test.must_match('wrapper.out', "wrapper.py\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 31.095238 | 73 | 0.661781 |
4a2529e172c55034bf5f5c1dbb6aecad2867b00f | 1,287 | py | Python | 2020/d18/d18.py | adam-blinzler/advent_of_code_2020 | a9681c04e1618090913411c24d3872cd97fc6ec9 | [
"MIT"
] | null | null | null | 2020/d18/d18.py | adam-blinzler/advent_of_code_2020 | a9681c04e1618090913411c24d3872cd97fc6ec9 | [
"MIT"
] | null | null | null | 2020/d18/d18.py | adam-blinzler/advent_of_code_2020 | a9681c04e1618090913411c24d3872cd97fc6ec9 | [
"MIT"
] | null | null | null | def run_calc(total, operation, number):
if operation == '':
total = number
elif operation == '+':
total += number
elif operation == '*':
total *= number
else:
print("FAILUE: Invalid operation")
return total
def solve_calc(line):
calc = 0
op = ''
no_calc = False
for char in list(line):
if no_calc:
if char == '(':
nested += 1
elif char == ')':
nested -= 1
if nested > 0:
sub_set = sub_set + char
else:
if char == ')':
no_calc = False
calc = run_calc(calc,op,solve_calc(sub_set))
else:
if char.isdigit():
calc = run_calc(calc,op,int(char))
elif char in ['+','*']:
op = char
elif char == '(':
sub_set = ''
nested = 1
no_calc = True
return calc
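# Example (part 1 rules: '+' and '*' share precedence, parentheses first),
# evaluated strictly left to right:
#   solve_calc('1+2*3+(4*5)')  ->  ((1+2)*3) + (4*5)  ->  29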
#################
total = 0
for i, line in enumerate(open("input.txt"),1):
if line:
calc = solve_calc(line.strip().replace(' ',''))
#print(i," = ",calc)
total += calc
print("Sum Total of all values =",total)
# 30263286928615 too low
| 25.74 | 64 | 0.43512 |
4a252a4fa97cf450b05dab733c1378e7112b7eed | 19,098 | py | Python | fantasyopt/optimizer/dfs.py | lynshi/fantasy-optimizer | d24d79ffc7220e85255219973dcbcf416292a2df | [
"MIT"
] | null | null | null | fantasyopt/optimizer/dfs.py | lynshi/fantasy-optimizer | d24d79ffc7220e85255219973dcbcf416292a2df | [
"MIT"
] | null | null | null | fantasyopt/optimizer/dfs.py | lynshi/fantasy-optimizer | d24d79ffc7220e85255219973dcbcf416292a2df | [
"MIT"
] | null | null | null | import json
import pulp
from fantasyopt.optimizer.exceptions import OptimizerException
from fantasyopt.player import Player
class DfsOptimizer:
UTILITY_CONSTRAINT = 'utility_constraint'
IP_STATUS = 'ip_solve_status'
LINEUP_SALARY = 'lineup_cost'
LINEUP_POINTS = 'lineup_points'
LINEUP_PLAYERS = 'lineup_players'
IGNORE_PLAYER_PREFIX = 'ignore_'
TEAM_MAX_PREFIX = 'team_max_'
REQUIRE_PLAYER_PREFIX = 'require_'
AVOID_OPPONENT_PREFIX = 'avoid_opponent_'
def __init__(self, players, positions, budget, flex_positions=None,
utility_requirement=0):
"""
Construct IP model for player selection optimization
:param players: dictionary of dictionaries representing players
:param positions: dictionary of position -> requirement
:param budget: budget for player selection
:param flex_positions: dict of
flex position name -> (set of valid positions, number required)
One position should not be present in two flex positions
:param utility_requirement: number of utility players required. This
parameter should only be used for roster positions that accept
players from any position (e.g. Util in Yahoo NBA DFS)
:raises ValueError: if any player does not have all required attributes
"""
for player, attributes in players.items():
for attr in [Player.NAME, Player.POINTS_PROJECTION,
Player.OPPONENT, Player.GAME_TIME, Player.SALARY,
Player.INJURY_STATUS, Player.TEAM, Player.POSITION]:
if attr not in attributes.keys():
raise ValueError('player \'' + player + '\' is missing '
'required '
'attribute \'' +
str(attr) + '\'')
self.players = players
        # pass '%s' so there is no leading underscore in the variable name
player_variables = pulp.LpVariable.dict('%s', players.keys(),
lowBound=0, upBound=1,
cat='Integer')
position_constraints = {}
non_flex_count = self.add_position_constraints(player_variables,
position_constraints,
positions,
flex_positions,
utility_requirement)
if flex_positions is not None:
self.add_flex_constraints(player_variables,
position_constraints, flex_positions,
non_flex_count,
utility_requirement)
if utility_requirement > 0:
non_utility_count = 0
for pos, count in positions.items():
non_utility_count += count
if flex_positions is not None:
for flex, (pos, count) in flex_positions.items():
non_utility_count += count
self.add_utility_constraint(player_variables, position_constraints,
utility_requirement, non_utility_count)
budget_expression = \
pulp.LpAffineExpression([(player_variables[player],
attributes[Player.SALARY])
for player, attributes in players.items()])
budget_constraint = pulp.LpConstraint(
budget_expression, pulp.LpConstraintLE,
DfsOptimizer.LINEUP_SALARY, budget)
self.model = pulp.LpProblem('DFS Optimizer', pulp.LpMaximize)
self.model += sum([
attributes[Player.POINTS_PROJECTION] * player_variables[player]
for player, attributes in players.items()
])
self.model.constraints = position_constraints
self.model.constraints.update({
self.LINEUP_SALARY: budget_constraint
})
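    # Illustrative constructor input (player ids, names and numbers are made up):
    #   players = {'p1': {Player.NAME: 'A. Guard', Player.POSITION: 'PG',
    #                     Player.SALARY: 30, Player.POINTS_PROJECTION: 25.0,
    #                     Player.TEAM: 'AAA', Player.OPPONENT: 'BBB',
    #                     Player.GAME_TIME: '7:00PM', Player.INJURY_STATUS: ''},
    #              ...}
    #   optimizer = DfsOptimizer(players, positions={'PG': 1, 'SG': 1}, budget=60,
    #                            flex_positions={'G': ({'PG', 'SG'}, 1)})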
def add_position_constraints(self, player_variables,
position_constraints, positions,
flex_positions,
utility_requirement):
"""
Add position constraints
:param player_variables: dict of player id -> pulp variable
:param position_constraints: dict of constraint name -> pulp constraint
:param positions: dictionary of position -> requirement
:param flex_positions: dict of
flex position name -> (set of valid positions, number required)
One position should not be present in two flex positions
:param utility_requirement: number of utility players required. This
parameter should only be used for roster positions that accept
players from any position (e.g. Util in Yahoo NBA DFS)
:return: total number of players required
"""
# the number of players at flex positions that
# aren't used for the flex position
non_flex_count = {}
# position to the flex position they can contribute to
position_to_flex_map = {}
if flex_positions is not None:
for flex, (allowed, requirement) in flex_positions.items():
non_flex_count[flex] = 0
for p in allowed:
non_flex_count[flex] += positions[p]
position_to_flex_map[p] = flex
for position, requirement in positions.items():
affine_expression = \
pulp.LpAffineExpression([(player_variables[player], 1)
for player, attributes in
self.players.items() if
attributes[Player.POSITION] ==
position])
sense = pulp.LpConstraintEQ
if position in position_to_flex_map or utility_requirement > 0:
sense = pulp.LpConstraintGE
position_constraints[position] = pulp.LpConstraint(
affine_expression, sense, position, requirement)
return non_flex_count
def add_flex_constraints(self, player_variables, position_constraints,
flex_positions, non_flex_count,
utility_requirement):
"""
Add flex constraints
:param player_variables: dict of player id -> pulp variable
:param position_constraints: dict of constraint name -> pulp constraint
:param flex_positions: dict of
flex position name -> (set of valid positions, number required)
One position should not be present in two flex positions
:param non_flex_count: dict of flex position -> number of players
required in positional requirements
:param utility_requirement: number of utility players required. This
parameter should only be used for roster positions that accept
players from any position (e.g. Util in Yahoo NBA DFS)
:return: None
"""
for flex, (allowed, requirement) in flex_positions.items():
affine_expression = \
pulp.LpAffineExpression([(player_variables[player], 1)
for player, attributes in
self.players.items() if
attributes[Player.POSITION] in
allowed])
sense = pulp.LpConstraintEQ
if utility_requirement > 0:
sense = pulp.LpConstraintGE
position_constraints[flex] = pulp.LpConstraint(
affine_expression, sense, flex,
requirement + non_flex_count[flex])
def add_utility_constraint(self, player_variables, position_constraints,
utility_requirement, non_utility_count):
"""
Add utility position requirement. A utility position is one that accepts
a player from any position.
:param player_variables: dict of player id -> pulp variable
:param position_constraints: dict of constraint name -> pulp constraint
:param utility_requirement: number of utility players required
:param non_utility_count: number of players required to be non-utility
:return: None
"""
affine_expression = \
pulp.LpAffineExpression([(player_variables[player], 1)
for player, attributes in
self.players.items()])
position_constraints[DfsOptimizer.UTILITY_CONSTRAINT] = \
pulp.LpConstraint(affine_expression, pulp.LpConstraintEQ,
DfsOptimizer.UTILITY_CONSTRAINT,
utility_requirement + non_utility_count)
def optimize(self) -> dict:
"""
Optimize IP to find best lineup for given model
:return: dictionary of the form {
            DfsOptimizer.IP_STATUS: solve status,
            DfsOptimizer.LINEUP_SALARY: cost of lineup,
            DfsOptimizer.LINEUP_PLAYERS: set of players to put in lineup,
            DfsOptimizer.LINEUP_POINTS: projected total points of the lineup
}
"""
self.model.solve()
result = {DfsOptimizer.IP_STATUS: self.model.status,
DfsOptimizer.LINEUP_SALARY: None,
DfsOptimizer.LINEUP_PLAYERS: set(),
DfsOptimizer.LINEUP_POINTS: None}
if self.model.status != pulp.LpStatusOptimal:
raise OptimizerException('Model exited with status ' +
str(self.model.status))
result[DfsOptimizer.LINEUP_SALARY] = \
pulp.value(
self.model.constraints[DfsOptimizer.LINEUP_SALARY]) - \
self.model.constraints[DfsOptimizer.LINEUP_SALARY].constant
result[DfsOptimizer.LINEUP_POINTS] = \
pulp.value(self.model.objective)
for var_name, var in self.model.variablesDict().items():
if var.varValue == 1:
result[DfsOptimizer.LINEUP_PLAYERS].add(var.name)
return result
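    # Sketch of consuming the result dict (keys are the class constants above):
    #   result = optimizer.optimize()
    #   result[DfsOptimizer.LINEUP_POINTS]    # projected points of the lineup
    #   result[DfsOptimizer.LINEUP_SALARY]    # salary actually spent
    #   for player_id in result[DfsOptimizer.LINEUP_PLAYERS]:
    #       print(optimizer.players[player_id][Player.NAME])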
def generate_lineup(self, display_lineup=True):
"""
Generate optimal DFS lineup based on player salaries and point
projections
:param display_lineup: if true, print lineup in JSON to console
:return: dict that is the lineup, organized by position
"""
result = self.optimize()
lineup = {}
for p in result[DfsOptimizer.LINEUP_PLAYERS]:
player = self.players[p]
pos = player[Player.POSITION]
if pos not in lineup:
lineup[pos] = []
# to remove extra information that may be contained in player
lineup[pos].append({
Player.NAME: player[Player.NAME],
Player.POINTS_PROJECTION: player[Player.POINTS_PROJECTION],
Player.OPPONENT: player[Player.OPPONENT],
Player.GAME_TIME: player[Player.GAME_TIME],
Player.SALARY: player[Player.SALARY],
Player.INJURY_STATUS: player[Player.INJURY_STATUS],
Player.TEAM: player[Player.TEAM]
})
if display_lineup is True:
print(json.dumps(lineup, sort_keys=True, indent=4))
return lineup
def ignore_player(self, player_name, player_position=None,
player_team=None):
"""
Ignore player named by player_name so that it is never used in the
lineup. The position and team of the player can be specified for
further granularity (e.g. in the case two players have the same name).
All players satisfying the given conditions are ignored.
:param player_name: name of the player; Player.NAME
:param player_position: position of the player; Player.POSITION
:param player_team: team of the player; Player.TEAM
:return: None
:raises RuntimeError: if no player with the given conditions is found
"""
constraint_name = DfsOptimizer.IGNORE_PLAYER_PREFIX + player_name
player_variables = []
for var_id, var in self.model.variablesDict().items():
if self.players[var.name][Player.NAME] == player_name:
player_variables.append((var, 1))
if player_position is not None:
constraint_name += player_position
temp = []
for var, coefficient in player_variables:
if self.players[var.name][Player.POSITION] == player_position:
temp.append((var, coefficient))
player_variables = temp
if player_team is not None:
constraint_name += player_team
temp = []
for var, coefficient in player_variables:
if self.players[var.name][Player.TEAM] == player_team:
temp.append((var, coefficient))
player_variables = temp
if len(player_variables) == 0:
error_msg = 'No player named ' + player_name
if player_position is not None:
error_msg += ' (' + player_position + ')'
if player_team is not None:
error_msg += ' on ' + player_team
raise RuntimeError(error_msg + ' found')
affine_expression = pulp.LpAffineExpression(player_variables)
self.model.constraints[constraint_name] = pulp.LpConstraint(
affine_expression, pulp.LpConstraintEQ, constraint_name, 0)
def ignore_team(self, team_name):
"""
Constrain the solver so that no players from the given team can be
placed in the lineup.
:param team_name: name of the team to ignore
:return: None
:raises RuntimeError: if no team with the given name is found
"""
constraint_name = DfsOptimizer.IGNORE_PLAYER_PREFIX + team_name
player_variables = []
for var_id, var in self.model.variablesDict().items():
if self.players[var.name][Player.TEAM] == team_name:
player_variables.append((var, 1))
if len(player_variables) == 0:
raise RuntimeError('No players on ' + team_name + ' found')
affine_expression = pulp.LpAffineExpression(player_variables)
self.model.constraints[constraint_name] = pulp.LpConstraint(
affine_expression, pulp.LpConstraintEQ, constraint_name, 0)
def avoid_opponent(self, team_name):
"""
Constrain the solver so that no players playing against team_name
are selected for the lineup
:param team_name: name of the opponent to avoid
:return: None
:raises RuntimeError: if no team with the given name is found
"""
constraint_name = DfsOptimizer.AVOID_OPPONENT_PREFIX + team_name
player_variables = []
for var_id, var in self.model.variablesDict().items():
if self.players[var.name][Player.OPPONENT] == team_name:
player_variables.append((var, 1))
if len(player_variables) == 0:
raise RuntimeError('No players playing against ' + team_name +
' found')
affine_expression = pulp.LpAffineExpression(player_variables)
self.model.constraints[constraint_name] = pulp.LpConstraint(
affine_expression, pulp.LpConstraintEQ, constraint_name, 0)
def set_max_players_from_same_team(self, maximum):
"""
Constrain the solver so that at most 'maximum' players from the same
team can be placed in the lineup.
:param maximum: max number of players from team team_name allowed
:return: None
"""
team_expressions = {}
for var_id, var in self.model.variablesDict().items():
team = self.players[var.name][Player.TEAM]
if team not in team_expressions:
team_expressions[team] = pulp.LpAffineExpression(name=team, constant=0)
team_expressions[team] = team_expressions[team] + var
team_constraints = {}
for team in team_expressions:
team_constraints[DfsOptimizer.TEAM_MAX_PREFIX + team] = \
pulp.LpConstraint(team_expressions[team], pulp.LpConstraintLE,
DfsOptimizer.TEAM_MAX_PREFIX + team, maximum)
self.model.constraints.update(team_constraints)
def require_player(self, player_name, player_position=None,
player_team=None):
"""
Requires player named by player_name so that it is always used in the
lineup. The position and team of the player can be specified for further
granularity (e.g. in the case two players have the same name). If many
players satisfy the given condition, exactly one is allowed to be
chosen.
:param player_name: name of the player; Player.NAME
:param player_position: position of the player; Player.POSITION
:param player_team: team of the player; Player.TEAM
:return: None
:raises RuntimeError: if no player with the given conditions is found
"""
constraint_name = DfsOptimizer.REQUIRE_PLAYER_PREFIX + player_name
player_variables = []
for var_id, var in self.model.variablesDict().items():
if self.players[var.name][Player.NAME] == player_name:
player_variables.append((var, 1))
if player_position is not None:
constraint_name += player_position
temp = []
for var, coefficient in player_variables:
if self.players[var.name][Player.POSITION] == player_position:
temp.append((var, coefficient))
player_variables = temp
if player_team is not None:
constraint_name += player_team
temp = []
for var, coefficient in player_variables:
if self.players[var.name][Player.TEAM] == player_team:
temp.append((var, coefficient))
player_variables = temp
if len(player_variables) == 0:
error_msg = 'No player named ' + player_name
if player_position is not None:
error_msg += ' (' + player_position + ')'
if player_team is not None:
error_msg += ' on ' + player_team
raise RuntimeError(error_msg + ' found')
affine_expression = pulp.LpAffineExpression(player_variables)
self.model.constraints[constraint_name] = pulp.LpConstraint(
affine_expression, pulp.LpConstraintEQ, constraint_name, 1)
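# Illustrative use of the constraint helpers above (sketch only); the player,
# position and team names are hypothetical examples, not real data.
#
#   optimizer.ignore_team('BOS')
#   optimizer.avoid_opponent('GSW')
#   optimizer.set_max_players_from_same_team(3)
#   optimizer.ignore_player('John Smith', player_position='PG')
#   optimizer.require_player('Jane Doe', player_team='NYK')
#   lineup = optimizer.generate_lineup(display_lineup=False)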
| 44.004608 | 87 | 0.594198 |
4a252a5ed70467fa88ca94455f3bc12068b92194 | 610 | py | Python | task2/train.py | RainEggplant/traffic-sign-classification | 413d9229576d50e25031868229a1397b7b6e763d | [
"MIT"
] | null | null | null | task2/train.py | RainEggplant/traffic-sign-classification | 413d9229576d50e25031868229a1397b7b6e763d | [
"MIT"
] | null | null | null | task2/train.py | RainEggplant/traffic-sign-classification | 413d9229576d50e25031868229a1397b7b6e763d | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
from pytorch_lightning import Trainer
from stn_cnn import StnCnn
def main(args):
model = StnCnn(args)
trainer = Trainer.from_argparse_args(args)
trainer.fit(model)
if __name__ == '__main__':
parser = ArgumentParser()
# add PROGRAM level args
# parser.add_argument('--conda_env', type=str, default='some_name')
# add model specific args
parser = StnCnn.add_model_specific_args(parser)
# add all the available trainer options to argparse
parser = Trainer.add_argparse_args(parser)
args = parser.parse_args()
main(args)
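# Illustrative invocation (not part of the original script). The available
# flags come from pytorch_lightning's Trainer.add_argparse_args plus the
# StnCnn-specific ones, so exact names depend on the installed version, e.g.:
#
#   python train.py --gpus 1 --max_epochs 50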
| 22.592593 | 71 | 0.721311 |
4a252b113c55bf722325a7812537c3b4f2c1ab8d | 1,691 | py | Python | code/maciek_code/Hamming_extended.py | mholowko/Solaris | 25f65e72667f1e92e0d5c26bc9cbe159a6a15ace | [
"Apache-2.0"
] | null | null | null | code/maciek_code/Hamming_extended.py | mholowko/Solaris | 25f65e72667f1e92e0d5c26bc9cbe159a6a15ace | [
"Apache-2.0"
] | null | null | null | code/maciek_code/Hamming_extended.py | mholowko/Solaris | 25f65e72667f1e92e0d5c26bc9cbe159a6a15ace | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 10:00:48 2021
@author: hol428
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 10 10:16:38 2021
@author: hol428
"""
from scipy.spatial.distance import hamming
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def separate(sequence):
sequence2 = []
for elem in sequence:
sequence2.append(elem)
return(sequence2)
df1 = pd.read_excel("Designs.xlsx", sheet_name = "TOP15")
df2 = pd.read_excel("Designs.xlsx", sheet_name = "All")
instances1 = []
instances2 = []
for seq in df1['Core'].to_list():
seq = str(seq)
instances1.append(seq.upper())
for seq in df2['Core'].to_list():
seq = str(seq)
instances2.append(seq.upper())
sequences = []
distance = []
values = []
number=[]
difference = []
violindf = pd.DataFrame(columns=['Parent','Neighbour'])
for seq1 in instances1[0:1]:
print(instances1[0:1])
seq1sep = separate(seq1)
container = []
for seq2 in instances2:
seq2sep = separate(seq2)
for h in range(1, 7):
if hamming(seq1sep,seq2sep)*len(seq1sep) == h:
container.append(seq2)
distance.append(h)
values.append(df2.loc[df2['Core'] == container[-1]]['TIR'].values.astype(int)[0])
sequences.append(seq1)
number.append(len(container))
print(distance)
print(values)
data = pd.DataFrame ({'TIR': values, 'Distance': distance}, columns = ['TIR','Distance'])
print(data.head(60))
# data = data.sort_values(by=['TIR'])
ax = sns.swarmplot(data=data, x='Distance', y='TIR', palette='viridis')
ax.axhline(78,color='black')
plt.show() | 23.486111 | 97 | 0.627439 |
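# Note: scipy's hamming() returns the *fraction* of differing positions, which
# is why it is multiplied by len(seq1sep) above to recover the raw count, e.g.
#   hamming(list("ACGT"), list("ACGA")) * 4 == 1.0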
4a252c3895ec3e40d7310e8dad0b40c1ca5b50b3 | 1,665 | py | Python | modulo/conf/settings_class.py | Exanis/modulo | 6b1c17b4cdb4100055e521a7e8f6fdd69d1c9f08 | [
"MIT"
] | null | null | null | modulo/conf/settings_class.py | Exanis/modulo | 6b1c17b4cdb4100055e521a7e8f6fdd69d1c9f08 | [
"MIT"
] | null | null | null | modulo/conf/settings_class.py | Exanis/modulo | 6b1c17b4cdb4100055e521a7e8f6fdd69d1c9f08 | [
"MIT"
] | null | null | null | from os import environ
from tempfile import gettempdir
from typing import Any, Optional
from importlib import import_module
from .exceptions import MissingConfigurationException, MissingConfigurationKeyException
from .env import env_bool, env_list, env_str
from modulo.session.backend import File
DEFAULT_SETTINGS = {
'APPLICATION_KEY': env_str('APPLICATION_KEY'),
'DEBUG': env_bool('DEBUG'),
'CUSTOM_HANDLERS': {},
'ROUTES': [],
'SESSION_BACKEND': File,
'SESSION_OPTIONS': {
'folder': gettempdir()
},
'SESSION_LIFETIME': 86400
}
class Settings():
def __init__(self: 'Settings', module: Optional[str] = None) -> None:
if module is None:
module = environ.get('MODULO_CONFIG_MODULE', None)
if module is None:
raise MissingConfigurationException('Missing configuration module. You must either set the MODULO_CONFIG_MODULE environment variable or pass it as a parameter to your settings instance.')
try:
self._original_values = import_module(module)
except ModuleNotFoundError:
raise MissingConfigurationException(f'Module {module} cannot be loaded.')
self._values = {}
def __getattr__(self: 'Settings', key: str) -> Any:
if key not in self._values:
try:
self._values[key] = getattr(self._original_values, key)
except AttributeError:
if key in DEFAULT_SETTINGS:
self._values[key] = DEFAULT_SETTINGS[key]
else:
raise AttributeError(f'Setting {key} is not defined and have no default value')
return self._values[key]
| 37.840909 | 199 | 0.663063 |
4a252c6db93f398fd30a64d99060e32d22fc17b1 | 2,575 | py | Python | ip-memo/tests/unit/test_handler.py | rosolovskiy/ip-memo-lambda | 8e9de8b70d202fbbd8a6ee1c0299aa6a75104d05 | [
"Apache-2.0"
] | null | null | null | ip-memo/tests/unit/test_handler.py | rosolovskiy/ip-memo-lambda | 8e9de8b70d202fbbd8a6ee1c0299aa6a75104d05 | [
"Apache-2.0"
] | null | null | null | ip-memo/tests/unit/test_handler.py | rosolovskiy/ip-memo-lambda | 8e9de8b70d202fbbd8a6ee1c0299aa6a75104d05 | [
"Apache-2.0"
] | null | null | null | import json
import pytest
from ip_memo import app
@pytest.fixture()
def apigw_event():
""" Generates API GW Event"""
return {
"body": '{ "test": "body"}',
"resource": "/{proxy+}",
"requestContext": {
"resourceId": "123456",
"apiId": "1234567890",
"resourcePath": "/{proxy+}",
"httpMethod": "POST",
"requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef",
"accountId": "123456789012",
"identity": {
"apiKey": "",
"userArn": "",
"cognitoAuthenticationType": "",
"caller": "",
"userAgent": "Custom User Agent String",
"user": "",
"cognitoIdentityPoolId": "",
"cognitoIdentityId": "",
"cognitoAuthenticationProvider": "",
"sourceIp": "127.0.0.1",
"accountId": "",
},
"stage": "prod",
},
"queryStringParameters": {"foo": "bar"},
"headers": {
"Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)",
"Accept-Language": "en-US,en;q=0.8",
"CloudFront-Is-Desktop-Viewer": "true",
"CloudFront-Is-SmartTV-Viewer": "false",
"CloudFront-Is-Mobile-Viewer": "false",
"X-Forwarded-For": "127.0.0.1, 127.0.0.2",
"CloudFront-Viewer-Country": "US",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Upgrade-Insecure-Requests": "1",
"X-Forwarded-Port": "443",
"Host": "1234567890.execute-api.us-east-1.amazonaws.com",
"X-Forwarded-Proto": "https",
"X-Amz-Cf-Id": "aaaaaaaaaae3VYQb9jd-nvCd-de396Uhbp027Y2JvkCPNLmGJHqlaA==",
"CloudFront-Is-Tablet-Viewer": "false",
"Cache-Control": "max-age=0",
"User-Agent": "Custom User Agent String",
"CloudFront-Forwarded-Proto": "https",
"Accept-Encoding": "gzip, deflate, sdch",
},
"pathParameters": {"proxy": "/examplepath"},
"httpMethod": "POST",
"stageVariables": {"baz": "qux"},
"path": "/examplepath",
}
def test_lambda_handler(apigw_event, mocker):
ret = app.lambda_handler(apigw_event, "")
data = json.loads(ret["body"])
assert ret["statusCode"] == 200
assert "message" in ret["body"]
assert data["message"] == "hello world"
# assert "location" in data.dict_keys()
| 34.797297 | 99 | 0.515728 |
4a252cde6cd9c1ac1d1a5292e2447b2dcde98862 | 11,413 | py | Python | pandaserver/server/panda.py | mkycanopus/panda-server | 0f7c36800c033fada8bbde53dceaab98770b6df2 | [
"Apache-2.0"
] | 1 | 2019-08-30T13:47:51.000Z | 2019-08-30T13:47:51.000Z | pandaserver/server/panda.py | mkycanopus/panda-server | 0f7c36800c033fada8bbde53dceaab98770b6df2 | [
"Apache-2.0"
] | null | null | null | pandaserver/server/panda.py | mkycanopus/panda-server | 0f7c36800c033fada8bbde53dceaab98770b6df2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
"""
entry point
"""
import datetime
import traceback
import types
# config file
from config import panda_config
# initialize cx_Oracle using dummy connection
from taskbuffer.Initializer import initializer
initializer.init()
# initialzie TaskBuffer
from taskbuffer.TaskBuffer import taskBuffer
taskBuffer.init(panda_config.dbhost,panda_config.dbpasswd,panda_config.nDBConnection,True)
# initialize JobDispatcher
from jobdispatcher.JobDispatcher import jobDispatcher
if panda_config.nDBConnection != 0:
jobDispatcher.init(taskBuffer)
# initialize DataService
from dataservice.DataService import dataService
if panda_config.nDBConnection != 0:
dataService.init(taskBuffer)
# initialize UserIF
from userinterface.UserIF import userIF
if panda_config.nDBConnection != 0:
userIF.init(taskBuffer)
# import web I/F
allowedMethods = []
from taskbuffer.Utils import isAlive, putFile, deleteFile, getServer, updateLog, fetchLog,\
touchFile, getVomsAttr, putEventPickingRequest, getAttr, getFile, uploadLog
allowedMethods += ['isAlive','putFile','deleteFile','getServer','updateLog','fetchLog',
'touchFile','getVomsAttr','putEventPickingRequest','getAttr','getFile',
'uploadLog']
from dataservice.DataService import datasetCompleted, updateFileStatusInDisp
allowedMethods += ['datasetCompleted', 'updateFileStatusInDisp']
from jobdispatcher.JobDispatcher import getJob, updateJob, getStatus, genPilotToken,\
getEventRanges, updateEventRange, getKeyPair, updateEventRanges, getDNsForS3, getProxy, getCommands, ackCommands,\
checkJobStatus, checkEventsAvailability, updateJobsInBulk, getResourceTypes
allowedMethods += ['getJob', 'updateJob', 'getStatus', 'genPilotToken',
'getEventRanges', 'updateEventRange', 'getKeyPair',
'updateEventRanges', 'getDNsForS3', 'getProxy', 'getCommands', 'ackCommands',
'checkJobStatus', 'checkEventsAvailability', 'updateJobsInBulk', 'getResourceTypes']
from userinterface.UserIF import submitJobs, getJobStatus, queryPandaIDs, killJobs, reassignJobs,\
getJobStatistics, getJobStatisticsPerSite, resubmitJobs, queryLastFilesInDataset, getPandaIDsSite,\
getJobsToBeUpdated, updateProdDBUpdateTimes, runTaskAssignment, getAssigningTask, getSiteSpecs,\
getCloudSpecs, runBrokerage, seeCloudTask, queryJobInfoPerCloud, registerProxyKey, getProxyKey,\
getJobIDsInTimeRange, getPandIDsWithJobID, getFullJobStatus, getJobStatisticsForBamboo,\
getNUserJobs, addSiteAccess, listSiteAccess, getFilesInUseForAnal, updateSiteAccess,\
getPandaClientVer, getSlimmedFileInfoPandaIDs, runReBrokerage, getQueuedAnalJobs, getHighestPrioJobStat,\
getActiveDatasets, setCloudTaskByUser, getSerialNumberForGroupJob, getCachePrefixes,\
checkMergeGenerationStatus, sendLogInfo, getNumPilots, retryFailedJobsInActive,\
getJobStatisticsWithLabel, getPandaIDwithJobExeID, getJobStatisticsPerUserSite,\
getDisInUseForAnal, getLFNsInUseForAnal, getScriptOfflineRunning, setDebugMode,\
insertSandboxFileInfo, checkSandboxFile, changeJobPriorities, insertTaskParams,\
killTask, finishTask, getCmtConfigList, getJediTasksInTimeRange, getJediTaskDetails,\
retryTask, getRetryHistory, changeTaskPriority, reassignTask, changeTaskAttributePanda,\
pauseTask, resumeTask, increaseAttemptNrPanda, killUnfinishedJobs, changeTaskSplitRulePanda,\
changeTaskModTimePanda, avalancheTask, getPandaIDsWithTaskID, reactivateTask, getTaskStatus, \
reassignShare, listTasksInShare, getTaskParamsMap, updateWorkers, harvesterIsAlive,\
reportWorkerStats, addHarvesterDialogs, getJobStatisticsPerSiteResource, setNumSlotsForWP,\
reloadInput, enableJumboJobs, updateServiceMetrics, getUserJobMetadata, getJumboJobDatasets, getGShareStatus
allowedMethods += ['submitJobs','getJobStatus','queryPandaIDs','killJobs','reassignJobs',
'getJobStatistics','getJobStatisticsPerSite','resubmitJobs','queryLastFilesInDataset','getPandaIDsSite',
'getJobsToBeUpdated','updateProdDBUpdateTimes','runTaskAssignment','getAssigningTask','getSiteSpecs',
'getCloudSpecs','runBrokerage','seeCloudTask','queryJobInfoPerCloud','registerProxyKey','getProxyKey',
'getJobIDsInTimeRange','getPandIDsWithJobID','getFullJobStatus','getJobStatisticsForBamboo',
'getNUserJobs','addSiteAccess','listSiteAccess','getFilesInUseForAnal','updateSiteAccess',
'getPandaClientVer','getSlimmedFileInfoPandaIDs','runReBrokerage','getQueuedAnalJobs','getHighestPrioJobStat',
'getActiveDatasets','setCloudTaskByUser','getSerialNumberForGroupJob','getCachePrefixes',
'checkMergeGenerationStatus','sendLogInfo','getNumPilots','retryFailedJobsInActive',
'getJobStatisticsWithLabel','getPandaIDwithJobExeID','getJobStatisticsPerUserSite',
'getDisInUseForAnal','getLFNsInUseForAnal','getScriptOfflineRunning','setDebugMode',
'insertSandboxFileInfo','checkSandboxFile','changeJobPriorities','insertTaskParams',
'killTask','finishTask','getCmtConfigList','getJediTasksInTimeRange','getJediTaskDetails',
'retryTask','getRetryHistory','changeTaskPriority','reassignTask','changeTaskAttributePanda',
'pauseTask','resumeTask','increaseAttemptNrPanda','killUnfinishedJobs','changeTaskSplitRulePanda',
'changeTaskModTimePanda','avalancheTask','getPandaIDsWithTaskID', 'reactivateTask', 'getTaskStatus',
'reassignShare', 'listTasksInShare', 'getTaskParamsMap', 'updateWorkers', 'harvesterIsAlive',
'reportWorkerStats', 'addHarvesterDialogs', 'getJobStatisticsPerSiteResource', 'setNumSlotsForWP',
'reloadInput', 'enableJumboJobs', 'updateServiceMetrics', 'getUserJobMetadata', 'getJumboJobDatasets',
'getGShareStatus']
# import error
import taskbuffer.ErrorCode
# FastCGI/WSGI entry
if panda_config.useFastCGI or panda_config.useWSGI:
import os
import cgi
import sys
from pandalogger.PandaLogger import PandaLogger
# logger
_logger = PandaLogger().getLogger('Entry')
# dummy request object
class DummyReq:
def __init__(self,env,):
# environ
self.subprocess_env = env
# header
self.headers_in = {}
# content-length
if self.subprocess_env.has_key('CONTENT_LENGTH'):
self.headers_in["content-length"] = self.subprocess_env['CONTENT_LENGTH']
# get remote host
def get_remote_host(self):
if self.subprocess_env.has_key('REMOTE_HOST'):
return self.subprocess_env['REMOTE_HOST']
return ""
# accept json
def acceptJson(self):
try:
if self.subprocess_env.has_key('HTTP_ACCEPT'):
return 'application/json' in self.subprocess_env['HTTP_ACCEPT']
except:
pass
return False
# application
def application(environ, start_response):
# get method name
methodName = ''
if environ.has_key('SCRIPT_NAME'):
methodName = environ['SCRIPT_NAME'].split('/')[-1]
_logger.debug("PID=%s %s start" % (os.getpid(),methodName))
regStart = datetime.datetime.utcnow()
retType = None
# check method name
if not methodName in allowedMethods:
_logger.error("PID=%s %s is forbidden" % (os.getpid(),methodName))
exeRes = "False : %s is forbidden" % methodName
else:
# get method object
tmpMethod = None
try:
exec "tmpMethod = %s" % methodName
except:
pass
# object not found
if tmpMethod == None:
_logger.error("PID=%s %s is undefined" % (os.getpid(),methodName))
exeRes = "False"
else:
try:
# get params
tmpPars = cgi.FieldStorage(environ['wsgi.input'], environ=environ,
keep_blank_values=1)
# convert to map
params = {}
for tmpKey in tmpPars.keys():
if tmpPars[tmpKey].file != None and tmpPars[tmpKey].filename != None:
# file
params[tmpKey] = tmpPars[tmpKey]
else:
# string
params[tmpKey] = tmpPars.getfirst(tmpKey)
if panda_config.entryVerbose:
_logger.debug("PID=%s %s with %s" % (os.getpid(),methodName,str(params.keys())))
# dummy request object
dummyReq = DummyReq(environ)
# exec
exeRes = apply(tmpMethod,[dummyReq],params)
# extract return type
if type(exeRes) == types.DictType:
retType = exeRes['type']
exeRes = exeRes['content']
# convert bool to string
if exeRes in [True,False]:
exeRes = str(exeRes)
except:
errType,errValue = sys.exc_info()[:2]
_logger.error("execution failure : %s %s %s" % (errType,errValue,traceback.format_exc()))
errStr = ""
for tmpKey,tmpVal in environ.iteritems():
errStr += "%s : %s\n" % (tmpKey,str(tmpVal))
_logger.error(errStr)
# return internal server error
start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/plain')])
return ["%s %s" % (errType,errValue)]
if panda_config.entryVerbose:
_logger.debug("PID=%s %s out" % (os.getpid(),methodName))
regTime = datetime.datetime.utcnow() - regStart
_logger.info("PID=%s %s exec_time=%s.%03d sec, return len=%s B" % (os.getpid(),
methodName,regTime.seconds,
regTime.microseconds/1000,
len(str(exeRes))))
# return
if exeRes == taskbuffer.ErrorCode.EC_NotFound:
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['not found']
elif isinstance(exeRes,taskbuffer.ErrorCode.EC_Redirect):
start_response('302 Redirect', [('Location', exeRes.url)])
return ['redirect']
else:
if retType == 'json':
start_response('200 OK', [('Content-Type', 'application/json')])
else:
start_response('200 OK', [('Content-Type', 'text/plain')])
return [exeRes]
# start server
if panda_config.useFastCGI:
from flup.server.fcgi import WSGIServer
WSGIServer(application,multithreaded=False).run()
| 50.277533 | 129 | 0.634014 |
4a252d074a34f116ccb5de75b663c01cbf4014dd | 3,257 | py | Python | Backend/project/views.py | divSivasankaran/BioSecure | 0bd1494da2a3c7463809ea4d8a6957170e449503 | [
"Unlicense"
] | 3 | 2019-09-19T12:36:07.000Z | 2020-09-21T02:09:21.000Z | Backend/project/views.py | div1090/BioSecure | 0bd1494da2a3c7463809ea4d8a6957170e449503 | [
"Unlicense"
] | null | null | null | Backend/project/views.py | div1090/BioSecure | 0bd1494da2a3c7463809ea4d8a6957170e449503 | [
"Unlicense"
] | 2 | 2019-07-08T11:58:34.000Z | 2020-12-13T13:10:29.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 23 15:48:21 2017
@author: divya
"""
from flask import jsonify
from flask import request, render_template
from flask import abort, make_response
from PIL import Image
from scipy.spatial import distance
import numpy as np
import io
import os
from . import app
from . import FaceVerifier
base = "/biosecure/api/v1"
FV = None
########################
#### helper methods ####
########################
def import_data(request):
img = None
try:
if 'image' in request.files:
img = request.files['image']
print("Image Received")
except KeyError as e:
raise ValueError('Invalid image: Operation failed')
return img
def initVGG():
global FV
FV = FaceVerifier.FaceVerifier()
FV.init()
FV.loadVGGFace()
def getVGGFeature(image):
global FV
feature = None
if FV != None:
feature = FV.get_caffe_feature(image)
return feature
def saveEnrolledFeature(feature):
feature = np.asarray(feature)
np.savetxt(os.path.join('project', 'data',"enrolled_feature.csv"),feature)
return None
def loadEnrolledFeature():
return np.loadtxt(os.path.join('project', 'data',"enrolled_feature.csv"))
def compare(feature1,feature2):
score = -1
score = distance.cosine(feature1,feature2)
return score
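# Note: scipy's distance.cosine() returns 1 - cosine_similarity, so 0.0 means
# identical feature directions and larger values mean less similar faces; any
# accept/reject threshold applied to this score is left to the caller.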
########################
#### api methods ####
########################
@app.route(base + '/')
def index():
return jsonify(render_template('index.html')),200
@app.route(base + '/enroll', methods=['POST'])
def enroll():
print("args ", request.args)
print("files ", request.files)
print("data ",request.data)
print("val ", request.values)
print("json ", request.form)
img = import_data(request)
if img == None:
abort(415)
try:
img.save(os.path.join('project', 'data', 'enrolled_image.jpg'))
fp = os.path.join(os.getcwd(), 'project', 'data', 'enrolled_image.jpg')
print(fp)
feature = getVGGFeature(fp)
saveEnrolledFeature(feature)
except KeyError as e:
abort(503)
return jsonify({'status': 'Success'}),200
@app.route(base + '/verify', methods=['POST'])
def verify():
if not os.path.isfile("project/data/enrolled_feature.csv"):
abort(412)
img = import_data(request)
if img == None:
abort(415)
img.save(os.path.join('project', 'data', 'verify_image.jpg'))
test_feature = getVGGFeature(os.path.join(os.getcwd(), 'project', 'data', 'verify_image.jpg'))
enrolled_feature = loadEnrolledFeature()
score = compare(test_feature,enrolled_feature)
print("SCORE ", score)
return jsonify({'status': 'Success', 'score': str(score)}),200
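# Illustrative client usage (not part of the original module). The host/port
# are assumptions; the multipart field name 'image' matches import_data().
#
#   import requests
#   base = 'http://localhost:5000/biosecure/api/v1'
#   with open('face.jpg', 'rb') as f:
#       requests.post(base + '/enroll', files={'image': f})
#   with open('probe.jpg', 'rb') as f:
#       r = requests.post(base + '/verify', files={'image': f})
#   print(r.json())  # e.g. {'status': 'Success', 'score': '0.12'}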
####### ERROR HANDLERS #######
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.errorhandler(415)
def invalid_image(error):
return make_response(jsonify({'error': 'Invalid Image'}), 415)
@app.errorhandler(503)
def enroll_fail(error):
return make_response(jsonify({'error': 'Failed to enroll. Try again'}), 503)
@app.errorhandler(412)
def no_enrollments(error):
return make_response(jsonify({'error': 'No enrollments. Enroll first.'}), 412)
| 26.056 | 98 | 0.63494 |
4a252dbfe52b29810de92aee140d2618ecc9b1bf | 838 | py | Python | heuristics/opponentdistance_heuristic.py | TeamJumpstart/InformatiCup2021 | a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4 | [
"MIT"
] | 10 | 2021-04-18T17:54:02.000Z | 2021-07-26T19:58:41.000Z | heuristics/opponentdistance_heuristic.py | DiddiZ/InformatiCup2021 | a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4 | [
"MIT"
] | 1 | 2021-04-21T15:13:41.000Z | 2021-04-21T15:13:41.000Z | heuristics/opponentdistance_heuristic.py | DiddiZ/InformatiCup2021 | a4d07992f772d3a1e9ef715fa8e9ce2234cd47a4 | [
"MIT"
] | 1 | 2021-04-20T09:42:50.000Z | 2021-04-20T09:42:50.000Z | import numpy as np
from heuristics.heuristic import Heuristic
class OpponentDistanceHeuristic(Heuristic):
"""Computes the distance sum to all players up to a threshold."""
def __init__(self, dist_threshold=16):
"""Initialize OpponentDistanceHeuristic."""
self.dist_threshold = dist_threshold
def score(self, cells, player, opponents, rounds, deadline):
"""Computes the distance to all players."""
min_opponent_dist = min(
min(np.sum(np.abs((player.position - o.position))) for o in opponents if o.active), self.dist_threshold
)
return min_opponent_dist / np.sum(cells.shape)
def __str__(self):
"""Get readable representation."""
return "OpponentDistanceHeuristic(" + \
f"dist_threshold={self.dist_threshold}" + \
")"
| 34.916667 | 115 | 0.661098 |
4a252dc2f707ab536c65c08bd3ec27c5049a0ee9 | 36,565 | py | Python | gs_quant/test/timeseries/test_measures_reports.py | webclinic017/gs-quant | ebb8ee5e1d954ab362aa567293906ce51818cfa8 | [
"Apache-2.0"
] | null | null | null | gs_quant/test/timeseries/test_measures_reports.py | webclinic017/gs-quant | ebb8ee5e1d954ab362aa567293906ce51818cfa8 | [
"Apache-2.0"
] | null | null | null | gs_quant/test/timeseries/test_measures_reports.py | webclinic017/gs-quant | ebb8ee5e1d954ab362aa567293906ce51818cfa8 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import pandas
import pandas as pd
import pytest
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures_reports as mr
from gs_quant.api.gs.assets import GsTemporalXRef
from gs_quant.api.gs.data import MarketDataResponseFrame
from gs_quant.data.core import DataContext
from gs_quant.errors import MqValueError
from gs_quant.markets.portfolio_manager import CustomAUMDataPoint
from gs_quant.markets.report import PerformanceReport, ThematicReport
from gs_quant.markets.securities import Stock
from gs_quant.models.risk_model import FactorRiskModel as Factor_Risk_Model
from gs_quant.target.common import ReportParameters, XRef
from gs_quant.target.portfolios import RiskAumSource
from gs_quant.target.reports import Report, PositionSourceType, ReportType
from gs_quant.target.risk_models import RiskModel, RiskModelCoverage, RiskModelTerm, RiskModelUniverseIdentifier
risk_model = RiskModel(coverage=RiskModelCoverage.Country, id_='model_id', name='Fake Risk Model',
term=RiskModelTerm.Long, universe_identifier=RiskModelUniverseIdentifier.gsid, vendor='GS',
version=1.0)
factor_risk_report = Report(position_source_id='position source id',
position_source_type=PositionSourceType.Portfolio,
type_=ReportType.Portfolio_Factor_Risk,
id_='report_id',
parameters=ReportParameters(risk_model='risk_model_id'),
status='new')
asset_factor_risk_report = Report(position_source_id='position source id',
position_source_type=PositionSourceType.Asset,
type_=ReportType.Portfolio_Factor_Risk,
id_='report_id',
parameters=ReportParameters(risk_model='risk_model_id'),
status='new')
ppa_report = Report(position_source_id='position source id',
position_source_type=PositionSourceType.Portfolio,
type_=ReportType.Portfolio_Performance_Analytics,
id_='report_id',
parameters=ReportParameters(risk_model='risk_model_id'),
status='new')
factor_data = [
{
'date': '2020-11-23',
'reportId': 'report_id',
'factor': 'factor_id',
'factorCategory': 'CNT',
'pnl': 11.23,
'exposure': -11.23,
'proportionOfRisk': 1
},
{
'date': '2020-11-24',
'reportId': 'report_id',
'factor': 'factor_id',
'factorCategory': 'CNT',
'pnl': 11.24,
'exposure': -11.24,
'proportionOfRisk': 2
},
{
'date': '2020-11-25',
'reportId': 'report_id',
'factor': 'factor_id',
'factorCategory': 'CNT',
'pnl': 11.25,
'exposure': -11.25,
'proportionOfRisk': 3
}
]
aggregate_factor_data = [
{
'date': '2020-11-23',
'reportId': 'report_id',
'factor': 'Factor',
'factorCategory': 'CNT',
'pnl': 11.23,
'exposure': -11.23,
'proportionOfRisk': 1,
'dailyRisk': 1,
'annualRisk': 1
},
{
'date': '2020-11-24',
'reportId': 'report_id',
'factor': 'Factor',
'factorCategory': 'CNT',
'pnl': 11.24,
'exposure': -11.24,
'proportionOfRisk': 2,
'dailyRisk': 2,
'annualRisk': 2
},
{
'date': '2020-11-25',
'reportId': 'report_id',
'factor': 'Factor',
'factorCategory': 'CNT',
'pnl': 11.25,
'exposure': -11.25,
'proportionOfRisk': 3,
'dailyRisk': 3,
'annualRisk': 3
}
]
constituents_data_l_s = {
'assetId': [
"MA1",
"MA1",
"MA1",
"MA2",
"MA2",
"MA2"
],
'quantity': [
-1.,
-2.,
-3.,
1.,
2.,
3.
],
'netExposure': [
-1.,
-2.,
-3.,
1.,
2.,
3.
],
'pnl': [
0.,
-1.,
-1.,
0.,
1.,
1.
],
'date': [
'2020-01-02',
'2020-01-03',
'2020-01-04',
'2020-01-02',
'2020-01-03',
'2020-01-04'
]
}
pnl_data_l_s = {
'quantity': [
-1.,
-2.,
-3.,
-1.,
-2.,
-3.,
1.,
2.,
3.,
1.,
2.,
3.
],
'pnl': [
0.,
-1.,
-1.,
0.,
-1.,
-1.,
0.,
1.,
1.,
0.,
1.,
1.
],
'date': [
'2020-01-02',
'2020-01-03',
'2020-01-04',
'2020-01-02',
'2020-01-03',
'2020-01-04',
'2020-01-02',
'2020-01-03',
'2020-01-04',
'2020-01-02',
'2020-01-03',
'2020-01-04'
]
}
constituents_data = {
'netExposure': [
1.,
2.,
3.
],
'assetId': [
"MA",
"MA",
"MA"
],
'quantity': [
1.,
1.,
1.
],
'pnl': [
0.,
1.,
1.
],
'date': [
'2020-01-02',
'2020-01-03',
'2020-01-04'
]
}
constituents_data_s = {
'netExposure': [
-1.,
-2.,
-3.
],
'assetId': [
"MA",
"MA",
"MA"
],
'quantity': [
-1.,
-1.,
-1.
],
'pnl': [
0.,
1.,
1.
],
'date': [
'2020-01-02',
'2020-01-03',
'2020-01-04'
]
}
thematic_data = [
{
"date": "2021-07-12",
"reportId": "PTAID",
"basketId": "MA01GPR89HZF1FZ5",
"region": "Asia",
"grossExposure": 3.448370345015856E8,
"thematicExposure": 2,
"thematicBeta": 1,
"updateTime": "2021-07-20T23:43:38Z"
},
{
"date": "2021-07-13",
"reportId": "PTAID",
"basketId": "MA01GPR89HZF1FZ5",
"region": "Asia",
"grossExposure": 3.375772519907556E8,
"thematicExposure": 2,
"thematicBeta": 1,
"updateTime": "2021-07-20T23:43:38Z"
},
{
"date": "2021-07-14",
"reportId": "PTAID",
"basketId": "MA01GPR89HZF1FZ5",
"region": "Asia",
"grossExposure": 3.321189950666118E8,
"thematicExposure": 2,
"thematicBeta": 1,
"updateTime": "2021-07-20T23:43:38Z"
},
{
"date": "2021-07-15",
"reportId": "PTAID",
"basketId": "MA01GPR89HZF1FZ5",
"region": "Asia",
"grossExposure": 3.274071805135091E8,
"thematicExposure": 2,
"thematicBeta": 1,
"updateTime": "2021-07-20T23:43:38Z"
}
]
def mock_risk_model():
risk_model = RiskModel(coverage=RiskModelCoverage.Country, id_='model_id', name='Fake Risk Model',
term=RiskModelTerm.Long, universe_identifier=RiskModelUniverseIdentifier.gsid, vendor='GS',
version=1.0)
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
actual = Factor_Risk_Model.get(model_id='model_id')
replace.restore()
return actual
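# The pattern in mock_risk_model() repeats throughout this module: a
# testfixtures Replacer swaps an API call for a Mock, the measure under test
# is evaluated (usually inside a DataContext), and replace.restore() undoes
# the patching. A minimal sketch of that skeleton:
#
#   replace = Replacer()
#   mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
#   mock.return_value = factor_risk_report
#   ...  # call the measure and assert on the returned series
#   replace.restore()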
def test_factor_exposure():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.factor_exposure('report_id', 'Factor Name')
assert all(actual.values == [-11.23, -11.24, -11.25])
with pytest.raises(MqValueError):
mr.factor_exposure('report_id', 'Wrong Factor Name')
replace.restore()
def test_factor_exposure_percent():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
# mock getting aum source
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_aum_source', Mock())
mock.return_value = RiskAumSource.Custom_AUM
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
# mock getting aum
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_custom_aum', Mock())
mock.return_value = [CustomAUMDataPoint(date=datetime.date(2020, 11, 23), aum=2),
CustomAUMDataPoint(date=datetime.date(2020, 11, 24), aum=2),
CustomAUMDataPoint(date=datetime.date(2020, 11, 25), aum=2)]
actual = mr.factor_exposure('report_id', 'Factor Name', 'Percent')
assert all(actual.values == [-11.23 * 50, -11.24 * 50, -11.25 * 50])
with pytest.raises(MqValueError):
# mock getting aum with missing data
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_custom_aum', Mock())
mock.return_value = [CustomAUMDataPoint(date=datetime.date(2020, 11, 23), aum=2),
CustomAUMDataPoint(date=datetime.date(2020, 11, 25), aum=2)]
mr.factor_exposure('report_id', 'Factor Name', 'Percent')
replace.restore()
def test_factor_pnl():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.factor_pnl('report_id', 'Factor Name')
assert all(actual.values == [11.23, 11.24, 11.25])
with pytest.raises(MqValueError):
mr.factor_pnl('report_id', 'Wrong Factor Name')
replace.restore()
def test_factor_pnl_percent():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
# mock getting aum source
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_aum_source', Mock())
mock.return_value = RiskAumSource.Long
# mock getting performance report
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_performance_report', Mock())
mock.return_value = PerformanceReport(id='ID')
# mock getting aum
mock = replace('gs_quant.markets.report.PerformanceReport.get_long_exposure', Mock())
mock.return_value = pandas.DataFrame.from_dict({'date': ['2020-11-25'], 'longExposure': [100]})
actual = mr.factor_pnl('report_id', 'Factor Name', 'Percent')
assert all(actual.values == [11.23, 11.24, 11.25])
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
# mock getting aum source
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_aum_source', Mock())
mock.return_value = RiskAumSource.Short
# mock getting performance report
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_performance_report', Mock())
mock.return_value = PerformanceReport(id='ID')
# mock getting aum
mock = replace('gs_quant.markets.report.PerformanceReport.get_short_exposure', Mock())
mock.return_value = pandas.DataFrame.from_dict({'date': ['2020-11-25'], 'shortExposure': [100]})
actual = mr.factor_pnl('report_id', 'Factor Name', 'Percent')
assert all(actual.values == [11.23, 11.24, 11.25])
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
# mock getting aum source
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_aum_source', Mock())
mock.return_value = RiskAumSource.Gross
# mock getting performance report
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_performance_report', Mock())
mock.return_value = PerformanceReport(id='ID')
# mock getting aum
mock = replace('gs_quant.markets.report.PerformanceReport.get_gross_exposure', Mock())
mock.return_value = pandas.DataFrame.from_dict({'date': ['2020-11-25'], 'grossExposure': [400]})
actual = mr.factor_pnl('report_id', 'Factor Name', 'Percent')
assert all(actual.values == [11.23 / 4, 11.24 / 4, 11.25 / 4])
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
# mock getting aum source
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_aum_source', Mock())
mock.return_value = RiskAumSource.Net
# mock getting performance report
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_performance_report', Mock())
mock.return_value = PerformanceReport(id='ID')
# mock getting aum
mock = replace('gs_quant.markets.report.PerformanceReport.get_net_exposure', Mock())
mock.return_value = pandas.DataFrame.from_dict({'date': ['2020-11-25'], 'netExposure': [200]})
actual = mr.factor_pnl('report_id', 'Factor Name', 'Percent')
assert all(actual.values == [11.23 / 2, 11.24 / 2, 11.25 / 2])
with pytest.raises(MqValueError):
# mock getting aum source
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_aum_source', Mock())
mock.return_value = RiskAumSource.Net
# mock getting performance report
mock = replace('gs_quant.markets.portfolio_manager.PortfolioManager.get_performance_report', Mock())
mock.return_value = PerformanceReport(id='ID')
# mock getting aum
mock = replace('gs_quant.markets.report.PerformanceReport.get_net_exposure', Mock())
mock.return_value = pandas.DataFrame.from_dict({'date': ['2020-11-24'], 'netExposure': [200]})
mr.factor_pnl('report_id', 'Factor Name', 'Percent')
replace.restore()
def test_asset_factor_pnl_percent():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = asset_factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
with pytest.raises(MqValueError):
mr.factor_pnl('report_id', 'Factor Name', 'Percent')
replace.restore()
def test_factor_proportion_of_risk():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.factor_proportion_of_risk('report_id', 'Factor Name')
assert all(actual.values == [1, 2, 3])
with pytest.raises(MqValueError):
mr.factor_proportion_of_risk('report_id', 'Wrong Factor Name')
replace.restore()
def test_get_factor_data():
replace = Replacer()
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = ppa_report
with pytest.raises(MqValueError):
mr.factor_proportion_of_risk('report_id', 'Factor Name')
replace.restore()
def test_aggregate_factor_support():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = aggregate_factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.factor_proportion_of_risk('report_id', 'Factor')
assert all(actual.values == [1, 2, 3])
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.daily_risk('report_id', 'Factor')
assert all(actual.values == [1, 2, 3])
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.annual_risk('report_id', 'Factor')
assert all(actual.values == [1, 2, 3])
with pytest.raises(MqValueError):
mr.daily_risk('report_id', 'Factor Name')
with pytest.raises(MqValueError):
mr.annual_risk('report_id', 'Factor Name')
replace.restore()
def test_normalized_performance():
idx = pd.date_range('2020-01-02', freq='D', periods=3)
replace = Replacer()
expected = {None: pd.Series(data=[1, 2, 3], index=idx,
name='normalizedPerformance', dtype='float64'),
"Long": pd.Series(data=[1, 2, 3], index=idx,
name='normalizedPerformance', dtype='float64')}
mock = replace('gs_quant.api.gs.portfolios.GsPortfolioApi.get_reports', Mock())
mock.return_value = [
Report.from_dict({'id': 'RP1', 'positionSourceType': 'Portfolio', 'positionSourceId': 'MP1',
'type': 'Portfolio Performance Analytics',
'parameters': {'transactionCostModel': 'FIXED'}})]
# mock PerformanceReport.get_portfolio_constituents()
mock = replace('gs_quant.markets.report.PerformanceReport.get_portfolio_constituents', Mock())
mock.return_value = MarketDataResponseFrame(data=constituents_data)
# mock PerformanceReport.get()
mock = replace('gs_quant.markets.report.PerformanceReport.get', Mock())
mock.return_value = PerformanceReport(report_id='RP1',
position_source_type='Portfolio',
position_source_id='MP1',
report_type='Portfolio Performance Analytics',
parameters=ReportParameters(transaction_cost_model='FIXED'))
for k, v in expected.items():
with DataContext(datetime.date(2020, 1, 1), datetime.date(2019, 1, 3)):
actual = mr.normalized_performance('MP1', k)
assert all(actual.values == v.values)
replace.restore()
def test_normalized_performance_short():
idx = pd.date_range('2020-01-02', freq='D', periods=3)
replace = Replacer()
expected = {"Short": pd.Series(data=[1, 1 / 2, 1 / 3], index=idx,
name='normalizedPerformance', dtype='float64'),
"Long": pd.Series(data=[1, 2, 3], index=idx,
name='normalizedPerformance', dtype='float64'),
None: pd.Series(data=[1, (2 + 1 / 2) / 2, (3 + 1 / 3) / 2], index=idx,
name='normalizedPerformance', dtype='float64')}
mock = replace('gs_quant.api.gs.portfolios.GsPortfolioApi.get_reports', Mock())
mock.return_value = [
Report.from_dict({'id': 'RP1', 'positionSourceType': 'Portfolio', 'positionSourceId': 'MP1',
'type': 'Portfolio Performance Analytics',
'parameters': {'transactionCostModel': 'FIXED'}})]
# mock PerformanceReport.get_portfolio_constituents()
mock = replace('gs_quant.markets.report.PerformanceReport.get_portfolio_constituents', Mock())
mock.return_value = MarketDataResponseFrame(data=constituents_data_l_s)
# mock PerformanceReport.get()
mock = replace('gs_quant.markets.report.PerformanceReport.get', Mock())
mock.return_value = PerformanceReport(report_id='RP1',
position_source_type='Portfolio',
position_source_id='MP1',
report_type='Portfolio Performance Analytics',
parameters=ReportParameters(transaction_cost_model='FIXED'))
for k, v in expected.items():
with DataContext(datetime.date(2020, 1, 1), datetime.date(2019, 1, 3)):
actual = mr.normalized_performance('MP1', k)
assert all((actual.values - v.values) < 0.01)
replace.restore()
def test_get_long_pnl():
idx = pd.date_range('2020-01-02', freq='D', periods=3)
replace = Replacer()
expected = pd.Series(data=[0, 2, 2], index=idx, name='longPnl', dtype='float64')
mock = replace('gs_quant.api.gs.portfolios.GsPortfolioApi.get_reports', Mock())
mock.return_value = [
Report.from_dict({'id': 'RP1', 'positionSourceType': 'Portfolio', 'positionSourceId': 'MP1',
'type': 'Portfolio Performance Analytics',
'parameters': {'transactionCostModel': 'FIXED'}})]
# mock PerformanceReport.get_portfolio_constituents()
mock = replace('gs_quant.markets.report.PerformanceReport.get_portfolio_constituents', Mock())
mock.return_value = MarketDataResponseFrame(data=pnl_data_l_s)
# mock PerformanceReport.get()
mock = replace('gs_quant.markets.report.PerformanceReport.get', Mock())
mock.return_value = PerformanceReport(report_id='RP1',
position_source_type='Portfolio',
position_source_id='MP1',
report_type='Portfolio Performance Analytics',
parameters=ReportParameters(transaction_cost_model='FIXED'))
with DataContext(datetime.date(2020, 1, 1), datetime.date(2019, 1, 3)):
actual = mr.long_pnl('MP1')
assert all(actual.values == expected.values)
replace.restore()
def test_get_short_pnl():
idx = pd.date_range('2020-01-02', freq='D', periods=3)
replace = Replacer()
expected = pd.Series(data=[0, -2, -2], index=idx, name='shortPnl', dtype='float64')
mock = replace('gs_quant.api.gs.portfolios.GsPortfolioApi.get_reports', Mock())
mock.return_value = [
Report.from_dict({'id': 'RP1', 'positionSourceType': 'Portfolio', 'positionSourceId': 'MP1',
'type': 'Portfolio Performance Analytics',
'parameters': {'transactionCostModel': 'FIXED'}})]
# mock PerformanceReport.get_portfolio_constituents()
mock = replace('gs_quant.markets.report.PerformanceReport.get_portfolio_constituents', Mock())
mock.return_value = MarketDataResponseFrame(data=pnl_data_l_s)
# mock PerformanceReport.get()
mock = replace('gs_quant.markets.report.PerformanceReport.get', Mock())
mock.return_value = PerformanceReport(report_id='RP1',
position_source_type='Portfolio',
position_source_id='MP1',
report_type='Portfolio Performance Analytics',
parameters=ReportParameters(transaction_cost_model='FIXED'))
with DataContext(datetime.date(2020, 1, 1), datetime.date(2019, 1, 3)):
actual = mr.short_pnl('MP1')
assert all(actual.values == expected.values)
replace.restore()
def test_get_short_pnl_empty():
replace = Replacer()
expected = pd.Series(dtype=float)
mock = replace('gs_quant.api.gs.portfolios.GsPortfolioApi.get_reports', Mock())
mock.return_value = [
Report.from_dict({'id': 'RP1', 'positionSourceType': 'Portfolio', 'positionSourceId': 'MP1',
'type': 'Portfolio Performance Analytics',
'parameters': {'transactionCostModel': 'FIXED'}})]
# mock PerformanceReport.get_portfolio_constituents()
mock = replace('gs_quant.markets.report.PerformanceReport.get_portfolio_constituents', Mock())
mock.return_value = MarketDataResponseFrame(data=constituents_data)
# mock PerformanceReport.get()
mock = replace('gs_quant.markets.report.PerformanceReport.get', Mock())
mock.return_value = PerformanceReport(report_id='RP1',
position_source_type='Portfolio',
position_source_id='MP1',
report_type='Portfolio Performance Analytics',
parameters=ReportParameters(transaction_cost_model='FIXED'))
with DataContext(datetime.date(2020, 1, 1), datetime.date(2019, 1, 3)):
actual = mr.short_pnl('MP1')
assert all(actual.values == expected.values)
replace.restore()
def test_get_long_pnl_empty():
replace = Replacer()
expected = pd.Series(dtype=float)
mock = replace('gs_quant.api.gs.portfolios.GsPortfolioApi.get_reports', Mock())
mock.return_value = [
Report.from_dict({'id': 'RP1', 'positionSourceType': 'Portfolio', 'positionSourceId': 'MP1',
'type': 'Portfolio Performance Analytics',
'parameters': {'transactionCostModel': 'FIXED'}})]
# mock PerformanceReport.get_portfolio_constituents()
mock = replace('gs_quant.markets.report.PerformanceReport.get_portfolio_constituents', Mock())
mock.return_value = MarketDataResponseFrame(data=constituents_data_s)
# mock PerformanceReport.get()
mock = replace('gs_quant.markets.report.PerformanceReport.get', Mock())
mock.return_value = PerformanceReport(report_id='RP1',
position_source_type='Portfolio',
position_source_id='MP1',
report_type='Portfolio Performance Analytics',
parameters=ReportParameters(transaction_cost_model='FIXED'))
with DataContext(datetime.date(2020, 1, 1), datetime.date(2019, 1, 3)):
actual = mr.long_pnl('MP1')
assert all(actual.values == expected.values)
replace.restore()
def test_thematic_exposure():
replace = Replacer()
# mock getting PTA report
mock = replace('gs_quant.markets.report.ThematicReport.get', Mock())
mock.return_value = ThematicReport(id='report_id')
# mock getting thematic exposure
mock = replace('gs_quant.markets.report.ThematicReport.get_thematic_exposure', Mock())
mock.return_value = pd.DataFrame(thematic_data)
# mock getting asset
mock = Stock('MAA0NE9QX2ABETG6', 'Test Asset')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [
GsTemporalXRef(datetime.date(2019, 1, 1),
datetime.date(2952, 12, 31),
XRef(ticker='basket_ticker', ))
]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
with DataContext(datetime.date(2020, 7, 12), datetime.date(2020, 7, 15)):
actual = mr.thematic_exposure('report_id', 'basket_ticker')
assert all(actual.values == [2, 2, 2, 2])
replace.restore()
def test_thematic_beta():
replace = Replacer()
# mock getting PTA report
mock = replace('gs_quant.markets.report.ThematicReport.get', Mock())
mock.return_value = ThematicReport(id='report_id')
# mock getting thematic exposure
mock = replace('gs_quant.markets.report.ThematicReport.get_thematic_betas', Mock())
mock.return_value = pd.DataFrame(thematic_data)
# mock getting asset
mock = Stock('MAA0NE9QX2ABETG6', 'Test Asset')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [
GsTemporalXRef(datetime.date(2019, 1, 1),
datetime.date(2952, 12, 31),
XRef(ticker='basket_ticker', ))
]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
with DataContext(datetime.date(2020, 7, 12), datetime.date(2020, 7, 15)):
actual = mr.thematic_beta('report_id', 'basket_ticker')
assert all(actual.values == [1, 1, 1, 1])
replace.restore()
if __name__ == '__main__':
pytest.main(args=[__file__])
| 36.934343 | 118 | 0.617011 |
4a252e69a870d22d7b4d6220d6bc4c2a57053958 | 36,447 | py | Python | apigpio_fork/apigpio.py | magnusnordlander/silvia-pi | 3b927f73f8c8608a17f1f0e6458d06eff0f1d09a | ["MIT"] | 16 | 2020-06-09T22:34:18.000Z | 2021-02-09T15:31:16.000Z | apigpio_fork/apigpio.py | magnusnordlander/silvia-pi | 3b927f73f8c8608a17f1f0e6458d06eff0f1d09a | ["MIT"] | null | null | null | apigpio_fork/apigpio.py | magnusnordlander/silvia-pi | 3b927f73f8c8608a17f1f0e6458d06eff0f1d09a | ["MIT"] | 1 | 2020-09-03T15:21:15.000Z | 2020-09-03T15:21:15.000Z |
import socket
import struct
import sys
import functools
from .ctes import *
exceptions = True
# pigpio command numbers
_PI_CMD_MODES = 0
_PI_CMD_MODEG = 1
_PI_CMD_PUD = 2
_PI_CMD_READ = 3
_PI_CMD_WRITE = 4
_PI_CMD_PWM = 5
_PI_CMD_PRS = 6
_PI_CMD_PFS = 7
_PI_CMD_SERVO = 8
_PI_CMD_WDOG = 9
_PI_CMD_BR1 = 10
_PI_CMD_BR2 = 11
_PI_CMD_BC1 = 12
_PI_CMD_BC2 = 13
_PI_CMD_BS1 = 14
_PI_CMD_BS2 = 15
_PI_CMD_TICK = 16
_PI_CMD_HWVER = 17
_PI_CMD_NO = 18
_PI_CMD_NB = 19
_PI_CMD_NP = 20
_PI_CMD_NC = 21
_PI_CMD_PRG = 22
_PI_CMD_PFG = 23
_PI_CMD_PRRG = 24
_PI_CMD_HELP = 25
_PI_CMD_PIGPV = 26
_PI_CMD_WVCLR = 27
_PI_CMD_WVAG = 28
_PI_CMD_WVAS = 29
_PI_CMD_WVGO = 30
_PI_CMD_WVGOR = 31
_PI_CMD_WVBSY = 32
_PI_CMD_WVHLT = 33
_PI_CMD_WVSM = 34
_PI_CMD_WVSP = 35
_PI_CMD_WVSC = 36
_PI_CMD_TRIG = 37
_PI_CMD_PROC = 38
_PI_CMD_PROCD = 39
_PI_CMD_PROCR = 40
_PI_CMD_PROCS = 41
_PI_CMD_SLRO = 42
_PI_CMD_SLR = 43
_PI_CMD_SLRC = 44
_PI_CMD_PROCP = 45
_PI_CMD_MICRO = 46
_PI_CMD_MILLI = 47
_PI_CMD_PARSE = 48
_PI_CMD_WVCRE = 49
_PI_CMD_WVDEL = 50
_PI_CMD_WVTX = 51
_PI_CMD_WVTXR = 52
_PI_CMD_WVNEW = 53
_PI_CMD_I2CO = 54
_PI_CMD_I2CC = 55
_PI_CMD_I2CRD = 56
_PI_CMD_I2CWD = 57
_PI_CMD_I2CWQ = 58
_PI_CMD_I2CRS = 59
_PI_CMD_I2CWS = 60
_PI_CMD_I2CRB = 61
_PI_CMD_I2CWB = 62
_PI_CMD_I2CRW = 63
_PI_CMD_I2CWW = 64
_PI_CMD_I2CRK = 65
_PI_CMD_I2CWK = 66
_PI_CMD_I2CRI = 67
_PI_CMD_I2CWI = 68
_PI_CMD_I2CPC = 69
_PI_CMD_I2CPK = 70
_PI_CMD_SPIO = 71
_PI_CMD_SPIC = 72
_PI_CMD_SPIR = 73
_PI_CMD_SPIW = 74
_PI_CMD_SPIX = 75
_PI_CMD_SERO = 76
_PI_CMD_SERC = 77
_PI_CMD_SERRB = 78
_PI_CMD_SERWB = 79
_PI_CMD_SERR = 80
_PI_CMD_SERW = 81
_PI_CMD_SERDA = 82
_PI_CMD_GDC = 83
_PI_CMD_GPW = 84
_PI_CMD_HC = 85
_PI_CMD_HP = 86
_PI_CMD_CF1 = 87
_PI_CMD_CF2 = 88
_PI_CMD_FG = 97
_PI_CMD_FN = 98
_PI_CMD_NOIB = 99
_PI_CMD_BI2CC = 89
_PI_CMD_BI2CO = 90
_PI_CMD_BI2CZ = 91
_PI_CMD_I2CZ = 92
_PI_CMD_WVCHA = 93
_PI_CMD_SLRI = 94
_PI_CMD_BSPIC = 111
_PI_CMD_BSPIO = 112
_PI_CMD_BSPIX = 113
# pigpio error text
_errors = [
[PI_INIT_FAILED, "pigpio initialisation failed"],
[PI_BAD_USER_GPIO, "gpio not 0-31"],
[PI_BAD_GPIO, "gpio not 0-53"],
[PI_BAD_MODE, "mode not 0-7"],
[PI_BAD_LEVEL, "level not 0-1"],
[PI_BAD_PUD, "pud not 0-2"],
[PI_BAD_PULSEWIDTH, "pulsewidth not 0 or 500-2500"],
[PI_BAD_DUTYCYCLE, "dutycycle not 0-range (default 255)"],
[PI_BAD_TIMER, "timer not 0-9"],
[PI_BAD_MS, "ms not 10-60000"],
[PI_BAD_TIMETYPE, "timetype not 0-1"],
[PI_BAD_SECONDS, "seconds < 0"],
[PI_BAD_MICROS, "micros not 0-999999"],
[PI_TIMER_FAILED, "gpioSetTimerFunc failed"],
[PI_BAD_WDOG_TIMEOUT, "timeout not 0-60000"],
[PI_NO_ALERT_FUNC, "DEPRECATED"],
[PI_BAD_CLK_PERIPH, "clock peripheral not 0-1"],
[PI_BAD_CLK_SOURCE, "DEPRECATED"],
[PI_BAD_CLK_MICROS, "clock micros not 1, 2, 4, 5, 8, or 10"],
[PI_BAD_BUF_MILLIS, "buf millis not 100-10000"],
[PI_BAD_DUTYRANGE, "dutycycle range not 25-40000"],
[PI_BAD_SIGNUM, "signum not 0-63"],
[PI_BAD_PATHNAME, "can't open pathname"],
[PI_NO_HANDLE, "no handle available"],
[PI_BAD_HANDLE, "unknown handle"],
[PI_BAD_IF_FLAGS, "ifFlags > 3"],
[PI_BAD_CHANNEL, "DMA channel not 0-14"],
[PI_BAD_SOCKET_PORT, "socket port not 1024-30000"],
[PI_BAD_FIFO_COMMAND, "unknown fifo command"],
[PI_BAD_SECO_CHANNEL, "DMA secondary channel not 0-6"],
[PI_NOT_INITIALISED, "function called before gpioInitialise"],
[PI_INITIALISED, "function called after gpioInitialise"],
[PI_BAD_WAVE_MODE, "waveform mode not 0-1"],
[PI_BAD_CFG_INTERNAL, "bad parameter in gpioCfgInternals call"],
[PI_BAD_WAVE_BAUD, "baud rate not 50-250000(RX)/1000000(TX)"],
[PI_TOO_MANY_PULSES, "waveform has too many pulses"],
[PI_TOO_MANY_CHARS, "waveform has too many chars"],
[PI_NOT_SERIAL_GPIO, "no bit bang serial read in progress on gpio"],
[PI_NOT_PERMITTED, "no permission to update gpio"],
[PI_SOME_PERMITTED, "no permission to update one or more gpios"],
[PI_BAD_WVSC_COMMND, "bad WVSC subcommand"],
[PI_BAD_WVSM_COMMND, "bad WVSM subcommand"],
[PI_BAD_WVSP_COMMND, "bad WVSP subcommand"],
[PI_BAD_PULSELEN, "trigger pulse length not 1-100"],
[PI_BAD_SCRIPT, "invalid script"],
[PI_BAD_SCRIPT_ID, "unknown script id"],
[PI_BAD_SER_OFFSET, "add serial data offset > 30 minute"],
[PI_GPIO_IN_USE, "gpio already in use"],
[PI_BAD_SERIAL_COUNT, "must read at least a byte at a time"],
[PI_BAD_PARAM_NUM, "script parameter id not 0-9"],
[PI_DUP_TAG, "script has duplicate tag"],
[PI_TOO_MANY_TAGS, "script has too many tags"],
[PI_BAD_SCRIPT_CMD, "illegal script command"],
[PI_BAD_VAR_NUM, "script variable id not 0-149"],
[PI_NO_SCRIPT_ROOM, "no more room for scripts"],
[PI_NO_MEMORY, "can't allocate temporary memory"],
[PI_SOCK_READ_FAILED, "socket read failed"],
[PI_SOCK_WRIT_FAILED, "socket write failed"],
[PI_TOO_MANY_PARAM, "too many script parameters (> 10)"],
[PI_NOT_HALTED, "script already running or failed"],
[PI_BAD_TAG, "script has unresolved tag"],
[PI_BAD_MICS_DELAY, "bad MICS delay (too large)"],
[PI_BAD_MILS_DELAY, "bad MILS delay (too large)"],
[PI_BAD_WAVE_ID, "non existent wave id"],
[PI_TOO_MANY_CBS, "No more CBs for waveform"],
[PI_TOO_MANY_OOL, "No more OOL for waveform"],
[PI_EMPTY_WAVEFORM, "attempt to create an empty waveform"],
[PI_NO_WAVEFORM_ID, "No more waveform ids"],
[PI_I2C_OPEN_FAILED, "can't open I2C device"],
[PI_SER_OPEN_FAILED, "can't open serial device"],
[PI_SPI_OPEN_FAILED, "can't open SPI device"],
[PI_BAD_I2C_BUS, "bad I2C bus"],
[PI_BAD_I2C_ADDR, "bad I2C address"],
[PI_BAD_SPI_CHANNEL, "bad SPI channel"],
[PI_BAD_FLAGS, "bad i2c/spi/ser open flags"],
[PI_BAD_SPI_SPEED, "bad SPI speed"],
[PI_BAD_SER_DEVICE, "bad serial device name"],
[PI_BAD_SER_SPEED, "bad serial baud rate"],
[PI_BAD_PARAM, "bad i2c/spi/ser parameter"],
[PI_I2C_WRITE_FAILED, "I2C write failed"],
[PI_I2C_READ_FAILED, "I2C read failed"],
[PI_BAD_SPI_COUNT, "bad SPI count"],
[PI_SER_WRITE_FAILED, "ser write failed"],
[PI_SER_READ_FAILED, "ser read failed"],
[PI_SER_READ_NO_DATA, "ser read no data available"],
[PI_UNKNOWN_COMMAND, "unknown command"],
[PI_SPI_XFER_FAILED, "SPI xfer/read/write failed"],
[PI_BAD_POINTER, "bad (NULL) pointer"],
[PI_NO_AUX_SPI, "need a A+/B+/Pi2 for auxiliary SPI"],
[PI_NOT_PWM_GPIO, "gpio is not in use for PWM"],
[PI_NOT_SERVO_GPIO, "gpio is not in use for servo pulses"],
[PI_NOT_HCLK_GPIO, "gpio has no hardware clock"],
[PI_NOT_HPWM_GPIO, "gpio has no hardware PWM"],
[PI_BAD_HPWM_FREQ, "hardware PWM frequency not 1-125M"],
[PI_BAD_HPWM_DUTY, "hardware PWM dutycycle not 0-1M"],
[PI_BAD_HCLK_FREQ, "hardware clock frequency not 4689-250M"],
[PI_BAD_HCLK_PASS, "need password to use hardware clock 1"],
[PI_HPWM_ILLEGAL, "illegal, PWM in use for main clock"],
[PI_BAD_DATABITS, "serial data bits not 1-32"],
[PI_BAD_STOPBITS, "serial (half) stop bits not 2-8"],
[PI_MSG_TOOBIG, "socket/pipe message too big"],
[PI_BAD_MALLOC_MODE, "bad memory allocation mode"],
[PI_TOO_MANY_SEGS, "too many I2C transaction segments"],
[PI_BAD_I2C_SEG, "an I2C transaction segment failed"],
[PI_BAD_SMBUS_CMD, "SMBus command not supported"],
[PI_NOT_I2C_GPIO, "no bit bang I2C in progress on gpio"],
[PI_BAD_I2C_WLEN, "bad I2C write length"],
[PI_BAD_I2C_RLEN, "bad I2C read length"],
[PI_BAD_I2C_CMD, "bad I2C command"],
[PI_BAD_I2C_BAUD, "bad I2C baud rate, not 50-500k"],
[PI_CHAIN_LOOP_CNT, "bad chain loop count"],
[PI_BAD_CHAIN_LOOP, "empty chain loop"],
[PI_CHAIN_COUNTER, "too many chain counters"],
[PI_BAD_CHAIN_CMD, "bad chain command"],
[PI_BAD_CHAIN_DELAY, "bad chain delay micros"],
[PI_CHAIN_NESTING, "chain counters nested too deeply"],
[PI_CHAIN_TOO_BIG, "chain is too long"],
[PI_DEPRECATED, "deprecated function removed"],
[PI_BAD_SER_INVERT, "bit bang serial invert not 0 or 1"],
]
class ApigpioError(Exception):
"""pigpio module exception"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def error_text(errnum):
"""
Returns a text description of a pigpio error.
errnum:= <0, the error number
...
print(pigpio.error_text(-5))
level not 0-1
...
"""
for e in _errors:
if e[0] == errnum:
return e[1]
return "unknown error ({})".format(errnum)
# A couple of hacks to cope with different string handling
# between various Python versions
# 3 != 2.7.8 != 2.7.3
# fixme : remove as we only support 3.4+
if sys.hexversion < 0x03000000:
def _b(x):
return x
else:
def _b(x):
return x.encode('latin-1')
if sys.hexversion < 0x02070800:
def _str(x):
return buffer(x)
else:
def _str(x):
return x
def u2i(uint32):
"""
Converts a 32 bit unsigned number to signed.
uint32:= an unsigned 32 bit number
...
print(u2i(4294967272))
-24
print(u2i(37))
37
...
"""
mask = (2 ** 32) - 1
if uint32 & (1 << 31):
v = uint32 | ~mask
else:
v = uint32 & mask
return v
def _u2i(uint32):
"""
Converts a 32 bit unsigned number to signed. If the number
is negative it indicates an error. On error a pigpio
exception will be raised if exceptions is True.
"""
v = u2i(uint32)
if v < 0:
if exceptions:
raise ApigpioError(error_text(v))
return v
class _callback_ADT:
"""An ADT class to hold callback information."""
def __init__(self, gpio, edge, func):
"""
Initialises a callback ADT.
gpio:= Broadcom gpio number.
edge:= EITHER_EDGE, RISING_EDGE, or FALLING_EDGE.
func:= a user function taking three arguments (gpio, level, tick).
"""
self.gpio = gpio
self.edge = edge
self._func = func
self.bit = 1 << gpio
@property
def func(self):
def _f(*args, **kwargs):
            # protect ourselves from faulty callbacks
try:
self._func(*args, **kwargs)
except Exception as e:
print('Exception raised when running callback {}'.format(e))
return _f
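# Illustrative sketch of the callback shape stored by _callback_ADT above; the
# function name and GPIO number are placeholders, not part of the original module:
#
#   def on_change(gpio, level, tick):
#       print(gpio, level, tick)
#
#   adt = _callback_ADT(17, RISING_EDGE, on_change)   # adt.bit == 1 << 17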
class _callback_handler(object):
"""
A class to handle callbacks.
Each instance of this class open it's own connection to gpiod, which is
only used to listen for notifications.
"""
def __init__(self, pi):
self._loop = pi._loop
self.pi = pi
self.handle = None
self.monitor = 0
self.callbacks = []
self.f_stop = asyncio.Future(loop=self._loop)
self.f_stopped = asyncio.Future(loop=self._loop)
@asyncio.coroutine
def _connect(self, address):
# FIXME: duplication with pi.connect
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Disable the Nagle algorithm.
self.s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.s.setblocking(False)
# TODO: handle connection errors !
yield from self._loop.sock_connect(self.s, address)
self.handle = yield from self._pigpio_aio_command(_PI_CMD_NOIB, 0, 0)
asyncio.ensure_future(self._wait_for_notif(), loop=self._loop)
@asyncio.coroutine
def close(self):
if not self.f_stop.done():
self.handle = yield from self._pigpio_aio_command(_PI_CMD_NC,
self.handle, 0)
self.f_stop.set_result(True)
yield from self.f_stopped
@asyncio.coroutine
def _wait_for_notif(self):
last_level = 0
while True:
MSG_SIZ = 12
f_recv = self._loop.sock_recv(self.s, MSG_SIZ)
tup = yield from asyncio.\
wait([f_recv, self.f_stop],
return_when=asyncio.FIRST_COMPLETED)
done, pending = tup
if self.f_stop in done:
break
else:
buf = done.pop().result()
# buf = yield from self._loop.sock_recv(self.s, MSG_SIZ)
while len(buf) < MSG_SIZ:
                    buf += yield from self._loop.sock_recv(self.s, MSG_SIZ-len(buf))
seq, flags, tick, level = (struct.unpack('HHII', buf))
if flags == 0:
changed = level ^ last_level
last_level = level
for cb in self.callbacks:
if cb.bit & changed:
new_level = 0
if cb.bit & level:
new_level = 1
if cb.edge ^ new_level:
cb.func(cb.gpio, new_level, tick)
else:
if flags & NTFY_FLAGS_WDOG:
print('watchdog signal')
gpio = flags & NTFY_FLAGS_GPIO
for cb in self.callbacks:
if cb.gpio == gpio:
cb.func(cb.gpio, TIMEOUT, tick)
if flags & NTFY_FLAGS_ALIVE:
print('keep alive signal')
# no event for now
# elif flags & NTFY_FLAGS_EVENT:
# event = flags & NTFY_FLAGS_GPIO
# for cb in self.events:
# if cb.event == event:
# cb.func(event, tick)
self.s.close()
self.f_stopped.set_result(True)
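    # Hedged sketch of the 12-byte notification report parsed above (the field
    # values are illustrative only, not from the original module):
    #
    #   seq, flags, tick, level = struct.unpack('HHII', report)
    #   # flags == 0               -> 'level' is a GPIO 0-31 bitmap; changed bits fire callbacks
    #   # flags & NTFY_FLAGS_WDOG  -> watchdog timeout for gpio (flags & NTFY_FLAGS_GPIO)
    #   # flags & NTFY_FLAGS_ALIVE -> keep-alive only, no callback is fired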
@asyncio.coroutine
def append(self, cb):
"""Adds a callback."""
self.callbacks.append(cb.callb)
self.monitor = self.monitor | cb.callb.bit
yield from self.pi._pigpio_aio_command(_PI_CMD_NB, self.handle,
self.monitor)
@asyncio.coroutine
def remove(self, cb):
"""Removes a callback."""
if cb in self.callbacks:
self.callbacks.remove(cb)
new_monitor = 0
for c in self.callbacks:
new_monitor |= c.bit
if new_monitor != self.monitor:
self.monitor = new_monitor
yield from self.pi._pigpio_aio_command(
_PI_CMD_NB, self.handle, self.monitor)
@asyncio.coroutine
def _pigpio_aio_command(self, cmd, p1, p2,):
# FIXME: duplication with pi._pigpio_aio_command
data = struct.pack('IIII', cmd, p1, p2, 0)
yield from self._loop.sock_sendall(self.s, data)
response = yield from self._loop.sock_recv(self.s, 16)
_, res = struct.unpack('12sI', response)
return res
class Callback:
"""A class to provide gpio level change callbacks."""
def __init__(self, notify, user_gpio, edge=RISING_EDGE, func=None):
"""
Initialise a callback and adds it to the notification thread.
"""
self._notify = notify
self.count = 0
if func is None:
func = self._tally
self.callb = _callback_ADT(user_gpio, edge, func)
# FIXME yield from self._notify.append(self.callb)
@asyncio.coroutine
def cancel(self):
"""Cancels a callback by removing it from the notification thread."""
yield from self._notify.remove(self.callb)
def _tally(self, user_gpio, level, tick):
"""Increment the callback called count."""
self.count += 1
def tally(self):
"""
Provides a count of how many times the default tally
callback has triggered.
The count will be zero if the user has supplied their own
callback function.
"""
return self.count
class Pi(object):
@asyncio.coroutine
def _pigpio_aio_command(self, cmd, p1, p2,):
"""
Runs a pigpio socket command.
sl:= command socket and lock.
cmd:= the command to be executed.
p1:= command parameter 1 (if applicable).
p2:= command parameter 2 (if applicable).
"""
with (yield from self._lock):
data = struct.pack('IIII', cmd, p1, p2, 0)
yield from self._loop.sock_sendall(self.s, data)
response = yield from self._loop.sock_recv(self.s, 16)
_, res = struct.unpack('12sI', response)
return res
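    # Minimal sketch of the request/response framing used by the command helpers
    # above (values shown are illustrative, not from the original module):
    #
    #   request  = struct.pack('IIII', cmd, p1, p2, 0)   # 16-byte command frame
    #   response = 16 bytes from pigpiod; struct.unpack('12sI', response)
    #   #          -> (echoed 12-byte header, unsigned 32-bit result passed to _u2i)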
@asyncio.coroutine
def _pigpio_aio_command_ext(self, cmd, p1, p2, p3, extents):
"""
Runs an extended pigpio socket command.
sl:= command socket and lock.
cmd:= the command to be executed.
p1:= command parameter 1 (if applicable).
p2:= command parameter 2 (if applicable).
p3:= total size in bytes of following extents
extents:= additional data blocks
"""
with (yield from self._lock):
return (yield from self._pigpio_aio_command_ext_unlocked(cmd, p1, p2, p3, extents))
@asyncio.coroutine
def _pigpio_aio_command_ext_unlocked(self, cmd, p1, p2, p3, extents):
"""Run extended pigpio socket command without any lock."""
ext = bytearray(struct.pack('IIII', cmd, p1, p2, p3))
for x in extents:
if isinstance(x, str):
ext.extend(_b(x))
else:
ext.extend(x)
yield from self._loop.sock_sendall(self.s, ext)
response = yield from self._loop.sock_recv(self.s, 16)
_, res = struct.unpack('12sI', response)
return res
@asyncio.coroutine
def connect(self, address):
"""
Connect to a remote or local gpiod daemon.
:param address: a pair (address, port), the address must be already
resolved (for example an ip address)
:return:
"""
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.setblocking(False)
# Disable the Nagle algorithm.
self.s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
yield from self._loop.sock_connect(self.s, address)
yield from self._notify._connect(address)
@asyncio.coroutine
def stop(self):
"""
:return:
"""
print('closing notifier')
yield from self._notify.close()
print('closing socket')
self.s.close()
@asyncio.coroutine
def get_version(self):
        res = yield from self._pigpio_aio_command(_PI_CMD_PIGPV, 0, 0)
print('version: {}'.format(res))
@asyncio.coroutine
def get_pigpio_version(self):
"""
Returns the pigpio software version.
...
v = pi.get_pigpio_version()
...
"""
        res = yield from self._pigpio_aio_command(_PI_CMD_PIGPV, 0, 0)
        return res
@asyncio.coroutine
def store_script(self, script):
"""
Store a script for later execution.
script:= the script text as a series of bytes.
Returns a >=0 script id if OK.
...
sid = pi.store_script(
b'tag 0 w 22 1 mils 100 w 22 0 mils 100 dcr p0 jp 0')
...
"""
if len(script):
res = yield from self._pigpio_aio_command_ext(_PI_CMD_PROC, 0, 0,
len(script),
[script])
return _u2i(res)
else:
return 0
@asyncio.coroutine
def run_script(self, script_id, params=None):
"""
Runs a stored script.
script_id:= id of stored script.
params:= up to 10 parameters required by the script.
...
s = pi.run_script(sid, [par1, par2])
s = pi.run_script(sid)
s = pi.run_script(sid, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
...
"""
# I p1 script id
# I p2 0
# I p3 params * 4 (0-10 params)
# (optional) extension
# I[] params
if params is not None:
ext = bytearray()
for p in params:
ext.extend(struct.pack("I", p))
nump = len(params)
extents = [ext]
else:
nump = 0
extents = []
res = yield from self._pigpio_aio_command_ext(_PI_CMD_PROCR, script_id,
0, nump * 4, extents)
return _u2i(res)
@asyncio.coroutine
def script_status(self, script_id):
"""
Returns the run status of a stored script as well as the
current values of parameters 0 to 9.
script_id:= id of stored script.
The run status may be
. .
PI_SCRIPT_INITING
PI_SCRIPT_HALTED
PI_SCRIPT_RUNNING
PI_SCRIPT_WAITING
PI_SCRIPT_FAILED
. .
The return value is a tuple of run status and a list of
the 10 parameters. On error the run status will be negative
and the parameter list will be empty.
...
(s, pars) = pi.script_status(sid)
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_PROCP, script_id, 0)
bytes = u2i(res)
if bytes > 0:
            # Fixme: this should be the same as _rxbuf
            # data = self._rxbuf(bytes)
            data = bytearray((yield from self._loop.sock_recv(self.s, bytes)))
while len(data) < bytes:
b = yield from self._loop.sock_recv(self.s, bytes-len(data))
data.extend(b)
pars = struct.unpack('11i', _str(data))
status = pars[0]
params = pars[1:]
else:
status = bytes
params = ()
return status, params
@asyncio.coroutine
def stop_script(self, script_id):
"""
Stops a running script.
script_id:= id of stored script.
...
status = pi.stop_script(sid)
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_PROCS, script_id, 0)
return _u2i(res)
@asyncio.coroutine
def delete_script(self, script_id):
"""
Deletes a stored script.
script_id:= id of stored script.
...
status = pi.delete_script(sid)
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_PROCD, script_id, 0)
return _u2i(res)
@asyncio.coroutine
def read_bank_1(self):
"""
Returns the levels of the bank 1 gpios (gpios 0-31).
The returned 32 bit integer has a bit set if the corresponding
gpio is high. Gpio n has bit value (1<<n).
...
print(bin(pi.read_bank_1()))
0b10010100000011100100001001111
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_BR1, 0, 0)
return res
@asyncio.coroutine
def clear_bank_1(self, bits):
"""
Clears gpios 0-31 if the corresponding bit in bits is set.
bits:= a 32 bit mask with 1 set if the corresponding gpio is
to be cleared.
A returned status of PI_SOME_PERMITTED indicates that the user
is not allowed to write to one or more of the gpios.
...
pi.clear_bank_1(int("111110010000",2))
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_BC1, bits, 0)
return _u2i(res)
@asyncio.coroutine
def set_bank_1(self, bits):
"""
Sets gpios 0-31 if the corresponding bit in bits is set.
bits:= a 32 bit mask with 1 set if the corresponding gpio is
to be set.
A returned status of PI_SOME_PERMITTED indicates that the user
is not allowed to write to one or more of the gpios.
...
pi.set_bank_1(int("111110010000",2))
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_BS1, bits, 0)
return _u2i(res)
@asyncio.coroutine
def set_mode(self, gpio, mode):
"""
Sets the gpio mode.
gpio:= 0-53.
mode:= INPUT, OUTPUT, ALT0, ALT1, ALT2, ALT3, ALT4, ALT5.
...
pi.set_mode( 4, apigpio.INPUT) # gpio 4 as input
pi.set_mode(17, apigpio.OUTPUT) # gpio 17 as output
pi.set_mode(24, apigpio.ALT2) # gpio 24 as ALT2
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_MODES, gpio, mode)
return _u2i(res)
@asyncio.coroutine
def set_pull_up_down(self, gpio, pud):
"""
Sets or clears the internal GPIO pull-up/down resistor.
gpio:= 0-53.
pud:= PUD_UP, PUD_DOWN, PUD_OFF.
...
yield from pi.set_pull_up_down(17, apigpio.PUD_OFF)
yield from pi.set_pull_up_down(23, apigpio.PUD_UP)
yield from pi.set_pull_up_down(24, apigpio.PUD_DOWN)
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_PUD, gpio, pud)
return _u2i(res)
@asyncio.coroutine
def get_mode(self, gpio):
"""
Returns the gpio mode.
gpio:= 0-53.
Returns a value as follows
. .
0 = INPUT
1 = OUTPUT
2 = ALT5
3 = ALT4
4 = ALT0
5 = ALT1
6 = ALT2
7 = ALT3
. .
...
print(pi.get_mode(0))
4
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_MODEG, gpio, 0)
return _u2i(res)
@asyncio.coroutine
def write(self, gpio, level):
"""
Sets the gpio level.
gpio:= 0-53.
level:= 0, 1.
If PWM or servo pulses are active on the gpio they are
switched off.
...
pi.set_mode(17, pigpio.OUTPUT)
pi.write(17,0)
print(pi.read(17))
0
pi.write(17,1)
print(pi.read(17))
1
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_WRITE, gpio, level)
return _u2i(res)
@asyncio.coroutine
def read(self, gpio):
"""
Returns the GPIO level.
gpio:= 0-53.
...
yield from pi.set_mode(23, pigpio.INPUT)
yield from pi.set_pull_up_down(23, pigpio.PUD_DOWN)
print(yield from pi.read(23))
0
yield from pi.set_pull_up_down(23, pigpio.PUD_UP)
print(yield from pi.read(23))
1
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_READ, gpio, 0)
return _u2i(res)
@asyncio.coroutine
def gpio_trigger(self, user_gpio, pulse_len=10, level=1):
"""
Send a trigger pulse to a GPIO. The GPIO is set to
level for pulse_len microseconds and then reset to not level.
user_gpio:= 0-31
pulse_len:= 1-100
level:= 0-1
...
pi.gpio_trigger(23, 10, 1)
...
"""
# pigpio message format
# I p1 user_gpio
# I p2 pulse_len
# I p3 4
## extension ##
# I level
extents = [struct.pack("I", level)]
res = yield from self._pigpio_aio_command_ext(_PI_CMD_TRIG, user_gpio,
pulse_len, 4, extents)
return _u2i(res)
@asyncio.coroutine
def set_glitch_filter(self, user_gpio, steady):
"""
Sets a glitch filter on a GPIO.
Level changes on the GPIO are not reported unless the level
has been stable for at least [*steady*] microseconds. The
level is then reported. Level changes of less than [*steady*]
microseconds are ignored.
user_gpio:= 0-31
steady:= 0-300000
Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER.
This filter affects the GPIO samples returned to callbacks set up
with [*callback*] and [*wait_for_edge*].
It does not affect levels read by [*read*],
[*read_bank_1*], or [*read_bank_2*].
Each (stable) edge will be timestamped [*steady*]
microseconds after it was first detected.
...
pi.set_glitch_filter(23, 100)
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_FG, user_gpio, steady)
return _u2i(res)
@asyncio.coroutine
def set_noise_filter(self, user_gpio, steady, active):
"""
Sets a noise filter on a GPIO.
Level changes on the GPIO are ignored until a level which has
been stable for [*steady*] microseconds is detected. Level
changes on the GPIO are then reported for [*active*]
microseconds after which the process repeats.
user_gpio:= 0-31
steady:= 0-300000
active:= 0-1000000
Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER.
This filter affects the GPIO samples returned to callbacks set up
with [*callback*] and [*wait_for_edge*].
It does not affect levels read by [*read*],
[*read_bank_1*], or [*read_bank_2*].
Level changes before and after the active period may
be reported. Your software must be designed to cope with
such reports.
...
pi.set_noise_filter(23, 1000, 5000)
...
"""
# pigpio message format
# I p1 user_gpio
# I p2 steady
# I p3 4
## extension ##
# I active
extents = [struct.pack("I", active)]
res = yield from self._pigpio_aio_command_ext(_PI_CMD_FN, user_gpio,
steady, 4, extents)
return _u2i(res)
@asyncio.coroutine
def set_PWM_dutycycle(self, user_gpio, dutycycle):
"""
Starts (non-zero dutycycle) or stops (0) PWM pulses on the GPIO.
user_gpio:= 0-31.
dutycycle:= 0-range (range defaults to 255).
The [*set_PWM_range*] function can change the default range of 255.
...
pi.set_PWM_dutycycle(4, 0) # PWM off
pi.set_PWM_dutycycle(4, 64) # PWM 1/4 on
pi.set_PWM_dutycycle(4, 128) # PWM 1/2 on
pi.set_PWM_dutycycle(4, 192) # PWM 3/4 on
pi.set_PWM_dutycycle(4, 255) # PWM full on
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_PWM, user_gpio, int(dutycycle))
return _u2i(res)
@asyncio.coroutine
def add_callback(self, user_gpio, edge=RISING_EDGE, func=None):
"""
Calls a user supplied function (a callback) whenever the
specified gpio edge is detected.
user_gpio:= 0-31.
edge:= EITHER_EDGE, RISING_EDGE (default), or FALLING_EDGE.
func:= user supplied callback function.
The user supplied callback receives three parameters, the gpio,
the level, and the tick.
If a user callback is not specified a default tally callback is
provided which simply counts edges. The count may be retrieved
by calling the tally function.
The callback may be cancelled by calling the cancel function.
A gpio may have multiple callbacks (although I can't think of
a reason to do so).
...
def cbf(gpio, level, tick):
print(gpio, level, tick)
cb1 = pi.callback(22, pigpio.EITHER_EDGE, cbf)
cb2 = pi.callback(4, pigpio.EITHER_EDGE)
cb3 = pi.callback(17)
print(cb3.tally())
cb1.cancel() # To cancel callback cb1.
...
"""
cb = Callback(self._notify, user_gpio, edge, func)
yield from self._notify.append(cb)
return cb
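    # Hedged usage sketch for the coroutine above (assumes a connected Pi instance
    # named `pi` and a coroutine context; the docstring examples show the blocking
    # pigpio style, this is the asyncio equivalent):
    #
    #   def cbf(gpio, level, tick):
    #       print(gpio, level, tick)
    #
    #   cb = yield from pi.add_callback(22, EITHER_EDGE, cbf)
    #   ...
    #   yield from cb.cancel()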
@asyncio.coroutine
def set_servo_pulsewidth(self, user_gpio, pulsewidth):
"""
Starts (500-2500) or stops (0) servo pulses on the GPIO.
user_gpio:= 0-31.
pulsewidth:= 0 (off),
500 (most anti-clockwise) - 2500 (most clockwise).
The selected pulsewidth will continue to be transmitted until
changed by a subsequent call to set_servo_pulsewidth.
The pulsewidths supported by servos varies and should probably
be determined by experiment. A value of 1500 should always be
safe and represents the mid-point of rotation.
You can DAMAGE a servo if you command it to move beyond its
limits.
...
yield from pi.set_servo_pulsewidth(17, 0) # off
yield from pi.set_servo_pulsewidth(17, 1000) # safe anti-clockwise
yield from pi.set_servo_pulsewidth(17, 1500) # centre
yield from pi.set_servo_pulsewidth(17, 2000) # safe clockwise
...
"""
res = yield from self._pigpio_aio_command(_PI_CMD_SERVO, user_gpio, int(pulsewidth))
return _u2i(res)
@asyncio.coroutine
def bb_spi_open(self, cs, miso, mosi, sclk ,b, spf):
"""Open a bspi device on a bus."""
extents = [struct.pack('IIIII', int(miso), int(mosi), int(sclk), int(b), int(spf))]
res = yield from self._pigpio_aio_command_ext(_PI_CMD_BSPIO, cs, 0, 20, extents=extents)
return _u2i(res)
@asyncio.coroutine
def bb_spi_close(self, cs):
"""Close a bspi device on a bus."""
res = yield from self._pigpio_aio_command(_PI_CMD_BSPIC, cs, 0)
return _u2i(res)
@asyncio.coroutine
def bb_spi_xfer(self, cs, data):
bytes = PI_CMD_INTERRUPTED
with (yield from self._lock):
bytes = yield from self._pigpio_aio_command_ext_unlocked(
_PI_CMD_BSPIX, cs, 0, len(data), [data])
if bytes > 0:
data = yield from self._rxbuf(bytes)
else:
data = ""
return bytes, data
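    # Illustrative bit-bang SPI flow for the three coroutines above; the GPIO
    # numbers, baud rate and payload are placeholders (assumes a connected `pi`):
    #
    #   yield from pi.bb_spi_open(8, 9, 10, 11, 100000, 0)   # CS, MISO, MOSI, SCLK
    #   count, rx = yield from pi.bb_spi_xfer(8, b'\x01\x80\x00')
    #   yield from pi.bb_spi_close(8)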
@asyncio.coroutine
def spi_open(self, spi_channel, baud, spi_flags=0):
# I p1 spi_channel
# I p2 baud
# I p3 4
## extension ##
# I spi_flags
"""Open an spi device on a bus."""
extents = [struct.pack('I', spi_flags)]
res = yield from self._pigpio_aio_command_ext(_PI_CMD_SPIO, spi_channel, baud, 4, extents=extents)
return _u2i(res)
@asyncio.coroutine
def spi_close(self, handle):
"""Close a spi device on a bus."""
res = yield from self._pigpio_aio_command(_PI_CMD_SPIC, handle, 0)
return _u2i(res)
@asyncio.coroutine
def spi_write(self, handle, data):
# I p1 handle
# I p2 0
# I p3 len
## extension ##
# s len data bytes
res = yield from self._pigpio_aio_command_ext(_PI_CMD_SPIW, handle, 0, len(data), [data])
return _u2i(res)
@asyncio.coroutine
def i2c_open(self, bus, address):
"""Open an i2c device on a bus."""
res = yield from self._pigpio_aio_command(_PI_CMD_I2CO, int(bus), int(address))
return _u2i(res)
@asyncio.coroutine
def i2c_close(self, handle):
"""Close an i2c handle."""
        res = yield from self._pigpio_aio_command(_PI_CMD_I2CC, handle, 0)
return _u2i(res)
@asyncio.coroutine
def i2c_write_byte_data(self, handle, register, data):
"""Write byte to i2c register on handle."""
extents = [struct.pack("I", data)]
res = yield from self._pigpio_aio_command_ext(_PI_CMD_I2CWB, handle, int(register), 1, extents)
return _u2i(res)
@asyncio.coroutine
def _rxbuf(self, count):
""""Returns count bytes from the command socket."""
ext = yield from self._loop.sock_recv(self.s, count)
while len(ext) < count:
ext.extend((yield from self._loop.sock_recv(self.s, count - len(ext))))
return ext
@asyncio.coroutine
def i2c_read_byte_data(self, handle, register):
"""Write byte to i2c register on handle."""
res = yield from self._pigpio_aio_command(_PI_CMD_I2CRB, handle, int(register))
return _u2i(res)
@asyncio.coroutine
def i2c_read_i2c_block_data(self, handle, register, count):
"""Read count bytes from an i2c handle."""
extents = [struct.pack("I", count)]
with (yield from self._lock):
bytes = yield from self._pigpio_aio_command_ext_unlocked(_PI_CMD_I2CRI, handle, int(register), 4, extents)
if bytes > 0:
data = yield from self._rxbuf(count)
else:
data = ""
return data
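    # Illustrative I2C flow for the coroutines above; bus, address and register
    # values are placeholders (assumes a connected `pi`):
    #
    #   handle = yield from pi.i2c_open(1, 0x48)
    #   yield from pi.i2c_write_byte_data(handle, 0x01, 0x60)
    #   value = yield from pi.i2c_read_byte_data(handle, 0x00)
    #   block = yield from pi.i2c_read_i2c_block_data(handle, 0x00, 2)
    #   yield from pi.i2c_close(handle)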
def __init__(self, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
self._loop = loop
self.s = None
self._notify = _callback_handler(self)
self._lock = asyncio.Lock()
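# Hedged end-to-end sketch of this client (not part of the original module; it
# assumes a pigpiod daemon reachable on the default port 8888):
#
#   loop = asyncio.get_event_loop()
#   pi = Pi(loop)
#   loop.run_until_complete(pi.connect(('127.0.0.1', 8888)))
#   loop.run_until_complete(pi.set_mode(17, OUTPUT))   # OUTPUT comes from .ctes
#   loop.run_until_complete(pi.write(17, 1))
#   loop.run_until_complete(pi.stop())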
| 31.12468 | 118 | 0.601257 |
4a252e7ad3a5302fd540eac89093d6b1d1cb09c3 | 3,459 | py | Python | install/app_store/tk-multi-publish/v0.10.7/python/tk_multi_publish/ui/publish_progress_form.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | ["MIT"] | null | null | null | install/app_store/tk-multi-publish/v0.10.7/python/tk_multi_publish/ui/publish_progress_form.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | ["MIT"] | null | null | null | install/app_store/tk-multi-publish/v0.10.7/python/tk_multi_publish/ui/publish_progress_form.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | ["MIT"] | 1 | 2020-02-15T10:42:56.000Z | 2020-02-15T10:42:56.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'publish_progress_form.ui'
#
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from tank.platform.qt import QtCore, QtGui
class Ui_PublishProgressForm(object):
def setupUi(self, PublishProgressForm):
PublishProgressForm.setObjectName("PublishProgressForm")
PublishProgressForm.resize(651, 384)
self.verticalLayout_4 = QtGui.QVBoxLayout(PublishProgressForm)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setSpacing(-1)
self.verticalLayout_3.setObjectName("verticalLayout_3")
spacerItem1 = QtGui.QSpacerItem(20, 100, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
self.verticalLayout_3.addItem(spacerItem1)
self.title = QtGui.QLabel(PublishProgressForm)
self.title.setStyleSheet("#title {\n"
"font-size: 24px;\n"
"}")
self.title.setObjectName("title")
self.verticalLayout_3.addWidget(self.title)
self.progress_bar = QtGui.QProgressBar(PublishProgressForm)
self.progress_bar.setProperty("value", 24)
self.progress_bar.setObjectName("progress_bar")
self.verticalLayout_3.addWidget(self.progress_bar)
self.details = QtGui.QLabel(PublishProgressForm)
self.details.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.details.setWordWrap(False)
self.details.setObjectName("details")
self.verticalLayout_3.addWidget(self.details)
self.stage_progress_bar = QtGui.QProgressBar(PublishProgressForm)
self.stage_progress_bar.setProperty("value", 24)
self.stage_progress_bar.setObjectName("stage_progress_bar")
self.verticalLayout_3.addWidget(self.stage_progress_bar)
spacerItem2 = QtGui.QSpacerItem(20, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem2)
self.verticalLayout_3.setStretch(5, 1)
self.horizontalLayout.addLayout(self.verticalLayout_3)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem3)
self.horizontalLayout.setStretch(0, 1)
self.horizontalLayout.setStretch(1, 5)
self.horizontalLayout.setStretch(2, 1)
self.verticalLayout_4.addLayout(self.horizontalLayout)
self.verticalLayout_4.setStretch(0, 1)
self.retranslateUi(PublishProgressForm)
QtCore.QMetaObject.connectSlotsByName(PublishProgressForm)
def retranslateUi(self, PublishProgressForm):
PublishProgressForm.setWindowTitle(QtGui.QApplication.translate("PublishProgressForm", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.title.setText(QtGui.QApplication.translate("PublishProgressForm", "Publishing...", None, QtGui.QApplication.UnicodeUTF8))
self.details.setText(QtGui.QApplication.translate("PublishProgressForm", "(Details)", None, QtGui.QApplication.UnicodeUTF8))
from . import resources_rc
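# Hedged usage sketch for the generated class above (standard pyside-uic pattern;
# the host widget variable is an assumption, not part of the generated file):
#
#   form = QtGui.QWidget()
#   ui = Ui_PublishProgressForm()
#   ui.setupUi(form)
#   form.show()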
| 52.409091 | 141 | 0.740966 |
4a2530028727febb561176eec21ca810e97f7674 | 81,111 | py | Python | syncplay/ui/GuiConfiguration.py | nipkownix/syncplay | 5b93aeb20e80d3c208a766583363e87ea61ee85b | ["Apache-2.0"] | null | null | null | syncplay/ui/GuiConfiguration.py | nipkownix/syncplay | 5b93aeb20e80d3c208a766583363e87ea61ee85b | ["Apache-2.0"] | null | null | null | syncplay/ui/GuiConfiguration.py | nipkownix/syncplay | 5b93aeb20e80d3c208a766583363e87ea61ee85b | ["Apache-2.0"] | 1 | 2022-01-15T19:24:48.000Z | 2022-01-15T19:24:48.000Z |
import os
import sys
import threading
from datetime import datetime
from syncplay import constants
from syncplay import utils
from syncplay.messages import getMessage, getLanguages, setLanguage, getInitialLanguage
from syncplay.players.playerFactory import PlayerFactory
from syncplay.utils import isBSD, isLinux, isMacOS, isWindows
from syncplay.utils import resourcespath, posixresourcespath, playerPathExists
from syncplay.vendor.Qt import QtCore, QtWidgets, QtGui, __binding__, IsPySide, IsPySide2
from syncplay.vendor.Qt.QtCore import Qt, QSettings, QCoreApplication, QSize, QPoint, QUrl, QLine, QEventLoop, Signal
from syncplay.vendor.Qt.QtWidgets import QApplication, QLineEdit, QLabel, QCheckBox, QButtonGroup, QRadioButton, QDoubleSpinBox, QPlainTextEdit
from syncplay.vendor.Qt.QtGui import QCursor, QIcon, QImage, QDesktopServices
try:
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
except AttributeError:
pass # To ignore error "Attribute Qt::AA_EnableHighDpiScaling must be set before QCoreApplication is created"
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
if IsPySide2:
from PySide2.QtCore import QStandardPaths
class GuiConfiguration:
def __init__(self, config, error=None, defaultConfig=None):
self.defaultConfig = defaultConfig
self.config = config
self._availablePlayerPaths = []
self.error = error
constants.DEBUG_MODE = config['debug']
def run(self):
if QCoreApplication.instance() is None:
self.app = QtWidgets.QApplication(sys.argv)
dialog = ConfigDialog(self.config, self._availablePlayerPaths, self.error, self.defaultConfig)
configLoop = QEventLoop()
dialog.show()
dialog.closed.connect(configLoop.quit)
configLoop.exec_()
def setAvailablePaths(self, paths):
self._availablePlayerPaths = paths
def getProcessedConfiguration(self):
return self.config
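# Hedged usage sketch for GuiConfiguration above (the surrounding call site is an
# assumption; Syncplay drives this from its client bootstrap code):
#
#   gui = GuiConfiguration(config)
#   gui.setAvailablePaths(knownPlayerPaths)   # knownPlayerPaths is a placeholder list
#   gui.run()
#   config = gui.getProcessedConfiguration()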
class WindowClosed(Exception):
pass
class GetPlayerIconThread(threading.Thread, QtCore.QObject):
daemon = True
done = QtCore.Signal(str, str)
def __init__(self):
threading.Thread.__init__(self, name='GetPlayerIcon')
QtCore.QObject.__init__(self)
self.condvar = threading.Condition()
self.playerpath = None
def setPlayerPath(self, playerpath):
self.condvar.acquire()
was_none = self.playerpath is None
self.playerpath = playerpath
if was_none:
self.condvar.notify()
self.condvar.release()
def run(self):
while True:
self.condvar.acquire()
if self.playerpath is None:
self.condvar.wait()
playerpath = self.playerpath
self.playerpath = None
self.condvar.release()
self.done.emit('spinner.mng', '')
iconpath = PlayerFactory().getPlayerIconByPath(playerpath)
self.done.emit(iconpath, playerpath)
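# Hedged sketch of how this probe thread can be driven (a plausible wiring,
# consistent with the _updateExecutableIcon slot defined in ConfigDialog below;
# the dialog and player path are placeholders):
#
#   probe = GetPlayerIconThread()
#   probe.done.connect(dialog._updateExecutableIcon)
#   probe.start()
#   probe.setPlayerPath('/usr/bin/mpv')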
class ConfigDialog(QtWidgets.QDialog):
pressedclosebutton = True
moreToggling = False
closed = Signal()
def automaticUpdatePromptCheck(self):
if self.automaticupdatesCheckbox.checkState() == Qt.PartiallyChecked:
reply = QtWidgets.QMessageBox.question(
self, "Syncplay",
getMessage("promptforupdate-label"),
QtWidgets.QMessageBox.StandardButton.Yes | QtWidgets.QMessageBox.StandardButton.No)
if reply == QtWidgets.QMessageBox.Yes:
self.automaticupdatesCheckbox.setChecked(True)
else:
self.automaticupdatesCheckbox.setChecked(False)
def moreToggled(self):
self.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
if self.moreToggling is False:
self.moreToggling = True
if self.showmoreCheckbox.isChecked():
self.tabListFrame.show()
self.resetButton.show()
self.playerargsTextbox.show()
self.playerargsLabel.show()
self.mediapathTextbox.show()
self.mediapathLabel.show()
self.mediabrowseButton.show()
self.runButton.show()
self.saveMoreState(True)
self.tabListWidget.setCurrentRow(0)
self.ensureTabListIsVisible()
if isMacOS(): self.mediaplayerSettingsGroup.setFixedHeight(self.mediaplayerSettingsGroup.minimumSizeHint().height())
self.stackedFrame.setFixedHeight(self.stackedFrame.minimumSizeHint().height())
else:
self.tabListFrame.hide()
self.resetButton.hide()
self.playerargsTextbox.hide()
self.playerargsLabel.hide()
self.runButton.hide()
if self.mediapathTextbox.text() == "":
self.mediapathTextbox.hide()
self.mediapathLabel.hide()
self.mediabrowseButton.hide()
else:
self.mediapathTextbox.show()
self.mediapathLabel.show()
self.mediabrowseButton.show()
self.saveMoreState(False)
self.stackedLayout.setCurrentIndex(0)
if isMacOS():
self.mediaplayerSettingsGroup.setFixedHeight(self.mediaplayerSettingsGroup.minimumSizeHint().height())
newHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+50
else:
newHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+13
if self.error:
newHeight += self.errorLabel.height()+3
self.stackedFrame.setFixedHeight(newHeight)
self.adjustSize()
if isMacOS():
newHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+50+16
self.setFixedWidth(self.sizeHint().width())
self.setFixedHeight(newHeight)
else:
self.setFixedSize(self.sizeHint())
self.moreToggling = False
self.setFixedWidth(self.minimumSizeHint().width())
def openHelp(self):
        QDesktopServices.openUrl(QUrl("https://syncplay.pl/guide/client/"))
def openRoomsDialog(self):
RoomsDialog = QtWidgets.QDialog()
RoomsLayout = QtWidgets.QGridLayout()
RoomsTextbox = QtWidgets.QPlainTextEdit()
RoomsDialog.setWindowTitle(getMessage("roomlist-msgbox-label"))
RoomsPlaylistLabel = QtWidgets.QLabel(getMessage("roomlist-msgbox-label"))
RoomsTextbox.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
RoomsTextbox.setPlainText(utils.getListAsMultilineString(self.config['roomList']))
RoomsLayout.addWidget(RoomsPlaylistLabel, 0, 0, 1, 1)
RoomsLayout.addWidget(RoomsTextbox, 1, 0, 1, 1)
RoomsButtonBox = QtWidgets.QDialogButtonBox()
RoomsButtonBox.setOrientation(Qt.Horizontal)
RoomsButtonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
RoomsButtonBox.accepted.connect(RoomsDialog.accept)
RoomsButtonBox.rejected.connect(RoomsDialog.reject)
RoomsLayout.addWidget(RoomsButtonBox, 2, 0, 1, 1)
RoomsDialog.setLayout(RoomsLayout)
RoomsDialog.setModal(True)
RoomsDialog.setWindowFlags(RoomsDialog.windowFlags() & ~Qt.WindowContextHelpButtonHint)
RoomsDialog.show()
result = RoomsDialog.exec_()
if result == QtWidgets.QDialog.Accepted:
newRooms = utils.convertMultilineStringToList(RoomsTextbox.toPlainText())
newRooms = sorted(newRooms)
self.relistRoomList(newRooms)
def safenormcaseandpath(self, path):
if utils.isURL(path):
return path
else:
return os.path.normcase(os.path.normpath(path))
def _tryToFillPlayerPath(self, playerpath, playerpathlist):
settings = QSettings("Syncplay", "PlayerList")
settings.beginGroup("PlayerList")
savedPlayers = settings.value("PlayerList", [])
if not isinstance(savedPlayers, list):
savedPlayers = []
else:
for i, savedPlayer in enumerate(savedPlayers):
savedPlayers[i] = self.safenormcaseandpath(savedPlayer)
playerpathlist = list(set(playerpathlist + savedPlayers))
settings.endGroup()
foundpath = ""
if playerpath is not None and playerpath != "":
if utils.isURL(playerpath):
foundpath = playerpath
self.executablepathCombobox.addItem(foundpath)
else:
if not playerPathExists(playerpath):
expandedpath = PlayerFactory().getExpandedPlayerPathByPath(playerpath)
if expandedpath is not None and playerPathExists(expandedpath):
playerpath = expandedpath
elif "mpvnet.exe" in playerpath and playerPathExists(playerpath.replace("mpvnet.exe","mpvnet.com")):
self.executablepathCombobox.addItem(playerpath)
if playerPathExists(playerpath):
foundpath = playerpath
self.executablepathCombobox.addItem(foundpath)
for path in playerpathlist:
if utils.isURL(path):
if foundpath == "":
foundpath = path
if path != playerpath:
self.executablepathCombobox.addItem(path)
elif playerPathExists(path) and os.path.normcase(os.path.normpath(path)) != os.path.normcase(os.path.normpath(foundpath)):
self.executablepathCombobox.addItem(path)
if foundpath == "":
foundpath = path
if foundpath != "":
settings.beginGroup("PlayerList")
playerpathlist.append(self.safenormcaseandpath(foundpath))
settings.setValue("PlayerList", list(set(playerpathlist)))
settings.endGroup()
return foundpath
@QtCore.Slot(str, str)
def _updateExecutableIcon(self, iconpath, playerpath):
if iconpath is not None and iconpath != "":
if iconpath.endswith('.mng'):
movie = QtGui.QMovie(resourcespath + iconpath)
movie.setCacheMode(QtGui.QMovie.CacheMode.CacheAll)
self.executableiconLabel.setMovie(movie)
movie.start()
else:
self.executableiconImage.load(resourcespath + iconpath)
self.executableiconLabel.setPixmap(QtGui.QPixmap.fromImage(self.executableiconImage))
else:
self.executableiconLabel.setPixmap(QtGui.QPixmap.fromImage(QtGui.QImage()))
self.updatePlayerArguments(playerpath)
def updateExecutableIcon(self):
"""
Start getting the icon path in another thread, which will set the GUI
icon if valid.
This is performed outside the main thread because networked players may
take a long time to perform their checks and hang the GUI while doing
so.
"""
currentplayerpath = str(self.executablepathCombobox.currentText())
self._playerProbeThread.setPlayerPath(currentplayerpath)
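    # Hedged sketch of the probe round-trip described in the docstring above
    # (slot and signal names match this file; the exact trigger wiring is assumed):
    #
    #   executablepathCombobox.editTextChanged
    #       -> updateExecutableIcon()                    # GUI thread, returns immediately
    #       -> GetPlayerIconThread.setPlayerPath(path)   # wakes the worker thread
    #       -> done.emit(iconpath, path)                 # emitted from the worker
    #       -> _updateExecutableIcon(iconpath, path)     # runs back on the GUI thread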
def updatePlayerArguments(self, currentplayerpath):
argumentsForPath = utils.getPlayerArgumentsByPathAsText(self.perPlayerArgs, currentplayerpath)
self.playerargsTextbox.blockSignals(True)
self.playerargsTextbox.setText(argumentsForPath)
self.playerargsTextbox.blockSignals(False)
def changedPlayerArgs(self):
currentplayerpath = self.executablepathCombobox.currentText()
if currentplayerpath:
NewPlayerArgs = self.playerargsTextbox.text().split(" ") if self.playerargsTextbox.text() else ""
self.perPlayerArgs[self.executablepathCombobox.currentText()] = NewPlayerArgs
def languageChanged(self):
setLanguage(str(self.languageCombobox.itemData(self.languageCombobox.currentIndex())))
QtWidgets.QMessageBox.information(self, "Syncplay", getMessage("language-changed-msgbox-label"))
def browsePlayerpath(self):
options = QtWidgets.QFileDialog.Options()
defaultdirectory = ""
browserfilter = "All files (*)"
if os.name == 'nt':
browserfilter = "Executable files (*.exe);;All files (*)"
if "PROGRAMFILES(X86)" in os.environ:
defaultdirectory = os.environ["ProgramFiles(x86)"]
elif "PROGRAMFILES" in os.environ:
defaultdirectory = os.environ["ProgramFiles"]
elif "PROGRAMW6432" in os.environ:
defaultdirectory = os.environ["ProgramW6432"]
elif isLinux():
defaultdirectory = "/usr/bin"
elif isMacOS():
defaultdirectory = "/Applications/"
elif isBSD():
defaultdirectory = "/usr/local/bin"
fileName, filtr = QtWidgets.QFileDialog.getOpenFileName(
self,
"Browse for media player executable",
defaultdirectory,
browserfilter, "", options)
if fileName:
if isMacOS() and fileName.endswith('.app'): # see GitHub issue #91
# Mac OS X application bundles contain a Info.plist in the Contents subdirectory of the .app.
# This plist file includes the 'CFBundleExecutable' key, which specifies the name of the
# executable. I would have used plistlib here, but since the version of this library in
# py < 3.4 can't read from binary plist files it's pretty much useless. Therefore, let's
# play a game of "Guess my executable!"
# Step 1: get all the executable files. In a Mac OS X Application bundle, executables are stored
# inside <bundle root>/Contents/MacOS.
execPath = os.path.join(os.path.normpath(fileName), 'Contents', 'MacOS')
execFiles = []
for fn in os.listdir(execPath):
fn = os.path.join(execPath, fn)
if os.path.isfile(fn) and os.access(fn, os.X_OK):
execFiles.append(fn)
# Step 2: figure out which file name looks like the application name
baseAppName = os.path.basename(fileName).replace('.app', '').lower()
foundExe = False
for fn in execFiles:
baseExecName = os.path.basename(fn).lower()
if baseAppName == baseExecName:
fileName = fn
foundExe = True
break
# Step 3: use the first executable in the list if no executable was found
try:
if not foundExe:
fileName = execFiles[0]
except IndexError: # whoops, looks like this .app doesn't contain a executable file at all
pass
self.executablepathCombobox.setEditText(os.path.normpath(fileName))
def loadLastUpdateCheckDate(self):
settings = QSettings("Syncplay", "Interface")
settings.beginGroup("Update")
try:
self.lastCheckedForUpdates = settings.value("lastCheckedQt", None)
if self.lastCheckedForUpdates:
if self.config["lastCheckedForUpdates"] != None and self.config["lastCheckedForUpdates"] != "":
if self.lastCheckedForUpdates.toPython() > datetime.strptime(self.config["lastCheckedForUpdates"], "%Y-%m-%d %H:%M:%S.%f"):
self.config["lastCheckedForUpdates"] = self.lastCheckedForUpdates.toString("yyyy-MM-d HH:mm:ss.z")
else:
self.config["lastCheckedForUpdates"] = self.lastCheckedForUpdates.toString("yyyy-MM-d HH:mm:ss.z")
except:
self.config["lastCheckedForUpdates"] = None
def loadSavedPublicServerList(self):
settings = QSettings("Syncplay", "Interface")
settings.beginGroup("PublicServerList")
self.publicServers = settings.value("publicServers", None)
def loadMediaBrowseSettings(self):
settings = QSettings("Syncplay", "MediaBrowseDialog")
settings.beginGroup("MediaBrowseDialog")
self.mediadirectory = settings.value("mediadir", "")
settings.endGroup()
def saveMediaBrowseSettings(self):
settings = QSettings("Syncplay", "MediaBrowseDialog")
settings.beginGroup("MediaBrowseDialog")
settings.setValue("mediadir", self.mediadirectory)
settings.endGroup()
def getMoreState(self):
settings = QSettings("Syncplay", "MoreSettings")
settings.beginGroup("MoreSettings")
morestate = str.lower(str(settings.value("ShowMoreSettings", "false")))
settings.endGroup()
if morestate == "true":
return True
else:
return False
def saveMoreState(self, morestate):
settings = QSettings("Syncplay", "MoreSettings")
settings.beginGroup("MoreSettings")
settings.setValue("ShowMoreSettings", morestate)
settings.endGroup()
def updateServerList(self):
try:
servers = utils.getListOfPublicServers()
except IOError as e:
self.showErrorMessage(e.args[0])
return
currentServer = self.hostCombobox.currentText()
self.hostCombobox.clear()
if servers:
i = 0
for server in servers:
self.hostCombobox.addItem(server[1])
self.hostCombobox.setItemData(i, server[0], Qt.ToolTipRole)
i += 1
settings = QSettings("Syncplay", "Interface")
settings.beginGroup("PublicServerList")
settings.setValue("publicServers", servers)
self.hostCombobox.setEditText(currentServer)
def fillRoomsCombobox(self):
previousRoomSelection = self.roomsCombobox.currentText()
self.roomsCombobox.clear()
for roomListValue in self.config['roomList']:
self.roomsCombobox.addItem(roomListValue)
self.roomsCombobox.setEditText(previousRoomSelection)
def relistRoomList(self, newRooms):
filteredNewRooms = [room for room in newRooms if room and not room.isspace()]
self.config['roomList'] = filteredNewRooms
self.fillRoomsCombobox()
def addRoomToList(self, newRoom=None):
if newRoom is None:
newRoom = self.roomsCombobox.currentText()
if not newRoom:
return
roomList = self.config['roomList']
if newRoom not in roomList:
roomList.append(newRoom)
roomList = sorted(roomList)
self.config['roomList'] = roomList
def showErrorMessage(self, errorMessage):
QtWidgets.QMessageBox.warning(self, "Syncplay", errorMessage)
def browseMediapath(self):
self.loadMediaBrowseSettings()
options = QtWidgets.QFileDialog.Options()
if IsPySide:
if self.config["mediaSearchDirectories"] and os.path.isdir(self.config["mediaSearchDirectories"][0]):
defaultdirectory = self.config["mediaSearchDirectories"][0]
elif os.path.isdir(self.mediadirectory):
defaultdirectory = self.mediadirectory
elif os.path.isdir(QDesktopServices.storageLocation(QDesktopServices.MoviesLocation)):
defaultdirectory = QDesktopServices.storageLocation(QDesktopServices.MoviesLocation)
elif os.path.isdir(QDesktopServices.storageLocation(QDesktopServices.HomeLocation)):
defaultdirectory = QDesktopServices.storageLocation(QDesktopServices.HomeLocation)
else:
defaultdirectory = ""
elif IsPySide2:
if self.config["mediaSearchDirectories"] and os.path.isdir(self.config["mediaSearchDirectories"][0]):
defaultdirectory = self.config["mediaSearchDirectories"][0]
elif os.path.isdir(self.mediadirectory):
defaultdirectory = self.mediadirectory
elif os.path.isdir(QStandardPaths.standardLocations(QStandardPaths.MoviesLocation)[0]):
defaultdirectory = QStandardPaths.standardLocations(QStandardPaths.MoviesLocation)[0]
elif os.path.isdir(QStandardPaths.standardLocations(QStandardPaths.HomeLocation)[0]):
defaultdirectory = QStandardPaths.standardLocations(QStandardPaths.HomeLocation)[0]
else:
defaultdirectory = ""
browserfilter = "All files (*)"
fileName, filtr = QtWidgets.QFileDialog.getOpenFileName(
self, "Browse for media files", defaultdirectory,
browserfilter, "", options)
if fileName:
self.mediapathTextbox.setText(os.path.normpath(fileName))
self.mediadirectory = os.path.dirname(fileName)
self.saveMediaBrowseSettings()
def _runWithoutStoringConfig(self):
self._saveDataAndLeave(False)
def _saveDataAndLeave(self, storeConfiguration=True):
self.config['noStore'] = not storeConfiguration
if storeConfiguration:
self.automaticUpdatePromptCheck()
self.loadLastUpdateCheckDate()
self.config["perPlayerArguments"] = self.perPlayerArgs
self.config["mediaSearchDirectories"] = utils.convertMultilineStringToList(self.mediasearchTextEdit.toPlainText())
self.config["trustedDomains"] = utils.convertMultilineStringToList(self.trusteddomainsTextEdit.toPlainText())
if self.serverpassTextbox.isEnabled():
self.config['password'] = self.serverpassTextbox.text()
self.processWidget(self, lambda w: self.saveValues(w))
if self.hostCombobox.currentText():
self.config['host'] = self.hostCombobox.currentText() if ":" in self.hostCombobox.currentText() else self.hostCombobox.currentText() + ":" + str(constants.DEFAULT_PORT)
self.config['host'] = self.config['host'].replace(" ", "").replace("\t", "").replace("\n", "").replace("\r", "")
else:
self.config['host'] = None
self.config['playerPath'] = str(self.safenormcaseandpath(self.executablepathCombobox.currentText()))
self.config['language'] = str(self.languageCombobox.itemData(self.languageCombobox.currentIndex()))
if self.mediapathTextbox.text() == "":
self.config['file'] = None
elif os.path.isfile(os.path.abspath(self.mediapathTextbox.text())):
self.config['file'] = os.path.abspath(self.mediapathTextbox.text())
else:
self.config['file'] = str(self.mediapathTextbox.text())
self.config['publicServers'] = self.publicServerAddresses
self.config['room'] = self.roomsCombobox.currentText()
if self.config['autosaveJoinsToList']:
self.addRoomToList(self.config['room'])
self.pressedclosebutton = False
self.close()
self.closed.emit()
def closeEvent(self, event):
if self.pressedclosebutton:
super(ConfigDialog, self).closeEvent(event)
self.closed.emit()
sys.exit()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
sys.exit()
def dragEnterEvent(self, event):
data = event.mimeData()
urls = data.urls()
if urls and urls[0].scheme() == 'file':
event.acceptProposedAction()
def dropEvent(self, event):
data = event.mimeData()
urls = data.urls()
if urls and urls[0].scheme() == 'file':
dropfilepath = os.path.abspath(str(event.mimeData().urls()[0].toLocalFile()))
if dropfilepath[-4:].lower() == ".exe":
self.executablepathCombobox.setEditText(dropfilepath)
else:
self.mediapathTextbox.setText(dropfilepath)
self.mediapathTextbox.show()
self.mediapathLabel.show()
self.mediabrowseButton.show()
if not self.showmoreCheckbox.isChecked():
newHeight = self.connectionSettingsGroup.minimumSizeHint().height() + self.mediaplayerSettingsGroup.minimumSizeHint().height() + self.bottomButtonFrame.minimumSizeHint().height() + 3
if self.error:
newHeight += self.errorLabel.height() + 3
self.stackedFrame.setFixedHeight(newHeight)
self.adjustSize()
self.setFixedSize(self.sizeHint())
def processWidget(self, container, torun):
for widget in container.children():
self.processWidget(widget, torun)
if hasattr(widget, 'objectName') and widget.objectName() and widget.objectName()[:3] != "qt_":
torun(widget)
def loadTooltips(self, widget):
tooltipName = widget.objectName().lower().split(constants.CONFIG_NAME_MARKER)[0] + "-tooltip"
if tooltipName[:1] == constants.INVERTED_STATE_MARKER or tooltipName[:1] == constants.LOAD_SAVE_MANUALLY_MARKER:
tooltipName = tooltipName[1:]
widget.setToolTip(getMessage(tooltipName))
def loadValues(self, widget):
valueName = str(widget.objectName())
if valueName[:1] == constants.LOAD_SAVE_MANUALLY_MARKER:
return
if isinstance(widget, QCheckBox) and widget.objectName():
if valueName[:1] == constants.INVERTED_STATE_MARKER:
valueName = valueName[1:]
inverted = True
else:
inverted = False
if self.config[valueName] is None:
widget.setTristate(True)
widget.setCheckState(Qt.PartiallyChecked)
widget.stateChanged.connect(lambda: widget.setTristate(False))
else:
widget.setChecked(self.config[valueName] != inverted)
elif isinstance(widget, QRadioButton):
radioName, radioValue = valueName.split(constants.CONFIG_NAME_MARKER)[1].split(constants.CONFIG_VALUE_MARKER)
if self.config[radioName] == radioValue:
widget.setChecked(True)
elif isinstance(widget, QLineEdit):
widget.setText(self.config[valueName])
def saveValues(self, widget):
valueName = str(widget.objectName())
if valueName[:1] == constants.LOAD_SAVE_MANUALLY_MARKER:
return
if isinstance(widget, QCheckBox) and widget.objectName():
if widget.checkState() == Qt.PartiallyChecked:
self.config[valueName] = None
else:
if valueName[:1] == constants.INVERTED_STATE_MARKER:
valueName = valueName[1:]
inverted = True
else:
inverted = False
self.config[valueName] = widget.isChecked() != inverted
elif isinstance(widget, QRadioButton):
radioName, radioValue = valueName.split(constants.CONFIG_NAME_MARKER)[1].split(constants.CONFIG_VALUE_MARKER)
if widget.isChecked():
self.config[radioName] = radioValue
elif isinstance(widget, QLineEdit):
self.config[valueName] = widget.text()
def connectChildren(self, widget):
widgetName = str(widget.objectName())
if widgetName in self.subitems:
widget.stateChanged.connect(lambda: self.updateSubwidgets(self, widget))
self.updateSubwidgets(self, widget)
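
    # Enables or disables the dependent widgets listed in self.subitems
    # according to whether their parent checkbox is checked.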
def updateSubwidgets(self, container, parentwidget, subwidgets=None):
widgetName = parentwidget.objectName()
if not subwidgets:
subwidgets = self.subitems[widgetName]
for widget in container.children():
self.updateSubwidgets(widget, parentwidget, subwidgets)
if hasattr(widget, 'objectName') and widget.objectName() and widget.objectName() in subwidgets:
widget.setDisabled(not parentwidget.isChecked())
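
    # Basics tab: connection settings (server, password, username, room) and
    # media player settings (player path, media file, player arguments).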
def addBasicTab(self):
config = self.config
playerpaths = self.playerpaths
error = self.error
if self.datacleared == True:
error = constants.ERROR_MESSAGE_MARKER + "{}".format(getMessage("gui-data-cleared-notification"))
self.error = error
if config['host'] is None:
host = ""
elif ":" in config['host'] and '[' not in config['host']:
host = config['host']
else:
host = config['host'] + ":" + str(config['port'])
self.perPlayerArgs = self.config["perPlayerArguments"]
self.mediaSearchDirectories = self.config["mediaSearchDirectories"]
self.trustedDomains = self.config["trustedDomains"]
self.connectionSettingsGroup = QtWidgets.QGroupBox(getMessage("connection-group-title"))
self.loadSavedPublicServerList()
self.hostCombobox = QtWidgets.QComboBox(self)
if self.publicServers:
i = 0
for publicServer in self.publicServers:
serverTitle = publicServer[0]
serverAddressPort = publicServer[1]
self.hostCombobox.addItem(serverAddressPort)
self.hostCombobox.setItemData(i, serverTitle, Qt.ToolTipRole)
if serverAddressPort not in self.publicServerAddresses:
self.publicServerAddresses.append(serverAddressPort)
i += 1
self.hostCombobox.setEditable(True)
self.hostCombobox.setEditText(host)
self.hostLabel = QLabel(getMessage("host-label"), self)
self.usernameTextbox = QLineEdit(self)
self.usernameTextbox.setObjectName("name")
self.serverpassLabel = QLabel(getMessage("password-label"), self)
self.roomsCombobox = QtWidgets.QComboBox(self)
self.roomsCombobox.setEditable(True)
caseSensitiveCompleter = QtWidgets.QCompleter("", self)
caseSensitiveCompleter.setCaseSensitivity(Qt.CaseSensitive)
self.roomsCombobox.setCompleter(caseSensitiveCompleter)
self.fillRoomsCombobox()
self.roomsCombobox.setEditText(config['room'])
self.usernameLabel = QLabel(getMessage("name-label"), self)
self.serverpassTextbox = QLineEdit(self)
self.serverpassTextbox.setText(self.storedPassword)
self.defaultroomLabel = QLabel(getMessage("room-label"), self)
self.editRoomsButton = QtWidgets.QToolButton()
self.editRoomsButton.setIcon(QtGui.QIcon(resourcespath + 'bullet_edit_centered.png'))
self.editRoomsButton.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "edit-rooms")
self.editRoomsButton.released.connect(self.openRoomsDialog)
self.hostLabel.setObjectName("host")
self.hostCombobox.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "host")
self.usernameLabel.setObjectName("name")
self.usernameTextbox.setObjectName("name")
self.serverpassLabel.setObjectName("password")
self.serverpassTextbox.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "password")
self.hostCombobox.editTextChanged.connect(self.updatePasswordVisibilty)
self.hostCombobox.currentIndexChanged.connect(self.updatePasswordVisibilty)
self.defaultroomLabel.setObjectName("room")
self.roomsCombobox.setObjectName("room")
self.connectionSettingsLayout = QtWidgets.QGridLayout()
self.connectionSettingsLayout.addWidget(self.hostLabel, 0, 0)
self.connectionSettingsLayout.addWidget(self.hostCombobox, 0, 1)
self.connectionSettingsLayout.addWidget(self.serverpassLabel, 1, 0)
self.connectionSettingsLayout.addWidget(self.serverpassTextbox, 1, 1)
self.connectionSettingsLayout.addWidget(self.usernameLabel, 2, 0)
self.connectionSettingsLayout.addWidget(self.usernameTextbox, 2, 1)
self.connectionSettingsLayout.addWidget(self.defaultroomLabel, 3, 0)
self.connectionSettingsLayout.addWidget(self.editRoomsButton, 3, 2, Qt.AlignRight)
self.connectionSettingsLayout.addWidget(self.roomsCombobox, 3, 1)
self.connectionSettingsLayout.setSpacing(10)
self.connectionSettingsGroup.setLayout(self.connectionSettingsLayout)
if isMacOS():
self.connectionSettingsGroup.setFixedHeight(self.connectionSettingsGroup.minimumSizeHint().height())
else:
self.connectionSettingsGroup.setMaximumHeight(self.connectionSettingsGroup.minimumSizeHint().height())
self.playerargsTextbox = QLineEdit("", self)
self.playerargsTextbox.textEdited.connect(self.changedPlayerArgs)
self.playerargsLabel = QLabel(getMessage("player-arguments-label"), self)
self.mediaplayerSettingsGroup = QtWidgets.QGroupBox(getMessage("media-setting-title"))
self.executableiconImage = QtGui.QImage()
self.executableiconLabel = QLabel(self)
self.executableiconLabel.setFixedWidth(16)
self.executableiconLabel.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.executablepathCombobox = QtWidgets.QComboBox(self)
self.executablepathCombobox.setEditable(True)
self.executablepathCombobox.currentIndexChanged.connect(self.updateExecutableIcon)
self.executablepathCombobox.setEditText(self._tryToFillPlayerPath(config['playerPath'], playerpaths))
self.executablepathCombobox.editTextChanged.connect(self.updateExecutableIcon)
self.executablepathLabel = QLabel(getMessage("executable-path-label"), self)
self.executablebrowseButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'folder_explore.png'), getMessage("browse-label"))
self.executablebrowseButton.clicked.connect(self.browsePlayerpath)
self.mediapathTextbox = QLineEdit(config['file'], self)
self.mediapathLabel = QLabel(getMessage("media-path-label"), self)
self.mediabrowseButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'folder_explore.png'), getMessage("browse-label"))
self.mediabrowseButton.clicked.connect(self.browseMediapath)
self.executablepathLabel.setObjectName("executable-path")
self.executablepathCombobox.setObjectName("executable-path")
self.executablepathCombobox.setMinimumContentsLength(constants.EXECUTABLE_COMBOBOX_MINIMUM_LENGTH)
self.executablepathCombobox.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToMinimumContentsLength)
self.mediapathLabel.setObjectName("media-path")
self.mediapathTextbox.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "media-path")
self.playerargsLabel.setObjectName("player-arguments")
self.playerargsTextbox.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "player-arguments")
self.mediaplayerSettingsLayout = QtWidgets.QGridLayout()
self.mediaplayerSettingsLayout.addWidget(self.executablepathLabel, 0, 0, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.executableiconLabel, 0, 1, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.executablepathCombobox, 0, 2, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.executablebrowseButton, 0, 3, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.mediapathLabel, 1, 0, 1, 2)
self.mediaplayerSettingsLayout.addWidget(self.mediapathTextbox, 1, 2, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.mediabrowseButton, 1, 3, 1, 1)
self.mediaplayerSettingsLayout.addWidget(self.playerargsLabel, 2, 0, 1, 2)
self.mediaplayerSettingsLayout.addWidget(self.playerargsTextbox, 2, 2, 1, 2)
self.mediaplayerSettingsLayout.setSpacing(10)
self.mediaplayerSettingsGroup.setLayout(self.mediaplayerSettingsLayout)
iconWidth = self.executableiconLabel.minimumSize().width()+self.mediaplayerSettingsLayout.spacing()
maxWidth = max(
self.hostLabel.minimumSizeHint().width(),
self.usernameLabel.minimumSizeHint().width(),
self.serverpassLabel.minimumSizeHint().width(),
self.defaultroomLabel.minimumSizeHint().width(),
self.executablepathLabel.minimumSizeHint().width(),
self.mediapathLabel.minimumSizeHint().width(),
self.playerargsLabel.minimumSizeHint().width()
)
self.hostLabel.setMinimumWidth(maxWidth+iconWidth)
self.usernameLabel.setMinimumWidth(maxWidth+iconWidth)
self.serverpassLabel.setMinimumWidth(maxWidth+iconWidth)
self.defaultroomLabel.setMinimumWidth(maxWidth+iconWidth)
self.executablepathLabel.setMinimumWidth(maxWidth)
self.mediapathLabel.setMinimumWidth(maxWidth+iconWidth)
self.playerargsLabel.setMinimumWidth(maxWidth+iconWidth)
self.showmoreCheckbox = QCheckBox(getMessage("more-title"))
self.showmoreCheckbox.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "more")
self.basicOptionsFrame = QtWidgets.QFrame()
self.basicOptionsLayout = QtWidgets.QVBoxLayout()
if error:
error = str(error)
self.errorLabel = QLabel(self)
if error[:1] != constants.ERROR_MESSAGE_MARKER:
self.errorLabel.setStyleSheet(constants.STYLE_ERRORLABEL)
else:
error = error[1:]
self.errorLabel.setStyleSheet(constants.STYLE_SUCCESSLABEL)
self.errorLabel.setText(error)
self.errorLabel.setAlignment(Qt.AlignCenter)
self.basicOptionsLayout.addWidget(self.errorLabel)
self.connectionSettingsGroup.setMaximumHeight(self.connectionSettingsGroup.minimumSizeHint().height())
self.basicOptionsLayout.setAlignment(Qt.AlignTop)
self.basicOptionsLayout.addWidget(self.connectionSettingsGroup)
self.basicOptionsLayout.addSpacing(5)
self.basicOptionsLayout.addWidget(self.mediaplayerSettingsGroup)
self.basicOptionsFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.basicOptionsFrame.setLayout(self.basicOptionsLayout)
self.stackedLayout.addWidget(self.basicOptionsFrame)
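
    # Readiness tab: initial ready state, pause-on-leave and the unpause
    # behaviour options.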
def addReadinessTab(self):
self.readyFrame = QtWidgets.QFrame()
self.readyLayout = QtWidgets.QVBoxLayout()
self.readyFrame.setLayout(self.readyLayout)
# Initial state
self.readyInitialGroup = QtWidgets.QGroupBox(getMessage("readiness-title"))
self.readyInitialLayout = QtWidgets.QVBoxLayout()
self.readyInitialGroup.setLayout(self.readyInitialLayout)
self.readyatstartCheckbox = QCheckBox(getMessage("readyatstart-label"))
self.readyatstartCheckbox.setObjectName("readyAtStart")
self.readyInitialLayout.addWidget(self.readyatstartCheckbox)
self.readyLayout.addWidget(self.readyInitialGroup)
# Automatically pausing
self.readyPauseGroup = QtWidgets.QGroupBox(getMessage("pausing-title"))
self.readyPauseLayout = QtWidgets.QVBoxLayout()
self.readyPauseGroup.setLayout(self.readyPauseLayout)
self.pauseonleaveCheckbox = QCheckBox(getMessage("pauseonleave-label"))
self.pauseonleaveCheckbox.setObjectName("pauseOnLeave")
self.readyPauseLayout.addWidget(self.pauseonleaveCheckbox)
self.readyLayout.addWidget(self.readyPauseGroup)
# Unpausing
self.readyUnpauseGroup = QtWidgets.QGroupBox(getMessage("unpause-title"))
self.readyUnpauseLayout = QtWidgets.QVBoxLayout()
self.readyUnpauseGroup.setLayout(self.readyUnpauseLayout)
self.readyUnpauseButtonGroup = QButtonGroup()
self.unpauseIfAlreadyReadyOption = QRadioButton(getMessage("unpause-ifalreadyready-option"))
self.readyUnpauseButtonGroup.addButton(self.unpauseIfAlreadyReadyOption)
self.unpauseIfAlreadyReadyOption.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.unpauseIfAlreadyReadyOption.setObjectName("unpause-ifalreadyready" + constants.CONFIG_NAME_MARKER + "unpauseAction" + constants.CONFIG_VALUE_MARKER + constants.UNPAUSE_IFALREADYREADY_MODE)
self.readyUnpauseLayout.addWidget(self.unpauseIfAlreadyReadyOption)
self.unpauseIfOthersReadyOption = QRadioButton(getMessage("unpause-ifothersready-option"))
self.readyUnpauseButtonGroup.addButton(self.unpauseIfOthersReadyOption)
self.unpauseIfOthersReadyOption.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.unpauseIfOthersReadyOption.setObjectName("unpause-ifothersready" + constants.CONFIG_NAME_MARKER + "unpauseAction" + constants.CONFIG_VALUE_MARKER + constants.UNPAUSE_IFOTHERSREADY_MODE)
self.readyUnpauseLayout.addWidget(self.unpauseIfOthersReadyOption)
self.unpauseIfMinUsersReadyOption = QRadioButton(getMessage("unpause-ifminusersready-option"))
self.readyUnpauseButtonGroup.addButton(self.unpauseIfMinUsersReadyOption)
self.unpauseIfMinUsersReadyOption.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.unpauseIfMinUsersReadyOption.setObjectName("unpause-ifminusersready" + constants.CONFIG_NAME_MARKER + "unpauseAction" + constants.CONFIG_VALUE_MARKER + constants.UNPAUSE_IFMINUSERSREADY_MODE)
self.readyUnpauseLayout.addWidget(self.unpauseIfMinUsersReadyOption)
self.unpauseAlwaysUnpauseOption = QRadioButton(getMessage("unpause-always"))
self.readyUnpauseButtonGroup.addButton(self.unpauseAlwaysUnpauseOption)
self.unpauseAlwaysUnpauseOption.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.unpauseAlwaysUnpauseOption.setObjectName("unpause-always" + constants.CONFIG_NAME_MARKER + "unpauseAction" + constants.CONFIG_VALUE_MARKER + constants.UNPAUSE_ALWAYS_MODE)
self.readyUnpauseLayout.addWidget(self.unpauseAlwaysUnpauseOption)
self.readyLayout.addWidget(self.readyUnpauseGroup)
self.readyLayout.setAlignment(Qt.AlignTop)
self.stackedLayout.addWidget(self.readyFrame)
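
    # Misc tab: filename/filesize privacy modes, Syncplay internals and the
    # media search directories list.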
def addMiscTab(self):
self.miscFrame = QtWidgets.QFrame()
self.miscLayout = QtWidgets.QVBoxLayout()
self.miscFrame.setLayout(self.miscLayout)
self.coreSettingsGroup = QtWidgets.QGroupBox(getMessage("core-behaviour-title"))
self.coreSettingsLayout = QtWidgets.QGridLayout()
self.coreSettingsGroup.setLayout(self.coreSettingsLayout)
### Privacy:
self.filenameprivacyLabel = QLabel(getMessage("filename-privacy-label"), self)
self.filenameprivacyButtonGroup = QButtonGroup()
self.filenameprivacySendRawOption = QRadioButton(getMessage("privacy-sendraw-option"))
self.filenameprivacySendHashedOption = QRadioButton(getMessage("privacy-sendhashed-option"))
self.filenameprivacyDontSendOption = QRadioButton(getMessage("privacy-dontsend-option"))
self.filenameprivacyButtonGroup.addButton(self.filenameprivacySendRawOption)
self.filenameprivacyButtonGroup.addButton(self.filenameprivacySendHashedOption)
self.filenameprivacyButtonGroup.addButton(self.filenameprivacyDontSendOption)
self.filesizeprivacyLabel = QLabel(getMessage("filesize-privacy-label"), self)
self.filesizeprivacyButtonGroup = QButtonGroup()
self.filesizeprivacySendRawOption = QRadioButton(getMessage("privacy-sendraw-option"))
self.filesizeprivacySendHashedOption = QRadioButton(getMessage("privacy-sendhashed-option"))
self.filesizeprivacyDontSendOption = QRadioButton(getMessage("privacy-dontsend-option"))
self.filesizeprivacyButtonGroup.addButton(self.filesizeprivacySendRawOption)
self.filesizeprivacyButtonGroup.addButton(self.filesizeprivacySendHashedOption)
self.filesizeprivacyButtonGroup.addButton(self.filesizeprivacyDontSendOption)
self.filenameprivacyLabel.setObjectName("filename-privacy")
self.filenameprivacySendRawOption.setObjectName("privacy-sendraw" + constants.CONFIG_NAME_MARKER + "filenamePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_SENDRAW_MODE)
self.filenameprivacySendHashedOption.setObjectName("privacy-sendhashed" + constants.CONFIG_NAME_MARKER + "filenamePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_SENDHASHED_MODE)
self.filenameprivacyDontSendOption.setObjectName("privacy-dontsend" + constants.CONFIG_NAME_MARKER + "filenamePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_DONTSEND_MODE)
self.filesizeprivacyLabel.setObjectName("filesize-privacy")
self.filesizeprivacySendRawOption.setObjectName("privacy-sendraw" + constants.CONFIG_NAME_MARKER + "filesizePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_SENDRAW_MODE)
self.filesizeprivacySendHashedOption.setObjectName("privacy-sendhashed" + constants.CONFIG_NAME_MARKER + "filesizePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_SENDHASHED_MODE)
self.filesizeprivacyDontSendOption.setObjectName("privacy-dontsend" + constants.CONFIG_NAME_MARKER + "filesizePrivacyMode" + constants.CONFIG_VALUE_MARKER + constants.PRIVACY_DONTSEND_MODE)
self.coreSettingsLayout.addWidget(self.filenameprivacyLabel, 3, 0)
self.coreSettingsLayout.addWidget(self.filenameprivacySendRawOption, 3, 1, Qt.AlignLeft)
self.coreSettingsLayout.addWidget(self.filenameprivacySendHashedOption, 3, 2, Qt.AlignLeft)
self.coreSettingsLayout.addWidget(self.filenameprivacyDontSendOption, 3, 3, Qt.AlignLeft)
self.coreSettingsLayout.addWidget(self.filesizeprivacyLabel, 4, 0)
self.coreSettingsLayout.addWidget(self.filesizeprivacySendRawOption, 4, 1, Qt.AlignLeft)
self.coreSettingsLayout.addWidget(self.filesizeprivacySendHashedOption, 4, 2, Qt.AlignLeft)
self.coreSettingsLayout.addWidget(self.filesizeprivacyDontSendOption, 4, 3, Qt.AlignLeft)
## Syncplay internals
self.internalSettingsGroup = QtWidgets.QGroupBox(getMessage("syncplay-internals-title"))
self.internalSettingsLayout = QtWidgets.QVBoxLayout()
self.internalSettingsGroup.setLayout(self.internalSettingsLayout)
self.alwaysshowCheckbox = QCheckBox(getMessage("forceguiprompt-label"))
self.alwaysshowCheckbox.setObjectName(constants.INVERTED_STATE_MARKER + "forceGuiPrompt")
self.internalSettingsLayout.addWidget(self.alwaysshowCheckbox)
self.automaticupdatesCheckbox = QCheckBox(getMessage("checkforupdatesautomatically-label"))
self.automaticupdatesCheckbox.setObjectName("checkForUpdatesAutomatically")
self.internalSettingsLayout.addWidget(self.automaticupdatesCheckbox)
self.autosaveJoinsToListCheckbox = QCheckBox(getMessage("autosavejoinstolist-label"))
self.autosaveJoinsToListCheckbox.setObjectName("autosaveJoinsToList")
self.internalSettingsLayout.addWidget(self.autosaveJoinsToListCheckbox)
## Media path directories
self.mediasearchSettingsGroup = QtWidgets.QGroupBox(getMessage("syncplay-mediasearchdirectories-title"))
self.mediasearchSettingsLayout = QtWidgets.QVBoxLayout()
self.mediasearchSettingsGroup.setLayout(self.mediasearchSettingsLayout)
self.mediasearchTextEdit = QPlainTextEdit(utils.getListAsMultilineString(self.mediaSearchDirectories))
self.mediasearchTextEdit.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "mediasearcdirectories-arguments")
self.mediasearchTextEdit.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.mediasearchSettingsLayout.addWidget(self.mediasearchTextEdit)
self.mediasearchSettingsGroup.setMaximumHeight(self.mediasearchSettingsGroup.minimumSizeHint().height())
self.miscLayout.addWidget(self.coreSettingsGroup)
self.miscLayout.addWidget(self.internalSettingsGroup)
self.miscLayout.addWidget(self.mediasearchSettingsGroup)
self.miscLayout.setAlignment(Qt.AlignTop)
self.stackedLayout.addWidget(self.miscFrame)
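
    # Sync tab: slowdown/rewind/fast-forward behaviour and trusted domains.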
def addSyncTab(self):
self.syncSettingsFrame = QtWidgets.QFrame()
self.syncSettingsLayout = QtWidgets.QVBoxLayout()
self.desyncSettingsGroup = QtWidgets.QGroupBox(getMessage("sync-otherslagging-title"))
self.desyncOptionsFrame = QtWidgets.QFrame()
self.desyncSettingsOptionsLayout = QtWidgets.QHBoxLayout()
config = self.config
self.slowdownCheckbox = QCheckBox(getMessage("slowondesync-label"))
self.slowdownCheckbox.setObjectName("slowOnDesync")
self.rewindCheckbox = QCheckBox(getMessage("rewindondesync-label"))
self.rewindCheckbox.setObjectName("rewindOnDesync")
self.fastforwardCheckbox = QCheckBox(getMessage("fastforwardondesync-label"))
self.fastforwardCheckbox.setObjectName("fastforwardOnDesync")
self.desyncSettingsLayout = QtWidgets.QGridLayout()
self.desyncSettingsLayout.setSpacing(2)
self.desyncFrame = QtWidgets.QFrame()
self.desyncFrame.setLineWidth(0)
self.desyncFrame.setMidLineWidth(0)
self.desyncSettingsLayout.addWidget(self.slowdownCheckbox, 0, 0, 1, 2, Qt.AlignLeft)
self.desyncSettingsLayout.addWidget(self.rewindCheckbox, 1, 0, 1, 2, Qt.AlignLeft)
self.desyncSettingsLayout.setAlignment(Qt.AlignLeft)
self.desyncSettingsGroup.setLayout(self.desyncSettingsLayout)
self.desyncSettingsOptionsLayout.addWidget(self.desyncFrame)
self.desyncFrame.setLayout(self.syncSettingsLayout)
self.othersyncSettingsGroup = QtWidgets.QGroupBox(getMessage("sync-youlaggging-title"))
self.othersyncOptionsFrame = QtWidgets.QFrame()
self.othersyncSettingsLayout = QtWidgets.QGridLayout()
self.dontslowwithmeCheckbox = QCheckBox(getMessage("dontslowdownwithme-label"))
self.dontslowwithmeCheckbox.setObjectName("dontSlowDownWithMe")
self.othersyncSettingsLayout.addWidget(self.dontslowwithmeCheckbox, 2, 0, 1, 2, Qt.AlignLeft)
self.othersyncSettingsLayout.setAlignment(Qt.AlignLeft)
self.othersyncSettingsLayout.addWidget(self.fastforwardCheckbox, 3, 0, 1, 2, Qt.AlignLeft)
## Trusted domains
self.trusteddomainsSettingsGroup = QtWidgets.QGroupBox(getMessage("syncplay-trusteddomains-title"))
self.trusteddomainsSettingsLayout = QtWidgets.QVBoxLayout()
self.trusteddomainsSettingsGroup.setLayout(self.trusteddomainsSettingsLayout)
self.trusteddomainsTextEdit = QPlainTextEdit(utils.getListAsMultilineString(self.trustedDomains))
self.trusteddomainsTextEdit.setObjectName(constants.LOAD_SAVE_MANUALLY_MARKER + "trusteddomains-arguments")
self.trusteddomainsTextEdit.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.trusteddomainsSettingsLayout.addWidget(self.trusteddomainsTextEdit)
self.trusteddomainsSettingsGroup.setMaximumHeight(self.trusteddomainsSettingsGroup.minimumSizeHint().height())
self.othersyncSettingsGroup.setLayout(self.othersyncSettingsLayout)
self.othersyncSettingsGroup.setMaximumHeight(self.othersyncSettingsGroup.minimumSizeHint().height())
self.syncSettingsLayout.addWidget(self.othersyncSettingsGroup)
self.syncSettingsLayout.addWidget(self.desyncSettingsGroup)
self.syncSettingsLayout.addWidget(self.trusteddomainsSettingsGroup)
self.syncSettingsFrame.setLayout(self.syncSettingsLayout)
self.desyncSettingsGroup.setMaximumHeight(self.desyncSettingsGroup.minimumSizeHint().height())
self.syncSettingsLayout.setAlignment(Qt.AlignTop)
self.stackedLayout.addWidget(self.syncSettingsFrame)
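
    # Chat tab: chat input (font, colour, position) and chat output
    # (font, chatroom vs. scrolling mode) settings.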
def addChatTab(self):
self.chatFrame = QtWidgets.QFrame()
self.chatLayout = QtWidgets.QVBoxLayout()
self.chatLayout.setAlignment(Qt.AlignTop)
# Input
self.chatInputGroup = QtWidgets.QGroupBox(getMessage("chat-title"))
self.chatInputLayout = QtWidgets.QGridLayout()
self.chatLayout.addWidget(self.chatInputGroup)
self.chatInputGroup.setLayout(self.chatInputLayout)
self.chatInputEnabledCheckbox = QCheckBox(getMessage("chatinputenabled-label"))
self.chatInputEnabledCheckbox.setObjectName("chatInputEnabled")
self.chatInputLayout.addWidget(self.chatInputEnabledCheckbox, 1, 0, 1, 1, Qt.AlignLeft)
self.chatDirectInputCheckbox = QCheckBox(getMessage("chatdirectinput-label"))
self.chatDirectInputCheckbox.setObjectName("chatDirectInput")
self.chatDirectInputCheckbox.setStyleSheet(
constants.STYLE_SUBCHECKBOX.format(self.posixresourcespath + "chevrons_right.png"))
self.chatInputLayout.addWidget(self.chatDirectInputCheckbox, 2, 0, 1, 1, Qt.AlignLeft)
self.inputFontLayout = QtWidgets.QHBoxLayout()
self.inputFontLayout.setContentsMargins(0, 0, 0, 0)
self.inputFontFrame = QtWidgets.QFrame()
self.inputFontFrame.setLayout(self.inputFontLayout)
self.inputFontFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.chatFontLabel = QLabel(getMessage("chatinputfont-label"), self)
self.chatFontLabel.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(self.posixresourcespath + "chevrons_right.png"))
self.chatFontLabel.setObjectName("font-label")
self.chatInputFontButton = QtWidgets.QPushButton(getMessage("chatfont-label"))
self.chatInputFontButton.setObjectName("set-input-font")
self.chatInputFontButtonGroup = QtWidgets.QButtonGroup()
self.chatInputFontButtonGroup.addButton(self.chatInputFontButton)
self.chatInputFontButton.released.connect(lambda: self.fontDialog("chatInput"))
self.chatInputColourButton = QtWidgets.QPushButton(getMessage("chatcolour-label"))
self.chatInputColourButton.setObjectName("set-input-colour")
self.chatInputColourButtonGroup = QtWidgets.QButtonGroup()
self.chatInputColourButtonGroup.addButton(self.chatInputColourButton)
self.chatInputColourButton.released.connect(lambda: self.colourDialog("chatInput"))
self.inputFontLayout.addWidget(self.chatFontLabel, Qt.AlignLeft)
self.inputFontLayout.addWidget(self.chatInputFontButton, Qt.AlignLeft)
self.inputFontLayout.addWidget(self.chatInputColourButton, Qt.AlignLeft)
self.chatInputLayout.addWidget(self.inputFontFrame, 3, 0, 1, 3, Qt.AlignLeft)
self.chatInputPositionFrame = QtWidgets.QFrame()
self.chatInputPositionLayout = QtWidgets.QHBoxLayout()
self.chatInputPositionLayout.setContentsMargins(0, 0, 0, 0)
self.chatInputPositionFrame.setLayout(self.chatInputPositionLayout)
self.chatInputPositionFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.chatInputPositionLabel = QLabel(getMessage("chatinputposition-label"), self)
self.chatInputPositionLabel.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(self.posixresourcespath + "chevrons_right.png"))
self.chatInputPositionGroup = QButtonGroup()
self.chatInputTopOption = QRadioButton(getMessage("chat-top-option"))
self.chatInputMiddleOption = QRadioButton(getMessage("chat-middle-option"))
self.chatInputBottomOption = QRadioButton(getMessage("chat-bottom-option"))
self.chatInputPositionGroup.addButton(self.chatInputTopOption)
self.chatInputPositionGroup.addButton(self.chatInputMiddleOption)
self.chatInputPositionGroup.addButton(self.chatInputBottomOption)
self.chatInputPositionLabel.setObjectName("chatinputposition")
self.chatInputTopOption.setObjectName("chatinputposition-top" + constants.CONFIG_NAME_MARKER + "chatInputPosition" + constants.CONFIG_VALUE_MARKER + constants.INPUT_POSITION_TOP)
self.chatInputMiddleOption.setObjectName("chatinputposition-middle" + constants.CONFIG_NAME_MARKER + "chatInputPosition" + constants.CONFIG_VALUE_MARKER + constants.INPUT_POSITION_MIDDLE)
self.chatInputBottomOption.setObjectName("chatinputposition-bottom" + constants.CONFIG_NAME_MARKER + "chatInputPosition" + constants.CONFIG_VALUE_MARKER + constants.INPUT_POSITION_BOTTOM)
self.chatInputPositionLayout.addWidget(self.chatInputPositionLabel)
self.chatInputPositionLayout.addWidget(self.chatInputTopOption)
self.chatInputPositionLayout.addWidget(self.chatInputMiddleOption)
self.chatInputPositionLayout.addWidget(self.chatInputBottomOption)
self.chatInputLayout.addWidget(self.chatInputPositionFrame)
self.subitems['chatInputEnabled'] = [self.chatInputPositionLabel.objectName(), self.chatInputTopOption.objectName(),
self.chatInputMiddleOption.objectName(), self.chatInputBottomOption.objectName(),
self.chatInputFontButton.objectName(), self.chatFontLabel.objectName(),
self.chatInputColourButton.objectName(), self.chatDirectInputCheckbox.objectName()]
# Output
self.chatOutputGroup = QtWidgets.QGroupBox(getMessage("chatoutputheader-label"))
self.chatOutputLayout = QtWidgets.QGridLayout()
self.chatLayout.addWidget(self.chatOutputGroup)
self.chatOutputGroup.setLayout(self.chatOutputLayout)
self.chatOutputEnabledCheckbox = QCheckBox(getMessage("chatoutputenabled-label"))
self.chatOutputEnabledCheckbox.setObjectName("chatOutputEnabled")
self.chatOutputLayout.addWidget(self.chatOutputEnabledCheckbox, 1, 0, 1, 1, Qt.AlignLeft)
self.outputFontLayout = QtWidgets.QHBoxLayout()
self.outputFontLayout.setContentsMargins(0, 0, 0, 0)
self.outputFontFrame = QtWidgets.QFrame()
self.outputFontFrame.setLayout(self.outputFontLayout)
self.outputFontFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.chatOutputFontLabel = QLabel(getMessage("chatoutputfont-label"), self)
self.chatOutputFontLabel.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(self.posixresourcespath + "chevrons_right.png"))
self.chatOutputFontLabel.setObjectName("font-output-label")
self.chatOutputFontButton = QtWidgets.QPushButton(getMessage("chatfont-label"))
self.chatOutputFontButton.setObjectName("set-output-font")
self.chatOutputFontButtonGroup = QtWidgets.QButtonGroup()
self.chatOutputFontButtonGroup.addButton(self.chatOutputFontButton)
self.chatOutputFontButton.released.connect(lambda: self.fontDialog("chatOutput"))
self.chatOutputColourButton = QtWidgets.QPushButton(getMessage("chatcolour-label"))
self.outputFontLayout.addWidget(self.chatOutputFontLabel, Qt.AlignLeft)
self.outputFontLayout.addWidget(self.chatOutputFontButton, Qt.AlignLeft)
self.chatOutputLayout.addWidget(self.outputFontFrame, 2, 0, 1, 3, Qt.AlignLeft)
self.chatOutputModeLabel = QLabel(getMessage("chatoutputposition-label"), self)
self.chatOutputModeLabel.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(self.posixresourcespath + "chevrons_right.png"))
self.chatOutputModeGroup = QButtonGroup()
self.chatOutputChatroomOption = QRadioButton(getMessage("chat-chatroom-option"))
self.chatOutputScrollingOption = QRadioButton(getMessage("chat-scrolling-option"))
self.chatOutputModeGroup.addButton(self.chatOutputChatroomOption)
self.chatOutputModeGroup.addButton(self.chatOutputScrollingOption)
self.chatOutputModeLabel.setObjectName("chatoutputmode")
self.chatOutputChatroomOption.setObjectName("chatoutputmode-chatroom" + constants.CONFIG_NAME_MARKER + "chatOutputMode" + constants.CONFIG_VALUE_MARKER + constants.CHATROOM_MODE)
self.chatOutputScrollingOption.setObjectName("chatoutputmode-scrolling" + constants.CONFIG_NAME_MARKER + "chatOutputMode" + constants.CONFIG_VALUE_MARKER + constants.SCROLLING_MODE)
self.chatOutputModeFrame = QtWidgets.QFrame()
self.chatOutputModeLayout = QtWidgets.QHBoxLayout()
self.chatOutputModeLayout.setContentsMargins(0, 0, 0, 0)
self.chatOutputModeFrame.setLayout(self.chatOutputModeLayout)
self.chatOutputModeFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.chatOutputModeLayout.addWidget(self.chatOutputModeLabel)
self.chatOutputModeLayout.addWidget(self.chatOutputChatroomOption)
self.chatOutputModeLayout.addWidget(self.chatOutputScrollingOption)
self.chatOutputLayout.addWidget(self.chatOutputModeFrame)
self.subitems['chatOutputEnabled'] = [self.chatOutputModeLabel.objectName(), self.chatOutputChatroomOption.objectName(),
self.chatOutputScrollingOption.objectName(), self.chatOutputFontButton.objectName(),
self.chatOutputFontLabel.objectName()]
# chatFrame
self.chatFrame.setLayout(self.chatLayout)
self.stackedLayout.addWidget(self.chatFrame)
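
    # Font and colour pickers write their results straight back into
    # self.config under keys derived from configName (font family, size,
    # weight, underline and colour).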
def fontDialog(self, configName):
font = QtGui.QFont()
font.setFamily(self.config[configName + "FontFamily"])
font.setPointSize(self.config[configName + "RelativeFontSize"])
font.setWeight(self.config[configName + "FontWeight"])
font.setUnderline(self.config[configName + "FontUnderline"])
ok, value = QtWidgets.QFontDialog.getFont(font)
if ok:
self.config[configName + "FontFamily"] = value.family()
self.config[configName + "RelativeFontSize"] = value.pointSize()
self.config[configName + "FontWeight"] = value.weight()
self.config[configName + "FontUnderline"] = value.underline()
def colourDialog(self, configName):
oldColour = QtGui.QColor()
oldColour.setNamedColor(self.config[configName + "FontColor"])
colour = QtWidgets.QColorDialog.getColor(oldColour, self)
if colour.isValid():
self.config[configName + "FontColor"] = colour.name()
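
    # Messages tab: OSD visibility options plus other display settings,
    # including the language selector.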
def addMessageTab(self):
self.messageFrame = QtWidgets.QFrame()
self.messageLayout = QtWidgets.QVBoxLayout()
self.messageLayout.setAlignment(Qt.AlignTop)
# OSD
self.osdSettingsGroup = QtWidgets.QGroupBox(getMessage("messages-osd-title"))
self.osdSettingsLayout = QtWidgets.QVBoxLayout()
self.osdSettingsFrame = QtWidgets.QFrame()
self.showOSDCheckbox = QCheckBox(getMessage("showosd-label"))
self.showOSDCheckbox.setObjectName("showOSD")
self.osdSettingsLayout.addWidget(self.showOSDCheckbox)
self.showSameRoomOSDCheckbox = QCheckBox(getMessage("showsameroomosd-label"))
self.showSameRoomOSDCheckbox.setObjectName("showSameRoomOSD")
self.showSameRoomOSDCheckbox.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.osdSettingsLayout.addWidget(self.showSameRoomOSDCheckbox)
self.showNonControllerOSDCheckbox = QCheckBox(getMessage("shownoncontrollerosd-label"))
self.showNonControllerOSDCheckbox.setObjectName("showNonControllerOSD")
self.showNonControllerOSDCheckbox.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.osdSettingsLayout.addWidget(self.showNonControllerOSDCheckbox)
self.showDifferentRoomOSDCheckbox = QCheckBox(getMessage("showdifferentroomosd-label"))
self.showDifferentRoomOSDCheckbox.setObjectName("showDifferentRoomOSD")
self.showDifferentRoomOSDCheckbox.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.osdSettingsLayout.addWidget(self.showDifferentRoomOSDCheckbox)
self.slowdownOSDCheckbox = QCheckBox(getMessage("showslowdownosd-label"))
self.slowdownOSDCheckbox.setObjectName("showSlowdownOSD")
self.slowdownOSDCheckbox.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.osdSettingsLayout.addWidget(self.slowdownOSDCheckbox)
self.showOSDWarningsCheckbox = QCheckBox(getMessage("showosdwarnings-label"))
self.showOSDWarningsCheckbox.setObjectName("showOSDWarnings")
self.showOSDWarningsCheckbox.setStyleSheet(constants.STYLE_SUBCHECKBOX.format(posixresourcespath + "chevrons_right.png"))
self.osdSettingsLayout.addWidget(self.showOSDWarningsCheckbox)
self.subitems['showOSD'] = ["showSameRoomOSD", "showDifferentRoomOSD", "showSlowdownOSD", "showOSDWarnings", "showNonControllerOSD"]
self.osdSettingsGroup.setLayout(self.osdSettingsLayout)
self.osdSettingsGroup.setMaximumHeight(self.osdSettingsGroup.minimumSizeHint().height())
self.osdSettingsLayout.setAlignment(Qt.AlignTop)
self.messageLayout.addWidget(self.osdSettingsGroup)
# Other display
self.displaySettingsGroup = QtWidgets.QGroupBox(getMessage("messages-other-title"))
self.displaySettingsLayout = QtWidgets.QVBoxLayout()
        self.displaySettingsLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
self.displaySettingsFrame = QtWidgets.QFrame()
self.showDurationNotificationCheckbox = QCheckBox(getMessage("showdurationnotification-label"))
self.showDurationNotificationCheckbox.setObjectName("showDurationNotification")
self.displaySettingsLayout.addWidget(self.showDurationNotificationCheckbox)
self.languageFrame = QtWidgets.QFrame()
self.languageLayout = QtWidgets.QHBoxLayout()
self.languageLayout.setContentsMargins(0, 0, 0, 0)
self.languageFrame.setLayout(self.languageLayout)
self.languageFrame.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
        self.languageLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
self.languageLabel = QLabel(getMessage("language-label"), self)
self.languageCombobox = QtWidgets.QComboBox(self)
self.languageCombobox.addItem(getMessage("automatic-language").format(getMessage("LANGUAGE", getInitialLanguage())))
self.languages = getLanguages()
for lang in self.languages:
self.languageCombobox.addItem(self.languages[lang], lang)
if lang == self.config['language']:
self.languageCombobox.setCurrentIndex(self.languageCombobox.count()-1)
self.languageCombobox.currentIndexChanged.connect(self.languageChanged)
self.languageLayout.addWidget(self.languageLabel, 1, Qt.AlignLeft)
self.languageLayout.addWidget(self.languageCombobox, 1, Qt.AlignLeft)
self.displaySettingsLayout.addWidget(self.languageFrame)
self.languageLabel.setObjectName("language")
self.languageCombobox.setObjectName("language")
self.languageFrame.setMaximumWidth(self.languageFrame.minimumSizeHint().width())
self.displaySettingsGroup.setLayout(self.displaySettingsLayout)
self.displaySettingsGroup.setMaximumHeight(self.displaySettingsGroup.minimumSizeHint().height())
        self.displaySettingsLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
self.messageLayout.addWidget(self.displaySettingsGroup)
# messageFrame
self.messageFrame.setLayout(self.messageLayout)
self.stackedLayout.addWidget(self.messageFrame)
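
    # Bottom area: Help, Reset, Run and Store-and-Run buttons plus the
    # "show more" and shared-playlist checkboxes.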
def addBottomLayout(self):
config = self.config
self.bottomButtonFrame = QtWidgets.QFrame()
self.bottomButtonLayout = QtWidgets.QHBoxLayout()
self.helpButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'help.png'), getMessage("help-label"))
self.helpButton.setObjectName("help")
self.helpButton.setMaximumSize(self.helpButton.sizeHint())
self.helpButton.released.connect(self.openHelp)
self.resetButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'cog_delete.png'), getMessage("reset-label"))
self.resetButton.setMaximumSize(self.resetButton.sizeHint())
self.resetButton.setObjectName("reset")
self.resetButton.released.connect(self.resetSettings)
        self.runButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'accept.png'), getMessage("run-label"))
        self.runButton.released.connect(self._runWithoutStoringConfig)
        self.runButton.setToolTip(getMessage("nostore-tooltip"))
self.storeAndRunButton = QtWidgets.QPushButton(QtGui.QIcon(resourcespath + 'accept.png'), getMessage("storeandrun-label"))
self.storeAndRunButton.released.connect(self._saveDataAndLeave)
self.bottomButtonLayout.addWidget(self.helpButton)
self.bottomButtonLayout.addWidget(self.resetButton)
self.bottomButtonLayout.addWidget(self.runButton)
self.bottomButtonLayout.addWidget(self.storeAndRunButton)
self.bottomButtonFrame.setLayout(self.bottomButtonLayout)
if isMacOS():
self.bottomButtonLayout.setContentsMargins(15, 0, 15, 0)
else:
self.bottomButtonLayout.setContentsMargins(5, 0, 5, 0)
self.mainLayout.addWidget(self.bottomButtonFrame, 1, 0, 1, 2)
self.bottomCheckboxFrame = QtWidgets.QFrame()
if isMacOS():
self.bottomCheckboxFrame.setContentsMargins(3, 0, 6, 0)
else:
self.bottomCheckboxFrame.setContentsMargins(0, 0, 0, 0)
self.bottomCheckboxLayout = QtWidgets.QGridLayout()
self.alwaysshowCheckbox = QCheckBox(getMessage("forceguiprompt-label"))
self.enableplaylistsCheckbox = QCheckBox(getMessage("sharedplaylistenabled-label"))
self.bottomCheckboxLayout.addWidget(self.showmoreCheckbox)
self.bottomCheckboxLayout.addWidget(self.enableplaylistsCheckbox, 0, 2, Qt.AlignRight)
self.alwaysshowCheckbox.setObjectName(constants.INVERTED_STATE_MARKER + "forceGuiPrompt")
self.enableplaylistsCheckbox.setObjectName("sharedPlaylistEnabled")
self.bottomCheckboxFrame.setLayout(self.bottomCheckboxLayout)
self.mainLayout.addWidget(self.bottomCheckboxFrame, 2, 0, 1, 2)
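
    # Left-hand tab list used to switch the stacked layout between the
    # settings pages.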
def tabList(self):
self.tabListLayout = QtWidgets.QHBoxLayout()
self.tabListFrame = QtWidgets.QFrame()
self.tabListWidget = QtWidgets.QListWidget()
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "house.png"), getMessage("basics-label")))
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "control_pause_blue.png"), getMessage("readiness-label")))
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "film_link.png"), getMessage("sync-label")))
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "user_comment.png"), getMessage("chat-label")))
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "error.png"), getMessage("messages-label")))
self.tabListWidget.addItem(QtWidgets.QListWidgetItem(QtGui.QIcon(resourcespath + "cog.png"), getMessage("misc-label")))
self.tabListLayout.addWidget(self.tabListWidget)
self.tabListFrame.setLayout(self.tabListLayout)
self.tabListFrame.setFixedWidth(self.tabListFrame.minimumSizeHint().width() + constants.TAB_PADDING)
self.tabListWidget.setStyleSheet(constants.STYLE_TABLIST)
self.tabListWidget.currentItemChanged.connect(self.tabChange)
self.tabListWidget.itemClicked.connect(self.tabChange)
self.tabListWidget.itemPressed.connect(self.tabChange)
self.mainLayout.addWidget(self.tabListFrame, 0, 0, 1, 1)
def ensureTabListIsVisible(self):
self.stackedFrame.setFixedWidth(self.stackedFrame.width())
while self.tabListWidget.horizontalScrollBar().isVisible() and self.tabListFrame.width() < constants.MAXIMUM_TAB_WIDTH:
self.tabListFrame.setFixedWidth(self.tabListFrame.width()+1)
def tabChange(self):
self.setFocus()
self.stackedLayout.setCurrentIndex(self.tabListWidget.currentRow())
def resetSettings(self):
self.clearGUIData(leaveMore=True)
self.config['resetConfig'] = True
self.pressedclosebutton = False
self.close()
self.closed.emit()
def showEvent(self, *args, **kwargs):
self.ensureTabListIsVisible()
self.setFixedWidth(self.minimumSizeHint().width())
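
    # Clears the stored QSettings groups (player list, dialogs, window state,
    # update check timestamps and the cached public server list); leaveMore
    # keeps the "MoreSettings" group intact.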
def clearGUIData(self, leaveMore=False):
settings = QSettings("Syncplay", "PlayerList")
settings.clear()
settings = QSettings("Syncplay", "MediaBrowseDialog")
settings.clear()
settings = QSettings("Syncplay", "MainWindow")
settings.clear()
settings = QSettings("Syncplay", "Interface")
settings.beginGroup("Update")
settings.setValue("lastChecked", None)
settings.setValue("lastCheckedQt", None)
settings.endGroup()
settings.beginGroup("PublicServerList")
settings.setValue("publicServers", None)
settings.endGroup()
if not leaveMore:
settings = QSettings("Syncplay", "MoreSettings")
settings.clear()
self.datacleared = True
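
    # With no cached server list, either trigger an online refresh (when
    # automatic update checks are enabled) or fall back to the hard-coded
    # public server list from constants.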
def populateEmptyServerList(self):
if self.publicServers is None:
if self.config["checkForUpdatesAutomatically"] == True:
self.updateServerList()
else:
currentServer = self.hostCombobox.currentText()
self.publicServers = constants.FALLBACK_PUBLIC_SYNCPLAY_SERVERS
i = 0
for server in self.publicServers:
self.hostCombobox.addItem(server[1])
self.hostCombobox.setItemData(i, server[0], Qt.ToolTipRole)
                    if server[1] not in self.publicServerAddresses:
self.publicServerAddresses.append(server[1])
i += 1
self.hostCombobox.setEditText(currentServer)
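
    # Public servers take no password, so the password box is disabled and
    # its current contents cached when a known public server address is
    # selected (or both host and password are empty).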
def updatePasswordVisibilty(self):
if (self.hostCombobox.currentText() == "" and self.serverpassTextbox.text() == "") or str(self.hostCombobox.currentText()) in self.publicServerAddresses:
self.serverpassTextbox.setDisabled(True)
self.serverpassTextbox.setReadOnly(True)
if self.serverpassTextbox.text() != "":
self.storedPassword = self.serverpassTextbox.text()
self.serverpassTextbox.setText("")
else:
self.serverpassTextbox.setEnabled(True)
self.serverpassTextbox.setReadOnly(False)
self.serverpassTextbox.setText(self.storedPassword)
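
    # Menu bar with the standard Edit actions (cut/copy/paste/select all);
    # only created on macOS (see __init__).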
def createMenubar(self):
self.menuBar = QtWidgets.QMenuBar()
# Edit menu
self.editMenu = QtWidgets.QMenu(getMessage("edit-menu-label"), self)
self.cutAction = self.editMenu.addAction(getMessage("cut-menu-label"))
self.cutAction.setShortcuts(QtGui.QKeySequence.Cut)
self.copyAction = self.editMenu.addAction(getMessage("copy-menu-label"))
self.copyAction.setShortcuts(QtGui.QKeySequence.Copy)
self.pasteAction = self.editMenu.addAction(getMessage("paste-menu-label"))
self.pasteAction.setShortcuts(QtGui.QKeySequence.Paste)
self.selectAction = self.editMenu.addAction(getMessage("selectall-menu-label"))
self.selectAction.setShortcuts(QtGui.QKeySequence.SelectAll)
self.editMenu.addSeparator()
self.menuBar.addMenu(self.editMenu)
self.mainLayout.setMenuBar(self.menuBar)
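
    # Dialog construction: builds all tabs, wires the objectName-driven
    # load/save machinery via processWidget, and sizes the window according
    # to whether "show more" mode is active.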
def __init__(self, config, playerpaths, error, defaultConfig):
self.config = config
self.defaultConfig = defaultConfig
self.playerpaths = playerpaths
self.datacleared = False
self.config['resetConfig'] = False
self.subitems = {}
self.publicServers = None
self.publicServerAddresses = []
self._playerProbeThread = GetPlayerIconThread()
self._playerProbeThread.done.connect(self._updateExecutableIcon)
self._playerProbeThread.start()
if self.config['clearGUIData'] == True:
self.config['clearGUIData'] = False
self.clearGUIData()
self.QtWidgets = QtWidgets
self.QtGui = QtGui
self.error = error
if isWindows():
resourcespath = utils.findWorkingDir() + "\\resources\\"
else:
resourcespath = utils.findWorkingDir() + "/resources/"
self.posixresourcespath = utils.findWorkingDir().replace("\\", "/") + "/resources/"
self.resourcespath = resourcespath
super(ConfigDialog, self).__init__()
self.setWindowTitle(getMessage("config-window-title"))
self.setWindowFlags(self.windowFlags() & Qt.WindowCloseButtonHint & ~Qt.WindowContextHelpButtonHint)
self.setWindowIcon(QtGui.QIcon(resourcespath + "syncplay.png"))
self.stackedLayout = QtWidgets.QStackedLayout()
self.stackedFrame = QtWidgets.QFrame()
self.stackedFrame.setLayout(self.stackedLayout)
self.mainLayout = QtWidgets.QGridLayout()
self.mainLayout.setSpacing(0)
self.mainLayout.setContentsMargins(0, 0, 0, 0)
self.storedPassword = self.config['password']
self.addBasicTab()
self.addReadinessTab()
self.addSyncTab()
self.addChatTab()
self.addMessageTab()
self.addMiscTab()
self.tabList()
if isMacOS():
self.createMenubar()
self.config['menuBar'] = dict()
self.config['menuBar']['bar'] = self.menuBar
self.config['menuBar']['editMenu'] = self.editMenu
else:
self.config['menuBar'] = None
self.mainLayout.addWidget(self.stackedFrame, 0, 1)
self.addBottomLayout()
self.updatePasswordVisibilty()
if self.getMoreState() == False:
self.tabListFrame.hide()
self.resetButton.hide()
self.playerargsTextbox.hide()
self.playerargsLabel.hide()
self.runButton.hide()
if self.mediapathTextbox.text() == "":
self.mediapathTextbox.hide()
self.mediapathLabel.hide()
self.mediabrowseButton.hide()
else:
self.mediapathTextbox.show()
self.mediapathLabel.show()
self.mediabrowseButton.show()
if isMacOS():
newHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+50
else:
newHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+13
if self.error:
newHeight += self.errorLabel.height() + 3
self.stackedFrame.setFixedHeight(newHeight)
else:
self.showmoreCheckbox.setChecked(True)
self.tabListWidget.setCurrentRow(0)
self.stackedFrame.setFixedHeight(self.stackedFrame.minimumSizeHint().height())
self.showmoreCheckbox.toggled.connect(self.moreToggled)
self.setLayout(self.mainLayout)
if self.config['noStore']:
self.runButton.setFocus()
else:
self.storeAndRunButton.setFocus()
if isMacOS():
initialHeight = self.connectionSettingsGroup.minimumSizeHint().height()+self.mediaplayerSettingsGroup.minimumSizeHint().height()+self.bottomButtonFrame.minimumSizeHint().height()+50
if self.error:
initialHeight += 40
self.setFixedWidth(self.sizeHint().width())
self.setFixedHeight(initialHeight)
else:
self.setFixedSize(self.sizeHint())
self.setAcceptDrops(True)
if constants.SHOW_TOOLTIPS:
self.processWidget(self, lambda w: self.loadTooltips(w))
self.processWidget(self, lambda w: self.loadValues(w))
self.processWidget(self, lambda w: self.connectChildren(w))
self.populateEmptyServerList()