from env import lineFollower
from stable_baselines import PPO2
import imageio
import numpy as np
# Load separate environment for evaluation
env = lineFollower()
# Load the trained model
model = PPO2.load("model_final.zip")
# Store observation frames for the output GIF
images = []
# Reset the environment and record the initial observation
obs = env.reset()
images.append(obs)
done = False
while not done:
action, _states = model.predict(obs, deterministic=True)
obs, reward, done, info = env.step(action)
images.append(obs)
# Shut down the environment
env.shutdown()
imageio.mimsave('foundation.gif', [np.array(img) for i, img in enumerate(images) if i % 4 == 0], fps=29)

# Source: MajesticKhan/Reinforcement-Learning, Simulation/play.py (MIT)

from flask import json
from authlib.common.urls import urlparse, url_decode
from authlib.oauth2.rfc6749 import grants
from authlib.oauth2.rfc7636 import (
CodeChallenge as _CodeChallenge,
create_s256_code_challenge,
)
from .models import db, User, Client
from .models import CodeGrantMixin, generate_authorization_code
from .oauth2_server import TestCase
from .oauth2_server import create_authorization_server
class AuthorizationCodeGrant(CodeGrantMixin, grants.AuthorizationCodeGrant):
def create_authorization_code(self, client, grant_user, request):
code_challenge = request.data.get('code_challenge')
code_challenge_method = request.data.get('code_challenge_method')
return generate_authorization_code(
client, grant_user, request,
code_challenge=code_challenge,
code_challenge_method=code_challenge_method,
)
class CodeChallenge(_CodeChallenge):
SUPPORTED_CODE_CHALLENGE_METHOD = ['plain', 'S256', 'S128']
class CodeChallengeTest(TestCase):
def prepare_data(self, token_endpoint_auth_method='none'):
server = create_authorization_server(self.app)
server.register_grant(
AuthorizationCodeGrant,
[CodeChallenge(required=True)]
)
user = User(username='foo')
db.session.add(user)
db.session.commit()
client_secret = ''
if token_endpoint_auth_method != 'none':
client_secret = 'code-secret'
client = Client(
user_id=user.id,
client_id='code-client',
client_secret=client_secret,
redirect_uri='https://a.b',
scope='profile address',
token_endpoint_auth_method=token_endpoint_auth_method,
response_type='code',
grant_type='authorization_code',
)
self.authorize_url = (
'/oauth/authorize?response_type=code'
'&client_id=code-client'
)
db.session.add(client)
db.session.commit()
def test_missing_code_challenge(self):
self.prepare_data()
rv = self.client.get(self.authorize_url)
self.assertIn(b'Missing', rv.data)
def test_has_code_challenge(self):
self.prepare_data()
rv = self.client.get(self.authorize_url + '&code_challenge=abc')
self.assertEqual(rv.data, b'ok')
def test_invalid_code_challenge_method(self):
self.prepare_data()
suffix = '&code_challenge=abc&code_challenge_method=invalid'
rv = self.client.get(self.authorize_url + suffix)
self.assertIn(b'Unsupported', rv.data)
def test_supported_code_challenge_method(self):
self.prepare_data()
suffix = '&code_challenge=abc&code_challenge_method=plain'
rv = self.client.get(self.authorize_url + suffix)
self.assertEqual(rv.data, b'ok')
def test_trusted_client_without_code_challenge(self):
self.prepare_data('client_secret_basic')
rv = self.client.get(self.authorize_url)
self.assertEqual(rv.data, b'ok')
rv = self.client.post(self.authorize_url, data={'user_id': '1'})
self.assertIn('code=', rv.location)
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params['code']
headers = self.create_basic_header('code-client', 'code-secret')
rv = self.client.post('/oauth/token', data={
'grant_type': 'authorization_code',
'code': code,
}, headers=headers)
resp = json.loads(rv.data)
self.assertIn('access_token', resp)
def test_missing_code_verifier(self):
self.prepare_data()
url = self.authorize_url + '&code_challenge=foo'
rv = self.client.post(url, data={'user_id': '1'})
self.assertIn('code=', rv.location)
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params['code']
rv = self.client.post('/oauth/token', data={
'grant_type': 'authorization_code',
'code': code,
'client_id': 'code-client',
})
resp = json.loads(rv.data)
self.assertIn('Missing', resp['error_description'])
def test_trusted_client_missing_code_verifier(self):
self.prepare_data('client_secret_basic')
url = self.authorize_url + '&code_challenge=foo'
rv = self.client.post(url, data={'user_id': '1'})
self.assertIn('code=', rv.location)
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params['code']
headers = self.create_basic_header('code-client', 'code-secret')
rv = self.client.post('/oauth/token', data={
'grant_type': 'authorization_code',
'code': code,
}, headers=headers)
resp = json.loads(rv.data)
self.assertIn('Missing', resp['error_description'])
def test_plain_code_challenge_failed(self):
self.prepare_data()
url = self.authorize_url + '&code_challenge=foo'
rv = self.client.post(url, data={'user_id': '1'})
self.assertIn('code=', rv.location)
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params['code']
rv = self.client.post('/oauth/token', data={
'grant_type': 'authorization_code',
'code': code,
'code_verifier': 'bar',
'client_id': 'code-client',
})
resp = json.loads(rv.data)
self.assertIn('failed', resp['error_description'])
def test_plain_code_challenge_success(self):
self.prepare_data()
url = self.authorize_url + '&code_challenge=foo'
rv = self.client.post(url, data={'user_id': '1'})
self.assertIn('code=', rv.location)
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params['code']
rv = self.client.post('/oauth/token', data={
'grant_type': 'authorization_code',
'code': code,
'code_verifier': 'foo',
'client_id': 'code-client',
})
resp = json.loads(rv.data)
self.assertIn('access_token', resp)
def test_s256_code_challenge_success(self):
self.prepare_data()
code_challenge = create_s256_code_challenge('foo')
url = self.authorize_url + '&code_challenge=' + code_challenge
url += '&code_challenge_method=S256'
rv = self.client.post(url, data={'user_id': '1'})
self.assertIn('code=', rv.location)
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params['code']
rv = self.client.post('/oauth/token', data={
'grant_type': 'authorization_code',
'code': code,
'code_verifier': 'foo',
'client_id': 'code-client',
})
resp = json.loads(rv.data)
self.assertIn('access_token', resp)
def test_not_implemented_code_challenge_method(self):
self.prepare_data()
url = self.authorize_url + '&code_challenge=foo'
url += '&code_challenge_method=S128'
rv = self.client.post(url, data={'user_id': '1'})
self.assertIn('code=', rv.location)
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params['code']
self.assertRaises(
RuntimeError, self.client.post, '/oauth/token',
data={
'grant_type': 'authorization_code',
'code': code,
'code_verifier': 'foo',
'client_id': 'code-client',
}
)
# Source: AngelLiang/hacking-authlib, tests/flask/test_oauth2/test_code_challenge.py (BSD-3-Clause)

#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
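For example, combining the note above with the *plus one* convention, a
round trip through these helpers looks roughly like this (an illustrative
sketch; the value in the comment is approximate)::

    import datetime
    from matplotlib.dates import date2num, num2date

    x = date2num(datetime.datetime(2006, 4, 1, 6, 0))
    # ~732402.25: 732401 days after 0001-01-01, plus one, plus 0.25 for 06:00
    dt = num2date(x)  # back to a timezone-aware datetime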
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
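A minimal sketch of passing an explicit zone to :func:`num2date` (this
assumes :mod:`pytz` is installed; the zone name is only an example)::

    import pytz
    from matplotlib.dates import num2date

    eastern = pytz.timezone('US/Eastern')
    dt = num2date(732402.25, tz=eastern)  # datetime localized to US/Eastern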
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters (a short usage sketch follows the list):
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
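As a short usage sketch (``ax`` is assumed to be an existing
:class:`~matplotlib.axes.Axes` whose x-axis is plotting dates)::

    from matplotlib.dates import MonthLocator, DateFormatter

    ax.xaxis.set_major_locator(MonthLocator())
    ax.xaxis.set_major_formatter(DateFormatter('%b %Y'))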
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
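    A small illustrative sketch (the format and date string below are
    arbitrary examples)::

        parse = strpdate2num('%Y-%m-%d')
        x = parse('2015-03-14')  # a matplotlib datenum (float)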
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
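    A small illustrative example (the dates are arbitrary): with a one-day
    *delta*, the half-open interval [*dstart*, *dend*) below yields two
    values, one for Jan 1 and one for Jan 2::

        import datetime
        d = drange(datetime.datetime(2014, 1, 1),
                   datetime.datetime(2014, 1, 3),
                   datetime.timedelta(days=1))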
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in multiples of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated [dstart, dend)
if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
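        As a worked illustration of the rolling logic in the code below:
        1850 lies 150 years before 2000, so it is first advanced by 6 years
        to 1856 (accounting for the non-leap century year 1900) and then
        shifted by five 28-year cycles to the pair 1996/2024, whose
        formatted output has the original year digits substituted back in.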
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
    of the tick (the distance in days between major ticks) to a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
        frequency of the tick (a constant from dateutil.rrule) to the
        multiples allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
        # Whatever is thrown at us, we can scale the unit.
        # But default nonsingular date plots to an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
        Mark years that are multiples of *base* on a given month and day
        (default Jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
# Source: rbalda/neural_ocr, env/lib/python2.7/site-packages/matplotlib/dates.py (MIT)

# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Widget captioning model."""
import collections
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from widget_caption import widget_caption_config
from widget_caption import widget_caption_eval
from widget_caption import widget_caption_input as input_utils
from tensorflow_models.official.legacy.transformer import model_params
from tensorflow_models.official.legacy.transformer import model_utils
from tensorflow_models.official.legacy.transformer import optimizer
from tensorflow_models.official.legacy.transformer import transformer as nlp_transformer
from tensorflow_models.official.nlp.modeling import layers
from tensorflow_models.official.nlp.modeling import ops
flags.DEFINE_string('experiment', 'debug',
'Experiment name defined in widget_caption_config.py.')
flags.DEFINE_string('model_dir', None, 'Model dir')
flags.DEFINE_string('ckpt_filepath', None,
'Checkpoint path for saving weights of every epoch.')
FLAGS = flags.FLAGS
def create_hparams(experiment):
"""Creates the hyper parameters."""
hparams = {}
# General parameters.
hparams['batch_size'] = 64
hparams['eval_batch_size'] = 64
hparams['learning_rate_warmup_steps'] = 2000
hparams['learning_rate_constant'] = 1
hparams['learning_rate'] = 0.001
hparams['train_epoches'] = 20
hparams['steps_per_epoch'] = 30
hparams['train_steps'] = 100 * 1000
hparams['eval_steps'] = 100
hparams['caption_optimizer'] = 't2t'
hparams['clip_norm'] = 5.0
hparams['widget_encoder_checkpoint'] = ''
hparams['train_files'] = ''
hparams['eval_files'] = ''
hparams['train_buffer_size'] = 2000
hparams['eval_buffer_size'] = 500
hparams['train_pixel_encoder'] = True
hparams['debug'] = False
hparams['distribution_strategy'] = 'mirrored'
# Train model using decoding task, classification task, or both.
hparams['decoding_task'] = True
hparams['classification_task'] = False
# Whether to use decoding for phrase classification: <START> phrase_id <EOS>.
hparams['use_decoding_for_classification'] = False
# Weight for the classification loss.
hparams['classification_loss_weight'] = 1
hparams['train_with_one_node'] = False
# Embedding parameters.
hparams['embedding_file'] = ''
hparams['word_vocab_path'] = ''
hparams['glove_trainable'] = True
hparams['vocab_size'] = 10000
hparams['phrase_vocab_size'] = 10000
# View hierarchy encoder parameters.
hparams['max_pixel_pos'] = 100
hparams['max_dom_pos'] = 500
hparams['screen_encoder'] = 'gcn'
hparams['screen_embedding_feature'] = ['text', 'type', 'pos', 'click', 'dom']
hparams['obj_text_aggregation'] = 'max'
hparams['synthetic_screen_noise'] = 0.
# Whether to add pixel encoding as input to view hierarchy encoder.
hparams['encode_screen_with_context'] = False
# Whether to add a residual link for pixel encoding.
hparams['add_pixel_skip_link'] = False
# General parameters.
hparams['num_hidden_layers'] = 2
hparams['hidden_size'] = 2
hparams['filter_size'] = 2
hparams['num_heads'] = 2
hparams['dropout'] = 0.2
hparams['layer_prepostprocess_dropout'] = 0.2
hparams['attention_dropout'] = 0.2
hparams['relu_dropout'] = 0.2
transformer_hparams = model_params.BASE_PARAMS
# Add parameters from transformer model.
hparams.update(transformer_hparams)
# Rewrite all the parameters from command-line flags.
config = widget_caption_config.experiments[experiment]
hparams.update(config)
return hparams
def load_embed(file_name, vocab_size):
"""Loads a pre-trained embedding matrix.
Args:
file_name: the file name of the embedding file.
vocab_size: if > 0, only load embedding weights for vocab_size words.
Returns:
vocab: a list of tokens.
embeds: a numpy array of embeddings for each token plus an OOV embedding.
depth: the depth of the embedding.
Raises:
ValueError: embeddings have different depths.
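  For reference, each line of the embedding file is expected to hold a token
  followed by its space-separated float values, e.g. a made-up GloVe-style
  entry such as:
      the 0.418 0.24968 -0.41242 0.1217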
"""
with tf.io.gfile.GFile(file_name, 'r') as embed_file:
vocab = []
embeds = []
depth = -1
for index, line in enumerate(embed_file):
if vocab_size > 0 and index >= vocab_size:
break
line = line.strip()
tokens = line.strip().split(' ')
word = tokens[0]
vocab.append(word)
if depth == -1:
embed = [float(token) for token in tokens[1:]]
else:
embed = [float(token) for token in tokens[-depth:]]
d = len(embed)
if depth == -1:
depth = d
if d != depth:
raise ValueError('Inconsistent embedding sizes')
embeds.append(embed)
embeds = np.stack(embeds)
return vocab, embeds, depth
def compute_score(predictions, references, vocab=None):
"""Computes the bleu score.
Args:
    predictions: a numpy array in the shape of [batch_size, max_phrase_length]
references: a numpy array in the shape of [batch_size, 7, 10]
vocab: the vocabulary file.
Returns:
a scalar value for the corpus level bleu score.
"""
  assert np.ndim(predictions) == 2
assert predictions.shape[0] == references.shape[0]
batch_size = predictions.shape[0]
predictions = tf.make_ndarray(tf.make_tensor_proto(predictions)).tolist()
references = tf.make_ndarray(tf.make_tensor_proto(references)).tolist()
hypotheses_list = []
references_list = []
for index in range(batch_size):
h = predictions[index]
try:
eos_index = h.index(input_utils.EOS)
except ValueError:
eos_index = len(h)
hypotheses_list.append(h[:eos_index])
ref = references[index].decode().split('|')
ref_list = [r.strip().split(' ') for r in ref if r.strip()]
references_list.append(ref_list)
all_scores = collections.defaultdict(list)
for hypothesis, references in zip(hypotheses_list, references_list):
if vocab is not None and len(vocab):
# Skip PADDING, UNK, EOS, START (0-3).
hypothesis = [
vocab[word_id].numpy().decode()
for word_id in hypothesis
if word_id > 3
]
logging.info('hypothesis: %s', str(hypothesis))
logging.info('references: %s', str(references))
h_str = ' '.join(str(e) for e in hypothesis)
r_str = [' '.join(str(e) for e in ref) for ref in references]
scores = widget_caption_eval.coco_evaluate(r_str, h_str)
for key, score in scores.items():
all_scores[key].append(score)
score_names = [
'BLEU-1', 'BLEU-2', 'BLEU-3', 'BLEU-4', 'ROUGE-1-f1-mean',
'ROUGE-1-f1-min', 'ROUGE-1-f1-max', 'ROUGE-2-f1-mean', 'ROUGE-2-f1-min',
'ROUGE-2-f1-max', 'ROUGE-L-f1-mean', 'ROUGE-L-f1-min', 'ROUGE-L-f1-max'
]
return [np.array(all_scores[name], dtype=np.float32) for name in score_names]
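# Hedged, illustrative sketch of the inputs compute_score expects (the ids and
# captions below are made up). In the model, predictions come from beam search
# and a vocab list maps ids back to words; passing vocab=None keeps the raw ids.
def _compute_score_example():
  # Decoded word ids, shape [batch_size, max_phrase_length]; ids 0-3 are
  # reserved for PADDING/UNK/EOS/START.
  predictions = np.array([[4, 5, 6]])
  # One byte string per example; multiple reference captions are joined by '|'.
  references = np.array([b'send message|share item'])
  return compute_score(predictions, references, vocab=None)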
class EmbeddingLayer(tf.keras.layers.Layer):
"""Embedding layer."""
def __init__(self,
name,
vocab_size,
embedding_dim,
embedding_file=None,
hidden_dim=None,
trainable=True):
super(EmbeddingLayer, self).__init__(name=name)
self._vocab_size = vocab_size
self._hidden_dim = hidden_dim
self._embedding_dim = embedding_dim
self._embedding_file = embedding_file
self._trainable = trainable
def build(self, input_shape):
if self._embedding_file:
logging.info('Load embedding file for %s of vocab size %s: %s',
self._name, self._vocab_size, self._embedding_file)
_, embedding_weights, depth = load_embed(
file_name=self._embedding_file, vocab_size=self._vocab_size)
self._embedding_dim = depth
initializer = tf.constant_initializer(
embedding_weights[:self._vocab_size, :])
else:
logging.info('Create random embedding matrix for %s of size %s',
self._name, self._vocab_size)
initializer = tf.keras.initializers.RandomNormal(
mean=0.0, stddev=0.1, seed=None)
self.embeddings = self.add_weight(
name='{}_weights'.format(self._name),
shape=(self._vocab_size, self._embedding_dim),
initializer=initializer,
trainable=self._trainable,
dtype='float32')
if self._hidden_dim:
self._project_layer = tf.keras.layers.Dense(self._hidden_dim)
def call(self, inputs):
embeddings = tf.nn.embedding_lookup(self.embeddings, inputs)
if self._hidden_dim:
embeddings = self._project_layer(embeddings)
return embeddings
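# Illustrative sketch (not part of the original model): a small randomly
# initialized word-embedding layer projected to a hidden size. All sizes here
# are made-up examples.
def _embedding_layer_example():
  layer = EmbeddingLayer(
      name='example_word', vocab_size=100, embedding_dim=16, hidden_dim=8)
  token_ids = tf.constant([[4, 5, 6]])
  # Output shape [1, 3, 8]: hidden_dim triggers the Dense projection in call().
  return layer(token_ids)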
class PixelEncoderLayer(tf.keras.layers.Layer):
"""Pixel encoding layer (ResNet)."""
def __init__(self, name, filters, kernel_sizes):
super(PixelEncoderLayer, self).__init__(name=name)
self._filters = filters
self._kernel_sizes = kernel_sizes
def build(self, input_shape):
self._conv_layer_1 = tf.keras.layers.Conv2D(
filters=self._filters[0],
kernel_size=self._kernel_sizes[0],
strides=1,
padding='same')
self._conv_layer_2 = tf.keras.layers.Conv2D(
filters=self._filters[1],
kernel_size=self._kernel_sizes[1],
strides=1,
padding='same')
self._conv_layer_3 = tf.keras.layers.Conv2D(
filters=self._filters[2],
kernel_size=self._kernel_sizes[2],
strides=2,
padding='same')
self._batch_norm_layer_1 = tf.keras.layers.BatchNormalization()
self._batch_norm_layer_2 = tf.keras.layers.BatchNormalization()
self._batch_norm_layer_3 = tf.keras.layers.BatchNormalization()
def call(self, input_tensor, training, dropout=0.0):
"""Defines a single encoding layer."""
x = input_tensor
skip = x
x = self._conv_layer_1(x)
x = self._batch_norm_layer_1(x, training=training)
x = tf.nn.relu(x)
if training:
x = tf.nn.dropout(x, rate=dropout)
x = self._conv_layer_2(x)
x = self._batch_norm_layer_2(x, training=training)
x += skip
x = tf.nn.relu(x)
if training:
x = tf.nn.dropout(x, rate=dropout)
x = self._conv_layer_3(x)
x = self._batch_norm_layer_3(x, training=training)
x = tf.nn.relu(x)
if training:
x = tf.nn.dropout(x, rate=dropout)
return x
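# Illustrative sketch: each PixelEncoderLayer keeps the resolution through its
# first two convolutions (plus a residual skip) and halves it with the third,
# stride-2 convolution. The six blocks built by _get_encoder3 below therefore
# turn a [batch, 64, 64, 1] thumbnail into [batch, 1, 1, 256] before
# _encode_pixel reshapes it to [batch, 256]. The filter/kernel values here
# mirror the first block only.
def _pixel_encoder_example():
  layer = PixelEncoderLayer(
      'example_block', filters=[1, 1, 4], kernel_sizes=[5, 3, 5])
  pixels = tf.zeros([2, 64, 64, 1])
  # Output shape: [2, 32, 32, 4].
  return layer(pixels, training=False)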
class EncoderLayer(tf.keras.layers.Layer):
"""Generates encoder outputs for both the pixels and view hierarchy."""
def __init__(self, hparams, word_embedding_layer):
super(EncoderLayer, self).__init__(name='dual_encoder')
self._hparams = hparams
self._word_embedding_layer = word_embedding_layer
def build(self, input_shape):
self._type_embedding_layer = EmbeddingLayer(
name='object_type',
vocab_size=100,
embedding_dim=self._hparams['hidden_size'])
self._clickable_embedding_layer = EmbeddingLayer(
name='object_clickable',
vocab_size=2,
embedding_dim=self._hparams['hidden_size'])
self._pos_embedding_layers = [
EmbeddingLayer(
name='object_pos_0',
vocab_size=self._hparams['max_pixel_pos'],
embedding_dim=self._hparams['hidden_size']),
EmbeddingLayer(
name='object_pos_1',
vocab_size=self._hparams['max_pixel_pos'],
embedding_dim=self._hparams['hidden_size']),
EmbeddingLayer(
name='object_pos_2',
vocab_size=self._hparams['max_pixel_pos'],
embedding_dim=self._hparams['hidden_size']),
EmbeddingLayer(
name='object_pos_3',
vocab_size=self._hparams['max_pixel_pos'],
embedding_dim=self._hparams['hidden_size'],
)
]
self._dom_embedding_layers = [
EmbeddingLayer(
name='object_dom_pos_0',
vocab_size=self._hparams['max_dom_pos'],
embedding_dim=self._hparams['hidden_size']),
EmbeddingLayer(
name='object_dom_pos_1',
vocab_size=self._hparams['max_dom_pos'],
embedding_dim=self._hparams['hidden_size']),
EmbeddingLayer(
name='object_dom_pos_2',
vocab_size=self._hparams['max_dom_pos'],
embedding_dim=self._hparams['hidden_size'])
]
self._final_layer = tf.keras.layers.Dense(
self._hparams['hidden_size'], activation=None)
self._vh_final_layer = tf.keras.layers.Dense(
self._hparams['hidden_size'], activation=tf.nn.tanh)
self._pixel_layers = self._get_encoder3(initial_channel_size=1)
self._transformer_encoder = nlp_transformer.EncoderStack(self._hparams)
def call(self, features, object_selector, training):
# Compute encoding
with tf.name_scope('encoder'):
pixel_encoding = self._encode_pixel(features, object_selector, training)
vh_encoding, obj_embedding = self._encode_view_hierarchy(
features, object_selector, training)
logging.info('Screen encoder: %s', self._hparams['screen_encoder'])
if self._hparams['screen_encoder'] == 'pixel_only':
combined_output = pixel_encoding
elif self._hparams['screen_encoder'] == 'pixel_transformer':
combined_output = tf.concat([pixel_encoding, vh_encoding], -1)
elif self._hparams['screen_encoder'] == 'pixel_mlp':
combined_output = tf.concat([pixel_encoding, obj_embedding], -1)
else:
raise ValueError
# [valid_obj, hidden_size]
logits = self._final_layer(combined_output)
logits = tf.nn.relu(logits)
if training:
logits = tf.nn.dropout(logits, rate=self._hparams['dropout'])
# Add the length dimension.
logits = tf.expand_dims(logits, 1)
return logits
def _encode_pixel(self, features, object_selector, training):
# Flatten object pixels.
obj_pixels = tf.reshape(features['obj_pixels'], [-1, 64, 64, 1])
# Otherwise, we just encode worker nodes' pixels.
valid_obj_pixels = tf.gather(obj_pixels, object_selector)
thumbnail_encoding = valid_obj_pixels
for layer in self._pixel_layers:
thumbnail_encoding = layer(
thumbnail_encoding,
training=training,
dropout=self._hparams['dropout'])
# [worker_node, 256]
thumbnail_encoding = tf.reshape(thumbnail_encoding, [-1, 256])
return thumbnail_encoding
def _get_encoder3(self, initial_channel_size=3):
"""Defines the encoding model with a pre-defined filter/kernel sizes."""
pixel_layers = []
filter_groups = [[initial_channel_size, initial_channel_size, 4],
[4, 4, 16], [16, 16, 32], [32, 32, 64], [64, 64, 128],
[128, 128, 256]]
kernel_size_groups = [[5, 3, 5], [5, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3],
[3, 3, 3]]
for index, (filters, kernel_sizes) in enumerate(
zip(filter_groups, kernel_size_groups)):
assert len(filters) == len(kernel_sizes)
name = 'pixel_encoder_{}'.format(index)
layer = PixelEncoderLayer(name, filters, kernel_sizes)
pixel_layers.append(layer)
return pixel_layers
def _embed_composite_feature(self, features, embedding_layers):
"""Embed a position feature."""
embedding_list = []
for i in range(len(embedding_layers)):
embedding_list.append(embedding_layers[i](features[:, :, i]))
embedding = tf.add_n(embedding_list)
return embedding
def _encode_view_hierarchy(self, features, object_selector, training):
"""Encodes view hierarchy."""
logging.info('Using Transformer screen encoder')
# obj_text only contain the first phrase if multiple exist.
# [batch, node_num, 10, hidden_size]
developer_embeddings = self._word_embedding_layer(
features['developer_token_id'])
resource_embeddings = self._word_embedding_layer(
features['resource_token_id'])
developer_embeddings = self._aggregate_text_embedding(
features['developer_token_id'], developer_embeddings)
resource_embeddings = self._aggregate_text_embedding(
features['resource_token_id'], resource_embeddings)
type_embedding = self._type_embedding_layer(
tf.maximum(features['obj_type'], 0))
clickable_embedding = self._clickable_embedding_layer(
features['obj_clickable'])
object_info = []
if 'text' in self._hparams['screen_embedding_feature']:
object_info.append(developer_embeddings)
object_info.append(resource_embeddings)
if 'type' in self._hparams['screen_embedding_feature']:
object_info.append(type_embedding)
if 'pos' in self._hparams['screen_embedding_feature']:
pos_embedding = self._embed_composite_feature(features['obj_screen_pos'],
self._pos_embedding_layers)
object_info.append(pos_embedding)
if 'click' in self._hparams['screen_embedding_feature']:
object_info.append(clickable_embedding)
if 'dom' in self._hparams['screen_embedding_feature']:
dom_embedding = self._embed_composite_feature(features['obj_dom_pos'],
self._dom_embedding_layers)
object_info.append(dom_embedding)
object_embed = tf.concat(object_info, -1)
object_embed = self._vh_final_layer(object_embed)
# [batch, obj_num]
object_mask = tf.cast(tf.not_equal(features['obj_type'], -1), tf.float32)
# [batch, obj_num, hidden_dim]
object_embed = object_embed * tf.expand_dims(object_mask, -1)
att_bias = model_utils.get_padding_bias(object_mask)
if training:
object_embed = tf.nn.dropout(object_embed, rate=self._hparams['dropout'])
encoder_output = self._transformer_encoder(
object_embed,
attention_bias=att_bias,
inputs_padding=None, # not used in EncoderStack.
training=training)
object_embed = tf.reshape(object_embed, [-1, self._hparams['hidden_size']])
encoder_output = tf.reshape(encoder_output,
[-1, self._hparams['hidden_size']])
valid_object_embed = tf.gather(object_embed, object_selector)
valid_screen_encoding = tf.gather(encoder_output, object_selector)
return valid_screen_encoding, valid_object_embed
def _aggregate_text_embedding(self, token_ids, embeddings):
"""Aggregate text embedding for a UI element."""
if self._hparams['obj_text_aggregation'] == 'max':
# Find valid tokens (not PADDING/EOS/UNK/START).
valid_token_mask = tf.greater_equal(token_ids, 4)
# Use large negative bias for invalid tokens.
invalid_token_bias = tf.cast(
tf.logical_not(valid_token_mask), tf.float32) * -1e9
# [batch, node_num, word_num, hidden_size]
embeddings = embeddings + tf.expand_dims(invalid_token_bias, axis=-1)
# Max value for each dimension, [batch, node_num, hidden_size].
embeddings = tf.reduce_max(embeddings, axis=-2)
# For objects with no text, use 0.
valid_object_mask = tf.cast(
tf.reduce_any(valid_token_mask, axis=-1), tf.float32)
embeddings = embeddings * tf.expand_dims(valid_object_mask, axis=-1)
elif self._hparams['obj_text_aggregation'] == 'sum':
# [batch, step, #max_obj, #max_token] 0 for padded tokens
real_objects = tf.cast(tf.greater_equal(token_ids, 4), tf.float32)
# [batch, step, #max_obj, hidden] 0s for padded objects
embeddings = tf.reduce_sum(
input_tensor=embeddings * tf.expand_dims(real_objects, 3), axis=-2)
else:
raise ValueError('Unrecognized token aggregation %s' %
(self._hparams['obj_text_aggregation']))
return embeddings
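# Hedged sketch of how _embed_composite_feature combines a multi-coordinate
# feature: each coordinate gets its own vocabulary/embedding layer and the
# per-coordinate embeddings are summed. Sizes below are made-up examples.
def _composite_position_embedding_example():
  pos_layers = [
      EmbeddingLayer(name='example_pos_%d' % i, vocab_size=100,
                     embedding_dim=8) for i in range(4)
  ]
  # [batch=1, node_num=2, 4] screen positions (left, top, right, bottom).
  positions = tf.constant([[[1, 2, 3, 4], [5, 6, 7, 8]]])
  # Result shape: [1, 2, 8].
  return tf.add_n([pos_layers[i](positions[:, :, i]) for i in range(4)])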
class DecoderLayer(tf.keras.layers.Layer):
"""Captioning decoder layer."""
def __init__(self, hparams, word_embedding_layer, position_embedding_layer):
super(DecoderLayer, self).__init__(name='decoder')
self._hparams = hparams
self._word_embedding_layer = word_embedding_layer
self._position_embedding_layer = position_embedding_layer
def build(self, inputs):
self._transformer_decoder = nlp_transformer.DecoderStack(self._hparams)
def call(self,
decoder_inputs,
encoder_outputs,
decoder_self_attention_bias,
attention_bias,
training,
cache=None):
"""Return the output of the decoder layer stacks.
Args:
decoder_inputs: A tensor with shape [batch_size, target_length,
hidden_size].
encoder_outputs: A tensor with shape [batch_size, input_length,
hidden_size]
decoder_self_attention_bias: A tensor with shape [1, 1, target_len,
target_length], the bias for decoder self-attention layer.
attention_bias: A tensor with shape [batch_size, 1, 1, input_length], the
bias for encoder-decoder attention layer.
training: A bool, whether in training mode or not.
cache: (Used for fast decoding) A nested dictionary storing previous
decoder self-attention values. The items are:
{layer_n: {"k": A tensor with shape [batch_size, i, key_channels],
"v": A tensor with shape [batch_size, i, value_channels]},
...}
Returns:
Output of decoder layer stack.
float32 tensor with shape [batch_size, target_length, hidden_size]
"""
# Run values
outputs = self._transformer_decoder(
decoder_inputs,
encoder_outputs,
decoder_self_attention_bias,
attention_bias,
training=training,
cache=cache)
return outputs
class WidgetCaptionModel(tf.keras.Model):
"""Widget Captioning Model."""
_SCORE_NAMES = [
'BLEU-1', 'BLEU-2', 'BLEU-3', 'BLEU-4', 'ROUGE-1-f1-mean',
'ROUGE-1-f1-min', 'ROUGE-1-f1-max', 'ROUGE-2-f1-mean', 'ROUGE-2-f1-min',
'ROUGE-2-f1-max', 'ROUGE-L-f1-mean', 'ROUGE-L-f1-min', 'ROUGE-L-f1-max'
]
# 10 words + EOS symbol.
_MAX_DECODE_LENGTH = 11
def __init__(self, hparams):
super(WidgetCaptionModel, self).__init__()
self._hparams = hparams
with tf.name_scope('captioning'):
self._word_embedding_layer = EmbeddingLayer(
name='word',
hidden_dim=self._hparams['hidden_size'],
embedding_file=self._hparams['embedding_file'],
vocab_size=self._hparams['vocab_size'],
embedding_dim=self._hparams['hidden_size'], # not used
trainable=self._hparams['glove_trainable'])
self._position_embedding_layer = layers.RelativePositionEmbedding(
hidden_size=self._hparams['hidden_size'])
self._encoder = EncoderLayer(self._hparams, self._word_embedding_layer)
self._decoder = DecoderLayer(self._hparams, self._word_embedding_layer,
self._position_embedding_layer)
self._word_layer = tf.keras.layers.Dense(
units=self._hparams['vocab_size'])
self.model_metrics = {
'loss': tf.keras.metrics.Mean(name='loss'),
'global_norm': tf.keras.metrics.Mean(name='global_norm'),
}
self.caption_metrics = {}
for score_name in self._SCORE_NAMES:
scoped_name = 'COCO/{}'.format(score_name)
self.caption_metrics[scoped_name] = tf.keras.metrics.Mean(
name=scoped_name)
self._word_vocab = []
with tf.io.gfile.GFile(self._hparams['word_vocab_path']) as f:
for index, line in enumerate(f):
if index >= self._hparams['vocab_size']:
break
self._word_vocab.append(line.strip())
def call(self, inputs, training):
features, targets = inputs
object_selector = self._caption_object_selector(features)
encoder_outputs = self._encoder(features, object_selector, training)
if self._hparams['decoding_task']:
if targets is None:
return self.predict(encoder_outputs, training)
else:
return self.decode(targets, encoder_outputs, training)
def _caption_object_selector(self, features):
worker = tf.reshape(tf.equal(features['label_flag'], 0), [-1])
# [worker_node] indices into [BxN] vector for valid worker node.
worker_position = tf.reshape(tf.where(worker), [-1])
return worker_position
def _caption_loss(self, targets, logits):
per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
# Create non-padding mask and only compute loss for non-padding positions.
non_padding = tf.greater(targets, input_utils.PADDING)
mask = tf.cast(non_padding, tf.float32)
per_example_loss = per_example_loss * mask
avg_loss = tf.reduce_sum(per_example_loss) / tf.reduce_sum(mask)
avg_loss = tf.cond(tf.math.is_nan(avg_loss), lambda: 0.0, lambda: avg_loss)
return avg_loss
def train_step(self, data):
targets, _ = self.compute_targets(data)
with tf.GradientTape() as tape:
logits = self([data, targets], training=True)
if self._hparams['decoding_task']:
avg_loss = self._caption_loss(targets, logits)
trainable_vars = self.trainable_variables
gradients = tape.gradient(avg_loss, trainable_vars)
gradients, global_norm = tf.clip_by_global_norm(
gradients, self._hparams['clip_norm'])
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
self.model_metrics['loss'].update_state(avg_loss)
self.model_metrics['global_norm'].update_state(global_norm)
train_metrics = ['loss', 'global_norm']
return {m: self.model_metrics[m].result() for m in train_metrics}
def test_step(self, data):
targets, references = self.compute_targets(data)
logits = self([data, targets], training=False)
avg_loss = self._caption_loss(targets, logits)
decoded = self([data, None], training=False)
self.compute_caption_metrics(decoded, references)
self.model_metrics['loss'].update_state(avg_loss)
return {m.name: m.result() for m in self.model_metrics.values()}
def compute_caption_metrics(self, predictions, references):
"""Computes the eval metrics for decoding."""
py_types = [tf.float32] * len(self._SCORE_NAMES)
scores = tf.py_function(compute_score,
(predictions, references, self._word_vocab),
py_types)
for name, score in zip(self._SCORE_NAMES, scores):
scoped_name = 'COCO/{}'.format(name)
self.caption_metrics[scoped_name].update_state(score)
self.model_metrics[scoped_name] = self.caption_metrics[scoped_name]
def decode(self, targets, encoder_outputs, training):
"""Generate logits for each value in the target sequence.
Args:
targets: target values for the output sequence. int tensor with shape
[batch_size, target_length]
encoder_outputs: continuous representation of input sequence. float tensor
with shape [batch_size, input_length, hidden_size]
training: boolean, whether in training mode or not.
Returns:
float32 tensor with shape [batch_size, target_length, vocab_size]
"""
with tf.name_scope('decode'):
length = tf.shape(targets)[1]
decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(
length)
encoder_shape = tf.shape(encoder_outputs)
# [batch, 1] as there is only one object as input for decoding.
mask = tf.ones([encoder_shape[0], encoder_shape[1]])
# In mask, 1 = valid object, 0 = padding, attn_bias will have -NEG_INF for
# paddings and 0 for valid objects.
attention_bias = model_utils.get_padding_bias(mask)
# Prepare inputs to decoder layers by shifting targets, adding positional
# encoding and applying dropout.
targets = tf.pad(
targets, [[0, 0], [1, 0]], constant_values=input_utils.START)
# Remove last element.
targets = targets[:, :-1]
decoder_inputs = self._word_embedding_layer(targets)
# No need to shift, use START above to shift.
# with tf.name_scope('shift_targets'):
# # Shift targets to the right, and remove the last element
# decoder_inputs = tf.pad(decoder_inputs,
# [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
with tf.name_scope('add_pos_encoding'):
pos_encoding = self._position_embedding_layer(decoder_inputs)
decoder_inputs += pos_encoding
if training:
decoder_inputs = tf.nn.dropout(
decoder_inputs, rate=self._hparams['layer_postprocess_dropout'])
decoder_outputs = self._decoder(
decoder_inputs,
encoder_outputs,
decoder_self_attention_bias,
attention_bias,
training=training)
logits = self._word_layer(decoder_outputs)
return logits
def predict(self, encoder_outputs, training):
"""Return predicted sequence."""
batch_size = tf.shape(encoder_outputs)[0]
# input_length = tf.shape(encoder_outputs)[1]
# 10 words + EOS symbol
symbols_to_logits_fn = self._get_symbols_to_logits_fn(
max_decode_length=self._MAX_DECODE_LENGTH, training=training)
# Create initial set of IDs that will be passed into symbols_to_logits_fn.
initial_ids = tf.ones([batch_size], dtype=tf.int32) * input_utils.START
# Create cache storing decoder attention values for each layer.
# pylint: disable=g-complex-comprehension
init_decode_length = 0
num_heads = self._hparams['num_heads']
dim_per_head = self._hparams['hidden_size'] // num_heads
cache = {
'layer_%d' % layer: {
'k':
tf.zeros(
[batch_size, init_decode_length, num_heads, dim_per_head]),
'v':
tf.zeros(
[batch_size, init_decode_length, num_heads, dim_per_head])
} for layer in range(self._hparams['num_hidden_layers'])
}
# pylint: enable=g-complex-comprehension
# Add encoder output and attention bias to the cache.
encoder_shape = tf.shape(encoder_outputs)
# [batch, 1] as there is only one object as input for decoding.
mask = tf.ones([encoder_shape[0], encoder_shape[1]])
# In mask, 1 = valid object, 0 = padding, attn_bias will have -NEG_INF for
# paddings and 0 for valid objects.
attention_bias = model_utils.get_padding_bias(mask)
cache['encoder_outputs'] = encoder_outputs
cache['encoder_decoder_attention_bias'] = attention_bias
# Use beam search to find the top beam_size sequences and scores.
decoded_ids, _ = ops.beam_search.sequence_beam_search(
symbols_to_logits_fn=symbols_to_logits_fn,
initial_ids=initial_ids,
initial_cache=cache,
vocab_size=self._hparams['vocab_size'],
beam_size=self._hparams['beam_size'],
alpha=1,
max_decode_length=self._MAX_DECODE_LENGTH,
eos_id=input_utils.EOS)
# Get the top sequence for each batch element and remove START symbol.
top_decoded_ids = decoded_ids[:, 0, 1:]
# top_scores = scores[:, 0]
return top_decoded_ids
def compute_targets(self, features):
"""Compute the target token ids and phrase ids."""
batch_size = tf.shape(features['label_flag'])[0]
num_objects = tf.shape(features['label_flag'])[1]
worker_position = self._caption_object_selector(features)
# [worker_node, 1]: retrieve the reference captions.
valid_references = tf.gather(
tf.reshape(features['reference'], [-1]), worker_position)
# [worker_node, seq_len]: retrieve reference phrases.
target_phrase = features['caption_token_id']
target_phrase = tf.reshape(
target_phrase, [batch_size * num_objects, self._MAX_DECODE_LENGTH])
valid_target_phrase = tf.gather(target_phrase, worker_position)
return valid_target_phrase, valid_references
def _get_symbols_to_logits_fn(self, max_decode_length, training):
"""Returns a decoding function that calculates logits of the next tokens."""
timing_signal = self._position_embedding_layer(
inputs=None, length=max_decode_length)
decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(
max_decode_length)
def symbols_to_logits_fn(ids, i, cache):
"""Generate logits for next potential IDs.
Args:
ids: Current decoded sequences. int tensor with shape [batch_size *
beam_size, i + 1].
i: Loop index.
cache: dictionary of values storing the encoder output, encoder-decoder
attention bias, and previous decoder attention values.
Returns:
Tuple of
(logits with shape [batch_size * beam_size, vocab_size],
updated cache values)
"""
# Set decoder input to the last generated IDs. The previous ids attention
# key/value are already stored in the cache.
decoder_input = ids[:, -1:]
# Preprocess decoder input by getting embeddings and adding timing signal.
decoder_input = self._word_embedding_layer(decoder_input)
decoder_input += timing_signal[i:i + 1]
self_attention_bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1]
decoder_outputs = self._decoder(
decoder_input,
cache.get('encoder_outputs'),
self_attention_bias,
cache.get('encoder_decoder_attention_bias'),
training=training,
cache=cache)
# Only use the last decoded state.
decoder_outputs = decoder_outputs[:, -1, :]
logits = self._word_layer(decoder_outputs)
return logits, cache
return symbols_to_logits_fn
class TensorBoardCallBack(tf.keras.callbacks.TensorBoard):
"""Learning rate log callback."""
def on_train_batch_begin(self, batch, logs=None):
super(TensorBoardCallBack, self).on_train_batch_begin(batch, logs)
try:
lr = self.model.optimizer.learning_rate(batch)
except TypeError:
lr = self.model.optimizer.learning_rate
if batch % 100 == 0:
try:
with self.writer.as_default():
tf.summary.scalar('learning rate', tensor=lr)
self.writer.flush()
except AttributeError:
logging.info('TensorBoard not init yet')
def init_resnet(hparams, model):
"""Init resnet weights from a TF model if provided."""
if not hparams['widget_encoder_checkpoint']:
return
reader = tf.train.load_checkpoint(hparams['widget_encoder_checkpoint'])
# Initialize model weights.
init_set = input_utils.input_fn(
hparams['train_files'],
1,
hparams['vocab_size'],
hparams['max_pixel_pos'],
hparams['max_dom_pos'],
epoches=1,
buffer_size=1)
init_features = next(iter(init_set))
init_target = model.compute_targets(init_features)
model([init_features, init_target[0]], training=True)
weight_value_tuples = []
for layer in model._encoder._pixel_layers: # pylint: disable=protected-access
for param in layer.weights:
if 'batch_normalization' in param.name:
continue
sublayer, varname = param.name.replace(':0', '').split('/')[-2:]
var_name = 'encoder/{}/{}'.format(sublayer, varname)
if reader.has_tensor(var_name):
logging.info('Found pretrained weights: %s %s, %s %s', param.name,
param.shape, var_name,
reader.get_tensor(var_name).shape)
weight_value_tuples.append((param, reader.get_tensor(var_name)))
logging.info('Load pretrained %s weights', len(weight_value_tuples))
tf.keras.backend.batch_set_value(weight_value_tuples)
def main(argv=None):
del argv
hparams = create_hparams(FLAGS.experiment)
if hparams['distribution_strategy'] == 'multi_worker_mirrored':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
elif hparams['distribution_strategy'] == 'mirrored':
strategy = tf.distribute.MirroredStrategy()
else:
    raise ValueError('Unsupported distribution strategy %s; expected '
                     '`multi_worker_mirrored` or `mirrored`.' %
                     hparams['distribution_strategy'])
  # Build the train and eval datasets from the widget captioning input files.
train_set = input_utils.input_fn(
hparams['train_files'],
hparams['batch_size'],
hparams['vocab_size'],
hparams['max_pixel_pos'],
hparams['max_dom_pos'],
epoches=1,
buffer_size=hparams['train_buffer_size'])
dev_set = input_utils.input_fn(
hparams['eval_files'],
hparams['eval_batch_size'],
hparams['vocab_size'],
hparams['max_pixel_pos'],
hparams['max_dom_pos'],
epoches=100,
buffer_size=hparams['eval_buffer_size'])
# Create and compile the model under Distribution strategy scope.
# `fit`, `evaluate` and `predict` will be distributed based on the strategy
# model was compiled with.
with strategy.scope():
model = WidgetCaptionModel(hparams)
lr_schedule = optimizer.LearningRateSchedule(
hparams['learning_rate_constant'], hparams['hidden_size'],
hparams['learning_rate_warmup_steps'])
opt = tf.keras.optimizers.Adam(
lr_schedule,
hparams['optimizer_adam_beta1'],
hparams['optimizer_adam_beta2'],
epsilon=hparams['optimizer_adam_epsilon'])
model.compile(optimizer=opt)
init_resnet(hparams, model)
callbacks = [tf.keras.callbacks.TerminateOnNaN()]
if FLAGS.model_dir:
ckpt_filepath = os.path.join(FLAGS.model_dir, 'saved/{epoch:04d}')
backup_dir = os.path.join(FLAGS.model_dir, 'backup')
tensorboard_callback = TensorBoardCallBack(log_dir=FLAGS.model_dir)
callbacks.append(tensorboard_callback)
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=ckpt_filepath, save_weights_only=True)
callbacks.append(model_checkpoint_callback)
if tf.executing_eagerly():
callbacks.append(
tf.keras.callbacks.experimental.BackupAndRestore(
backup_dir=backup_dir))
# Train the model with the train dataset.
history = model.fit(
x=train_set,
epochs=hparams['train_epoches'],
validation_data=dev_set,
validation_steps=10,
callbacks=callbacks)
logging.info('Training ends successfully. `model.fit()` result: %s',
history.history)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
app.run(main)
| 37.465567 | 88 | 0.682502 | ["Apache-2.0"] | AI21212019/google-research | widget_caption/widget_caption_model.py | 38,627 | Python |
from django.apps import AppConfig
class AppNameConfig(AppConfig):
name = "backend"
verbose_name = "ToDo List"
| 17.142857 | 33 | 0.725 | ["Apache-2.0"] | AlekseyLobanov/django-todo | backend/backend/apps.py | 120 | Python |
import models
import serializers
from rest_framework import viewsets, permissions
class apiViewSet(viewsets.ModelViewSet):
"""ViewSet for the api class"""
queryset = models.api.objects.all()
serializer_class = serializers.apiSerializer
permission_classes = [permissions.IsAuthenticated]
| 22 | 54 | 0.775974 | ["Apache-2.0"] | mainecivichackday/invman | api_invman/api.py | 308 | Python |
from urllib.parse import quote
from django import template
register = template.Library()
@register.simple_tag
def pie_chart(items, width=440, height=190):
return '//chart.googleapis.com/chart?cht=p3&chd=t:{0}&chs={1}x{2}&chl={3}'.format(
quote(','.join([str(item[1]) for item in items])),
width,
height,
quote('|'.join([str(item[0]) for item in items])),
)
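# Hedged usage sketch (hypothetical request-method counts): `items` is any
# iterable of (label, value) pairs; commas and pipes in the chart data are
# percent-encoded by quote(). For example, in a template that has loaded this
# tag library:
#
#   {% pie_chart items %}
#
# with items = [('GET', 120), ('POST', 30)] renders
# '//chart.googleapis.com/chart?cht=p3&chd=t:120%2C30&chs=440x190&chl=GET%7CPOST'.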
| 25.0625 | 86 | 0.630923 | ["BSD-2-Clause"] | Django-AI/Django-Request | request/templatetags/request_admin.py | 401 | Python |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
class DeprecationTest(tf.test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
with self.assertRaisesRegexp(ValueError, "date"):
deprecation.deprecated(None, instructions)
with self.assertRaisesRegexp(ValueError, "date"):
deprecation.deprecated("", instructions)
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated("07-04-2016", instructions)
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated(date, None)
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated(date, "")
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\n Args:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\n Returns:"
"\n Sum of args."
"\n " % (date, instructions),
_fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
"""fn doc."""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions),
_fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated(date, instructions)
def _fn(arg0, arg1):
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions),
_fn.__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
"""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\n Args:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n"
"\n Returns:"
"\n Sum of args."
"\n " % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
"""fn doc."""
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"fn doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_instance_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@deprecation.deprecated(date, instructions)
def _fn(self, arg0, arg1):
return arg0 + arg1
# Assert function docs are properly updated.
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions),
getattr(_Object, "_fn").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual(3, _Object()._fn(1, 2))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
def test_prop_wrong_order(self):
with self.assertRaisesRegexp(
ValueError,
"make sure @property appears before @deprecated in your source code"):
# pylint: disable=unused-variable
class _Object(object):
        def __init__(self):
pass
@deprecation.deprecated("2016-07-04", "Instructions.")
@property
def _prop(self):
return "prop_wrong_order"
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_prop_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@property
@deprecation.deprecated(date, instructions)
def _prop(self):
"""prop doc.
Returns:
String.
"""
return "prop_with_doc"
# Assert function docs are properly updated.
self.assertEqual(
"prop doc. (deprecated)"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s"
"\n"
"\n Returns:"
"\n String."
"\n " % (date, instructions),
getattr(_Object, "_prop").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual("prop_with_doc", _Object()._prop)
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_prop_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
class _Object(object):
      def __init__(self):
pass
@property
@deprecation.deprecated(date, instructions)
def _prop(self):
return "prop_no_doc"
# Assert function docs are properly updated.
self.assertEqual(
"DEPRECATED FUNCTION"
"\n"
"\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions),
getattr(_Object, "_prop").__doc__)
# Assert calling new fn issues log warning.
self.assertEqual("prop_no_doc", _Object()._prop)
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
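# Illustrative sketch (not one of the test cases above): the decorator under
# test is applied like this, and the first call to the wrapped function logs a
# removal warning carrying the given date and instructions.
@deprecation.deprecated("2016-07-04", "Use the new function instead.")
def _example_deprecated_fn(x):
  return x * 2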
class DeprecatedArgsTest(tf.test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "date"):
deprecation.deprecated_args(None, instructions, "deprecated")
with self.assertRaisesRegexp(ValueError, "date"):
deprecation.deprecated_args("", instructions, "deprecated")
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_args("07-04-2016", instructions, "deprecated")
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_args(date, None, "deprecated")
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_args(date, "", "deprecated")
with self.assertRaisesRegexp(ValueError, "argument"):
deprecation.deprecated_args(date, instructions)
def test_deprecated_missing_args(self):
date = "2016-07-04"
instructions = "This is how you update..."
def _fn(arg0, arg1, deprecated=None):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
with self.assertRaisesRegexp(ValueError, "not present.*\\['missing'\\]"):
deprecation.deprecated_args(date, instructions, "missing")(_fn)
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
deprecated: Deprecated!
Returns:
Sum of args.
"""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\n Args:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n deprecated: Deprecated!"
"\n"
"\n Returns:"
"\n Sum of args."
"\n " % (date, instructions),
_fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
"""fn doc."""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions),
_fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, deprecated=True):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION ARGUMENTS"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions),
_fn.__doc__)
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_varargs(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, *deprecated):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, True, False))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_kwargs(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "deprecated")
def _fn(arg0, arg1, **deprecated):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert calls without the deprecated argument log nothing.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated argument log a warning.
self.assertEqual(3, _fn(1, 2, a=True, b=False))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_positional_and_named(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_args(date, instructions, "d1", "d2")
def _fn(arg0, d1=None, arg1=2, d2=None):
return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1
# Assert calls without the deprecated arguments log nothing.
self.assertEqual(2, _fn(1, arg1=2))
self.assertEqual(0, mock_warning.call_count)
# Assert calls with the deprecated arguments log warnings.
self.assertEqual(2, _fn(1, None, 2, d2=False))
self.assertEqual(2, mock_warning.call_count)
(args1, _) = mock_warning.call_args_list[0]
self.assertRegexpMatches(args1[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions, "d1"]), set(args1[1:]))
(args2, _) = mock_warning.call_args_list[1]
    self.assertRegexpMatches(args2[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions, "d2"]), set(args2[1:]))
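# Illustrative sketch: deprecated_args takes the date, the instructions and the
# names of the deprecated arguments; only calls that actually pass one of those
# arguments trigger the warning.
@deprecation.deprecated_args("2016-07-04", "Stop passing `legacy`.", "legacy")
def _example_fn_with_deprecated_arg(x, legacy=None):
  return x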
class DeprecatedArgValuesTest(tf.test.TestCase):
def _assert_subset(self, expected_subset, actual_set):
self.assertTrue(
actual_set.issuperset(expected_subset),
msg="%s is not a superset of %s." % (actual_set, expected_subset))
def test_deprecated_illegal_args(self):
instructions = "This is how you update..."
with self.assertRaisesRegexp(ValueError, "date"):
deprecation.deprecated_arg_values(
None, instructions, deprecated=True)
with self.assertRaisesRegexp(ValueError, "date"):
deprecation.deprecated_arg_values(
"", instructions, deprecated=True)
with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
deprecation.deprecated_arg_values(
"07-04-2016", instructions, deprecated=True)
date = "2016-07-04"
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_arg_values(
date, None, deprecated=True)
with self.assertRaisesRegexp(ValueError, "instructions"):
deprecation.deprecated_arg_values(
date, "", deprecated=True)
    with self.assertRaisesRegexp(ValueError, "argument"):
deprecation.deprecated_arg_values(
date, instructions)
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, deprecated=True)
def _fn(arg0, arg1, deprecated=True):
"""fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
deprecated: Deprecated!
Returns:
Sum of args.
"""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s"
"\n"
"\n Args:"
"\n arg0: Arg 0."
"\n arg1: Arg 1."
"\n deprecated: Deprecated!"
"\n"
"\n Returns:"
"\n Sum of args."
"\n " % (date, instructions),
_fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn with deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_with_one_line_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, deprecated=True)
def _fn(arg0, arg1, deprecated=True):
"""fn doc."""
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"fn doc. (deprecated arguments)"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:\n%s" % (date, instructions),
_fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn with deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
@tf.test.mock.patch.object(logging, "warning", autospec=True)
def test_static_fn_no_doc(self, mock_warning):
date = "2016-07-04"
instructions = "This is how you update..."
@deprecation.deprecated_arg_values(date, instructions, deprecated=True)
def _fn(arg0, arg1, deprecated=True):
return arg0 + arg1 if deprecated else arg1 + arg0
# Assert function docs are properly updated.
self.assertEqual("_fn", _fn.__name__)
self.assertEqual(
"DEPRECATED FUNCTION ARGUMENTS"
"\n"
"\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s."
"\nInstructions for updating:"
"\n%s" % (date, instructions),
_fn.__doc__)
# Assert calling new fn with non-deprecated value logs nothing.
self.assertEqual(3, _fn(1, 2, deprecated=False))
self.assertEqual(0, mock_warning.call_count)
# Assert calling new fn issues log warning.
self.assertEqual(3, _fn(1, 2, deprecated=True))
self.assertEqual(1, mock_warning.call_count)
(args, _) = mock_warning.call_args
self.assertRegexpMatches(args[0], r"deprecated and will be removed after")
self._assert_subset(set([date, instructions]), set(args[1:]))
# Assert calling new fn with default deprecated value issues log warning.
self.assertEqual(3, _fn(1, 2))
self.assertEqual(2, mock_warning.call_count)
if __name__ == "__main__":
tf.test.main()
| 35.987032 | 80 | 0.66006 | ["Apache-2.0"] | AccelAI/tensorflow | tensorflow/python/util/deprecation_test.py | 24,975 | Python |
# # endpoints
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.utils.html import escape
from django.utils import timezone
from dojo.filters import EndpointFilter
from dojo.forms import EditEndpointForm, \
DeleteEndpointForm, AddEndpointForm, DojoMetaDataForm
from dojo.models import Product, Endpoint, Finding, System_Settings, DojoMeta
from dojo.utils import get_page_items, add_breadcrumb, get_period_counts, get_system_setting, Product_Tab
from django.contrib.contenttypes.models import ContentType
from custom_field.models import CustomFieldValue, CustomField
logger = logging.getLogger(__name__)
def vulnerable_endpoints(request):
endpoints = Endpoint.objects.filter(finding__active=True, finding__verified=True, finding__false_p=False,
finding__duplicate=False, finding__out_of_scope=False).distinct()
# are they authorized
if request.user.is_staff:
pass
else:
products = Product.objects.filter(authorized_users__in=[request.user])
if products.exists():
endpoints = endpoints.filter(product__in=products.all())
else:
raise PermissionDenied
product = None
if 'product' in request.GET:
p = request.GET.getlist('product', [])
if len(p) == 1:
product = get_object_or_404(Product, id=p[0])
ids = get_endpoint_ids(EndpointFilter(request.GET, queryset=endpoints, user=request.user).qs)
endpoints = EndpointFilter(request.GET, queryset=endpoints.filter(id__in=ids), user=request.user)
endpoints_query = endpoints.qs.order_by('host')
paged_endpoints = get_page_items(request, endpoints_query, 25)
add_breadcrumb(title="Vulnerable Endpoints", top_level=not len(request.GET), request=request)
system_settings = System_Settings.objects.get()
product_tab = None
view_name = "All Endpoints"
if product:
product_tab = Product_Tab(product.id, "Vulnerable Endpoints", tab="endpoints")
return render(
request, 'dojo/endpoints.html', {
'product_tab': product_tab,
"endpoints": paged_endpoints,
"filtered": endpoints,
"name": "Vulnerable Endpoints",
})
def all_endpoints(request):
endpoints = Endpoint.objects.all()
show_uri = get_system_setting('display_endpoint_uri')
# are they authorized
if request.user.is_staff:
pass
else:
products = Product.objects.filter(authorized_users__in=[request.user])
if products.exists():
endpoints = endpoints.filter(product__in=products.all())
else:
raise PermissionDenied
product = None
if 'product' in request.GET:
p = request.GET.getlist('product', [])
if len(p) == 1:
product = get_object_or_404(Product, id=p[0])
if show_uri:
endpoints = EndpointFilter(request.GET, queryset=endpoints, user=request.user)
paged_endpoints = get_page_items(request, endpoints.qs, 25)
else:
ids = get_endpoint_ids(EndpointFilter(request.GET, queryset=endpoints, user=request.user).qs)
endpoints = EndpointFilter(request.GET, queryset=endpoints.filter(id__in=ids), user=request.user)
paged_endpoints = get_page_items(request, endpoints.qs, 25)
add_breadcrumb(title="All Endpoints", top_level=not len(request.GET), request=request)
product_tab = None
view_name = "All Endpoints"
if product:
view_name = "Endpoints"
product_tab = Product_Tab(product.id, "Endpoints", tab="endpoints")
return render(
request, 'dojo/endpoints.html', {
'product_tab': product_tab,
"endpoints": paged_endpoints,
"filtered": endpoints,
"name": view_name,
"show_uri": show_uri
})
def get_endpoint_ids(endpoints):
hosts = []
ids = []
for e in endpoints:
if ":" in e.host:
host_no_port = e.host[:e.host.index(':')]
else:
host_no_port = e.host
key = host_no_port + '-' + str(e.product.id)
if key in hosts:
continue
else:
hosts.append(key)
ids.append(e.id)
return ids
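# Hedged example of the de-duplication above (hypothetical hosts): for
# endpoints app.example.com:443/login and app.example.com/admin on the same
# product, both reduce to the key 'app.example.com-<product id>', so only the
# first endpoint's id is returned.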
def view_endpoint(request, eid):
endpoint = get_object_or_404(Endpoint, id=eid)
host = endpoint.host_no_port
endpoints = Endpoint.objects.filter(host__regex="^" + host + ":?",
product=endpoint.product).distinct()
if (request.user in endpoint.product.authorized_users.all()) or request.user.is_staff:
pass
else:
raise PermissionDenied
endpoint_cf = endpoint.endpoint_meta
endpoint_metadata = {}
for cf in endpoint_cf.all():
cfv = cf.value
if len(cfv):
endpoint_metadata[cf.name] = cfv
all_findings = Finding.objects.filter(endpoints__in=endpoints).distinct()
active_findings = Finding.objects.filter(endpoints__in=endpoints,
active=True,
verified=True).distinct()
closed_findings = Finding.objects.filter(endpoints__in=endpoints,
mitigated__isnull=False).distinct()
if all_findings:
start_date = timezone.make_aware(datetime.combine(all_findings.last().date, datetime.min.time()))
else:
start_date = timezone.now()
end_date = timezone.now()
r = relativedelta(end_date, start_date)
months_between = (r.years * 12) + r.months
# include current month
months_between += 1
monthly_counts = get_period_counts(active_findings, all_findings, closed_findings, None, months_between, start_date,
relative_delta='months')
paged_findings = get_page_items(request, active_findings, 25)
vulnerable = False
if active_findings.count() != 0:
vulnerable = True
product_tab = Product_Tab(endpoint.product.id, "Endpoint", tab="endpoints")
return render(request,
"dojo/view_endpoint.html",
{"endpoint": endpoint,
'product_tab': product_tab,
"endpoints": endpoints,
"findings": paged_findings,
'all_findings': all_findings,
'opened_per_month': monthly_counts['opened_per_period'],
'endpoint_metadata': endpoint_metadata,
'vulnerable': vulnerable,
})
@user_passes_test(lambda u: u.is_staff)
def edit_endpoint(request, eid):
endpoint = get_object_or_404(Endpoint, id=eid)
if request.method == 'POST':
form = EditEndpointForm(request.POST, instance=endpoint)
if form.is_valid():
endpoint = form.save()
tags = request.POST.getlist('tags')
t = ", ".join(tags)
endpoint.tags = t
messages.add_message(request,
messages.SUCCESS,
'Endpoint updated successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_endpoint', args=(endpoint.id,)))
add_breadcrumb(parent=endpoint, title="Edit", top_level=False, request=request)
form = EditEndpointForm(instance=endpoint)
form.initial['tags'] = [tag.name for tag in endpoint.tags]
product_tab = Product_Tab(endpoint.product.id, "Endpoint", tab="endpoints")
return render(request,
"dojo/edit_endpoint.html",
{"endpoint": endpoint,
'product_tab': product_tab,
"form": form,
})
@user_passes_test(lambda u: u.is_staff)
def delete_endpoint(request, eid):
endpoint = get_object_or_404(Endpoint, pk=eid)
product = endpoint.product
form = DeleteEndpointForm(instance=endpoint)
from django.contrib.admin.utils import NestedObjects
from django.db import DEFAULT_DB_ALIAS
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([endpoint])
rels = collector.nested()
if request.method == 'POST':
if 'id' in request.POST and str(endpoint.id) == request.POST['id']:
form = DeleteEndpointForm(request.POST, instance=endpoint)
if form.is_valid():
del endpoint.tags
endpoint.delete()
messages.add_message(request,
messages.SUCCESS,
'Endpoint and relationships removed.',
extra_tags='alert-success')
                return HttpResponseRedirect(reverse('endpoints') + "?product=" + str(product.id))
product_tab = Product_Tab(endpoint.product.id, "Delete Endpoint", tab="endpoints")
return render(request, 'dojo/delete_endpoint.html',
{'endpoint': endpoint,
'product_tab': product_tab,
'form': form,
'rels': rels,
})
@user_passes_test(lambda u: u.is_staff)
def add_endpoint(request, pid):
product = get_object_or_404(Product, id=pid)
template = 'dojo/add_endpoint.html'
if '_popup' in request.GET:
template = 'dojo/add_related.html'
form = AddEndpointForm(product=product)
if request.method == 'POST':
form = AddEndpointForm(request.POST, product=product)
if form.is_valid():
endpoints = form.save()
tags = request.POST.getlist('tags')
t = ", ".join(tags)
for e in endpoints:
e.tags = t
messages.add_message(request,
messages.SUCCESS,
'Endpoint added successfully.',
extra_tags='alert-success')
if '_popup' in request.GET:
resp = '<script type="text/javascript">opener.emptyEndpoints(window);</script>'
for endpoint in endpoints:
resp += '<script type="text/javascript">opener.dismissAddAnotherPopupDojo(window, "%s", "%s");</script>' \
% (escape(endpoint._get_pk_val()), escape(endpoint))
resp += '<script type="text/javascript">window.close();</script>'
return HttpResponse(resp)
else:
return HttpResponseRedirect(reverse('endpoints') + "?product=" + pid)
product_tab = None
if '_popup' not in request.GET:
product_tab = Product_Tab(product.id, "Add Endpoint", tab="endpoints")
return render(request, template, {
'product_tab': product_tab,
'name': 'Add Endpoint',
'form': form})
@user_passes_test(lambda u: u.is_staff)
def add_product_endpoint(request):
form = AddEndpointForm()
if request.method == 'POST':
form = AddEndpointForm(request.POST)
if form.is_valid():
endpoints = form.save()
tags = request.POST.getlist('tags')
t = ", ".join(tags)
for e in endpoints:
e.tags = t
messages.add_message(request,
messages.SUCCESS,
'Endpoint added successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('endpoints') + "?product=%s" % form.product.id)
add_breadcrumb(title="Add Endpoint", top_level=False, request=request)
return render(request,
'dojo/add_endpoint.html',
{'name': 'Add Endpoint',
'form': form,
})
@user_passes_test(lambda u: u.is_staff)
def add_meta_data(request, eid):
endpoint = Endpoint.objects.get(id=eid)
if request.method == 'POST':
form = DojoMetaDataForm(request.POST)
if form.is_valid():
cf, created = DojoMeta.objects.get_or_create(name=form.cleaned_data['name'],
model_name='Endpoint',
model_id=endpoint.id,
value=form.cleaned_data['value'])
cf.save()
messages.add_message(request,
messages.SUCCESS,
'Metadata added successfully.',
extra_tags='alert-success')
if 'add_another' in request.POST:
return HttpResponseRedirect(reverse('add_meta_data', args=(eid,)))
else:
return HttpResponseRedirect(reverse('view_endpoint', args=(eid,)))
else:
form = DojoMetaDataForm(initial={'content_type': endpoint})
add_breadcrumb(parent=endpoint, title="Add Metadata", top_level=False, request=request)
product_tab = Product_Tab(endpoint.product.id, "Add Metadata", tab="endpoints")
return render(request,
'dojo/add_endpoint_meta_data.html',
{'form': form,
'product_tab': product_tab,
'endpoint': endpoint,
})
@user_passes_test(lambda u: u.is_staff)
def edit_meta_data(request, eid):
endpoint = Endpoint.objects.get(id=eid)
endpoint_cf = endpoint.endpoint_meta.all()
endpoint_metadata = {}
for cf in endpoint_cf:
cfv = cf.value
if len(cfv):
endpoint_metadata[cf] = cfv
if request.method == 'POST':
        for key, value in request.POST.items():
if key.startswith('cfv_'):
cfv_id = int(key.split('_')[1])
cfv = get_object_or_404(CustomFieldValue, id=cfv_id)
value = value.strip()
if value:
cfv.value = value
cfv.save()
else:
cfv.delete()
messages.add_message(request,
messages.SUCCESS,
'Metadata edited successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('view_endpoint', args=(eid,)))
product_tab = Product_Tab(endpoint.product.id, "Edit Metadata", tab="endpoints")
return render(request,
'dojo/edit_endpoint_meta_data.html',
{'endpoint': endpoint,
'product_tab': product_tab,
'endpoint_metadata': endpoint_metadata,
})
| 38.609819 | 126 | 0.595703 | [
"BSD-3-Clause"
] | bend18/django-DefectDojo | dojo/endpoint/views.py | 14,942 | Python |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import contextlib
import funsor
from funsor.adjoint import AdjointTape
from pyro.contrib.funsor import to_data, to_funsor
from pyro.contrib.funsor.handlers import enum, plate, replay, trace
from pyro.contrib.funsor.infer.elbo import ELBO, Jit_ELBO
from pyro.distributions.util import copy_docs_from
from pyro.infer import TraceEnum_ELBO as _OrigTraceEnum_ELBO
# Work around a bug in unfold_contraction_generic_tuple interacting with
# Approximate introduced in https://github.com/pyro-ppl/funsor/pull/488 .
# Once fixed, this can be replaced by funsor.optimizer.apply_optimizer().
def apply_optimizer(x):
with funsor.interpretations.normalize:
expr = funsor.interpreter.reinterpret(x)
with funsor.optimizer.optimize_base:
return funsor.interpreter.reinterpret(expr)
def terms_from_trace(tr):
"""Helper function to extract elbo components from execution traces."""
# data structure containing densities, measures, scales, and identification
# of free variables as either product (plate) variables or sum (measure) variables
terms = {
"log_factors": [],
"log_measures": [],
"scale": to_funsor(1.0),
"plate_vars": frozenset(),
"measure_vars": frozenset(),
"plate_to_step": dict(),
}
for name, node in tr.nodes.items():
# add markov dimensions to the plate_to_step dictionary
if node["type"] == "markov_chain":
terms["plate_to_step"][node["name"]] = node["value"]
# ensure previous step variables are added to measure_vars
for step in node["value"]:
terms["measure_vars"] |= frozenset(
{
var
for var in step[1:-1]
if tr.nodes[var]["funsor"].get("log_measure", None) is not None
}
)
if (
node["type"] != "sample"
or type(node["fn"]).__name__ == "_Subsample"
or node["infer"].get("_do_not_score", False)
):
continue
# grab plate dimensions from the cond_indep_stack
terms["plate_vars"] |= frozenset(
f.name for f in node["cond_indep_stack"] if f.vectorized
)
# grab the log-measure, found only at sites that are not replayed or observed
if node["funsor"].get("log_measure", None) is not None:
terms["log_measures"].append(node["funsor"]["log_measure"])
# sum (measure) variables: the fresh non-plate variables at a site
terms["measure_vars"] |= (
frozenset(node["funsor"]["value"].inputs) | {name}
) - terms["plate_vars"]
# grab the scale, assuming a common subsampling scale
if (
node.get("replay_active", False)
and set(node["funsor"]["log_prob"].inputs) & terms["measure_vars"]
and float(to_data(node["funsor"]["scale"])) != 1.0
):
# model site that depends on enumerated variable: common scale
terms["scale"] = node["funsor"]["scale"]
else: # otherwise: default scale behavior
node["funsor"]["log_prob"] = (
node["funsor"]["log_prob"] * node["funsor"]["scale"]
)
# grab the log-density, found at all sites except those that are not replayed
if node["is_observed"] or not node.get("replay_skipped", False):
terms["log_factors"].append(node["funsor"]["log_prob"])
# add plate dimensions to the plate_to_step dictionary
terms["plate_to_step"].update(
{plate: terms["plate_to_step"].get(plate, {}) for plate in terms["plate_vars"]}
)
return terms
@copy_docs_from(_OrigTraceEnum_ELBO)
class TraceMarkovEnum_ELBO(ELBO):
def differentiable_loss(self, model, guide, *args, **kwargs):
# get batched, enumerated, to_funsor-ed traces from the guide and model
with plate(
size=self.num_particles
) if self.num_particles > 1 else contextlib.ExitStack(), enum(
first_available_dim=(-self.max_plate_nesting - 1)
if self.max_plate_nesting
else None
):
guide_tr = trace(guide).get_trace(*args, **kwargs)
model_tr = trace(replay(model, trace=guide_tr)).get_trace(*args, **kwargs)
# extract from traces all metadata that we will need to compute the elbo
guide_terms = terms_from_trace(guide_tr)
model_terms = terms_from_trace(model_tr)
# guide side enumeration is not supported
if any(guide_terms["plate_to_step"].values()):
raise NotImplementedError(
"TraceMarkovEnum_ELBO does not yet support guide side Markov enumeration"
)
# build up a lazy expression for the elbo
with funsor.terms.lazy:
# identify and contract out auxiliary variables in the model with partial_sum_product
contracted_factors, uncontracted_factors = [], []
for f in model_terms["log_factors"]:
if model_terms["measure_vars"].intersection(f.inputs):
contracted_factors.append(f)
else:
uncontracted_factors.append(f)
# incorporate the effects of subsampling and handlers.scale through a common scale factor
markov_dims = frozenset(
{plate for plate, step in model_terms["plate_to_step"].items() if step}
)
contracted_costs = [
model_terms["scale"] * f
for f in funsor.sum_product.dynamic_partial_sum_product(
funsor.ops.logaddexp,
funsor.ops.add,
model_terms["log_measures"] + contracted_factors,
plate_to_step=model_terms["plate_to_step"],
eliminate=model_terms["measure_vars"] | markov_dims,
)
]
costs = contracted_costs + uncontracted_factors # model costs: logp
costs += [-f for f in guide_terms["log_factors"]] # guide costs: -logq
# finally, integrate out guide variables in the elbo and all plates
plate_vars = guide_terms["plate_vars"] | model_terms["plate_vars"]
elbo = to_funsor(0, output=funsor.Real)
for cost in costs:
# compute the marginal logq in the guide corresponding to this cost term
log_prob = funsor.sum_product.sum_product(
funsor.ops.logaddexp,
funsor.ops.add,
guide_terms["log_measures"],
plates=plate_vars,
eliminate=(plate_vars | guide_terms["measure_vars"])
- frozenset(cost.inputs),
)
# compute the expected cost term E_q[logp] or E_q[-logq] using the marginal logq for q
elbo_term = funsor.Integrate(
log_prob, cost, guide_terms["measure_vars"] & frozenset(cost.inputs)
)
elbo += elbo_term.reduce(
funsor.ops.add, plate_vars & frozenset(cost.inputs)
)
# evaluate the elbo, using memoize to share tensor computation where possible
with funsor.interpretations.memoize():
return -to_data(apply_optimizer(elbo))
@copy_docs_from(_OrigTraceEnum_ELBO)
class TraceEnum_ELBO(ELBO):
def differentiable_loss(self, model, guide, *args, **kwargs):
# get batched, enumerated, to_funsor-ed traces from the guide and model
with plate(
size=self.num_particles
) if self.num_particles > 1 else contextlib.ExitStack(), enum(
first_available_dim=(-self.max_plate_nesting - 1)
if self.max_plate_nesting
else None
):
guide_tr = trace(guide).get_trace(*args, **kwargs)
model_tr = trace(replay(model, trace=guide_tr)).get_trace(*args, **kwargs)
# extract from traces all metadata that we will need to compute the elbo
guide_terms = terms_from_trace(guide_tr)
model_terms = terms_from_trace(model_tr)
# build up a lazy expression for the elbo
with funsor.terms.lazy:
# identify and contract out auxiliary variables in the model with partial_sum_product
contracted_factors, uncontracted_factors = [], []
for f in model_terms["log_factors"]:
if model_terms["measure_vars"].intersection(f.inputs):
contracted_factors.append(f)
else:
uncontracted_factors.append(f)
# incorporate the effects of subsampling and handlers.scale through a common scale factor
contracted_costs = [
model_terms["scale"] * f
for f in funsor.sum_product.partial_sum_product(
funsor.ops.logaddexp,
funsor.ops.add,
model_terms["log_measures"] + contracted_factors,
plates=model_terms["plate_vars"],
eliminate=model_terms["measure_vars"],
)
]
# accumulate costs from model (logp) and guide (-logq)
costs = contracted_costs + uncontracted_factors # model costs: logp
costs += [-f for f in guide_terms["log_factors"]] # guide costs: -logq
# compute expected cost
# Cf. pyro.infer.util.Dice.compute_expectation()
# https://github.com/pyro-ppl/pyro/blob/0.3.0/pyro/infer/util.py#L212
# TODO Replace this with funsor.Expectation
plate_vars = guide_terms["plate_vars"] | model_terms["plate_vars"]
# compute the marginal logq in the guide corresponding to each cost term
targets = dict()
for cost in costs:
input_vars = frozenset(cost.inputs)
if input_vars not in targets:
targets[input_vars] = funsor.Tensor(
funsor.ops.new_zeros(
funsor.tensor.get_default_prototype(),
tuple(v.size for v in cost.inputs.values()),
),
cost.inputs,
cost.dtype,
)
with AdjointTape() as tape:
logzq = funsor.sum_product.sum_product(
funsor.ops.logaddexp,
funsor.ops.add,
guide_terms["log_measures"] + list(targets.values()),
plates=plate_vars,
eliminate=(plate_vars | guide_terms["measure_vars"]),
)
marginals = tape.adjoint(
funsor.ops.logaddexp, funsor.ops.add, logzq, tuple(targets.values())
)
# finally, integrate out guide variables in the elbo and all plates
elbo = to_funsor(0, output=funsor.Real)
for cost in costs:
target = targets[frozenset(cost.inputs)]
logzq_local = marginals[target].reduce(
funsor.ops.logaddexp, frozenset(cost.inputs) - plate_vars
)
log_prob = marginals[target] - logzq_local
elbo_term = funsor.Integrate(
log_prob,
cost,
guide_terms["measure_vars"] & frozenset(log_prob.inputs),
)
elbo += elbo_term.reduce(
funsor.ops.add, plate_vars & frozenset(cost.inputs)
)
# evaluate the elbo, using memoize to share tensor computation where possible
with funsor.interpretations.memoize():
return -to_data(apply_optimizer(elbo))
class JitTraceEnum_ELBO(Jit_ELBO, TraceEnum_ELBO):
pass
class JitTraceMarkovEnum_ELBO(Jit_ELBO, TraceMarkovEnum_ELBO):
pass
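# Illustrative usage sketch (an assumption, not part of this module): these
# funsor-backed ELBOs are intended to be used like pyro.infer.TraceEnum_ELBO,
# e.g.
#   elbo = TraceEnum_ELBO(max_plate_nesting=1)
#   svi = pyro.infer.SVI(model, guide, pyro.optim.Adam({"lr": 1e-3}), elbo)
#   loss = svi.step()
# where `model` and `guide` are written against the pyro.contrib.funsor
# handlers imported at the top of this file.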
| 44.625926 | 102 | 0.589675 | [
"Apache-2.0"
] | 1989Ryan/pyro | pyro/contrib/funsor/infer/traceenum_elbo.py | 12,049 | Python |
# -*- coding: utf-8 -*-
#
# Copyright 2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku service dataset jobs tests."""
import json
import uuid
import pytest
from flaky import flaky
from git import Repo
from tests.service.views.test_dataset_views import assert_rpc_response
from renku.core.errors import DatasetExistsError, ParameterError
from renku.service.jobs.cleanup import cache_project_cleanup
from renku.service.jobs.datasets import dataset_add_remote_file, dataset_import
from renku.service.utils import make_project_path
@pytest.mark.parametrize(
'url', [(
'https://dev.renku.ch/projects/rokroskar/'
'scratch-project/datasets/7eba3f50-1a19-4282-8a86-2497e0f43809/'
)]
)
@pytest.mark.integration
@flaky(max_runs=30, min_passes=1)
def test_dataset_url_import_job(url, svc_client_with_repo):
"""Test dataset import via url."""
svc_client, headers, project_id, url_components = svc_client_with_repo
user = {'user_id': headers['Renku-User-Id']}
payload = {
'project_id': project_id,
'dataset_uri': url,
}
response = svc_client.post(
'/datasets.import',
data=json.dumps(payload),
headers=headers,
)
assert response
assert_rpc_response(response)
assert {'job_id', 'created_at'} == set(response.json['result'].keys())
dest = make_project_path(
user, {
'owner': url_components.owner,
'name': url_components.name
}
)
old_commit = Repo(dest).head.commit
job_id = response.json['result']['job_id']
dataset_import(
user,
job_id,
project_id,
url,
)
new_commit = Repo(dest).head.commit
assert old_commit.hexsha != new_commit.hexsha
assert f'service: dataset import {url}' == new_commit.message
response = svc_client.get(
f'/jobs/{job_id}',
headers=headers,
)
assert response
assert_rpc_response(response)
assert 'COMPLETED' == response.json['result']['state']
@pytest.mark.parametrize(
'doi', [
'10.5281/zenodo.3239980',
'10.5281/zenodo.3188334',
'10.7910/DVN/TJCLKP',
]
)
@pytest.mark.integration
@pytest.mark.service
@flaky(max_runs=30, min_passes=1)
def test_dataset_import_job(doi, svc_client_with_repo):
"""Test dataset import via doi."""
svc_client, headers, project_id, url_components = svc_client_with_repo
user = {'user_id': headers['Renku-User-Id']}
payload = {
'project_id': project_id,
'dataset_uri': doi,
}
response = svc_client.post(
'/datasets.import',
data=json.dumps(payload),
headers=headers,
)
assert response
assert_rpc_response(response)
assert {'job_id', 'created_at'} == set(response.json['result'].keys())
dest = make_project_path(
user, {
'owner': url_components.owner,
'name': url_components.name
}
)
old_commit = Repo(dest).head.commit
job_id = response.json['result']['job_id']
dataset_import(
user,
job_id,
project_id,
doi,
)
new_commit = Repo(dest).head.commit
assert old_commit.hexsha != new_commit.hexsha
assert f'service: dataset import {doi}' == new_commit.message
response = svc_client.get(
f'/jobs/{job_id}',
headers=headers,
)
assert response
assert_rpc_response(response)
assert 'COMPLETED' == response.json['result']['state']
@pytest.mark.parametrize(
'doi,expected_err',
[
# not valid doi
('junkjunkjunk', 'Invalid parameter value'),
# not existing doi
('10.5281/zenodo.11111111111111111', 'Invalid parameter value'),
]
)
@pytest.mark.integration
@pytest.mark.service
@flaky(max_runs=30, min_passes=1)
def test_dataset_import_junk_job(doi, expected_err, svc_client_with_repo):
"""Test dataset import."""
svc_client, headers, project_id, url_components = svc_client_with_repo
user = {'user_id': headers['Renku-User-Id']}
payload = {
'project_id': project_id,
'dataset_uri': doi,
}
response = svc_client.post(
'/datasets.import',
data=json.dumps(payload),
headers=headers,
)
assert response
assert_rpc_response(response)
assert {'job_id', 'created_at'} == set(response.json['result'].keys())
dest = make_project_path(
user, {
'owner': url_components.owner,
'name': url_components.name
}
)
old_commit = Repo(dest).head.commit
job_id = response.json['result']['job_id']
with pytest.raises(ParameterError):
dataset_import(
user,
job_id,
project_id,
doi,
)
new_commit = Repo(dest).head.commit
assert old_commit.hexsha == new_commit.hexsha
response = svc_client.get(
f'/jobs/{job_id}',
data=json.dumps(payload),
headers=headers,
)
assert_rpc_response(response)
extras = response.json['result']['extras']
assert 'error' in extras
assert expected_err in extras['error']
@pytest.mark.parametrize('doi', [
'10.5281/zenodo.3634052',
])
@pytest.mark.integration
@pytest.mark.service
@flaky(max_runs=30, min_passes=1)
def test_dataset_import_twice_job(doi, svc_client_with_repo):
"""Test dataset import."""
svc_client, headers, project_id, url_components = svc_client_with_repo
user = {'user_id': headers['Renku-User-Id']}
payload = {
'project_id': project_id,
'dataset_uri': doi,
}
response = svc_client.post(
'/datasets.import',
data=json.dumps(payload),
headers=headers,
)
assert response
assert_rpc_response(response)
assert {'job_id', 'created_at'} == set(response.json['result'].keys())
dest = make_project_path(
user, {
'owner': url_components.owner,
'name': url_components.name
}
)
old_commit = Repo(dest).head.commit
job_id = response.json['result']['job_id']
dataset_import(
user,
job_id,
project_id,
doi,
)
new_commit = Repo(dest).head.commit
assert old_commit.hexsha != new_commit.hexsha
with pytest.raises(DatasetExistsError):
dataset_import(
user,
job_id,
project_id,
doi,
)
new_commit2 = Repo(dest).head.commit
assert new_commit.hexsha == new_commit2.hexsha
response = svc_client.get(
f'/jobs/{job_id}',
data=json.dumps(payload),
headers=headers,
)
assert_rpc_response(response)
extras = response.json['result']['extras']
assert 'error' in extras
assert 'Dataset exists' in extras['error']
@pytest.mark.parametrize(
'url', [
'https://gist.github.com/jsam/d957f306ed0fe4ff018e902df6a1c8e3',
]
)
@pytest.mark.integration
@pytest.mark.service
@flaky(max_runs=30, min_passes=1)
def test_dataset_add_remote_file(url, svc_client_with_repo):
"""Test dataset add a remote file."""
svc_client, headers, project_id, url_components = svc_client_with_repo
user = {'user_id': headers['Renku-User-Id']}
payload = {
'project_id': project_id,
'short_name': uuid.uuid4().hex,
'create_dataset': True,
'files': [{
'file_url': url
}]
}
response = svc_client.post(
'/datasets.add',
data=json.dumps(payload),
headers=headers,
)
assert response
assert_rpc_response(response)
assert {'files', 'short_name',
'project_id'} == set(response.json['result'].keys())
dest = make_project_path(
user, {
'owner': url_components.owner,
'name': url_components.name
}
)
old_commit = Repo(dest).head.commit
job_id = response.json['result']['files'][0]['job_id']
commit_message = 'service: dataset add remote file'
dataset_add_remote_file(
user, job_id, project_id, True, commit_message, payload['short_name'],
url
)
new_commit = Repo(dest).head.commit
assert old_commit.hexsha != new_commit.hexsha
assert commit_message == new_commit.message
@pytest.mark.parametrize('doi', [
'10.5281/zenodo.3761586',
])
@pytest.mark.integration
@pytest.mark.service
def test_dataset_project_lock(doi, svc_client_with_repo):
"""Test dataset project lock."""
svc_client, headers, project_id, url_components = svc_client_with_repo
user = {'user_id': headers['Renku-User-Id']}
payload = {
'project_id': project_id,
'dataset_uri': doi,
}
response = svc_client.post(
'/datasets.import',
data=json.dumps(payload),
headers=headers,
)
assert response
assert_rpc_response(response)
assert {'job_id', 'created_at'} == set(response.json['result'].keys())
dest = make_project_path(
user, {
'owner': url_components.owner,
'name': url_components.name
}
)
old_commit = Repo(dest).head.commit
cache_project_cleanup()
new_commit = Repo(dest).head.commit
assert old_commit.hexsha == new_commit.hexsha
assert dest.exists() and [file for file in dest.glob('*')]
| 26.932432 | 79 | 0.64275 | [
"Apache-2.0"
] | mohammad-sdsc/renku-python | tests/service/jobs/test_datasets.py | 9,970 | Python |
# specifically use concurrent.futures for threadsafety
# asyncio Futures cannot be used across threads
import asyncio
import json
import time
from functools import partial
from kubernetes_asyncio import watch
from traitlets import Any
from traitlets import Bool
from traitlets import Dict
from traitlets import Int
from traitlets import Unicode
from traitlets.config import LoggingConfigurable
from urllib3.exceptions import ReadTimeoutError
from .clients import shared_client
# This is kubernetes client implementation specific, but we need to know
# whether it was a network or watch timeout.
class ResourceReflector(LoggingConfigurable):
"""Base class for keeping a local up-to-date copy of a set of
kubernetes resources.
Must be subclassed once per kind of resource that needs watching.
Creating a reflector should be done with the create() classmethod,
    since that, in addition to creating the instance, also starts the watch task.
Shutting down a reflector should be done by awaiting its stop() method.
KubeSpawner does not do this, because its reflectors are singleton
instances shared among multiple spawners. The watch task therefore runs
until JupyterHub exits.
"""
labels = Dict(
{},
config=True,
help="""
Labels to reflect onto local cache
""",
)
fields = Dict(
{},
config=True,
help="""
Fields to restrict the reflected objects
""",
)
resources = Dict(
{},
help="""
Dictionary of resource names to the appropriate resource objects.
This can be accessed across threads safely.
""",
)
kind = Unicode(
'resource',
help="""
Human readable name for kind of object we're watching for.
Used for diagnostic messages.
""",
)
omit_namespace = Bool(
False,
config=True,
help="""
Set this to true if the reflector is to operate across
multiple namespaces.
""",
)
namespace = Unicode(
None,
allow_none=True,
help="""
Namespace to watch for resources in; leave at 'None' for
multi-namespace reflectors.
""",
)
list_method_name = Unicode(
"",
help="""
        Name of function (on the apigroup represented by
        `api_group_name`) that is to be called to list resources.
        This will be passed a label selector.
If self.omit_namespace is False you want something of the form
list_namespaced_<resource> - for example,
`list_namespaced_pod` will give you a PodReflector. It will
take its namespace from self.namespace (which therefore should
not be None).
If self.omit_namespace is True, you want
list_<resource>_for_all_namespaces.
This must be set by a subclass.
It is not necessary to set it for pod or event reflectors, because
__init__ will figure it out. If you create your own reflector
subclass you probably want to add the logic to choose the method
name to that class's __init__().
""",
)
api_group_name = Unicode(
'CoreV1Api',
help="""
Name of class that represents the apigroup on which
`list_method_name` is to be found.
Defaults to CoreV1Api, which has everything in the 'core' API group. If you want to watch Ingresses,
for example, you would have to use ExtensionsV1beta1Api
""",
)
request_timeout = Int(
60,
config=True,
help="""
Network timeout for kubernetes watch.
Trigger watch reconnect when a given request is taking too long,
which can indicate network issues.
""",
)
timeout_seconds = Int(
10,
config=True,
help="""
Timeout for kubernetes watch.
Trigger watch reconnect when no watch event has been received.
This will cause a full reload of the currently existing resources
from the API server.
""",
)
restart_seconds = Int(
30,
config=True,
help="""
Maximum time before restarting a watch.
The watch will be restarted at least this often,
even if events are still arriving.
Avoids trusting kubernetes watch to yield all events,
which seems to not be a safe assumption.
""",
)
on_failure = Any(help="""Function to be called when the reflector gives up.""")
_stopping = Bool(False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Client configuration for kubernetes, as done via the load_config
# function, has already taken place in KubeSpawner or KubeIngressProxy
# initialization steps.
self.api = shared_client(self.api_group_name)
# FIXME: Protect against malicious labels?
self.label_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.labels.items()]
)
self.field_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.fields.items()]
)
self.first_load_future = asyncio.Future()
# Make sure that we know kind, whether we should omit the
# namespace, and what our list_method_name is. For the things
# we already know about, we can derive list_method_name from
# those two things. New reflector types should also update
# their __init__() methods to derive list_method_name, but you
# could just set it directly in the subclass.
if not self.list_method_name:
plural_to_singular = {
"endpoints": "endpoints",
"events": "event",
"ingresses": "ingress",
"pods": "pod",
"services": "service",
}
if self.kind in plural_to_singular:
if self.omit_namespace:
self.list_method_name = (
f"list_{plural_to_singular[self.kind]}_for_all_namespaces"
)
else:
self.list_method_name = (
f"list_namespaced_{plural_to_singular[self.kind]}"
)
# Make sure we have the required values.
if not self.kind:
raise RuntimeError("Reflector kind must be set!")
if not self.list_method_name:
raise RuntimeError("Reflector list_method_name must be set!")
self.watch_task = None
async def _list_and_update(self):
"""
Update current list of resources by doing a full fetch.
Overwrites all current resource info.
"""
initial_resources = None
kwargs = dict(
label_selector=self.label_selector,
field_selector=self.field_selector,
_request_timeout=self.request_timeout,
_preload_content=False,
)
if not self.omit_namespace:
kwargs["namespace"] = self.namespace
list_method = getattr(self.api, self.list_method_name)
initial_resources_raw = await list_method(**kwargs)
# This is an atomic operation on the dictionary!
initial_resources = json.loads(await initial_resources_raw.read())
self.resources = {
f'{p["metadata"]["namespace"]}/{p["metadata"]["name"]}': p
for p in initial_resources["items"]
}
if not self.first_load_future.done():
# signal that we've loaded our initial data at least once
self.first_load_future.set_result(None)
# return the resource version so we can hook up a watch
return initial_resources["metadata"]["resourceVersion"]
async def _watch_and_update(self):
"""
Keeps the current list of resources up-to-date
We first fetch the list of current resources, and store that. Then we
register to be notified of changes to those resources, and keep our
local store up-to-date based on these notifications.
We also perform exponential backoff, giving up after we hit 32s
wait time. This should protect against network connections dropping
and intermittent unavailability of the api-server. Every time we
recover from an exception we also do a full fetch, to pick up
changes that might've been missed in the time we were not doing
a watch.
Since the resources are read-only in the Spawner (where they are
used), then this is safe. The Spawner's view of the world might be
out-of-date, but it's not going to corrupt any data.
"""
selectors = []
if self.label_selector:
selectors.append("label selector=%r" % self.label_selector)
if self.field_selector:
selectors.append("field selector=%r" % self.field_selector)
log_selector = ', '.join(selectors)
cur_delay = 0.1
if self.omit_namespace:
ns_str = "all namespaces"
else:
ns_str = "namespace {}".format(self.namespace)
self.log.info(
"watching for %s with %s in %s",
self.kind,
log_selector,
ns_str,
)
while True:
self.log.debug("Connecting %s watcher", self.kind)
start = time.monotonic()
w = watch.Watch()
try:
resource_version = await self._list_and_update()
watch_args = {
"label_selector": self.label_selector,
"field_selector": self.field_selector,
"resource_version": resource_version,
}
if not self.omit_namespace:
watch_args["namespace"] = self.namespace
if self.request_timeout:
# set network receive timeout
watch_args['_request_timeout'] = self.request_timeout
if self.timeout_seconds:
# set watch timeout
watch_args['timeout_seconds'] = self.timeout_seconds
# Calling the method with _preload_content=False is a performance
# optimization making the Kubernetes client do less work. See
# https://github.com/jupyterhub/kubespawner/pull/424.
method = partial(
getattr(self.api, self.list_method_name), _preload_content=False
)
async with w.stream(method, **watch_args) as stream:
async for watch_event in stream:
# in case of timeout_seconds, the w.stream just exits (no exception thrown)
# -> we stop the watcher and start a new one
# Remember that these events are k8s api related WatchEvents
# objects, not k8s Event or Pod representations, they will
# reside in the WatchEvent's object field depending on what
# kind of resource is watched.
#
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#watchevent-v1-meta
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
cur_delay = 0.1
resource = watch_event['raw_object']
ref_key = "{}/{}".format(
resource["metadata"]["namespace"],
resource["metadata"]["name"],
)
if watch_event['type'] == 'DELETED':
# This is an atomic delete operation on the dictionary!
self.resources.pop(ref_key, None)
else:
# This is an atomic operation on the dictionary!
self.resources[ref_key] = resource
if self._stopping:
self.log.info("%s watcher stopped: inner", self.kind)
break
watch_duration = time.monotonic() - start
if watch_duration >= self.restart_seconds:
self.log.debug(
"Restarting %s watcher after %i seconds",
self.kind,
watch_duration,
)
break
except ReadTimeoutError:
# network read time out, just continue and restart the watch
# this could be due to a network problem or just low activity
self.log.warning("Read timeout watching %s, reconnecting", self.kind)
continue
except asyncio.CancelledError:
self.log.debug("Cancelled watching %s", self.kind)
raise
except Exception:
cur_delay = cur_delay * 2
if cur_delay > 30:
self.log.exception("Watching resources never recovered, giving up")
if self.on_failure:
self.on_failure()
return
self.log.exception(
"Error when watching resources, retrying in %ss", cur_delay
)
await asyncio.sleep(cur_delay)
continue
else:
# no events on watch, reconnect
self.log.debug("%s watcher timeout", self.kind)
finally:
w.stop()
if self._stopping:
self.log.info("%s watcher stopped: outer", self.kind)
break
self.log.warning("%s watcher finished", self.kind)
async def start(self):
"""
Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards!
"""
if self.watch_task and not self.watch_task.done():
raise RuntimeError('Task watching for resources is already running')
await self._list_and_update()
self.watch_task = asyncio.create_task(self._watch_and_update())
async def stop(self):
"""
Cleanly shut down the watch task.
"""
self._stopping = True
if self.watch_task and not self.watch_task.done():
# cancel the task, wait for it to complete
self.watch_task.cancel()
try:
timeout = 5
await asyncio.wait_for(self.watch_task, timeout)
except asyncio.TimeoutError:
# Raising the TimeoutError will cancel the task.
self.log.warning(
f"Watch task did not finish in {timeout}s and was cancelled"
)
self.watch_task = None
class NamespacedResourceReflector(ResourceReflector):
"""
Watches for resources in a particular namespace. The list_methods
want both a method name and a namespace.
"""
omit_namespace = False
class MultiNamespaceResourceReflector(ResourceReflector):
"""
Watches for resources across all namespaces. The list_methods
want only a method name. Note that this requires the service account
to be significantly more powerful, since it must be bound to ClusterRoles
rather than just Roles, and therefore this is inherently more
dangerous.
"""
omit_namespace = True
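# Illustrative usage sketch (an assumption, not part of kubespawner itself): a
# namespaced reflector driven directly through the start()/stop() API defined
# above. The helper name, label selector, and namespace are hypothetical, and
# the Kubernetes client configuration is assumed to have been loaded already
# (see the note in __init__).
async def _example_watch_pods(namespace="default"):
    reflector = NamespacedResourceReflector(
        kind="pods",
        namespace=namespace,
        labels={"component": "singleuser-server"},
    )
    await reflector.start()  # initial full list + background watch task
    try:
        # resources maps "namespace/name" -> raw resource dict, kept current by the watch
        return dict(reflector.resources)
    finally:
        await reflector.stop()  # cancel the watch task cleanly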
| 36.949074 | 118 | 0.581631 | [
"BSD-3-Clause"
] | choldgraf/kubespawner | kubespawner/reflector.py | 15,962 | Python |
Hello World
typing something else
| 8.75 | 21 | 0.828571 | [
"Apache-2.0"
] | mxracer409/pynet_testx | test_hello.py | 35 | Python |
# pylint: disable=line-too-long
from allennlp.data.dataset_readers.semantic_parsing.atis import AtisDatasetReader
from allennlp.data.dataset_readers.semantic_parsing.nlvr import NlvrDatasetReader
from allennlp.data.dataset_readers.semantic_parsing.wikitables import WikiTablesDatasetReader
from allennlp.data.dataset_readers.semantic_parsing.template_text2sql import TemplateText2SqlDatasetReader
| 66.166667 | 106 | 0.899244 | [
"Apache-2.0"
] | asiddhant/taskonomy-nlp | allennlp/data/dataset_readers/semantic_parsing/__init__.py | 397 | Python |
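# Illustrative note (not part of the original submission): Baekjoon problem
# 10817 asks for the second largest (median) of three integers; the branch
# structure below prints exactly that value.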
a, b, c = map(int, input().split())
if(a > c and a > b):
if(b > c):
print(b)
else:
print(c)
elif(b > a and b > c):
if(a > c):
print(a)
else:
print(c)
else:
if(a > b):
print(a)
else:
print(b) | 15.470588 | 35 | 0.391635 | [
"MIT"
] | rayleighko/iceamericano | baekjoon/10817/Python.py | 263 | Python |
import sqlite3
from .login import login
lg = login()
class account():
def atualizarBanco(self, email, nome, senha, cidade, aniversario, sexo, visibilidade=1):
conn = sqlite3.connect('./database/project.db', check_same_thread=False)
c = conn.cursor()
idLogado = lg.verLogado()
c.execute('PRAGMA foreign_keys = ON;')
c.execute('UPDATE user SET email_user = ?, nome_user = ?, senha_user = ?, cidade_user = ?, aniversario = ?, sexo = ?, visibilidade = ? WHERE id_User = ?', (email, nome, senha, cidade, aniversario, sexo, visibilidade, idLogado))
conn.commit()
return True | 42.066667 | 235 | 0.651347 | [
"MIT"
] | danielbandeir/MyOwn | codesForDatabase/account.py | 631 | Python |
import numpy as np
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan.utils import RealESRGANer
def test_realesrganer():
# initialize with default model
restorer = RealESRGANer(
scale=4,
model_path='experiments/pretrained_models/RealESRGAN_x4plus.pth',
model=None,
tile=10,
tile_pad=10,
pre_pad=2,
half=False)
assert isinstance(restorer.model, RRDBNet)
assert restorer.half is False
# initialize with user-defined model
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
restorer = RealESRGANer(
scale=4,
model_path='experiments/pretrained_models/RealESRGAN_x4plus_anime_6B.pth',
model=model,
tile=10,
tile_pad=10,
pre_pad=2,
half=True)
# test attribute
assert isinstance(restorer.model, RRDBNet)
assert restorer.half is True
# ------------------ test pre_process ---------------- #
img = np.random.random((12, 12, 3)).astype(np.float32)
restorer.pre_process(img)
assert restorer.img.shape == (1, 3, 14, 14)
# with modcrop
restorer.scale = 1
restorer.pre_process(img)
assert restorer.img.shape == (1, 3, 16, 16)
# ------------------ test process ---------------- #
restorer.process()
assert restorer.output.shape == (1, 3, 64, 64)
# ------------------ test post_process ---------------- #
restorer.mod_scale = 4
output = restorer.post_process()
assert output.shape == (1, 3, 60, 60)
# ------------------ test tile_process ---------------- #
restorer.scale = 4
img = np.random.random((12, 12, 3)).astype(np.float32)
restorer.pre_process(img)
restorer.tile_process()
assert restorer.output.shape == (1, 3, 64, 64)
# ------------------ test enhance ---------------- #
img = np.random.random((12, 12, 3)).astype(np.float32)
result = restorer.enhance(img, outscale=2)
assert result[0].shape == (24, 24, 3)
assert result[1] == 'RGB'
# ------------------ test enhance with 16-bit image---------------- #
img = np.random.random((4, 4, 3)).astype(np.uint16) + 512
result = restorer.enhance(img, outscale=2)
assert result[0].shape == (8, 8, 3)
assert result[1] == 'RGB'
# ------------------ test enhance with gray image---------------- #
img = np.random.random((4, 4)).astype(np.float32)
result = restorer.enhance(img, outscale=2)
assert result[0].shape == (8, 8)
assert result[1] == 'L'
# ------------------ test enhance with RGBA---------------- #
img = np.random.random((4, 4, 4)).astype(np.float32)
result = restorer.enhance(img, outscale=2)
assert result[0].shape == (8, 8, 4)
assert result[1] == 'RGBA'
# ------------------ test enhance with RGBA, alpha_upsampler---------------- #
restorer.tile_size = 0
img = np.random.random((4, 4, 4)).astype(np.float32)
result = restorer.enhance(img, outscale=2, alpha_upsampler=None)
assert result[0].shape == (8, 8, 4)
assert result[1] == 'RGBA'
| 35.102273 | 97 | 0.573648 | [
"BSD-3-Clause"
] | 2917320323/Real-ESRGAN | tests/test_utils.py | 3,089 | Python |
from golem import actions
description = 'verify_title_contains action'
def test(data):
actions.navigate(data.env.url+'elements/')
actions.verify_title_contains('Elem')
try:
actions.verify_title_contains('incorrect title')
except Exception as e:
assert "expected title to contain 'incorrect title'" in e.args[0]
| 26.615385 | 73 | 0.719653 | [
"MIT"
] | kangchenwei/keyautotest2 | projects/golem_integration/tests/actions/verify_title_contains.py | 346 | Python |
title = "Ibis"
tagline = "A template engine for people who enjoy the simpler things in life."
version = "Version 3.2.0"
| 30 | 78 | 0.725 | [
"Unlicense"
] | dmulholl/ibis | docs/config.py | 120 | Python |
import unittest
from parameterized import parameterized as p
from solns.loggerRateLimiter.loggerRateLimiter import *
class UnitTest_LoggerRateLimiter(unittest.TestCase):
@p.expand([
[]
])
def test_naive(self):
pass
| 20.416667 | 55 | 0.722449 | [
"MIT"
] | zcemycl/algoTest | py/tests/testLoggerRateLimiter/test_LoggerRateLimiter.py | 245 | Python |
# Generated by Django 3.1.5 on 2021-02-18 23:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_apirequestlog'),
]
operations = [
migrations.AlterModelOptions(
name='apirequestlog',
options={'verbose_name_plural': 'API Request Log'},
),
]
| 20.444444 | 63 | 0.61413 | [
"MIT"
] | MatthewTe/velkozz_web_api | velkozz_web_api/apps/accounts/migrations/0006_auto_20210218_2309.py | 368 | Python |
import geopy
# Here we just tested geopy functionality.
place = "kuusalu"
locator = geopy.Nominatim(user_agent="myGeocoder")
location = locator.geocode(place)
print(place + ":")
print("Latitude = {}, Longitude = {}".format(location.latitude, location.longitude))
| 26.5 | 84 | 0.739623 | [
"MIT"
] | screaminglordbyron/IDS-project | Koordinaator.py | 265 | Python |
#!/usr/bin/python3.6
import sys
import re
import csv
import numpy as np
import pandas as pd
import string
import nltk
from nltk.corpus import stopwords
import textcleaner as tc
def main(separator='\t'):
# input comes from STDIN (standard input)
topWords_list = ['club','baseball','league','team','game','soccer','nike','tennis','golf','rugby']
words = read_input(sys.stdin);
for word in words:
for currIndex in range(len(word)-1):
for nextWord in word[currIndex+1:]:
currWord = word[currIndex];
if currWord != nextWord and currWord in topWords_list:
w = currWord+ "-" +nextWord;
print ('%s%s%d' % (w, separator, 1));
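# Illustrative example (not in the original source): after cleaning, a tweet
# reduced to the tokens ["nike", "running", "shoes"] makes the mapper emit
#   nike-running<TAB>1
#   nike-shoes<TAB>1
# because only pairs whose first word is in topWords_list are counted; a
# downstream reducer then sums the counts per co-occurrence key.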
def read_input(file):
for line in file:
line = strip_text(line.strip()); # strip the line and remove unnecessary parts
yield line.split(); # split the line
# def strip_line(text): # strip smileys,emojis,urls from tweet text to get only text
# smileys = """:-) :) :o) :] :3 :c) :> =] 8) =) :} :^)
# :D 8-D 8D x-D xD X-D XD =-D =D =-3 =3 B^D""".split();
# pattern = "|".join(map(re.escape, smileys));
# kaomojis = r'[^0-9A-Za-zぁ-んァ-ン一-龥ovっつ゜ニノ三二]' + '[\(∩ (]' + '[^0-9A-Za-zぁ-んァ-ン一-龥ヲ-゚\)∩ )]' + '[\)∩ )]' + '[^0-9A-Za-zぁ-んァ-ン一-龥ovっつ゜ニノ三二]*'
# text = text.strip().lower();
# link_regex = re.compile('((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)', re.DOTALL)
# links = re.findall(link_regex, text)
# for link in links:
# text = text.replace(link[0], ' ');
# text = re.sub(pattern, "", text);
# text = re.sub(kaomojis, "", text);
# text = strip_emoji(text);
# text = re.sub(r'@\w+ ?', '', text); # remove mentions
# for separator in string.punctuation: # remove punctuations
# if separator != '#':
# text = text.replace(separator, ' ');
# # nltk.download("stopwords");
# text = ' '.join([word for word in text.split() if word not in (stopwords.words('english'))]); # remove stopwords
# text = ' '.join([word for word in text.split() if not word.endswith("…")]); # remove …
# return text; # remove leading and trailing white space
def strip_text(text):
smileys = """:-) :) :o) :] :3 :c) :> =] 8) =) :} :^)
:D 8-D 8D x-D xD X-D XD =-D =D =-3 =3 B^D""".split();
pattern = "|".join(map(re.escape, smileys));
kaomojis = r'[^0-9A-Za-zぁ-んァ-ン一-龥ovっつ゜ニノ三二]' + '[\(∩ (]' + '[^0-9A-Za-zぁ-んァ-ン一-龥ヲ-゚\)∩ )]' + '[\)∩ )]' + '[^0-9A-Za-zぁ-んァ-ン一-龥ovっつ゜ニノ三二]*'
text=text.lower();
text = re.sub(r'((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)', '', text); # remove links
    text = ' '.join([word for word in text.split() if not word.endswith("…")]); # remove truncated words ending in …
text = re.sub(pattern, "", text);
text = re.sub(kaomojis, "", text);
text = strip_emoji(text);
text = re.sub(r'[\)\( \/\.-\][0-9]+[ \)\/\.\(-]', ' ', text); # replace (123)-> 123
text = re.sub(r'\([^()]*\)', '', text); # replace (vishal)-> vishal
text = re.sub(r'[.,-_]', ' ', text); # remove . ,
text = re.sub(r'@\w+ ?', ' ', text); # remove mentions
text = text.replace("'s", ""); # replace vishal's->vishal
text = re.sub(r'\W+', ' ', text); # replace vishal123@@@-> vishal123
text = re.sub(r'[ ][0-9]+ ', '', text); # remove
text = ' '.join([word for word in text.split() if word not in (stopwords.words('english'))]); # remove stopwords
text = ' '.join(word for word in tc.document(text).lemming().data); #do lemming
text = ' '.join( [w for w in text.split() if len(w)>1] ); # remove single character words a->''
return text;
def strip_emoji(string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', string);
if __name__ == "__main__":
main() | 48.829545 | 144 | 0.518734 | [
"Apache-2.0"
] | vishalgawade/Data-Aggregation-Big-Data-Analysis-and-Visualization-of-Twitter-New-York-Times-Common-Crawl | part3/Commoncrawl/Code/mapper_latest_coocureence_cc.py | 4,477 | Python |
# coding=utf-8
import os
from flask import jsonify
from lib._logging import logger
def gpio_info():
gpio_info = ['ERROR']
try:
gpio_info = os.popen("gpio readall|grep -v '\-\-\-'| grep -v 'Physical'|tr -s ' ' 2>/dev/null").read().replace('||', '|').splitlines()
except:
logger.error('Error getting gpio_info')
pins = {}
for line in gpio_info:
undef,BCM,wPi,Name,Mode,V,Physical,Physical2,V2,Mode2,Name2,wPi2,BCM2,undef2 = line.replace(' ', '').split('|')
if V is "0" or V is "0.0":
V = "0v"
if V is "1":
V = "3.3v"
pins[Physical] = {
'bcm_pin': BCM,
'wPi_pin': wPi,
'name': Name,
'mode': Mode,
'v': V,
}
pins[Physical2] = {
'bcm_pin': BCM2,
'wPi_pin': wPi2,
'name': Name2,
'mode': Mode2,
'v': V2,
}
return jsonify(pins)
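# Illustrative parsing note (not part of the original source): every data row of
# `gpio readall` describes two physical pins, so after the pipes and spaces are
# normalised each row splits into
#   '', BCM, wPi, Name, Mode, V, Physical, Physical2, V2, Mode2, Name2, wPi2, BCM2, ''
# and both physical pin numbers end up as keys of the JSON object returned above.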
def set_gpio_mode(pin,mode):
result = False
try:
os.popen('gpio mode ' + pin + " " + mode).read()
result = True
except:
result = False
logger.debug('set_gpio_mode(' + str(pin) + ',' + str(mode) + ') returned ' + str(result))
return( jsonify(result) )
def set_gpio_pin(pin, status):
result = False
try:
        os.popen('gpio write ' + str(int(pin)) + ' ' + str(int(status))).read()
result = True
except:
result = False
logger.debug('set_gpio_pin(' + str(pin) + ',' + str(status) + ') returned ' + str(result))
return( jsonify(result) )
| 27.928571 | 142 | 0.507673 | [
"Apache-2.0"
] | bcarroll/PiControl | lib/gpio_utils.py | 1,564 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=9
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.X.on(input_qubit[1])) # number=5
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=4
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=6
c.append(cirq.X.on(input_qubit[1])) # number=7
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=8
c.append(cirq.X.on(input_qubit[1])) # number=3
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq85.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | 27.596774 | 77 | 0.696084 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | data/p2DJ/New/R2/benchmark/startCirq85.py | 1,711 | Python |
from datetime import datetime
from infrastructure.cqrs.decorators.dtoclass import dtoclass
@dtoclass
class GetDataOperationJobListDto:
Id: int = None
JobId: int = None
DataOperationId: str = None
DataOperationName: str = None
Cron: str = None
StartDate: datetime = None
EndDate: datetime = None
NextRunTime: datetime = None
CreationDate: datetime = None
LastUpdatedDate: datetime = None
IsDeleted: int = None
| 25.277778 | 60 | 0.718681 | [
"MIT"
] | PythonDataIntegrator/pythondataintegrator | src/api/domain/operation/GetDataOperationJobList/GetDataOperationJobListDto.py | 455 | Python |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 8 21:45:27 2018
@author: pilla
"""
| 10.625 | 35 | 0.552941 | [
"BSD-2-Clause"
] | psnipiv/LicenseGenerator | LicenseGenerator/accounts/tests/__init__.py | 85 | Python |
from datetime import datetime
from integration_tests.utils import populate_mock_db_blocks
from src.models import Challenge, ChallengeType, User, UserBankAccount, UserChallenge
from src.queries.get_undisbursed_challenges import get_undisbursed_challenges
from src.utils.db_session import get_db
def setup_challenges(app):
with app.app_context():
db = get_db()
populate_mock_db_blocks(db, 99, 110)
challenges = [
Challenge(
id="test_challenge_1",
type=ChallengeType.numeric,
amount="5",
step_count=3,
active=False,
starting_block=100,
),
Challenge(
id="test_challenge_2",
type=ChallengeType.boolean,
amount="5",
active=True,
starting_block=100,
),
Challenge(
id="test_challenge_3",
type=ChallengeType.aggregate,
amount="5",
active=True,
starting_block=100,
),
]
non_current_users = [
User(
blockhash=hex(99),
blocknumber=99,
txhash=f"xyz{i}",
user_id=i,
is_current=False,
handle=f"TestHandle{i}",
handle_lc=f"testhandle{i}",
wallet=f"0x{i}",
is_creator=False,
is_verified=False,
name=f"test_name{i}",
created_at=datetime.now(),
updated_at=datetime.now(),
)
for i in range(7)
]
users = [
User(
blockhash=hex(99),
blocknumber=99,
txhash=f"xyz{i}",
user_id=i,
is_current=True,
handle=f"TestHandle{i}",
handle_lc=f"testhandle{i}",
wallet=f"0x{i}",
is_creator=False,
is_verified=False,
name=f"test_name{i}",
created_at=datetime.now(),
updated_at=datetime.now(),
)
for i in range(7)
]
user_bank_accounts = [
UserBankAccount(
signature=f"0x{i}",
ethereum_address=users[i].wallet,
bank_account=f"0x{i}",
created_at=datetime.now(),
)
for i in range(7)
]
user_challenges = [
UserChallenge(
challenge_id="test_challenge_1",
user_id=1,
specifier="1",
is_complete=False,
current_step_count=1,
),
UserChallenge(
challenge_id="test_challenge_1",
user_id=2,
specifier="2",
is_complete=True,
current_step_count=3,
completed_blocknumber=100,
),
UserChallenge(
challenge_id="test_challenge_2",
user_id=3,
specifier="3",
is_complete=False,
),
UserChallenge(
challenge_id="test_challenge_2",
user_id=4,
specifier="4",
is_complete=True,
completed_blocknumber=102,
),
UserChallenge(
challenge_id="test_challenge_2",
user_id=5,
specifier="5",
is_complete=True,
completed_blocknumber=102,
),
UserChallenge(
challenge_id="test_challenge_3",
user_id=6,
specifier="6",
is_complete=True,
completed_blocknumber=100,
),
]
with db.scoped_session() as session:
session.add_all(challenges)
session.flush()
session.add_all(non_current_users)
session.add_all(users)
session.add_all(user_bank_accounts)
session.add_all(user_challenges)
def test_undisbursed_challenges(app):
setup_challenges(app)
with app.app_context():
db = get_db()
with db.scoped_session() as session:
# Test that all undisbursed challenges are returned in order
undisbursed = get_undisbursed_challenges(
session,
{"user_id": None, "limit": 10, "offset": 0, "completed_blocknumber": 99},
)
expected = [
{
"challenge_id": "test_challenge_3",
"user_id": 6,
"specifier": "6",
"amount": "5",
"completed_blocknumber": 100,
"handle": "TestHandle6",
"wallet": "0x6",
},
{
"challenge_id": "test_challenge_2",
"user_id": 4,
"specifier": "4",
"amount": "5",
"completed_blocknumber": 102,
"handle": "TestHandle4",
"wallet": "0x4",
},
{
"challenge_id": "test_challenge_2",
"user_id": 5,
"specifier": "5",
"amount": "5",
"completed_blocknumber": 102,
"handle": "TestHandle5",
"wallet": "0x5",
},
]
assert expected == undisbursed
# Test that it filters correctly by user_id
undisbursed = get_undisbursed_challenges(
session,
{"user_id": 6, "limit": 10, "offset": 0, "completed_blocknumber": 99},
)
expected = [
{
"challenge_id": "test_challenge_3",
"user_id": 6,
"specifier": "6",
"amount": "5",
"completed_blocknumber": 100,
"handle": "TestHandle6",
"wallet": "0x6",
},
]
assert expected == undisbursed
# Test that it filters correctly by user_id & completed blocknumber
undisbursed = get_undisbursed_challenges(
session,
{"user_id": 6, "limit": 10, "offset": 0, "completed_blocknumber": 101},
)
expected = []
assert expected == undisbursed
| 30.790476 | 85 | 0.462264 | [
"Apache-2.0"
] | RahulBansal123/audius-protocol | discovery-provider/integration_tests/queries/test_undisbursed_challeges.py | 6,466 | Python |
from collections import OrderedDict
from models.base_model import BaseModel
from optimizers.radam import RAdam
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import accuracy_score
import sys
class double_conv(nn.Module):
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class inconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(inconv, self).__init__()
self.conv = double_conv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.mpconv = nn.Sequential(
nn.MaxPool2d(2),
double_conv(in_ch, out_ch)
)
def forward(self, x):
x = self.mpconv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
self.convtrans = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
self.bilinear = bilinear
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2):
if self.bilinear:
x1 = nn.functional.interpolate(x1, scale_factor=2, mode='bilinear', align_corners=True)
else:
x1 = self.convtrans(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX//2,
diffY // 2, diffY - diffY//2))
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
def forward(self, x):
x = self.conv(x)
return x
class UNet(nn.Module):
"""Standard U-Net architecture network.
Input params:
n_channels: Number of input channels (usually 1 for a grayscale image).
n_classes: Number of output channels (2 for binary segmentation).
"""
def __init__(self, n_channels, n_classes):
super().__init__()
self.inc = inconv(n_channels, 64)
self.down1 = down(64, 128)
self.down2 = down(128, 256)
self.down3 = down(256, 512)
self.down4 = down(512, 512)
self.up1 = up(1024, 256)
self.up2 = up(512, 128)
self.up3 = up(256, 64)
self.up4 = up(128, 64)
self.outc = outconv(64, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return x
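# Minimal shape sanity check (an illustrative sketch with a hypothetical helper
# name, not part of the original module): a 1-channel input keeps its spatial
# size and comes out with n_classes channels, matching the CrossEntropyLoss
# used during training.
def _unet_shape_check(height=64, width=64):
    net = UNet(n_channels=1, n_classes=2)
    x = torch.randn(1, 1, height, width)  # (batch, channels, H, W)
    with torch.no_grad():
        logits = net(x)
    assert logits.shape == (1, 2, height, width)
    return logits.shape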
class Segmentation2DModel(BaseModel):
def __init__(self, configuration):
"""Initialize the model.
"""
super().__init__(configuration)
self.loss_names = ['segmentation']
self.network_names = ['unet']
self.netunet = UNet(1, 2)
self.netunet = self.netunet.to(self.device)
if self.is_train: # only defined during training time
self.criterion_loss = torch.nn.CrossEntropyLoss()
self.optimizer = torch.optim.Adam(self.netunet.parameters(), lr=configuration['lr'])
self.optimizers = [self.optimizer]
# storing predictions and labels for validation
self.val_predictions = []
self.val_labels = []
self.val_images = []
def forward(self):
"""Run forward pass.
"""
self.output = self.netunet(self.input)
def backward(self):
"""Calculate losses; called in every training iteration.
"""
self.loss_segmentation = self.criterion_loss(self.output, self.label)
def optimize_parameters(self):
"""Calculate gradients and update network weights.
"""
self.loss_segmentation.backward() # calculate gradients
self.optimizer.step()
self.optimizer.zero_grad()
torch.cuda.empty_cache()
def test(self):
super().test() # run the forward pass
# save predictions and labels as flat tensors
self.val_images.append(self.input)
self.val_predictions.append(self.output)
self.val_labels.append(self.label)
def post_epoch_callback(self, epoch, visualizer):
self.val_predictions = torch.cat(self.val_predictions, dim=0)
predictions = torch.argmax(self.val_predictions, dim=1)
predictions = torch.flatten(predictions).cpu()
self.val_labels = torch.cat(self.val_labels, dim=0)
labels = torch.flatten(self.val_labels).cpu()
self.val_images = torch.squeeze(torch.cat(self.val_images, dim=0)).cpu()
# Calculate and show accuracy
val_accuracy = accuracy_score(labels, predictions)
metrics = OrderedDict()
metrics['accuracy'] = val_accuracy
visualizer.plot_current_validation_metrics(epoch, metrics)
print('Validation accuracy: {0:.3f}'.format(val_accuracy))
# Here you may do something else with the validation data such as
# displaying the validation images or calculating the ROC curve
self.val_images = []
self.val_predictions = []
self.val_labels = [] | 29.769231 | 99 | 0.600689 | [
"MIT"
] | Semere-Gr/PyTorchProjectFramework | models/segmentation_model.py | 5,805 | Python |
def findMaxSubarraySum(l):
    # Kadane's algorithm: track the best subarray sum ending at the current index
    # (tempSum) and the best sum seen so far (maxSum).
    maxSum = l[0]
    tempSum = l[0]
    for i in range(1, len(l)):  # start at index 1; index 0 is already counted above
        tempSum = max(l[i], tempSum + l[i])
        if maxSum < tempSum:
            maxSum = tempSum
    return maxSum
if __name__ == "__main__":
print(findMaxSubarraySum([-2, 3, 4, 2]))
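    # Hedged extra check (added): for an all-negative list the maximum subarray is the
    # single largest element, not an empty sum.
    print(findMaxSubarraySum([-5, -2, -9]))  # expected output: -2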
| 19 | 45 | 0.526316 | [
"MIT"
] | itsvinayak/cosmos | code/kadanes.py | 304 | Python |
import warnings
import torch
import kornia
import numpy as np
class MetricMAD:
def __call__(self, pred, true):
return (pred - true).abs_().mean() * 1e3
class MetricBgrMAD:
def __call__(self, pred, true):
bgr_mask = true == 0
return (pred[bgr_mask] - true[bgr_mask]).abs_().mean() * 1e3
class MetricFgrMAD:
def __call__(self, pred, true):
fgr_mask = true > 0
return (pred[fgr_mask] - true[fgr_mask]).abs_().mean() * 1e3
class MetricMSE:
def __call__(self, pred, true):
return ((pred - true) ** 2).mean() * 1e3
class MetricGRAD:
def __init__(self, sigma=1.4):
self.filter_x, self.filter_y = self.gauss_filter(sigma)
self.filter_x = torch.from_numpy(self.filter_x).unsqueeze(0).cuda()
self.filter_y = torch.from_numpy(self.filter_y).unsqueeze(0).cuda()
def __call__(self, pred, true):
true_grad = self.gauss_gradient(true)
pred_grad = self.gauss_gradient(pred)
return ((true_grad - pred_grad) ** 2).sum() / 1000
def gauss_gradient(self, img):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
warnings.warn("deprecated", DeprecationWarning)
img_filtered_x = kornia.filters.filter2D(img[None, None, :, :], self.filter_x, border_type='replicate')[0, 0]
img_filtered_y = kornia.filters.filter2D(img[None, None, :, :], self.filter_y, border_type='replicate')[0, 0]
return (img_filtered_x ** 2 + img_filtered_y ** 2).sqrt()
@staticmethod
def gauss_filter(sigma, epsilon=1e-2):
half_size = np.ceil(sigma * np.sqrt(-2 * np.log(np.sqrt(2 * np.pi) * sigma * epsilon)))
        size = int(2 * half_size + 1)  # np.int was removed from NumPy; the builtin int is equivalent here
# create filter in x axis
filter_x = np.zeros((size, size))
for i in range(size):
for j in range(size):
filter_x[i, j] = MetricGRAD.gaussian(i - half_size, sigma) * MetricGRAD.dgaussian(
j - half_size, sigma)
# normalize filter
norm = np.sqrt((filter_x ** 2).sum())
filter_x = filter_x / norm
filter_y = np.transpose(filter_x)
return filter_x, filter_y
@staticmethod
def gaussian(x, sigma):
return np.exp(-x ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 * np.pi))
@staticmethod
def dgaussian(x, sigma):
return -x * MetricGRAD.gaussian(x, sigma) / sigma ** 2
class MetricDTSSD:
def __call__(self, pred_t, pred_tm1, true_t, true_tm1):
dtSSD = ((pred_t - pred_tm1) - (true_t - true_tm1)) ** 2
dtSSD = dtSSD.sum() / true_t.numel()
dtSSD = dtSSD.sqrt()
return dtSSD * 1e2
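# Hedged usage sketch (added, not from the original evaluation code): these metrics
# expect alpha mattes as float tensors in [0, 1]; MetricMAD reports the mean absolute
# difference scaled by 1e3. The shapes and helper name are illustrative only.
def _example_mad():
    pred = torch.rand(256, 256)
    true = (torch.rand(256, 256) > 0.5).float()
    return MetricMAD()(pred, true)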
| 33.111111 | 121 | 0.608128 | [
"MIT"
] | ivandrej/checkmatte | evaluation/evaluation_metrics.py | 2,682 | Python |
"""Calculate the mean and standard deviation (per channel) over all images in a dataset
"""
# MIT License
#
# Copyright (c) 2017 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import random
import tensorflow as tf
import numpy as np
import argparse
import facenet
def main(args):
np.random.seed(seed=args.seed)
random.seed(args.seed)
train_set = facenet.get_dataset(args.data_dir)
result_filename = os.path.join(os.path.expanduser(args.data_dir), 'statistics.txt')
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
# Get a list of image paths and their labels
image_list, _ = facenet.get_image_paths_and_labels(train_set)
nrof_images = len(image_list)
assert nrof_images>0, 'The dataset should not be empty'
input_queue = tf.train.string_input_producer(image_list, num_epochs=None,
shuffle=False, seed=None, capacity=32)
nrof_preprocess_threads = 4
images = []
for _ in range(nrof_preprocess_threads):
filename = input_queue.dequeue()
file_contents = tf.read_file(filename)
image = tf.image.decode_image(file_contents)
            image = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)
#pylint: disable=no-member
image.set_shape((args.image_size, args.image_size, 3))
image = tf.cast(image, tf.float32)
images.append((image,))
image_batch = tf.train.batch_join(images, batch_size=100, allow_smaller_final_batch=True)
#mean = tf.reduce_mean(image_batch, reduction_indices=[0,1,2])
m, v = tf.nn.moments(image_batch, [1,2])
mean = tf.reduce_mean(m, 0)
variance = tf.reduce_mean(v, 0)
# Start running operations on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
with sess.as_default():
# Training and validation loop
print('Running training')
nrof_batches = nrof_images // args.batch_size
#nrof_batches = 20
means = np.zeros(shape=(nrof_batches, 3), dtype=np.float32)
variances = np.zeros(shape=(nrof_batches, 3), dtype=np.float32)
for i in range(nrof_batches):
means[i,:], variances[i,:] = sess.run([mean, variance])
if (i+1)%10==0:
                    print('Batch: %5d/%5d, Mean: %s, Variance: %s' % (i+1, nrof_batches, np.array_str(np.mean(means[:i+1,:],axis=0)), np.array_str(np.mean(variances[:i+1,:],axis=0))))
dataset_mean = np.mean(means,axis=0)
dataset_variance = np.mean(variances,axis=0)
print('Final mean: %s' % np.array_str(dataset_mean))
print('Final variance: %s' % np.array_str(dataset_variance))
with open(result_filename, 'w') as text_file:
print('Writing result to %s' % result_filename)
text_file.write('Mean: %.5f, %.5f, %.5f\n' % (dataset_mean[0], dataset_mean[1], dataset_mean[2]))
text_file.write('Variance: %.5f, %.5f, %.5f\n' % (dataset_variance[0], dataset_variance[1], dataset_variance[2]))
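# Hedged note (added, not in the original script): averaging per-batch variances, as in
# main() above, only approximates the dataset variance because it ignores the spread of
# the batch means themselves. A numpy-only sketch of the same aggregation, with
# illustrative batch counts:
def _aggregate_batch_moments_example(nrof_batches=20):
    batch_means = np.random.rand(nrof_batches, 3)      # one RGB mean per batch
    batch_variances = np.random.rand(nrof_batches, 3)  # one RGB variance per batch
    dataset_mean = np.mean(batch_means, axis=0)
    dataset_variance = np.mean(batch_variances, axis=0)
    return dataset_mean, dataset_variance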
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--logs_base_dir', type=str,
help='Directory where to write event logs.', default='~/logs/facenet')
parser.add_argument('--models_base_dir', type=str,
help='Directory where to write trained models and checkpoints.', default='~/models/facenet')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--pretrained_model', type=str,
help='Load a pretrained model before training starts.')
parser.add_argument('--data_dir', type=str,
help='Path to the data directory containing aligned face patches.',
default='~/datasets/casia/casia_maxpy_mtcnnalign_182_160')
parser.add_argument('--model_def', type=str,
help='Model definition. Points to a module containing the definition of the inference graph.', default='models.inception_resnet_v1')
parser.add_argument('--max_nrof_epochs', type=int,
help='Number of epochs to run.', default=500)
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=90)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--epoch_size', type=int,
help='Number of batches per epoch.', default=1000)
parser.add_argument('--embedding_size', type=int,
help='Dimensionality of the embedding.', default=128)
parser.add_argument('--random_crop',
help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')
parser.add_argument('--random_flip',
help='Performs random horizontal flipping of training images.', action='store_true')
parser.add_argument('--random_rotate',
help='Performs random rotations of training images.', action='store_true')
parser.add_argument('--keep_probability', type=float,
help='Keep probability of dropout for the fully connected layer(s).', default=1.0)
parser.add_argument('--weight_decay', type=float,
help='L2 weight regularization.', default=0.0)
parser.add_argument('--decov_loss_factor', type=float,
help='DeCov loss factor.', default=0.0)
parser.add_argument('--center_loss_factor', type=float,
help='Center loss factor.', default=0.0)
parser.add_argument('--center_loss_alfa', type=float,
help='Center update rate for center loss.', default=0.95)
parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],
help='The optimization algorithm to use', default='ADAGRAD')
parser.add_argument('--learning_rate', type=float,
help='Initial learning rate. If set to a negative value a learning rate ' +
'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.1)
parser.add_argument('--learning_rate_decay_epochs', type=int,
help='Number of epochs between learning rate decay.', default=100)
parser.add_argument('--learning_rate_decay_factor', type=float,
help='Learning rate decay factor.', default=1.0)
parser.add_argument('--moving_average_decay', type=float,
help='Exponential decay for tracking of training parameters.', default=0.9999)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
parser.add_argument('--nrof_preprocess_threads', type=int,
help='Number of preprocessing (data loading and augmentation) threads.', default=4)
parser.add_argument('--log_histograms',
help='Enables logging of weight/bias histograms in tensorboard.', action='store_true')
parser.add_argument('--learning_rate_schedule_file', type=str,
help='File containing the learning rate schedule that is used when learning_rate is set to to -1.', default='data/learning_rate_schedule.txt')
parser.add_argument('--filter_filename', type=str,
help='File containing image data used for dataset filtering', default='')
parser.add_argument('--filter_percentile', type=float,
help='Keep only the percentile images closed to its class center', default=100.0)
parser.add_argument('--filter_min_nrof_images_per_class', type=int,
help='Keep only the classes with this number of examples or more', default=0)
# Parameters for validation on LFW
parser.add_argument('--lfw_pairs', type=str,
help='The file containing the pairs to use for validation.', default='data/pairs.txt')
parser.add_argument('--lfw_file_ext', type=str,
help='The file extension for the LFW dataset.', default='png', choices=['jpg', 'png'])
parser.add_argument('--lfw_dir', type=str,
help='Path to the data directory containing aligned face patches.', default='')
parser.add_argument('--lfw_batch_size', type=int,
help='Number of images to process in a batch in the LFW test set.', default=100)
parser.add_argument('--lfw_nrof_folds', type=int,
help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| 53.125654 | 180 | 0.685129 | [
"MIT"
] | AifChain/deepface | src/generative/calculate_dataset_normalization.py | 10,147 | Python |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2020 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import io
import os.path
class File:
"""A parameter object used for :meth:`abc.Messageable.send`
for sending file objects.
.. note::
File objects are single use and are not meant to be reused in
multiple :meth:`abc.Messageable.send`s.
Attributes
-----------
fp: Union[:class:`str`, :class:`io.BufferedIOBase`]
A file-like object opened in binary mode and read mode
or a filename representing a file in the hard drive to
open.
.. note::
If the file-like object passed is opened via ``open`` then the
modes 'rb' should be used.
To pass binary data, consider usage of ``io.BytesIO``.
filename: Optional[:class:`str`]
The filename to display when uploading to Discord.
If this is not given then it defaults to ``fp.name`` or if ``fp`` is
a string then the ``filename`` will default to the string given.
spoiler: :class:`bool`
Whether the attachment is a spoiler.
"""
__slots__ = ("fp", "filename", "_original_pos", "_owner", "_closer")
def __init__(self, fp, filename=None, *, spoiler=False):
self.fp = fp
if isinstance(fp, io.IOBase):
if not (fp.seekable() and fp.readable()):
raise ValueError(
"File buffer {!r} must be seekable and readable".format(fp)
)
self.fp = fp
self._original_pos = fp.tell()
self._owner = False
else:
self.fp = open(fp, "rb")
self._original_pos = 0
self._owner = True
# aiohttp only uses two methods from IOBase
# read and close, since I want to control when the files
# close, I need to stub it so it doesn't close unless
# I tell it to
self._closer = self.fp.close
self.fp.close = lambda: None
if filename is None:
if isinstance(fp, str):
_, self.filename = os.path.split(fp)
else:
self.filename = getattr(fp, "name", None)
else:
self.filename = filename
if (
spoiler
and self.filename is not None
and not self.filename.startswith("SPOILER_")
):
self.filename = "SPOILER_" + self.filename
def reset(self, *, seek=True):
# The `seek` parameter is needed because
# the retry-loop is iterated over multiple times
# starting from 0, as an implementation quirk
# the resetting must be done at the beginning
# before a request is done, since the first index
# is 0, and thus false, then this prevents an
# unnecessary seek since it's the first request
# done.
if seek:
self.fp.seek(self._original_pos)
def close(self):
self.fp.close = self._closer
if self._owner:
self._closer()
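def _example_in_memory_file():
    """Hedged example (added, not part of discord.py): build a spoiler-tagged
    attachment from in-memory bytes. Actually sending it still requires a
    Messageable and a running event loop, which is outside this sketch.
    """
    data = io.BytesIO(b"example attachment contents")
    return File(data, filename="notes.txt", spoiler=True)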
| 34.432203 | 79 | 0.630076 | [
"MIT"
] | Kenvyra/discord.py | discord/file.py | 4,063 | Python |
# Dependencies
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KNeighborsClassifier
import pickle
def train_model():
# Read data set
spotify_df = pd.read_csv("spotify_data_v4.csv")
# Extract the necessary columns we need for machine learning model
spotify_df_clean = spotify_df[[
'genre', 'genre_label', 'loudness', 'energy',
'danceability', 'instrumentalness'
]]
# Assign X (data) and y (target)
X = spotify_df_clean.drop(["genre", "genre_label"], axis=1)
y = spotify_df_clean["genre_label"]
# Create train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Scale the data using MinMaxScaler
# Create a MinMaxScaler model and fit it to the training data
X_scaler = MinMaxScaler().fit(X_train)
# Transform the training and testing data using the X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
return X_train_scaled, X_test_scaled, y_train, y_test
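# Hedged continuation sketch (added): KNeighborsClassifier and pickle are imported at the
# top of this file but unused as written; this shows one way the scaled splits from
# train_model() could become a saved classifier. n_neighbors and the output filename are
# illustrative assumptions.
def fit_and_save_knn(model_path="spotify_knn.pkl", n_neighbors=9):
    X_train_scaled, X_test_scaled, y_train, y_test = train_model()
    knn = KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X_train_scaled, y_train)
    print("Test accuracy: {:.3f}".format(knn.score(X_test_scaled, y_test)))
    with open(model_path, "wb") as f:
        pickle.dump(knn, f)
    return knn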
def scale_input(score_list):
# Read data set
spotify_df = pd.read_csv("spotify_data_v4.csv")
# Extract the necessary columns we need for machine learning model
spotify_df_clean = spotify_df[[
'genre', 'genre_label', 'loudness', 'energy',
'danceability', 'instrumentalness'
]]
# Assign X (data) and y (target)
X = spotify_df_clean.drop(["genre", "genre_label"], axis=1)
y = spotify_df_clean["genre_label"]
# Create train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Scale the data using MinMaxScaler
# Create a MinMaxScaler model and fit it to the training data
X_scaler = MinMaxScaler().fit(X_train)
    # Scale the input with X_scaler, the same scaler that was fitted on the training data
score_list_scaled = X_scaler.transform([score_list])
return score_list_scaled | 34.883333 | 107 | 0.718586 | [
"MIT"
] | ealejo1/bigdata-machine-learning-challenge | model.py | 2,093 | Python |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.BAN/Sans_8/udhr_Latn.BAN_Sans_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.1 | 71 | 0.810631 | [
"BSD-3-Clause"
] | antoinecarme/pdf_to_json_tests | data/multilingual/Latn.BAN/Sans_8/pdf_to_json_test_Latn.BAN_Sans_8.py | 301 | Python |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM and its contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Authors: Diego M. Rodriguez <[email protected]>
"""Helper for running the notebooks as unit tests.
Convenience script for running the notebooks as individual `unittest` tests
using the standard Python facilites. By default, only the notebooks under
`reference/` are automatically discovered (can be modified via the
`NOTEBOOK_PATH` variable).
The test can be run by using the regular unittest facilities from the root
folder of the repository:
python -m unittest --verbose
python -m unittest utils.test.test_tutorials.TutorialsTestCase.\
test_reference_algorithms_bernstein_vazirani_ipynb
Tested under the following Jupyter versions:
ipython==6.3.1
nbconvert==5.3.1
nbformat==4.4.0
"""
import glob
import os
import re
import unittest
import warnings
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
# Configurable parameters.
# List of manual exclusion (for example, ["reference/foo/problematic.ipynb"]).
EXCLUDED_NOTEBOOKS = []
# Timeout (in seconds) for a single notebook.
TIMEOUT = int(os.getenv('TIMEOUT', 6000))  # env values arrive as strings; the preprocessor expects an integer number of seconds
# Jupyter kernel to execute the notebook in.
JUPYTER_KERNEL = os.getenv('JUPYTER_KERNEL', 'python3')
# Glob expression for discovering the notebooks.
NOTEBOOK_PATH = os.getenv('NOTEBOOK_PATH', 'qiskit/**/*.ipynb')
# Retrieve the notebooks recursively.
NOTEBOOK_FILENAMES = [f for f in sorted(glob.glob(NOTEBOOK_PATH,
recursive=True))
if not os.path.basename(f) in EXCLUDED_NOTEBOOKS]
class TutorialsTestCaseMeta(type):
"""
Metaclass that dynamically appends a "test_TUTORIAL_NAME" method to the
class.
"""
def __new__(mcs, name, bases, dict_):
def _str_to_identifier(string):
"""Convert a string to a valid Python identifier."""
return re.sub(r'\W|^(?=\d)', '_', string)
def create_test(filename):
"""Return a new test function."""
def test_function(self):
self._run_notebook(filename)
return test_function
for filename in NOTEBOOK_FILENAMES:
# Add a new "test_file_name_ipynb()" function to the test case.
test_name = "test_%s" % _str_to_identifier(filename)
dict_[test_name] = create_test(filename)
dict_[test_name].__doc__ = 'Test tutorial "%s"' % filename
return type.__new__(mcs, name, bases, dict_)
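def _example_generated_test_name(filename='qiskit/foo/bar.ipynb'):
    """Hedged illustration (added): the test method name the metaclass above would
    generate for a notebook path; the path used here is illustrative."""
    return 'test_%s' % re.sub(r'\W|^(?=\d)', '_', filename)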
class TutorialsTestCase(unittest.TestCase,
metaclass=TutorialsTestCaseMeta):
"""
TestCase for running the tutorials.
"""
@staticmethod
def _run_notebook(filename):
# Create the preprocessor.
execute_preprocessor = ExecutePreprocessor(timeout=TIMEOUT,
kernel_name=JUPYTER_KERNEL)
# Open the notebook.
file_path = os.path.dirname(os.path.abspath(filename))
with open(filename) as file_:
notebook = nbformat.read(file_, as_version=4)
with warnings.catch_warnings():
# Silence some spurious warnings.
warnings.filterwarnings('ignore', category=DeprecationWarning)
# Finally, run the notebook.
execute_preprocessor.preprocess(notebook,
{'metadata': {'path': file_path}})
| 35.315789 | 79 | 0.665176 | [
"Apache-2.0"
] | ignaziopedone/qiskit-iqx-tutorials | utils/test/test_tutorials.py | 4,026 | Python |
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.suit.SuitAttacks
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.interval.IntervalGlobal import Sequence, Wait, Func, LerpPosInterval, SoundInterval
from direct.interval.IntervalGlobal import ActorInterval, Parallel, LerpScaleInterval
from direct.interval.ProjectileInterval import ProjectileInterval
from direct.showbase.DirectObject import DirectObject
from panda3d.core import CollisionSphere, CollisionNode, CollisionHandlerEvent, NodePath, Vec3, VBase4, Point3, BitMask32, Vec4
from lib.coginvasion.distributed import DelayDelete
from lib.coginvasion.toon import ParticleLoader
from direct.actor.Actor import Actor
from lib.coginvasion.globals import CIGlobals
from direct.showutil.Rope import Rope
from direct.task import Task
import random
SuitAttackLengths = {'canned': 4, 'clipontie': 4,
'sacked': 4,
'glowerpower': 2.5,
'playhardball': 4,
'marketcrash': 4,
'pickpocket': 3,
'fountainpen': 3,
'hangup': 4,
'redtape': 4,
'powertie': 4,
'halfwindsor': 4,
'bite': 4,
'chomp': 4,
'evictionnotice': 4,
'restrainingorder': 4,
'razzledazzle': 3,
'buzzword': 6,
'jargon': 6,
'mumbojumbo': 6,
'filibuster': 6,
'doubletalk': 6,
'schmooze': 6,
'fingerwag': 6}
SuitAttackDamageFactors = {'canned': 5.5, 'clipontie': 13,
'sacked': 7,
'glowerpower': 5.5,
'playhardball': 5.5,
'marketcrash': 8,
'pickpocket': 10,
'fountainpen': 9,
'hangup': 7,
'redtape': 8,
'powertie': 13,
'halfwindsor': 13,
'bite': 7,
'chomp': 5.5,
'evictionnotice': 9,
'restrainingorder': 8,
'razzledazzle': 9,
'buzzword': 10,
'jargon': 9,
'mumbojumbo': 9.5,
'filibuster': 9.5,
'doubletalk': 10,
'schmooze': 8,
'fingerwag': 8}
def setEffectTexture(effect, texture, color):
particles = effect.getParticlesNamed('particles-1')
sparticles = loader.loadModel('phase_3.5/models/props/suit-particles.bam')
np = sparticles.find('**/' + texture)
particles.renderer.setColor(color)
particles.renderer.setFromNode(np)
class Attack(DirectObject):
notify = directNotify.newCategory('Attack')
attack = 'attack'
def __init__(self, attacksClass, suit):
self.attacksClass = attacksClass
self.suit = suit
self.suitTrack = None
self.attackName2attackId = {}
for index in range(len(self.attacksClass.attackName2attackClass.keys())):
self.attackName2attackId[SuitAttackLengths.keys()[index]] = index
return
def getAttackId(self, attackStr):
return self.attackName2attackId[attackStr]
def finishedAttack(self):
messenger.send(self.attacksClass.doneEvent)
def interruptAttack(self):
self.cleanup()
def cleanup(self):
if self.suitTrack != None:
self.ignore(self.suitTrack.getDoneEvent())
self.suitTrack.finish()
DelayDelete.cleanupDelayDeletes(self.suitTrack)
self.suitTrack = None
self.attack = None
self.suit = None
self.attacksClass = None
self.attackName2attackId = None
return
class ThrowAttack(Attack):
notify = directNotify.newCategory('ThrowAttack')
attack = 'throw'
def __init__(self, attacksClass, suit):
Attack.__init__(self, attacksClass, suit)
self.weapon_state = None
self.weapon = None
self.wss = None
self.wsnp = None
self.suitTrack = None
self.weaponSfx = None
self.throwTrajectory = None
self.targetX = None
self.targetY = None
self.targetZ = None
self.startNP = None
return
def handleWeaponCollision(self, entry):
if self.suit:
self.suit.sendUpdate('toonHitByWeapon', [self.getAttackId(self.attack), base.localAvatar.doId])
base.localAvatar.b_handleSuitAttack(self.getAttackId(self.attack), self.suit.doId)
self.suit.b_handleWeaponTouch()
def doAttack(self, weapon_path, weapon_scale, track_name, animation_name, collsphere_radius, weapon_coll_id, weapon_h=0, weapon_p=0, weapon_r=0, weapon_x=0, weapon_y=0, weapon_z=0, ts=0):
self.weapon_state = 'start'
if hasattr(self.suit, 'uniqueName'):
track_name = self.suit.uniqueName(track_name)
weapon_coll_id = self.suit.uniqueName(weapon_coll_id)
self.weapon = loader.loadModel(weapon_path)
self.weapon.setScale(weapon_scale)
self.weapon.setHpr(weapon_h, weapon_p, weapon_r)
self.weapon.setPos(weapon_x, weapon_y, weapon_z)
self.wss = CollisionSphere(0, 0, 0, collsphere_radius)
self.wss.setTangible(0)
self.targetX = self.attacksClass.target.getX(render)
self.targetY = self.attacksClass.target.getY(render)
self.targetZ = self.attacksClass.target.getZ(render)
self.suitTrack = Sequence(name=track_name)
if self.attack not in ('glowerpower', ):
self.weapon.reparentTo(self.suit.find('**/joint_Rhold'))
self.suitTrack.append(Wait(1.2))
self.suitTrack.append(Func(self.suit.setPlayRate, 1.0, animation_name))
if self.suit.type == 'C':
self.suitTrack.append(Wait(0))
else:
self.suitTrack.append(Wait(0.7))
self.suit.setPlayRate(2.0, animation_name)
self.suitTrack.append(Func(self.throwObject))
self.suitTrack.append(Wait(1.0))
self.suitTrack.append(Func(self.delWeapon))
else:
self.suitTrack.append(Wait(1))
self.suitTrack.append(Func(self.throwObject))
self.suitTrack.append(Wait(0.5))
self.suitTrack.append(Func(self.delWeapon))
self.suit.play(animation_name)
wsnode = CollisionNode(weapon_coll_id)
wsnode.addSolid(self.wss)
wsnode.setCollideMask(CIGlobals.WallBitmask)
self.wsnp = self.weapon.attachNewNode(wsnode)
self.suitTrack.setDoneEvent(self.suitTrack.getName())
self.acceptOnce(self.suitTrack.getDoneEvent(), self.finishedAttack)
self.suitTrack.delayDelete = DelayDelete.DelayDelete(self.suit, track_name)
self.suitTrack.start(ts)
def playWeaponSound(self):
if self.weapon and self.weaponSfx:
base.audio3d.attachSoundToObject(self.weaponSfx, self.suit)
self.weaponSfx.play()
def throwObject(self, projectile=True):
if not self.weapon:
return
self.acceptOnce('enter' + self.wsnp.node().getName(), self.handleWeaponCollision)
self.playWeaponSound()
if self.weapon:
self.weapon.wrtReparentTo(render)
self.weapon.setHpr(Vec3(0, 0, 0))
if self.attack not in ('glowerpower', ):
parent = self.suit.find('**/joint_Rhold')
else:
parent = self.suit.find('**/joint_head')
startNP = parent.attachNewNode('startNp')
startNP.lookAt(render, self.targetX, self.targetY, self.targetZ)
pathNP = NodePath('throwPath')
pathNP.reparentTo(startNP)
pathNP.setScale(render, 1.0)
pathNP.setPos(0, 50, 0)
if self.attack in ('clipontie', 'powertie', 'halfwindsor'):
self.weapon.setHpr(pathNP.getHpr(render))
if projectile == True:
self.throwTrajectory = ProjectileInterval(self.weapon, startPos=self.suit.find('**/joint_Rhold').getPos(render), endPos=pathNP.getPos(render), gravityMult=0.7, duration=1.0)
else:
self.weapon.setH(pathNP.getH(render))
self.throwTrajectory = LerpPosInterval(self.weapon, duration=0.5, pos=pathNP.getPos(render), startPos=startNP.getPos(render) + (0,
3,
0))
self.throwTrajectory.start()
self.weapon_state = 'released'
startNP.removeNode()
del startNP
pathNP.removeNode()
del pathNP
def interruptAttack(self):
if self.throwTrajectory:
if self.throwTrajectory.isStopped():
self.delWeapon()
def handleWeaponTouch(self):
if self.throwTrajectory:
self.throwTrajectory.pause()
self.throwTrajectory = None
self.delWeapon()
return
def delWeapon(self):
if self.weapon:
self.weapon.removeNode()
self.weapon = None
return
def cleanup(self):
Attack.cleanup(self)
self.targetX = None
self.targetY = None
self.targetZ = None
self.weapon_state = None
if self.weaponSfx:
self.weaponSfx.stop()
self.weaponSfx = None
if self.throwTrajectory:
self.throwTrajectory.pause()
self.throwTrajectory = None
self.delWeapon()
self.wss = None
if self.wsnp:
self.wsnp.node().clearSolids()
self.wsnp.removeNode()
self.wsnp = None
return
class CannedAttack(ThrowAttack):
notify = directNotify.newCategory('CannedAttack')
attack = 'canned'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_5/models/props/can.bam', 15, 'doCannedAttack', 'throw-object', 0.05, 'cannedWeaponSphere', weapon_r=180, ts=ts)
def playWeaponSound(self):
self.weaponSfx = base.audio3d.loadSfx('phase_5/audio/sfx/SA_canned_tossup_only.ogg')
ThrowAttack.playWeaponSound(self)
def handleWeaponTouch(self):
if self.weaponSfx:
self.weaponSfx.stop()
self.weaponSfx = None
ThrowAttack.handleWeaponTouch(self)
return
class HardballAttack(ThrowAttack):
notify = directNotify.newCategory('HardballAttack')
attack = 'playhardball'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_5/models/props/baseball.bam', 10, 'doHardballAttack', 'throw-object', 0.1, 'hardballWeaponSphere', weapon_z=-0.5, ts=ts)
def playWeaponSound(self):
self.weaponSfx = base.audio3d.loadSfx('phase_5/audio/sfx/SA_hardball_throw_only.ogg')
ThrowAttack.playWeaponSound(self)
def handleWeaponTouch(self):
if self.weaponSfx:
self.weaponSfx.stop()
self.weaponSfx = None
ThrowAttack.handleWeaponTouch(self)
return
class ClipOnTieAttack(ThrowAttack):
notify = directNotify.newCategory('ClipOnTieAttack')
attack = 'clipontie'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_3.5/models/props/clip-on-tie-mod.bam', 1, 'doClipOnTieAttack', 'throw-paper', 1.1, 'clipOnTieWeaponSphere', weapon_r=180, ts=ts)
def playWeaponSound(self):
self.weaponSfx = base.audio3d.loadSfx('phase_5/audio/sfx/SA_powertie_throw.ogg')
ThrowAttack.playWeaponSound(self)
class MarketCrashAttack(ThrowAttack):
notify = directNotify.newCategory('MarketCrashAttack')
attack = 'marketcrash'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_5/models/props/newspaper.bam', 3, 'doMarketCrashAttack', 'throw-paper', 0.35, 'marketCrashWeaponSphere', weapon_x=0.41, weapon_y=-0.06, weapon_z=-0.06, weapon_h=90, weapon_r=270, ts=ts)
def playWeaponSound(self):
self.weaponSfx = None
ThrowAttack.playWeaponSound(self)
return
class SackedAttack(ThrowAttack):
notify = directNotify.newCategory('SackedAttack')
attack = 'sacked'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_5/models/props/sandbag-mod.bam', 2, 'doSackedAttack', 'throw-paper', 1, 'sackedWeaponSphere', weapon_r=180, weapon_p=90, weapon_y=-2.8, weapon_z=-0.3, ts=ts)
def playWeaponSound(self):
self.weaponSfx = None
ThrowAttack.playWeaponSound(self)
return
class GlowerPowerAttack(ThrowAttack):
notify = directNotify.newCategory('GlowerPowerAttack')
attack = 'glowerpower'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_5/models/props/dagger.bam', 1, 'doGlowerPowerAttack', 'glower', 1, 'glowerPowerWeaponSphere', ts=ts)
def throwObject(self):
ThrowAttack.throwObject(self, False)
def playWeaponSound(self):
self.weaponSfx = base.audio3d.loadSfx('phase_5/audio/sfx/SA_glower_power.ogg')
ThrowAttack.playWeaponSound(self)
class PickPocketAttack(Attack):
notify = directNotify.newCategory('PickPocketAttack')
attack = 'pickpocket'
def __init__(self, attacksClass, suit):
Attack.__init__(self, attacksClass, suit)
self.dollar = None
self.pickSfx = None
return
def doAttack(self, ts=0):
self.dollar = loader.loadModel('phase_5/models/props/1dollar-bill-mod.bam')
self.dollar.setY(0.22)
self.dollar.setHpr(289.18, 252.75, 0.0)
if hasattr(self.suit, 'uniqueName'):
name = self.suit.uniqueName('doPickPocketAttack')
else:
name = 'doPickPocketAttack'
self.suitTrack = Parallel(ActorInterval(self.suit, 'pickpocket'), Sequence(Wait(0.4), Func(self.attemptDamage)), name=name)
self.suitTrack.setDoneEvent(self.suitTrack.getName())
self.acceptOnce(self.suitTrack.getDoneEvent(), self.finishedAttack)
self.suitTrack.delayDelete = DelayDelete.DelayDelete(self.suit, name)
self.suitTrack.start(ts)
def attemptDamage(self):
shouldDamage = False
suitH = self.suit.getH(render) % 360
myH = base.localAvatar.getH(render) % 360
if not -90.0 <= suitH - myH <= 90.0:
if base.localAvatar.getDistance(self.suit) <= 15.0:
shouldDamage = True
if shouldDamage:
self.playWeaponSound()
self.dollar.reparentTo(self.suit.find('**/joint_Rhold'))
self.suit.sendUpdate('toonHitByWeapon', [self.getAttackId(self.attack), base.localAvatar.doId])
base.localAvatar.b_handleSuitAttack(self.getAttackId(self.attack), self.suit.doId)
def playWeaponSound(self):
self.pickSfx = base.audio3d.loadSfx('phase_5/audio/sfx/SA_pick_pocket.ogg')
base.audio3d.attachSoundToObject(self.pickSfx, self.suit)
self.pickSfx.play()
def cleanup(self):
Attack.cleanup(self)
if self.pickSfx:
self.pickSfx.stop()
self.pickSfx = None
if self.dollar:
self.dollar.removeNode()
self.dollar = None
return
class FountainPenAttack(Attack):
notify = directNotify.newCategory('FountainPenAttack')
attack = 'fountainpen'
def __init__(self, attacksClass, suit):
Attack.__init__(self, attacksClass, suit)
self.pen = None
self.spray = None
self.splat = None
self.spraySfx = None
self.sprayParticle = None
self.sprayScaleIval = None
self.wsnp = None
return
def loadAttack(self):
self.pen = loader.loadModel('phase_5/models/props/pen.bam')
self.pen.reparentTo(self.suit.find('**/joint_Rhold'))
self.sprayParticle = ParticleLoader.loadParticleEffect('phase_5/etc/penSpill.ptf')
self.spray = loader.loadModel('phase_3.5/models/props/spray.bam')
self.spray.setColor(VBase4(0, 0, 0, 1))
self.splat = Actor('phase_3.5/models/props/splat-mod.bam', {'chan': 'phase_3.5/models/props/splat-chan.bam'})
self.splat.setColor(VBase4(0, 0, 0, 1))
self.sprayScaleIval = LerpScaleInterval(self.spray, duration=0.3, scale=(1,
20,
1), startScale=(1,
1,
1))
sphere = CollisionSphere(0, 0, 0, 0.5)
sphere.setTangible(0)
if hasattr(self.suit, 'uniqueName'):
collName = self.suit.uniqueName('fountainPenCollNode')
else:
collName = 'fountainPenCollNode'
collNode = CollisionNode(collName)
collNode.addSolid(sphere)
collNode.setCollideMask(CIGlobals.WallBitmask)
self.wsnp = self.spray.attachNewNode(collNode)
self.wsnp.setY(1)
def doAttack(self, ts=0):
self.loadAttack()
if hasattr(self.suit, 'uniqueName'):
name = self.suit.uniqueName('doFountainPenAttack')
else:
name = 'doFountainPenAttack'
self.suitTrack = Parallel(name=name)
self.suitTrack.append(ActorInterval(self.suit, 'fountainpen'))
self.suitTrack.append(Sequence(Wait(1.2), Func(self.acceptOnce, 'enter' + self.wsnp.node().getName(), self.handleSprayCollision), Func(self.playWeaponSound), Func(self.attachSpray), Func(self.sprayParticle.start, self.pen.find('**/joint_toSpray'), self.pen.find('**/joint_toSpray')), self.sprayScaleIval, Wait(0.5), Func(self.sprayParticle.cleanup), Func(self.spray.setScale, 1), Func(self.spray.reparentTo, hidden), Func(self.ignore, 'enter' + self.wsnp.node().getName())))
self.suitTrack.setDoneEvent(self.suitTrack.getName())
self.acceptOnce(self.suitTrack.getDoneEvent(), self.finishedAttack)
self.suitTrack.delayDelete = DelayDelete.DelayDelete(self.suit, name)
self.suitTrack.start(ts)
def attachSpray(self):
self.spray.reparentTo(self.pen.find('**/joint_toSpray'))
pos = self.spray.getPos(render)
hpr = self.spray.getHpr(render)
self.spray.reparentTo(render)
self.spray.setPos(pos)
self.spray.setHpr(hpr)
self.spray.setP(0)
if self.suit.type == 'C':
self.spray.setH(self.spray.getH() + 7.5)
self.spray.setTwoSided(True)
def handleSprayCollision(self, entry):
if self.suit:
self.suit.sendUpdate('toonHitByWeapon', [self.getAttackId(self.attack), base.localAvatar.doId])
base.localAvatar.b_handleSuitAttack(self.getAttackId(self.attack), self.suit.doId)
self.sprayScaleIval.pause()
def playWeaponSound(self):
self.spraySfx = base.audio3d.loadSfx('phase_5/audio/sfx/SA_fountain_pen.ogg')
base.audio3d.attachSoundToObject(self.spraySfx, self.pen)
self.spraySfx.play()
def cleanup(self):
Attack.cleanup(self)
if self.wsnp:
self.wsnp.node().clearSolids()
self.wsnp.removeNode()
self.wsnp = None
if self.pen:
self.pen.removeNode()
self.pen = None
if self.sprayParticle:
self.sprayParticle.cleanup()
self.sprayParticle = None
if self.spray:
self.spray.removeNode()
self.spray = None
if self.splat:
self.splat.cleanup()
self.splat = None
if self.sprayScaleIval:
self.sprayScaleIval.pause()
self.sprayScaleIval = None
self.spraySfx = None
return
class HangUpAttack(Attack):
notify = directNotify.newCategory('HangUpAttack')
attack = 'hangup'
def __init__(self, attacksClass, suit):
Attack.__init__(self, attacksClass, suit)
self.phone = None
self.receiver = None
self.collNP = None
self.phoneSfx = None
self.hangupSfx = None
self.shootIval = None
self.cord = None
self.receiverOutCord = None
self.phoneOutCord = None
return
def loadAttack(self):
self.phone = loader.loadModel('phase_3.5/models/props/phone.bam')
self.phone.setHpr(0, 0, 180)
        if self.suit.type == 'B':
            self.phone.setPos(0.7, 0.15, 0)
        elif self.suit.type == 'C':
            self.phone.setPos(0.25, 0, 0)
self.receiver = loader.loadModel('phase_3.5/models/props/receiver.bam')
self.receiver.reparentTo(self.phone)
self.cord = Rope()
self.cord.ropeNode.setUseVertexColor(1)
self.cord.ropeNode.setUseVertexThickness(1)
self.cord.setup(3, ({'node': self.phone, 'point': (0.8, 0, 0.2), 'color': (0, 0, 0, 1), 'thickness': 1000}, {'node': self.phone, 'point': (2, 0, 0), 'color': (0, 0, 0, 1), 'thickness': 1000}, {'node': self.receiver, 'point': (1.1, 0.25, 0.5), 'color': (0, 0, 0, 1), 'thickness': 1000}), [])
self.cord.setH(180)
self.phoneSfx = base.audio3d.loadSfx('phase_3.5/audio/sfx/SA_hangup.ogg')
base.audio3d.attachSoundToObject(self.phoneSfx, self.phone)
self.hangupSfx = base.audio3d.loadSfx('phase_3.5/audio/sfx/SA_hangup_place_down.ogg')
base.audio3d.attachSoundToObject(self.hangupSfx, self.phone)
collSphere = CollisionSphere(0, 0, 0, 2)
collSphere.setTangible(0)
collNode = CollisionNode('phone_shootout')
collNode.addSolid(collSphere)
collNode.setCollideMask(CIGlobals.WallBitmask)
self.collNP = self.phone.attachNewNode(collNode)
def doAttack(self, ts=0):
self.loadAttack()
if hasattr(self.suit, 'uniqueName'):
name = self.suit.uniqueName('doHangupAttack')
else:
name = 'doHangupAttack'
        if self.suit.type == 'A':
            delay2playSound = 1.0
            delayAfterSoundToPlaceDownReceiver = 0.2
            delayAfterShootToIgnoreCollisions = 1.0
            delay2PickUpReceiver = 1.0
            receiverInHandPos = Point3(-0.5, 0.5, -1)
        elif self.suit.type == 'B':
            delay2playSound = 1.5
            delayAfterSoundToPlaceDownReceiver = 0.7
            delayAfterShootToIgnoreCollisions = 1.0
            delay2PickUpReceiver = 1.5
            receiverInHandPos = Point3(-0.3, 0.5, -0.8)
        elif self.suit.type == 'C':
            delay2playSound = 1.0
            delayAfterSoundToPlaceDownReceiver = 1.15
            delayAfterShootToIgnoreCollisions = 1.0
            delay2PickUpReceiver = 1.5
            receiverInHandPos = Point3(-0.3, 0.5, -0.8)
self.suitTrack = Parallel(name=name)
self.suitTrack.append(ActorInterval(self.suit, 'phone'))
self.suitTrack.append(Sequence(Wait(delay2playSound), SoundInterval(self.phoneSfx, duration=2.1), Wait(delayAfterSoundToPlaceDownReceiver), Func(self.receiver.setPos, 0, 0, 0), Func(self.receiver.setH, 0.0), Func(self.receiver.reparentTo, self.phone), Func(self.acceptOnce, 'enter' + self.collNP.node().getName(), self.handleCollision), Func(self.shootOut), Parallel(SoundInterval(self.hangupSfx), Sequence(Wait(delayAfterShootToIgnoreCollisions), Func(self.ignore, 'enter' + self.collNP.node().getName())))))
self.suitTrack.append(Sequence(Func(self.phone.reparentTo, self.suit.find('**/joint_Lhold')), Func(self.cord.reparentTo, render), Wait(delay2PickUpReceiver), Func(self.receiver.reparentTo, self.suit.find('**/joint_Rhold')), Func(self.receiver.setPos, receiverInHandPos), Func(self.receiver.setH, 270.0)))
self.suitTrack.setDoneEvent(self.suitTrack.getName())
self.acceptOnce(self.suitTrack.getDoneEvent(), self.finishedAttack)
self.suitTrack.delayDelete = DelayDelete.DelayDelete(self.suit, name)
self.suitTrack.start(ts)
def handleCollision(self, entry):
if self.suit:
self.suit.sendUpdate('toonHitByWeapon', [self.getAttackId(self.attack), base.localAvatar.doId])
base.localAvatar.b_handleSuitAttack(self.getAttackId(self.attack), self.suit.doId)
def shootOut(self):
pathNode = NodePath('path')
pathNode.reparentTo(self.suit)
pathNode.setPos(0, 50, self.phone.getZ(self.suit))
self.collNP.reparentTo(render)
self.shootIval = LerpPosInterval(self.collNP, duration=1.0, pos=pathNode.getPos(render), startPos=self.phone.getPos(render))
self.shootIval.start()
pathNode.removeNode()
del pathNode
def cleanup(self):
Attack.cleanup(self)
if self.shootIval:
self.shootIval.pause()
self.shootIval = None
if self.cord:
self.cord.removeNode()
self.cord = None
if self.phone:
self.phone.removeNode()
self.phone = None
if self.receiver:
self.receiver.removeNode()
self.receiver = None
if self.collNP:
self.collNP.node().clearSolids()
self.collNP.removeNode()
self.collNP = None
if self.phoneSfx:
self.phoneSfx.stop()
self.phoneSfx = None
return
class BounceCheckAttack(ThrowAttack):
notify = directNotify.newCategory('BounceCheckAttack')
MaxBounces = 3
WeaponHitDistance = 0.5
def __init__(self, attacksClass, suit):
ThrowAttack.__init__(self, attacksClass, suit)
self.attack = 'bouncecheck'
self.bounceSound = None
self.numBounces = 0
return
def __pollCheckDistance(self, task):
if base.localAvatar.getDistance(self.weapon) <= self.WeaponHitDistance:
self.handleWeaponCollision(None)
return Task.done
return Task.cont
return
def loadAttack(self):
self.weapon = loader.loadModel('phase_5/models/props/bounced-check.bam')
self.weapon.setScale(10)
self.weapon.setTwoSided(1)
self.bounceSound = base.audio3d.loadSfx('phase_5/audio/sfx/SA_bounce_check_bounce.ogg')
base.audio3d.attachSoundToObject(self.bounceSound, self.suit)
cSphere = CollisionSphere(0, 0, 0, 0.1)
cSphere.setTangible(0)
if hasattr(self, 'uniqueName'):
name = self.uniqueName('bounced_check_collision')
else:
name = 'bounced_check_collision'
cNode = CollisionNode(name)
cNode.addSolid(cSphere)
cNode.setFromCollideMask(CIGlobals.FloorBitmask)
cNP = self.weapon.attachNewNode(cNode)
cNP.setCollideMask(BitMask32(0))
self.event = CollisionHandlerEvent()
self.event.setInPattern('%fn-into')
self.event.setOutPattern('%fn-out')
base.cTrav.addCollider(cNP, self.event)
self.wsnp = cNP
self.wsnp.show()
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, ts)
self.loadAttack()
if hasattr(self, 'uniqueName'):
name = self.uniqueName('doBounceCheckAttack')
else:
name = 'doBounceCheckAttack'
self.suitTrack = Sequence(name=name)
self.weapon.reparentTo(self.suit.find('**/joint_Rhold'))
if self.suit.type == 'C':
self.suitTrack.append(Wait(2.3))
else:
self.suitTrack.append(Wait(3))
self.suit.play('throw-paper')
self.suitTrack.append(Func(self.throwObject))
self.suitTrack.start(ts)
def throwObject(self):
ThrowAttack.throwObject(self)
taskMgr.add(self.__pollCheckDistance, 'pollCheckDistance')
self.__doThrow(0)
def __doThrow(self, alreadyThrown):
self.weapon.setScale(1)
pathNP = NodePath('throwPath')
if not alreadyThrown:
pathNP.reparentTo(self.suit)
else:
pathNP.reparentTo(self.weapon)
pathNP.setScale(render, 1.0)
pathNP.setPos(0, 30, -100)
pathNP.setHpr(90, -90, 90)
print pathNP.getPos(base.render)
if self.throwTrajectory:
self.throwTrajectory.pause()
self.throwTrajectory = None
if alreadyThrown:
startPos = self.weapon.getPos(base.render)
gravity = 0.7
else:
gravity = 0.7
startPos = self.suit.find('**/joint_Rhold').getPos(base.render)
self.throwTrajectory = ProjectileInterval(self.weapon, startPos=startPos, endPos=pathNP.getPos(base.render), gravityMult=gravity, duration=3.0)
self.throwTrajectory.start()
self.weapon.setScale(10)
self.weapon.reparentTo(render)
self.weapon.setHpr(pathNP.getHpr(render))
self.weapon_state = 'released'
self.acceptOnce(self.wsnp.node().getName() + '-into', self.__handleHitFloor)
return
def __handleHitFloor(self, entry):
self.numBounces += 1
if self.numBounces >= self.MaxBounces:
self.cleanup()
return
base.playSfx(self.bounceSound)
self.__doThrow(1)
def cleanup(self):
taskMgr.remove('pollCheckDistance')
self.ignore(self.wsnp.node().getName() + '-into')
self.bounceSound = None
ThrowAttack.cleanup(self)
return
class RedTapeAttack(ThrowAttack):
notify = directNotify.newCategory('RedTapeAttack')
attack = 'redtape'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_5/models/props/redtape.bam', 1, 'doRedTapeAttack', 'throw-paper', 0.5, 'redTapeWeaponSphere', weapon_p=90, weapon_y=0.35, weapon_z=-0.5, ts=ts)
def playWeaponSound(self):
self.weaponSfx = base.audio3d.loadSfx('phase_5/audio/sfx/SA_red_tape.ogg')
ThrowAttack.playWeaponSound(self)
def handleWeaponTouch(self):
if self.weaponSfx:
self.weaponSfx.stop()
self.weaponSfx = None
ThrowAttack.handleWeaponTouch(self)
return
class PowerTieAttack(ThrowAttack):
notify = directNotify.newCategory('PowerTieAttack')
attack = 'powertie'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_5/models/props/power-tie.bam', 4, 'doPowerTieAttack', 'throw-paper', 0.2, 'powerTieWeaponSphere', weapon_r=180, ts=ts)
def playWeaponSound(self):
self.weaponSfx = base.audio3d.loadSfx('phase_5/audio/sfx/SA_powertie_throw.ogg')
ThrowAttack.playWeaponSound(self)
class HalfWindsorAttack(ThrowAttack):
notify = directNotify.newCategory('HalfWindsorAttack')
attack = 'halfwindsor'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_5/models/props/half-windsor.bam', 6, 'doHalfWindsorAttack', 'throw-paper', 0.2, 'halfWindsorWeaponSphere', weapon_r=90, weapon_p=0, weapon_h=90, weapon_z=-1, weapon_y=-1.6, ts=ts)
def playWeaponSound(self):
self.weaponSfx = base.audio3d.loadSfx('phase_5/audio/sfx/SA_powertie_throw.ogg')
ThrowAttack.playWeaponSound(self)
class BiteAttack(ThrowAttack):
notify = directNotify.newCategory('BiteAttack')
attack = 'bite'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_5/models/props/teeth-mod.bam', 6, 'doBiteAttack', 'throw-object', 0.2, 'biteWeaponSphere', weapon_r=180, ts=ts)
def throwObject(self):
ThrowAttack.throwObject(self, False)
self.weapon.setH(self.weapon, -90)
class ChompAttack(ThrowAttack):
notify = directNotify.newCategory('ChompAttack')
attack = 'chomp'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_5/models/props/teeth-mod.bam', 6, 'doChompAttack', 'throw-object', 0.2, 'chompWeaponSphere', weapon_r=180, ts=ts)
def throwObject(self):
ThrowAttack.throwObject(self, False)
self.weapon.setH(self.weapon, -90)
class EvictionNoticeAttack(ThrowAttack):
notify = directNotify.newCategory('EvictionNoticeAttack')
attack = 'evictionnotice'
def doAttack(self, ts=0):
ThrowAttack.doAttack(self, 'phase_3.5/models/props/shredder-paper-mod.bam', 1, 'doEvictionNoticeAttack', 'throw-paper', 1, 'evictionNoticeWeaponSphere', weapon_y=-0.15, weapon_z=-0.5, weapon_x=-1.4, weapon_r=90, weapon_h=30, ts=ts)
self.wsnp.setZ(1.5)
def throwObject(self):
ThrowAttack.throwObject(self, False)
class RestrainingOrderAttack(EvictionNoticeAttack):
notify = directNotify.newCategory('RestrainingOrderAttack')
attack = 'restrainingorder'
class ParticleAttack(Attack):
notify = directNotify.newCategory('ParticleAttack')
attack = 'particleattack'
particleIvalDur = 1
shooterDistance = 50
def __init__(self, attacksClass, suit):
Attack.__init__(self, attacksClass, suit)
self.particles = []
self.handObj = None
self.shootOutCollNP = None
self.particleSound = None
self.particleMoveIval = None
self.targetX = None
self.targetY = None
self.targetZ = None
return
def handleWeaponTouch(self):
pass
def handleCollision(self, entry):
if self.suit:
self.suit.sendUpdate('toonHitByWeapon', [self.getAttackId(self.attack), base.localAvatar.doId])
base.localAvatar.b_handleSuitAttack(self.getAttackId(self.attack), self.suit.doId)
def doAttack(self, particlePaths, track_name, particleCollId, animation_name, delayUntilRelease, animationSpeed=1, handObjPath=None, handObjParent=None, startRightAway=True, ts=0):
for path in particlePaths:
particle = ParticleLoader.loadParticleEffect(path)
self.particles.append(particle)
sphere = CollisionSphere(0, 0, 0, 1)
sphere.setTangible(0)
node = CollisionNode(particleCollId)
node.addSolid(sphere)
node.setCollideMask(CIGlobals.WallBitmask)
self.targetX = self.attacksClass.target.getX(render)
self.targetY = self.attacksClass.target.getY(render)
self.targetZ = self.attacksClass.target.getZ(render)
if len(self.particles) == 1:
self.shootOutCollNP = self.particles[0].attachNewNode(node)
else:
self.shootOutCollNP = self.suit.attachNewNode(node)
if handObjPath and handObjParent:
self.handObj = loader.loadModel(handObjPath)
self.handObj.reparentTo(handObjParent)
self.suit.setPlayRate(animationSpeed, animation_name)
self.suit.play(animation_name)
if hasattr(self.suit, 'uniqueName'):
track_name = self.suit.uniqueName(track_name)
particleCollId = self.suit.uniqueName(particleCollId)
self.suitTrack = Sequence(name=track_name)
self.suitTrack.append(Wait(delayUntilRelease))
self.suitTrack.append(Func(self.releaseAttack))
self.suitTrack.append(Wait(self.particleIvalDur))
self.suitTrack.setDoneEvent(self.suitTrack.getName())
self.acceptOnce(self.suitTrack.getDoneEvent(), self.finishedAttack)
if startRightAway:
self.suitTrack.start(ts)
def releaseAttack(self, releaseFromJoint, onlyMoveColl=True, blendType='noBlend'):
startNP = releaseFromJoint.attachNewNode('startNP')
if None not in [self.targetX, self.targetY, self.targetZ]:
startNP.lookAt(render, self.targetX, self.targetY, self.targetZ + 2)
pathNP = NodePath('path')
pathNP.reparentTo(startNP)
pathNP.setScale(render, 1.0)
pathNP.setPos(0, self.shooterDistance, 0)
for particle in self.particles:
if not onlyMoveColl:
particle.start(render)
else:
particle.start(self.suit)
particle.lookAt(pathNP)
if self.attack == 'razzledazzle':
particle.setP(particle, 90)
if onlyMoveColl:
target = self.shootOutCollNP
target.wrtReparentTo(render)
else:
target = self.particles[0]
self.particleMoveIval = LerpPosInterval(target, duration=self.particleIvalDur, pos=pathNP.getPos(render), startPos=startNP.getPos(render), blendType=blendType)
self.particleMoveIval.start()
self.acceptOnce('enter' + self.shootOutCollNP.node().getName(), self.handleCollision)
pathNP.removeNode()
startNP.removeNode()
del pathNP
del startNP
self.playParticleSound()
return
def playParticleSound(self):
if self.particleSound:
base.audio3d.attachSoundToObject(self.particleSound, self.suit)
base.playSfx(self.particleSound)
def cleanup(self):
Attack.cleanup(self)
self.targetX = None
self.targetY = None
self.targetZ = None
if self.particles:
for particle in self.particles:
particle.cleanup()
self.particles = None
if self.handObj:
self.handObj.removeNode()
self.handObj = None
if self.shootOutCollNP:
self.ignore('enter' + self.shootOutCollNP.node().getName())
self.shootOutCollNP.removeNode()
self.shootOutCollNP = None
if self.particleMoveIval:
self.particleMoveIval.pause()
self.particleMoveIval = None
self.particleSound = None
self.particleIvalDur = None
return
class RazzleDazzleAttack(ParticleAttack):
notify = directNotify.newCategory('RazzleDazzleAttack')
attack = 'razzledazzle'
particleIvalDur = 2.0
def doAttack(self, ts):
ParticleAttack.doAttack(self, ['phase_5/etc/smile.ptf'], 'doRazzleDazzle', 'razzleDazzleSphere', 'glower', 1, 1, 'phase_5/models/props/smile-mod.bam', self.suit.find('**/joint_Rhold'), ts=ts)
def releaseAttack(self):
ParticleAttack.releaseAttack(self, self.handObj.find('**/scale_joint_sign'), onlyMoveColl=False, blendType='easeIn')
def playParticleSound(self):
self.particleSound = base.audio3d.loadSfx('phase_5/audio/sfx/SA_razzle_dazzle.ogg')
ParticleAttack.playParticleSound(self)
class BuzzWordAttack(ParticleAttack):
notify = directNotify.newCategory('BuzzWordAttack')
attack = 'buzzword'
particleIvalDur = 1.5
afterIvalDur = 1.5
shooterDistance = 32.0
def doAttack(self, ts):
texturesList = [
'buzzwords-crash',
'buzzwords-inc',
'buzzwords-main',
'buzzwords-over',
'buzzwords-syn']
particleList = []
for i in xrange(0, 5):
particleList.append('phase_5/etc/buzzWord.ptf')
ParticleAttack.doAttack(self, particleList, 'doBuzzWord', 'buzzWordSphere', 'speak', 1.5, 1.5, None, None, False, ts)
for i in xrange(0, 5):
effect = self.particles[i]
if random.random() > 0.5:
setEffectTexture(effect, texturesList[i], Vec4(1, 0.94, 0.02, 1))
else:
setEffectTexture(effect, texturesList[i], Vec4(0, 0, 0, 1))
for particle in self.particles:
particle.setZ(self.suit.find('**/joint_head').getZ(render))
self.suitTrack.append(Wait(self.afterIvalDur))
self.suitTrack.start(ts)
return
def releaseAttack(self):
ParticleAttack.releaseAttack(self, self.suit.find('**/joint_head'))
def playParticleSound(self):
self.particleSound = base.audio3d.loadSfx('phase_5/audio/sfx/SA_buzz_word.ogg')
ParticleAttack.playParticleSound(self)
class JargonAttack(ParticleAttack):
notify = directNotify.newCategory('JargonAttack')
attack = 'jargon'
particleIvalDur = 1.5
afterIvalDur = 1.5
shooterDistance = 31.0
def doAttack(self, ts):
texturesList = [
'jargon-brow',
'jargon-deep',
'jargon-hoop',
'jargon-ipo']
reds = [1, 0, 1, 0]
particleList = []
for i in xrange(0, 4):
particleList.append('phase_5/etc/jargonSpray.ptf')
ParticleAttack.doAttack(self, particleList, 'doJargon', 'jargonSphere', 'speak', 1.5, 1.5, None, None, False, ts)
for i in xrange(0, 4):
effect = self.particles[i]
setEffectTexture(effect, texturesList[i], Vec4(reds[i], 0, 0, 1))
for particle in self.particles:
particle.setZ(self.suit.find('**/joint_head').getZ(render))
self.suitTrack.append(Wait(self.afterIvalDur))
self.suitTrack.start(ts)
return
def releaseAttack(self):
ParticleAttack.releaseAttack(self, self.suit.find('**/joint_head'))
def playParticleSound(self):
self.particleSound = base.audio3d.loadSfx('phase_5/audio/sfx/SA_jargon.ogg')
self.particleSound.setLoop(True)
ParticleAttack.playParticleSound(self)
class MumboJumboAttack(ParticleAttack):
notify = directNotify.newCategory('MumboJumboAttack')
attack = 'mumbojumbo'
particleIvalDur = 2.5
afterIvalDur = 1.5
shooterDistance = 25.0
def doAttack(self, ts):
texturesList = [
'mumbojumbo-boiler',
'mumbojumbo-creative',
'mumbojumbo-deben',
'mumbojumbo-high',
'mumbojumbo-iron']
particleList = []
for i in xrange(0, 2):
particleList.append('phase_5/etc/mumboJumboSpray.ptf')
for i in xrange(0, 3):
particleList.append('phase_5/etc/mumboJumboSmother.ptf')
ParticleAttack.doAttack(self, particleList, 'doMumJum', 'mumJumSphere', 'speak', 1.5, 1.5, None, None, False, ts)
for i in xrange(0, 5):
effect = self.particles[i]
setEffectTexture(effect, texturesList[i], Vec4(1, 0, 0, 1))
for particle in self.particles:
particle.setZ(self.suit.find('**/joint_head').getZ(render))
self.suitTrack.append(Wait(self.afterIvalDur))
self.suitTrack.start(ts)
return
def releaseAttack(self):
ParticleAttack.releaseAttack(self, self.suit.find('**/joint_head'), blendType='easeIn')
def playParticleSound(self):
self.particleSound = base.audio3d.loadSfx('phase_5/audio/sfx/SA_mumbo_jumbo.ogg')
self.particleSound.setLoop(True)
ParticleAttack.playParticleSound(self)
class FilibusterAttack(ParticleAttack):
notify = directNotify.newCategory('FilibusterAttack')
attack = 'filibuster'
particleIvalDur = 1.5
afterIvalDur = 1.5
shooterDistance = 20.0
def doAttack(self, ts):
texturesList = [
'filibuster-cut',
'filibuster-fiscal',
'filibuster-impeach',
'filibuster-inc']
particleList = []
for i in xrange(0, 4):
particleList.append('phase_5/etc/filibusterSpray.ptf')
ParticleAttack.doAttack(self, particleList, 'doFili', 'filiSphere', 'speak', 1.5, 1.5, None, None, False, ts)
for i in xrange(0, 4):
effect = self.particles[i]
setEffectTexture(effect, texturesList[i], Vec4(0.4, 0, 0, 1))
for particle in self.particles:
particle.setZ(self.suit.find('**/joint_head').getZ(render))
self.suitTrack.append(Wait(self.afterIvalDur))
self.suitTrack.start(ts)
return
def releaseAttack(self):
ParticleAttack.releaseAttack(self, self.suit.find('**/joint_head'))
def playParticleSound(self):
self.particleSound = base.audio3d.loadSfx('phase_5/audio/sfx/SA_filibuster.ogg')
self.particleSound.setLoop(True)
ParticleAttack.playParticleSound(self)
class DoubleTalkAttack(ParticleAttack):
notify = directNotify.newCategory('DoubleTalkAttack')
attack = 'doubletalk'
particleIvalDur = 3.0
afterIvalDur = 1.5
shooterDistance = 40.0
def doAttack(self, ts):
texturesList = [
'doubletalk-double',
'doubletalk-good']
particleList = []
particleList.append('phase_5/etc/doubleTalkLeft.ptf')
particleList.append('phase_5/etc/doubleTalkRight.ptf')
ParticleAttack.doAttack(self, particleList, 'doDT', 'DTSphere', 'speak', 1.5, 1.5, None, None, False, ts)
for i in xrange(0, 2):
effect = self.particles[i]
setEffectTexture(effect, texturesList[i], Vec4(0, 1.0, 0, 1))
for particle in self.particles:
particle.setZ(self.suit.find('**/joint_head').getZ(render))
self.suitTrack.append(Wait(self.afterIvalDur))
self.suitTrack.start(ts)
return
def releaseAttack(self):
ParticleAttack.releaseAttack(self, self.suit.find('**/joint_head'), blendType='easeIn')
def playParticleSound(self):
self.particleSound = base.audio3d.loadSfx('phase_5/audio/sfx/SA_filibuster.ogg')
self.particleSound.setLoop(True)
ParticleAttack.playParticleSound(self)
class SchmoozeAttack(ParticleAttack):
notify = directNotify.newCategory('SchmoozeAttack')
attack = 'schmooze'
particleIvalDur = 1.5
afterIvalDur = 1.5
shooterDistance = 23.0
def doAttack(self, ts):
texturesList = [
'schmooze-genius',
'schmooze-instant',
'schmooze-master',
'schmooze-viz']
particleList = []
particleList.append('phase_5/etc/schmoozeUpperSpray.ptf')
particleList.append('phase_5/etc/schmoozeLowerSpray.ptf')
ParticleAttack.doAttack(self, particleList, 'doSch', 'SchSphere', 'speak', 1.5, 1.5, None, None, False, ts)
for i in xrange(0, 2):
effect = self.particles[i]
setEffectTexture(effect, texturesList[i], Vec4(0, 0, 1, 1))
for particle in self.particles:
particle.setZ(self.suit.find('**/joint_head').getZ(render))
self.suitTrack.append(Wait(self.afterIvalDur))
self.suitTrack.start(ts)
return
def releaseAttack(self):
ParticleAttack.releaseAttack(self, self.suit.find('**/joint_head'))
def playParticleSound(self):
self.particleSound = base.audio3d.loadSfx('phase_5/audio/sfx/SA_schmooze.ogg')
self.particleSound.setLoop(True)
ParticleAttack.playParticleSound(self)
class FingerWagAttack(ParticleAttack):
notify = directNotify.newCategory('FingerWagAttack')
attack = 'fingerwag'
particleIvalDur = 2.75
afterIvalDur = 1.5
shooterDistance = 30.0
def doAttack(self, ts):
ParticleAttack.doAttack(self, ['phase_5/etc/fingerwag.ptf'], 'doFW', 'FWSphere', 'fingerwag', 1.5, 1.5, None, None, False, ts)
setEffectTexture(self.particles[0], 'blah', Vec4(0.55, 0, 0.55, 1))
self.suitTrack.append(Wait(self.afterIvalDur))
self.suitTrack.start(ts)
return
def releaseAttack(self):
ParticleAttack.releaseAttack(self, self.suit.find('**/joint_head'), blendType='easeIn')
self.particles[0].setH(self.particles[0], 90)
def playParticleSound(self):
self.particleSound = base.audio3d.loadSfx('phase_5/audio/sfx/SA_finger_wag.ogg')
self.particleSound.setLoop(False)
ParticleAttack.playParticleSound(self)
from direct.fsm.StateData import StateData
class SuitAttacks(StateData):
notify = directNotify.newCategory('SuitAttacks')
attackName2attackClass = {'canned': CannedAttack,
'clipontie': ClipOnTieAttack,
'sacked': SackedAttack,
'glowerpower': GlowerPowerAttack,
'playhardball': HardballAttack,
'marketcrash': MarketCrashAttack,
'pickpocket': PickPocketAttack,
'hangup': HangUpAttack,
'fountainpen': FountainPenAttack,
'redtape': RedTapeAttack,
'powertie': PowerTieAttack,
'halfwindsor': HalfWindsorAttack,
'bite': BiteAttack,
'chomp': ChompAttack,
'evictionnotice': EvictionNoticeAttack,
'restrainingorder': RestrainingOrderAttack,
'razzledazzle': RazzleDazzleAttack,
'buzzword': BuzzWordAttack,
'jargon': JargonAttack,
'mumbojumbo': MumboJumboAttack,
'filibuster': FilibusterAttack,
'doubletalk': DoubleTalkAttack,
'schmooze': SchmoozeAttack,
'fingerwag': FingerWagAttack}
def __init__(self, doneEvent, suit, target):
StateData.__init__(self, doneEvent)
self.suit = suit
self.target = target
self.currentAttack = None
return
def load(self, attackName):
StateData.load(self)
className = self.attackName2attackClass[attackName]
self.currentAttack = className(self, self.suit)
def enter(self, ts=0):
StateData.enter(self)
self.currentAttack.doAttack(ts)
def exit(self):
self.currentAttack.cleanup()
StateData.exit(self)
def unload(self):
self.cleanup()
StateData.unload(self)
def cleanup(self):
self.suit = None
self.currentAttack = None
self.target = None
return | 38.614105 | 517 | 0.636905 | [
"Apache-2.0"
] | theclashingfritz/Cog-Invasion-Online-Dump | lib/coginvasion/suit/SuitAttacks.py | 48,731 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RaxmlNg(CMakePackage):
"""RAxML-NG is a phylogenetic tree inference tool which uses
maximum-likelihood (ML) optimality criterion.
Its search heuristic is based on iteratively performing a series
of Subtree Pruning and Regrafting (SPR) moves,
    which allows it to quickly navigate to the best-known ML tree.
RAxML-NG is a successor of RAxML (Stamatakis 2014) and leverages
the highly optimized likelihood computation implemented in libpll
(Flouri et al. 2014)."""
homepage = "https://github.com/amkozlov/raxml-ng/wiki"
url = "https://github.com/amkozlov/raxml-ng/archive/1.0.1.tar.gz"
git = "https://github.com/amkozlov/raxml-ng.git"
version('1.0.2', submodules=True)
version('1.0.1', submodules=True)
variant("mpi", default=True, description="Use MPI")
depends_on('bison')
depends_on('flex')
depends_on('gmp')
depends_on('mpi', when='+mpi')
def cmake_args(self):
return [self.define_from_variant('USE_MPI', 'mpi')]
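    # Illustrative usage note (not part of the recipe): because 'USE_MPI' is driven
    # by the 'mpi' variant via define_from_variant, a user would typically request
    # it on the command line, e.g. `spack install raxml-ng +mpi` (or `~mpi` to
    # build without MPI support).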
| 34.333333 | 74 | 0.703883 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | Bambi/spack | var/spack/repos/builtin/packages/raxml-ng/package.py | 1,236 | Python |
"""
Afterglow Core: settings routes
"""
import secrets
import json
from flask import Response, request, redirect
from marshmallow.fields import Integer, String
from ...oauth2 import oauth_clients
from ... import app, json_response
from ...auth import auth_required, set_access_cookies
from ...resources.users import DbPersistentToken, db, DbUser, DbIdentity, DbRole
from ...schemas import Resource
from ...errors.oauth2 import (UnknownClientError)
from . import url_prefix
@app.route(url_prefix + 'oauth2/clients/<client_id>', methods=['GET'])
@auth_required
def oauth2_clients(client_id: str) -> Response:
"""
Return OAuth2 client applications
:return:
GET /ajax/oauth2/clients: list of OAuth2 clients
"""
client = next((c for c in oauth_clients.values() if c.client_id == client_id), None)
if not client:
raise UnknownClientError()
return json_response(dict(id=client.client_id, name=client.name, description=client.description, icon=client.icon))
| 25.794872 | 119 | 0.739563 | [
"Apache-2.0"
] | SkynetRTN/afterglow-access-server | afterglow_core/views/ajax_api/oauth2_clients.py | 1,006 | Python |
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="TPyatMWyQl9oNj8aR735PzfRJsAZf2sdHojggLgxE3d3jSpLEbfy63ypb7p45VSM",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env("DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend")
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#using-whitenoise-in-development
INSTALLED_APPS = ["whitenoise.runserver_nostatic"] + INSTALLED_APPS # noqa F405
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += [ # noqa F405
"debug_toolbar.middleware.DebugToolbarMiddleware",
"querycount.middleware.QueryCountMiddleware",
]
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
if env("USE_DOCKER") == "yes":
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS += [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
QUERYCOUNT = {
"DISPLAY_DUPLICATES": 5,
}
| 39.128571 | 101 | 0.58525 | [
"MIT"
] | ReddyLab/cegs-portal | config/settings/local.py | 2,739 | Python |
import threading
import time
import mpl_qtthread.backend
import matplotlib
import matplotlib.backends.backend_qt
import matplotlib.pyplot as plt
from matplotlib.backends.qt_compat import QtWidgets, QtCore
# set up the teleporter
mpl_qtthread.backend.initialize_qt_teleporter()
# tell Matplotlib to use this backend
matplotlib.use("module://mpl_qtthread.backend_agg")
# suppress (now) spurious warnings for mpl3.3+
mpl_qtthread.monkeypatch_pyplot()
app = matplotlib.backends.backend_qt.qApp
# button to exit early and to make sure qapp does not quit!
bt = QtWidgets.QPushButton("Quit")
bt.pressed.connect(app.quit)
bt.show()
tot_time = 15
# stop the UAT automatically after tot_time seconds
timer = QtCore.QTimer()
timer.setSingleShot(True)
timer.timeout.connect(app.quit)
timer.start(tot_time * 1000) # this is in ms
def background():
# make a figure and plot some data
fig, ax = plt.subplots()
plt.show(block=False)
(ln,) = ax.plot(range(5))
thread_start_time = start_time = time.monotonic()
fig.canvas.flush_events()
# periodically update the figure
for j in range(5):
print(
f"starting block {j} at Δt {time.monotonic() - start_time:.3f} "
f"(expected ~{j})"
)
ln.set_color(f"C{j}")
ax.set_title(f"cycle {j}")
fig.set_size_inches(1 + j, 1 + j)
fig.canvas.draw_idle()
print(f"line should now be color 'C{j}'")
time.sleep(1)
plt.close(fig)
print("figure is now closed, take a 1s nap")
time.sleep(1)
fig2 = plt.figure()
fig2.show()
print("New figure!")
start_time = time.monotonic()
for j in range(4):
time.sleep(1)
fig2.canvas.manager.full_screen_toggle()
fig2.canvas.manager.set_window_title(f"toggled {j}")
print(f"toggled Δt {time.monotonic() - start_time:.3f} (expected ~{j+1})")
fig3, _ = plt.subplots(3, 1)
fig3.show()
print("figure is small again and there are two figures.")
print("take 1s nap")
time.sleep(1)
plt.close("all")
    print(
        "all figures should be closed; "
        f"app will exit in {tot_time - (time.monotonic() - thread_start_time):.1f}s or when you hit quit."
)
# start the thread
threading.Thread(target=background, daemon=True).start()
# start the QApplication main loop
app.exec()
| 26.976471 | 87 | 0.66812 | [
"BSD-3-Clause"
] | tacaswell/mpl-qtthread | UAT.py | 2,295 | Python |
# -*- coding: utf-8 -*-
"""
SHT21 Sensor Plugin.
Return temperature and relative humidity from sensor readings.
Calculate and return absolute humidity and dew point.
Source for calculations:
http://www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from fcntl import flock, LOCK_EX, LOCK_UN
import math
import collectd
from sht21 import SHT21
sht21 = None
lock_file = None
lock_handle = None
def pws_constants(t):
"""Lookup-table for water vapor saturation pressure constants (A, m, Tn)."""
if t < -20:
raise ValueError('Temperature out of range (-20 - 350°C')
if t < 50:
return (6.116441, 7.591386, 240.7263)
if t < 100:
return (6.004918, 7.337936, 229.3975)
if t < 150:
return (5.856548, 7.27731, 225.1033)
if t < 200:
return (6.002859, 7.290361, 227.1704)
return (9.980622, 7.388931, 263.1239)
def pws(t):
r"""
Calculate water vapor saturation pressure based on temperature (in hPa).
P_{WS} = A \cdot 10^{\frac{m \cdot T}{T + T_n}}
"""
A, m, Tn = pws_constants(t) # noqa:N806
power = (m * t) / (t + Tn)
return A * 10 ** power
def pw(t, rh):
r"""
Calculate Pw (in hPa).
P_W = P_{WS} \cdot RH / 100
"""
return pws(t) * rh / 100
def td(t, rh):
r"""
Calculate the dew point (in °C).
T_d = \frac{T_n}{\frac{m}{log_{10}\left(\frac{P_w}{A}\right)} - 1}
"""
A, m, Tn = pws_constants(t) # noqa:N806
Pw = pw(t, rh) # noqa:N806
return Tn / ((m / math.log(Pw / A, 10)) - 1)
def celsius_to_kelvin(celsius):
return celsius + 273.15
def ah(t, rh):
r"""
Calculate the absolute humidity (in g/m³).
A = C \cdot P_w / T
"""
C = 2.16679 # noqa:N806
Pw = pw(t, rh) # noqa:N806
T = celsius_to_kelvin(t) # noqa:N806
return C * (Pw * 100) / T
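# Illustrative sanity check (rounded values, not used by the plugin): at
# t = 20.0 °C and rh = 50.0 %, pws(20.0) is roughly 23.4 hPa, so pw(20.0, 50.0)
# is about 11.7 hPa, td(20.0, 50.0) about 9.3 °C and ah(20.0, 50.0) about 8.6 g/m³.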
def config(config):
global lock_file
for node in config.children:
key = node.key.lower()
val = node.values[0]
if key == 'lockfile':
lock_file = val
collectd.info('sht21 user-mode plugin: Using lock file %s' %
lock_file)
def init():
global sht21, lock_file, lock_handle
if lock_file:
# Try to open lock file, in case of failure proceed without locking
try:
lock_handle = open(lock_file, 'w')
except IOError as e:
collectd.error('sht21 plugin: Could not open lock file: %s' % e)
collectd.error('Proceeding without locking')
try:
sht21 = SHT21(1)
collectd.info('sht21 user-mode plugin initialized')
except IOError as e:
collectd.error('sht21 plugin: Could not initialize: %s' % e)
collectd.unregister_read(read)
def read():
# Read values
global sht21, lock_handle
try:
if lock_handle:
flock(lock_handle, LOCK_EX)
temperature = sht21.read_temperature()
humidity = sht21.read_humidity()
if lock_handle:
flock(lock_handle, LOCK_UN)
except IOError as e:
collectd.error('sht21 plugin: Could not read sensor data: %s' % e)
return
# Calculate values
try:
dewpoint = td(temperature, humidity)
except ValueError as e:
collectd.error('sht21 plugin: Could not calculate dew point: %s' % e)
dewpoint = 0
absolute_humidity = ah(temperature, humidity)
# Dispatch values
v_tmp = collectd.Values(plugin='sht21', type='temperature', type_instance='current')
v_tmp.dispatch(values=[temperature])
v_hum = collectd.Values(plugin='sht21', type='humidity', type_instance='relative_humidity')
v_hum.dispatch(values=[humidity])
v_abs = collectd.Values(plugin='sht21', type='gauge', type_instance='absolute_humidity')
v_abs.dispatch(values=[absolute_humidity])
v_dew = collectd.Values(plugin='sht21', type='temperature', type_instance='dewpoint')
v_dew.dispatch(values=[dewpoint])
collectd.register_config(config)
collectd.register_init(init)
collectd.register_read(read)
| 26.408805 | 107 | 0.62634 | [
"MIT"
] | AmedeeBulle/collectd-python-plugins | sht21_usermode.py | 4,202 | Python |
from .models import *
from rest_framework import serializers
class THLIC_CertificateSerializer(serializers.ModelSerializer):
class Meta:
model = THLIC_Certificate
fields = '__all__'
class Teaching_portofolioSerializer(serializers.ModelSerializer):
class Meta:
model = Teaching_portofolio
fields = '__all__' | 27.769231 | 66 | 0.717452 | [
"MIT"
] | iPelino/cst-research-api | teaching_activities/serializers.py | 361 | Python |
# Tests for client code in bin/test
| 18 | 35 | 0.75 | [
"Apache-2.0"
] | EHRI/resync | bin/test/__init__.py | 36 | Python |
import discord
from discord.ext import commands
class AttackStrats(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.group()
async def strat(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send("You need to specify a townhall and strategy! Available townhalls:\nth8\nth9\nth10\nth11\nth12\nth13")
@strat.group()
async def th8(self, ctx):
if ctx.invoked_subcommand is None:
            await ctx.send("You need to specify an attack strategy! Available strategies:\nhogs\ndragons")
@th8.command(name="hogs")
async def th8_hogs(self, ctx):
th8_hogsEmbed = discord.Embed(
title="TH8 Hogs",
description="**ARMY COMPOSITION:**```\nTroops: 32 Hog Riders, 10 Wizards\nSpells: 3 Heals, 1 Poison\nCC: 5 high level hog riders\n```\n **ATTACKING:\n**",
color=self.bot.colors
)
th8_hogsEmbed.add_field(
name="Step 1- Dealing the clan castle troops:",
value="Send a hog in to lure out the cc. If it doesn't come out the send another one. Luring out the cc is very important because it can destroy all of your hogs. Once the cc is lured, drop your poison on the cc, drop your king and about 5 wizards. The king will suck up the damage while the wizards take out the cc.\n",
inline=False
)
th8_hogsEmbed.add_field(
name="Step 2- Attacking:",
value="Drop your hogs and cc on the defenses once the enemy cc is gone. It is better to drop them on each defense, but don't spread them out too much or they wont be effective. As soon as the hogs have taken out the first initial defense drop your wizards behind. Clean up is very important with hogs. As your hogs make their way around the base, they have to be kept healed. Drop heal spells as the hogs go while looking out for giant bombs. Giant bombs will take out all of your hogs, so make sure you are watching and are ready to drop a spell when you see one.\n",
inline=False
)
th8_hogsEmbed.add_field(
name="Step 3- Clean up:",
value="Once all the defenses are destroyed the wizards should have cleaned up a lot, and the hogs will then take care of the last few buildings.\n",
inline=False
)
await ctx.send(embed=th8_hogsEmbed)
@th8.command(name="dragons")
async def th8_dragons(self, ctx):
th8_dragonsEmbed = discord.Embed(
title="TH8 Dragons",
description="**ARMY COMPOSITION**```\nTroops: 10 Dragons\nSpells: 3 Rages, 1 Poison\nCC: Balloons\n```\n**ATTACKING**\n",
color = self.bot.colors
)
th8_dragonsEmbed.add_field(
name="Step 1- Funneling:",
value="Drop your king on one side of the base and a dragon on the other side. We want our dragons to go to the center, not go around the base. The king and dragon will funnel so our main army goes down to the middle.",
inline=False
)
th8_dragonsEmbed.add_field(
name="Step 2- Main Army:",
value=""
)
await ctx.send(embed=th8_dragonsEmbed)
@strat.group()
async def th9(self, ctx):
if ctx.invoked_subcommand is None:
            await ctx.send("You need to specify an attack strategy! Available strategies:\n")
# @th9.command(name="dragons")
# async def dragons(self, ctx):
# await ctx.send("")
@strat.group()
async def th10(self, ctx):
if ctx.invoked_subcommand is None:
            await ctx.send("You need to specify an attack strategy! Available strategies:\n")
@strat.group()
async def th11(self, ctx):
if ctx.invoked_subcommand is None:
            await ctx.send("You need to specify an attack strategy! Available strategies:\n")
@th11.command(name="hybrid")
async def th11_qc_hybrid(self, ctx):
th11_qc_hybridEmbed = discord.Embed(
title="TH11 Queen Charge Hybrid",
description="**ARMY COMPOSITION:**```\nTroops: 5 Healers (for queen charge), 1 or 2 Balloons (to protect the healers), 2 Baby Dragons (for funnel), 15 or 16 Miners, 10 Hogs, 1 or 2 Super Wall Breakers (if you don't have them replace with regular wall breakers), cleanup troops with the remaining space (archers or minions)\nSpells: 2 Heals, 2 Rages, 2 Freeze, 1 Poison\nCC: More Hogs and a Rage or Heal (whatever you think you need), Siege Barracks\n```\n **ATTACKING:\n**",
color=discord.Color.dark_gold())
th11_qc_hybridEmbed.add_field(
name="Step 1- Queen Charge:",
value="Identify where to charge your queen into the base. Remember the purpose of the queen walk is to get rid of clan castle troops. Drop the baby dragons to funnel for the Queen to make sure she goes into the base. Then drop your healers and a loon or two to look for black bombs (seeking air mines that will take out a healer). Wall break into the compartment you need your queen to go in. When your queen comes under heavy fire make sure to drop the rage on both the queen and healers. If your queen is under target by a single inferno make sure to freeze it. Once you get the cc pull make sure to POISON them. You can take care of an edrag super easily if you have a poison.\n",
inline=False)
th11_qc_hybridEmbed.add_field(
name="Step 2- Funneling:",
value="Once we have dealt with the clan castle we need to identify where we're going to be sending in the Hybrid portion of the attack. Normally the Queen Charge will have taken out a chunk of the base on one of the sides. We want our hybrid to go straight for the core of the base and not down the sides. Think of the QC as one side of a funnel for your Hybrid. For the other side of the funnel we need to place our King and Siege Barracks down the other edge of the base to clear all the trash buildings (collectors, barracks, etc.) so our hybrid has a clear path into the core of the base.\n",
inline=False
)
th11_qc_hybridEmbed.add_field(
name="Step 3- The Main Army:",
value="Once the King and Siege Barracks have cleared quite a bit, we want to start our hybrid by placing all Miners, Hogs and Warden down to go on the path into the core of the base. If you didn't take out the Eagle with your Queen Charge use the Warden ability to protect the hybrid from the strike. Freezes can also be used here to freeze up Multi Infernos or a section with a lot of splash damage like Wizard Towers, Mortars or Bomb Towers. Place Heals where necessary to keep the hybrid alive.\n"
)
th11_qc_hybridEmbed.add_field(
name="Step 4- Cleanup:",
value="We also have to think about placing cleanup troops for corner builder huts, missed buildings on the other side of the base etc.\n",
inline=False
)
await ctx.send(embed=th11_qc_hybridEmbed)
@strat.group()
async def th12(self, ctx):
if ctx.invoked_subcommand is None:
            await ctx.send("You need to specify an attack strategy! Available strategies:\n")
@strat.group()
async def th13(self, ctx):
if ctx.invoked_subcommand is None:
            await ctx.send("You need to specify an attack strategy! Available strategies:\n")
def setup(bot):
bot.add_cog(AttackStrats(bot))
| 30.631579 | 696 | 0.663759 | [
"MIT"
] | Ay-355/the-buds-bot | cogs/attack_strats.py | 7,566 | Python |
# The following is the DenseNet module; the training actually took place in the `run_dense_net.py` file.
# Sorry, I really like PyCharm (and, to be fair, PyTorch is so much easier to debug)
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
from models import DenseNet
from data_providers.utils import get_data_provider_by_name
import tensorflow as tf
import numpy as np
import json
import pandas as pd
from tqdm import tqdm
import random
import time
from matplotlib import pyplot as plt
# Visualizations will be shown in the notebook.
# % matplotlib inline
from matplotlib import gridspec
# Load pickled data
import pickle
training_file = './data/train.p'
validation_file = './data/valid.p'
testing_file = './data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test_origin = test['features'], test['labels']
train_params_cifar = {
'batch_size': 64,
'n_epochs': 500,
'initial_learning_rate': 0.05,
'reduce_lr_epoch_1': 50, # epochs * 0.5
'reduce_lr_epoch_2': 75, # epochs * 0.75
'validation_set': True,
'validation_split': None, # None or float
'shuffle': 'every_epoch', # None, once_prior_train, every_epoch
'normalization': 'by_chanels', # None, divide_256, divide_255, by_chanels
'use_YUV': True,
'use_Y': False, # use only Y channel
'data_augmentation': 0, # [0, 1]
}
# We save this model params.json from the trained model
with open('model_params.json', 'r') as fp:
model_params = json.load(fp)
# some default params dataset/architecture related
train_params = train_params_cifar
print("Params:")
for k, v in model_params.items():
print("\t%s: %s" % (k, v))
print("Train params:")
for k, v in train_params.items():
print("\t%s: %s" % (k, v))
model_params['use_Y'] = False
print("Prepare training data...")
data_provider = get_data_provider_by_name(model_params['dataset'], train_params)
print("Initialize the model..")
tf.reset_default_graph()
model = DenseNet(data_provider=data_provider, **model_params)
print("Loading trained model")
model.load_model()
print("Data provider test images: ", data_provider.test.num_examples)
print("Testing...")
loss, accuracy = model.test(data_provider.test, batch_size=30)
import cv2
def labels_to_one_hot(labels, n_classes=43+1):
"""Convert 1D array of labels to one hot representation
Args:
labels: 1D numpy array
"""
new_labels = np.zeros((n_classes,))
new_labels[labels] = 1
return new_labels
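# e.g. (illustrative) labels_to_one_hot(2, n_classes=5) -> array([0., 0., 1., 0., 0.])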
newimages = []
newlabels = []
new_onehot = []
newlabelsdata = []
directories = "./newimages"
subdirs = os.listdir(directories)
for subdir in subdirs:
classId = int(subdir.split("-")[0])
classinfo = {'label':classId,'count':0, 'samples':[]}
filepath = directories+"/"+subdir
for filename in os.listdir(filepath):
image_filepath = filepath+"/"+filename
image = cv2.imread(image_filepath)
image_rgb = cv2.resize(image, (32, 32), interpolation=cv2.INTER_AREA)
image = image_rgb.copy()
image[:, :, 0] = image_rgb[:, :, 2]
image[:, :, 2] = image_rgb[:, :, 0]
newimages.append(image)
newlabels.append(classId)
new_onehot.append(labels_to_one_hot(classId))
classinfo['count'] += 1
classinfo['samples'].append(len(newimages)-1)
if classinfo['count'] > 0:
print("appending: ", classinfo)
newlabelsdata.append(classinfo)
newimages = np.array(newimages)
newlabels = np.array(newlabels)
new_onehot = np.array(new_onehot)
from data_providers.GermanTrafficSign import RGB2YUV
X_test_new = RGB2YUV(newimages)
new_image = np.zeros(X_test_new.shape)
for i in range(X_test_new.shape[-1]):
new_image[:, :, :, i] = ((X_test_new[:, :, :, i] - data_provider._means[i]) /data_provider._stds[i])
y_new_onehot = model.predictions_one_image(new_image)[0]
predict_classId = np.argmax(y_new_onehot, axis=1)
incorrectlist = []
for i in range(len(y_new_onehot)):
correct_classId = np.argmax(new_onehot[i],0)
predict_classId = np.argmax(y_new_onehot[i],0)
incorrectlist.append({'index':i, 'correct':correct_classId, 'predicted':predict_classId})
incorrectmatrix = {}
modeCount = 0
for i in range(len(incorrectlist)):
predicted = incorrectlist[i]['predicted']
correct = incorrectlist[i]['correct']
index = incorrectlist[i]['index']
bucket = str(correct) + "+" + str(predicted)
incorrectinstance = incorrectmatrix.get(bucket, {'count': 0, 'samples': []})
# add to the count
count = incorrectinstance['count'] + 1
# add to samples of this correct to predicted condition
samples = incorrectinstance['samples']
samples.append(index)
# put back in the list
incorrectmatrix[bucket] = {'count': count, 'correct': correct, 'predicted': predicted, 'samples': samples}
# update most common error
if count > modeCount:
modeCount = count
modeBucket = bucket
# get the list of buckets and sort them
def compare_bucket_count(bucket):
return modeCount - incorrectmatrix[bucket]['count']
sortedBuckets = list(incorrectmatrix.keys())
sortedBuckets.sort(key=compare_bucket_count)
# get the unique number of original picture sizes and the min and max last instance
n_buckets = len(sortedBuckets)
# print the stats
print("\nNumber of unique buckets in incorrect set: ", n_buckets, "\n")
print("Mode Bucket: ", modeBucket, "with count: ", modeCount)
classLabelList = pd.read_csv('signnames.csv')
print("\nDistribution of buckets with predicted test dataset labels:")
for n in range(len(sortedBuckets)):
bucket = sortedBuckets[n]
cclassId = incorrectmatrix[bucket]['correct']
pclassId = incorrectmatrix[bucket]['predicted']
count = incorrectmatrix[bucket]['count']
cdescription = classLabelList[classLabelList.ClassId == cclassId].SignName.to_string(header=False, index=False)
pdescription = classLabelList[classLabelList.ClassId == pclassId].SignName.to_string(header=False, index=False)
print(
"incorrect set count: {0:4d} CClassId: {1:02d} Description: {2}\n PClassId: {3:02d} Description: {4}".format(
count, cclassId, cdescription, pclassId, pdescription))
def draw_sample_correctmatrix(datasettxt, sortedBuckets, incorrectmatix, dataset, cmap=None):
n_maxsamples = 8
n_labels = len(sortedBuckets)
# size of each sample
fig = plt.figure(figsize=(n_maxsamples * 1.8, n_labels))
w_ratios = [1 for n in range(n_maxsamples)]
w_ratios[:0] = [int(n_maxsamples * 0.8)]
h_ratios = [1 for n in range(n_labels)]
# gridspec
time.sleep(1) # wait for 1 second for the previous print to appear!
grid = gridspec.GridSpec(n_labels, n_maxsamples + 1, wspace=0.0, hspace=0.0, width_ratios=w_ratios,
height_ratios=h_ratios)
labelset_pbar = tqdm(range(n_labels), desc=datasettxt, unit='labels')
for a in labelset_pbar:
cclassId = incorrectmatrix[sortedBuckets[n_labels - a - 1]]['correct']
pclassId = incorrectmatrix[sortedBuckets[n_labels - a - 1]]['predicted']
cdescription = classLabelList[classLabelList.ClassId == cclassId].SignName.to_string(header=False, index=False)
pdescription = classLabelList[classLabelList.ClassId == pclassId].SignName.to_string(header=False, index=False)
count = incorrectmatrix[sortedBuckets[n_labels - a - 1]]['count']
for b in range(n_maxsamples + 1):
i = a * (n_maxsamples + 1) + b
ax = plt.Subplot(fig, grid[i])
if b == 0:
ax.annotate(
'CClassId %d (%d): %s\nPClassId %d: %s' % (cclassId, count, cdescription, pclassId, pdescription),
xy=(0, 0), xytext=(0.0, 0.3))
ax.set_xticks([])
ax.set_yticks([])
fig.add_subplot(ax)
else:
if (b - 1) < count:
image = dataset[incorrectmatrix[sortedBuckets[n_labels - a - 1]]['samples'][b - 1]]
if cmap == None:
ax.imshow(image)
else:
# yuv = cv2.split(image)
# ax.imshow(yuv[0], cmap=cmap)
ax.imshow(image, cmap=cmap)
ax.set_xticks([])
ax.set_yticks([])
fig.add_subplot(ax)
# hide the borders\
if a == (n_labels - 1):
all_axes = fig.get_axes()
for ax in all_axes:
for sp in ax.spines.values():
sp.set_visible(False)
plt.show() | 36.24898 | 144 | 0.660511 | [
"MIT"
] | stevenwudi/CarND-Traffic-Sign-Classifier-Project | test_single.py | 8,881 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import SkyrusTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(SkyrusTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def setup_nodes(self):
#This test requires mocktime
enable_mocktime()
return start_nodes(self.num_nodes, self.options.tmpdir)
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| 49.839024 | 113 | 0.598708 | [
"MIT"
] | VangYangPao/skyrus | qa/rpc-tests/listtransactions.py | 10,217 | Python |
import warnings
from importlib import import_module
from django.conf import settings
from pretalx.orga.signals import nav_event, nav_event_settings, nav_global
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
def collect_signal(signal, kwargs):
result = []
for _, response in signal.send_robust(**kwargs):
if isinstance(response, dict):
result.append(response)
elif isinstance(response, list):
result += response
return result
def orga_events(request):
"""Add data to all template contexts."""
context = {"settings": settings}
if not request.path.startswith("/orga/"):
return {}
if not getattr(request, "user", None) or not request.user.is_authenticated:
return context
if not getattr(request, "event", None):
context["nav_global"] = collect_signal(
nav_global, {"sender": None, "request": request}
)
return context
if getattr(request, "event", None):
_nav_event = []
for _, response in nav_event.send_robust(request.event, request=request):
if isinstance(response, list):
_nav_event += response
else:
_nav_event.append(response)
warnings.warn(
"Please return a list in your nav_event signal receiver, not a dictionary.",
DeprecationWarning,
)
context["nav_event"] = _nav_event
context["nav_settings"] = collect_signal(
nav_event_settings, {"sender": request.event, "request": request}
)
if (
not request.event.is_public
and request.event.settings.custom_domain
and request.user.has_perm("cfp.view_event", request.event)
):
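            # For users allowed to view a non-public event on its custom domain,
            # create (or reuse) a "child" session that records which main session
            # granted event access.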
child_session_key = f"child_session_{request.event.pk}"
child_session = request.session.get(child_session_key)
s = SessionStore()
if not child_session or not s.exists(child_session):
s[
f"pretalx_event_access_{request.event.pk}"
] = request.session.session_key
s.create()
context["new_session"] = s.session_key
request.session[child_session_key] = s.session_key
request.session["event_access"] = True
else:
context["new_session"] = child_session
request.session["event_access"] = True
return context
| 33.706667 | 96 | 0.602848 | [
"Apache-2.0"
] | ThomasWaldmann/pretalx | src/pretalx/orga/context_processors.py | 2,528 | Python |
#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2018 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : evaluate.py
# Author : YunYang1994
# Created date: 2018-12-20 11:58:21
# Description : compute mAP
#
#================================================================
import sys
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from PIL import Image
from core import utils, yolov3
from core.dataset import dataset, Parser
sess = tf.Session()
IMAGE_H, IMAGE_W = 416, 416
CLASSES = utils.read_coco_names('./data/raccoon.names')
NUM_CLASSES = len(CLASSES)
ANCHORS = utils.get_anchors('./data/raccoon_anchors.txt', IMAGE_H, IMAGE_W)
CKPT_FILE = "./checkpoint/yolov3.ckpt-2500"
IOU_THRESH = 0.5
SCORE_THRESH = 0.3
all_detections = []
all_annotations = []
all_aver_precs = {CLASSES[i]:0. for i in range(NUM_CLASSES)}
test_tfrecord = "./raccoon_dataset/raccoon_*.tfrecords"
parser = Parser(IMAGE_H, IMAGE_W, ANCHORS, NUM_CLASSES)
testset = dataset(parser, test_tfrecord , batch_size=1, shuffle=None, repeat=False)
images_tensor, *y_true_tensor = testset.get_next()
model = yolov3.yolov3(NUM_CLASSES, ANCHORS)
with tf.variable_scope('yolov3'):
pred_feature_map = model.forward(images_tensor, is_training=False)
y_pred_tensor = model.predict(pred_feature_map)
saver = tf.train.Saver()
saver.restore(sess, CKPT_FILE)
try:
image_idx = 0
while True:
y_pred, y_true, image = sess.run([y_pred_tensor, y_true_tensor, images_tensor])
pred_boxes = y_pred[0][0]
pred_confs = y_pred[1][0]
pred_probs = y_pred[2][0]
image = Image.fromarray(np.uint8(image[0]*255))
true_labels_list, true_boxes_list = [], []
for i in range(3):
true_probs_temp = y_true[i][..., 5: ]
true_boxes_temp = y_true[i][..., 0:4]
object_mask = true_probs_temp.sum(axis=-1) > 0
true_probs_temp = true_probs_temp[object_mask]
true_boxes_temp = true_boxes_temp[object_mask]
true_labels_list += np.argmax(true_probs_temp, axis=-1).tolist()
true_boxes_list += true_boxes_temp.tolist()
pred_boxes, pred_scores, pred_labels = utils.cpu_nms(pred_boxes, pred_confs*pred_probs, NUM_CLASSES,
score_thresh=SCORE_THRESH, iou_thresh=IOU_THRESH)
# image = datasets.draw_boxes(image, pred_boxes, pred_scores, pred_labels, CLASSES, [IMAGE_H, IMAGE_W], show=True)
true_boxes = np.array(true_boxes_list)
box_centers, box_sizes = true_boxes[:,0:2], true_boxes[:,2:4]
true_boxes[:,0:2] = box_centers - box_sizes / 2.
true_boxes[:,2:4] = true_boxes[:,0:2] + box_sizes
pred_labels_list = [] if pred_labels is None else pred_labels.tolist()
all_detections.append( [pred_boxes, pred_scores, pred_labels_list])
all_annotations.append([true_boxes, true_labels_list])
image_idx += 1
if image_idx % 100 == 0:
sys.stdout.write(".")
sys.stdout.flush()
except tf.errors.OutOfRangeError:
pass
for idx in range(NUM_CLASSES):
true_positives = []
scores = []
num_annotations = 0
for i in tqdm(range(len(all_annotations)), desc="Computing AP for class %12s" %(CLASSES[idx])):
pred_boxes, pred_scores, pred_labels_list = all_detections[i]
true_boxes, true_labels_list = all_annotations[i]
detected = []
num_annotations += true_labels_list.count(idx)
for k in range(len(pred_labels_list)):
if pred_labels_list[k] != idx: continue
scores.append(pred_scores[k])
ious = utils.bbox_iou(pred_boxes[k:k+1], true_boxes)
m = np.argmax(ious)
if ious[m] > IOU_THRESH and pred_labels_list[k] == true_labels_list[m] and m not in detected:
detected.append(m)
true_positives.append(1)
else:
true_positives.append(0)
num_predictions = len(true_positives)
true_positives = np.array(true_positives)
false_positives = np.ones_like(true_positives) - true_positives
# sorted by score
indices = np.argsort(-np.array(scores))
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
# compute recall and precision
recall = true_positives / np.maximum(num_annotations, np.finfo(np.float64).eps)
precision = true_positives / np.maximum(num_predictions, np.finfo(np.float64).eps)
# compute average precision
average_precision = utils.compute_ap(recall, precision)
all_aver_precs[CLASSES[idx]] = average_precision
for idx in range(NUM_CLASSES):
cls_name = CLASSES[idx]
print("=> Class %10s - AP: %.4f" %(cls_name, all_aver_precs[cls_name]))
print("=> mAP: %.4f" %(sum(all_aver_precs.values()) / NUM_CLASSES))
| 37.219858 | 122 | 0.634527 | [
"MIT"
] | benoitLemoine/peopleCounting | detection/yolov3/src/evaluate.py | 5,248 | Python |
'''
For this exercise, you'll use what you've learned about the zip() function and combine two lists into a dictionary.
These lists are actually extracted from a bigger dataset file of world development indicators from the World Bank. For pedagogical purposes, we have pre-processed this dataset into the lists that you'll be working with.
The first list feature_names contains header names of the dataset and the second list row_vals contains actual values of a row from the dataset, corresponding to each of the header names.
'''
# Zip lists: zipped_lists
zipped_lists = zip(feature_names, row_vals)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Print the dictionary
print(rs_dict)
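# Illustrative (hypothetical) inputs, since feature_names and row_vals are provided
# by the exercise environment: zipping ['CountryName', 'IndicatorCode'] with
# ['Arab World', 'SP.ADO.TFRT'] would yield
# {'CountryName': 'Arab World', 'IndicatorCode': 'SP.ADO.TFRT'}.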
| 44 | 219 | 0.795455 | [
"MIT"
] | Baidaly/datacamp-samples | 10 - python-data-science-toolbox-part-2/case_study/1 - dictionaries for data science.py | 704 | Python |
# -*- coding: utf-8 -*-
# file: data_utils.py
# author: songyouwei <[email protected]>
# Copyright (C) 2018. All Rights Reserved.
import os
import pickle
import numpy as np
import tqdm
from findfile import find_file
from google_drive_downloader.google_drive_downloader import GoogleDriveDownloader as gdd
from torch.utils.data import Dataset
from transformers import AutoTokenizer
from pyabsa.core.apc.dataset_utils.apc_utils import load_apc_datasets
from pyabsa.utils.pyabsa_utils import check_and_fix_labels
def prepare_glove840_embedding(glove_path):
glove840_id = '1G-vd6W1oF9ByyJ-pzp9dcqKnr_plh4Em'
if not os.path.exists(glove_path):
os.mkdir(glove_path)
elif os.path.isfile(glove_path):
return glove_path
elif os.path.isdir(glove_path):
embedding_file = None
dir_path = os.path.dirname(glove_path)
if find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip'):
embedding_file = find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip')[0]
elif find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip'):
embedding_file = find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip')[0]
elif find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip'):
embedding_file = find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip')[0]
if embedding_file:
print('Find potential embedding files: {}'.format(embedding_file))
return embedding_file
zip_glove_path = os.path.join(glove_path, '__glove__.840B.300d.txt.zip')
print('No GloVe embedding found at {},'
' downloading __glove__.840B.300d.txt (2GB transferred / 5.5GB unzipped)...'.format(glove_path))
gdd.download_file_from_google_drive(file_id=glove840_id,
dest_path=zip_glove_path,
unzip=True
)
glove_path = find_file(glove_path, 'txt', exclude_key='.zip')
return glove_path
def build_tokenizer(dataset_list, max_seq_len, dat_fname, opt):
if os.path.exists(os.path.join(opt.dataset_name, dat_fname)):
print('Loading tokenizer on {}'.format(os.path.join(opt.dataset_name, dat_fname)))
tokenizer = pickle.load(open(os.path.join(opt.dataset_name, dat_fname), 'rb'))
else:
text = ''
for dataset_type in dataset_list:
for file in dataset_list[dataset_type]:
fin = open(file, 'r', encoding='utf-8', newline='\n', errors='ignore')
lines = fin.readlines()
fin.close()
for i in range(0, len(lines), 3):
text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
aspect = lines[i + 1].lower().strip()
text_raw = text_left + " " + aspect + " " + text_right
text += text_raw + " "
tokenizer = Tokenizer(max_seq_len)
tokenizer.fit_on_text(text)
pickle.dump(tokenizer, open(os.path.join(opt.dataset_name, dat_fname), 'wb'))
return tokenizer
def _load_word_vec(path, word2idx=None, embed_dim=300):
fin = open(path, 'r', encoding='utf-8', newline='\n', errors='ignore')
word_vec = {}
for line in tqdm.tqdm(fin, postfix='Loading embedding file...'):
tokens = line.rstrip().split()
word, vec = ' '.join(tokens[:-embed_dim]), tokens[-embed_dim:]
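        # the last embed_dim fields are the vector; everything before them is the
        # token itself (some GloVe-840B entries contain spaces, hence the join)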
if word in word2idx.keys():
word_vec[word] = np.asarray(vec, dtype='float32')
return word_vec
def build_embedding_matrix(word2idx, embed_dim, dat_fname, opt):
if os.path.exists(os.path.join(opt.dataset_name, dat_fname)):
print('Loading cached embedding_matrix for {}'.format(os.path.join(opt.dataset_name, dat_fname)))
embedding_matrix = pickle.load(open(os.path.join(opt.dataset_name, dat_fname), 'rb'))
else:
print('Extracting embedding_matrix for {}'.format(dat_fname))
glove_path = prepare_glove840_embedding(opt.dataset_name)
embedding_matrix = np.zeros((len(word2idx) + 2, embed_dim)) # idx 0 and len(word2idx)+1 are all-zeros
word_vec = _load_word_vec(glove_path, word2idx=word2idx, embed_dim=embed_dim)
for word, i in tqdm.tqdm(word2idx.items(), postfix='Building embedding_matrix {}'.format(dat_fname)):
vec = word_vec.get(word)
if vec is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = vec
pickle.dump(embedding_matrix, open(os.path.join(opt.dataset_name, dat_fname), 'wb'))
return embedding_matrix
def pad_and_truncate(sequence, maxlen, dtype='int64', padding='post', truncating='post', value=0):
x = (np.ones(maxlen) * value).astype(dtype)
if truncating == 'pre':
trunc = sequence[-maxlen:]
else:
trunc = sequence[:maxlen]
trunc = np.asarray(trunc, dtype=dtype)
if padding == 'post':
x[:len(trunc)] = trunc
else:
x[-len(trunc):] = trunc
return x
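# e.g. (illustrative) pad_and_truncate([3, 7, 2], maxlen=5) -> array([3, 7, 2, 0, 0])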
class Tokenizer(object):
def __init__(self, max_seq_len, lower=True):
self.lower = lower
self.max_seq_len = max_seq_len
self.word2idx = {}
self.idx2word = {}
self.idx = 1
def fit_on_text(self, text):
if self.lower:
text = text.lower()
words = text.split()
for word in words:
if word not in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
if self.lower:
text = text.lower()
words = text.split()
unknownidx = len(self.word2idx) + 1
sequence = [self.word2idx[w] if w in self.word2idx else unknownidx for w in words]
if len(sequence) == 0:
sequence = [0]
if reverse:
sequence = sequence[::-1]
return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)
class Tokenizer4Pretraining:
def __init__(self, max_seq_len, pretrained_bert_name):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_bert_name)
self.max_seq_len = max_seq_len
def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
sequence = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
if len(sequence) == 0:
sequence = [0]
if reverse:
sequence = sequence[::-1]
return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)
class BERTClassificationDataset(Dataset):
bert_baseline_input_colses = {
'bert': ['text_bert_indices']
}
def __init__(self, dataset_list, tokenizer, opt):
lines = load_apc_datasets(dataset_list)
all_data = []
label_set = set()
for i in tqdm.tqdm(range(len(lines)), postfix='building word indices...'):
line = lines[i].strip().split('$LABEL$')
text, label = line[0], line[1]
text = text.strip().lower()
label = label.strip().lower()
text_indices = tokenizer.text_to_sequence('[CLS] {} [SEP]'.format(text))
label = int(label)
data = {
'text_bert_indices': text_indices,
'label': label,
}
label_set.add(label)
all_data.append(data)
check_and_fix_labels(label_set, 'label', all_data)
opt.polarities_dim = len(label_set)
self.data = all_data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
| 38.661765 | 110 | 0.624572 | [
"MIT"
] | Descartes627/PyABSA | pyabsa/core/tc/classic/__bert__/dataset_utils/data_utils_for_training.py | 7,887 | Python |
from typing import Any, Dict, Optional
import lumos.numpy as lnp
from lumos.models.base import StateSpaceModel, state_space_io
from lumos.models.kinematics import TrackPosition2D
from lumos.models.vehicles.simple_vehicle import SimpleVehicle
# Combine the signals to create the names. TODO: can we make it more automatic?
@state_space_io(
states=TrackPosition2D.get_direct_group_names("states")
+ SimpleVehicle.get_direct_group_names("states"),
inputs=SimpleVehicle.get_direct_group_names("inputs")
+ ("track_curvature", "track_heading"),
con_outputs=(
"vehicle.slip_ratio_fl",
"vehicle.slip_ratio_fr",
"vehicle.slip_ratio_rl",
"vehicle.slip_ratio_rr",
"vehicle.slip_angle_fl",
"vehicle.slip_angle_fr",
"vehicle.slip_angle_rl",
"vehicle.slip_angle_rr",
),
residuals=TrackPosition2D.get_direct_group_names("residuals")
+ SimpleVehicle.get_direct_group_names("residuals"),
)
class SimpleVehicleOnTrack(StateSpaceModel):
_submodel_names = ("vehicle", "kinematics")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classmethod
def get_default_submodel_config(self):
return {
"vehicle": "SimpleVehicle",
"kinematics": "TrackPosition2D",
}
def forward(self, states: lnp.ndarray, inputs: lnp.ndarray, mesh: float):
# Pick out the vehicle inputs
vehicle_inputs = {
k: inputs[k] for k in self.get_submodel("vehicle").get_group_names("inputs")
}
# Pick out vehicle states
vehicle_states = {
k: states[k] for k in self.get_submodel("vehicle").get_group_names("states")
}
# Pick out vehicle params. NOT DONE! NOT EASY!
vehicle_return = self.get_submodel("vehicle").forward(
states=vehicle_states, inputs=vehicle_inputs
)
# Call Kinematics model
# Pick out states
kinematic_states = {
k: states[k]
for k in self.get_submodel("kinematics").get_group_names("states")
}
# pick out inputs
# NOTE: this step is very custom, because the inputs come from vehicle model
# outputs
inputs_from_vehicle = {k: vehicle_states[k] for k in ("vx", "vy", "yaw_rate")}
track_inputs = {k: inputs[k] for k in ["track_curvature", "track_heading"]}
kinematic_inputs = dict(**track_inputs, **inputs_from_vehicle)
# Pick out vehicle params. NOT DONE! NOT EASY!
kinematics_return = self.get_submodel("kinematics").forward(
states=kinematic_states, inputs=kinematic_inputs, mesh=mesh,
)
# Convert to distance domain derivatives
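        # (chain rule: d(state)/ds = d(state)/dt * dt/ds, where dt/ds comes from
        # the kinematics model's "time" state derivative)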
dt_ds = kinematics_return.states_dot["time"]
states_dot = {
**kinematics_return.states_dot,
**{k: v * dt_ds for k, v in vehicle_return.states_dot.items()},
}
# Assemble final outputs
outputs = self.combine_submodel_outputs(
vehicle=vehicle_return.outputs, kinematics=kinematics_return.outputs
)
residuals = vehicle_return.residuals
return self.make_state_space_model_return(
states_dot=states_dot, outputs=outputs, residuals=residuals,
)
| 34.113402 | 88 | 0.654881 | [
"MIT"
] | numagic/lumos | lumos/models/simple_vehicle_on_track.py | 3,309 | Python |
lst = [1,2,3,4,5]
prod = 1
for i in lst:
prod*= i
print("Product of digits in list is:",prod) | 14.142857 | 43 | 0.59596 | [
"MIT"
] | AnubhavMadhav/Learn-Python | 06 Control Statements/productoflist.py | 99 | Python |
from rando.core.models import JSONModel
class FeedbackCategory(JSONModel):
filepath = 'api/feedback/categories.json'
| 20.5 | 45 | 0.788618 | [
"BSD-2-Clause"
] | camillemonchicourt/Geotrek-rando | rando/feedback/models.py | 123 | Python |
# -*- coding: utf-8 -*-
import sys
import os
from sqlalchemy import Table
from yaml import load,dump
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
print("Using Python SafeLoader")
distribution={'twosome':1,'bubble':2}
effectcategory={}
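# Note (added): these lookup tables appear to map string values from the SDE YAML
# onto the integer IDs stored in dgmEffects. effectcategory is left empty here, so
# effectCategory is written as NULL unless the mapping is filled in before importing.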
def importyaml(connection,metadata,sourcePath,language='en'):
print("Importing dogma effects")
dgmEffects = Table('dgmEffects',metadata)
trans = connection.begin()
with open(os.path.join(sourcePath,'fsd','dogmaEffects.yaml'),'r') as yamlstream:
print("importing {}".format(os.path.basename(yamlstream.name)))
dogmaEffects=load(yamlstream,Loader=SafeLoader)
print("{} loaded".format(os.path.basename(yamlstream.name)))
for dogmaEffectsid in dogmaEffects:
effect=dogmaEffects[dogmaEffectsid]
connection.execute(dgmEffects.insert(),
effectID=dogmaEffectsid,
effectName=effect.get('effectName'),
effectCategory=effectcategory.get(effect['effectCategory']),
description=effect.get('descriptionID',{}).get(language),
guid=effect.get('guid'),
iconID=effect.get('iconID'),
isOffensive=effect['isOffensive'],
isAssistance=effect['isAssistance'],
durationAttributeID=effect.get('durationAttributeID'),
trackingSpeedAttributeID=effect.get('trackingSpeedAttributeID'),
dischargeAttributeID=effect.get('dischargeAttributeID'),
rangeAttributeID=effect.get('rangeAttributeID'),
falloffAttributeID=effect.get('falloffAttributeID'),
disallowAutoRepeat=effect.get('disallowAutoRepeat'),
published=effect.get('published'),
displayName=effect.get('displayNameID',{}).get(language),
isWarpSafe=effect.get('isWarpSafe'),
rangeChance=effect.get('rangeChance'),
electronicChance=effect.get('electronicChance'),
propulsionChance=effect.get('propulsionChance'),
distribution=distribution.get(effect.get('distribution')),
sfxName=effect.get('sfxName'),
npcUsageChanceAttributeID=effect.get('npcUsageChanceAttributeID'),
npcActivationChanceAttributeID=effect.get('npcActivationChanceAttributeID'),
fittingUsageChanceAttributeID=effect.get('fittingUsageChanceAttributeID'),
modifierInfo=dump(effect.get('modifierInfo'))
)
trans.commit()
| 51.864407 | 108 | 0.554248 | [
"MIT"
] | jayblunt/yamlloader | tableloader/tableFunctions/dogmaEffects.py | 3,060 | Python |
from django.urls import include, path
from rest_framework.routers import SimpleRouter
from grandchallenge.retina_api import views
app_name = "retina_api"
annotation_router = SimpleRouter()
annotation_router.register(
"singlepolygonannotation",
views.LegacySinglePolygonViewSet,
basename="singlepolygonannotation",
)
annotation_router.register(
"polygonannotationset",
views.LegacyPolygonAnnotationSetViewSet,
basename="polygonannotationset",
)
annotation_router.register(
"etdrsgridannotation",
views.ETDRSGridAnnotationViewSet,
basename="etdrsgridannotation",
)
annotation_router.register(
"imagequalityannotation",
views.ImageQualityAnnotationViewSet,
basename="imagequalityannotation",
)
annotation_router.register(
"imagepathologyannotation",
views.ImagePathologyAnnotationViewSet,
basename="imagepathologyannotation",
)
annotation_router.register(
"retinaimagepathologyannotation",
views.RetinaImagePathologyAnnotationViewSet,
basename="retinaimagepathologyannotation",
)
annotation_router.register(
"imagetextannotation",
views.ImageTextAnnotationViewSet,
basename="imagetextannotation",
)
urlpatterns = [
path("archives/", views.ArchiveView.as_view(), name="archives-api-view"),
path(
"archive_data/",
views.ArchiveAPIView.as_view(),
name="archive-data-api-view",
),
path(
"archive_data/<uuid:pk>/",
views.ArchiveAPIView.as_view(),
name="archive-data-api-view",
),
path(
"image/<str:image_type>/<str:patient_identifier>/<str:study_identifier>/<str:image_identifier>/<str:image_modality>/",
views.ImageView.as_view(),
name="image-api-view",
),
path(
"data/<str:data_type>/<int:user_id>/<str:archive_identifier>/<str:patient_identifier>/",
views.DataView.as_view(),
name="data-api-view",
),
path(
"annotation/polygon/<int:user_id>/<uuid:image_id>/",
views.PolygonListView.as_view(),
name="polygon-annotation-list-view",
),
path(
"annotation/polygon/users/<uuid:image_id>",
views.GradersWithPolygonAnnotationsListView.as_view(),
name="polygon-annotation-users-list-view",
),
path(
"annotation/landmark/users/<int:user_id>",
views.LandmarkAnnotationSetForImageList.as_view(),
name="landmark-annotation-images-list-view",
),
path(
"registration/octobs/<uuid:image_id>",
views.OctObsRegistrationRetrieve.as_view(),
name="octobs-registration-detail-view",
),
path(
"image/<uuid:image_id>/spacing/",
views.ImageElementSpacingView.as_view(),
name="image-element-spacing-view",
),
path("annotation/<int:user_id>/", include(annotation_router.urls)),
path(
"image/thumbnail/<uuid:pk>/",
views.B64ThumbnailAPIView.as_view(),
name="image-thumbnail",
),
path(
"image/thumbnail/<uuid:pk>/<int:width>/<int:height>/",
views.B64ThumbnailAPIView.as_view(),
name="image-thumbnail",
),
]
| 30.291262 | 126 | 0.684615 | [
"Apache-2.0"
] | MAHMOUDZAHERZORO/grand-challenge.org | app/grandchallenge/retina_api/urls.py | 3,120 | Python |
# coding: utf-8
"""
Mux Python - Copyright 2019 Mux Inc.
NOTE: This class is auto generated. Do not edit the class manually.
"""
from __future__ import absolute_import
import unittest
import mux_python
from mux_python.models.track import Track # noqa: E501
from mux_python.rest import ApiException
class TestTrack(unittest.TestCase):
"""Track unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTrack(self):
"""Test Track"""
# FIXME: construct object with mandatory attributes with example values
# model = mux_python.models.track.Track() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 19.054054 | 79 | 0.676596 | [
"MIT"
] | dgiacomo/mux-python | test/test_track.py | 705 | Python |
import os
import sys
import psutil
from monk.keras_prototype import prototype
from monk.compare_prototype import compare
from monk.pip_unit_tests.keras.common import print_start
from monk.pip_unit_tests.keras.common import print_status
import tensorflow as tf
if(tf.__version__[0] == '2'):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
def test_layer_global_average_pooling2d(system_dict):
forward = True;
test = "test_layer_global_average_pooling2d";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.global_average_pooling2d());
gtf.Compile_Network(network, data_shape=(3, 32, 32), use_gpu=False);
x = tf.placeholder(tf.float32, shape=(1, 32, 32, 3))
y = gtf.system_dict["local"]["model"](x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
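# A minimal driver sketch (not part of the original file); the system_dict layout
# below simply mirrors the keys this test reads and updates:
#
#   system_dict = {"total_tests": 0, "successful_tests": 0,
#                  "failed_tests_exceptions": [], "failed_tests_lists": [],
#                  "skipped_tests_lists": []}
#   system_dict = test_layer_global_average_pooling2d(system_dict)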
| 29.16 | 80 | 0.651578 | [
"Apache-2.0"
] | Aanisha/monk_v1 | monk/pip_unit_tests/keras/test_layer_global_average_pooling2d.py | 1,458 | Python |
from typing import Any
from sqlalchemy.ext.declarative import as_declarative, declared_attr
@as_declarative()
class Base:
id: Any
__name__: str
# Generate __tablename__ automatically
@declared_attr
def __tablename__(cls) -> str:
return cls.__name__.lower()
def to_dict(self):
return {c.name: getattr(self, c.name, None) for c in self.__table__.columns}
def dict(self):
return self.to_dict()
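# A minimal usage sketch (not part of the original module): any model inheriting from
# Base gets a lower-cased __tablename__ plus the dict helpers for free. The `User`
# model and its columns below are hypothetical examples.
#
#   from sqlalchemy import Column, Integer, String
#
#   class User(Base):                     # __tablename__ == "user"
#       id = Column(Integer, primary_key=True)
#       name = Column(String(50))
#
#   # user_instance.dict() -> {"id": ..., "name": ...}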
| 20.454545 | 84 | 0.686667 | [
"MIT"
] | Ed-XCF/bali | bali/db/declarative.py | 450 | Python |
"""The tests for the State vacuum Mqtt platform."""
from copy import deepcopy
import json
from homeassistant.components import mqtt, vacuum
from homeassistant.components.mqtt import CONF_COMMAND_TOPIC, CONF_STATE_TOPIC
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.components.mqtt.vacuum import CONF_SCHEMA, schema_state as mqttvacuum
from homeassistant.components.mqtt.vacuum.schema import services_to_strings
from homeassistant.components.mqtt.vacuum.schema_state import SERVICE_TO_STRING
from homeassistant.components.vacuum import (
ATTR_BATTERY_ICON,
ATTR_BATTERY_LEVEL,
ATTR_FAN_SPEED,
ATTR_FAN_SPEED_LIST,
DOMAIN,
SERVICE_CLEAN_SPOT,
SERVICE_LOCATE,
SERVICE_PAUSE,
SERVICE_RETURN_TO_BASE,
SERVICE_START,
SERVICE_STOP,
STATE_CLEANING,
STATE_DOCKED,
)
from homeassistant.const import (
CONF_NAME,
CONF_PLATFORM,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
ENTITY_MATCH_ALL,
)
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_fire_mqtt_message,
async_mock_mqtt_component,
)
from tests.components.vacuum import common
COMMAND_TOPIC = "vacuum/command"
SEND_COMMAND_TOPIC = "vacuum/send_command"
STATE_TOPIC = "vacuum/state"
DEFAULT_CONFIG = {
CONF_PLATFORM: "mqtt",
CONF_SCHEMA: "state",
CONF_NAME: "mqtttest",
CONF_COMMAND_TOPIC: COMMAND_TOPIC,
mqttvacuum.CONF_SEND_COMMAND_TOPIC: SEND_COMMAND_TOPIC,
CONF_STATE_TOPIC: STATE_TOPIC,
mqttvacuum.CONF_SET_FAN_SPEED_TOPIC: "vacuum/set_fan_speed",
mqttvacuum.CONF_FAN_SPEED_LIST: ["min", "medium", "high", "max"],
}
async def test_default_supported_features(hass, mqtt_mock):
"""Test that the correct supported features."""
assert await async_setup_component(
hass, vacuum.DOMAIN, {vacuum.DOMAIN: DEFAULT_CONFIG}
)
entity = hass.states.get("vacuum.mqtttest")
entity_features = entity.attributes.get(mqttvacuum.CONF_SUPPORTED_FEATURES, 0)
assert sorted(services_to_strings(entity_features, SERVICE_TO_STRING)) == sorted(
["start", "stop", "return_home", "battery", "status", "clean_spot"]
)
async def test_all_commands(hass, mqtt_mock):
"""Test simple commands send to the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.services.async_call(
DOMAIN, SERVICE_START, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "start", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_STOP, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "stop", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_PAUSE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "pause", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_LOCATE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "locate", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_CLEAN_SPOT, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(
COMMAND_TOPIC, "clean_spot", 0, False
)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_RETURN_TO_BASE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(
COMMAND_TOPIC, "return_to_base", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_set_fan_speed(hass, "medium", "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/set_fan_speed", "medium", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(hass, "44 FE 93", entity_id="vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/send_command", "44 FE 93", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(
hass, "44 FE 93", {"key": "value"}, entity_id="vacuum.mqtttest"
)
assert json.loads(mqtt_mock.async_publish.mock_calls[-1][1][1]) == {
"command": "44 FE 93",
"key": "value",
}
async def test_commands_without_supported_features(hass, mqtt_mock):
"""Test commands which are not supported by the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
services = mqttvacuum.STRING_TO_SERVICE["status"]
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
services, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.services.async_call(
DOMAIN, SERVICE_START, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_PAUSE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_STOP, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_RETURN_TO_BASE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_LOCATE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_CLEAN_SPOT, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_set_fan_speed(hass, "medium", "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(
hass, "44 FE 93", {"key": "value"}, entity_id="vacuum.mqtttest"
)
mqtt_mock.async_publish.assert_not_called()
async def test_status(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
message = """{
"battery_level": 54,
"state": "cleaning",
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
assert state.attributes.get(ATTR_FAN_SPEED) == "max"
message = """{
"battery_level": 61,
"state": "docked",
"fan_speed": "min"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_DOCKED
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-charging-60"
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 61
assert state.attributes.get(ATTR_FAN_SPEED) == "min"
assert state.attributes.get(ATTR_FAN_SPEED_LIST) == ["min", "medium", "high", "max"]
async def test_no_fan_vacuum(hass, mqtt_mock):
"""Test status updates from the vacuum when fan is not supported."""
config = deepcopy(DEFAULT_CONFIG)
del config[mqttvacuum.CONF_FAN_SPEED_LIST]
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.DEFAULT_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
message = """{
"battery_level": 54,
"state": "cleaning"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_FAN_SPEED) is None
assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
message = """{
"battery_level": 54,
"state": "cleaning",
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_FAN_SPEED) is None
assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
message = """{
"battery_level": 61,
"state": "docked"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_DOCKED
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-charging-60"
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 61
async def test_status_invalid_json(hass, mqtt_mock):
"""Test to make sure nothing breaks if the vacuum sends bad JSON."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
async_fire_mqtt_message(hass, "vacuum/state", '{"asdfasas false}')
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_UNKNOWN
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
config = deepcopy(DEFAULT_CONFIG)
config.update({"availability_topic": "availability-topic"})
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
state = hass.states.get("vacuum.mqtttest")
    assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "offline")
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_UNAVAILABLE
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
config = deepcopy(DEFAULT_CONFIG)
config.update(
{
"availability_topic": "availability-topic",
"payload_available": "good",
"payload_not_available": "nogood",
}
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "good")
state = hass.states.get("vacuum.mqtttest")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "nogood")
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_UNAVAILABLE
async def test_discovery_removal_vacuum(hass, mqtt_mock):
"""Test removal of discovered vacuum."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data = '{ "name": "Beer",' ' "command_topic": "test_topic"}'
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("vacuum.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", "")
await hass.async_block_till_done()
state = hass.states.get("vacuum.beer")
assert state is None
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = '{ "name": "Beer",' ' "command_topic": "test_topic#"}'
data2 = '{ "name": "Milk",' ' "command_topic": "test_topic"}'
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("vacuum.beer")
assert state is None
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("vacuum.milk")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("vacuum.beer")
assert state is None
async def test_discovery_update_vacuum(hass, mqtt_mock):
"""Test update of discovered vacuum."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = '{ "name": "Beer",' ' "command_topic": "test_topic"}'
data2 = '{ "name": "Milk",' ' "command_topic": "test_topic"}'
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("vacuum.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("vacuum.beer")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("vacuum.milk")
assert state is None
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
assert await async_setup_component(
hass,
vacuum.DOMAIN,
{
vacuum.DOMAIN: {
"platform": "mqtt",
"name": "test",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '{ "val": "100" }')
state = hass.states.get("vacuum.test")
assert state.attributes.get("val") == "100"
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
vacuum.DOMAIN,
{
vacuum.DOMAIN: {
"platform": "mqtt",
"name": "test",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
state = hass.states.get("vacuum.test")
assert state.attributes.get("val") is None
assert "JSON result was not a dictionary" in caplog.text
async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
vacuum.DOMAIN,
{
vacuum.DOMAIN: {
"platform": "mqtt",
"name": "test",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", "This is not JSON")
state = hass.states.get("vacuum.test")
assert state.attributes.get("val") is None
assert "Erroneous JSON: This is not JSON" in caplog.text
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic1" }'
)
data2 = (
'{ "name": "Beer",'
' "command_topic": "test_topic",'
' "json_attributes_topic": "attr-topic2" }'
)
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "100" }')
state = hass.states.get("vacuum.beer")
assert state.attributes.get("val") == "100"
# Change json_attributes_topic
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "50" }')
state = hass.states.get("vacuum.beer")
assert state.attributes.get("val") == "100"
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "attr-topic2", '{ "val": "75" }')
state = hass.states.get("vacuum.beer")
assert state.attributes.get("val") == "75"
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one vacuum per unique_id."""
await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
vacuum.DOMAIN,
{
vacuum.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
},
)
async_fire_mqtt_message(hass, "test-topic", "payload")
assert len(hass.states.async_entity_ids()) == 2
    # two entities are expected: one for the legacy all-vacuums group and one for the unique_id
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT vacuum device registry integration."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(
{
"platform": "mqtt",
"name": "Test 1",
"command_topic": "test-command-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
)
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.connections == {("mac", "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
config = {
"platform": "mqtt",
"name": "Test 1",
"command_topic": "test-command-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/vacuum/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Milk"
| 34.462541 | 88 | 0.675331 | [
"Apache-2.0"
] | FuqiangSong/home-assistant | tests/components/mqtt/test_state_vacuum.py | 21,160 | Python |
#--------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#--------------------------------------------------------------------------
import sys
from azure.core.pipeline.transport import HttpRequest
from azure.core import PipelineClient
from azure.core.pipeline.policies import RedirectPolicy
from azure.core.pipeline.policies import UserAgentPolicy
from azure.core.pipeline.policies import SansIOHTTPPolicy
def test_example_headers_policy():
url = "https://bing.com"
policies = [
UserAgentPolicy("myuseragent"),
RedirectPolicy()
]
# [START headers_policy]
from azure.core.pipeline.policies import HeadersPolicy
headers_policy = HeadersPolicy()
headers_policy.add_header('CustomValue', 'Foo')
# Or headers can be added per operation. These headers will supplement existing headers
# or those defined in the config headers policy. They will also overwrite existing
# identical headers.
policies.append(headers_policy)
client = PipelineClient(base_url=url, policies=policies)
request = client.get(url)
pipeline_response = client._pipeline.run(request, headers={'CustomValue': 'Bar'})
# [END headers_policy]
response = pipeline_response.http_response
assert response.status_code == 200
def test_example_user_agent_policy():
url = "https://bing.com"
redirect_policy = RedirectPolicy()
# [START user_agent_policy]
from azure.core.pipeline.policies import UserAgentPolicy
user_agent_policy = UserAgentPolicy()
# The user-agent policy allows you to append a custom value to the header.
user_agent_policy.add_user_agent("CustomValue")
# You can also pass in a custom value per operation to append to the end of the user-agent.
# This can be used together with the policy configuration to append multiple values.
policies=[
redirect_policy,
user_agent_policy,
]
client = PipelineClient(base_url=url, policies=policies)
request = client.get(url)
pipeline_response = client._pipeline.run(request, user_agent="AnotherValue")
# [END user_agent_policy]
response = pipeline_response.http_response
assert response.status_code == 200
def example_network_trace_logging():
filename = "log.txt"
url = "https://bing.com"
policies = [
UserAgentPolicy("myuseragent"),
RedirectPolicy()
]
# [START network_trace_logging_policy]
from azure.core.pipeline.policies import NetworkTraceLoggingPolicy
import sys
import logging
# Create a logger for the 'azure' SDK
logger = logging.getLogger("azure")
logger.setLevel(logging.DEBUG)
# Configure a console output
handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(handler)
# Configure a file output
file_handler = logging.FileHandler(filename)
logger.addHandler(file_handler)
# Enable network trace logging. This will be logged at DEBUG level.
# By default, logging is disabled.
logging_policy = NetworkTraceLoggingPolicy()
logging_policy.enable_http_logger = True
# The logger can also be enabled per operation.
policies.append(logging_policy)
client = PipelineClient(base_url=url, policies=policies)
request = client.get(url)
pipeline_response = client._pipeline.run(request, logging_enable=True)
# [END network_trace_logging_policy]
response = pipeline_response.http_response
assert response.status_code == 200
def example_proxy_policy():
# [START proxy_policy]
from azure.core.pipeline.policies import ProxyPolicy
proxy_policy = ProxyPolicy()
# Example
proxy_policy.proxies = {'http': 'http://10.10.1.10:3148'}
# Use basic auth
proxy_policy.proxies = {'https': 'http://user:[email protected]:1180/'}
# You can also configure proxies by setting the environment variables
# HTTP_PROXY and HTTPS_PROXY.
# [END proxy_policy]
def example_on_exception():
policy = SansIOHTTPPolicy()
request = HttpRequest("GET", "https://bing.com")
# [START on_exception]
try:
response = policy.next.send(request)
except Exception:
if not policy.on_exception(request):
raise
# or use
exc_type, exc_value, exc_traceback = sys.exc_info()
# [END on_exception]
| 34.639241 | 95 | 0.711858 | [
"MIT"
] | SanjayHukumRana/azure-sdk-for-python | sdk/core/azure-core/samples/test_example_sansio.py | 5,473 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Copyright (c) 2020-2021, Red Hat
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# STARTREMOVE (downstream)
DOCUMENTATION = r'''
module: openshift_process
short_description: Process an OpenShift template.openshift.io/v1 Template
version_added: "0.3.0"
author: "Fabian von Feilitzsch (@fabianvf)"
description:
- Processes a specified OpenShift template with the provided template.
- Templates can be provided inline, from a file, or specified by name and namespace in the cluster.
- Analogous to `oc process`.
- For CRUD operations on Template resources themselves, see the community.okd.k8s module.
extends_documentation_fragment:
- kubernetes.core.k8s_auth_options
- kubernetes.core.k8s_wait_options
- kubernetes.core.k8s_resource_options
requirements:
- "python >= 3.6"
- "kubernetes >= 12.0.0"
- "PyYAML >= 3.11"
options:
name:
description:
- The name of the Template to process.
- The Template must be present in the cluster.
- When provided, I(namespace) is required.
- Mutually exclusive with I(resource_definition) or I(src)
type: str
namespace:
description:
- The namespace that the template can be found in.
type: str
namespace_target:
description:
- The namespace that resources should be created, updated, or deleted in.
- Only used when I(state) is present or absent.
parameters:
description:
- 'A set of key: value pairs that will be used to set/override values in the Template.'
- Corresponds to the `--param` argument to oc process.
type: dict
parameter_file:
description:
- A path to a file containing template parameter values to override/set values in the Template.
- Corresponds to the `--param-file` argument to oc process.
type: str
state:
description:
- Determines what to do with the rendered Template.
- The state I(rendered) will render the Template based on the provided parameters, and return the rendered
objects in the I(resources) field. These can then be referenced in future tasks.
- The state I(present) will cause the resources in the rendered Template to be created if they do not
already exist, and patched if they do.
- The state I(absent) will delete the resources in the rendered Template.
type: str
default: rendered
choices: [ absent, present, rendered ]
'''
EXAMPLES = r'''
- name: Process a template in the cluster
community.okd.openshift_process:
name: nginx-example
namespace: openshift # only needed if using a template already on the server
parameters:
NAMESPACE: openshift
NAME: test123
state: rendered
register: result
- name: Create the rendered resources using apply
community.okd.k8s:
namespace: default
definition: '{{ item }}'
wait: yes
apply: yes
loop: '{{ result.resources }}'
- name: Process a template with parameters from an env file and create the resources
community.okd.openshift_process:
name: nginx-example
namespace: openshift
namespace_target: default
parameter_file: 'files/nginx.env'
state: present
wait: yes
- name: Process a local template and create the resources
community.okd.openshift_process:
src: files/example-template.yaml
parameter_file: files/example.env
namespace_target: default
state: present
- name: Process a local template, delete the resources, and wait for them to terminate
community.okd.openshift_process:
src: files/example-template.yaml
parameter_file: files/example.env
namespace_target: default
state: absent
wait: yes
'''
RETURN = r'''
result:
description:
- The created, patched, or otherwise present object. Will be empty in the case of a deletion.
returned: on success when state is present or absent
type: complex
contains:
apiVersion:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
contains:
name:
description: The name of the resource
type: str
namespace:
description: The namespace of the resource
type: str
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: dict
status:
description: Current status details for the object.
returned: success
type: complex
contains:
conditions:
type: complex
description: Array of status conditions for the object. Not guaranteed to be present
items:
description: Returned only when multiple yaml documents are passed to src or resource_definition
returned: when resource_definition or src contains list of objects
type: list
duration:
description: elapsed time of task in seconds
returned: when C(wait) is true
type: int
sample: 48
resources:
type: complex
description:
- The rendered resources defined in the Template
returned: on success when state is rendered
contains:
apiVersion:
description: The versioned schema of this representation of an object.
returned: success
type: str
kind:
description: Represents the REST resource this object represents.
returned: success
type: str
metadata:
description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
returned: success
type: complex
contains:
name:
description: The name of the resource
type: str
namespace:
description: The namespace of the resource
type: str
spec:
description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
returned: success
type: dict
status:
description: Current status details for the object.
returned: success
type: dict
contains:
conditions:
type: complex
description: Array of status conditions for the object. Not guaranteed to be present
'''
# ENDREMOVE (downstream)
try:
from ansible_collections.kubernetes.core.plugins.module_utils.ansiblemodule import AnsibleModule
except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.kubernetes.core.plugins.module_utils.args_common import (
AUTH_ARG_SPEC, RESOURCE_ARG_SPEC, WAIT_ARG_SPEC
)
def argspec():
argument_spec = {}
argument_spec.update(AUTH_ARG_SPEC)
argument_spec.update(WAIT_ARG_SPEC)
argument_spec.update(RESOURCE_ARG_SPEC)
argument_spec['state'] = dict(type='str', default='rendered', choices=['present', 'absent', 'rendered'])
argument_spec['namespace'] = dict(type='str')
argument_spec['namespace_target'] = dict(type='str')
argument_spec['parameters'] = dict(type='dict')
argument_spec['name'] = dict(type='str')
argument_spec['parameter_file'] = dict(type='str')
return argument_spec
def main():
argument_spec = argspec()
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
from ansible_collections.community.okd.plugins.module_utils.openshift_process import (
OpenShiftProcess)
openshift_process = OpenShiftProcess(module)
# remove_aliases from kubernetes.core's common requires the argspec attribute. Ideally, it should
# read that throught the module class, but we cannot change that.
openshift_process.argspec = argument_spec
openshift_process.execute_module()
if __name__ == '__main__':
main()
| 33.052632 | 110 | 0.696717 | [
"Apache-2.0"
] | saeedya/docker-ansible | venv/lib/python3.8/site-packages/ansible_collections/community/okd/plugins/modules/openshift_process.py | 8,164 | Python |
#!/usr/bin/env python
"""continue play youtube video"""
import mac_youtube
def _cli():
mac_youtube.play()
if __name__ == "__main__":
_cli()
| 12.666667 | 33 | 0.657895 | [
"Unlicense"
] | andrewp-as-is/mac-youtube.py | mac_youtube/play.py | 152 | Python |
# -*- encoding: utf-8 -*-
import json
import importlib
import os
import builtins
from multiprocessing import Process
from importlib.util import find_spec
__all__=['run']
def run():
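    """Listen on the configured Redis pub/sub channel and, for each valid
    'initialize' message, pop a uid from the message's uid_list and run the
    matching handlers.<type> module in a separate process."""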
from utils.RedisHelper import RedisHelper
_redis=RedisHelper()
_redis.pubsub=_redis.conn.pubsub()
_redis.pubsub.subscribe(_redis.sub_name)
sub_message=next(_redis.pubsub.listen())
    print(sub_message)  # subscription confirmation message
# on manage.py -e local
# {'type': 'subscribe', 'pattern': None, 'channel': b'nlp_test_pub', 'data': 1}
# if "subscribe"!=sub_message['type'] or _redis.sub_name!=sub_message["channel"].decode('utf-8','ignore'):
# raise "sub error"
for message in _redis.pubsub.listen():
if "message"!=message['type'] or _redis.sub_name!=sub_message["channel"].decode('utf-8','ignore'):
print('type erro')
continue
        # by default this decode should not raise
        message['data'] = message['data'].decode('utf-8', 'ignore')
        try:
            data = json.loads(message['data'])
        except Exception:
            # TODO: log this properly #
            print('json parse error', message)
            continue
        # validate the required fields
        if "type" not in data:
            continue
        ### for now only the "initialize" task type is handled
        if "initialize" != data["type"]:
            continue
        # get the unique id assigned to this task
        if "uid_list" not in data["data"]:
            continue
id_list = data["data"]["uid_list"]
try:
            uid = _redis.conn.rpop(id_list).decode('utf-8', 'ignore')
        except Exception as e:
            # rpop returns None when the list is empty, so uid would be unbound here
            print("uid error", id_list, e)
            continue
        if int(uid) >= data["data"]["sub_count"]:
            raise ValueError("uid index exceeded")
        ## TODO: improve this error reporting
        # os.environ['uid'] = uid
        # print("initialize uid is ", uid)
if find_spec('handlers.'+data.get("type",""),package='..'):
handlers=importlib.import_module('handlers.'+data.get("type",""),package='..')
        else:
            # no handler module found for this task type; skip the message
            continue
        # TODO: improve fault tolerance #
        # TODO: optimize
        # if hasattr(handlers, message.get("type", "")):
        #     handlers = getattr(handlers, message.get("type", ""))
p=Process(target=handlers.run,args=[data,int(uid)])
p.start()
p.join()
| 29.468354 | 111 | 0.530928 | [
"MIT"
] | Askr-ssy/r | commands/router.py | 2,424 | Python |
# MIT License
#
# Copyright (c) 2018-2019 Tskit Developers
# Copyright (c) 2015-2017 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module responsible for visualisations.
"""
import collections
import numbers
import svgwrite
import numpy as np
from _tskit import NULL
LEFT = "left"
RIGHT = "right"
TOP = "top"
BOTTOM = "bottom"
def check_orientation(orientation):
if orientation is None:
orientation = TOP
else:
orientation = orientation.lower()
orientations = [LEFT, RIGHT, TOP, BOTTOM]
if orientation not in orientations:
raise ValueError(
"Unknown orientiation: choose from {}".format(orientations))
return orientation
def check_max_tree_height(max_tree_height, allow_numeric=True):
if max_tree_height is None:
max_tree_height = "tree"
is_numeric = isinstance(max_tree_height, numbers.Real)
if max_tree_height not in ["tree", "ts"] and not allow_numeric:
raise ValueError("max_tree_height must be 'tree' or 'ts'")
if max_tree_height not in ["tree", "ts"] and (allow_numeric and not is_numeric):
raise ValueError(
"max_tree_height must be a numeric value or one of 'tree' or 'ts'")
return max_tree_height
def check_tree_height_scale(tree_height_scale):
if tree_height_scale is None:
tree_height_scale = "time"
if tree_height_scale not in ["time", "log_time", "rank"]:
raise ValueError("tree_height_scale must be 'time', 'log_time' or 'rank'")
return tree_height_scale
def check_format(format):
if format is None:
format = "SVG"
fmt = format.lower()
supported_formats = ["svg", "ascii", "unicode"]
if fmt not in supported_formats:
raise ValueError("Unknown format '{}'. Supported formats are {}".format(
format, supported_formats))
return fmt
def draw_tree(
tree, width=None, height=None, node_labels=None, node_colours=None,
mutation_labels=None, mutation_colours=None, format=None, edge_colours=None,
tree_height_scale=None, max_tree_height=None):
# See tree.draw() for documentation on these arguments.
fmt = check_format(format)
if fmt == "svg":
if width is None:
width = 200
if height is None:
height = 200
def remap(original_map, new_key, none_value):
if original_map is None:
return None
new_map = {}
for key, value in original_map.items():
if value is None:
new_map[key] = none_value
else:
new_map[key] = {new_key: value}
return new_map
# Old semantics were to not draw the node if colour is None.
# Setting opacity to zero has the same effect.
node_attrs = remap(node_colours, "fill", {'opacity': 0})
edge_attrs = remap(edge_colours, "stroke", {'opacity': 0})
mutation_attrs = remap(mutation_colours, "fill", {'opacity': 0})
node_label_attrs = None
tree = SvgTree(
tree, (width, height),
node_labels=node_labels,
mutation_labels=mutation_labels,
tree_height_scale=tree_height_scale,
max_tree_height=max_tree_height,
node_attrs=node_attrs, edge_attrs=edge_attrs,
node_label_attrs=node_label_attrs,
mutation_attrs=mutation_attrs)
return tree.drawing.tostring()
else:
if width is not None:
raise ValueError("Text trees do not support width")
if height is not None:
raise ValueError("Text trees do not support height")
if mutation_labels is not None:
raise ValueError("Text trees do not support mutation_labels")
if mutation_colours is not None:
raise ValueError("Text trees do not support mutation_colours")
if node_colours is not None:
raise ValueError("Text trees do not support node_colours")
if edge_colours is not None:
raise ValueError("Text trees do not support edge_colours")
if tree_height_scale is not None:
raise ValueError("Text trees do not support tree_height_scale")
use_ascii = fmt == "ascii"
text_tree = VerticalTextTree(
tree, node_labels=node_labels, max_tree_height=max_tree_height,
use_ascii=use_ascii, orientation=TOP)
return str(text_tree)
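# A minimal usage sketch (added, not part of tskit; assumes msprime is installed to
# provide an input tree). draw_tree() is normally reached through tskit.Tree.draw():
#
#   import msprime
#   tree = msprime.simulate(5, random_seed=1).first()
#   svg_text = draw_tree(tree, width=300, height=250, format="SVG")
#   ascii_text = draw_tree(tree, format="ascii")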
class SvgTreeSequence(object):
"""
Draw a TreeSequence in SVG.
"""
def __init__(
self, ts, size=None, tree_height_scale=None, max_tree_height=None,
node_labels=None, mutation_labels=None,
node_attrs=None, edge_attrs=None, node_label_attrs=None):
self.ts = ts
if size is None:
size = (200 * ts.num_trees, 200)
self.image_size = size
self.drawing = svgwrite.Drawing(size=self.image_size, debug=True)
self.node_labels = {u: str(u) for u in range(ts.num_nodes)}
# TODO add general padding arguments following matplotlib's terminology.
self.axes_x_offset = 15
self.axes_y_offset = 10
self.treebox_x_offset = self.axes_x_offset + 5
self.treebox_y_offset = self.axes_y_offset + 5
x = self.treebox_x_offset
treebox_width = size[0] - 2 * self.treebox_x_offset
treebox_height = size[1] - 2 * self.treebox_y_offset
tree_width = treebox_width / ts.num_trees
svg_trees = [
SvgTree(
tree, (tree_width, treebox_height),
max_tree_height="ts",
node_labels=node_labels,
mutation_labels=mutation_labels,
tree_height_scale=tree_height_scale,
node_attrs=node_attrs, edge_attrs=edge_attrs,
node_label_attrs=node_label_attrs)
for tree in ts.trees()]
ticks = []
y = self.treebox_y_offset
defs = self.drawing.defs
for tree, svg_tree in zip(ts.trees(), svg_trees):
defs.add(svg_tree.root_group)
for tree in ts.trees():
tree_id = "#tree_{}".format(tree.index)
use = self.drawing.use(tree_id, (x, y))
self.drawing.add(use)
ticks.append((x, tree.interval[0]))
x += tree_width
ticks.append((x, ts.sequence_length))
dwg = self.drawing
# # Debug --- draw the tree and axes boxes
# w = self.image_size[0] - 2 * self.treebox_x_offset
# h = self.image_size[1] - 2 * self.treebox_y_offset
# dwg.add(dwg.rect((self.treebox_x_offset, self.treebox_y_offset), (w, h),
# fill="white", fill_opacity=0, stroke="black", stroke_dasharray="15,15"))
# w = self.image_size[0] - 2 * self.axes_x_offset
# h = self.image_size[1] - 2 * self.axes_y_offset
# dwg.add(dwg.rect((self.axes_x_offset, self.axes_y_offset), (w, h),
# fill="white", fill_opacity=0, stroke="black", stroke_dasharray="5,5"))
axes_left = self.treebox_x_offset
axes_right = self.image_size[0] - self.treebox_x_offset
y = self.image_size[1] - 2 * self.axes_y_offset
dwg.add(dwg.line((axes_left, y), (axes_right, y), stroke="black"))
for x, genome_coord in ticks:
delta = 5
dwg.add(dwg.line((x, y - delta), (x, y + delta), stroke="black"))
dwg.add(dwg.text(
"{:.2f}".format(genome_coord), (x, y + 20),
font_size=14, text_anchor="middle", font_weight="bold"))
class SvgTree(object):
"""
An SVG representation of a single tree.
TODO should provide much more SVG structure which we document fully
    so that the SVG elements can be manipulated directly by the user.
For example, every edge should be given an SVG ID so that it can
be referred to and modified.
"""
def __init__(
self, tree, size=None, node_labels=None, mutation_labels=None,
tree_height_scale=None, max_tree_height=None,
node_attrs=None, edge_attrs=None, node_label_attrs=None,
mutation_attrs=None, mutation_label_attrs=None):
self.tree = tree
if size is None:
size = (200, 200)
self.image_size = size
self.setup_drawing()
self.treebox_x_offset = 10
self.treebox_y_offset = 10
self.treebox_width = size[0] - 2 * self.treebox_x_offset
self.assign_y_coordinates(tree_height_scale, max_tree_height)
self.node_x_coord_map = self.assign_x_coordinates(
tree, self.treebox_x_offset, self.treebox_width)
self.edge_attrs = {}
self.node_attrs = {}
self.node_label_attrs = {}
for u in tree.nodes():
self.edge_attrs[u] = {}
if edge_attrs is not None and u in edge_attrs:
self.edge_attrs[u].update(edge_attrs[u])
self.node_attrs[u] = {"r": 3}
if node_attrs is not None and u in node_attrs:
self.node_attrs[u].update(node_attrs[u])
label = ""
if node_labels is None:
label = str(u)
elif u in node_labels:
label = str(node_labels[u])
self.node_label_attrs[u] = {"text": label}
if node_label_attrs is not None and u in node_label_attrs:
self.node_label_attrs[u].update(node_label_attrs[u])
self.mutation_attrs = {}
self.mutation_label_attrs = {}
for site in tree.sites():
for mutation in site.mutations:
m = mutation.id
# We need to offset the rectangle so that it's centred
self.mutation_attrs[m] = {
"size": (6, 6), "transform": "translate(-3, -3)"}
if mutation_attrs is not None and m in mutation_attrs:
self.mutation_attrs[m].update(mutation_attrs[m])
label = ""
if mutation_labels is None:
label = str(m)
elif mutation.id in mutation_labels:
label = str(mutation_labels[m])
self.mutation_label_attrs[m] = {"text": label}
if mutation_label_attrs is not None and m in mutation_label_attrs:
self.mutation_label_attrs[m].update(mutation_label_attrs[m])
self.draw()
def setup_drawing(self):
self.drawing = svgwrite.Drawing(size=self.image_size, debug=True)
dwg = self.drawing
self.root_group = dwg.add(dwg.g(id='tree_{}'.format(self.tree.index)))
self.edges = self.root_group.add(dwg.g(id='edges', stroke="black", fill="none"))
self.symbols = self.root_group.add(dwg.g(id='symbols'))
self.nodes = self.symbols.add(dwg.g(class_='nodes'))
self.mutations = self.symbols.add(dwg.g(class_='mutations', fill="red"))
self.labels = self.root_group.add(dwg.g(id='labels', font_size=14))
self.node_labels = self.labels.add(dwg.g(class_='nodes'))
self.mutation_labels = self.labels.add(
dwg.g(class_='mutations', font_style="italic"))
self.left_labels = self.node_labels.add(dwg.g(text_anchor="start"))
self.mid_labels = self.node_labels.add(dwg.g(text_anchor="middle"))
self.right_labels = self.node_labels.add(dwg.g(text_anchor="end"))
self.mutation_left_labels = self.mutation_labels.add(dwg.g(text_anchor="start"))
self.mutation_right_labels = self.mutation_labels.add(dwg.g(text_anchor="end"))
def assign_y_coordinates(self, tree_height_scale, max_tree_height):
tree_height_scale = check_tree_height_scale(tree_height_scale)
max_tree_height = check_max_tree_height(
max_tree_height, tree_height_scale != "rank")
ts = self.tree.tree_sequence
node_time = ts.tables.nodes.time
if tree_height_scale == "rank":
assert tree_height_scale == "rank"
if max_tree_height == "tree":
# We only rank the times within the tree in this case.
t = np.zeros_like(node_time) + node_time[self.tree.left_root]
for u in self.tree.nodes():
t[u] = node_time[u]
node_time = t
depth = {t: 2 * j for j, t in enumerate(np.unique(node_time))}
node_height = [depth[node_time[u]] for u in range(ts.num_nodes)]
max_tree_height = max(depth.values())
else:
assert tree_height_scale in ["time", "log_time"]
if max_tree_height == "tree":
max_tree_height = max(self.tree.time(root) for root in self.tree.roots)
elif max_tree_height == "ts":
max_tree_height = ts.max_root_time
if tree_height_scale == "log_time":
                # add 1 so that we don't hit the log(0) = -inf error;
                # it just shifts the entire set of times by 1, so it shouldn't affect anything
node_height = np.log(ts.tables.nodes.time + 1)
elif tree_height_scale == "time":
node_height = node_time
assert float(max_tree_height) == max_tree_height
# In pathological cases, all the roots are at 0
if max_tree_height == 0:
max_tree_height = 1
# TODO should make this a parameter somewhere. This is padding to keep the
# node labels within the treebox
label_padding = 10
y_padding = self.treebox_y_offset + 2 * label_padding
mutations_over_root = any(
any(tree.parent(mut.node) == NULL for mut in tree.mutations())
for tree in ts.trees())
root_branch_length = 0
height = self.image_size[1]
if mutations_over_root:
# Allocate a fixed about of space to show the mutations on the
# 'root branch'
root_branch_length = height / 10 # FIXME just draw branch??
# y scaling
padding_numerator = (height - root_branch_length - 2 * y_padding)
if tree_height_scale == "log_time":
# again shift time by 1 in log(max_tree_height), so consistent
y_scale = padding_numerator / (np.log(max_tree_height + 1))
else:
y_scale = padding_numerator / max_tree_height
self.node_y_coord_map = [
height - y_scale * node_height[u] - y_padding
for u in range(ts.num_nodes)]
def assign_x_coordinates(self, tree, x_start, width):
num_leaves = len(list(tree.leaves()))
x_scale = width / (num_leaves + 1)
node_x_coord_map = {}
leaf_x = x_start
for root in tree.roots:
for u in tree.nodes(root, order="postorder"):
if tree.is_leaf(u):
leaf_x += x_scale
node_x_coord_map[u] = leaf_x
else:
child_coords = [node_x_coord_map[c] for c in tree.children(u)]
if len(child_coords) == 1:
node_x_coord_map[u] = child_coords[0]
else:
a = min(child_coords)
b = max(child_coords)
assert b - a > 1
node_x_coord_map[u] = a + (b - a) / 2
return node_x_coord_map
def draw(self):
dwg = self.drawing
node_x_coord_map = self.node_x_coord_map
node_y_coord_map = self.node_y_coord_map
tree = self.tree
node_mutations = collections.defaultdict(list)
for site in tree.sites():
for mutation in site.mutations:
node_mutations[mutation.node].append(mutation)
for u in tree.nodes():
pu = node_x_coord_map[u], node_y_coord_map[u]
node_id = "node_{}_{}".format(tree.index, u)
self.nodes.add(dwg.circle(id=node_id, center=pu, **self.node_attrs[u]))
dx = 0
dy = -5
labels = self.mid_labels
if tree.is_leaf(u):
dy = 20
elif tree.parent(u) != NULL:
dx = 5
if tree.left_sib(u) == NULL:
dx *= -1
labels = self.right_labels
else:
labels = self.left_labels
# TODO add ID to node label text.
# TODO get rid of these manual positioning tweaks and add them
# as offsets the user can access via a transform or something.
labels.add(dwg.text(
insert=(pu[0] + dx, pu[1] + dy), **self.node_label_attrs[u]))
v = tree.parent(u)
if v != NULL:
edge_id = "edge_{}_{}".format(tree.index, u)
pv = node_x_coord_map[v], node_y_coord_map[v]
path = dwg.path(
[("M", pu), ("V", pv[1]), ("H", pv[0])], id=edge_id,
**self.edge_attrs[u])
self.edges.add(path)
else:
# FIXME this is pretty crappy for spacing mutations over a root.
pv = (pu[0], pu[1] - 20)
num_mutations = len(node_mutations[u])
delta = (pv[1] - pu[1]) / (num_mutations + 1)
x = pu[0]
y = pv[1] - delta
# TODO add mutation IDs
for mutation in reversed(node_mutations[u]):
self.mutations.add(dwg.rect(
insert=(x, y),
**self.mutation_attrs[mutation.id]))
dx = 5
if tree.left_sib(mutation.node) == NULL:
dx *= -1
labels = self.mutation_right_labels
else:
labels = self.mutation_left_labels
# TODO get rid of these manual positioning tweaks and add them
# as offsets the user can access via a transform or something.
dy = 4
labels.add(dwg.text(
insert=(x + dx, y + dy),
**self.mutation_label_attrs[mutation.id]))
y -= delta
class TextTreeSequence(object):
"""
    Draw a tree sequence as a horizontal line of trees.
"""
def __init__(
self, ts, node_labels=None, use_ascii=False, time_label_format=None,
position_label_format=None):
self.ts = ts
time_label_format = (
"{:.2f}" if time_label_format is None else time_label_format)
position_label_format = (
"{:.2f}" if position_label_format is None else position_label_format)
time = ts.tables.nodes.time
time_scale_labels = [
time_label_format.format(time[u]) for u in range(ts.num_nodes)]
position_scale_labels = [
position_label_format.format(x) for x in ts.breakpoints()]
trees = [
VerticalTextTree(
tree, max_tree_height="ts", node_labels=node_labels,
use_ascii=use_ascii)
for tree in self.ts.trees()]
self.height = 1 + max(tree.height for tree in trees)
self.width = sum(tree.width + 2 for tree in trees) - 1
max_time_scale_label_len = max(map(len, time_scale_labels))
self.width += 3 + max_time_scale_label_len + len(position_scale_labels[-1]) // 2
self.canvas = np.zeros((self.height, self.width), dtype=str)
self.canvas[:] = " "
vertical_sep = "|" if use_ascii else "┊"
x = 0
time_position = trees[0].time_position
for u, label in enumerate(map(to_np_unicode, time_scale_labels)):
y = time_position[u]
self.canvas[y, 0: label.shape[0]] = label
self.canvas[:, max_time_scale_label_len] = vertical_sep
x = 2 + max_time_scale_label_len
for j, tree in enumerate(trees):
pos_label = to_np_unicode(position_scale_labels[j])
k = len(pos_label)
label_x = max(x - k // 2 - 2, 0)
self.canvas[-1, label_x: label_x + k] = pos_label
h, w = tree.canvas.shape
self.canvas[-h - 1: -1, x: x + w - 1] = tree.canvas[:, :-1]
x += w
self.canvas[:, x] = vertical_sep
x += 2
pos_label = to_np_unicode(position_scale_labels[-1])
k = len(pos_label)
label_x = max(x - k // 2 - 2, 0)
self.canvas[-1, label_x: label_x + k] = pos_label
self.canvas[:, -1] = "\n"
def __str__(self):
return "".join(self.canvas.reshape(self.width * self.height))
def to_np_unicode(string):
"""
Converts the specified string to a numpy unicode array.
"""
    # TODO: what's the clean way of doing this with numpy?
# It really wants to create a zero-d Un array here
# which breaks the assignment below and we end up
# with n copies of the first char.
n = len(string)
np_string = np.zeros(n, dtype="U")
for j in range(n):
np_string[j] = string[j]
return np_string
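# Hedged usage sketch (illustration only): to_np_unicode gives a length-n array
# of single characters, which is what allows slice-assignment of labels into the
# 2D text canvases used above. All names here are local to the sketch.
def _demo_to_np_unicode():
    canvas = np.zeros((1, 12), dtype=str)
    canvas[:] = " "
    label = to_np_unicode("node_7")
    canvas[0, 2: 2 + label.shape[0]] = label
    return "".join(canvas[0])  # '  node_7    '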
def closest_left_node(tree, u):
"""
    Returns the node that is closest to u in a left-to-right sense.
"""
ret = NULL
while u != NULL and ret == NULL:
ret = tree.left_sib(u)
u = tree.parent(u)
return ret
def node_time_depth(tree, min_branch_length=None, max_tree_height="tree"):
"""
    Returns a tuple (depth, total_depth), where depth is a dictionary mapping
    nodes in the specified tree to their depth (from the root direction) and
    total_depth is the overall depth used. If min_branch_length is provided,
    it specifies the minimum length of each branch; if not specified, it
    defaults to 1 for every node.
"""
if min_branch_length is None:
min_branch_length = {u: 1 for u in range(tree.tree_sequence.num_nodes)}
time_node_map = collections.defaultdict(list)
current_depth = 0
depth = {}
# TODO this is basically the same code for the two cases. Refactor so that
# we use the same code.
if max_tree_height == "tree":
for u in tree.nodes():
time_node_map[tree.time(u)].append(u)
for t in sorted(time_node_map.keys()):
for u in time_node_map[t]:
for v in tree.children(u):
current_depth = max(current_depth, depth[v] + min_branch_length[v])
for u in time_node_map[t]:
depth[u] = current_depth
current_depth += 2
for root in tree.roots:
current_depth = max(current_depth, depth[root] + min_branch_length[root])
else:
assert max_tree_height == "ts"
ts = tree.tree_sequence
for node in ts.nodes():
time_node_map[node.time].append(node.id)
node_edges = collections.defaultdict(list)
for edge in ts.edges():
node_edges[edge.parent].append(edge)
for t in sorted(time_node_map.keys()):
for u in time_node_map[t]:
for edge in node_edges[u]:
v = edge.child
current_depth = max(current_depth, depth[v] + min_branch_length[v])
for u in time_node_map[t]:
depth[u] = current_depth
current_depth += 2
return depth, current_depth
class TextTree(object):
"""
    Draws a representation of a tree using unicode drawing characters written
to a 2D array.
"""
def __init__(
self, tree, node_labels=None, max_tree_height=None, use_ascii=False,
orientation=None):
self.tree = tree
self.max_tree_height = check_max_tree_height(
max_tree_height, allow_numeric=False)
self.use_ascii = use_ascii
self.orientation = check_orientation(orientation)
self.horizontal_line_char = '━'
self.vertical_line_char = '┃'
if use_ascii:
self.horizontal_line_char = '-'
self.vertical_line_char = '|'
# These are set below by the placement algorithms.
self.width = None
self.height = None
self.canvas = None
# Placement of nodes in the 2D space. Nodes are positioned in one
# dimension based on traversal ordering and by their time in the
# other dimension. These are mapped to x and y coordinates according
# to the orientation.
self.traversal_position = {} # Position of nodes in traversal space
self.time_position = {}
# Labels for nodes
self.node_labels = {}
# Set the node labels
for u in tree.nodes():
if node_labels is None:
# If we don't specify node_labels, default to node ID
self.node_labels[u] = str(u)
else:
                # If node_labels are specified, unlisted nodes default to the line character
self.node_labels[u] = self.default_node_label
if node_labels is not None:
for node, label in node_labels.items():
self.node_labels[node] = label
self._assign_time_positions()
self._assign_traversal_positions()
self.canvas = np.zeros((self.height, self.width), dtype=str)
self.canvas[:] = " "
self._draw()
self.canvas[:, -1] = "\n"
def __str__(self):
return "".join(self.canvas.reshape(self.width * self.height))
class VerticalTextTree(TextTree):
"""
Text tree rendering where root nodes are at the top and time goes downwards
into the present.
"""
@property
def default_node_label(self):
return self.vertical_line_char
def _assign_time_positions(self):
tree = self.tree
# TODO when we add mutations to the text tree we'll need to take it into
# account here. Presumably we need to get the maximum number of mutations
# per branch.
self.time_position, total_depth = node_time_depth(
tree, max_tree_height=self.max_tree_height)
self.height = total_depth - 1
def _assign_traversal_positions(self):
self.label_x = {}
x = 0
for root in self.tree.roots:
for u in self.tree.nodes(root, order="postorder"):
label_size = len(self.node_labels[u])
if self.tree.is_leaf(u):
self.traversal_position[u] = x + label_size // 2
self.label_x[u] = x
x += label_size + 1
else:
coords = [self.traversal_position[c] for c in self.tree.children(u)]
if len(coords) == 1:
self.traversal_position[u] = coords[0]
else:
a = min(coords)
b = max(coords)
child_mid = int(round((a + (b - a) / 2)))
self.traversal_position[u] = child_mid
self.label_x[u] = self.traversal_position[u] - label_size // 2
sib_x = -1
sib = closest_left_node(self.tree, u)
if sib != NULL:
sib_x = self.traversal_position[sib]
self.label_x[u] = max(sib_x + 1, self.label_x[u])
x = max(x, self.label_x[u] + label_size + 1)
assert self.label_x[u] >= 0
x += 1
self.width = x - 1
def _draw(self):
if self.use_ascii:
left_child = "+"
right_child = "+"
mid_parent = "+"
mid_parent_child = "+"
mid_child = "+"
elif self.orientation == TOP:
left_child = "┏"
right_child = "┓"
mid_parent = "┻"
mid_parent_child = "╋"
mid_child = "┳"
else:
left_child = "┗"
right_child = "┛"
mid_parent = "┳"
mid_parent_child = "╋"
mid_child = "┻"
for u in self.tree.nodes():
xu = self.traversal_position[u]
yu = self.time_position[u]
label = to_np_unicode(self.node_labels[u])
label_len = label.shape[0]
label_x = self.label_x[u]
assert label_x >= 0
self.canvas[yu, label_x: label_x + label_len] = label
children = self.tree.children(u)
if len(children) > 0:
if len(children) == 1:
yv = self.time_position[children[0]]
self.canvas[yv: yu, xu] = self.vertical_line_char
else:
left = min(self.traversal_position[v] for v in children)
right = max(self.traversal_position[v] for v in children)
y = yu - 1
self.canvas[y, left + 1: right] = self.horizontal_line_char
self.canvas[y, xu] = mid_parent
for v in children:
xv = self.traversal_position[v]
yv = self.time_position[v]
self.canvas[yv: yu, xv] = self.vertical_line_char
mid_char = mid_parent_child if xv == xu else mid_child
self.canvas[y, xv] = mid_char
self.canvas[y, left] = left_child
self.canvas[y, right] = right_child
# print(self.canvas)
if self.orientation == TOP:
self.canvas = np.flip(self.canvas, axis=0)
# Reverse the time positions so that we can use them in the tree
# sequence drawing as well.
flipped_time_position = {
u: self.height - y - 1 for u, y in self.time_position.items()}
self.time_position = flipped_time_position
class HorizontalTextTree(TextTree):
"""
Text tree rendering where root nodes are at the left and time goes
rightwards into the present.
"""
@property
def default_node_label(self):
return self.horizontal_line_char
def _assign_time_positions(self):
# TODO when we add mutations to the text tree we'll need to take it into
# account here. Presumably we need to get the maximum number of mutations
# per branch.
self.time_position, total_depth = node_time_depth(
self.tree, {u: 1 + len(self.node_labels[u]) for u in self.tree.nodes()})
self.width = total_depth
def _assign_traversal_positions(self):
y = 0
for root in self.tree.roots:
for u in self.tree.nodes(root, order="postorder"):
if self.tree.is_leaf(u):
self.traversal_position[u] = y
y += 2
else:
coords = [self.traversal_position[c] for c in self.tree.children(u)]
if len(coords) == 1:
self.traversal_position[u] = coords[0]
else:
a = min(coords)
b = max(coords)
child_mid = int(round((a + (b - a) / 2)))
self.traversal_position[u] = child_mid
y += 1
self.height = y - 2
def _draw(self):
if self.use_ascii:
top_across = "+"
bot_across = "+"
mid_parent = "+"
mid_parent_child = "+"
mid_child = "+"
elif self.orientation == LEFT:
top_across = "┏"
bot_across = "┗"
mid_parent = "┫"
mid_parent_child = "╋"
mid_child = "┣"
else:
top_across = "┓"
bot_across = "┛"
mid_parent = "┣"
mid_parent_child = "╋"
mid_child = "┫"
# Draw in root-right mode as the coordinates go in the expected direction.
for u in self.tree.nodes():
yu = self.traversal_position[u]
xu = self.time_position[u]
label = to_np_unicode(self.node_labels[u])
if self.orientation == LEFT:
# We flip the array at the end so need to reverse the label.
label = label[::-1]
label_len = label.shape[0]
self.canvas[yu, xu: xu + label_len] = label
children = self.tree.children(u)
if len(children) > 0:
if len(children) == 1:
xv = self.time_position[children[0]]
self.canvas[yu, xv: xu] = self.horizontal_line_char
else:
bot = min(self.traversal_position[v] for v in children)
top = max(self.traversal_position[v] for v in children)
x = xu - 1
self.canvas[bot + 1: top, x] = self.vertical_line_char
self.canvas[yu, x] = mid_parent
for v in children:
yv = self.traversal_position[v]
xv = self.time_position[v]
self.canvas[yv, xv: x] = self.horizontal_line_char
mid_char = mid_parent_child if yv == yu else mid_child
self.canvas[yv, x] = mid_char
self.canvas[bot, x] = top_across
self.canvas[top, x] = bot_across
if self.orientation == LEFT:
self.canvas = np.flip(self.canvas, axis=1)
# Move the padding to the left.
self.canvas[:, :-1] = self.canvas[:, 1:]
self.canvas[:, -1] = " "
# print(self.canvas)
| 40.776066 | 89 | 0.573297 | [
"MIT"
] | brianzhang01/tskit | python/tskit/drawing.py | 34,461 | Python |
import pytest
from shutil import copyfile
from shutil import copytree
import os
from cli.postman2robot import run as postman2robot
@pytest.fixture(scope="function")
def collection(tmpdir, request):
if request.param:
test_name = request.param
else:
test_name = request.node.name
filepath = request.module.__file__
test_dir, f = os.path.split(filepath)
# Resolve paths
resources = os.path.join(test_dir, "resources")
resources_collection = os.path.join(resources, test_name + "_collection.json")
resources_validation = os.path.join(resources, "validation" , test_name + "_library.py")
# Prepare env
copyfile(resources_collection, os.path.join(tmpdir, "collection.json"))
copyfile(resources_validation, os.path.join(tmpdir, "validation_library.py"))
    # We work in the temp dir
old_cwd = os.getcwd()
os.chdir(tmpdir)
library = tmpdir.join('./postman_libraries/newmanTest.py')
library_validation = tmpdir.join('./validation_library.py')
yield {"generated": library, "expected": library_validation}
os.chdir(old_cwd)
class Test_Postman2Robot:
cli_args = {
"--ifile": "collection.json",
"--ofile": "./postman_libraries"
}
@pytest.mark.parametrize("collection", [("test_simple")], indirect=True)
def test_simple(self, tmpdir, collection):
"""
Given collection.json, generate: library.py
"""
postman2robot(self.cli_args)
assert os.path.isfile(collection["generated"])
assert collection["generated"].read() == collection["expected"].read()
@pytest.mark.parametrize("collection", [("test_with_folder")], indirect=True)
def test_with_folder(self, tmpdir, collection):
"""
Given collection.json, generate: library.py
"""
postman2robot(self.cli_args)
assert os.path.isfile(collection["generated"])
assert collection["generated"].read() == collection["expected"].read()
@pytest.mark.parametrize("collection", [("test_with_variables")], indirect=True)
def test_with_variables(self, tmpdir, collection):
"""
Given collection.json, generate: library.py
"""
postman2robot(self.cli_args)
assert os.path.isfile(collection["generated"])
assert collection["generated"].read() == collection["expected"].read() | 30.189873 | 92 | 0.672117 | [
"MIT"
] | xNok/postman2robotframework | test/test_cli_postman2robot.py | 2,385 | Python |
textSpeakDictionary = {
"rs" : "risos" ,
"tmb" : "também"
}
# print the entire dictionary
print( "Dicionário =" , textSpeakDictionary )
# print only the value associated with the key "rs"
print( "\nrs =" , textSpeakDictionary["rs"])
# prompt asking the user for input
key = input("\nO que você gostaria de converter? : ")
print( key , "=" , textSpeakDictionary[key] ) | 27.571429 | 53 | 0.670984 | [
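# A hedged extra example (illustration only, not part of the original script):
# dict.get returns a fallback value instead of raising a KeyError when the
# abbreviation is not in the dictionary. The "lol" key is made up here.
print( "lol =" , textSpeakDictionary.get("lol", "no translation yet") )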
"MIT"
] | daianasousa/Conversor-de-expressoes | Traduzindo_Palavras.py | 393 | Python |
from pygame import event
from albow.widgets.Control import Control
class CheckControl(Control):
def mouse_down(self, e: event):
self.value = not self.value
def get_highlighted(self):
return self.value
| 15.466667 | 41 | 0.698276 | [
"MIT"
] | hasii2011/albow-python-3 | albow/widgets/CheckControl.py | 232 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import vcgencmd
from gpiozero import OutputDevice
# IMPORTANT: the maximum temperature is 85°C and the CPU is throttled at 80°C
ON_THRESHOLD = 70 # (degrees Celsius) fan starts at this temperature
OFF_THRESHOLD = 60 # (degrees Celsius) fan shuts down at this temperature
SLEEP_INTERVAL = 5 # (seconds) how often the core temperature is checked
GPIO_PIN = 18 # (number) which GPIO pin is used to control the fan
def main():
vc = vcgencmd.Vcgencmd()
fan = OutputDevice(GPIO_PIN)
while True:
temperature = int(vc.measure_temp())
        # NOTE: fan.value is 1 when the fan is on and 0 when it is off
if temperature > ON_THRESHOLD and not fan.value:
fan.on()
elif fan.value and temperature < OFF_THRESHOLD:
fan.off()
time.sleep(SLEEP_INTERVAL)
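# Hedged sketch (illustration only, not part of the original script): the same
# on/off hysteresis rule used in main(), factored into a pure function so the
# thresholds can be checked without GPIO or vcgencmd. The function name is made
# up for this sketch.
def next_fan_state(temperature, fan_on):
    if temperature > ON_THRESHOLD and not fan_on:
        return True
    if fan_on and temperature < OFF_THRESHOLD:
        return False
    return fan_on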
if __name__ == '__main__':
main()
| 26.424242 | 73 | 0.673165 | [
"MIT"
] | olivierbenard/raspberrypi-fan-controller | src/fancontroller.py | 874 | Python |
"""
"""
import snowmobile
sn = snowmobile.connect(delay=True)
sn.alive # > False
type(sn.con) # > NoneType
type(sn.cfg) # > snowmobile.core.configuration.Configuration
str(sn.cfg) # > snowmobile.Configuration('snowmobile.toml')
print(sn.cfg.location) # > /path/to/your/snowmobile.toml
sn.cfg.connection.default_alias # > 'creds1'
print(sn.alive)
type(sn) # > snowmobile.core.connection.Snowmobile
type(sn.cfg) # > snowmobile.core.configuration.Configuration
str(sn.cfg) # > snowmobile.Configuration('snowmobile.toml')
type(sn.con) # > snowflake.connector.connection.SnowflakeConnection
type(sn.cursor) # > snowflake.connector.cursor.SnowflakeCursor
df1 = sn.query("select 1") # == pd.read_sql()
type(df1) # > pandas.core.frame.DataFrame
cur1 = sn.query("select 1", as_df=False) # == SnowflakeConnection.cursor().execute()
type(cur1) # > snowflake.connector.cursor.SnowflakeCursor
import pandas as pd
df2 = pd.read_sql(sql="select 1", con=sn.con)
cur2 = sn.con.cursor().execute("select 1")
print(df2.equals(df1)) # > True
print(cur1.fetchone() == cur2.fetchone()) # > True
# -- complete example; should run 'as is' --
| 27.404762 | 86 | 0.711555 | [
"MIT"
] | GEM7318/Snowmobile | docs/snippets/configuration.py | 1,151 | Python |
import os
import sys
import numpy as np
import torch
import pickle
import logging
log = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
)
class Graph4D():
def __init__(self, num_envs=4096, env_size=(4,4), steps=128, save=False, data_root='./data/', num_categories=None, verbose=False):
self.num_envs = num_envs
self.steps = steps
self.env_size = env_size
        self.save = save
self.data_root = data_root
self.num_categories = num_categories
self.generate_data(verbose=verbose)
log.info('''Generated Data:
\t\t\t {} Environments...
\t\t\t {} env size...
\t\t\t {} steps in each...
\t\t\t {} observable one hot categories... '''.format(
num_envs, env_size, steps, self.num_categories
))
def square_env(self):
"""
Generate map where each vertex has a one hot categorical distribution
Returns:
(N,N,num_categories) matrix with one-hot categorical observations
"""
env_size = self.env_size
env = np.zeros((env_size[0], env_size[1], self.num_categories))
for i in range(env_size[0]):
# Randomly assign categories to each vertex in a row
category = np.random.randint(0, self.num_categories, env_size[1])
# One hot encode them
env[i, np.arange(category.size), category] = 1
return env
def update_location_4way(self, env, loc):
"""
Samples a valid four-way action and updates location
"""
length = env.shape[0]
valid = False
# print(loc, end=' --> ')
while not valid:
# Sample action
action = np.random.randint(0, 4)
# Move up
if action == 0:
if loc[0] - 1 >= 0:
# print('Moving up', end=' --> ')
loc[0] -= 1
valid = True
# Move right
elif action == 1:
if loc[1] + 1 < length:
# print('Moving Right', end=' --> ')
loc[1] += 1
valid = True
# Move down
elif action == 2:
if loc[0] + 1 < length:
# print('Moving Down', end=' --> ')
loc[0] += 1
valid = True
# Move left
elif action == 3:
if loc[1] - 1 >= 0:
# print('Moving Left', end=' --> ')
loc[1] -= 1
valid = True
# One hot encode action
act = np.zeros(4)
act[action] = 1
return act, loc
def trajectory_4way(self, env):
"""
        Generate a trajectory of an agent diffusing through a 4-way connected graph
At each point we sample the one-hot observation and take an action
0 = up
1 = right
2 = down
3 = left
Params:
steps (int): Number of steps to take
env (3d np array): environment in which to wander (NxNx(num_categories))
Returns
Observations (steps, num_categories), Actions (steps, 4)
"""
observations = np.zeros((self.steps, self.num_categories))
actions = np.zeros((self.steps, 4))
positions = np.zeros((self.steps, 2))
loc = np.random.randint(0, env.shape[0], 2) # Initial Location
for step in range(self.steps):
positions[step] = loc
obs = env[loc[0], loc[1]] # Observe scene
action, loc = self.update_location_4way(env, loc) # Sample action and new location
observations[step] = obs
actions[step] = action
return observations, actions, positions
def generate_data(self, verbose=False):
"""
Generates N square environments and trajectories ((observation, action) pairs)
for each environment
Params:
envs (int): number of environments to generate
steps (int): how many steps an agent initially takes in each environment
env_size (tuple): size of environment (should be something like (4,4), (9,9), etc...)
save (bool): whether or not to save the dataset
Returns:
Dict of "environments, observations, actions", each corresponding to:
environments: Array shape: (num_envs, env_size_x, env_size_y, num_categories),
observations: Array shape: (num_envs, steps, num_categories),
actions: Array shape: (num_envs, steps, 4)
"""
env_size = self.env_size
if self.num_categories == None:
self.num_categories = env_size[0] * env_size[1]
self.environments = np.zeros((self.num_envs, env_size[0], env_size[1], self.num_categories))
self.observations = np.zeros((self.num_envs, self.steps, self.num_categories))
self.actions = np.zeros((self.num_envs, self.steps, 4))
self.positions = np.zeros((self.num_envs, self.steps, 2))
for i in range(self.num_envs):
env = self.square_env() # Generate new environment
obs, acts, pos = self.trajectory_4way(env) # Generate random walk for that environment
self.environments[i] = env
self.observations[i] = obs
self.actions[i] = acts
self.positions[i] = pos
self.data = {'environments': self.environments, 'observations': self.observations, 'actions': self.actions, 'positions': self.positions}
if self.save:
name = os.path.join(self.data_root, 'four_way_graph.pickle')
with open(name, 'wb') as handle:
                pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
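# Hedged sketch (illustration only): decoding the one-hot action encoding used
# by update_location_4way back into a (row, column) displacement, with the same
# 0=up, 1=right, 2=down, 3=left convention. The helper name is made up here.
def decode_action(one_hot_action):
    moves = {0: (-1, 0), 1: (0, 1), 2: (1, 0), 3: (0, -1)}
    return moves[int(np.argmax(one_hot_action))]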
if __name__=='__main__':
print('Generating 20 (8,8) environments with 256 random steps in each.')
graph = Graph4D(num_envs=20, env_size=(8,8), steps=256)
data = graph.data
envs = graph.environments
observations = graph.observations
actions = graph.actions
positions = graph.positions
print('Envs,', envs.shape)
print('Obs', observations.shape)
print('Acts', actions.shape)
print('Pos', positions.shape) | 37.528409 | 144 | 0.543679 | [
"Apache-2.0"
] | Victorwz/Generative-Hippocampal-entorhinal-System | generate.py | 6,605 | Python |
def _zero_one_validator(x, function):
message = function + " must be a float between zero and one"
if x < 0.0 or x > 1.0:
raise ValueError(message)
return x
def _intenger_validator(x, function):
    message = function + " must be a positive integer"
if isinstance(x, int) and x > 0:
return x
raise TypeError(message)
def _early_stopping_validator(x):
if x:
        if x <= 0 or not isinstance(x, int):
            raise ValueError("Early stopping must be a positive integer greater than 0, or None to disable it")
return x
return x
def _float_validator(x, function):
message = function + " must be a positive float"
if x < 0 or type(x) != float:
raise ValueError(message)
return x
def _validate_dict_of_dicts(x, function):
    message = function + " must be a dict of dicts representing a symmetric matrix"
    if not isinstance(x, dict) or not all(isinstance(v, dict) for v in x.values()):
raise TypeError(message)
for k in x.keys():
if x[k].keys() != x.keys():
raise ValueError(message)
def _validate_route_example(x, fuel_costs, time_costs):
for i in x:
if not isinstance(i, int):
raise TypeError("The route provided must be a list of integers")
x_sorted = sorted(x)
dict_sorted = sorted(list(fuel_costs.keys()))
if x_sorted != dict_sorted:
raise ValueError("The ints in the random route provided must be in the dicts as keys")
if len(x) == len(list(fuel_costs.keys())) == len(list(time_costs.keys())):
pass
else:
raise ValueError("The dicts must have the same dimension and the route example must have the same lenght as the dicts keys")
| 30.642857 | 132 | 0.650932 | [
"MIT"
] | arubiales/scikit-route | skroute/_validators/_validators.py | 1,716 | Python |
######################################################################################
#FSSNet: Fast Semantic Segmentation for Scene Perception
#Paper-Link: https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8392426
######################################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from utils.activations import NON_LINEARITY
__all__ = ["FSSNet"]
# NON_LINEARITY = {
# 'ReLU': nn.ReLU(inplace=True),
# 'PReLU': nn.PReLU(),
# 'ReLu6': nn.ReLU6(inplace=True)
# }
class InitialBlock(nn.Module):
def __init__(self, ninput, noutput, non_linear='ReLU'):
super().__init__()
self.conv = nn.Conv2d(ninput, noutput-ninput, (3, 3), stride=2, padding=1, bias=False)
self.pool = nn.MaxPool2d(2, stride=2)
self.bn = nn.BatchNorm2d(noutput-ninput, eps=1e-3)
self.relu = NON_LINEARITY[non_linear]
def forward(self, input):
output = self.relu(self.bn(self.conv(input)))
output = torch.cat([output, self.pool(input)], 1)
return output
class DownsamplingBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, internal_ratio=4, kernel_size=3,
padding=0, dropout_prob=0., bias=False, non_linear='ReLU'):
super().__init__()
# Store parameters that are needed later
internal_channels = in_channels // internal_ratio
# Main branch - max pooling followed by feature map (channels) padding
self.main_max1 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=bias),
)
# Extension branch - 2x2 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution. Number
# of channels is doubled.
# 2x2 projection convolution with stride 2, no padding
self.ext_conv1 = nn.Sequential(
nn.Conv2d(in_channels, internal_channels, kernel_size=2, stride=2, bias=bias),
nn.BatchNorm2d(internal_channels),
NON_LINEARITY[non_linear]
)
# Convolution
self.ext_conv2 = nn.Sequential(
nn.Conv2d(internal_channels, internal_channels, kernel_size=kernel_size, stride=1, padding=padding,
bias=bias),
nn.BatchNorm2d(internal_channels),
NON_LINEARITY[non_linear]
)
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(internal_channels, out_channels, kernel_size=1, stride=1, bias=bias),
nn.BatchNorm2d(out_channels),
NON_LINEARITY[non_linear]
)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_prelu = NON_LINEARITY[non_linear]
def forward(self, x):
# Main branch shortcut
main = self.main_max1(x)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = self.out_prelu(main + ext)
return out
class UpsamplingBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, internal_ratio=4, kernel_size=2,
padding=0, dropout_prob=0., bias=False, non_linear='ReLU'):
super().__init__()
internal_channels = in_channels // internal_ratio
# Main branch - max pooling followed by feature map (channels) padding
self.main_conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels))
# Remember that the stride is the same as the kernel_size, just like
# the max pooling layers
# self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)
# Extension branch - 1x1 convolution, followed by a regular, dilated or
# asymmetric convolution, followed by another 1x1 convolution. Number
# of channels is doubled.
# 1x1 projection convolution with stride 1
self.ext_conv1 = nn.Sequential(
nn.Conv2d(in_channels, internal_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(internal_channels),
NON_LINEARITY[non_linear]
)
# Transposed convolution
self.ext_conv2 = nn.Sequential(
nn.ConvTranspose2d(internal_channels, internal_channels, kernel_size=kernel_size, stride=2, padding=padding,
output_padding=0, bias=bias),
nn.BatchNorm2d(internal_channels),
NON_LINEARITY[non_linear]
)
# 1x1 expansion convolution
self.ext_conv3 = nn.Sequential(
nn.Conv2d(internal_channels, out_channels, kernel_size=1, bias=bias),
nn.BatchNorm2d(out_channels),
NON_LINEARITY[non_linear]
)
self.ext_regul = nn.Dropout2d(p=dropout_prob)
# PReLU layer to apply after concatenating the branches
self.out_prelu = NON_LINEARITY[non_linear]
def forward(self, x, x_pre):
        # Main branch shortcut. Note: this differs from the original paper (Fig. 4 contradicts Fig. 9).
main = x + x_pre
main = self.main_conv1(main) # 2. conv first, follow up
main = F.interpolate(main, scale_factor=2, mode="bilinear", align_corners=True) # 1. up first, follow conv
# main = self.main_conv1(main)
# Extension branch
ext = self.ext_conv1(x)
ext = self.ext_conv2(ext)
ext = self.ext_conv3(ext)
ext = self.ext_regul(ext)
# Add main and extension branches
out = self.out_prelu(main + ext)
return out
class DilatedBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1,
dropout_prob=0., bias=False, non_linear='ReLU'):
super(DilatedBlock, self).__init__()
self.relu = NON_LINEARITY[non_linear]
self.internal_channels = in_channels // 4
# compress conv
self.conv1 = nn.Conv2d(in_channels, self.internal_channels, 1, bias=bias)
self.conv1_bn = nn.BatchNorm2d(self.internal_channels)
# a relu
self.conv2 = nn.Conv2d(self.internal_channels, self.internal_channels, kernel_size,
stride, padding=int((kernel_size - 1) / 2 * dilation), dilation=dilation, groups=1,
bias=bias)
self.conv2_bn = nn.BatchNorm2d(self.internal_channels)
# a relu
self.conv4 = nn.Conv2d(self.internal_channels, out_channels, 1, bias=bias)
self.conv4_bn = nn.BatchNorm2d(out_channels)
self.regul = nn.Dropout2d(p=dropout_prob)
def forward(self, x):
residual = x
main = self.relu(self.conv1_bn(self.conv1(x)))
main = self.relu(self.conv2_bn(self.conv2(main)))
main = self.conv4_bn(self.conv4(main))
main = self.regul(main)
out = self.relu(torch.add(main, residual))
return out
class Factorized_Block(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1,
dropout_prob=0., bias=False, non_linear='ReLU'):
super(Factorized_Block, self).__init__()
self.relu = NON_LINEARITY[non_linear]
self.internal_channels = in_channels // 4
self.compress_conv1 = nn.Conv2d(in_channels, self.internal_channels, 1, padding=0, bias=bias)
self.conv1_bn = nn.BatchNorm2d(self.internal_channels)
# here is relu
self.conv2_1 = nn.Conv2d(self.internal_channels, self.internal_channels, (kernel_size, 1), stride=(stride, 1),
padding=(int((kernel_size - 1) / 2 * dilation), 0), dilation=(dilation, 1), bias=bias)
self.conv2_1_bn = nn.BatchNorm2d(self.internal_channels)
self.conv2_2 = nn.Conv2d(self.internal_channels, self.internal_channels, (1, kernel_size), stride=(1, stride),
padding=(0, int((kernel_size - 1) / 2 * dilation)), dilation=(1, dilation), bias=bias)
self.conv2_2_bn = nn.BatchNorm2d(self.internal_channels)
# here is relu
self.extend_conv3 = nn.Conv2d(self.internal_channels, out_channels, 1, padding=0, bias=bias)
self.conv3_bn = nn.BatchNorm2d(out_channels)
self.regul = nn.Dropout2d(p=dropout_prob)
def forward(self, x):
residual = x
main = self.relu((self.conv1_bn(self.compress_conv1(x))))
main = self.relu(self.conv2_1_bn(self.conv2_1(main)))
main = self.relu(self.conv2_2_bn(self.conv2_2(main)))
main = self.conv3_bn(self.extend_conv3(main))
main = self.regul(main)
out = self.relu((torch.add(residual, main)))
return out
class FSSNet(nn.Module):
def __init__(self, classes):
super().__init__()
self.initial_block = InitialBlock(3, 16)
# Stage 1 - Encoder
self.downsample1_0 = DownsamplingBottleneck(16, 64, padding=1, dropout_prob=0.03)
self.factorized1_1 = Factorized_Block(64, 64, dropout_prob=0.03)
self.factorized1_2 = Factorized_Block(64, 64, dropout_prob=0.03)
self.factorized1_3 = Factorized_Block(64, 64, dropout_prob=0.03)
self.factorized1_4 = Factorized_Block(64, 64, dropout_prob=0.03)
# Stage 2 - Encoder
self.downsample2_0 = DownsamplingBottleneck(64, 128, padding=1, dropout_prob=0.3)
self.dilated2_1 = DilatedBlock(128, 128, dilation=2, dropout_prob=0.3)
self.dilated2_2 = DilatedBlock(128, 128, dilation=5, dropout_prob=0.3)
self.dilated2_3 = DilatedBlock(128, 128, dilation=9, dropout_prob=0.3)
self.dilated2_4 = DilatedBlock(128, 128, dilation=2, dropout_prob=0.3)
self.dilated2_5 = DilatedBlock(128, 128, dilation=5, dropout_prob=0.3)
self.dilated2_6 = DilatedBlock(128, 128, dilation=9, dropout_prob=0.3)
# Stage 4 - Decoder
self.upsample4_0 = UpsamplingBottleneck(128, 64, dropout_prob=0.3)
self.bottleneck4_1 = DilatedBlock(64, 64, dropout_prob=0.3)
self.bottleneck4_2 = DilatedBlock(64, 64, dropout_prob=0.3)
# Stage 5 - Decoder
self.upsample5_0 = UpsamplingBottleneck(64, 16, dropout_prob=0.3)
self.bottleneck5_1 = DilatedBlock(16, 16, dropout_prob=0.3)
self.bottleneck5_2 = DilatedBlock(16, 16, dropout_prob=0.3)
self.transposed_conv = nn.ConvTranspose2d(16, classes, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
def forward(self, x):
        # Initial block
x = self.initial_block(x)
# Encoder - Block 1
x_1= self.downsample1_0(x)
x = self.factorized1_1(x_1)
x = self.factorized1_2(x)
x = self.factorized1_3(x)
x = self.factorized1_4(x)
# Encoder - Block 2
x_2 = self.downsample2_0(x)
# print(x_2.shape)
x = self.dilated2_1(x_2)
x = self.dilated2_2(x)
x = self.dilated2_3(x)
x = self.dilated2_4(x)
x = self.dilated2_5(x)
x = self.dilated2_6(x)
# print(x.shape)
# Decoder - Block 3
x = self.upsample4_0(x, x_2)
x = self.bottleneck4_1(x)
x = self.bottleneck4_2(x)
# Decoder - Block 4
x = self.upsample5_0(x, x_1)
x = self.bottleneck5_1(x)
x = self.bottleneck5_2(x)
# Fullconv - DeConv
x = self.transposed_conv(x)
return x
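# Hedged sketch (illustration only): a quick CPU-only shape check without
# torchsummary, confirming that the /8 downsampling (initial block plus two
# downsampling bottlenecks) is undone by the two upsampling bottlenecks and the
# final transposed convolution, so the output keeps the input resolution.
def _demo_shape_check():
    model = FSSNet(classes=19)
    x = torch.randn(1, 3, 256, 512)
    with torch.no_grad():
        y = model(x)
    return y.shape  # torch.Size([1, 19, 256, 512])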
"""print layers and params of network"""
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = FSSNet(classes=19).to(device)
summary(model,(3,512,1024))
| 39.71 | 128 | 0.629396 | [
"MIT"
] | Abdul-Nasir11/Efficient-Segmentation-Networks | model/FSSNet.py | 11,913 | Python |
# -*- coding: utf-8 -*-
# *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
import os
from setuptools.command.build_ext import build_ext
import sys
import subprocess
import distutils.cmd
import distutils.log
from distutils.errors import DistutilsPlatformError
from distutils.dir_util import copy_tree
import glob
import re
import shlex
import shutil
import sysconfig
# This setup option constructs a prototype Makefile suitable for compiling
# the _jpype extension module. It is intended to help with development
# of the extension library on unix systems. This works only on unix systems.
#
# To create a Makefile use
# python setup.py build_ext --makefile
#
# Then edit with the desired options
class FeatureNotice(Warning):
""" indicate notices about features """
class Makefile(object):
compiler_type = "unix"
def __init__(self, actual):
self.actual = actual
self.compile_command = None
self.compile_pre = None
self.compile_post = None
self.objects = []
self.sources = []
def captureCompile(self, x):
command = x[0]
x = x[1:]
includes = [i for i in x if i.startswith("-I")]
x = [i for i in x if not i.startswith("-I")]
i0 = None
i1 = None
for i, v in enumerate(x):
if v == '-c':
i1 = i
elif v == '-o':
i0 = i
pre = set(x[:i1])
post = x[i0 + 2:]
self.compile_command = command
self.compile_pre = pre
self.compile_post = post
self.includes = includes
self.sources.append(x[i1 + 1])
def captureLink(self, x):
self.link_command = x[0]
x = x[1:]
i = x.index("-o")
self.library = x[i + 1]
del x[i]
del x[i]
self.objects = [i for i in x if i.endswith(".o")]
self.link_options = [i for i in x if not i.endswith(".o")]
u = self.objects[0].split("/")
self.build_dir = "/".join(u[:2])
def compile(self, *args, **kwargs):
self.actual.spawn = self.captureCompile
rc = self.actual.compile(*args, **kwargs)
return rc
def _need_link(self, *args):
return True
def link_shared_object(self, *args, **kwargs):
self.actual._need_link = self._need_link
self.actual.spawn = self.captureLink
rc = self.actual.link_shared_object(*args, **kwargs)
self.write()
return rc
def detect_language(self, x):
return self.actual.detect_language(x)
def write(self):
print("Write makefile")
library = os.path.basename(self.library)
link_command = self.link_command
compile_command = self.compile_command
compile_pre = " ".join(list(self.compile_pre))
compile_post = " ".join(list(self.compile_post))
build = self.build_dir
link_flags = " ".join(self.link_options)
includes = " ".join(self.includes)
sources = " \\\n ".join(self.sources)
with open("Makefile", "w") as fd:
print("LIB = %s" % library, file=fd)
print("CC = %s" % compile_command, file=fd)
print("LINK = %s" % link_command, file=fd)
print("CFLAGS = %s %s" % (compile_pre, compile_post), file=fd)
print("INCLUDES = %s" % includes, file=fd)
print("BUILD = %s" % build, file=fd)
print("LINKFLAGS = %s" % link_flags, file=fd)
print("SRCS = %s" % sources, file=fd)
print("""
all: $(LIB)
rwildcard=$(foreach d,$(wildcard $(1:=/*)),$(call rwildcard,$d,$2) $(filter $(subst *,%,$2),$d))
#build/src/jp_thunk.cpp: $(call rwildcard,native/java,*.java)
# python setup.py build_thunk
DEPDIR = build/deps
$(DEPDIR): ; @mkdir -p $@
DEPFILES := $(SRCS:%.cpp=$(DEPDIR)/%.d)
deps: $(DEPFILES)
%/:
echo $@
$(DEPDIR)/%.d: %.cpp
mkdir -p $(dir $@)
$(CC) $(INCLUDES) -MT $(patsubst $(DEPDIR)%,'$$(BUILD)%',$(patsubst %.d,%.o,$@)) -MM $< -o $@
OBJS = $(addprefix $(BUILD)/, $(SRCS:.cpp=.o))
$(BUILD)/%.o: %.cpp
mkdir -p $(dir $@)
$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
$(LIB): $(OBJS)
$(LINK) $(OBJS) $(LINKFLAGS) -o $@
-include $(DEPFILES)
""", file=fd)
# Customization of the build_ext
class BuildExtCommand(build_ext):
"""
Override some behavior in extension building:
1. handle compiler flags for different compilers via a dictionary.
2. try to disable warning -Wstrict-prototypes is valid for C/ObjC but not for C++
"""
user_options = build_ext.user_options + [
('android', None, 'configure for android'),
('makefile', None, 'Build a makefile for extensions'),
('jar', None, 'Build the jar only'),
]
def initialize_options(self, *args):
"""omit -Wstrict-prototypes from CFLAGS since its only valid for C code."""
self.android = False
self.makefile = False
self.jar = False
import distutils.sysconfig
cfg_vars = distutils.sysconfig.get_config_vars()
replacement = {
'-Wstrict-prototypes': '',
'-Wimplicit-function-declaration': '',
}
tracing = self.distribution.enable_tracing
        # Arguments to remove so that we can set the debugging and optimization levels ourselves
remove_args = ['-O0', '-O1', '-O2', '-O3', '-g']
for k, v in cfg_vars.items():
if not isinstance(v, str):
continue
if not k == "OPT" and not "FLAGS" in k:
continue
args = v.split()
for r in remove_args:
args = list(filter((r).__ne__, args))
cfg_vars[k] = " ".join(args)
super().initialize_options()
def _set_cflags(self):
# set compiler flags
c = self.compiler.compiler_type
jpypeLib = [i for i in self.extensions if i.name == '_jpype'][0]
if c == 'unix' and self.distribution.enable_coverage:
jpypeLib.extra_compile_args.extend(
['-ggdb', '--coverage', '-ftest-coverage'])
jpypeLib.extra_compile_args = ['-O0' if x == '-O2' else x for x in jpypeLib.extra_compile_args]
jpypeLib.extra_link_args.extend(['--coverage'])
if c == 'unix' and self.distribution.enable_tracing:
jpypeLib.extra_compile_args = ['-O0' if x == '-O2' else x for x in jpypeLib.extra_compile_args]
def build_extensions(self):
if self.makefile:
self.compiler = Makefile(self.compiler)
self.force = True
jpypeLib = [i for i in self.extensions if i.name == '_jpype'][0]
tracing = self.distribution.enable_tracing
self._set_cflags()
if tracing:
jpypeLib.define_macros.append(('JP_TRACING_ENABLE', 1))
coverage = self.distribution.enable_coverage
if coverage:
jpypeLib.define_macros.append(('JP_INSTRUMENTATION', 1))
# has to be last call
print("Call build extensions")
super().build_extensions()
def build_extension(self, ext):
if ext.language == "java":
return self.build_java_ext(ext)
if self.jar:
return
print("Call build ext")
return super().build_extension(ext)
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
if ext.language == "java":
fullname = self.get_ext_fullname("JAVA")
filename = ext.name + ".jar"
else:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,
os.path.basename(filename))
src_filename = os.path.join(self.build_lib, filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
distutils.file_util.copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
def build_java_ext(self, ext):
"""Run command."""
java = self.distribution.enable_build_jar
javac = "javac"
try:
if os.path.exists(os.path.join(os.environ['JAVA_HOME'], 'bin', 'javac')):
javac = '"%s"' % os.path.join(os.environ['JAVA_HOME'], 'bin', 'javac')
except KeyError:
pass
jar = "jar"
try:
if os.path.exists(os.path.join(os.environ['JAVA_HOME'], 'bin', 'jar')):
jar = '"%s"' % os.path.join(os.environ['JAVA_HOME'], 'bin', 'jar')
except KeyError:
pass
        # Try to use the cache if we were not asked to build the jar
if not java:
src = os.path.join('native', 'jars')
dest = os.path.dirname(self.get_ext_fullpath("JAVA"))
if os.path.exists(src):
distutils.log.info("Using Jar cache")
copy_tree(src, dest)
return
classpath = "."
if ext.libraries:
classpath = os.path.pathsep.join(ext.libraries)
distutils.log.info(
"Jar cache is missing, using --enable-build-jar to recreate it.")
coverage = self.distribution.enable_coverage
target_version = "1.8"
# build the jar
try:
dirname = os.path.dirname(self.get_ext_fullpath("JAVA"))
jarFile = os.path.join(dirname, ext.name + ".jar")
build_dir = os.path.join(self.build_temp, ext.name, "classes")
os.makedirs(build_dir, exist_ok=True)
os.makedirs(dirname, exist_ok=True)
cmd1 = shlex.split('%s -cp "%s" -d "%s" -g:none -source %s -target %s' %
(javac, classpath, build_dir, target_version, target_version))
cmd1.extend(ext.sources)
debug = "-g:none"
if coverage:
debug = "-g:lines,vars,source"
os.makedirs("build/classes", exist_ok=True)
self.announce(" %s" % " ".join(cmd1), level=distutils.log.INFO)
subprocess.check_call(cmd1)
try:
for file in glob.iglob("native/java/**/*.*", recursive=True):
if file.endswith(".java") or os.path.isdir(file):
continue
p = os.path.join(build_dir, os.path.relpath(file, "native/java"))
print("Copy file", file, p)
shutil.copyfile(file, p)
except Exception as ex:
print("FAIL", ex)
pass
cmd3 = shlex.split(
'%s cvf "%s" -C "%s" .' % (jar, jarFile, build_dir))
self.announce(" %s" % " ".join(cmd3), level=distutils.log.INFO)
subprocess.check_call(cmd3)
except subprocess.CalledProcessError as exc:
distutils.log.error(exc.output)
raise DistutilsPlatformError("Error executing {}".format(exc.cmd))
| 35.265306 | 107 | 0.567626 | [
"Apache-2.0"
] | altendky/jpype | setupext/build_ext.py | 12,096 | Python |
"""Policy compliance
This checks that recipes are in accordance with policy (as far as it
can be mechanically checked).
"""
import glob
import os
from . import LintCheck, ERROR, WARNING, INFO
from bioconda_utils import utils
class uses_vcs_url(LintCheck):
"""The recipe downloads source from a VCS
Please build from source archives and don't use the ``git_url``,
``svn_url`` or ``hg_url`` feature of conda.
"""
def check_source(self, source, section):
for vcs in ('git', 'svn', 'hg'):
if f"{vcs}_url" in source:
self.message(section=f"{section}/{vcs}_url")
class folder_and_package_name_must_match(LintCheck):
"""The recipe folder and package name do not match.
For clarity, the name of the folder the ``meta.yaml`` resides,
in and the name of the toplevel package should match.
"""
def check_recipe(self, recipe):
recipe_base_folder, _, _ = recipe.reldir.partition('/')
if recipe.name != recipe_base_folder:
self.message(section='package/name')
class gpl_requires_license_distributed(LintCheck):
"""The recipe packages GPL software but is missing copy of license.
The GPL requires that a copy of the license accompany all distributions
of the software. Please add::
about:
license_file: name_of_license_file
If the upstream tar ball does not include a copy, please ask the
authors of the software to add it to their distribution archive.
"""
severity = WARNING
requires = ["missing_license"]
def check_recipe(self, recipe):
if 'gpl' in recipe.get('about/license').lower():
if not recipe.get('about/license_file', ''):
self.message('about/license')
class should_not_use_fn(LintCheck):
"""The recipe uses source/fn
There is no need to specify the filename as the URL should give a name
and it will in most cases be unpacked automatically.
"""
def check_source(self, source, section):
if 'fn' in source:
self.message(section=section+'/fn')
class has_windows_bat_file(LintCheck):
"""The recipe directory contains a ``bat`` file.
Bioconda does not currently build packages for Windows (and has at
this time no plans to change this), so these files cannot be
tested.
Please remove any ``*.bat`` files generated by ``conda skeleton``
from the recipe directory.
"""
def check_recipe(self, recipe):
for fname in glob.glob(os.path.join(recipe.dir, '*.bat')):
self.message(fname=fname)
class long_summary(LintCheck):
"""The summary line is rather long
Consider using the description field for longer text::
about:
summary: Fancy Read Simulator (makes drinks)
description: |
XYZ is a very fancy read simulator that will not just make coffee
          while you are waiting but prepare all kinds of exquisite caffeinated
beverages from freshly roasted, single source beans ground to match
ambient humidity.
This will fit better into the templates listing and describing
recipes, which assume the summary to be a title and the
description to be one or more paragraphs.
"""
severity = WARNING
max_length = 120
def check_recipe(self, recipe):
if len(recipe.get('about/summary', '')) > self.max_length:
self.message('about/summary')
class cran_packages_to_conda_forge(LintCheck):
"""CRAN packages not depending on Bioconda should go to Conda-Forge
This recipe builds a CRAN package and does not depend on packages
from Bioconda. It should therefore be moved to Conda-Forge.
"""
def check_deps(self, deps):
# must have R in run a run dep
if 'R' in deps and any('run' in dep for dep in deps['R']):
# and all deps satisfied in conda-forge
if all(utils.RepoData().get_package_data(name=dep, channels='conda-forge')
for dep in deps):
self.message()
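# Hedged sketch (illustration only, not an actual Bioconda policy check): the
# shape a new check takes in this module, mirroring the recipe.get() /
# self.message() pattern used by the classes above. The class name and the rule
# itself are made up here.
class example_missing_home_page(LintCheck):
    """The recipe is missing a homepage URL
    Please add::
      about:
        home: https://example.com/project
    (Illustrative example only.)
    """
    severity = WARNING
    def check_recipe(self, recipe):
        if not recipe.get('about/home', ''):
            self.message('about/home')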
| 32.095238 | 86 | 0.669387 | [
"MIT"
] | JING-XINXING/bioconda-utils | bioconda_utils/lint/check_policy.py | 4,044 | Python |
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class EpisodeHopperEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
self.t = 0
self.r = 0
mujoco_env.MujocoEnv.__init__(self, 'hopper.xml', 4)
utils.EzPickle.__init__(self)
def step(self, a):
posbefore = self.sim.data.qpos[0]
self.do_simulation(a, self.frame_skip)
posafter, height, ang = self.sim.data.qpos[0:3]
alive_bonus = 1.0
reward = (posafter - posbefore) / self.dt
reward += alive_bonus
reward -= 1e-3 * np.square(a).sum()
s = self.state_vector()
done = not (np.isfinite(s).all() and (np.abs(s[2:]) < 100).all() and
(height > .7) and (abs(ang) < .2))
ob = self._get_obs()
self.t += 1
self.r += reward
if done:
return_r = self.r
self.r = 0
self.t = 0
else:
return_r = 0
return ob, return_r, done, {}
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[1:],
np.clip(self.sim.data.qvel.flat, -10, 10)
])
def reset_model(self):
self.t = 0
self.r = 0
qpos = self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq)
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 2
self.viewer.cam.distance = self.model.stat.extent * 0.75
self.viewer.cam.lookat[2] = 1.15
self.viewer.cam.elevation = -20
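# Hedged usage sketch (illustration only; assumes this env is registered with
# gym elsewhere in the package and that MuJoCo is available): per-step rewards
# are zero until `done`, at which point the whole episode return is emitted.
# `policy` is any callable mapping observations to actions.
def _demo_rollout(env, policy, max_steps=1000):
    ob = env.reset()
    episode_return = 0.0
    for _ in range(max_steps):
        ob, r, done, _ = env.step(policy(ob))
        episode_return += r  # stays 0.0 until the terminal step
        if done:
            break
    return episode_return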
| 32.735849 | 96 | 0.564841 | [
"MIT"
] | daochenzha/rapid | rapid/mujoco_envs/episode_hopper.py | 1,735 | Python |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell messaging module.
This module defines the different message types that are passed between
cells and the methods that they can call when the target cell has been
reached.
The interface into this module is the MessageRunner class.
"""
import sys
import traceback
from eventlet import queue
from oslo.config import cfg
from oslo import messaging
from oslo.serialization import jsonutils
from oslo.utils import excutils
from oslo.utils import importutils
from oslo.utils import timeutils
import six
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import compute
from nova.compute import delete_types
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.db import base
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.network import model as network_model
from nova import objects
from nova.objects import base as objects_base
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import rpc
from nova import utils
cell_messaging_opts = [
cfg.IntOpt('max_hop_count',
default=10,
help='Maximum number of hops for cells routing.'),
cfg.StrOpt('scheduler',
default='nova.cells.scheduler.CellsScheduler',
help='Cells scheduler to use')]
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_messaging_opts, group='cells')
LOG = logging.getLogger(__name__)
# Separator used between cell names for the 'full cell name' and routing
# path.
_PATH_CELL_SEP = cells_utils.PATH_CELL_SEP
def _reverse_path(path):
"""Reverse a path. Used for sending responses upstream."""
path_parts = path.split(_PATH_CELL_SEP)
path_parts.reverse()
return _PATH_CELL_SEP.join(path_parts)
def _response_cell_name_from_path(routing_path, neighbor_only=False):
"""Reverse the routing_path. If we only want to send to our parent,
set neighbor_only to True.
"""
path = _reverse_path(routing_path)
if not neighbor_only or len(path) == 1:
return path
return _PATH_CELL_SEP.join(path.split(_PATH_CELL_SEP)[:2])
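# Hedged worked example (illustration only, added for clarity) of the two
# helpers above; the cell names are made up.
def _demo_path_helpers():
    path = 'api' + _PATH_CELL_SEP + 'region' + _PATH_CELL_SEP + 'child'
    assert _reverse_path(path) == 'child!region!api'
    assert _response_cell_name_from_path(path) == 'child!region!api'
    assert _response_cell_name_from_path(path, neighbor_only=True) == 'child!region'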
#
# Message classes.
#
class _BaseMessage(object):
"""Base message class. It defines data that is passed with every
single message through every cell.
Messages are JSON-ified before sending and turned back into a
class instance when being received.
Every message has a unique ID. This is used to route responses
back to callers. In the future, this might be used to detect
receiving the same message more than once.
routing_path is updated on every hop through a cell. The current
cell name is appended to it (cells are separated by
_PATH_CELL_SEP ('!')). This is used to tell if we've reached the
target cell and also to determine the source of a message for
responses by reversing it.
hop_count is incremented and compared against max_hop_count. The
only current usefulness of this is to break out of a routing loop
if someone has a broken config.
fanout means to send to all nova-cells services running in a cell.
This is useful for capacity and capability broadcasting as well
as making sure responses get back to the nova-cells service that
is waiting.
"""
# Override message_type in a subclass
message_type = None
base_attrs_to_json = ['message_type',
'ctxt',
'method_name',
'method_kwargs',
'direction',
'need_response',
'fanout',
'uuid',
'routing_path',
'hop_count',
'max_hop_count']
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, need_response=False, fanout=False, uuid=None,
routing_path=None, hop_count=0, max_hop_count=None,
**kwargs):
self.ctxt = ctxt
self.resp_queue = None
self.msg_runner = msg_runner
self.state_manager = msg_runner.state_manager
# Copy these.
self.base_attrs_to_json = self.base_attrs_to_json[:]
# Normally this would just be CONF.cells.name, but going through
# the msg_runner allows us to stub it more easily.
self.our_path_part = self.msg_runner.our_name
self.uuid = uuid
if self.uuid is None:
self.uuid = uuidutils.generate_uuid()
self.method_name = method_name
self.method_kwargs = method_kwargs
self.direction = direction
self.need_response = need_response
self.fanout = fanout
self.routing_path = routing_path
self.hop_count = hop_count
if max_hop_count is None:
max_hop_count = CONF.cells.max_hop_count
self.max_hop_count = max_hop_count
self.is_broadcast = False
self._append_hop()
# Each sub-class should set this when the message is inited
self.next_hops = []
self.resp_queue = None
self.serializer = objects_base.NovaObjectSerializer()
def __repr__(self):
_dict = self._to_dict()
_dict.pop('method_kwargs')
return "<%s: %s>" % (self.__class__.__name__, _dict)
def _append_hop(self):
"""Add our hop to the routing_path."""
routing_path = (self.routing_path and
self.routing_path + _PATH_CELL_SEP or '')
self.routing_path = routing_path + self.our_path_part
self.hop_count += 1
def _process_locally(self):
"""Its been determined that we should process this message in this
cell. Go through the MessageRunner to call the appropriate
method for this message. Catch the response and/or exception and
encode it within a Response instance. Return it so the caller
can potentially return it to another cell... or return it to
a caller waiting in this cell.
"""
try:
resp_value = self.msg_runner._process_message_locally(self)
failure = False
except Exception as exc:
resp_value = sys.exc_info()
failure = True
LOG.exception(_LE("Error processing message locally: %(exc)s"),
{'exc': exc})
return Response(self.routing_path, resp_value, failure)
def _setup_response_queue(self):
"""Shortcut to creating a response queue in the MessageRunner."""
self.resp_queue = self.msg_runner._setup_response_queue(self)
def _cleanup_response_queue(self):
"""Shortcut to deleting a response queue in the MessageRunner."""
if self.resp_queue:
self.msg_runner._cleanup_response_queue(self)
self.resp_queue = None
def _wait_for_json_responses(self, num_responses=1):
"""Wait for response(s) to be put into the eventlet queue. Since
each queue entry actually contains a list of JSON-ified responses,
combine them all into a single list to return.
Destroy the eventlet queue when done.
"""
if not self.resp_queue:
# Source is not actually expecting a response
return
responses = []
wait_time = CONF.cells.call_timeout
try:
for x in xrange(num_responses):
json_responses = self.resp_queue.get(timeout=wait_time)
responses.extend(json_responses)
except queue.Empty:
raise exception.CellTimeout()
finally:
self._cleanup_response_queue()
return responses
def _send_json_responses(self, json_responses, neighbor_only=False,
fanout=False):
"""Send list of responses to this message. Responses passed here
are JSON-ified. Targeted messages have a single response while
Broadcast messages may have multiple responses.
If this cell was the source of the message, these responses will
be returned from self.process().
Otherwise, we will route the response to the source of the
request. If 'neighbor_only' is True, the response will be sent
to the neighbor cell, not the original requester. Broadcast
messages get aggregated at each hop, so neighbor_only will be
True for those messages.
"""
if not self.need_response:
return
if self.source_is_us():
responses = []
for json_response in json_responses:
responses.append(Response.from_json(json_response))
return responses
direction = self.direction == 'up' and 'down' or 'up'
response_kwargs = {'orig_message': self.to_json(),
'responses': json_responses}
target_cell = _response_cell_name_from_path(self.routing_path,
neighbor_only=neighbor_only)
response = self.msg_runner._create_response_message(self.ctxt,
direction, target_cell, self.uuid, response_kwargs,
fanout=fanout)
response.process()
def _send_response(self, response, neighbor_only=False):
"""Send a response to this message. If the source of the
request was ourselves, just return the response. It'll be
passed back to the caller of self.process(). See DocString for
_send_json_responses() as it handles most of the real work for
this method.
'response' is an instance of Response class.
"""
if not self.need_response:
return
if self.source_is_us():
return response
self._send_json_responses([response.to_json()],
neighbor_only=neighbor_only)
def _send_response_from_exception(self, exc_info):
"""Take an exception as returned from sys.exc_info(), encode
it in a Response, and send it.
"""
response = Response(self.routing_path, exc_info, True)
return self._send_response(response)
def _to_dict(self):
"""Convert a message to a dictionary. Only used internally."""
_dict = {}
for key in self.base_attrs_to_json:
_dict[key] = getattr(self, key)
return _dict
def to_json(self):
"""Convert a message into JSON for sending to a sibling cell."""
_dict = self._to_dict()
# Convert context to dict.
_dict['ctxt'] = _dict['ctxt'].to_dict()
# NOTE(comstud): 'method_kwargs' needs special serialization
# because it may contain objects.
method_kwargs = _dict['method_kwargs']
for k, v in method_kwargs.items():
method_kwargs[k] = self.serializer.serialize_entity(self.ctxt, v)
return jsonutils.dumps(_dict)
def source_is_us(self):
"""Did this cell create this message?"""
return self.routing_path == self.our_path_part
def process(self):
"""Process a message. Deal with it locally and/or forward it to a
sibling cell.
Override in a subclass.
"""
raise NotImplementedError()
class _TargetedMessage(_BaseMessage):
"""A targeted message is a message that is destined for a specific
single cell.
'target_cell' can be a full cell name like 'api!child-cell' or it can
be an instance of the CellState class if the target is a neighbor cell.
"""
message_type = 'targeted'
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, target_cell, **kwargs):
super(_TargetedMessage, self).__init__(msg_runner, ctxt,
method_name, method_kwargs, direction, **kwargs)
if isinstance(target_cell, cells_state.CellState):
# Neighbor cell or ourselves. Convert it to a 'full path'.
if target_cell.is_me:
target_cell = self.our_path_part
else:
target_cell = '%s%s%s' % (self.our_path_part,
_PATH_CELL_SEP,
target_cell.name)
self.target_cell = target_cell
self.base_attrs_to_json.append('target_cell')
def _get_next_hop(self):
"""Return the cell name for the next hop. If the next hop is
the current cell, return None.
"""
if self.target_cell == self.routing_path:
return self.state_manager.my_cell_state
target_cell = self.target_cell
routing_path = self.routing_path
current_hops = routing_path.count(_PATH_CELL_SEP)
next_hop_num = current_hops + 1
dest_hops = target_cell.count(_PATH_CELL_SEP)
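        # Worked example (illustrative, assuming '!' as _PATH_CELL_SEP): for
        # routing_path 'api' and target_cell 'api!child', current_hops is 0,
        # next_hop_num is 1 and dest_hops is 1, so dest_name_parts[1]
        # ('child') is selected as the next hop below.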
if dest_hops < current_hops:
reason_args = {'target_cell': target_cell,
'routing_path': routing_path}
reason = _("destination is %(target_cell)s but routing_path "
"is %(routing_path)s") % reason_args
raise exception.CellRoutingInconsistency(reason=reason)
dest_name_parts = target_cell.split(_PATH_CELL_SEP)
if (_PATH_CELL_SEP.join(dest_name_parts[:next_hop_num]) !=
routing_path):
reason_args = {'target_cell': target_cell,
'routing_path': routing_path}
reason = _("destination is %(target_cell)s but routing_path "
"is %(routing_path)s") % reason_args
raise exception.CellRoutingInconsistency(reason=reason)
next_hop_name = dest_name_parts[next_hop_num]
if self.direction == 'up':
next_hop = self.state_manager.get_parent_cell(next_hop_name)
else:
next_hop = self.state_manager.get_child_cell(next_hop_name)
if not next_hop:
cell_type = 'parent' if self.direction == 'up' else 'child'
reason_args = {'cell_type': cell_type,
'target_cell': target_cell}
reason = _("Unknown %(cell_type)s when routing to "
"%(target_cell)s") % reason_args
raise exception.CellRoutingInconsistency(reason=reason)
return next_hop
def process(self):
"""Process a targeted message. This is called for all cells
that touch this message. If the local cell is the one that
created this message, we reply directly with a Response instance.
If the local cell is not the target, an eventlet queue is created
and we wait for the response to show up via another thread
receiving the Response back.
Responses to targeted messages are routed directly back to the
source. No eventlet queues are created in intermediate hops.
All exceptions for processing the message across the whole
routing path are caught and encoded within the Response and
returned to the caller.
"""
try:
next_hop = self._get_next_hop()
except Exception as exc:
exc_info = sys.exc_info()
LOG.exception(_LE("Error locating next hop for message: %(exc)s"),
{'exc': exc})
return self._send_response_from_exception(exc_info)
if next_hop.is_me:
# Final destination.
response = self._process_locally()
return self._send_response(response)
# Need to forward via neighbor cell.
if self.need_response and self.source_is_us():
# A response is needed and the source of the message is
# this cell. Create the eventlet queue.
self._setup_response_queue()
wait_for_response = True
else:
wait_for_response = False
try:
# This is inside the try block, so we can encode the
# exception and return it to the caller.
if self.hop_count >= self.max_hop_count:
raise exception.CellMaxHopCountReached(
hop_count=self.hop_count)
next_hop.send_message(self)
except Exception as exc:
exc_info = sys.exc_info()
err_str = _("Failed to send message to cell: %(next_hop)s: "
"%(exc)s")
LOG.exception(err_str, {'exc': exc, 'next_hop': next_hop})
self._cleanup_response_queue()
return self._send_response_from_exception(exc_info)
if wait_for_response:
# Targeted messages only have 1 response.
remote_response = self._wait_for_json_responses()[0]
return Response.from_json(remote_response)
class _BroadcastMessage(_BaseMessage):
"""A broadcast message. This means to call a method in every single
cell going in a certain direction.
"""
message_type = 'broadcast'
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, run_locally=True, **kwargs):
super(_BroadcastMessage, self).__init__(msg_runner, ctxt,
method_name, method_kwargs, direction, **kwargs)
# The local cell creating this message has the option
# to be able to process the message locally or not.
self.run_locally = run_locally
self.is_broadcast = True
def _get_next_hops(self):
"""Set the next hops and return the number of hops. The next
hops may include ourself.
"""
if self.hop_count >= self.max_hop_count:
return []
if self.direction == 'down':
return self.state_manager.get_child_cells()
else:
return self.state_manager.get_parent_cells()
def _send_to_cells(self, target_cells):
"""Send a message to multiple cells."""
for cell in target_cells:
cell.send_message(self)
def _send_json_responses(self, json_responses):
"""Responses to broadcast messages always need to go to the
neighbor cell from which we received this message. That
cell aggregates the responses and makes sure to forward them
to the correct source.
"""
return super(_BroadcastMessage, self)._send_json_responses(
json_responses, neighbor_only=True, fanout=True)
def process(self):
"""Process a broadcast message. This is called for all cells
that touch this message.
        The message is sent to all cells in the given direction and
the creator of this message has the option of whether or not
to process it locally as well.
If responses from all cells are required, each hop creates an
eventlet queue and waits for responses from its immediate
neighbor cells. All responses are then aggregated into a
single list and are returned to the neighbor cell until the
source is reached.
When the source is reached, a list of Response instances are
returned to the caller.
All exceptions for processing the message across the whole
routing path are caught and encoded within the Response and
returned to the caller. It is possible to get a mix of
successful responses and failure responses. The caller is
responsible for dealing with this.
"""
try:
next_hops = self._get_next_hops()
except Exception as exc:
exc_info = sys.exc_info()
LOG.exception(_LE("Error locating next hops for message: %(exc)s"),
{'exc': exc})
return self._send_response_from_exception(exc_info)
# Short circuit if we don't need to respond
if not self.need_response:
if self.run_locally:
self._process_locally()
self._send_to_cells(next_hops)
return
# We'll need to aggregate all of the responses (from ourself
# and our sibling cells) into 1 response
try:
self._setup_response_queue()
self._send_to_cells(next_hops)
except Exception as exc:
# Error just trying to send to cells. Send a single response
# with the failure.
exc_info = sys.exc_info()
LOG.exception(_LE("Error sending message to next hops: %(exc)s"),
{'exc': exc})
self._cleanup_response_queue()
return self._send_response_from_exception(exc_info)
if self.run_locally:
# Run locally and store the Response.
local_response = self._process_locally()
else:
local_response = None
try:
remote_responses = self._wait_for_json_responses(
num_responses=len(next_hops))
except Exception as exc:
# Error waiting for responses, most likely a timeout.
# Send a single response back with the failure.
exc_info = sys.exc_info()
err_str = _("Error waiting for responses from neighbor cells: "
"%(exc)s")
LOG.exception(err_str, {'exc': exc})
return self._send_response_from_exception(exc_info)
if local_response:
remote_responses.append(local_response.to_json())
return self._send_json_responses(remote_responses)
class _ResponseMessage(_TargetedMessage):
"""A response message is really just a special targeted message,
saying to call 'parse_responses' when we reach the source of a 'call'.
The 'fanout' attribute on this message may be true if we're responding
to a broadcast or if we're about to respond to the source of an
original target message. Because multiple nova-cells services may
be running within a cell, we need to make sure the response gets
back to the correct one, so we have to fanout.
"""
message_type = 'response'
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, target_cell, response_uuid, **kwargs):
super(_ResponseMessage, self).__init__(msg_runner, ctxt,
method_name, method_kwargs, direction, target_cell, **kwargs)
self.response_uuid = response_uuid
self.base_attrs_to_json.append('response_uuid')
def process(self):
"""Process a response. If the target is the local cell, process
the response here. Otherwise, forward it to where it needs to
go.
"""
next_hop = self._get_next_hop()
if next_hop.is_me:
self._process_locally()
return
if self.fanout is False:
# Really there's 1 more hop on each of these below, but
# it doesn't matter for this logic.
target_hops = self.target_cell.count(_PATH_CELL_SEP)
current_hops = self.routing_path.count(_PATH_CELL_SEP)
if current_hops + 1 == target_hops:
                # Next hop is the target, so we must fanout. See
# DocString above.
self.fanout = True
next_hop.send_message(self)
#
# Methods that may be called when processing messages after reaching
# a target cell.
#
class _BaseMessageMethods(base.Base):
"""Base class for defining methods by message types."""
def __init__(self, msg_runner):
super(_BaseMessageMethods, self).__init__()
self.msg_runner = msg_runner
self.state_manager = msg_runner.state_manager
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.host_api = compute.HostAPI()
def task_log_get_all(self, message, task_name, period_beginning,
period_ending, host, state):
"""Get task logs from the DB. The message could have
directly targeted this cell, or it could have been a broadcast
message.
If 'host' is not None, filter by host.
If 'state' is not None, filter by state.
"""
task_logs = self.db.task_log_get_all(message.ctxt, task_name,
period_beginning,
period_ending,
host=host,
state=state)
return jsonutils.to_primitive(task_logs)
class _ResponseMessageMethods(_BaseMessageMethods):
"""Methods that are called from a ResponseMessage. There's only
1 method (parse_responses) and it is called when the message reaches
the source of a 'call'. All we do is stuff the response into the
eventlet queue to signal the caller that's waiting.
"""
def parse_responses(self, message, orig_message, responses):
self.msg_runner._put_response(message.response_uuid,
responses)
class _TargetedMessageMethods(_BaseMessageMethods):
"""These are the methods that can be called when routing a message
to a specific cell.
"""
def __init__(self, *args, **kwargs):
super(_TargetedMessageMethods, self).__init__(*args, **kwargs)
def build_instances(self, message, build_inst_kwargs):
"""Parent cell told us to schedule new instance creation."""
self.msg_runner.scheduler.build_instances(message, build_inst_kwargs)
def run_compute_api_method(self, message, method_info):
"""Run a method in the compute api class."""
method = method_info['method']
fn = getattr(self.compute_api, method, None)
if not fn:
detail = _("Unknown method '%(method)s' in compute API")
raise exception.CellServiceAPIMethodNotFound(
detail=detail % {'method': method})
args = list(method_info['method_args'])
# 1st arg is instance_uuid that we need to turn into the
# instance object.
instance_uuid = args[0]
try:
instance = self.db.instance_get_by_uuid(message.ctxt,
instance_uuid)
except exception.InstanceNotFound:
with excutils.save_and_reraise_exception():
# Must be a race condition. Let's try to resolve it by
# telling the top level cells that this instance doesn't
# exist.
instance = {'uuid': instance_uuid}
self.msg_runner.instance_destroy_at_top(message.ctxt,
instance)
# FIXME(comstud): This is temporary/transitional until I can
# work out a better way to pass full objects down.
EXPECTS_OBJECTS = ['start', 'stop', 'delete_instance_metadata',
'update_instance_metadata', 'shelve', 'unshelve']
if method in EXPECTS_OBJECTS:
inst_obj = objects.Instance()
expected_attrs = None
            # shelve and unshelve require 'info_cache' and 'metadata', so
            # fetch them from the database as well.
if method in ['shelve', 'unshelve']:
expected_attrs = ['metadata', 'info_cache']
inst_obj._from_db_object(message.ctxt, inst_obj, instance,
expected_attrs=expected_attrs)
instance = inst_obj
args[0] = instance
return fn(message.ctxt, *args, **method_info['method_kwargs'])
def update_capabilities(self, message, cell_name, capabilities):
"""A child cell told us about their capabilities."""
LOG.debug("Received capabilities from child cell "
"%(cell_name)s: %(capabilities)s",
{'cell_name': cell_name, 'capabilities': capabilities})
self.state_manager.update_cell_capabilities(cell_name,
capabilities)
# Go ahead and update our parents now that a child updated us
self.msg_runner.tell_parents_our_capabilities(message.ctxt)
def update_capacities(self, message, cell_name, capacities):
"""A child cell told us about their capacity."""
LOG.debug("Received capacities from child cell "
"%(cell_name)s: %(capacities)s",
{'cell_name': cell_name, 'capacities': capacities})
self.state_manager.update_cell_capacities(cell_name,
capacities)
# Go ahead and update our parents now that a child updated us
self.msg_runner.tell_parents_our_capacities(message.ctxt)
def announce_capabilities(self, message):
"""A parent cell has told us to send our capabilities, so let's
do so.
"""
self.msg_runner.tell_parents_our_capabilities(message.ctxt)
def announce_capacities(self, message):
"""A parent cell has told us to send our capacity, so let's
do so.
"""
self.msg_runner.tell_parents_our_capacities(message.ctxt)
def service_get_by_compute_host(self, message, host_name):
"""Return the service entry for a compute host."""
service = self.db.service_get_by_compute_host(message.ctxt,
host_name)
return jsonutils.to_primitive(service)
def service_update(self, message, host_name, binary, params_to_update):
"""Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
running
:param binary: The name of the executable that the service runs as
:param params_to_update: eg. {'disabled': True}
"""
return jsonutils.to_primitive(
self.host_api.service_update(message.ctxt, host_name, binary,
params_to_update))
def service_delete(self, message, service_id):
"""Deletes the specified service."""
self.host_api._service_delete(message.ctxt, service_id)
def proxy_rpc_to_manager(self, message, host_name, rpc_message,
topic, timeout):
"""Proxy RPC to the given compute topic."""
# Check that the host exists.
self.db.service_get_by_compute_host(message.ctxt, host_name)
topic, _sep, server = topic.partition('.')
cctxt = rpc.get_client(messaging.Target(topic=topic,
server=server or None))
method = rpc_message['method']
kwargs = rpc_message['args']
if message.need_response:
cctxt = cctxt.prepare(timeout=timeout)
return cctxt.call(message.ctxt, method, **kwargs)
else:
cctxt.cast(message.ctxt, method, **kwargs)
def compute_node_get(self, message, compute_id):
"""Get compute node by ID."""
compute_node = self.db.compute_node_get(message.ctxt,
compute_id)
return jsonutils.to_primitive(compute_node)
def actions_get(self, message, instance_uuid):
actions = self.db.actions_get(message.ctxt, instance_uuid)
return jsonutils.to_primitive(actions)
def action_get_by_request_id(self, message, instance_uuid, request_id):
action = self.db.action_get_by_request_id(message.ctxt, instance_uuid,
request_id)
return jsonutils.to_primitive(action)
def action_events_get(self, message, action_id):
action_events = self.db.action_events_get(message.ctxt, action_id)
return jsonutils.to_primitive(action_events)
def validate_console_port(self, message, instance_uuid, console_port,
console_type):
"""Validate console port with child cell compute node."""
# 1st arg is instance_uuid that we need to turn into the
# instance object.
try:
instance = self.db.instance_get_by_uuid(message.ctxt,
instance_uuid)
except exception.InstanceNotFound:
with excutils.save_and_reraise_exception():
# Must be a race condition. Let's try to resolve it by
# telling the top level cells that this instance doesn't
# exist.
instance = {'uuid': instance_uuid}
self.msg_runner.instance_destroy_at_top(message.ctxt,
instance)
return self.compute_rpcapi.validate_console_port(message.ctxt,
instance, console_port, console_type)
def get_migrations(self, message, filters):
return self.compute_api.get_migrations(message.ctxt, filters)
def instance_update_from_api(self, message, instance,
expected_vm_state,
expected_task_state,
admin_state_reset):
"""Update an instance in this cell."""
if not admin_state_reset:
# NOTE(comstud): We don't want to nuke this cell's view
# of vm_state and task_state unless it's a forced reset
# via admin API.
instance.obj_reset_changes(['vm_state', 'task_state'])
# NOTE(alaski): A cell should be authoritative for its system_metadata
# and metadata so we don't want to sync it down from the api.
instance.obj_reset_changes(['metadata', 'system_metadata'])
instance.save(message.ctxt, expected_vm_state=expected_vm_state,
expected_task_state=expected_task_state)
def _call_compute_api_with_obj(self, ctxt, instance, method, *args,
**kwargs):
try:
# NOTE(comstud): We need to refresh the instance from this
# cell's view in the DB.
instance.refresh()
except exception.InstanceNotFound:
with excutils.save_and_reraise_exception():
# Must be a race condition. Let's try to resolve it by
# telling the top level cells that this instance doesn't
# exist.
instance = {'uuid': instance.uuid}
self.msg_runner.instance_destroy_at_top(ctxt,
instance)
except exception.InstanceInfoCacheNotFound:
if method != delete_types.DELETE:
raise
fn = getattr(self.compute_api, method, None)
return fn(ctxt, instance, *args, **kwargs)
def start_instance(self, message, instance):
"""Start an instance via compute_api.start()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'start')
def stop_instance(self, message, instance, clean_shutdown=True):
"""Stop an instance via compute_api.stop()."""
do_cast = not message.need_response
return self._call_compute_api_with_obj(message.ctxt, instance,
'stop', do_cast=do_cast,
clean_shutdown=clean_shutdown)
def reboot_instance(self, message, instance, reboot_type):
"""Reboot an instance via compute_api.reboot()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'reboot',
reboot_type=reboot_type)
def suspend_instance(self, message, instance):
"""Suspend an instance via compute_api.suspend()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'suspend')
def resume_instance(self, message, instance):
"""Resume an instance via compute_api.suspend()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'resume')
def get_host_uptime(self, message, host_name):
return self.host_api.get_host_uptime(message.ctxt, host_name)
def terminate_instance(self, message, instance):
self._call_compute_api_with_obj(message.ctxt, instance,
delete_types.DELETE)
def soft_delete_instance(self, message, instance):
self._call_compute_api_with_obj(message.ctxt, instance,
delete_types.SOFT_DELETE)
def pause_instance(self, message, instance):
"""Pause an instance via compute_api.pause()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'pause')
def unpause_instance(self, message, instance):
"""Unpause an instance via compute_api.pause()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'unpause')
def resize_instance(self, message, instance, flavor,
extra_instance_updates, clean_shutdown=True):
"""Resize an instance via compute_api.resize()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'resize',
flavor_id=flavor['flavorid'],
clean_shutdown=clean_shutdown,
**extra_instance_updates)
def live_migrate_instance(self, message, instance, block_migration,
disk_over_commit, host_name):
"""Live migrate an instance via compute_api.live_migrate()."""
self._call_compute_api_with_obj(message.ctxt, instance,
'live_migrate', block_migration,
disk_over_commit, host_name)
def revert_resize(self, message, instance):
"""Revert a resize for an instance in its cell."""
self._call_compute_api_with_obj(message.ctxt, instance,
'revert_resize')
def confirm_resize(self, message, instance):
"""Confirm a resize for an instance in its cell."""
self._call_compute_api_with_obj(message.ctxt, instance,
'confirm_resize')
def reset_network(self, message, instance):
"""Reset networking for an instance in its cell."""
self._call_compute_api_with_obj(message.ctxt, instance,
'reset_network')
def inject_network_info(self, message, instance):
"""Inject networking for an instance in its cell."""
self._call_compute_api_with_obj(message.ctxt, instance,
'inject_network_info')
def snapshot_instance(self, message, instance, image_id):
"""Snapshot an instance in its cell."""
instance.refresh()
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
instance.save(expected_task_state=[None])
self.compute_rpcapi.snapshot_instance(message.ctxt,
instance,
image_id)
def backup_instance(self, message, instance, image_id,
backup_type, rotation):
"""Backup an instance in its cell."""
instance.refresh()
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=[None])
self.compute_rpcapi.backup_instance(message.ctxt,
instance,
image_id,
backup_type,
rotation)
def rebuild_instance(self, message, instance, image_href, admin_password,
files_to_inject, preserve_ephemeral, kwargs):
kwargs['preserve_ephemeral'] = preserve_ephemeral
self._call_compute_api_with_obj(message.ctxt, instance, 'rebuild',
image_href, admin_password,
files_to_inject, **kwargs)
def set_admin_password(self, message, instance, new_pass):
self._call_compute_api_with_obj(message.ctxt, instance,
'set_admin_password', new_pass)
class _BroadcastMessageMethods(_BaseMessageMethods):
"""These are the methods that can be called as a part of a broadcast
message.
"""
def _at_the_top(self):
"""Are we the API level?"""
return not self.state_manager.get_parent_cells()
def _apply_expected_states(self, instance_info):
"""To attempt to address out-of-order messages, do some sanity
checking on the VM and task states. Add some requirements for
vm_state and task_state to the instance_update() DB call if
necessary.
"""
expected_vm_state_map = {
# For updates containing 'vm_state' of 'building',
# only allow them to occur if the DB already says
# 'building' or if the vm_state is None. None
# really shouldn't be possible as instances always
            # start out in 'building' anyway, but just in case.
vm_states.BUILDING: [vm_states.BUILDING, None]}
expected_task_state_map = {
# Always allow updates when task_state doesn't change,
# but also make sure we don't set resize/rebuild task
# states for old messages when we've potentially already
            # processed the ACTIVE/None messages. I.e., these checks
# will prevent stomping on any ACTIVE/None messages
# we already processed.
task_states.REBUILD_BLOCK_DEVICE_MAPPING:
[task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILDING],
task_states.REBUILD_SPAWNING:
[task_states.REBUILD_SPAWNING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILDING],
task_states.RESIZE_MIGRATING:
[task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP],
task_states.RESIZE_MIGRATED:
[task_states.RESIZE_MIGRATED,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP],
task_states.RESIZE_FINISH:
[task_states.RESIZE_FINISH,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP]}
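        # For example (based on the maps above): an out-of-order update that
        # carries task_state RESIZE_MIGRATING is only applied if the local DB
        # still shows RESIZE_MIGRATING or RESIZE_PREP, which keeps stale
        # messages from stomping on state we already processed.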
if 'vm_state' in instance_info:
expected = expected_vm_state_map.get(instance_info['vm_state'])
if expected is not None:
instance_info['expected_vm_state'] = expected
if 'task_state' in instance_info:
expected = expected_task_state_map.get(instance_info['task_state'])
if expected is not None:
instance_info['expected_task_state'] = expected
def instance_update_at_top(self, message, instance, **kwargs):
"""Update an instance in the DB if we're a top level cell."""
if not self._at_the_top():
return
instance_uuid = instance['uuid']
# Remove things that we can't update in the top level cells.
# 'metadata' is only updated in the API cell, so don't overwrite
# it based on what child cells say. Make sure to update
# 'cell_name' based on the routing path.
items_to_remove = ['id', 'security_groups', 'volumes', 'cell_name',
'name', 'metadata']
for key in items_to_remove:
instance.pop(key, None)
instance['cell_name'] = _reverse_path(message.routing_path)
# Fixup info_cache. We'll have to update this separately if
# it exists.
info_cache = instance.pop('info_cache', None)
if info_cache is not None:
info_cache.pop('id', None)
info_cache.pop('instance', None)
if 'system_metadata' in instance:
# Make sure we have the dict form that we need for
# instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
LOG.debug("Got update for instance: %(instance)s",
{'instance': instance}, instance_uuid=instance_uuid)
self._apply_expected_states(instance)
# It's possible due to some weird condition that the instance
# was already set as deleted... so we'll attempt to update
        # it with permissions that allow us to read deleted instances.
with utils.temporary_mutation(message.ctxt, read_deleted="yes"):
try:
self.db.instance_update(message.ctxt, instance_uuid,
instance, update_cells=False)
except exception.NotFound:
# FIXME(comstud): Strange. Need to handle quotas here,
# if we actually want this code to remain..
self.db.instance_create(message.ctxt, instance)
if info_cache:
network_info = info_cache.get('network_info')
if isinstance(network_info, list):
if not isinstance(network_info, network_model.NetworkInfo):
network_info = network_model.NetworkInfo.hydrate(
network_info)
info_cache['network_info'] = network_info.json()
try:
self.db.instance_info_cache_update(
message.ctxt, instance_uuid, info_cache)
except exception.InstanceInfoCacheNotFound:
# Can happen if we try to update a deleted instance's
# network information.
pass
def instance_destroy_at_top(self, message, instance, **kwargs):
"""Destroy an instance from the DB if we're a top level cell."""
if not self._at_the_top():
return
instance_uuid = instance['uuid']
LOG.debug("Got update to delete instance",
instance_uuid=instance_uuid)
try:
self.db.instance_destroy(message.ctxt, instance_uuid,
update_cells=False)
except exception.InstanceNotFound:
pass
def instance_delete_everywhere(self, message, instance, delete_type,
**kwargs):
"""Call compute API delete() or soft_delete() in every cell.
This is used when the API cell doesn't know what cell an instance
belongs to but the instance was requested to be deleted or
soft-deleted. So, we'll run it everywhere.
"""
LOG.debug("Got broadcast to %(delete_type)s delete instance",
{'delete_type': delete_type}, instance=instance)
if delete_type == delete_types.SOFT_DELETE:
self.compute_api.soft_delete(message.ctxt, instance)
else:
self.compute_api.delete(message.ctxt, instance)
def instance_fault_create_at_top(self, message, instance_fault, **kwargs):
"""Destroy an instance from the DB if we're a top level cell."""
if not self._at_the_top():
return
items_to_remove = ['id']
for key in items_to_remove:
instance_fault.pop(key, None)
LOG.debug("Got message to create instance fault: %s", instance_fault)
fault = objects.InstanceFault(context=message.ctxt)
fault.update(instance_fault)
fault.create()
def bw_usage_update_at_top(self, message, bw_update_info, **kwargs):
"""Update Bandwidth usage in the DB if we're a top level cell."""
if not self._at_the_top():
return
self.db.bw_usage_update(message.ctxt, **bw_update_info)
def _sync_instance(self, ctxt, instance):
if instance['deleted']:
self.msg_runner.instance_destroy_at_top(ctxt, instance)
else:
self.msg_runner.instance_update_at_top(ctxt, instance)
def sync_instances(self, message, project_id, updated_since, deleted,
**kwargs):
        projid_str = "<all>" if project_id is None else project_id
        since_str = "<all>" if updated_since is None else updated_since
LOG.info(_LI("Forcing a sync of instances, project_id="
"%(projid_str)s, updated_since=%(since_str)s"),
{'projid_str': projid_str, 'since_str': since_str})
if updated_since is not None:
updated_since = timeutils.parse_isotime(updated_since)
instances = cells_utils.get_instances_to_sync(message.ctxt,
updated_since=updated_since, project_id=project_id,
deleted=deleted)
for instance in instances:
self._sync_instance(message.ctxt, instance)
def service_get_all(self, message, filters):
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
services = self.db.service_get_all(message.ctxt, disabled=disabled)
ret_services = []
for service in services:
service = jsonutils.to_primitive(service)
for key, val in filters.iteritems():
if service[key] != val:
break
else:
ret_services.append(service)
return ret_services
def compute_node_get_all(self, message, hypervisor_match):
"""Return compute nodes in this cell."""
if hypervisor_match is not None:
nodes = self.db.compute_node_search_by_hypervisor(message.ctxt,
hypervisor_match)
else:
nodes = self.db.compute_node_get_all(message.ctxt)
return jsonutils.to_primitive(nodes)
def compute_node_stats(self, message):
"""Return compute node stats from this cell."""
return self.db.compute_node_statistics(message.ctxt)
def consoleauth_delete_tokens(self, message, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
if not self._at_the_top():
return
self.consoleauth_rpcapi.delete_tokens_for_instance(message.ctxt,
instance_uuid)
def bdm_update_or_create_at_top(self, message, bdm, create):
"""Create or update a block device mapping in API cells. If
create is True, only try to create. If create is None, try to
update but fall back to create. If create is False, only attempt
to update. This maps to nova-conductor's behavior.
"""
if not self._at_the_top():
return
items_to_remove = ['id']
for key in items_to_remove:
bdm.pop(key, None)
if create is None:
self.db.block_device_mapping_update_or_create(message.ctxt,
bdm,
legacy=False)
return
elif create is True:
self.db.block_device_mapping_create(message.ctxt, bdm,
legacy=False)
return
# Unfortunately this update call wants BDM ID... but we don't know
        # what it is in this cell. Search for it by trying to match either
# device_name or volume_id.
dev_name = bdm['device_name']
vol_id = bdm['volume_id']
instance_bdms = self.db.block_device_mapping_get_all_by_instance(
message.ctxt, bdm['instance_uuid'])
for instance_bdm in instance_bdms:
if dev_name and instance_bdm['device_name'] == dev_name:
break
if vol_id and instance_bdm['volume_id'] == vol_id:
break
else:
LOG.warning(_LW("No match when trying to update BDM: %(bdm)s"),
dict(bdm=bdm))
return
self.db.block_device_mapping_update(message.ctxt,
instance_bdm['id'], bdm,
legacy=False)
def bdm_destroy_at_top(self, message, instance_uuid, device_name,
volume_id):
"""Destroy a block device mapping in API cells by device name
or volume_id. device_name or volume_id can be None, but not both.
"""
if not self._at_the_top():
return
if device_name:
self.db.block_device_mapping_destroy_by_instance_and_device(
message.ctxt, instance_uuid, device_name)
elif volume_id:
self.db.block_device_mapping_destroy_by_instance_and_volume(
message.ctxt, instance_uuid, volume_id)
def get_migrations(self, message, filters):
context = message.ctxt
return self.compute_api.get_migrations(context, filters)
_CELL_MESSAGE_TYPE_TO_MESSAGE_CLS = {'targeted': _TargetedMessage,
'broadcast': _BroadcastMessage,
'response': _ResponseMessage}
_CELL_MESSAGE_TYPE_TO_METHODS_CLS = {'targeted': _TargetedMessageMethods,
'broadcast': _BroadcastMessageMethods,
'response': _ResponseMessageMethods}
#
# Below are the public interfaces into this module.
#
class MessageRunner(object):
"""This class is the main interface into creating messages and
processing them.
Public methods in this class are typically called by the CellsManager
to create a new message and process it with the exception of
'message_from_json' which should be used by CellsDrivers to convert
a JSONified message it has received back into the appropriate Message
class.
Private methods are used internally when we need to keep some
'global' state. For instance, eventlet queues used for responses are
held in this class. Also, when a Message is process()ed above and
it's determined we should take action locally,
_process_message_locally() will be called.
When needing to add a new method to call in a Cell2Cell message,
define the new method below and also add it to the appropriate
MessageMethods class where the real work will be done.
"""
def __init__(self, state_manager):
self.state_manager = state_manager
cells_scheduler_cls = importutils.import_class(
CONF.cells.scheduler)
self.scheduler = cells_scheduler_cls(self)
self.response_queues = {}
self.methods_by_type = {}
self.our_name = CONF.cells.name
for msg_type, cls in _CELL_MESSAGE_TYPE_TO_METHODS_CLS.iteritems():
self.methods_by_type[msg_type] = cls(self)
self.serializer = objects_base.NovaObjectSerializer()
def _process_message_locally(self, message):
"""Message processing will call this when its determined that
the message should be processed within this cell. Find the
method to call based on the message type, and call it. The
caller is responsible for catching exceptions and returning
results to cells, if needed.
"""
methods = self.methods_by_type[message.message_type]
fn = getattr(methods, message.method_name)
return fn(message, **message.method_kwargs)
def _put_response(self, response_uuid, response):
"""Put a response into a response queue. This is called when
a _ResponseMessage is processed in the cell that initiated a
'call' to another cell.
"""
resp_queue = self.response_queues.get(response_uuid)
if not resp_queue:
# Response queue is gone. We must have restarted or we
# received a response after our timeout period.
return
resp_queue.put(response)
def _setup_response_queue(self, message):
"""Set up an eventlet queue to use to wait for replies.
Replies come back from the target cell as a _ResponseMessage
being sent back to the source.
"""
resp_queue = queue.Queue()
self.response_queues[message.uuid] = resp_queue
return resp_queue
def _cleanup_response_queue(self, message):
"""Stop tracking the response queue either because we're
done receiving responses, or we've timed out.
"""
try:
del self.response_queues[message.uuid]
except KeyError:
# Ignore if queue is gone already somehow.
pass
def _create_response_message(self, ctxt, direction, target_cell,
response_uuid, response_kwargs, **kwargs):
"""Create a ResponseMessage. This is used internally within
the nova.cells.messaging module.
"""
return _ResponseMessage(self, ctxt, 'parse_responses',
response_kwargs, direction, target_cell,
response_uuid, **kwargs)
def _get_migrations_for_cell(self, ctxt, cell_name, filters):
method_kwargs = dict(filters=filters)
message = _TargetedMessage(self, ctxt, 'get_migrations',
method_kwargs, 'down', cell_name,
need_response=True)
response = message.process()
if response.failure and isinstance(response.value[1],
exception.CellRoutingInconsistency):
return []
return [response]
def message_from_json(self, json_message):
"""Turns a message in JSON format into an appropriate Message
instance. This is called when cells receive a message from
another cell.
"""
message_dict = jsonutils.loads(json_message)
# Need to convert context back.
ctxt = message_dict['ctxt']
message_dict['ctxt'] = context.RequestContext.from_dict(ctxt)
# NOTE(comstud): We also need to re-serialize any objects that
# exist in 'method_kwargs'.
method_kwargs = message_dict['method_kwargs']
for k, v in method_kwargs.items():
method_kwargs[k] = self.serializer.deserialize_entity(
message_dict['ctxt'], v)
message_type = message_dict.pop('message_type')
message_cls = _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS[message_type]
return message_cls(self, **message_dict)
def ask_children_for_capabilities(self, ctxt):
"""Tell child cells to send us capabilities. This is typically
called on startup of the nova-cells service.
"""
child_cells = self.state_manager.get_child_cells()
for child_cell in child_cells:
message = _TargetedMessage(self, ctxt,
'announce_capabilities',
dict(), 'down', child_cell)
message.process()
def ask_children_for_capacities(self, ctxt):
"""Tell child cells to send us capacities. This is typically
called on startup of the nova-cells service.
"""
child_cells = self.state_manager.get_child_cells()
for child_cell in child_cells:
message = _TargetedMessage(self, ctxt, 'announce_capacities',
dict(), 'down', child_cell)
message.process()
def tell_parents_our_capabilities(self, ctxt):
"""Send our capabilities to parent cells."""
parent_cells = self.state_manager.get_parent_cells()
if not parent_cells:
return
my_cell_info = self.state_manager.get_my_state()
capabs = self.state_manager.get_our_capabilities()
parent_cell_names = ','.join(x.name for x in parent_cells)
LOG.debug("Updating parents [%(parent_cell_names)s] with "
"our capabilities: %(capabs)s",
{'parent_cell_names': parent_cell_names,
'capabs': capabs})
# We have to turn the sets into lists so they can potentially
# be json encoded when the raw message is sent.
for key, values in capabs.items():
capabs[key] = list(values)
method_kwargs = {'cell_name': my_cell_info.name,
'capabilities': capabs}
for cell in parent_cells:
message = _TargetedMessage(self, ctxt, 'update_capabilities',
method_kwargs, 'up', cell, fanout=True)
message.process()
def tell_parents_our_capacities(self, ctxt):
"""Send our capacities to parent cells."""
parent_cells = self.state_manager.get_parent_cells()
if not parent_cells:
return
my_cell_info = self.state_manager.get_my_state()
capacities = self.state_manager.get_our_capacities()
parent_cell_names = ','.join(x.name for x in parent_cells)
LOG.debug("Updating parents [%(parent_cell_names)s] with "
"our capacities: %(capacities)s",
{'parent_cell_names': parent_cell_names,
'capacities': capacities})
method_kwargs = {'cell_name': my_cell_info.name,
'capacities': capacities}
for cell in parent_cells:
message = _TargetedMessage(self, ctxt, 'update_capacities',
method_kwargs, 'up', cell, fanout=True)
message.process()
def build_instances(self, ctxt, target_cell, build_inst_kwargs):
"""Called by the cell scheduler to tell a child cell to build
instance(s).
"""
method_kwargs = dict(build_inst_kwargs=build_inst_kwargs)
message = _TargetedMessage(self, ctxt, 'build_instances',
method_kwargs, 'down', target_cell)
message.process()
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
message = _TargetedMessage(self, ctxt, 'run_compute_api_method',
dict(method_info=method_info), 'down',
cell_name, need_response=call)
return message.process()
def instance_update_at_top(self, ctxt, instance):
"""Update an instance at the top level cell."""
message = _BroadcastMessage(self, ctxt, 'instance_update_at_top',
dict(instance=instance), 'up',
run_locally=False)
message.process()
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy an instance at the top level cell."""
message = _BroadcastMessage(self, ctxt, 'instance_destroy_at_top',
dict(instance=instance), 'up',
run_locally=False)
message.process()
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""This is used by API cell when it didn't know what cell
an instance was in, but the instance was requested to be
deleted or soft_deleted. So, we'll broadcast this everywhere.
"""
method_kwargs = dict(instance=instance, delete_type=delete_type)
message = _BroadcastMessage(self, ctxt,
'instance_delete_everywhere',
method_kwargs, 'down',
run_locally=False)
message.process()
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top level cell."""
message = _BroadcastMessage(self, ctxt,
'instance_fault_create_at_top',
dict(instance_fault=instance_fault),
'up', run_locally=False)
message.process()
def bw_usage_update_at_top(self, ctxt, bw_update_info):
"""Update bandwidth usage at top level cell."""
message = _BroadcastMessage(self, ctxt, 'bw_usage_update_at_top',
dict(bw_update_info=bw_update_info),
'up', run_locally=False)
message.process()
def sync_instances(self, ctxt, project_id, updated_since, deleted):
"""Force a sync of all instances, potentially by project_id,
and potentially since a certain date/time.
"""
method_kwargs = dict(project_id=project_id,
updated_since=updated_since,
deleted=deleted)
message = _BroadcastMessage(self, ctxt, 'sync_instances',
method_kwargs, 'down',
run_locally=False)
message.process()
def service_get_all(self, ctxt, filters=None):
method_kwargs = dict(filters=filters)
message = _BroadcastMessage(self, ctxt, 'service_get_all',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def service_get_by_compute_host(self, ctxt, cell_name, host_name):
method_kwargs = dict(host_name=host_name)
message = _TargetedMessage(self, ctxt,
'service_get_by_compute_host',
method_kwargs, 'down', cell_name,
need_response=True)
return message.process()
def get_host_uptime(self, ctxt, cell_name, host_name):
method_kwargs = dict(host_name=host_name)
message = _TargetedMessage(self, ctxt,
'get_host_uptime',
method_kwargs, 'down', cell_name,
need_response=True)
return message.process()
def service_update(self, ctxt, cell_name, host_name, binary,
params_to_update):
"""Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
running
:param binary: The name of the executable that the service runs as
:param params_to_update: eg. {'disabled': True}
:returns: the update service object
"""
method_kwargs = dict(host_name=host_name, binary=binary,
params_to_update=params_to_update)
message = _TargetedMessage(self, ctxt,
'service_update',
method_kwargs, 'down', cell_name,
need_response=True)
return message.process()
def service_delete(self, ctxt, cell_name, service_id):
"""Deletes the specified service."""
method_kwargs = {'service_id': service_id}
message = _TargetedMessage(self, ctxt,
'service_delete',
method_kwargs, 'down', cell_name,
need_response=True)
message.process()
def proxy_rpc_to_manager(self, ctxt, cell_name, host_name, topic,
rpc_message, call, timeout):
method_kwargs = {'host_name': host_name,
'topic': topic,
'rpc_message': rpc_message,
'timeout': timeout}
message = _TargetedMessage(self, ctxt,
'proxy_rpc_to_manager',
method_kwargs, 'down', cell_name,
need_response=call)
return message.process()
def task_log_get_all(self, ctxt, cell_name, task_name,
period_beginning, period_ending,
host=None, state=None):
"""Get task logs from the DB from all cells or a particular
cell.
If 'cell_name' is None or '', get responses from all cells.
If 'host' is not None, filter by host.
If 'state' is not None, filter by state.
Return a list of Response objects.
"""
method_kwargs = dict(task_name=task_name,
period_beginning=period_beginning,
period_ending=period_ending,
host=host, state=state)
if cell_name:
message = _TargetedMessage(self, ctxt, 'task_log_get_all',
method_kwargs, 'down',
cell_name, need_response=True)
# Caller should get a list of Responses.
return [message.process()]
message = _BroadcastMessage(self, ctxt, 'task_log_get_all',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all child cells."""
method_kwargs = dict(hypervisor_match=hypervisor_match)
message = _BroadcastMessage(self, ctxt, 'compute_node_get_all',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def compute_node_stats(self, ctxt):
"""Return compute node stats from all child cells."""
method_kwargs = dict()
message = _BroadcastMessage(self, ctxt, 'compute_node_stats',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def compute_node_get(self, ctxt, cell_name, compute_id):
"""Return compute node entry from a specific cell by ID."""
method_kwargs = dict(compute_id=compute_id)
message = _TargetedMessage(self, ctxt, 'compute_node_get',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def actions_get(self, ctxt, cell_name, instance_uuid):
method_kwargs = dict(instance_uuid=instance_uuid)
message = _TargetedMessage(self, ctxt, 'actions_get',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
request_id):
method_kwargs = dict(instance_uuid=instance_uuid,
request_id=request_id)
message = _TargetedMessage(self, ctxt, 'action_get_by_request_id',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def action_events_get(self, ctxt, cell_name, action_id):
method_kwargs = dict(action_id=action_id)
message = _TargetedMessage(self, ctxt, 'action_events_get',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
message = _BroadcastMessage(self, ctxt, 'consoleauth_delete_tokens',
dict(instance_uuid=instance_uuid),
'up', run_locally=False)
message.process()
def validate_console_port(self, ctxt, cell_name, instance_uuid,
console_port, console_type):
"""Validate console port with child cell compute node."""
method_kwargs = {'instance_uuid': instance_uuid,
'console_port': console_port,
'console_type': console_type}
message = _TargetedMessage(self, ctxt, 'validate_console_port',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
"""Update/Create a BDM at top level cell."""
message = _BroadcastMessage(self, ctxt,
'bdm_update_or_create_at_top',
dict(bdm=bdm, create=create),
'up', run_locally=False)
message.process()
def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
volume_id=None):
"""Destroy a BDM at top level cell."""
method_kwargs = dict(instance_uuid=instance_uuid,
device_name=device_name,
volume_id=volume_id)
message = _BroadcastMessage(self, ctxt, 'bdm_destroy_at_top',
method_kwargs,
'up', run_locally=False)
message.process()
def get_migrations(self, ctxt, cell_name, run_locally, filters):
"""Fetch all migrations applying the filters for a given cell or all
cells.
"""
method_kwargs = dict(filters=filters)
if cell_name:
return self._get_migrations_for_cell(ctxt, cell_name, filters)
message = _BroadcastMessage(self, ctxt, 'get_migrations',
method_kwargs, 'down',
run_locally=run_locally,
need_response=True)
return message.process()
def _instance_action(self, ctxt, instance, method, extra_kwargs=None,
need_response=False):
"""Call instance_<method> in correct cell for instance."""
cell_name = instance.cell_name
if not cell_name:
LOG.warning(_LW("No cell_name for %(method)s() from API"),
dict(method=method), instance=instance)
return
method_kwargs = {'instance': instance}
if extra_kwargs:
method_kwargs.update(extra_kwargs)
message = _TargetedMessage(self, ctxt, method, method_kwargs,
'down', cell_name,
need_response=need_response)
return message.process()
def instance_update_from_api(self, ctxt, instance,
expected_vm_state, expected_task_state,
admin_state_reset):
"""Update an instance object in its cell."""
cell_name = instance.cell_name
if not cell_name:
LOG.warning(_LW("No cell_name for instance update from API"),
instance=instance)
return
method_kwargs = {'instance': instance,
'expected_vm_state': expected_vm_state,
'expected_task_state': expected_task_state,
'admin_state_reset': admin_state_reset}
message = _TargetedMessage(self, ctxt, 'instance_update_from_api',
method_kwargs, 'down',
cell_name)
message.process()
def start_instance(self, ctxt, instance):
"""Start an instance in its cell."""
self._instance_action(ctxt, instance, 'start_instance')
def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True):
"""Stop an instance in its cell."""
extra_kwargs = dict(clean_shutdown=clean_shutdown)
if do_cast:
self._instance_action(ctxt, instance, 'stop_instance',
extra_kwargs=extra_kwargs)
else:
return self._instance_action(ctxt, instance, 'stop_instance',
extra_kwargs=extra_kwargs,
need_response=True)
def reboot_instance(self, ctxt, instance, reboot_type):
"""Reboot an instance in its cell."""
extra_kwargs = dict(reboot_type=reboot_type)
self._instance_action(ctxt, instance, 'reboot_instance',
extra_kwargs=extra_kwargs)
def suspend_instance(self, ctxt, instance):
"""Suspend an instance in its cell."""
self._instance_action(ctxt, instance, 'suspend_instance')
def resume_instance(self, ctxt, instance):
"""Resume an instance in its cell."""
self._instance_action(ctxt, instance, 'resume_instance')
def terminate_instance(self, ctxt, instance):
self._instance_action(ctxt, instance, 'terminate_instance')
def soft_delete_instance(self, ctxt, instance):
self._instance_action(ctxt, instance, 'soft_delete_instance')
def pause_instance(self, ctxt, instance):
"""Pause an instance in its cell."""
self._instance_action(ctxt, instance, 'pause_instance')
def unpause_instance(self, ctxt, instance):
"""Unpause an instance in its cell."""
self._instance_action(ctxt, instance, 'unpause_instance')
def resize_instance(self, ctxt, instance, flavor,
extra_instance_updates,
clean_shutdown=True):
"""Resize an instance in its cell."""
extra_kwargs = dict(flavor=flavor,
extra_instance_updates=extra_instance_updates)
self._instance_action(ctxt, instance, 'resize_instance',
extra_kwargs=extra_kwargs,
clean_shutdown=clean_shutdown)
def live_migrate_instance(self, ctxt, instance, block_migration,
disk_over_commit, host_name):
"""Live migrate an instance in its cell."""
extra_kwargs = dict(block_migration=block_migration,
disk_over_commit=disk_over_commit,
host_name=host_name)
self._instance_action(ctxt, instance, 'live_migrate_instance',
extra_kwargs=extra_kwargs)
def revert_resize(self, ctxt, instance):
"""Revert a resize for an instance in its cell."""
self._instance_action(ctxt, instance, 'revert_resize')
def confirm_resize(self, ctxt, instance):
"""Confirm a resize for an instance in its cell."""
self._instance_action(ctxt, instance, 'confirm_resize')
def reset_network(self, ctxt, instance):
"""Reset networking for an instance in its cell."""
self._instance_action(ctxt, instance, 'reset_network')
def inject_network_info(self, ctxt, instance):
"""Inject networking for an instance in its cell."""
self._instance_action(ctxt, instance, 'inject_network_info')
def snapshot_instance(self, ctxt, instance, image_id):
"""Snapshot an instance in its cell."""
extra_kwargs = dict(image_id=image_id)
self._instance_action(ctxt, instance, 'snapshot_instance',
extra_kwargs=extra_kwargs)
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
"""Backup an instance in its cell."""
extra_kwargs = dict(image_id=image_id, backup_type=backup_type,
rotation=rotation)
self._instance_action(ctxt, instance, 'backup_instance',
extra_kwargs=extra_kwargs)
def rebuild_instance(self, ctxt, instance, image_href, admin_password,
files_to_inject, preserve_ephemeral, kwargs):
extra_kwargs = dict(image_href=image_href,
admin_password=admin_password,
files_to_inject=files_to_inject,
preserve_ephemeral=preserve_ephemeral,
kwargs=kwargs)
self._instance_action(ctxt, instance, 'rebuild_instance',
extra_kwargs=extra_kwargs)
def set_admin_password(self, ctxt, instance, new_pass):
self._instance_action(ctxt, instance, 'set_admin_password',
extra_kwargs={'new_pass': new_pass})
@staticmethod
def get_message_types():
return _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS.keys()
class Response(object):
"""Holds a response from a cell. If there was a failure, 'failure'
will be True and 'response' will contain an encoded Exception.
"""
def __init__(self, cell_name, value, failure):
self.failure = failure
self.cell_name = cell_name
self.value = value
def to_json(self):
resp_value = self.value
if self.failure:
resp_value = serialize_remote_exception(resp_value,
log_failure=False)
_dict = {'cell_name': self.cell_name,
'value': resp_value,
'failure': self.failure}
return jsonutils.dumps(_dict)
@classmethod
def from_json(cls, json_message):
_dict = jsonutils.loads(json_message)
if _dict['failure']:
resp_value = deserialize_remote_exception(_dict['value'],
rpc.get_allowed_exmods())
_dict['value'] = resp_value
return cls(**_dict)
def value_or_raise(self):
if self.failure:
if isinstance(self.value, (tuple, list)):
raise self.value[0], self.value[1], self.value[2]
else:
raise self.value
return self.value
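# Typical caller pattern (illustrative sketch, not part of the original
# module): broadcast 'call' methods on MessageRunner return a list of
# Response objects, e.g.
#     for response in msg_runner.service_get_all(ctxt):
#         services_in_cell = response.value_or_raise()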
_REMOTE_POSTFIX = '_Remote'
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_LE("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
cls_name = str(failure.__class__.__name__)
mod_name = str(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(data, allowed_remote_exmods):
failure = jsonutils.loads(str(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if module != 'exceptions' and module not in allowed_remote_exmods:
return messaging.RemoteError(name, failure.get('message'), trace)
try:
mod = importutils.import_module(module)
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return messaging.RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
{'__str__': str_override, '__unicode__': str_override})
new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
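# Minimal usage sketch (an assumption for illustration, not part of the
# original module): a failure captured via sys.exc_info() round-trips
# through JSON roughly like this:
#     json_data = serialize_remote_exception(sys.exc_info(), log_failure=False)
#     failure = deserialize_remote_exception(json_data, rpc.get_allowed_exmods())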
| 43.981084 | 79 | 0.608239 | [
"Apache-2.0"
] | Metaswitch/calico-nova | nova/cells/messaging.py | 86,027 | Python |
from os import getenv
from typing import Optional, Dict
from flask import Flask
TestConfig = Optional[Dict[str, bool]]
def create_app(test_config: TestConfig = None) -> Flask:
""" App factory method to initialize the application with given configuration """
app: Flask = Flask(__name__)
if test_config is not None:
app.config.from_mapping(test_config)
@app.route("/")
def index() -> str: # pylint: disable=unused-variable
return "My Hello World App is working..."
@app.route("/version")
def version() -> str: # pylint: disable=unused-variable
"""
        DOCKER_IMAGE_TAG is passed into the app from the Dockerfile as an ARG.
        It should be set up in the docker build task.
        It is used in .gitlab-ci.yaml to pass the hash of the latest commit as the docker image tag.
E.g. docker build --build-arg docker_image_tag="my-version" -t my-image-name:my-version .
"""
        return getenv("DOCKER_IMAGE_TAG") or "DOCKER_IMAGE_TAG hasn't been set up"
return app
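# Hypothetical usage sketch (the config key and expected body are assumptions for
# illustration): exercise the factory with a test configuration and Flask's test client.
if __name__ == "__main__":
    app = create_app({"TESTING": True})
    with app.test_client() as client:
        assert client.get("/").data == b"My Hello World App is working..."
        print(client.get("/version").data.decode())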
| 32.34375 | 97 | 0.672464 | [
"MIT"
] | gsjay980/data-science-IP | my_hello_world_app/web_api/router.py | 1,035 | Python |
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# This regression test checks that call_method<T>(...) where T is a
# non-reference, non-pointer type that happens to be held inside the
# result object (and thus is found as an lvalue) works.
from ben_scott1_ext import *
class CreatorImpl(Creator):
def create(self):
return Product()
factory = Factory()
c = CreatorImpl()
factory.reg(c)
a = factory.create()
| 30.833333 | 71 | 0.742342 | [
"BSD-3-Clause"
] | 0xDEC0DE8/mcsema | boost/libs/python/test/ben_scott1.py | 555 | Python |
from PPPForgivenessSDK.client import Client
# to run file 'delete_forgiveness_request.py', use valid token and a slug associated with a valid forgiveness request
client = Client(
access_token='{{YOUR_TOKEN_HERE}}',
vendor_key='{{YOUR_VENDOR_KEY}}',
environment='sandbox'
)
forgiveness_api = client.forgiveness_requests
# delete forgiveness request
result = forgiveness_api.delete(slug='{{YOUR_SLUG_HERE}}')
if result['status'] == 204:
print('deleted')
else:
print("An error occurred." + str(result['status']))
print(result['data'])
| 28 | 117 | 0.732143 | [
"BSD-3-Clause"
] | UsSbaPPP/sba-python-client | examples/delete_forgiveness_request.py | 560 | Python |
from vision_backend.models import Classifier
def deploy_request_json_as_strings(job):
"""
Get a string list representing a deploy job's request JSON.
"""
request_json = job.request_json
classifier_id = request_json['classifier_id']
try:
classifier = Classifier.objects.get(pk=classifier_id)
classifier_display = "Classifier ID {} (Source ID {})".format(
classifier_id, classifier.source.pk)
except Classifier.DoesNotExist:
classifier_display = "Classifier ID {} (deleted)".format(classifier_id)
return [
classifier_display,
"URL: {}".format(request_json['url']),
"Point count: {}".format(len(request_json['points'])),
]
| 32.681818 | 79 | 0.668985 | [
"BSD-2-Clause"
] | beijbom/coralnet | project/vision_backend_api/utils.py | 719 | Python |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scrapmart.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.869565 | 74 | 0.657933 | [
"MIT"
] | vivekx01/oldscrapmart | manage.py | 687 | Python |
import os
import pickle
import argparse
import numpy as np
from numpy import array as npa
from IPython import embed
def create_argparse():
parser = argparse.ArgumentParser(description='compare')
parser.add_argument('src', type=str)
parser.add_argument('dst', type=str)
parser.add_argument('--box_fracs_scales', type=str, default='0,8,16,32,64,128,256,512',
help='inner-box attribution for different scale')
return parser
if __name__ == '__main__':
args_parser = create_argparse()
opt = args_parser.parse_args()
opt.box_fracs_scales = list(map(float, opt.box_fracs_scales.split(',')))
with open(os.path.join('../exp', opt.src, 'box_fracs.pkl'), 'rb') as f:
f_src_list = pickle.load(f)
s_src = pickle.load(f)
with open(os.path.join('../exp', opt.dst, 'box_fracs.pkl'), 'rb') as f:
f_dst_list = pickle.load(f)
s_dst = pickle.load(f)
opt.box_fracs_scales.append(1e9)
ratio_list = [0.5, 0.75, 1.0, 1.25, 1.5]
for idx, box_size in enumerate(opt.box_fracs_scales[:-1]):
for k in range(len(ratio_list)):
ratio = ratio_list[k]
pos, neg = 0, 0
for i in range(len(f_src_list[0])):
try:
diff = np.array(f_src_list[i][k]) - np.array(f_dst_list[i][k])
pos += ((diff > 0) * (npa(s_dst[i]).mean(1) > box_size) * (npa(s_dst[i]).mean(1) <= opt.box_fracs_scales[idx+1])).sum()
neg += ((diff < 0) * (npa(s_src[i]).mean(1) > box_size) * (npa(s_src[i]).mean(1) <= opt.box_fracs_scales[idx+1])).sum()
except Exception as e:
continue
print('size@{}~{}|ratio@{} - > : {}, < : {}'.format(box_size, opt.box_fracs_scales[idx+1], ratio, pos, neg))
| 40.155556 | 139 | 0.586054 | [
"MIT"
] | voidrank/SaccadeNet | src/compare_tool.py | 1,807 | Python |
import requests
def hello():
response = requests.get('http://weather.livedoor.com/forecast/webservice/json/v1?city=130010')
weather = response.json()["forecasts"][0]["telop"]
return 'Hello, the weather in tokyo today is ' + weather
| 29.625 | 95 | 0.729958 | [
"MIT"
] | teramonagi/sandbox-package-python | sandbox_package_python/hello.py | 237 | Python |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from logger import setup_logger
from models.model_stages import BiSeNet
from cityscapes import CityScapes
from loss.loss import OhemCELoss
from loss.detail_loss import DetailAggregateLoss
from evaluation import MscEvalV0
from optimizer_loss import Optimizer
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.distributed as dist
import os
import os.path as osp
import logging
import time
import datetime
import argparse
logger = logging.getLogger()
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
def parse_args():
parse = argparse.ArgumentParser()
parse.add_argument(
'--local_rank',
dest = 'local_rank',
type = int,
default = -1,
)
parse.add_argument(
'--n_workers_train',
dest = 'n_workers_train',
type = int,
default = 8,
)
parse.add_argument(
'--n_workers_val',
dest = 'n_workers_val',
type = int,
default = 0,
)
parse.add_argument(
'--n_img_per_gpu',
dest = 'n_img_per_gpu',
type = int,
default = 4,
)
parse.add_argument(
'--max_iter',
dest = 'max_iter',
type = int,
default = 40000,
)
parse.add_argument(
'--save_iter_sep',
dest = 'save_iter_sep',
type = int,
default = 1000,
)
parse.add_argument(
'--warmup_steps',
dest = 'warmup_steps',
type = int,
default = 1000,
)
parse.add_argument(
'--mode',
dest = 'mode',
type = str,
default = 'train',
)
parse.add_argument(
'--ckpt',
dest = 'ckpt',
type = str,
default = None,
)
parse.add_argument(
'--respath',
dest = 'respath',
type = str,
default = None,
)
parse.add_argument(
'--backbone',
dest = 'backbone',
type = str,
default = 'CatNetSmall',
)
parse.add_argument(
'--pretrain_path',
dest = 'pretrain_path',
type = str,
default = '',
)
parse.add_argument(
'--use_conv_last',
dest = 'use_conv_last',
type = str2bool,
default = False,
)
parse.add_argument(
'--use_boundary_2',
dest = 'use_boundary_2',
type = str2bool,
default = False,
)
parse.add_argument(
'--use_boundary_4',
dest = 'use_boundary_4',
type = str2bool,
default = False,
)
parse.add_argument(
'--use_boundary_8',
dest = 'use_boundary_8',
type = str2bool,
default = False,
)
parse.add_argument(
'--use_boundary_16',
dest = 'use_boundary_16',
type = str2bool,
default = False,
)
return parse.parse_args()
def train():
args = parse_args()
save_pth_path = os.path.join(args.respath, 'pths')
dspth = './data'
# print(save_pth_path)
# print(osp.exists(save_pth_path))
# if not osp.exists(save_pth_path) and dist.get_rank()==0:
if not osp.exists(save_pth_path):
os.makedirs(save_pth_path)
torch.cuda.set_device(args.local_rank)
dist.init_process_group(
backend = 'nccl',
init_method = 'env://',
world_size = torch.cuda.device_count(),
rank=args.local_rank
)
setup_logger(args.respath)
## dataset
n_classes = 19
n_img_per_gpu = args.n_img_per_gpu
n_workers_train = args.n_workers_train
n_workers_val = args.n_workers_val
use_boundary_16 = args.use_boundary_16
use_boundary_8 = args.use_boundary_8
use_boundary_4 = args.use_boundary_4
use_boundary_2 = args.use_boundary_2
mode = args.mode
cropsize = [1024, 512]
randomscale = (0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.125, 1.25, 1.375, 1.5)
if dist.get_rank()==0:
logger.info('n_workers_train: {}'.format(n_workers_train))
logger.info('n_workers_val: {}'.format(n_workers_val))
logger.info('use_boundary_2: {}'.format(use_boundary_2))
logger.info('use_boundary_4: {}'.format(use_boundary_4))
logger.info('use_boundary_8: {}'.format(use_boundary_8))
logger.info('use_boundary_16: {}'.format(use_boundary_16))
logger.info('mode: {}'.format(args.mode))
ds = CityScapes(dspth, cropsize=cropsize, mode=mode, randomscale=randomscale)
sampler = torch.utils.data.distributed.DistributedSampler(ds)
dl = DataLoader(ds,
batch_size = n_img_per_gpu,
shuffle = False,
sampler = sampler,
num_workers = n_workers_train,
pin_memory = False,
drop_last = True)
# exit(0)
dsval = CityScapes(dspth, mode='val', randomscale=randomscale)
sampler_val = torch.utils.data.distributed.DistributedSampler(dsval)
dlval = DataLoader(dsval,
batch_size = 2,
shuffle = False,
sampler = sampler_val,
num_workers = n_workers_val,
drop_last = False)
## model
ignore_idx = 255
net = BiSeNet(backbone=args.backbone, n_classes=n_classes, pretrain_model=args.pretrain_path,
use_boundary_2=use_boundary_2, use_boundary_4=use_boundary_4, use_boundary_8=use_boundary_8,
use_boundary_16=use_boundary_16, use_conv_last=args.use_conv_last)
if not args.ckpt is None:
net.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
net.cuda()
net.train()
net = nn.parallel.DistributedDataParallel(net,
device_ids = [args.local_rank, ],
output_device = args.local_rank,
find_unused_parameters=True
)
score_thres = 0.7
n_min = n_img_per_gpu*cropsize[0]*cropsize[1]//16
criteria_p = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
criteria_16 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
criteria_32 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
boundary_loss_func = DetailAggregateLoss()
## optimizer
maxmIOU50 = 0.
maxmIOU75 = 0.
momentum = 0.9
weight_decay = 5e-4
lr_start = 1e-2
max_iter = args.max_iter
save_iter_sep = args.save_iter_sep
power = 0.9
warmup_steps = args.warmup_steps
warmup_start_lr = 1e-5
if dist.get_rank()==0:
print('max_iter: ', max_iter)
print('save_iter_sep: ', save_iter_sep)
print('warmup_steps: ', warmup_steps)
optim = Optimizer(
model = net.module,
loss = boundary_loss_func,
lr0 = lr_start,
momentum = momentum,
wd = weight_decay,
warmup_steps = warmup_steps,
warmup_start_lr = warmup_start_lr,
max_iter = max_iter,
power = power)
## train loop
msg_iter = 50
loss_avg = []
loss_boundery_bce = []
loss_boundery_dice = []
st = glob_st = time.time()
diter = iter(dl)
epoch = 0
for it in range(max_iter):
try:
im, lb = next(diter)
if not im.size()[0]==n_img_per_gpu: raise StopIteration
except StopIteration:
epoch += 1
sampler.set_epoch(epoch)
diter = iter(dl)
im, lb = next(diter)
im = im.cuda()
lb = lb.cuda()
H, W = im.size()[2:]
lb = torch.squeeze(lb, 1)
optim.zero_grad()
if use_boundary_2 and use_boundary_4 and use_boundary_8:
out, out16, out32, detail2, detail4, detail8 = net(im)
if (not use_boundary_2) and use_boundary_4 and use_boundary_8:
out, out16, out32, detail4, detail8 = net(im)
if (not use_boundary_2) and (not use_boundary_4) and use_boundary_8:
out, out16, out32, detail8 = net(im)
if (not use_boundary_2) and (not use_boundary_4) and (not use_boundary_8):
out, out16, out32 = net(im)
lossp = criteria_p(out, lb)
loss2 = criteria_16(out16, lb)
loss3 = criteria_32(out32, lb)
boundery_bce_loss = 0.
boundery_dice_loss = 0.
if use_boundary_2:
# if dist.get_rank()==0:
# print('use_boundary_2')
boundery_bce_loss2, boundery_dice_loss2 = boundary_loss_func(detail2, lb)
boundery_bce_loss += boundery_bce_loss2
boundery_dice_loss += boundery_dice_loss2
if use_boundary_4:
# if dist.get_rank()==0:
# print('use_boundary_4')
boundery_bce_loss4, boundery_dice_loss4 = boundary_loss_func(detail4, lb)
boundery_bce_loss += boundery_bce_loss4
boundery_dice_loss += boundery_dice_loss4
if use_boundary_8:
# if dist.get_rank()==0:
# print('use_boundary_8')
boundery_bce_loss8, boundery_dice_loss8 = boundary_loss_func(detail8, lb)
boundery_bce_loss += boundery_bce_loss8
boundery_dice_loss += boundery_dice_loss8
loss = lossp + loss2 + loss3 + boundery_bce_loss + boundery_dice_loss
loss.backward()
optim.step()
loss_avg.append(loss.item())
loss_boundery_bce.append(boundery_bce_loss.item())
loss_boundery_dice.append(boundery_dice_loss.item())
## print training log message
if (it+1)%msg_iter==0:
loss_avg = sum(loss_avg) / len(loss_avg)
lr = optim.lr
ed = time.time()
t_intv, glob_t_intv = ed - st, ed - glob_st
eta = int((max_iter - it) * (glob_t_intv / it))
eta = str(datetime.timedelta(seconds=eta))
loss_boundery_bce_avg = sum(loss_boundery_bce) / len(loss_boundery_bce)
loss_boundery_dice_avg = sum(loss_boundery_dice) / len(loss_boundery_dice)
msg = ', '.join([
'it: {it}/{max_it}',
'lr: {lr:4f}',
'loss: {loss:.4f}',
'boundery_bce_loss: {boundery_bce_loss:.4f}',
'boundery_dice_loss: {boundery_dice_loss:.4f}',
'eta: {eta}',
'time: {time:.4f}',
]).format(
it = it+1,
max_it = max_iter,
lr = lr,
loss = loss_avg,
boundery_bce_loss = loss_boundery_bce_avg,
boundery_dice_loss = loss_boundery_dice_avg,
time = t_intv,
eta = eta
)
logger.info(msg)
loss_avg = []
loss_boundery_bce = []
loss_boundery_dice = []
st = ed
# print(boundary_loss_func.get_params())
if (it+1)%save_iter_sep==0:# and it != 0:
## model
logger.info('evaluating the model ...')
logger.info('setup and restore model')
net.eval()
# ## evaluator
logger.info('compute the mIOU')
with torch.no_grad():
single_scale1 = MscEvalV0()
mIOU50 = single_scale1(net, dlval, n_classes)
# single_scale2= MscEvalV0(scale=0.75)
# mIOU75 = single_scale2(net, dlval, n_classes)
save_pth = osp.join(save_pth_path, 'model_iter{}_mIOU50_{}.pth'
.format(it+1, str(round(mIOU50,4))))
state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
if dist.get_rank()==0:
torch.save(state, save_pth)
logger.info('training iteration {}, model saved to: {}'.format(it+1, save_pth))
if mIOU50 > maxmIOU50:
maxmIOU50 = mIOU50
save_pth = osp.join(save_pth_path, 'model_maxmIOU50.pth'.format(it+1))
state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
if dist.get_rank()==0:
torch.save(state, save_pth)
logger.info('max mIOU model saved to: {}'.format(save_pth))
# if mIOU75 > maxmIOU75:
# maxmIOU75 = mIOU75
# save_pth = osp.join(save_pth_path, 'model_maxmIOU75.pth'.format(it+1))
# state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
# if dist.get_rank()==0: torch.save(state, save_pth)
# logger.info('max mIOU model saved to: {}'.format(save_pth))
logger.info('mIOU50 is: {}'.format(mIOU50))
logger.info('maxmIOU50 is: {}'.format(maxmIOU50))
net.train()
## dump the final model
save_pth = osp.join(save_pth_path, 'model_final.pth')
net.cpu()
state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
if dist.get_rank()==0: torch.save(state, save_pth)
logger.info('training done, model saved to: {}'.format(save_pth))
print('epoch: ', epoch)
if __name__ == "__main__":
train()
| 32.786885 | 98 | 0.551714 | [
"MIT"
] | Toby-SZZ/STDC-Seg | train.py | 14,000 | Python |
# Copyright 2019 FairwindsOps Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ruamel.yaml import YAML
from ruamel.yaml.constructor import DuplicateKeyError
from reckoner.exception import ReckonerConfigException
import logging
from io import BufferedReader, StringIO
class Handler(object):
"""Yaml handler class for loading, and dumping yaml consistently"""
yaml = YAML()
yaml.preserve_quotes = True
yaml.allow_unicode = True
yaml.allow_duplicate_keys = False
@classmethod
def load(cls, yaml_file: BufferedReader):
try:
y = cls.yaml.load(yaml_file)
except DuplicateKeyError as err:
logging.error(_clean_duplicate_key_message(str(err)))
raise ReckonerConfigException(
"Duplicate key found while loading your course YAML, please remove the duplicate key shown above.")
except Exception as err:
logging.error("Unexpected error when parsing yaml. See debug for more details.")
logging.debug(err)
raise err
return y
@classmethod
def dump(cls, data: dict) -> str:
temp_file = StringIO()
cls.yaml.dump(data, temp_file)
return temp_file.getvalue()
def _clean_duplicate_key_message(msg: str):
unwanted = """
To suppress this check see:
http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
Duplicate keys will become an error in future releases, and are errors
by default when using the new API.
"""
return msg.replace(unwanted, '')
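# Hypothetical usage sketch (the course YAML below is made up for illustration):
# round-trip a small snippet through the Handler; quotes survive the round trip
# because preserve_quotes is enabled on the shared YAML instance.
if __name__ == '__main__':
    from io import BytesIO
    course = Handler.load(BytesIO(b"charts:\n  my-app:\n    version: '1.2.3'\n"))
    print(Handler.dump(course))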
| 34.525424 | 115 | 0.71134 | [
"Apache-2.0"
] | CroudTech/reckoner | reckoner/yaml/handler.py | 2,037 | Python |
from pathlib import Path
from typing import Union
import pandas as pd
from .convention import COLUMN_NAMES
from .table_map import table_map_read
def open(table_file: str, table_map_file: str = None) -> pd.DataFrame:
"""
    Opens a dynamo table file, returning a pandas DataFrame
:param table_file:
:return: dataframe
"""
# Read into dataframe
df = pd.read_csv(table_file, header=None, delim_whitespace=True)
n_cols = df.shape[1]
    if n_cols <= len(COLUMN_NAMES):
        column_names = COLUMN_NAMES[0:n_cols]
    # In case table has extra columns
    else:
        extra_columns_needed = n_cols - len(COLUMN_NAMES)
        column_names = list(COLUMN_NAMES) + ['' for x in range(extra_columns_needed)]
    # Apply the resolved column names to the dataframe in both cases
    df.columns = column_names
    # Coerce columns to numeric where possible (the daxis column sometimes contains complex values)
df = df.apply(pd.to_numeric, errors='ignore')
# Add table map info
if table_map_file is not None and Path(table_map_file).exists():
table_map_dict = table_map_read(table_map_file)
tomo_file = [table_map_dict[tomo_idx] for tomo_idx in df['tomo']]
df['tomo_file'] = tomo_file
return df
def read(filename: str, table_map_file: str = None) -> pd.DataFrame:
"""
Opens a dynamo table file, returning a pandas DataFrame
:param filename:
:return: dataframe
"""
df = open(filename, table_map_file)
return df
def new(dataframe: pd.DataFrame, filename: str):
"""
Writes a dynamo table file from a pandas DataFrame
:param dataframe: pandas dataframe with headings matching the name from the dynamo table convention
:param filename: file in which to save data from dataframe, should end in .tbl
:return:
"""
# Get n rows
n_rows = dataframe.shape[0]
# Check if df has tomo_name but no tomo entry with indices, if so, fix
if 'tomo_file' in dataframe.columns and 'tomo' not in dataframe.columns:
tomo_names = dataframe['tomo_file'].unique()
tomo_name_idx = {name : index for index, name in enumerate(tomo_names)}
tomo_idx = [tomo_name_idx[name] for name in dataframe['tomo_file']]
dataframe['tomo'] = tomo_idx
# Check if tags present in dataframe, if not, make a set of linear tags
if 'tag' not in dataframe.columns:
tags = [x+1 for x in range(n_rows)]
dataframe['tag'] = tags
# Empty columns will be either 1 or 0, precreate these columns
zeros = [0 for x in range(n_rows)]
ones = [1 for x in range(n_rows)]
# Initialise empty dictionary to store data
data = {}
for column_name in COLUMN_NAMES:
if column_name in dataframe.columns:
data[column_name] = dataframe[column_name]
        # The aligned_value column should be set to 1, otherwise alignment projects don't run properly
elif column_name not in dataframe.columns and column_name == 'aligned_value':
data[column_name] = ones
else:
data[column_name] = zeros
# Create properly formatted dataframe to write
table = pd.DataFrame.from_dict(data)
# Prep filename
filename = str(filename)
if not filename.endswith('.tbl'):
        filename = filename + '.tbl'
# Write out table
table.to_csv(filename, sep=' ', header=False, index=False)
# Write out doc file if appropriate
if 'tomo_file' in dataframe.columns:
# Prep table file name
table_file_name = filename.replace('.tbl', '.doc')
# Get necessary info in new dataframe
table_map = dataframe[['tomo', 'tomo_file']].drop_duplicates(subset='tomo')
table_map.to_csv(table_file_name, sep=' ', header=False, index=False)
return
def write(dataframe: pd.DataFrame, filename: str):
"""
Writes a dynamo table file from a pandas DataFrame
:param dataframe: pandas dataframe with headings matching the name from the dynamo table convention
:param filename: file in which to save data from dataframe, should end in .tbl
:return:
"""
new(dataframe, filename)
return
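# Hypothetical usage sketch (column names follow the package's COLUMN_NAMES
# convention; the file names and values are arbitrary): write a minimal
# two-particle table plus its tomogram map, then read it back.
if __name__ == '__main__':
    example = pd.DataFrame({'x': [1.0, 2.0], 'y': [3.0, 4.0], 'z': [5.0, 6.0],
                            'tomo_file': ['TS_01.mrc', 'TS_01.mrc']})
    write(example, 'example.tbl')            # also emits example.doc (tomo map)
    print(read('example.tbl', 'example.doc').head())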
| 31.527132 | 103 | 0.674945 | [
"BSD-3-Clause"
] | brisvag/dynamotable | dynamotable/dynamotable.py | 4,067 | Python |
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bentoml.yatai.repository.base_repository import BaseRepository
from bentoml.yatai.repository.file_system_repository import FileSystemRepository
from bentoml.yatai.repository.s3_repository import S3Repository
from bentoml.yatai.repository.gcs_repository import GCSRepository
def create_repository(
repository_type: str,
file_system_directory=None,
s3_url=None,
s3_endpoint_url=None,
gcs_url=None,
) -> BaseRepository:
"""Creates a repository based on a provided type and parameters"""
if repository_type == "s3":
return S3Repository(s3_url, endpoint_url=s3_endpoint_url)
elif repository_type == "gcs":
return GCSRepository(gcs_url)
elif repository_type == "file_system":
return FileSystemRepository(file_system_directory)
else:
        raise ValueError("Unrecognized repository type {}".format(repository_type))
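# Hypothetical usage sketch (the directory path is an arbitrary example; the s3/gcs
# variants would pass s3_url or gcs_url instead):
if __name__ == '__main__':
    repo = create_repository("file_system", file_system_directory="/tmp/bentoml_repo")
    print(type(repo).__name__)   # expected: FileSystemRepository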
| 39.243243 | 80 | 0.769284 | [
"Apache-2.0"
] | AnvithaGadagi/BentoML | bentoml/yatai/repository/__init__.py | 1,452 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.worlds import create_task
from parlai.agents.fixed_response.fixed_response import FixedResponseAgent
from parlai.tasks.self_chat.worlds import SelfChatWorld as SelfChatBaseWorld
from parlai.tasks.interactive.worlds import InteractiveWorld as InteractiveBaseWorld
import random
def get_personas(opt, shared=None):
if shared and 'personas_list' in shared:
return shared['personas_list']
return _load_personas(opt=opt)
def _load_personas(opt):
print('[ loading personas.. ]')
# Create ConvAI2 data so we can assign personas.
convai2_opt = opt.copy()
convai2_opt['task'] = 'convai2:both'
if convai2_opt['datatype'].startswith('train'):
convai2_opt['datatype'] = 'train:evalmode'
convai2_opt['interactive_task'] = False
convai2_opt['selfchat_task'] = False
convai2_agent = FixedResponseAgent({'fixed_response': None})
convai2_world = create_task(convai2_opt, convai2_agent)
personas = set()
while not convai2_world.epoch_done():
convai2_world.parley()
msg = convai2_world.get_acts()[0]
# Find a new episode
if msg.get('episode_done', False) and not convai2_world.epoch_done():
convai2_world.parley()
msg = convai2_world.get_acts()[0]
txt = msg.get('text', '').split('\n')
a1_persona = []
a2_persona = []
for t in txt:
if t.startswith("partner's persona:"):
a1_persona.append(t.replace("partner's persona:", 'your persona:'))
if t.startswith('your persona:'):
a2_persona.append(t)
personas.add('\n'.join(a1_persona))
personas.add('\n'.join(a2_persona))
print('[ loaded ' + str(len(personas)) + ' personas ]')
return list(personas)
class InteractiveWorld(InteractiveBaseWorld):
@staticmethod
def add_cmdline_args(argparser):
parser = argparser.add_argument_group('ConvAI2 Interactive World')
parser.add_argument(
'--display-partner-persona',
type='bool',
default=True,
help='Display your partner persona at the end of the chat',
)
def __init__(self, opt, agents, shared=None):
super().__init__(opt, agents, shared)
self.display_partner_persona = self.opt['display_partner_persona']
def init_contexts(self, shared=None):
self.personas_list = get_personas(opt=self.opt, shared=shared)
def get_contexts(self):
random.seed()
personas_1 = random.choice(self.personas_list)
personas_2 = random.choice(self.personas_list)
return personas_1, personas_2
def finalize_episode(self):
print("\nCHAT DONE.\n")
if self.display_partner_persona:
partner_persona = self.p2.replace('your persona:', 'partner\'s persona:')
print(f"Your partner was playing the following persona:\n{partner_persona}")
if not self.epoch_done():
print("[ Preparing new chat ... ]\n")
def share(self):
shared_data = super().share()
shared_data['personas_list'] = self.personas_list
return shared_data
class SelfChatWorld(SelfChatBaseWorld):
def init_contexts(self, shared=None):
self.personas_list = get_personas(self.opt, shared=shared)
def get_contexts(self):
random.seed()
personas_1 = random.choice(self.personas_list)
personas_2 = random.choice(self.personas_list)
return [personas_1, personas_2]
| 37.138614 | 88 | 0.659557 | [
"MIT"
] | 189569400/ParlAI | parlai/tasks/convai2/worlds.py | 3,751 | Python |
#! /usr/bin/env python3
# This was forked from https://github.com/rustyrussell/lightning-payencode/tree/acc16ec13a3fa1dc16c07af6ec67c261bd8aff23
import re
import time
from hashlib import sha256
from binascii import hexlify
from decimal import Decimal
from typing import Optional, TYPE_CHECKING, Type
import random
import bitstring
from .bitcoin import hash160_to_b58_address, b58_address_to_hash160, TOTAL_COIN_SUPPLY_LIMIT_IN_FTC
from .segwit_addr import bech32_encode, bech32_decode, CHARSET
from . import segwit_addr
from . import constants
from .constants import AbstractNet
from . import ecc
from .bitcoin import COIN
if TYPE_CHECKING:
from .lnutil import LnFeatures
class LnInvoiceException(Exception): pass
class LnDecodeException(LnInvoiceException): pass
class LnEncodeException(LnInvoiceException): pass
# BOLT #11:
#
# A writer MUST encode `amount` as a positive decimal integer with no
# leading zeroes, SHOULD use the shortest representation possible.
def shorten_amount(amount):
""" Given an amount in bitcoin, shorten it
"""
# Convert to pico initially
amount = int(amount * 10**12)
units = ['p', 'n', 'u', 'm']
for unit in units:
if amount % 1000 == 0:
amount //= 1000
else:
break
else:
unit = ''
return str(amount) + unit
def unshorten_amount(amount) -> Decimal:
""" Given a shortened amount, convert it into a decimal
"""
# BOLT #11:
# The following `multiplier` letters are defined:
#
#* `m` (milli): multiply by 0.001
#* `u` (micro): multiply by 0.000001
#* `n` (nano): multiply by 0.000000001
#* `p` (pico): multiply by 0.000000000001
units = {
'p': 10**12,
'n': 10**9,
'u': 10**6,
'm': 10**3,
}
unit = str(amount)[-1]
# BOLT #11:
# A reader SHOULD fail if `amount` contains a non-digit, or is followed by
# anything except a `multiplier` in the table above.
if not re.fullmatch("\\d+[pnum]?", str(amount)):
raise LnDecodeException("Invalid amount '{}'".format(amount))
if unit in units.keys():
return Decimal(amount[:-1]) / units[unit]
else:
return Decimal(amount)
_INT_TO_BINSTR = {a: '0' * (5-len(bin(a)[2:])) + bin(a)[2:] for a in range(32)}
# Bech32 spits out array of 5-bit values. Shim here.
def u5_to_bitarray(arr):
b = ''.join(_INT_TO_BINSTR[a] for a in arr)
return bitstring.BitArray(bin=b)
def bitarray_to_u5(barr):
assert barr.len % 5 == 0
ret = []
s = bitstring.ConstBitStream(barr)
while s.pos != s.len:
ret.append(s.read(5).uint)
return ret
def encode_fallback(fallback: str, net: Type[AbstractNet]):
""" Encode all supported fallback addresses.
"""
wver, wprog_ints = segwit_addr.decode_segwit_address(net.SEGWIT_HRP, fallback)
if wver is not None:
wprog = bytes(wprog_ints)
else:
addrtype, addr = b58_address_to_hash160(fallback)
if addrtype == net.ADDRTYPE_P2PKH:
wver = 17
elif addrtype == net.ADDRTYPE_P2SH:
wver = 18
else:
raise LnEncodeException(f"Unknown address type {addrtype} for {net}")
wprog = addr
return tagged('f', bitstring.pack("uint:5", wver) + wprog)
def parse_fallback(fallback, net: Type[AbstractNet]):
wver = fallback[0:5].uint
if wver == 17:
addr = hash160_to_b58_address(fallback[5:].tobytes(), net.ADDRTYPE_P2PKH)
elif wver == 18:
addr = hash160_to_b58_address(fallback[5:].tobytes(), net.ADDRTYPE_P2SH)
elif wver <= 16:
witprog = fallback[5:] # cut witver
witprog = witprog[:len(witprog) // 8 * 8] # can only be full bytes
witprog = witprog.tobytes()
addr = segwit_addr.encode_segwit_address(net.SEGWIT_HRP, wver, witprog)
else:
return None
return addr
BOLT11_HRP_INV_DICT = {net.BOLT11_HRP: net for net in constants.NETS_LIST}
# Tagged field containing BitArray
def tagged(char, l):
# Tagged fields need to be zero-padded to 5 bits.
while l.len % 5 != 0:
l.append('0b0')
return bitstring.pack("uint:5, uint:5, uint:5",
CHARSET.find(char),
(l.len / 5) / 32, (l.len / 5) % 32) + l
# Tagged field containing bytes
def tagged_bytes(char, l):
return tagged(char, bitstring.BitArray(l))
def trim_to_min_length(bits):
"""Ensures 'bits' have min number of leading zeroes.
Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks.
"""
bits = bits[:] # copy
# make sure we can be split into 5 bit blocks
while bits.len % 5 != 0:
bits.prepend('0b0')
# Get minimal length by trimming leading 5 bits at a time.
while bits.startswith('0b00000'):
if len(bits) == 5:
break # v == 0
bits = bits[5:]
return bits
# Discard trailing bits, convert to bytes.
def trim_to_bytes(barr):
# Adds a byte if necessary.
b = barr.tobytes()
if barr.len % 8 != 0:
return b[:-1]
return b
# Try to pull out tagged data: returns tag, tagged data and remainder.
def pull_tagged(stream):
tag = stream.read(5).uint
length = stream.read(5).uint * 32 + stream.read(5).uint
return (CHARSET[tag], stream.read(length * 5), stream)
def lnencode(addr: 'LnAddr', privkey) -> str:
if addr.amount:
amount = addr.net.BOLT11_HRP + shorten_amount(addr.amount)
else:
amount = addr.net.BOLT11_HRP if addr.net else ''
hrp = 'ln' + amount
# Start with the timestamp
data = bitstring.pack('uint:35', addr.date)
tags_set = set()
# Payment hash
data += tagged_bytes('p', addr.paymenthash)
tags_set.add('p')
if addr.payment_secret is not None:
data += tagged_bytes('s', addr.payment_secret)
tags_set.add('s')
for k, v in addr.tags:
# BOLT #11:
#
# A writer MUST NOT include more than one `d`, `h`, `n` or `x` fields,
if k in ('d', 'h', 'n', 'x', 'p', 's'):
if k in tags_set:
raise LnEncodeException("Duplicate '{}' tag".format(k))
if k == 'r':
route = bitstring.BitArray()
for step in v:
pubkey, channel, feebase, feerate, cltv = step
route.append(bitstring.BitArray(pubkey) + bitstring.BitArray(channel) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv))
data += tagged('r', route)
elif k == 't':
pubkey, feebase, feerate, cltv = v
route = bitstring.BitArray(pubkey) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv)
data += tagged('t', route)
elif k == 'f':
data += encode_fallback(v, addr.net)
elif k == 'd':
            # truncate to max length: data_length is at most 1023, i.e. 1023*5 bits = 639 whole bytes
data += tagged_bytes('d', v.encode()[0:639])
elif k == 'x':
expirybits = bitstring.pack('intbe:64', v)
expirybits = trim_to_min_length(expirybits)
data += tagged('x', expirybits)
elif k == 'h':
data += tagged_bytes('h', sha256(v.encode('utf-8')).digest())
elif k == 'n':
data += tagged_bytes('n', v)
elif k == 'c':
finalcltvbits = bitstring.pack('intbe:64', v)
finalcltvbits = trim_to_min_length(finalcltvbits)
data += tagged('c', finalcltvbits)
elif k == '9':
if v == 0:
continue
feature_bits = bitstring.BitArray(uint=v, length=v.bit_length())
feature_bits = trim_to_min_length(feature_bits)
data += tagged('9', feature_bits)
else:
# FIXME: Support unknown tags?
raise LnEncodeException("Unknown tag {}".format(k))
tags_set.add(k)
# BOLT #11:
#
# A writer MUST include either a `d` or `h` field, and MUST NOT include
# both.
if 'd' in tags_set and 'h' in tags_set:
raise ValueError("Cannot include both 'd' and 'h'")
if not 'd' in tags_set and not 'h' in tags_set:
raise ValueError("Must include either 'd' or 'h'")
# We actually sign the hrp, then data (padded to 8 bits with zeroes).
msg = hrp.encode("ascii") + data.tobytes()
privkey = ecc.ECPrivkey(privkey)
sig = privkey.sign_message(msg, is_compressed=False, algo=lambda x:sha256(x).digest())
recovery_flag = bytes([sig[0] - 27])
sig = bytes(sig[1:]) + recovery_flag
data += sig
return bech32_encode(segwit_addr.Encoding.BECH32, hrp, bitarray_to_u5(data))
class LnAddr(object):
def __init__(self, *, paymenthash: bytes = None, amount=None, net: Type[AbstractNet] = None, tags=None, date=None,
payment_secret: bytes = None):
self.date = int(time.time()) if not date else int(date)
self.tags = [] if not tags else tags
self.unknown_tags = []
self.paymenthash = paymenthash
self.payment_secret = payment_secret
self.signature = None
self.pubkey = None
self.net = constants.net if net is None else net # type: Type[AbstractNet]
self._amount = amount # type: Optional[Decimal] # in bitcoins
self._min_final_cltv_expiry = 18
@property
def amount(self) -> Optional[Decimal]:
return self._amount
@amount.setter
def amount(self, value):
if not (isinstance(value, Decimal) or value is None):
raise LnInvoiceException(f"amount must be Decimal or None, not {value!r}")
if value is None:
self._amount = None
return
assert isinstance(value, Decimal)
if value.is_nan() or not (0 <= value <= TOTAL_COIN_SUPPLY_LIMIT_IN_FTC):
raise ValueError(f"amount is out-of-bounds: {value!r} FTC")
if value * 10**12 % 10:
# max resolution is millisatoshi
raise LnInvoiceException(f"Cannot encode {value!r}: too many decimal places")
self._amount = value
def get_amount_sat(self) -> Optional[Decimal]:
# note that this has msat resolution potentially
if self.amount is None:
return None
return self.amount * COIN
def get_routing_info(self, tag):
# note: tag will be 't' for trampoline
r_tags = list(filter(lambda x: x[0] == tag, self.tags))
# strip the tag type, it's implicitly 'r' now
r_tags = list(map(lambda x: x[1], r_tags))
# if there are multiple hints, we will use the first one that works,
# from a random permutation
random.shuffle(r_tags)
return r_tags
def get_amount_msat(self) -> Optional[int]:
if self.amount is None:
return None
return int(self.amount * COIN * 1000)
def get_features(self) -> 'LnFeatures':
from .lnutil import LnFeatures
return LnFeatures(self.get_tag('9') or 0)
def __str__(self):
return "LnAddr[{}, amount={}{} tags=[{}]]".format(
hexlify(self.pubkey.serialize()).decode('utf-8') if self.pubkey else None,
self.amount, self.net.BOLT11_HRP,
", ".join([k + '=' + str(v) for k, v in self.tags])
)
def get_min_final_cltv_expiry(self) -> int:
return self._min_final_cltv_expiry
def get_tag(self, tag):
for k, v in self.tags:
if k == tag:
return v
return None
def get_description(self) -> str:
return self.get_tag('d') or ''
def get_expiry(self) -> int:
exp = self.get_tag('x')
if exp is None:
exp = 3600
return int(exp)
def is_expired(self) -> bool:
now = time.time()
# BOLT-11 does not specify what expiration of '0' means.
# we treat it as 0 seconds here (instead of never)
return now > self.get_expiry() + self.date
class SerializableKey:
def __init__(self, pubkey):
self.pubkey = pubkey
def serialize(self):
return self.pubkey.get_public_key_bytes(True)
def lndecode(invoice: str, *, verbose=False, net=None) -> LnAddr:
if net is None:
net = constants.net
decoded_bech32 = bech32_decode(invoice, ignore_long_length=True)
hrp = decoded_bech32.hrp
data = decoded_bech32.data
if decoded_bech32.encoding is None:
raise LnDecodeException("Bad bech32 checksum")
if decoded_bech32.encoding != segwit_addr.Encoding.BECH32:
raise LnDecodeException("Bad bech32 encoding: must be using vanilla BECH32")
# BOLT #11:
#
# A reader MUST fail if it does not understand the `prefix`.
if not hrp.startswith('ln'):
raise LnDecodeException("Does not start with ln")
if not hrp[2:].startswith(net.BOLT11_HRP):
raise LnDecodeException(f"Wrong Lightning invoice HRP {hrp[2:]}, should be {net.BOLT11_HRP}")
data = u5_to_bitarray(data)
# Final signature 65 bytes, split it off.
if len(data) < 65*8:
raise LnDecodeException("Too short to contain signature")
sigdecoded = data[-65*8:].tobytes()
data = bitstring.ConstBitStream(data[:-65*8])
addr = LnAddr()
addr.pubkey = None
m = re.search("[^\\d]+", hrp[2:])
if m:
addr.net = BOLT11_HRP_INV_DICT[m.group(0)]
amountstr = hrp[2+m.end():]
# BOLT #11:
#
# A reader SHOULD indicate if amount is unspecified, otherwise it MUST
# multiply `amount` by the `multiplier` value (if any) to derive the
# amount required for payment.
if amountstr != '':
addr.amount = unshorten_amount(amountstr)
addr.date = data.read(35).uint
while data.pos != data.len:
tag, tagdata, data = pull_tagged(data)
# BOLT #11:
#
# A reader MUST skip over unknown fields, an `f` field with unknown
# `version`, or a `p`, `h`, or `n` field which does not have
# `data_length` 52, 52, or 53 respectively.
data_length = len(tagdata) / 5
if tag == 'r':
# BOLT #11:
#
# * `r` (3): `data_length` variable. One or more entries
# containing extra routing information for a private route;
# there may be more than one `r` field, too.
# * `pubkey` (264 bits)
# * `short_channel_id` (64 bits)
# * `feebase` (32 bits, big-endian)
# * `feerate` (32 bits, big-endian)
# * `cltv_expiry_delta` (16 bits, big-endian)
route=[]
s = bitstring.ConstBitStream(tagdata)
while s.pos + 264 + 64 + 32 + 32 + 16 < s.len:
route.append((s.read(264).tobytes(),
s.read(64).tobytes(),
s.read(32).uintbe,
s.read(32).uintbe,
s.read(16).uintbe))
addr.tags.append(('r',route))
elif tag == 't':
s = bitstring.ConstBitStream(tagdata)
e = (s.read(264).tobytes(),
s.read(32).uintbe,
s.read(32).uintbe,
s.read(16).uintbe)
addr.tags.append(('t', e))
elif tag == 'f':
fallback = parse_fallback(tagdata, addr.net)
if fallback:
addr.tags.append(('f', fallback))
else:
# Incorrect version.
addr.unknown_tags.append((tag, tagdata))
continue
elif tag == 'd':
addr.tags.append(('d', trim_to_bytes(tagdata).decode('utf-8')))
elif tag == 'h':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.tags.append(('h', trim_to_bytes(tagdata)))
elif tag == 'x':
addr.tags.append(('x', tagdata.uint))
elif tag == 'p':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.paymenthash = trim_to_bytes(tagdata)
elif tag == 's':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.payment_secret = trim_to_bytes(tagdata)
elif tag == 'n':
if data_length != 53:
addr.unknown_tags.append((tag, tagdata))
continue
pubkeybytes = trim_to_bytes(tagdata)
addr.pubkey = pubkeybytes
elif tag == 'c':
addr._min_final_cltv_expiry = tagdata.uint
elif tag == '9':
features = tagdata.uint
addr.tags.append(('9', features))
from .lnutil import validate_features
validate_features(features)
else:
addr.unknown_tags.append((tag, tagdata))
if verbose:
print('hex of signature data (32 byte r, 32 byte s): {}'
.format(hexlify(sigdecoded[0:64])))
print('recovery flag: {}'.format(sigdecoded[64]))
print('hex of data for signing: {}'
.format(hexlify(hrp.encode("ascii") + data.tobytes())))
print('SHA256 of above: {}'.format(sha256(hrp.encode("ascii") + data.tobytes()).hexdigest()))
# BOLT #11:
#
# A reader MUST check that the `signature` is valid (see the `n` tagged
# field specified below).
addr.signature = sigdecoded[:65]
hrp_hash = sha256(hrp.encode("ascii") + data.tobytes()).digest()
if addr.pubkey: # Specified by `n`
# BOLT #11:
#
# A reader MUST use the `n` field to validate the signature instead of
# performing signature recovery if a valid `n` field is provided.
ecc.ECPubkey(addr.pubkey).verify_message_hash(sigdecoded[:64], hrp_hash)
pubkey_copy = addr.pubkey
class WrappedBytesKey:
serialize = lambda: pubkey_copy
addr.pubkey = WrappedBytesKey
else: # Recover pubkey from signature.
addr.pubkey = SerializableKey(ecc.ECPubkey.from_sig_string(sigdecoded[:64], sigdecoded[64], hrp_hash))
return addr
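# Hypothetical self-check (the amounts are arbitrary examples): the shorten/unshorten
# helpers round-trip BOLT-11 amount multipliers, e.g. 0.001 coin <-> '1m'.
if __name__ == '__main__':
    assert shorten_amount(Decimal('0.001')) == '1m'
    assert unshorten_amount('1m') == Decimal('0.001')
    assert shorten_amount(Decimal('0.00025')) == '250u'
    assert unshorten_amount(shorten_amount(Decimal('0.00000000001'))) == Decimal('0.00000000001')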
| 34.90613 | 197 | 0.590582 | [
"MIT"
] | Feathercoin-Applications/electrum-ftc | electrum/lnaddr.py | 18,221 | Python |
from time import sleep
from enos.message.downstream.tsl.MeasurepointGetReply import MeasurepointGetReply
from enos.message.downstream.tsl.MeasurepointGetCommand import MeasurepointGetCommand
from enos.message.downstream.tsl.MeasurepointSetReply import MeasurepointSetReply
from enos.core.MqttClient import MqttClient
from enos.message.downstream.tsl.MeasurepointSetCommand import MeasurepointSetCommand
from enos.sample.SampleHelper import SampleHelper
def set_measurepoint_command_handler(arrived_message, arg_list):
""" message callback, handle the received downstream message and implement your logic
:param arrived_message: the arrived msg instance , it may instanceof class <BaseCommand> or <BaseResponse>
:param arg_list: the topic args extract from the arrived topic , including productKey , deviceKey ,etc
:return: the msg you want to reply to the cloud , if you do NOT want send msg , just return None
"""
print('receive measurepoint set command, params: {}'.format(arrived_message.get_params()))
print('product key = {}, device key= {}'.format(arg_list[0], arg_list[1]))
return MeasurepointSetReply().builder()\
.set_code(200)\
.set_message('measurepoints set success')\
.build()
def get_measurepoint_command_handler(arrived_message, arg_list):
print('receive measurepoint get command, params: {}'.format(arrived_message.get_params()))
print('product key = {}, device key= {}'.format(arg_list[0], arg_list[1]))
return MeasurepointGetReply().builder()\
.set_code(200) \
.add_measurepoint('wwww0001', 2) \
.set_message('measurepoints get success')\
.build()
if __name__ == "__main__":
client = MqttClient(SampleHelper.TCP_SERVER_URL, SampleHelper.GW_PRODUCT_KEY, SampleHelper.GW_DEVICE_KEY,
SampleHelper.GW_DEVICE_SECRET)
    client.get_profile().set_auto_reconnect(True)  # if the connection is interrupted, the client can automatically reconnect
client.setup_basic_logger('INFO')
client.connect() # connect in sync
# register a msg handler to handle the downstream measurepoint set command
client.register_arrived_message_handler(MeasurepointSetCommand.get_class(), set_measurepoint_command_handler)
client.register_arrived_message_handler(MeasurepointGetCommand.get_class(), get_measurepoint_command_handler)
while True:
sleep(5)
| 43.053571 | 117 | 0.757362 | [
"MIT"
] | charleshuangcai/enos-device-sdk-python | enos/sample/CommandSample.py | 2,411 | Python |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""This modules contains a function to fit without refinement an ellipse
on a set of points ....
"""
__author__ = "Jerome Kieffer"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "14/02/2017"
__status__ = "production"
__docformat__ = 'restructuredtext'
from collections import namedtuple
import numpy
Ellipse = namedtuple("Ellipse", ["cy", "cx", "angle", "half_long_axis", "half_short_axis"])
def fit_ellipse(pty, ptx):
"""Fit an
inspired from
http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
:param pty: point coordinates in the slow dimension (y)
:param ptx: point coordinates in the fast dimension (x)
"""
x = ptx[:, numpy.newaxis]
y = pty[:, numpy.newaxis]
D = numpy.hstack((x * x, x * y, y * y, x, y, numpy.ones_like(x)))
S = numpy.dot(D.T, D)
C = numpy.zeros([6, 6])
C[0, 2] = C[2, 0] = 2
C[1, 1] = -1
E, V = numpy.linalg.eig(numpy.dot(numpy.linalg.inv(S), C))
n = numpy.argmax(numpy.abs(E))
res = V[:, n]
b, c, d, f, g, a = res[1] / 2, res[2], res[3] / 2, res[4] / 2, res[5], res[0]
num = b * b - a * c
x0 = (c * d - b * f) / num
y0 = (a * f - b * d) / num
if b == 0:
if a > c:
angle = 0
else:
angle = numpy.pi / 2
else:
if a > c:
angle = numpy.arctan2(2 * b, (a - c)) / 2
else:
angle = numpy.pi / 2 + numpy.arctan2(2 * b, (a - c)) / 2
up = 2 * (a * f * f + c * d * d + g * b * b - 2 * b * d * f - a * c * g)
down1 = (b * b - a * c) * ((c - a) * numpy.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
down2 = (b * b - a * c) * ((a - c) * numpy.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
res1 = numpy.sqrt(up / down1)
res2 = numpy.sqrt(up / down2)
return Ellipse(y0, x0, angle, max(res1, res2), min(res1, res2))
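# Hypothetical self-check (the centre and half-axes below are arbitrary): sample
# points on a known axis-aligned ellipse and print the fitted parameters, which
# should be close to (cy=10, cx=20, angle~0 or pi/2, half_long=5, half_short=3).
if __name__ == '__main__':
    t = numpy.linspace(0, 2 * numpy.pi, 100, endpoint=False)
    pty = 10.0 + 5.0 * numpy.sin(t)
    ptx = 20.0 + 3.0 * numpy.cos(t)
    print(fit_ellipse(pty, ptx))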
| 37.850575 | 99 | 0.6198 | [
"BSD-3-Clause"
] | michel4j/auto-process | autoprocess/utils/ellipse.py | 3,295 | Python |
"""
Slixmpp: The Slick XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
from slixmpp.plugins.base import register_plugin
from slixmpp.plugins.xep_0199.stanza import Ping
from slixmpp.plugins.xep_0199.ping import XEP_0199
register_plugin(XEP_0199)
| 21.8125 | 50 | 0.765043 | [
"BSD-3-Clause"
] | 0mp/slixmpp | slixmpp/plugins/xep_0199/__init__.py | 349 | Python |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Rotation around the X axis."""
import math
import numpy
from qiskit.qasm import pi
from qiskit.circuit.controlledgate import ControlledGate
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
class RXGate(Gate):
r"""Single-qubit rotation about the X axis.
**Circuit symbol:**
.. parsed-literal::
┌───────┐
q_0: ┤ Rx(ϴ) ├
└───────┘
**Matrix Representation:**
.. math::
\newcommand{\th}{\frac{\theta}{2}}
RX(\theta) = exp(-i \th X) =
\begin{pmatrix}
\cos{\th} & -i\sin{\th} \\
-i\sin{\th} & \cos{\th}
\end{pmatrix}
"""
def __init__(self, theta, label=None):
"""Create new RX gate."""
super().__init__('rx', 1, [theta], label=label)
def _define(self):
"""
gate rx(theta) a {r(theta, 0) a;}
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .r import RGate
q = QuantumRegister(1, 'q')
qc = QuantumCircuit(q, name=self.name)
rules = [
(RGate(self.params[0], 0), [q[0]], [])
]
qc._data = rules
self.definition = qc
def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):
"""Return a (mutli-)controlled-RX gate.
Args:
num_ctrl_qubits (int): number of control qubits.
label (str or None): An optional label for the gate [Default: None]
ctrl_state (int or str or None): control state expressed as integer,
string (e.g. '110'), or None. If None, use all 1s.
Returns:
ControlledGate: controlled version of this gate.
"""
if num_ctrl_qubits == 1:
gate = CRXGate(self.params[0], label=label, ctrl_state=ctrl_state)
gate.base_gate.label = self.label
return gate
return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state)
def inverse(self):
r"""Return inverted RX gate.
:math:`RX(\lambda)^{\dagger} = RX(-\lambda)`
"""
return RXGate(-self.params[0])
def to_matrix(self):
"""Return a numpy.array for the RX gate."""
cos = math.cos(self.params[0] / 2)
sin = math.sin(self.params[0] / 2)
return numpy.array([[cos, -1j * sin],
[-1j * sin, cos]], dtype=complex)
class CRXGate(ControlledGate):
r"""Controlled-RX gate.
**Circuit symbol:**
.. parsed-literal::
q_0: ────■────
┌───┴───┐
q_1: ┤ Rx(ϴ) ├
└───────┘
**Matrix representation:**
.. math::
\newcommand{\th}{\frac{\theta}{2}}
CRX(\lambda)\ q_0, q_1 =
I \otimes |0\rangle\langle 0| + RX(\theta) \otimes |1\rangle\langle 1| =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & \cos{\th} & 0 & -i\sin{\th} \\
0 & 0 & 1 & 0 \\
0 & -i\sin{\th} & 0 & \cos{\th}
\end{pmatrix}
.. note::
In Qiskit's convention, higher qubit indices are more significant
(little endian convention). In many textbooks, controlled gates are
presented with the assumption of more significant qubits as control,
which in our case would be q_1. Thus a textbook matrix for this
gate will be:
.. parsed-literal::
┌───────┐
q_0: ┤ Rx(ϴ) ├
└───┬───┘
q_1: ────■────
.. math::
\newcommand{\th}{\frac{\theta}{2}}
CRX(\theta)\ q_1, q_0 =
|0\rangle\langle0| \otimes I + |1\rangle\langle1| \otimes RX(\theta) =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & \cos{\th} & -i\sin{\th} \\
0 & 0 & -i\sin{\th} & \cos{\th}
\end{pmatrix}
"""
def __init__(self, theta, label=None, ctrl_state=None):
"""Create new CRX gate."""
super().__init__('crx', 2, [theta], num_ctrl_qubits=1,
label=label, ctrl_state=ctrl_state)
self.base_gate = RXGate(theta)
def _define(self):
"""
gate cu3(theta,phi,lambda) c, t
{ u1(pi/2) t;
cx c,t;
u3(-theta/2,0,0) t;
cx c,t;
u3(theta/2,-pi/2,0) t;
}
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .u1 import U1Gate
from .u3 import U3Gate
from .x import CXGate
q = QuantumRegister(2, 'q')
qc = QuantumCircuit(q, name=self.name)
rules = [
(U1Gate(pi / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U3Gate(-self.params[0] / 2, 0, 0), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U3Gate(self.params[0] / 2, -pi / 2, 0), [q[1]], [])
]
qc._data = rules
self.definition = qc
def inverse(self):
"""Return inverse RX gate (i.e. with the negative rotation angle)."""
return CRXGate(-self.params[0])
def to_matrix(self):
"""Return a numpy.array for the CRX gate."""
half_theta = float(self.params[0]) / 2
cos = numpy.cos(half_theta)
isin = 1j * numpy.sin(half_theta)
if self.ctrl_state:
return numpy.array([[1, 0, 0, 0],
[0, cos, 0, -isin],
[0, 0, 1, 0],
[0, -isin, 0, cos]],
dtype=complex)
else:
return numpy.array([[cos, 0, -isin, 0],
[0, 1, 0, 0],
[-isin, 0, cos, 0],
[0, 0, 0, 1]],
dtype=complex)
| 31.278846 | 99 | 0.498156 | [
"Apache-2.0"
] | CatalinaAlbornoz/qiskit-terra | qiskit/circuit/library/standard_gates/rx.py | 6,665 | Python |
import random
from collections import defaultdict, deque
import logging
import operator as op
import time
from enum import unique, Flag
from functools import reduce
from BaseClasses import RegionType, Door, DoorType, Direction, Sector, CrystalBarrier
from Regions import key_only_locations
from Dungeons import hyrule_castle_regions, eastern_regions, desert_regions, hera_regions, tower_regions, pod_regions
from Dungeons import dungeon_regions, region_starts, split_region_starts, flexible_starts
from Dungeons import drop_entrances, dungeon_bigs, dungeon_keys, dungeon_hints
from Items import ItemFactory
from RoomData import DoorKind, PairedDoor
from DungeonGenerator import ExplorationState, convert_regions, generate_dungeon, validate_tr
from DungeonGenerator import create_dungeon_builders, split_dungeon_builder, simple_dungeon_builder
from KeyDoorShuffle import analyze_dungeon, validate_vanilla_key_logic, build_key_layout, validate_key_layout
def link_doors(world, player):
# Drop-down connections & push blocks
for exitName, regionName in logical_connections:
connect_simple_door(world, exitName, regionName, player)
# These should all be connected for now as normal connections
for edge_a, edge_b in interior_doors:
connect_interior_doors(edge_a, edge_b, world, player)
# These connections are here because they are currently unable to be shuffled
for entrance, ext in straight_staircases:
connect_two_way(world, entrance, ext, player)
for exitName, regionName in falldown_pits:
connect_simple_door(world, exitName, regionName, player)
for exitName, regionName in dungeon_warps:
connect_simple_door(world, exitName, regionName, player)
for ent, ext in ladders:
connect_two_way(world, ent, ext, player)
if world.doorShuffle[player] == 'vanilla':
for entrance, ext in open_edges:
connect_two_way(world, entrance, ext, player)
for exitName, regionName in vanilla_logical_connections:
connect_simple_door(world, exitName, regionName, player)
for entrance, ext in spiral_staircases:
connect_two_way(world, entrance, ext, player)
for entrance, ext in default_door_connections:
connect_two_way(world, entrance, ext, player)
for ent, ext in default_one_way_connections:
connect_one_way(world, ent, ext, player)
vanilla_key_logic(world, player)
elif world.doorShuffle[player] == 'basic':
# if not world.experimental[player]:
for entrance, ext in open_edges:
connect_two_way(world, entrance, ext, player)
within_dungeon(world, player)
elif world.doorShuffle[player] == 'crossed':
for entrance, ext in open_edges:
connect_two_way(world, entrance, ext, player)
cross_dungeon(world, player)
else:
logging.getLogger('').error('Invalid door shuffle setting: %s' % world.doorShuffle[player])
raise Exception('Invalid door shuffle setting: %s' % world.doorShuffle[player])
if world.doorShuffle[player] != 'vanilla':
create_door_spoiler(world, player)
# todo: I think this function is not necessary
def mark_regions(world, player):
# traverse dungeons and make sure dungeon property is assigned
player_dungeons = [dungeon for dungeon in world.dungeons if dungeon.player == player]
for dungeon in player_dungeons:
queue = deque(dungeon.regions)
while len(queue) > 0:
region = world.get_region(queue.popleft(), player)
if region.name not in dungeon.regions:
dungeon.regions.append(region.name)
region.dungeon = dungeon
for ext in region.exits:
d = world.check_for_door(ext.name, player)
connected = ext.connected_region
if d is not None and connected is not None:
if d.dest is not None and connected.name not in dungeon.regions and connected.type == RegionType.Dungeon and connected.name not in queue:
queue.append(connected) # needs to be added
elif connected is not None and connected.name not in dungeon.regions and connected.type == RegionType.Dungeon and connected.name not in queue:
queue.append(connected) # needs to be added
def create_door_spoiler(world, player):
logger = logging.getLogger('')
queue = deque(world.dungeon_layouts[player].values())
while len(queue) > 0:
builder = queue.popleft()
done = set()
start_regions = set(convert_regions(builder.layout_starts, world, player)) # todo: set all_entrances for basic
reg_queue = deque(start_regions)
visited = set(start_regions)
while len(reg_queue) > 0:
next = reg_queue.pop()
for ext in next.exits:
door_a = ext.door
connect = ext.connected_region
if door_a and door_a.type in [DoorType.Normal, DoorType.SpiralStairs] and door_a not in done:
done.add(door_a)
door_b = door_a.dest
if door_b:
done.add(door_b)
if not door_a.blocked and not door_b.blocked:
world.spoiler.set_door(door_a.name, door_b.name, 'both', player, builder.name)
elif door_a.blocked:
world.spoiler.set_door(door_b.name, door_a.name, 'entrance', player, builder.name)
elif door_b.blocked:
world.spoiler.set_door(door_a.name, door_b.name, 'entrance', player, builder.name)
else:
logger.warning('This is a bug during door spoiler')
else:
logger.warning('Door not connected: %s', door_a.name)
if connect and connect.type == RegionType.Dungeon and connect not in visited:
visited.add(connect)
reg_queue.append(connect)
def vanilla_key_logic(world, player):
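    """Build a single-sector builder per dungeon, keep the vanilla small key
    doors, and validate and analyze the resulting key layouts."""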
builders = []
world.dungeon_layouts[player] = {}
for dungeon in [dungeon for dungeon in world.dungeons if dungeon.player == player]:
sector = Sector()
sector.name = dungeon.name
sector.regions.extend(convert_regions(dungeon.regions, world, player))
builder = simple_dungeon_builder(sector.name, [sector])
builder.master_sector = sector
builders.append(builder)
world.dungeon_layouts[player][builder.name] = builder
overworld_prep(world, player)
entrances_map, potentials, connections = determine_entrance_list(world, player)
enabled_entrances = {}
sector_queue = deque(builders)
last_key, loops = None, 0
while len(sector_queue) > 0:
builder = sector_queue.popleft()
origin_list = list(entrances_map[builder.name])
find_enabled_origins(builder.sectors, enabled_entrances, origin_list, entrances_map, builder.name)
origin_list_sans_drops = remove_drop_origins(origin_list)
if len(origin_list_sans_drops) <= 0:
if last_key == builder.name or loops > 1000:
origin_name = world.get_region(origin_list[0], player).entrances[0].parent_region.name
                raise Exception('Infinite loop detected for "%s" located at %s' % (builder.name, origin_name))
sector_queue.append(builder)
last_key = builder.name
loops += 1
else:
find_new_entrances(builder.master_sector, entrances_map, connections, potentials, enabled_entrances, world, player)
start_regions = convert_regions(origin_list, world, player)
doors = convert_key_doors(default_small_key_doors[builder.name], world, player)
key_layout = build_key_layout(builder, start_regions, doors, world, player)
valid = validate_key_layout(key_layout, world, player)
if not valid:
logging.getLogger('').warning('Vanilla key layout not valid %s', builder.name)
if player not in world.key_logic.keys():
world.key_logic[player] = {}
analyze_dungeon(key_layout, world, player)
world.key_logic[player][builder.name] = key_layout.key_logic
log_key_logic(builder.name, key_layout.key_logic)
last_key = None
if world.shuffle[player] == 'vanilla' and world.accessibility[player] == 'items' and not world.retro[player]:
validate_vanilla_key_logic(world, player)
# some useful functions
oppositemap = {
Direction.South: Direction.North,
Direction.North: Direction.South,
Direction.West: Direction.East,
Direction.East: Direction.West,
Direction.Up: Direction.Down,
Direction.Down: Direction.Up,
}
def switch_dir(direction):
return oppositemap[direction]
def convert_key_doors(key_doors, world, player):
result = []
for d in key_doors:
if type(d) is tuple:
result.append((world.get_door(d[0], player), world.get_door(d[1], player)))
else:
result.append(world.get_door(d, player))
return result
def connect_simple_door(world, exit_name, region_name, player):
region = world.get_region(region_name, player)
world.get_entrance(exit_name, player).connect(region)
d = world.check_for_door(exit_name, player)
if d is not None:
d.dest = region
def connect_door_only(world, exit_name, region, player):
d = world.check_for_door(exit_name, player)
if d is not None:
d.dest = region
def connect_interior_doors(a, b, world, player):
door_a = world.get_door(a, player)
door_b = world.get_door(b, player)
if door_a.blocked:
connect_one_way(world, b, a, player)
elif door_b.blocked:
connect_one_way(world, a, b, player)
else:
connect_two_way(world, a, b, player)
def connect_two_way(world, entrancename, exitname, player):
entrance = world.get_entrance(entrancename, player)
ext = world.get_entrance(exitname, player)
# if these were already connected somewhere, remove the backreference
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if ext.connected_region is not None:
ext.connected_region.entrances.remove(ext)
entrance.connect(ext.parent_region)
ext.connect(entrance.parent_region)
if entrance.parent_region.dungeon:
ext.parent_region.dungeon = entrance.parent_region.dungeon
x = world.check_for_door(entrancename, player)
y = world.check_for_door(exitname, player)
if x is not None:
x.dest = y
if y is not None:
y.dest = x
def connect_one_way(world, entrancename, exitname, player):
entrance = world.get_entrance(entrancename, player)
ext = world.get_entrance(exitname, player)
# if these were already connected somewhere, remove the backreference
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if ext.connected_region is not None:
ext.connected_region.entrances.remove(ext)
entrance.connect(ext.parent_region)
if entrance.parent_region.dungeon:
ext.parent_region.dungeon = entrance.parent_region.dungeon
x = world.check_for_door(entrancename, player)
y = world.check_for_door(exitname, player)
if x is not None:
x.dest = y
if y is not None:
y.dest = x
def fix_big_key_doors_with_ugly_smalls(world, player):
remove_ugly_small_key_doors(world, player)
unpair_big_key_doors(world, player)
def remove_ugly_small_key_doors(world, player):
for d in ['Eastern Hint Tile Blocked Path SE', 'Eastern Darkness S', 'Thieves Hallway SE', 'Mire Left Bridge S',
'TR Lava Escape SE', 'GT Hidden Spikes SE']:
door = world.get_door(d, player)
room = world.get_room(door.roomIndex, player)
room.change(door.doorListPos, DoorKind.Normal)
door.smallKey = False
door.ugly = False
def unpair_big_key_doors(world, player):
problematic_bk_doors = ['Eastern Courtyard N', 'Eastern Big Key NE', 'Thieves BK Corner NE', 'Mire BK Door Room N',
'TR Dodgers NE', 'GT Dash Hall NE']
for paired_door in world.paired_doors[player]:
if paired_door.door_a in problematic_bk_doors or paired_door.door_b in problematic_bk_doors:
paired_door.pair = False
def pair_existing_key_doors(world, player, door_a, door_b):
already_paired = False
door_names = [door_a.name, door_b.name]
for pd in world.paired_doors[player]:
if pd.door_a in door_names and pd.door_b in door_names:
already_paired = True
break
if already_paired:
return
for paired_door in world.paired_doors[player]:
if paired_door.door_a in door_names or paired_door.door_b in door_names:
paired_door.pair = False
    world.paired_doors[player].append(PairedDoor(door_a.name, door_b.name))
# def unpair_all_doors(world, player):
# for paired_door in world.paired_doors[player]:
# paired_door.pair = False
def within_dungeon(world, player):
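    """Basic door shuffle: generate a layout for each dungeon from its own
    sectors, verify required paths, shuffle small key doors and smooth pairs."""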
fix_big_key_doors_with_ugly_smalls(world, player)
overworld_prep(world, player)
entrances_map, potentials, connections = determine_entrance_list(world, player)
connections_tuple = (entrances_map, potentials, connections)
dungeon_builders = {}
for key in dungeon_regions.keys():
sector_list = convert_to_sectors(dungeon_regions[key], world, player)
dungeon_builders[key] = simple_dungeon_builder(key, sector_list)
dungeon_builders[key].entrance_list = list(entrances_map[key])
recombinant_builders = {}
handle_split_dungeons(dungeon_builders, recombinant_builders, entrances_map)
main_dungeon_generation(dungeon_builders, recombinant_builders, connections_tuple, world, player)
paths = determine_required_paths(world, player)
check_required_paths(paths, world, player)
# shuffle_key_doors for dungeons
start = time.process_time()
for builder in world.dungeon_layouts[player].values():
shuffle_key_doors(builder, world, player)
logging.getLogger('').info('Key door shuffle time: %s', time.process_time()-start)
smooth_door_pairs(world, player)
def handle_split_dungeons(dungeon_builders, recombinant_builders, entrances_map):
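    """Dungeons with disjoint entrance groups (see split_region_starts) are split
    into sub-builders that are generated separately and recombined afterwards."""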
for name, split_list in split_region_starts.items():
builder = dungeon_builders.pop(name)
recombinant_builders[name] = builder
split_builders = split_dungeon_builder(builder, split_list)
dungeon_builders.update(split_builders)
for sub_name, split_entrances in split_list.items():
sub_builder = dungeon_builders[name+' '+sub_name]
sub_builder.split_flag = True
entrance_list = list(split_entrances)
if name in flexible_starts.keys():
add_shuffled_entrances(sub_builder.sectors, flexible_starts[name], entrance_list)
filtered_entrance_list = [x for x in entrance_list if x in entrances_map[name]]
sub_builder.entrance_list = filtered_entrance_list
def main_dungeon_generation(dungeon_builders, recombinant_builders, connections_tuple, world, player):
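    """Generate a layout for every builder, deferring any dungeon whose entrances
    are not yet enabled, then recombine split dungeons and store the layouts."""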
entrances_map, potentials, connections = connections_tuple
enabled_entrances = {}
sector_queue = deque(dungeon_builders.values())
last_key, loops = None, 0
while len(sector_queue) > 0:
builder = sector_queue.popleft()
split_dungeon = builder.name.startswith('Desert Palace') or builder.name.startswith('Skull Woods')
name = builder.name
if split_dungeon:
name = ' '.join(builder.name.split(' ')[:-1])
origin_list = list(builder.entrance_list)
find_enabled_origins(builder.sectors, enabled_entrances, origin_list, entrances_map, name)
origin_list_sans_drops = remove_drop_origins(origin_list)
        if len(origin_list_sans_drops) <= 0 or (name == "Turtle Rock" and not validate_tr(builder, origin_list_sans_drops, world, player)):
if last_key == builder.name or loops > 1000:
origin_name = world.get_region(origin_list[0], player).entrances[0].parent_region.name
                raise Exception('Infinite loop detected for "%s" located at %s' % (builder.name, origin_name))
sector_queue.append(builder)
last_key = builder.name
loops += 1
else:
logging.getLogger('').info('Generating dungeon: %s', builder.name)
ds = generate_dungeon(builder, origin_list_sans_drops, split_dungeon, world, player)
find_new_entrances(ds, entrances_map, connections, potentials, enabled_entrances, world, player)
ds.name = name
builder.master_sector = ds
builder.layout_starts = origin_list if len(builder.entrance_list) <= 0 else builder.entrance_list
last_key = None
combine_layouts(recombinant_builders, dungeon_builders, entrances_map)
world.dungeon_layouts[player] = {}
for builder in dungeon_builders.values():
find_enabled_origins([builder.master_sector], enabled_entrances, builder.layout_starts, entrances_map, builder.name)
builder.path_entrances = entrances_map[builder.name]
world.dungeon_layouts[player] = dungeon_builders
def determine_entrance_list(world, player):
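    """Classify each dungeon start region as an immediately usable entrance or a
    potential one gated behind an inaccessible overworld region."""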
entrance_map = {}
potential_entrances = {}
connections = {}
for key, r_names in region_starts.items():
entrance_map[key] = []
for region_name in r_names:
region = world.get_region(region_name, player)
for ent in region.entrances:
parent = ent.parent_region
if parent.type != RegionType.Dungeon or parent.name == 'Sewer Drop':
if parent.name not in world.inaccessible_regions[player]:
entrance_map[key].append(region_name)
else:
                    if parent not in potential_entrances.keys():
potential_entrances[parent] = []
potential_entrances[parent].append(region_name)
connections[region_name] = parent
return entrance_map, potential_entrances, connections
# todo: kill drop exceptions
def drop_exception(name):
return name in ['Skull Pot Circle', 'Skull Back Drop']
def add_shuffled_entrances(sectors, region_list, entrance_list):
for sector in sectors:
for region in sector.regions:
if region.name in region_list:
entrance_list.append(region.name)
def find_enabled_origins(sectors, enabled, entrance_list, entrance_map, key):
for sector in sectors:
for region in sector.regions:
if region.name in enabled.keys() and region.name not in entrance_list:
entrance_list.append(region.name)
origin_reg, origin_dungeon = enabled[region.name]
if origin_reg != region.name and origin_dungeon != region.dungeon:
if key not in entrance_map.keys():
key = ' '.join(key.split(' ')[:-1])
entrance_map[key].append(region.name)
if drop_exception(region.name): # only because they have unique regions
entrance_list.append(region.name)
def remove_drop_origins(entrance_list):
return [x for x in entrance_list if x not in drop_entrances]
def find_new_entrances(sector, entrances_map, connections, potentials, enabled, world, player):
for region in sector.regions:
if region.name in connections.keys() and (connections[region.name] in potentials.keys() or connections[region.name].name in world.inaccessible_regions[player]):
enable_new_entrances(region, connections, potentials, enabled, world, player)
inverted_aga_check(entrances_map, connections, potentials, enabled, world, player)
def enable_new_entrances(region, connections, potentials, enabled, world, player):
new_region = connections[region.name]
if new_region in potentials.keys():
for potential in potentials.pop(new_region):
enabled[potential] = (region.name, region.dungeon)
# see if this unexplored region connects elsewhere
queue = deque(new_region.exits)
visited = set()
while len(queue) > 0:
ext = queue.popleft()
visited.add(ext)
region_name = ext.connected_region.name
if region_name in connections.keys() and connections[region_name] in potentials.keys():
for potential in potentials.pop(connections[region_name]):
enabled[potential] = (region.name, region.dungeon)
if ext.connected_region.name in world.inaccessible_regions[player]:
for new_exit in ext.connected_region.exits:
if new_exit not in visited:
queue.append(new_exit)
def inverted_aga_check(entrances_map, connections, potentials, enabled, world, player):
if world.mode[player] == 'inverted':
if 'Agahnims Tower' in entrances_map.keys() or aga_tower_enabled(enabled):
for region in list(potentials.keys()):
if region.name == 'Hyrule Castle Ledge':
for r_name in potentials[region]:
new_region = world.get_region(r_name, player)
enable_new_entrances(new_region, connections, potentials, enabled, world, player)
def aga_tower_enabled(enabled):
for region_name, enabled_tuple in enabled.items():
entrance, dungeon = enabled_tuple
if dungeon.name == 'Agahnims Tower':
return True
return False
def within_dungeon_legacy(world, player):
# TODO: The "starts" regions need access logic
# Aerinon's note: I think this is handled already by ER Rules - may need to check correct requirements
dungeon_region_starts_es = ['Hyrule Castle Lobby', 'Hyrule Castle West Lobby', 'Hyrule Castle East Lobby', 'Sewers Secret Room']
dungeon_region_starts_ep = ['Eastern Lobby']
dungeon_region_starts_dp = ['Desert Back Lobby', 'Desert Main Lobby', 'Desert West Lobby', 'Desert East Lobby']
dungeon_region_starts_th = ['Hera Lobby']
dungeon_region_starts_at = ['Tower Lobby']
dungeon_region_starts_pd = ['PoD Lobby']
dungeon_region_lists = [
(dungeon_region_starts_es, hyrule_castle_regions),
(dungeon_region_starts_ep, eastern_regions),
(dungeon_region_starts_dp, desert_regions),
(dungeon_region_starts_th, hera_regions),
(dungeon_region_starts_at, tower_regions),
(dungeon_region_starts_pd, pod_regions),
]
for start_list, region_list in dungeon_region_lists:
shuffle_dungeon(world, player, start_list, region_list)
world.dungeon_layouts[player] = {}
for key in dungeon_regions.keys():
world.dungeon_layouts[player][key] = (key, region_starts[key])
def shuffle_dungeon(world, player, start_region_names, dungeon_region_names):
logger = logging.getLogger('')
# Part one - generate a random layout
available_regions = []
for name in [r for r in dungeon_region_names if r not in start_region_names]:
available_regions.append(world.get_region(name, player))
random.shuffle(available_regions)
# "Ugly" doors are doors that we don't want to see from the front, because of some
# sort of unsupported key door. To handle them, make a map of "ugly regions" and
# never link across them.
ugly_regions = {}
next_ugly_region = 1
# Add all start regions to the open set.
available_doors = []
for name in start_region_names:
logger.info("Starting in %s", name)
for door in get_doors(world, world.get_region(name, player), player):
ugly_regions[door.name] = 0
available_doors.append(door)
# Loop until all available doors are used
while len(available_doors) > 0:
# Pick a random available door to connect, prioritizing ones that aren't blocked.
# This makes them either get picked up through another door (so they head deeper
# into the dungeon), or puts them late in the dungeon (so they probably are part
# of a loop). Panic if neither of these happens.
random.shuffle(available_doors)
available_doors.sort(key=lambda door: 1 if door.blocked else 0 if door.ugly else 2)
door = available_doors.pop()
logger.info('Linking %s', door.name)
# Find an available region that has a compatible door
connect_region, connect_door = find_compatible_door_in_regions(world, door, available_regions, player)
# Also ignore compatible doors if they're blocked; these should only be used to
# create loops.
if connect_region is not None and not door.blocked:
logger.info(' Found new region %s via %s', connect_region.name, connect_door.name)
# Apply connection and add the new region's doors to the available list
maybe_connect_two_way(world, door, connect_door, player)
# Figure out the new room's ugliness region
new_room_ugly_region = ugly_regions[door.name]
if connect_door.ugly:
next_ugly_region += 1
new_room_ugly_region = next_ugly_region
is_new_region = connect_region in available_regions
# Add the doors
for door in get_doors(world, connect_region, player):
ugly_regions[door.name] = new_room_ugly_region
if is_new_region:
available_doors.append(door)
# If an ugly door is anything but the connect door, panic and die
if door != connect_door and door.ugly:
logger.info('Failed because of ugly door, trying again.')
shuffle_dungeon(world, player, start_region_names, dungeon_region_names)
return
# We've used this region and door, so don't use them again
if is_new_region:
available_regions.remove(connect_region)
if connect_door in available_doors:
available_doors.remove(connect_door)
else:
# If there's no available region with a door, use an internal connection
connect_door = find_compatible_door_in_list(ugly_regions, world, door, available_doors, player)
if connect_door is not None:
logger.info(' Adding loop via %s', connect_door.name)
maybe_connect_two_way(world, door, connect_door, player)
if connect_door in available_doors:
available_doors.remove(connect_door)
# Check that we used everything, and retry if we failed
if len(available_regions) > 0 or len(available_doors) > 0:
logger.info('Failed to add all regions to dungeon, trying again.')
shuffle_dungeon(world, player, start_region_names, dungeon_region_names)
return
# Connects a and b. Or don't if they're an unsupported connection type.
# TODO: This is gross, don't do it this way
def maybe_connect_two_way(world, a, b, player):
# Return on unsupported types.
if a.type in [DoorType.Open, DoorType.StraightStairs, DoorType.Hole, DoorType.Warp, DoorType.Ladder,
DoorType.Interior, DoorType.Logical]:
return
# Connect supported types
if a.type == DoorType.Normal or a.type == DoorType.SpiralStairs:
if a.blocked:
connect_one_way(world, b.name, a.name, player)
elif b.blocked:
connect_one_way(world, a.name, b.name, player)
else:
connect_two_way(world, a.name, b.name, player)
return
# If we failed to account for a type, panic
raise RuntimeError('Unknown door type ' + a.type.name)
# Finds a compatible door in regions, returns the region and door
def find_compatible_door_in_regions(world, door, regions, player):
if door.type in [DoorType.Hole, DoorType.Warp, DoorType.Logical]:
return door.dest, door
for region in regions:
for proposed_door in get_doors(world, region, player):
if doors_compatible(door, proposed_door):
return region, proposed_door
return None, None
def find_compatible_door_in_list(ugly_regions, world, door, doors, player):
if door.type in [DoorType.Hole, DoorType.Warp, DoorType.Logical]:
return door
for proposed_door in doors:
if ugly_regions[door.name] != ugly_regions[proposed_door.name]:
continue
if doors_compatible(door, proposed_door):
return proposed_door
def get_doors(world, region, player):
res = []
    for ext in region.exits:
        door = world.check_for_door(ext.name, player)
if door is not None:
res.append(door)
return res
def get_entrance_doors(world, region, player):
res = []
    for ext in region.entrances:
        door = world.check_for_door(ext.name, player)
if door is not None:
res.append(door)
return res
def doors_compatible(a, b):
if a.type != b.type:
return False
if a.type == DoorType.Open:
return doors_fit_mandatory_pair(open_edges, a, b)
if a.type == DoorType.StraightStairs:
return doors_fit_mandatory_pair(straight_staircases, a, b)
if a.type == DoorType.Interior:
return doors_fit_mandatory_pair(interior_doors, a, b)
if a.type == DoorType.Ladder:
return doors_fit_mandatory_pair(ladders, a, b)
if a.type == DoorType.Normal and (a.smallKey or b.smallKey or a.bigKey or b.bigKey):
return doors_fit_mandatory_pair(key_doors, a, b)
if a.type in [DoorType.Hole, DoorType.Warp, DoorType.Logical]:
return False # these aren't compatible with anything
return a.direction == switch_dir(b.direction)
def doors_fit_mandatory_pair(pair_list, a, b):
for pair_a, pair_b in pair_list:
if (a.name == pair_a and b.name == pair_b) or (a.name == pair_b and b.name == pair_a):
return True
return False
# goals:
# 1. have enough chests to be interesting (2 more than dungeon items)
# 2. have a balanced amount of regions added (check)
# 3. prevent soft locks due to key usage (algorithm written)
# 4. rules in place to affect item placement (lamp, keys, etc. -- in rules)
# 5. to be complete -- all doors linked (check, somewhat)
# 6. avoid deadlocks/dead end dungeon (check)
# 7. certain paths through dungeon must be possible - be able to reach goals (check)
def cross_dungeon(world, player):
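    """Crossed door shuffle: pool all dungeon sectors, build new dungeons from
    them, generate layouts, distribute small keys across dungeons, and move the
    GT bosses to whichever dungeons ended up with their boss rooms."""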
fix_big_key_doors_with_ugly_smalls(world, player)
overworld_prep(world, player)
entrances_map, potentials, connections = determine_entrance_list(world, player)
connections_tuple = (entrances_map, potentials, connections)
all_sectors = []
for key in dungeon_regions.keys():
all_sectors.extend(convert_to_sectors(dungeon_regions[key], world, player))
dungeon_builders = create_dungeon_builders(all_sectors, world, player)
for builder in dungeon_builders.values():
builder.entrance_list = list(entrances_map[builder.name])
dungeon_obj = world.get_dungeon(builder.name, player)
for sector in builder.sectors:
for region in sector.regions:
region.dungeon = dungeon_obj
for loc in region.locations:
if loc.name in key_only_locations:
key_name = dungeon_keys[builder.name] if loc.name != 'Hyrule Castle - Big Key Drop' else dungeon_bigs[builder.name]
loc.forced_item = loc.item = ItemFactory(key_name, player)
recombinant_builders = {}
handle_split_dungeons(dungeon_builders, recombinant_builders, entrances_map)
main_dungeon_generation(dungeon_builders, recombinant_builders, connections_tuple, world, player)
paths = determine_required_paths(world, player)
check_required_paths(paths, world, player)
hc = world.get_dungeon('Hyrule Castle', player)
del hc.dungeon_items[0] # removes map
hc.dungeon_items.append(ItemFactory('Compass (Escape)', player))
at = world.get_dungeon('Agahnims Tower', player)
at.dungeon_items.append(ItemFactory('Compass (Agahnims Tower)', player))
gt = world.get_dungeon('Ganons Tower', player)
del gt.dungeon_items[0] # removes map
assign_cross_keys(dungeon_builders, world, player)
all_dungeon_items = [y for x in world.dungeons if x.player == player for y in x.all_items]
target_items = 34 if world.retro[player] else 63
d_items = target_items - len(all_dungeon_items)
if d_items > 0:
if d_items >= 1: # restore HC map
world.get_dungeon('Hyrule Castle', player).dungeon_items.append(ItemFactory('Map (Escape)', player))
if d_items >= 2: # restore GT map
world.get_dungeon('Ganons Tower', player).dungeon_items.append(ItemFactory('Map (Ganons Tower)', player))
if d_items > 2:
world.pool_adjustment[player] = d_items - 2
elif d_items < 0:
world.pool_adjustment[player] = d_items
smooth_door_pairs(world, player)
# Re-assign dungeon bosses
gt = world.get_dungeon('Ganons Tower', player)
for name, builder in dungeon_builders.items():
reassign_boss('GT Ice Armos', 'bottom', builder, gt, world, player)
reassign_boss('GT Lanmolas 2', 'middle', builder, gt, world, player)
reassign_boss('GT Moldorm', 'top', builder, gt, world, player)
if world.hints[player]:
refine_hints(dungeon_builders)
def assign_cross_keys(dungeon_builders, world, player):
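    """Distribute the small key pool (29 keys) across the crossed dungeons in
    proportion to each dungeon's key door candidates, then finalize key doors."""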
start = time.process_time()
total_keys = remaining = 29
total_candidates = 0
start_regions_map = {}
# Step 1: Find Small Key Door Candidates
for name, builder in dungeon_builders.items():
dungeon = world.get_dungeon(name, player)
if not builder.bk_required or builder.bk_provided:
dungeon.big_key = None
elif builder.bk_required and not builder.bk_provided:
dungeon.big_key = ItemFactory(dungeon_bigs[name], player)
start_regions = convert_regions(builder.path_entrances, world, player)
find_small_key_door_candidates(builder, start_regions, world, player)
builder.key_doors_num = max(0, len(builder.candidates) - builder.key_drop_cnt)
total_candidates += builder.key_doors_num
start_regions_map[name] = start_regions
# Step 2: Initial Key Number Assignment & Calculate Flexibility
for name, builder in dungeon_builders.items():
calculated = int(round(builder.key_doors_num*total_keys/total_candidates))
max_keys = builder.location_cnt - calc_used_dungeon_items(builder)
cand_len = max(0, len(builder.candidates) - builder.key_drop_cnt)
limit = min(max_keys, cand_len)
suggested = min(calculated, limit)
combo_size = ncr(len(builder.candidates), suggested + builder.key_drop_cnt)
while combo_size > 500000 and suggested > 0:
suggested -= 1
combo_size = ncr(len(builder.candidates), suggested + builder.key_drop_cnt)
builder.key_doors_num = suggested + builder.key_drop_cnt
remaining -= suggested
builder.combo_size = combo_size
if suggested < limit:
builder.flex = limit - suggested
# Step 3: Initial valid combination find - reduce flex if needed
for name, builder in dungeon_builders.items():
suggested = builder.key_doors_num - builder.key_drop_cnt
find_valid_combination(builder, start_regions_map[name], world, player)
actual_chest_keys = builder.key_doors_num - builder.key_drop_cnt
if actual_chest_keys < suggested:
remaining += suggested - actual_chest_keys
builder.flex = 0
# Step 4: Try to assign remaining keys
builder_order = [x for x in dungeon_builders.values() if x.flex > 0]
builder_order.sort(key=lambda b: b.combo_size)
queue = deque(builder_order)
logger = logging.getLogger('')
while len(queue) > 0 and remaining > 0:
builder = queue.popleft()
name = builder.name
logger.info('Cross Dungeon: Increasing key count by 1 for %s', name)
builder.key_doors_num += 1
result = find_valid_combination(builder, start_regions_map[name], world, player, drop_keys=False)
if result:
remaining -= 1
builder.flex -= 1
if builder.flex > 0:
builder.combo_size = ncr(len(builder.candidates), builder.key_doors_num)
queue.append(builder)
queue = deque(sorted(queue, key=lambda b: b.combo_size))
else:
logger.info('Cross Dungeon: Increase failed for %s', name)
builder.key_doors_num -= 1
builder.flex = 0
logger.info('Cross Dungeon: Keys unable to assign in pool %s', remaining)
# Last Step: Adjust Small Key Dungeon Pool
if not world.retro[player]:
for name, builder in dungeon_builders.items():
reassign_key_doors(builder, world, player)
log_key_logic(builder.name, world.key_logic[player][builder.name])
actual_chest_keys = max(builder.key_doors_num - builder.key_drop_cnt, 0)
dungeon = world.get_dungeon(name, player)
if actual_chest_keys == 0:
dungeon.small_keys = []
else:
dungeon.small_keys = [ItemFactory(dungeon_keys[name], player)] * actual_chest_keys
logging.getLogger('').info('Cross Dungeon: Key door shuffle time: %s', time.process_time()-start)
def reassign_boss(boss_region, boss_key, builder, gt, world, player):
if boss_region in builder.master_sector.region_set():
new_dungeon = world.get_dungeon(builder.name, player)
if new_dungeon != gt:
gt_boss = gt.bosses.pop(boss_key)
new_dungeon.bosses[boss_key] = gt_boss
def refine_hints(dungeon_builders):
for name, builder in dungeon_builders.items():
for region in builder.master_sector.regions:
for location in region.locations:
if not location.event and '- Boss' not in location.name and '- Prize' not in location.name and location.name != 'Sanctuary':
location.hint_text = dungeon_hints[name]
def convert_to_sectors(region_names, world, player):
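    """Group the given regions into connected sectors, merging with previously
    found sectors when a connection between them is discovered, and record
    unconnected doors as each sector's outstanding doors."""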
region_list = convert_regions(region_names, world, player)
sectors = []
while len(region_list) > 0:
region = region_list.pop()
new_sector = True
region_chunk = [region]
exits = []
exits.extend(region.exits)
outstanding_doors = []
matching_sectors = []
while len(exits) > 0:
ext = exits.pop()
door = ext.door
if ext.connected_region is not None or door is not None and door.controller is not None:
if door is not None and door.controller is not None:
connect_region = world.get_entrance(door.controller.name, player).parent_region
else:
connect_region = ext.connected_region
if connect_region not in region_chunk and connect_region in region_list:
region_list.remove(connect_region)
region_chunk.append(connect_region)
exits.extend(connect_region.exits)
if connect_region not in region_chunk:
for existing in sectors:
if connect_region in existing.regions:
new_sector = False
if existing not in matching_sectors:
matching_sectors.append(existing)
else:
if door is not None and door.controller is None and door.dest is None:
outstanding_doors.append(door)
sector = Sector()
if not new_sector:
for match in matching_sectors:
sector.regions.extend(match.regions)
sector.outstanding_doors.extend(match.outstanding_doors)
sectors.remove(match)
sector.regions.extend(region_chunk)
sector.outstanding_doors.extend(outstanding_doors)
sectors.append(sector)
return sectors
# those with split region starts like Desert/Skull combine for key layouts
def combine_layouts(recombinant_builders, dungeon_builders, entrances_map):
for recombine in recombinant_builders.values():
queue = deque(dungeon_builders.values())
while len(queue) > 0:
builder = queue.pop()
if builder.name.startswith(recombine.name):
del dungeon_builders[builder.name]
if recombine.master_sector is None:
recombine.master_sector = builder.master_sector
recombine.master_sector.name = recombine.name
recombine.pre_open_stonewall = builder.pre_open_stonewall
else:
recombine.master_sector.regions.extend(builder.master_sector.regions)
if builder.pre_open_stonewall:
recombine.pre_open_stonewall = builder.pre_open_stonewall
recombine.layout_starts = list(entrances_map[recombine.name])
dungeon_builders[recombine.name] = recombine
def valid_region_to_explore(region, world, player):
return region.type == RegionType.Dungeon or region.name in world.inaccessible_regions[player]
def shuffle_key_doors(builder, world, player):
start_regions = convert_regions(builder.path_entrances, world, player)
# count number of key doors - this could be a table?
num_key_doors = 0
skips = []
for region in builder.master_sector.regions:
for ext in region.exits:
d = world.check_for_door(ext.name, player)
if d is not None and d.smallKey:
if d not in skips:
if d.type == DoorType.Interior:
skips.append(d.dest)
if d.type == DoorType.Normal:
for dp in world.paired_doors[player]:
if d.name == dp.door_a:
skips.append(world.get_door(dp.door_b, player))
break
elif d.name == dp.door_b:
skips.append(world.get_door(dp.door_a, player))
break
num_key_doors += 1
builder.key_doors_num = num_key_doors
find_small_key_door_candidates(builder, start_regions, world, player)
find_valid_combination(builder, start_regions, world, player)
reassign_key_doors(builder, world, player)
log_key_logic(builder.name, world.key_logic[player][builder.name])
def find_current_key_doors(builder):
current_doors = []
for region in builder.master_sector.regions:
for ext in region.exits:
d = ext.door
if d and d.smallKey:
current_doors.append(d)
return current_doors
def find_small_key_door_candidates(builder, start_regions, world, player):
# traverse dungeon and find candidates
candidates = []
checked_doors = set()
for region in start_regions:
possible, checked = find_key_door_candidates(region, checked_doors, world, player)
candidates.extend(possible)
checked_doors.update(checked)
flat_candidates = []
for candidate in candidates:
        # not valid if: a Normal door whose pair was already checked but is not itself a candidate
if candidate.type != DoorType.Normal or candidate.dest not in checked_doors or candidate.dest in candidates:
flat_candidates.append(candidate)
paired_candidates = build_pair_list(flat_candidates)
builder.candidates = paired_candidates
def calc_used_dungeon_items(builder):
base = 4
if builder.bk_required and not builder.bk_provided:
base += 1
if builder.name == 'Hyrule Castle':
base -= 1 # Missing compass/map
if builder.name == 'Agahnims Tower':
base -= 2 # Missing both compass/map
# gt can lose map once compasses work
return base
def find_valid_combination(builder, start_regions, world, player, drop_keys=True):
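    """Randomly sample combinations of candidate doors of the requested size and
    validate them; if no valid layout turns up in a reasonable time, lower the
    key door count (when drop_keys is allowed) and try again."""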
logger = logging.getLogger('')
logger.info('Shuffling Key doors for %s', builder.name)
# find valid combination of candidates
if len(builder.candidates) < builder.key_doors_num:
if not drop_keys:
logger.info('No valid layouts for %s with %s doors', builder.name, builder.key_doors_num)
return False
builder.key_doors_num = len(builder.candidates) # reduce number of key doors
logger.info('Lowering key door count because not enough candidates: %s', builder.name)
combinations = ncr(len(builder.candidates), builder.key_doors_num)
itr = 0
start = time.process_time()
sample_list = list(range(0, int(combinations)))
random.shuffle(sample_list)
proposal = kth_combination(sample_list[itr], builder.candidates, builder.key_doors_num)
key_layout = build_key_layout(builder, start_regions, proposal, world, player)
while not validate_key_layout(key_layout, world, player):
itr += 1
stop_early = False
if itr % 1000 == 0:
mark = time.process_time()-start
if (mark > 10 and itr*100/combinations > 50) or (mark > 20 and itr*100/combinations > 25) or mark > 30:
stop_early = True
if itr >= combinations or stop_early:
if not drop_keys:
logger.info('No valid layouts for %s with %s doors', builder.name, builder.key_doors_num)
return False
logger.info('Lowering key door count because no valid layouts: %s', builder.name)
builder.key_doors_num -= 1
if builder.key_doors_num < 0:
raise Exception('Bad dungeon %s - 0 key doors not valid' % builder.name)
combinations = ncr(len(builder.candidates), builder.key_doors_num)
sample_list = list(range(0, int(combinations)))
random.shuffle(sample_list)
itr = 0
start = time.process_time() # reset time since itr reset
proposal = kth_combination(sample_list[itr], builder.candidates, builder.key_doors_num)
key_layout.reset(proposal, builder, world, player)
if (itr+1) % 1000 == 0:
mark = time.process_time()-start
logger.info('%s time elapsed. %s iterations/s', mark, itr/mark)
# make changes
if player not in world.key_logic.keys():
world.key_logic[player] = {}
analyze_dungeon(key_layout, world, player)
builder.key_door_proposal = proposal
world.key_logic[player][builder.name] = key_layout.key_logic
world.key_layout[player][builder.name] = key_layout
return True
def log_key_logic(d_name, key_logic):
logger = logging.getLogger('')
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Key Logic for %s', d_name)
if len(key_logic.bk_restricted) > 0:
logger.debug('-BK Restrictions')
for restriction in key_logic.bk_restricted:
logger.debug(restriction)
if len(key_logic.sm_restricted) > 0:
logger.debug('-Small Restrictions')
for restriction in key_logic.sm_restricted:
logger.debug(restriction)
for key in key_logic.door_rules.keys():
rule = key_logic.door_rules[key]
logger.debug('--Rule for %s: Nrm:%s Allow:%s Loc:%s Alt:%s', key, rule.small_key_num, rule.allow_small, rule.small_location, rule.alternate_small_key)
if rule.alternate_small_key is not None:
for loc in rule.alternate_big_key_loc:
logger.debug('---BK Loc %s', loc.name)
logger.debug('Placement rules for %s', d_name)
for rule in key_logic.placement_rules:
logger.debug('*Rule for %s:', rule.door_reference)
if rule.bk_conditional_set:
logger.debug('**BK Checks %s', ','.join([x.name for x in rule.bk_conditional_set]))
logger.debug('**BK Blocked By Door (%s) : %s', rule.needed_keys_wo_bk, ','.join([x.name for x in rule.check_locations_wo_bk]))
logger.debug('**BK Elsewhere (%s) : %s', rule.needed_keys_w_bk, ','.join([x.name for x in rule.check_locations_w_bk]))
def build_pair_list(flat_list):
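    """Collapse doors whose destination is also in the list into (door, dest)
    pairs; spiral staircases and unmatched doors stay single."""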
paired_list = []
queue = deque(flat_list)
while len(queue) > 0:
d = queue.pop()
if d.dest in queue and d.type != DoorType.SpiralStairs:
paired_list.append((d, d.dest))
queue.remove(d.dest)
else:
paired_list.append(d)
return paired_list
def flatten_pair_list(paired_list):
flat_list = []
for d in paired_list:
if type(d) is tuple:
flat_list.append(d[0])
flat_list.append(d[1])
else:
flat_list.append(d)
return flat_list
def find_key_door_candidates(region, checked, world, player):
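    """Traverse the dungeon from region and collect doors whose room data allows
    them to become small key doors, returning the candidates and all doors that
    were examined."""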
dungeon = region.dungeon
candidates = []
checked_doors = list(checked)
queue = deque([(region, None, None)])
while len(queue) > 0:
current, last_door, last_region = queue.pop()
for ext in current.exits:
d = ext.door
if d and d.controller:
d = d.controller
if d is not None and not d.blocked and d.dest is not last_door and d.dest is not last_region and d not in checked_doors:
valid = False
if 0 <= d.doorListPos < 4 and d.type in [DoorType.Interior, DoorType.Normal, DoorType.SpiralStairs]:
room = world.get_room(d.roomIndex, player)
position, kind = room.doorList[d.doorListPos]
if d.type == DoorType.Interior:
valid = kind in [DoorKind.Normal, DoorKind.SmallKey, DoorKind.Bombable, DoorKind.Dashable]
elif d.type == DoorType.SpiralStairs:
valid = kind in [DoorKind.StairKey, DoorKind.StairKey2, DoorKind.StairKeyLow]
elif d.type == DoorType.Normal:
d2 = d.dest
if d2 not in candidates:
room_b = world.get_room(d2.roomIndex, player)
pos_b, kind_b = room_b.doorList[d2.doorListPos]
okay_normals = [DoorKind.Normal, DoorKind.SmallKey, DoorKind.Bombable,
DoorKind.Dashable, DoorKind.DungeonChanger]
valid = kind in okay_normals and kind_b in okay_normals
if valid and 0 <= d2.doorListPos < 4:
candidates.append(d2)
else:
valid = True
if valid and d not in candidates:
candidates.append(d)
if ext.connected_region.type != RegionType.Dungeon or ext.connected_region.dungeon == dungeon:
queue.append((ext.connected_region, d, current))
if d is not None:
checked_doors.append(d)
return candidates, checked_doors
def kth_combination(k, l, r):
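    """Return the k-th r-element combination of l, with combinations ordered by
    the position of their elements in l, e.g. kth_combination(0, ['a', 'b', 'c'], 2)
    returns ['a', 'b']."""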
if r == 0:
return []
elif len(l) == r:
return l
else:
i = ncr(len(l)-1, r-1)
if k < i:
return l[0:1] + kth_combination(k, l[1:], r-1)
else:
return kth_combination(k-i, l[1:], r)
def ncr(n, r):
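    """Binomial coefficient: the number of ways to choose r items from n."""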
if r == 0:
return 1
r = min(r, n-r)
numerator = reduce(op.mul, range(n, n-r, -1), 1)
denominator = reduce(op.mul, range(1, r+1), 1)
    return numerator // denominator
def reassign_key_doors(builder, world, player):
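    """Remove small key status from current key doors that were not chosen and
    convert the proposed candidates into small key doors, updating paired door
    data and the spoiler log."""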
logger = logging.getLogger('')
logger.debug('Key doors for %s', builder.name)
proposal = builder.key_door_proposal
flat_proposal = flatten_pair_list(proposal)
queue = deque(find_current_key_doors(builder))
while len(queue) > 0:
d = queue.pop()
if d.type is DoorType.SpiralStairs and d not in proposal:
room = world.get_room(d.roomIndex, player)
if room.doorList[d.doorListPos][1] == DoorKind.StairKeyLow:
room.delete(d.doorListPos)
else:
if len(room.doorList) > 1:
room.mirror(d.doorListPos) # I think this works for crossed now
else:
room.delete(d.doorListPos)
d.smallKey = False
elif d.type is DoorType.Interior and d not in flat_proposal and d.dest not in flat_proposal:
world.get_room(d.roomIndex, player).change(d.doorListPos, DoorKind.Normal)
d.smallKey = False
d.dest.smallKey = False
queue.remove(d.dest)
elif d.type is DoorType.Normal and d not in flat_proposal:
world.get_room(d.roomIndex, player).change(d.doorListPos, DoorKind.Normal)
d.smallKey = False
for dp in world.paired_doors[player]:
if dp.door_a == d.name or dp.door_b == d.name:
dp.pair = False
for obj in proposal:
if type(obj) is tuple:
d1 = obj[0]
d2 = obj[1]
if d1.type is DoorType.Interior:
change_door_to_small_key(d1, world, player)
d2.smallKey = True # ensure flag is set
else:
names = [d1.name, d2.name]
found = False
for dp in world.paired_doors[player]:
if dp.door_a in names and dp.door_b in names:
dp.pair = True
found = True
elif dp.door_a in names:
dp.pair = False
elif dp.door_b in names:
dp.pair = False
if not found:
world.paired_doors[player].append(PairedDoor(d1.name, d2.name))
change_door_to_small_key(d1, world, player)
change_door_to_small_key(d2, world, player)
world.spoiler.set_door_type(d1.name+' <-> '+d2.name, 'Key Door', player)
logger.debug('Key Door: %s', d1.name+' <-> '+d2.name)
else:
d = obj
if d.type is DoorType.Interior:
change_door_to_small_key(d, world, player)
d.dest.smallKey = True # ensure flag is set
elif d.type is DoorType.SpiralStairs:
pass # we don't have spiral stairs candidates yet that aren't already key doors
elif d.type is DoorType.Normal:
change_door_to_small_key(d, world, player)
world.spoiler.set_door_type(d.name, 'Key Door', player)
logger.debug('Key Door: %s', d.name)
def change_door_to_small_key(d, world, player):
d.smallKey = True
room = world.get_room(d.roomIndex, player)
if room.doorList[d.doorListPos][1] != DoorKind.SmallKey:
room.change(d.doorListPos, DoorKind.SmallKey)
def smooth_door_pairs(world, player):
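    """Reconcile the two sides of each shuffled door: make small key, bombable
    and dashable kinds match on both sides of normal doors and keep the
    paired-door table consistent (with an experimental random door kind option)."""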
all_doors = [x for x in world.doors if x.player == player]
skip = set()
for door in all_doors:
if door.type in [DoorType.Normal, DoorType.Interior] and door not in skip:
partner = door.dest
skip.add(partner)
room_a = world.get_room(door.roomIndex, player)
room_b = world.get_room(partner.roomIndex, player)
type_a = room_a.kind(door)
type_b = room_b.kind(partner)
valid_pair = stateful_door(door, type_a) and stateful_door(partner, type_b)
if door.type == DoorType.Normal:
if type_a == DoorKind.SmallKey or type_b == DoorKind.SmallKey:
if valid_pair:
if type_a != DoorKind.SmallKey:
room_a.change(door.doorListPos, DoorKind.SmallKey)
if type_b != DoorKind.SmallKey:
room_b.change(partner.doorListPos, DoorKind.SmallKey)
add_pair(door, partner, world, player)
else:
if type_a == DoorKind.SmallKey:
remove_pair(door, world, player)
if type_b == DoorKind.SmallKey:
                            remove_pair(partner, world, player)
elif type_a in [DoorKind.Bombable, DoorKind.Dashable] or type_b in [DoorKind.Bombable, DoorKind.Dashable]:
if valid_pair:
if type_a == type_b:
add_pair(door, partner, world, player)
spoiler_type = 'Bomb Door' if type_a == DoorKind.Bombable else 'Dash Door'
world.spoiler.set_door_type(door.name + ' <-> ' + partner.name, spoiler_type, player)
else:
new_type = DoorKind.Dashable if type_a == DoorKind.Dashable or type_b == DoorKind.Dashable else DoorKind.Bombable
if type_a != new_type:
room_a.change(door.doorListPos, new_type)
if type_b != new_type:
room_b.change(partner.doorListPos, new_type)
add_pair(door, partner, world, player)
spoiler_type = 'Bomb Door' if new_type == DoorKind.Bombable else 'Dash Door'
world.spoiler.set_door_type(door.name + ' <-> ' + partner.name, spoiler_type, player)
else:
if type_a in [DoorKind.Bombable, DoorKind.Dashable]:
room_a.change(door.doorListPos, DoorKind.Normal)
remove_pair(door, world, player)
elif type_b in [DoorKind.Bombable, DoorKind.Dashable]:
room_b.change(partner.doorListPos, DoorKind.Normal)
remove_pair(partner, world, player)
elif world.experimental[player] and valid_pair and type_a != DoorKind.SmallKey and type_b != DoorKind.SmallKey:
random_door_type(door, partner, world, player, type_a, type_b, room_a, room_b)
world.paired_doors[player] = [x for x in world.paired_doors[player] if x.pair or x.original]
def add_pair(door_a, door_b, world, player):
pair_a, pair_b = None, None
for paired_door in world.paired_doors[player]:
if paired_door.door_a == door_a.name and paired_door.door_b == door_b.name:
paired_door.pair = True
return
if paired_door.door_a == door_b.name and paired_door.door_b == door_a.name:
paired_door.pair = True
return
if paired_door.door_a == door_a.name or paired_door.door_b == door_a.name:
pair_a = paired_door
if paired_door.door_a == door_b.name or paired_door.door_b == door_b.name:
pair_b = paired_door
if pair_a:
pair_a.pair = False
if pair_b:
pair_b.pair = False
    world.paired_doors[player].append(PairedDoor(door_a.name, door_b.name))
def remove_pair(door, world, player):
for paired_door in world.paired_doors[player]:
if paired_door.door_a == door.name or paired_door.door_b == door.name:
paired_door.pair = False
break
def stateful_door(door, kind):
if 0 <= door.doorListPos < 4:
return kind in [DoorKind.Normal, DoorKind.SmallKey, DoorKind.Bombable, DoorKind.Dashable] #, DoorKind.BigKey]
return False
def random_door_type(door, partner, world, player, type_a, type_b, room_a, room_b):
r_kind = random.choices([DoorKind.Normal, DoorKind.Bombable, DoorKind.Dashable], [5, 2, 3], k=1)[0]
if r_kind != DoorKind.Normal:
if door.type == DoorType.Normal:
add_pair(door, partner, world, player)
if type_a != r_kind:
room_a.change(door.doorListPos, r_kind)
if type_b != r_kind:
room_b.change(partner.doorListPos, r_kind)
spoiler_type = 'Bomb Door' if r_kind == DoorKind.Bombable else 'Dash Door'
world.spoiler.set_door_type(door.name + ' <-> ' + partner.name, spoiler_type, player)
def determine_required_paths(world, player):
paths = {
'Hyrule Castle': ['Hyrule Castle Lobby', 'Hyrule Castle West Lobby', 'Hyrule Castle East Lobby'],
'Eastern Palace': ['Eastern Boss'],
'Desert Palace': ['Desert Main Lobby', 'Desert East Lobby', 'Desert West Lobby', 'Desert Boss'],
'Tower of Hera': ['Hera Boss'],
'Agahnims Tower': ['Tower Agahnim 1'],
'Palace of Darkness': ['PoD Boss'],
'Swamp Palace': ['Swamp Boss'],
'Skull Woods': ['Skull 1 Lobby', 'Skull 2 East Lobby', 'Skull 2 West Lobby', 'Skull Boss'],
'Thieves Town': ['Thieves Boss', ('Thieves Blind\'s Cell', 'Thieves Boss')],
'Ice Palace': ['Ice Boss'],
'Misery Mire': ['Mire Boss'],
'Turtle Rock': ['TR Main Lobby', 'TR Lazy Eyes', 'TR Big Chest Entrance', 'TR Eye Bridge', 'TR Boss'],
'Ganons Tower': ['GT Agahnim 2']
}
if world.mode[player] == 'standard':
paths['Hyrule Castle'].append('Hyrule Dungeon Cellblock')
# noinspection PyTypeChecker
paths['Hyrule Castle'].append(('Hyrule Dungeon Cellblock', 'Sanctuary'))
if world.doorShuffle[player] in ['basic']:
paths['Thieves Town'].append('Thieves Attic Window')
return paths
def overworld_prep(world, player):
find_inaccessible_regions(world, player)
add_inaccessible_doors(world, player)
def find_inaccessible_regions(world, player):
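    """Flood-fill the overworld from the start regions and record every
    non-dungeon region that cannot be reached; logical doors are added for these
    regions later so their dungeon exits can still be modeled."""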
world.inaccessible_regions[player] = []
if world.mode[player] != 'inverted':
start_regions = ['Links House', 'Sanctuary']
else:
start_regions = ['Inverted Links House', 'Inverted Dark Sanctuary']
regs = convert_regions(start_regions, world, player)
all_regions = set([r for r in world.regions if r.player == player and r.type is not RegionType.Dungeon])
visited_regions = set()
queue = deque(regs)
while len(queue) > 0:
next_region = queue.popleft()
visited_regions.add(next_region)
if next_region.name == 'Inverted Dark Sanctuary': # special spawn point in cave
for ent in next_region.entrances:
parent = ent.parent_region
if parent and parent.type is not RegionType.Dungeon and parent not in queue and parent not in visited_regions:
queue.append(parent)
for ext in next_region.exits:
connect = ext.connected_region
if connect and connect.type is not RegionType.Dungeon and connect not in queue and connect not in visited_regions:
queue.append(connect)
world.inaccessible_regions[player].extend([r.name for r in all_regions.difference(visited_regions) if valid_inaccessible_region(r)])
if world.mode[player] == 'standard':
world.inaccessible_regions[player].append('Hyrule Castle Ledge')
world.inaccessible_regions[player].append('Sewer Drop')
logger = logging.getLogger('')
logger.debug('Inaccessible Regions:')
for r in world.inaccessible_regions[player]:
logger.debug('%s', r)
def valid_inaccessible_region(r):
return r.type is not RegionType.Cave or (len(r.exits) > 0 and r.name not in ['Links House', 'Chris Houlihan Room'])
def add_inaccessible_doors(world, player):
# todo: ignore standard mode hyrule castle ledge?
for inaccessible_region in world.inaccessible_regions[player]:
region = world.get_region(inaccessible_region, player)
for ext in region.exits:
create_door(world, player, ext.name, region.name)
def create_door(world, player, entName, region_name):
entrance = world.get_entrance(entName, player)
connect = entrance.connected_region
for ext in connect.exits:
if ext.connected_region is not None and ext.connected_region.name == region_name:
            world.doors.append(Door(player, ext.name, DoorType.Logical, ext))
connect_door_only(world, ext.name, ext.connected_region, player)
    world.doors.append(Door(player, entName, DoorType.Logical, entrance))
connect_door_only(world, entName, connect, player)
def check_required_paths(paths, world, player):
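    """Explore each dungeon from its path entrances (or a specified start region)
    and verify that the required destination regions are reachable, applying the
    Skull Pinball trap fix if that is the only blocker."""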
for dungeon_name in paths.keys():
builder = world.dungeon_layouts[player][dungeon_name]
if len(paths[dungeon_name]) > 0:
states_to_explore = defaultdict(list)
for path in paths[dungeon_name]:
if type(path) is tuple:
states_to_explore[tuple([path[0]])].append(path[1])
else:
states_to_explore[tuple(builder.path_entrances)].append(path)
cached_initial_state = None
for start_regs, dest_regs in states_to_explore.items():
check_paths = convert_regions(dest_regs, world, player)
start_regions = convert_regions(start_regs, world, player)
initial = start_regs == tuple(builder.path_entrances)
if not initial or cached_initial_state is None:
init = determine_init_crystal(initial, cached_initial_state, start_regions)
state = ExplorationState(init, dungeon_name)
for region in start_regions:
state.visit_region(region)
state.add_all_doors_check_unattached(region, world, player)
explore_state(state, world, player)
if initial and cached_initial_state is None:
cached_initial_state = state
else:
state = cached_initial_state
valid, bad_region = check_if_regions_visited(state, check_paths)
if not valid:
if check_for_pinball_fix(state, bad_region, world, player):
explore_state(state, world, player)
valid, bad_region = check_if_regions_visited(state, check_paths)
if not valid:
raise Exception('%s cannot reach %s' % (dungeon_name, bad_region.name))
def determine_init_crystal(initial, state, start_regions):
if initial:
return CrystalBarrier.Orange
if state is None:
raise Exception('Please start path checking from the entrances')
if len(start_regions) > 1:
raise NotImplementedError('Path checking for multiple start regions (not the entrances) not implemented, use more paths instead')
start_region = start_regions[0]
if start_region in state.visited_blue and start_region in state.visited_orange:
return CrystalBarrier.Either
elif start_region in state.visited_blue:
return CrystalBarrier.Blue
elif start_region in state.visited_orange:
return CrystalBarrier.Orange
else:
raise Exception('Can\'t get to %s from initial state', start_region.name)
def explore_state(state, world, player):
while len(state.avail_doors) > 0:
door = state.next_avail_door().door
connect_region = world.get_entrance(door.name, player).connected_region
if state.can_traverse(door) and not state.visited(connect_region) and valid_region_to_explore(connect_region, world, player):
state.visit_region(connect_region)
state.add_all_doors_check_unattached(connect_region, world, player)
def check_if_regions_visited(state, check_paths):
valid = True
breaking_region = None
for region_target in check_paths:
if not state.visited_at_all(region_target):
valid = False
breaking_region = region_target
break
return valid, breaking_region
def check_for_pinball_fix(state, bad_region, world, player):
pinball_region = world.get_region('Skull Pinball', player)
    if bad_region.name == 'Skull 2 West Lobby' and state.visited_at_all(pinball_region):  # revisit this for entrance shuffle
door = world.get_door('Skull Pinball WS', player)
room = world.get_room(door.roomIndex, player)
if room.doorList[door.doorListPos][1] == DoorKind.Trap:
room.change(door.doorListPos, DoorKind.Normal)
door.trapFlag = 0x0
door.blocked = False
connect_two_way(world, door.name, door.dest.name, player)
state.add_all_doors_check_unattached(pinball_region, world, player)
return True
return False
@unique
class DROptions(Flag):
NoOptions = 0x00
    Eternal_Mini_Bosses = 0x01  # If on, GT minibosses are marked as defeated when they try to spawn a heart
    Town_Portal = 0x02  # If on, players start with the Mirror Scroll
    Open_Desert_Wall = 0x80  # If on, the desert wall is pre-opened; no fire required
# DATA GOES DOWN HERE
logical_connections = [
('Hyrule Dungeon North Abyss Catwalk Dropdown', 'Hyrule Dungeon North Abyss'),
('Sewers Secret Room Push Block', 'Sewers Secret Room Blocked Path'),
('Eastern Hint Tile Push Block', 'Eastern Hint Tile'),
('Eastern Map Balcony Hook Path', 'Eastern Map Room'),
('Eastern Map Room Drop Down', 'Eastern Map Balcony'),
('Desert Main Lobby Left Path', 'Desert Left Alcove'),
('Desert Main Lobby Right Path', 'Desert Right Alcove'),
('Desert Left Alcove Path', 'Desert Main Lobby'),
('Desert Right Alcove Path', 'Desert Main Lobby'),
('Hera Big Chest Landing Exit', 'Hera 4F'),
('PoD Pit Room Block Path N', 'PoD Pit Room Blocked'),
('PoD Pit Room Block Path S', 'PoD Pit Room'),
('PoD Arena Bonk Path', 'PoD Arena Bridge'),
('PoD Arena Main Crystal Path', 'PoD Arena Crystal'),
('PoD Arena Crystal Path', 'PoD Arena Main'),
('PoD Arena Main Orange Barrier', 'PoD Arena North'),
('PoD Arena North Drop Down', 'PoD Arena Main'),
('PoD Arena Bridge Drop Down', 'PoD Arena Main'),
('PoD Map Balcony Drop Down', 'PoD Sexy Statue'),
('PoD Basement Ledge Drop Down', 'PoD Stalfos Basement'),
('PoD Falling Bridge Path N', 'PoD Falling Bridge Ledge'),
('PoD Falling Bridge Path S', 'PoD Falling Bridge'),
('Swamp Lobby Moat', 'Swamp Entrance'),
('Swamp Entrance Moat', 'Swamp Lobby'),
('Swamp Trench 1 Approach Dry', 'Swamp Trench 1 Nexus'),
('Swamp Trench 1 Approach Key', 'Swamp Trench 1 Key Ledge'),
('Swamp Trench 1 Approach Swim Depart', 'Swamp Trench 1 Departure'),
('Swamp Trench 1 Nexus Approach', 'Swamp Trench 1 Approach'),
('Swamp Trench 1 Nexus Key', 'Swamp Trench 1 Key Ledge'),
('Swamp Trench 1 Key Ledge Dry', 'Swamp Trench 1 Nexus'),
('Swamp Trench 1 Key Approach', 'Swamp Trench 1 Approach'),
('Swamp Trench 1 Key Ledge Depart', 'Swamp Trench 1 Departure'),
('Swamp Trench 1 Departure Dry', 'Swamp Trench 1 Nexus'),
('Swamp Trench 1 Departure Approach', 'Swamp Trench 1 Approach'),
('Swamp Trench 1 Departure Key', 'Swamp Trench 1 Key Ledge'),
('Swamp Hub Hook Path', 'Swamp Hub North Ledge'),
('Swamp Hub North Ledge Drop Down', 'Swamp Hub'),
('Swamp Compass Donut Push Block', 'Swamp Donut Top'),
('Swamp Shortcut Blue Barrier', 'Swamp Trench 2 Pots'),
('Swamp Trench 2 Pots Blue Barrier', 'Swamp Shortcut'),
('Swamp Trench 2 Pots Dry', 'Swamp Trench 2 Blocks'),
('Swamp Trench 2 Pots Wet', 'Swamp Trench 2 Departure'),
('Swamp Trench 2 Blocks Pots', 'Swamp Trench 2 Pots'),
('Swamp Trench 2 Departure Wet', 'Swamp Trench 2 Pots'),
('Swamp West Shallows Push Blocks', 'Swamp West Block Path'),
('Swamp West Block Path Drop Down', 'Swamp West Shallows'),
('Swamp West Ledge Drop Down', 'Swamp West Shallows'),
('Swamp West Ledge Hook Path', 'Swamp Barrier Ledge'),
('Swamp Barrier Ledge Drop Down', 'Swamp West Shallows'),
('Swamp Barrier Ledge - Orange', 'Swamp Barrier'),
('Swamp Barrier - Orange', 'Swamp Barrier Ledge'),
('Swamp Barrier Ledge Hook Path', 'Swamp West Ledge'),
('Swamp Drain Right Switch', 'Swamp Drain Left'),
('Swamp Flooded Spot Ladder', 'Swamp Flooded Room'),
('Swamp Flooded Room Ladder', 'Swamp Flooded Spot'),
('Skull Pot Circle Star Path', 'Skull Map Room'),
('Skull Big Chest Hookpath', 'Skull 1 Lobby'),
('Skull Back Drop Star Path', 'Skull Small Hall'),
('Thieves Rail Ledge Drop Down', 'Thieves BK Corner'),
('Thieves Hellway Orange Barrier', 'Thieves Hellway S Crystal'),
('Thieves Hellway Crystal Orange Barrier', 'Thieves Hellway'),
('Thieves Hellway Blue Barrier', 'Thieves Hellway N Crystal'),
('Thieves Hellway Crystal Blue Barrier', 'Thieves Hellway'),
('Thieves Basement Block Path', 'Thieves Blocked Entry'),
('Thieves Blocked Entry Path', 'Thieves Basement Block'),
('Thieves Conveyor Bridge Block Path', 'Thieves Conveyor Block'),
('Thieves Conveyor Block Path', 'Thieves Conveyor Bridge'),
('Ice Cross Bottom Push Block Left', 'Ice Floor Switch'),
('Ice Cross Right Push Block Top', 'Ice Bomb Drop'),
('Ice Big Key Push Block', 'Ice Dead End'),
('Ice Bomb Jump Ledge Orange Barrier', 'Ice Bomb Jump Catwalk'),
('Ice Bomb Jump Catwalk Orange Barrier', 'Ice Bomb Jump Ledge'),
('Ice Hookshot Ledge Path', 'Ice Hookshot Balcony'),
('Ice Hookshot Balcony Path', 'Ice Hookshot Ledge'),
('Ice Crystal Right Orange Barrier', 'Ice Crystal Left'),
('Ice Crystal Left Orange Barrier', 'Ice Crystal Right'),
('Ice Crystal Left Blue Barrier', 'Ice Crystal Block'),
('Ice Crystal Block Exit', 'Ice Crystal Left'),
('Ice Big Chest Landing Push Blocks', 'Ice Big Chest View'),
('Mire Lobby Gap', 'Mire Post-Gap'),
('Mire Post-Gap Gap', 'Mire Lobby'),
('Mire Hub Upper Blue Barrier', 'Mire Hub Top'),
('Mire Hub Lower Blue Barrier', 'Mire Hub Right'),
('Mire Hub Right Blue Barrier', 'Mire Hub'),
('Mire Hub Top Blue Barrier', 'Mire Hub'),
('Mire Map Spike Side Drop Down', 'Mire Lone Shooter'),
('Mire Map Spike Side Blue Barrier', 'Mire Crystal Dead End'),
('Mire Map Spot Blue Barrier', 'Mire Crystal Dead End'),
('Mire Crystal Dead End Left Barrier', 'Mire Map Spot'),
('Mire Crystal Dead End Right Barrier', 'Mire Map Spike Side'),
('Mire Hidden Shooters Block Path S', 'Mire Hidden Shooters'),
('Mire Hidden Shooters Block Path N', 'Mire Hidden Shooters Blocked'),
('Mire Left Bridge Hook Path', 'Mire Right Bridge'),
('Mire Crystal Right Orange Barrier', 'Mire Crystal Mid'),
('Mire Crystal Mid Orange Barrier', 'Mire Crystal Right'),
('Mire Crystal Mid Blue Barrier', 'Mire Crystal Left'),
('Mire Crystal Left Blue Barrier', 'Mire Crystal Mid'),
('Mire Firesnake Skip Orange Barrier', 'Mire Antechamber'),
('Mire Antechamber Orange Barrier', 'Mire Firesnake Skip'),
('Mire Compass Blue Barrier', 'Mire Compass Chest'),
('Mire Compass Chest Exit', 'Mire Compass Room'),
('Mire South Fish Blue Barrier', 'Mire Fishbone'),
('Mire Fishbone Blue Barrier', 'Mire South Fish'),
('TR Main Lobby Gap', 'TR Lobby Ledge'),
('TR Lobby Ledge Gap', 'TR Main Lobby'),
('TR Pipe Ledge Drop Down', 'TR Pipe Pit'),
('TR Big Chest Gap', 'TR Big Chest Entrance'),
('TR Big Chest Entrance Gap', 'TR Big Chest'),
('TR Crystal Maze Forwards Path', 'TR Crystal Maze End'),
('TR Crystal Maze Blue Path', 'TR Crystal Maze'),
('TR Crystal Maze Cane Path', 'TR Crystal Maze'),
('GT Blocked Stairs Block Path', 'GT Big Chest'),
('GT Speed Torch South Path', 'GT Speed Torch'),
('GT Speed Torch North Path', 'GT Speed Torch Upper'),
('GT Hookshot East-North Path', 'GT Hookshot North Platform'),
('GT Hookshot East-South Path', 'GT Hookshot South Platform'),
('GT Hookshot North-East Path', 'GT Hookshot East Platform'),
('GT Hookshot North-South Path', 'GT Hookshot South Platform'),
('GT Hookshot South-East Path', 'GT Hookshot East Platform'),
('GT Hookshot South-North Path', 'GT Hookshot North Platform'),
('GT Hookshot Platform Blue Barrier', 'GT Hookshot South Entry'),
('GT Hookshot Entry Blue Barrier', 'GT Hookshot South Platform'),
('GT Double Switch Orange Barrier', 'GT Double Switch Switches'),
('GT Double Switch Orange Barrier 2', 'GT Double Switch Key Spot'),
('GT Double Switch Transition Blue', 'GT Double Switch Exit'),
('GT Double Switch Blue Path', 'GT Double Switch Transition'),
('GT Double Switch Orange Path', 'GT Double Switch Entry'),
('GT Double Switch Key Blue Path', 'GT Double Switch Exit'),
('GT Double Switch Key Orange Path', 'GT Double Switch Entry'),
('GT Double Switch Blue Barrier', 'GT Double Switch Key Spot'),
('GT Warp Maze - Pit Section Warp Spot', 'GT Warp Maze - Pit Exit Warp Spot'),
('GT Warp Maze Exit Section Warp Spot', 'GT Warp Maze - Pit Exit Warp Spot'),
('GT Firesnake Room Hook Path', 'GT Firesnake Room Ledge'),
('GT Left Moldorm Ledge Drop Down', 'GT Moldorm'),
('GT Right Moldorm Ledge Drop Down', 'GT Moldorm'),
('GT Moldorm Gap', 'GT Validation'),
('GT Validation Block Path', 'GT Validation Door')
]
vanilla_logical_connections = [
('Ice Cross Left Push Block', 'Ice Compass Room'),
('Ice Cross Right Push Block Bottom', 'Ice Compass Room'),
('Ice Cross Bottom Push Block Right', 'Ice Pengator Switch'),
('Ice Cross Top Push Block Right', 'Ice Pengator Switch'),
]
spiral_staircases = [
('Hyrule Castle Back Hall Down Stairs', 'Hyrule Dungeon Map Room Up Stairs'),
('Hyrule Dungeon Armory Down Stairs', 'Hyrule Dungeon Staircase Up Stairs'),
('Hyrule Dungeon Staircase Down Stairs', 'Hyrule Dungeon Cellblock Up Stairs'),
('Sewers Behind Tapestry Down Stairs', 'Sewers Rope Room Up Stairs'),
('Sewers Secret Room Up Stairs', 'Sewers Pull Switch Down Stairs'),
('Eastern Darkness Up Stairs', 'Eastern Attic Start Down Stairs'),
('Desert Tiles 1 Up Stairs', 'Desert Bridge Down Stairs'),
('Hera Lobby Down Stairs', 'Hera Basement Cage Up Stairs'),
('Hera Lobby Key Stairs', 'Hera Tile Room Up Stairs'),
('Hera Lobby Up Stairs', 'Hera Beetles Down Stairs'),
('Hera Startile Wide Up Stairs', 'Hera 4F Down Stairs'),
('Hera 4F Up Stairs', 'Hera 5F Down Stairs'),
('Hera 5F Up Stairs', 'Hera Boss Down Stairs'),
('Tower Room 03 Up Stairs', 'Tower Lone Statue Down Stairs'),
('Tower Dark Chargers Up Stairs', 'Tower Dual Statues Down Stairs'),
('Tower Dark Archers Up Stairs', 'Tower Red Spears Down Stairs'),
('Tower Pacifist Run Up Stairs', 'Tower Push Statue Down Stairs'),
('PoD Left Cage Down Stairs', 'PoD Shooter Room Up Stairs'),
('PoD Middle Cage Down Stairs', 'PoD Warp Room Up Stairs'),
('PoD Basement Ledge Up Stairs', 'PoD Big Key Landing Down Stairs'),
('PoD Compass Room W Down Stairs', 'PoD Dark Basement W Up Stairs'),
('PoD Compass Room E Down Stairs', 'PoD Dark Basement E Up Stairs'),
('Swamp Entrance Down Stairs', 'Swamp Pot Row Up Stairs'),
('Swamp West Block Path Up Stairs', 'Swamp Attic Down Stairs'),
('Swamp Push Statue Down Stairs', 'Swamp Flooded Room Up Stairs'),
('Swamp Left Elbow Down Stairs', 'Swamp Drain Left Up Stairs'),
('Swamp Right Elbow Down Stairs', 'Swamp Drain Right Up Stairs'),
('Swamp Behind Waterfall Up Stairs', 'Swamp C Down Stairs'),
('Thieves Spike Switch Up Stairs', 'Thieves Attic Down Stairs'),
('Thieves Conveyor Maze Down Stairs', 'Thieves Basement Block Up Stairs'),
('Ice Jelly Key Down Stairs', 'Ice Floor Switch Up Stairs'),
('Ice Narrow Corridor Down Stairs', 'Ice Pengator Trap Up Stairs'),
('Ice Spike Room Up Stairs', 'Ice Hammer Block Down Stairs'),
('Ice Spike Room Down Stairs', 'Ice Spikeball Up Stairs'),
('Ice Lonely Freezor Down Stairs', 'Iced T Up Stairs'),
('Ice Backwards Room Down Stairs', 'Ice Anti-Fairy Up Stairs'),
('Mire Post-Gap Down Stairs', 'Mire 2 Up Stairs'),
('Mire Left Bridge Down Stairs', 'Mire Dark Shooters Up Stairs'),
('Mire Conveyor Barrier Up Stairs', 'Mire Torches Top Down Stairs'),
('Mire Falling Foes Up Stairs', 'Mire Firesnake Skip Down Stairs'),
('TR Chain Chomps Down Stairs', 'TR Pipe Pit Up Stairs'),
('TR Crystaroller Down Stairs', 'TR Dark Ride Up Stairs'),
('GT Lobby Left Down Stairs', 'GT Torch Up Stairs'),
('GT Lobby Up Stairs', 'GT Crystal Paths Down Stairs'),
('GT Lobby Right Down Stairs', 'GT Hope Room Up Stairs'),
('GT Blocked Stairs Down Stairs', 'GT Four Torches Up Stairs'),
('GT Cannonball Bridge Up Stairs', 'GT Gauntlet 1 Down Stairs'),
('GT Quad Pot Up Stairs', 'GT Wizzrobes 1 Down Stairs'),
('GT Moldorm Pit Up Stairs', 'GT Right Moldorm Ledge Down Stairs'),
('GT Frozen Over Up Stairs', 'GT Brightly Lit Hall Down Stairs')
]
straight_staircases = [
('Hyrule Castle Lobby North Stairs', 'Hyrule Castle Throne Room South Stairs'),
('Sewers Rope Room North Stairs', 'Sewers Dark Cross South Stairs'),
('Tower Catwalk North Stairs', 'Tower Antechamber South Stairs'),
('PoD Conveyor North Stairs', 'PoD Map Balcony South Stairs'),
('TR Crystal Maze North Stairs', 'TR Final Abyss South Stairs')
]
open_edges = [
('Hyrule Dungeon North Abyss South Edge', 'Hyrule Dungeon South Abyss North Edge'),
('Hyrule Dungeon North Abyss Catwalk Edge', 'Hyrule Dungeon South Abyss Catwalk North Edge'),
('Hyrule Dungeon South Abyss West Edge', 'Hyrule Dungeon Guardroom Abyss Edge'),
('Hyrule Dungeon South Abyss Catwalk West Edge', 'Hyrule Dungeon Guardroom Catwalk Edge'),
('Desert Main Lobby NW Edge', 'Desert North Hall SW Edge'),
('Desert Main Lobby N Edge', 'Desert Dead End Edge'),
('Desert Main Lobby NE Edge', 'Desert North Hall SE Edge'),
('Desert Main Lobby E Edge', 'Desert East Wing W Edge'),
('Desert East Wing N Edge', 'Desert Arrow Pot Corner S Edge'),
('Desert Arrow Pot Corner W Edge', 'Desert North Hall E Edge'),
('Desert North Hall W Edge', 'Desert Sandworm Corner S Edge'),
('Desert Sandworm Corner E Edge', 'Desert West Wing N Edge'),
('Thieves Lobby N Edge', 'Thieves Ambush S Edge'),
('Thieves Lobby NE Edge', 'Thieves Ambush SE Edge'),
('Thieves Ambush ES Edge', 'Thieves BK Corner WS Edge'),
('Thieves Ambush EN Edge', 'Thieves BK Corner WN Edge'),
('Thieves BK Corner S Edge', 'Thieves Compass Room N Edge'),
('Thieves BK Corner SW Edge', 'Thieves Compass Room NW Edge'),
('Thieves Compass Room WS Edge', 'Thieves Big Chest Nook ES Edge'),
('Thieves Cricket Hall Left Edge', 'Thieves Cricket Hall Right Edge')
]
falldown_pits = [
('Eastern Courtyard Potholes', 'Eastern Fairies'),
('Hera Beetles Holes', 'Hera Lobby'),
('Hera Startile Corner Holes', 'Hera Lobby'),
('Hera Startile Wide Holes', 'Hera Lobby'),
('Hera 4F Holes', 'Hera Lobby'), # failed bomb jump
('Hera Big Chest Landing Holes', 'Hera Startile Wide'), # the other holes near big chest
('Hera 5F Star Hole', 'Hera Big Chest Landing'),
('Hera 5F Pothole Chain', 'Hera Fairies'),
('Hera 5F Normal Holes', 'Hera 4F'),
('Hera Boss Outer Hole', 'Hera 5F'),
('Hera Boss Inner Hole', 'Hera 4F'),
('PoD Pit Room Freefall', 'PoD Stalfos Basement'),
('PoD Pit Room Bomb Hole', 'PoD Basement Ledge'),
('PoD Big Key Landing Hole', 'PoD Stalfos Basement'),
('Swamp Attic Right Pit', 'Swamp Barrier Ledge'),
('Swamp Attic Left Pit', 'Swamp West Ledge'),
('Skull Final Drop Hole', 'Skull Boss'),
('Ice Bomb Drop Hole', 'Ice Stalfos Hint'),
('Ice Falling Square Hole', 'Ice Tall Hint'),
('Ice Freezors Hole', 'Ice Big Chest View'),
('Ice Freezors Ledge Hole', 'Ice Big Chest View'),
('Ice Freezors Bomb Hole', 'Ice Big Chest Landing'),
('Ice Crystal Block Hole', 'Ice Switch Room'),
('Ice Crystal Right Blue Hole', 'Ice Switch Room'),
('Ice Backwards Room Hole', 'Ice Fairy'),
('Ice Antechamber Hole', 'Ice Boss'),
('Mire Attic Hint Hole', 'Mire BK Chest Ledge'),
('Mire Torches Top Holes', 'Mire Conveyor Barrier'),
('Mire Torches Bottom Holes', 'Mire Warping Pool'),
('GT Bob\'s Room Hole', 'GT Ice Armos'),
('GT Falling Torches Hole', 'GT Staredown'),
('GT Moldorm Hole', 'GT Moldorm Pit')
]
dungeon_warps = [
('Eastern Fairies\' Warp', 'Eastern Courtyard'),
('Hera Fairies\' Warp', 'Hera 5F'),
('PoD Warp Hint Warp', 'PoD Warp Room'),
('PoD Warp Room Warp', 'PoD Warp Hint'),
('PoD Stalfos Basement Warp', 'PoD Warp Room'),
('PoD Callback Warp', 'PoD Dark Alley'),
('Ice Fairy Warp', 'Ice Anti-Fairy'),
('Mire Lone Warp Warp', 'Mire BK Door Room'),
('Mire Warping Pool Warp', 'Mire Square Rail'),
('GT Compass Room Warp', 'GT Conveyor Star Pits'),
('GT Spike Crystals Warp', 'GT Firesnake Room'),
('GT Warp Maze - Left Section Warp', 'GT Warp Maze - Rando Rail'),
('GT Warp Maze - Mid Section Left Warp', 'GT Warp Maze - Main Rails'),
('GT Warp Maze - Mid Section Right Warp', 'GT Warp Maze - Main Rails'),
('GT Warp Maze - Right Section Warp', 'GT Warp Maze - Main Rails'),
('GT Warp Maze - Pit Exit Warp', 'GT Warp Maze - Pot Rail'),
('GT Warp Maze - Rail Choice Left Warp', 'GT Warp Maze - Left Section'),
('GT Warp Maze - Rail Choice Right Warp', 'GT Warp Maze - Mid Section'),
('GT Warp Maze - Rando Rail Warp', 'GT Warp Maze - Mid Section'),
('GT Warp Maze - Main Rails Best Warp', 'GT Warp Maze - Pit Section'),
('GT Warp Maze - Main Rails Mid Left Warp', 'GT Warp Maze - Mid Section'),
('GT Warp Maze - Main Rails Mid Right Warp', 'GT Warp Maze - Mid Section'),
('GT Warp Maze - Main Rails Right Top Warp', 'GT Warp Maze - Right Section'),
('GT Warp Maze - Main Rails Right Mid Warp', 'GT Warp Maze - Right Section'),
('GT Warp Maze - Pot Rail Warp', 'GT Warp Maze Exit Section'),
('GT Hidden Star Warp', 'GT Invisible Bridges')
]
ladders = [
('PoD Bow Statue Down Ladder', 'PoD Dark Pegs Up Ladder'),
('Ice Big Key Down Ladder', 'Ice Tongue Pull Up Ladder'),
('Ice Firebar Down Ladder', 'Ice Freezors Up Ladder'),
('GT Staredown Up Ladder', 'GT Falling Torches Down Ladder')
]
interior_doors = [
('Hyrule Dungeon Armory Interior Key Door S', 'Hyrule Dungeon Armory Interior Key Door N'),
('Hyrule Dungeon Armory ES', 'Hyrule Dungeon Armory Boomerang WS'),
('Hyrule Dungeon Map Room Key Door S', 'Hyrule Dungeon North Abyss Key Door N'),
('Sewers Rat Path WS', 'Sewers Secret Room ES'),
('Sewers Rat Path WN', 'Sewers Secret Room EN'),
('Sewers Yet More Rats S', 'Sewers Pull Switch N'),
('Eastern Lobby N', 'Eastern Lobby Bridge S'),
('Eastern Lobby NW', 'Eastern Lobby Left Ledge SW'),
('Eastern Lobby NE', 'Eastern Lobby Right Ledge SE'),
('Eastern East Wing EN', 'Eastern Pot Switch WN'),
('Eastern East Wing ES', 'Eastern Map Balcony WS'),
('Eastern Pot Switch SE', 'Eastern Map Room NE'),
('Eastern West Wing WS', 'Eastern Stalfos Spawn ES'),
('Eastern Stalfos Spawn NW', 'Eastern Compass Room SW'),
('Eastern Compass Room EN', 'Eastern Hint Tile WN'),
('Eastern Dark Square EN', 'Eastern Dark Pots WN'),
('Eastern Darkness NE', 'Eastern Rupees SE'),
('Eastern False Switches WS', 'Eastern Cannonball Hell ES'),
('Eastern Single Eyegore NE', 'Eastern Duo Eyegores SE'),
('Desert East Lobby WS', 'Desert East Wing ES'),
('Desert East Wing Key Door EN', 'Desert Compass Key Door WN'),
('Desert North Hall NW', 'Desert Map SW'),
('Desert North Hall NE', 'Desert Map SE'),
('Desert Arrow Pot Corner NW', 'Desert Trap Room SW'),
('Desert Sandworm Corner NE', 'Desert Bonk Torch SE'),
('Desert Sandworm Corner WS', 'Desert Circle of Pots ES'),
('Desert Circle of Pots NW', 'Desert Big Chest SW'),
    ('Desert West Wing WS', 'Desert West Lobby ES'),
('Desert Fairy Fountain SW', 'Desert West Lobby NW'),
('Desert Back Lobby NW', 'Desert Tiles 1 SW'),
('Desert Bridge SW', 'Desert Four Statues NW'),
    ('Desert Four Statues ES', 'Desert Beamos Hall WS'),
('Desert Tiles 2 NE', 'Desert Wall Slide SE'),
('Hera Tile Room EN', 'Hera Tridorm WN'),
('Hera Tridorm SE', 'Hera Torches NE'),
('Hera Beetles WS', 'Hera Startile Corner ES'),
('Hera Startile Corner NW', 'Hera Startile Wide SW'),
('Tower Lobby NW', 'Tower Gold Knights SW'),
('Tower Gold Knights EN', 'Tower Room 03 WN'),
('Tower Lone Statue WN', 'Tower Dark Maze EN'),
('Tower Dark Maze ES', 'Tower Dark Chargers WS'),
('Tower Dual Statues WS', 'Tower Dark Pits ES'),
('Tower Dark Pits EN', 'Tower Dark Archers WN'),
('Tower Red Spears WN', 'Tower Red Guards EN'),
('Tower Red Guards SW', 'Tower Circle of Pots NW'),
('Tower Circle of Pots ES', 'Tower Pacifist Run WS'),
('Tower Push Statue WS', 'Tower Catwalk ES'),
('Tower Antechamber NW', 'Tower Altar SW'),
('PoD Lobby N', 'PoD Middle Cage S'),
('PoD Lobby NW', 'PoD Left Cage SW'),
('PoD Lobby NE', 'PoD Middle Cage SE'),
('PoD Warp Hint SE', 'PoD Jelly Hall NE'),
('PoD Jelly Hall NW', 'PoD Mimics 1 SW'),
('PoD Falling Bridge EN', 'PoD Compass Room WN'),
('PoD Compass Room SE', 'PoD Harmless Hellway NE'),
('PoD Mimics 2 NW', 'PoD Bow Statue SW'),
('PoD Dark Pegs WN', 'PoD Lonely Turtle EN'),
('PoD Lonely Turtle SW', 'PoD Turtle Party NW'),
('PoD Turtle Party ES', 'PoD Callback WS'),
('Swamp Trench 1 Nexus N', 'Swamp Trench 1 Alcove S'),
('Swamp Trench 1 Key Ledge NW', 'Swamp Hammer Switch SW'),
('Swamp Donut Top SE', 'Swamp Donut Bottom NE'),
('Swamp Donut Bottom NW', 'Swamp Compass Donut SW'),
('Swamp Crystal Switch SE', 'Swamp Shortcut NE'),
('Swamp Trench 2 Blocks N', 'Swamp Trench 2 Alcove S'),
('Swamp Push Statue NW', 'Swamp Shooters SW'),
('Swamp Push Statue NE', 'Swamp Right Elbow SE'),
('Swamp Shooters EN', 'Swamp Left Elbow WN'),
('Swamp Drain WN', 'Swamp Basement Shallows EN'),
('Swamp Flooded Room WS', 'Swamp Basement Shallows ES'),
('Swamp Waterfall Room NW', 'Swamp Refill SW'),
('Swamp Waterfall Room NE', 'Swamp Behind Waterfall SE'),
('Swamp C SE', 'Swamp Waterway NE'),
('Swamp Waterway N', 'Swamp I S'),
('Swamp Waterway NW', 'Swamp T SW'),
('Skull 1 Lobby ES', 'Skull Map Room WS'),
('Skull Pot Circle WN', 'Skull Pull Switch EN'),
('Skull Pull Switch S', 'Skull Big Chest N'),
('Skull Left Drop ES', 'Skull Compass Room WS'),
('Skull 2 East Lobby NW', 'Skull Big Key SW'),
('Skull Big Key WN', 'Skull Lone Pot EN'),
('Skull Small Hall WS', 'Skull 2 West Lobby ES'),
('Skull 2 West Lobby NW', 'Skull X Room SW'),
('Skull 3 Lobby EN', 'Skull East Bridge WN'),
('Skull East Bridge WS', 'Skull West Bridge Nook ES'),
('Skull Star Pits ES', 'Skull Torch Room WS'),
('Skull Torch Room WN', 'Skull Vines EN'),
('Skull Spike Corner ES', 'Skull Final Drop WS'),
('Thieves Hallway WS', 'Thieves Pot Alcove Mid ES'),
('Thieves Conveyor Maze SW', 'Thieves Pot Alcove Top NW'),
('Thieves Conveyor Maze EN', 'Thieves Hallway WN'),
('Thieves Spike Track NE', 'Thieves Triple Bypass SE'),
('Thieves Spike Track WS', 'Thieves Hellway Crystal ES'),
('Thieves Hellway Crystal EN', 'Thieves Triple Bypass WN'),
('Thieves Attic ES', 'Thieves Cricket Hall Left WS'),
('Thieves Cricket Hall Right ES', 'Thieves Attic Window WS'),
('Thieves Blocked Entry SW', 'Thieves Lonely Zazak NW'),
('Thieves Lonely Zazak ES', 'Thieves Blind\'s Cell WS'),
('Thieves Conveyor Bridge WS', 'Thieves Big Chest Room ES'),
('Thieves Conveyor Block WN', 'Thieves Trap EN'),
('Ice Lobby WS', 'Ice Jelly Key ES'),
('Ice Floor Switch ES', 'Ice Cross Left WS'),
('Ice Cross Top NE', 'Ice Bomb Drop SE'),
('Ice Pengator Switch ES', 'Ice Dead End WS'),
('Ice Stalfos Hint SE', 'Ice Conveyor NE'),
('Ice Bomb Jump EN', 'Ice Narrow Corridor WN'),
('Ice Spike Cross WS', 'Ice Firebar ES'),
('Ice Spike Cross NE', 'Ice Falling Square SE'),
('Ice Hammer Block ES', 'Ice Tongue Pull WS'),
('Ice Freezors Ledge ES', 'Ice Tall Hint WS'),
('Ice Hookshot Balcony SW', 'Ice Spikeball NW'),
('Ice Crystal Right NE', 'Ice Backwards Room SE'),
('Ice Crystal Left WS', 'Ice Big Chest View ES'),
('Ice Anti-Fairy SE', 'Ice Switch Room NE'),
('Mire Lone Shooter ES', 'Mire Falling Bridge WS'), # technically one-way
('Mire Falling Bridge W', 'Mire Failure Bridge E'), # technically one-way
('Mire Falling Bridge WN', 'Mire Map Spike Side EN'), # technically one-way
('Mire Hidden Shooters WS', 'Mire Cross ES'), # technically one-way
('Mire Hidden Shooters NE', 'Mire Minibridge SE'),
('Mire Spikes NW', 'Mire Ledgehop SW'),
('Mire Spike Barrier ES', 'Mire Square Rail WS'),
('Mire Square Rail NW', 'Mire Lone Warp SW'),
('Mire Wizzrobe Bypass WN', 'Mire Compass Room EN'), # technically one-way
('Mire Conveyor Crystal WS', 'Mire Tile Room ES'),
('Mire Tile Room NW', 'Mire Compass Room SW'),
('Mire Neglected Room SE', 'Mire Chest View NE'),
('Mire BK Chest Ledge WS', 'Mire Warping Pool ES'), # technically one-way
('Mire Torches Top SW', 'Mire Torches Bottom NW'),
('Mire Torches Bottom WS', 'Mire Attic Hint ES'),
('Mire Dark Shooters SE', 'Mire Key Rupees NE'),
('Mire Dark Shooters SW', 'Mire Block X NW'),
('Mire Tall Dark and Roomy WS', 'Mire Crystal Right ES'),
('Mire Tall Dark and Roomy WN', 'Mire Shooter Rupees EN'),
('Mire Crystal Mid NW', 'Mire Crystal Top SW'),
('TR Tile Room NE', 'TR Refill SE'),
('TR Pokey 1 NW', 'TR Chain Chomps SW'),
('TR Twin Pokeys EN', 'TR Dodgers WN'),
('TR Twin Pokeys SW', 'TR Hallway NW'),
('TR Hallway ES', 'TR Big View WS'),
('TR Big Chest NE', 'TR Dodgers SE'),
('TR Dash Room ES', 'TR Tongue Pull WS'),
('TR Dash Room NW', 'TR Crystaroller SW'),
('TR Tongue Pull NE', 'TR Rupees SE'),
('GT Torch EN', 'GT Hope Room WN'),
('GT Torch SW', 'GT Big Chest NW'),
('GT Tile Room EN', 'GT Speed Torch WN'),
('GT Speed Torch WS', 'GT Pots n Blocks ES'),
('GT Crystal Conveyor WN', 'GT Compass Room EN'),
('GT Conveyor Cross WN', 'GT Hookshot EN'),
('GT Hookshot ES', 'GT Map Room WS'),
('GT Double Switch EN', 'GT Spike Crystals WN'),
('GT Firesnake Room SW', 'GT Warp Maze (Rails) NW'),
('GT Ice Armos NE', 'GT Big Key Room SE'),
('GT Ice Armos WS', 'GT Four Torches ES'),
('GT Four Torches NW', 'GT Fairy Abyss SW'),
('GT Crystal Paths SW', 'GT Mimics 1 NW'),
('GT Mimics 1 ES', 'GT Mimics 2 WS'),
('GT Mimics 2 NE', 'GT Dash Hall SE'),
('GT Cannonball Bridge SE', 'GT Refill NE'),
('GT Gauntlet 1 WN', 'GT Gauntlet 2 EN'),
('GT Gauntlet 2 SW', 'GT Gauntlet 3 NW'),
('GT Gauntlet 4 SW', 'GT Gauntlet 5 NW'),
('GT Beam Dash WS', 'GT Lanmolas 2 ES'),
('GT Lanmolas 2 NW', 'GT Quad Pot SW'),
('GT Wizzrobes 1 SW', 'GT Dashing Bridge NW'),
('GT Dashing Bridge NE', 'GT Wizzrobes 2 SE'),
('GT Torch Cross ES', 'GT Staredown WS'),
('GT Falling Torches NE', 'GT Mini Helmasaur Room SE'),
('GT Mini Helmasaur Room WN', 'GT Bomb Conveyor EN'),
('GT Bomb Conveyor SW', 'GT Crystal Circles NW')
]
key_doors = [
('Sewers Key Rat Key Door N', 'Sewers Secret Room Key Door S'),
('Sewers Dark Cross Key Door N', 'Sewers Water S'),
('Eastern Dark Square Key Door WN', 'Eastern Cannonball Ledge Key Door EN'),
('Eastern Darkness Up Stairs', 'Eastern Attic Start Down Stairs'),
('Eastern Big Key NE', 'Eastern Hint Tile Blocked Path SE'),
('Eastern Darkness S', 'Eastern Courtyard N'),
('Desert East Wing Key Door EN', 'Desert Compass Key Door WN'),
('Desert Tiles 1 Up Stairs', 'Desert Bridge Down Stairs'),
('Desert Beamos Hall NE', 'Desert Tiles 2 SE'),
('Desert Tiles 2 NE', 'Desert Wall Slide SE'),
('Desert Wall Slide NW', 'Desert Boss SW'),
('Hera Lobby Key Stairs', 'Hera Tile Room Up Stairs'),
('Hera Startile Corner NW', 'Hera Startile Wide SW'),
('PoD Middle Cage N', 'PoD Pit Room S'),
('PoD Arena Main NW', 'PoD Falling Bridge SW'),
('PoD Falling Bridge WN', 'PoD Dark Maze EN'),
]
default_small_key_doors = {
'Hyrule Castle': [
('Sewers Key Rat Key Door N', 'Sewers Secret Room Key Door S'),
('Sewers Dark Cross Key Door N', 'Sewers Water S'),
('Hyrule Dungeon Map Room Key Door S', 'Hyrule Dungeon North Abyss Key Door N'),
('Hyrule Dungeon Armory Interior Key Door N', 'Hyrule Dungeon Armory Interior Key Door S')
],
'Eastern Palace': [
('Eastern Dark Square Key Door WN', 'Eastern Cannonball Ledge Key Door EN'),
'Eastern Darkness Up Stairs',
],
'Desert Palace': [
('Desert East Wing Key Door EN', 'Desert Compass Key Door WN'),
'Desert Tiles 1 Up Stairs',
('Desert Beamos Hall NE', 'Desert Tiles 2 SE'),
('Desert Tiles 2 NE', 'Desert Wall Slide SE'),
],
'Tower of Hera': [
'Hera Lobby Key Stairs'
],
'Agahnims Tower': [
'Tower Room 03 Up Stairs',
('Tower Dark Maze ES', 'Tower Dark Chargers WS'),
'Tower Dark Archers Up Stairs',
('Tower Circle of Pots ES', 'Tower Pacifist Run WS'),
],
'Palace of Darkness': [
('PoD Middle Cage N', 'PoD Pit Room S'),
('PoD Arena Main NW', 'PoD Falling Bridge SW'),
('PoD Falling Bridge WN', 'PoD Dark Maze EN'),
'PoD Basement Ledge Up Stairs',
('PoD Compass Room SE', 'PoD Harmless Hellway NE'),
('PoD Dark Pegs WN', 'PoD Lonely Turtle EN')
],
'Swamp Palace': [
'Swamp Entrance Down Stairs',
('Swamp Pot Row WS', 'Swamp Trench 1 Approach ES'),
('Swamp Trench 1 Key Ledge NW', 'Swamp Hammer Switch SW'),
('Swamp Hub WN', 'Swamp Crystal Switch EN'),
('Swamp Hub North Ledge N', 'Swamp Push Statue S'),
('Swamp Waterway NW', 'Swamp T SW')
],
'Skull Woods': [
('Skull 1 Lobby WS', 'Skull Pot Prison ES'),
('Skull Map Room SE', 'Skull Pinball NE'),
('Skull 2 West Lobby NW', 'Skull X Room SW'),
('Skull 3 Lobby NW', 'Skull Star Pits SW'),
('Skull Spike Corner ES', 'Skull Final Drop WS')
],
'Thieves Town': [
('Thieves Hallway WS', 'Thieves Pot Alcove Mid ES'),
'Thieves Spike Switch Up Stairs',
('Thieves Conveyor Bridge WS', 'Thieves Big Chest Room ES')
],
'Ice Palace': [
'Ice Jelly Key Down Stairs',
('Ice Conveyor SW', 'Ice Bomb Jump NW'),
('Ice Spike Cross ES', 'Ice Spike Room WS'),
('Ice Tall Hint SE', 'Ice Lonely Freezor NE'),
'Ice Backwards Room Down Stairs',
('Ice Switch Room ES', 'Ice Refill WS')
],
'Misery Mire': [
('Mire Hub WS', 'Mire Conveyor Crystal ES'),
('Mire Hub Right EN', 'Mire Map Spot WN'),
('Mire Spikes NW', 'Mire Ledgehop SW'),
('Mire Fishbone SE', 'Mire Spike Barrier NE'),
('Mire Conveyor Crystal WS', 'Mire Tile Room ES'),
('Mire Dark Shooters SE', 'Mire Key Rupees NE')
],
'Turtle Rock': [
('TR Hub NW', 'TR Pokey 1 SW'),
('TR Pokey 1 NW', 'TR Chain Chomps SW'),
'TR Chain Chomps Down Stairs',
('TR Pokey 2 ES', 'TR Lava Island WS'),
'TR Crystaroller Down Stairs',
('TR Dash Bridge WS', 'TR Crystal Maze ES')
],
'Ganons Tower': [
('GT Torch EN', 'GT Hope Room WN'),
('GT Tile Room EN', 'GT Speed Torch WN'),
('GT Hookshot ES', 'GT Map Room WS'),
('GT Double Switch EN', 'GT Spike Crystals WN'),
('GT Firesnake Room SW', 'GT Warp Maze (Rails) NW'),
('GT Conveyor Star Pits EN', 'GT Falling Bridge WN'),
('GT Mini Helmasaur Room WN', 'GT Bomb Conveyor EN'),
('GT Crystal Circles SW', 'GT Left Moldorm Ledge NW')
]
}
default_door_connections = [
('Hyrule Castle Lobby W', 'Hyrule Castle West Lobby E'),
('Hyrule Castle Lobby E', 'Hyrule Castle East Lobby W'),
('Hyrule Castle Lobby WN', 'Hyrule Castle West Lobby EN'),
('Hyrule Castle West Lobby N', 'Hyrule Castle West Hall S'),
('Hyrule Castle East Lobby N', 'Hyrule Castle East Hall S'),
('Hyrule Castle East Lobby NW', 'Hyrule Castle East Hall SW'),
('Hyrule Castle East Hall W', 'Hyrule Castle Back Hall E'),
('Hyrule Castle West Hall E', 'Hyrule Castle Back Hall W'),
('Hyrule Castle Throne Room N', 'Sewers Behind Tapestry S'),
('Hyrule Dungeon Guardroom N', 'Hyrule Dungeon Armory S'),
('Sewers Dark Cross Key Door N', 'Sewers Water S'),
('Sewers Water W', 'Sewers Key Rat E'),
('Sewers Key Rat Key Door N', 'Sewers Secret Room Key Door S'),
('Eastern Lobby Bridge N', 'Eastern Cannonball S'),
('Eastern Cannonball N', 'Eastern Courtyard Ledge S'),
('Eastern Cannonball Ledge WN', 'Eastern Big Key EN'),
('Eastern Cannonball Ledge Key Door EN', 'Eastern Dark Square Key Door WN'),
('Eastern Courtyard Ledge W', 'Eastern West Wing E'),
('Eastern Courtyard Ledge E', 'Eastern East Wing W'),
('Eastern Hint Tile EN', 'Eastern Courtyard WN'),
('Eastern Big Key NE', 'Eastern Hint Tile Blocked Path SE'),
('Eastern Courtyard EN', 'Eastern Map Valley WN'),
('Eastern Courtyard N', 'Eastern Darkness S'),
('Eastern Map Valley SW', 'Eastern Dark Square NW'),
('Eastern Attic Start WS', 'Eastern False Switches ES'),
('Eastern Cannonball Hell WS', 'Eastern Single Eyegore ES'),
('Desert Compass NW', 'Desert Cannonball S'),
('Desert Beamos Hall NE', 'Desert Tiles 2 SE'),
('PoD Middle Cage N', 'PoD Pit Room S'),
('PoD Pit Room NW', 'PoD Arena Main SW'),
('PoD Pit Room NE', 'PoD Arena Bridge SE'),
('PoD Arena Main NW', 'PoD Falling Bridge SW'),
('PoD Arena Crystals E', 'PoD Sexy Statue W'),
('PoD Mimics 1 NW', 'PoD Conveyor SW'),
('PoD Map Balcony WS', 'PoD Arena Ledge ES'),
('PoD Falling Bridge WN', 'PoD Dark Maze EN'),
('PoD Dark Maze E', 'PoD Big Chest Balcony W'),
('PoD Sexy Statue NW', 'PoD Mimics 2 SW'),
('Swamp Pot Row WN', 'Swamp Map Ledge EN'),
('Swamp Pot Row WS', 'Swamp Trench 1 Approach ES'),
('Swamp Trench 1 Departure WS', 'Swamp Hub ES'),
('Swamp Hammer Switch WN', 'Swamp Hub Dead Ledge EN'),
('Swamp Hub S', 'Swamp Donut Top N'),
('Swamp Hub WS', 'Swamp Trench 2 Pots ES'),
('Swamp Hub WN', 'Swamp Crystal Switch EN'),
('Swamp Hub North Ledge N', 'Swamp Push Statue S'),
('Swamp Trench 2 Departure WS', 'Swamp West Shallows ES'),
('Swamp Big Key Ledge WN', 'Swamp Barrier EN'),
('Swamp Basement Shallows NW', 'Swamp Waterfall Room SW'),
('Skull 1 Lobby WS', 'Skull Pot Prison ES'),
('Skull Map Room SE', 'Skull Pinball NE'),
('Skull Pinball WS', 'Skull Compass Room ES'),
('Skull Compass Room NE', 'Skull Pot Prison SE'),
('Skull 2 East Lobby WS', 'Skull Small Hall ES'),
('Skull 3 Lobby NW', 'Skull Star Pits SW'),
('Skull Vines NW', 'Skull Spike Corner SW'),
('Thieves Lobby E', 'Thieves Compass Room W'),
('Thieves Ambush E', 'Thieves Rail Ledge W'),
('Thieves Rail Ledge NW', 'Thieves Pot Alcove Bottom SW'),
('Thieves BK Corner NE', 'Thieves Hallway SE'),
('Thieves Pot Alcove Mid WS', 'Thieves Spike Track ES'),
('Thieves Hellway NW', 'Thieves Spike Switch SW'),
('Thieves Triple Bypass EN', 'Thieves Conveyor Maze WN'),
('Thieves Basement Block WN', 'Thieves Conveyor Bridge EN'),
('Thieves Lonely Zazak WS', 'Thieves Conveyor Bridge ES'),
('Ice Cross Bottom SE', 'Ice Compass Room NE'),
('Ice Cross Right ES', 'Ice Pengator Switch WS'),
('Ice Conveyor SW', 'Ice Bomb Jump NW'),
('Ice Pengator Trap NE', 'Ice Spike Cross SE'),
('Ice Spike Cross ES', 'Ice Spike Room WS'),
('Ice Tall Hint SE', 'Ice Lonely Freezor NE'),
('Ice Tall Hint EN', 'Ice Hookshot Ledge WN'),
('Iced T EN', 'Ice Catwalk WN'),
('Ice Catwalk NW', 'Ice Many Pots SW'),
('Ice Many Pots WS', 'Ice Crystal Right ES'),
('Ice Switch Room ES', 'Ice Refill WS'),
('Ice Switch Room SE', 'Ice Antechamber NE'),
('Mire 2 NE', 'Mire Hub SE'),
('Mire Hub ES', 'Mire Lone Shooter WS'),
('Mire Hub E', 'Mire Failure Bridge W'),
('Mire Hub NE', 'Mire Hidden Shooters SE'),
('Mire Hub WN', 'Mire Wizzrobe Bypass EN'),
('Mire Hub WS', 'Mire Conveyor Crystal ES'),
('Mire Hub Right EN', 'Mire Map Spot WN'),
('Mire Hub Top NW', 'Mire Cross SW'),
('Mire Hidden Shooters ES', 'Mire Spikes WS'),
('Mire Minibridge NE', 'Mire Right Bridge SE'),
('Mire BK Door Room EN', 'Mire Ledgehop WN'),
('Mire BK Door Room N', 'Mire Left Bridge S'),
('Mire Spikes SW', 'Mire Crystal Dead End NW'),
('Mire Ledgehop NW', 'Mire Bent Bridge SW'),
('Mire Bent Bridge W', 'Mire Over Bridge E'),
('Mire Over Bridge W', 'Mire Fishbone E'),
('Mire Fishbone SE', 'Mire Spike Barrier NE'),
('Mire Spike Barrier SE', 'Mire Wizzrobe Bypass NE'),
('Mire Conveyor Crystal SE', 'Mire Neglected Room NE'),
('Mire Tile Room SW', 'Mire Conveyor Barrier NW'),
('Mire Block X WS', 'Mire Tall Dark and Roomy ES'),
('Mire Crystal Left WS', 'Mire Falling Foes ES'),
('TR Lobby Ledge NE', 'TR Hub SE'),
('TR Compass Room NW', 'TR Hub SW'),
('TR Hub ES', 'TR Torches Ledge WS'),
('TR Hub EN', 'TR Torches WN'),
('TR Hub NW', 'TR Pokey 1 SW'),
('TR Hub NE', 'TR Tile Room SE'),
('TR Torches NW', 'TR Roller Room SW'),
('TR Pipe Pit WN', 'TR Lava Dual Pipes EN'),
('TR Lava Island ES', 'TR Pipe Ledge WS'),
('TR Lava Dual Pipes WN', 'TR Pokey 2 EN'),
('TR Lava Dual Pipes SW', 'TR Twin Pokeys NW'),
('TR Pokey 2 ES', 'TR Lava Island WS'),
('TR Dodgers NE', 'TR Lava Escape SE'),
('TR Lava Escape NW', 'TR Dash Room SW'),
('TR Hallway WS', 'TR Lazy Eyes ES'),
('TR Dark Ride SW', 'TR Dash Bridge NW'),
('TR Dash Bridge SW', 'TR Eye Bridge NW'),
('TR Dash Bridge WS', 'TR Crystal Maze ES'),
('GT Torch WN', 'GT Conveyor Cross EN'),
('GT Hope Room EN', 'GT Tile Room WN'),
('GT Big Chest SW', 'GT Invisible Catwalk NW'),
('GT Bob\'s Room SE', 'GT Invisible Catwalk NE'),
('GT Speed Torch NE', 'GT Petting Zoo SE'),
('GT Speed Torch SE', 'GT Crystal Conveyor NE'),
('GT Warp Maze (Pits) ES', 'GT Invisible Catwalk WS'),
('GT Hookshot NW', 'GT DMs Room SW'),
('GT Hookshot SW', 'GT Double Switch NW'),
('GT Warp Maze (Rails) WS', 'GT Randomizer Room ES'),
('GT Conveyor Star Pits EN', 'GT Falling Bridge WN'),
('GT Falling Bridge WS', 'GT Hidden Star ES'),
('GT Dash Hall NE', 'GT Hidden Spikes SE'),
('GT Hidden Spikes EN', 'GT Cannonball Bridge WN'),
('GT Gauntlet 3 SW', 'GT Gauntlet 4 NW'),
('GT Gauntlet 5 WS', 'GT Beam Dash ES'),
('GT Wizzrobes 2 NE', 'GT Conveyor Bridge SE'),
('GT Conveyor Bridge EN', 'GT Torch Cross WN'),
('GT Crystal Circles SW', 'GT Left Moldorm Ledge NW')
]
default_one_way_connections = [
('Sewers Pull Switch S', 'Sanctuary N'),
('Eastern Duo Eyegores NE', 'Eastern Boss SE'),
('Desert Wall Slide NW', 'Desert Boss SW'),
('Tower Altar NW', 'Tower Agahnim 1 SW'),
('PoD Harmless Hellway SE', 'PoD Arena Main NE'),
('PoD Dark Alley NE', 'PoD Boss SE'),
('Swamp T NW', 'Swamp Boss SW'),
('Thieves Hallway NE', 'Thieves Boss SE'),
('Mire Antechamber NW', 'Mire Boss SW'),
('TR Final Abyss NW', 'TR Boss SW'),
('GT Invisible Bridges WS', 'GT Invisible Catwalk ES'),
('GT Validation WS', 'GT Frozen Over ES'),
('GT Brightly Lit Hall NW', 'GT Agahnim 2 SW')
]
# For crossed
# offset from 0x122e17, sram storage, write offset from compass_w_addr, 0 = jmp or # of nops, dungeon_id
compass_data = {
'Hyrule Castle': (0x1, 0xc0, 0x16, 0, 0x02),
'Eastern Palace': (0x1C, 0xc1, 0x28, 0, 0x04),
'Desert Palace': (0x35, 0xc2, 0x4a, 0, 0x06),
'Agahnims Tower': (0x51, 0xc3, 0x5c, 0, 0x08),
'Swamp Palace': (0x6A, 0xc4, 0x7e, 0, 0x0a),
'Palace of Darkness': (0x83, 0xc5, 0xa4, 0, 0x0c),
'Misery Mire': (0x9C, 0xc6, 0xca, 0, 0x0e),
'Skull Woods': (0xB5, 0xc7, 0xf0, 0, 0x10),
'Ice Palace': (0xD0, 0xc8, 0x102, 0, 0x12),
'Tower of Hera': (0xEB, 0xc9, 0x114, 0, 0x14),
'Thieves Town': (0x106, 0xca, 0x138, 0, 0x16),
'Turtle Rock': (0x11F, 0xcb, 0x15e, 0, 0x18),
'Ganons Tower': (0x13A, 0xcc, 0x170, 2, 0x1a)
}
| 48.242465 | 168 | 0.642514 | ["MIT"] | oobitydoo/ALttPDoorRandomizer | DoorShuffle.py | 108,835 | Python |
""" info API method."""
from ibsng.handler.handler import Handler
class getAllGatewayNames(Handler):
""" info method class."""
def setup(self, **kwargs):
"""Setup required parameters.
:param dict kwargs: input args
:return: void
:rtype: void
"""
for key, value in kwargs.items():
setattr(self, key, value)
| 21.055556 | 41 | 0.583113 | ["MIT"] | ParspooyeshFanavar/pyibsng | ibsng/handler/online_payment/get_all_gateway_names.py | 379 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operation_group_two_operations import build_test_five_request, build_test_four_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OperationGroupTwoOperations:
"""OperationGroupTwoOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~multiapi.v3.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def test_four(
self,
input: Optional[Union[IO, "_models.SourcePath"]] = None,
**kwargs: Any
) -> None:
"""TestFour should be in OperationGroupTwoOperations.
:param input: Input parameter.
:type input: IO or ~multiapi.v3.models.SourcePath
:keyword str content_type: Media type of the body sent to the API. Default value is
"application/json". Allowed values are: "application/pdf", "image/jpeg", "image/png",
"image/tiff", "application/json."
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[Union[str, "_models.ContentType"]]
_json = None
_content = None
if content_type.split(";")[0] in ['application/json']:
if input is not None:
_json = self._serialize.body(input, 'SourcePath')
elif content_type.split(";")[0] in ['application/pdf', 'image/jpeg', 'image/png', 'image/tiff']:
_content = input
else:
raise ValueError(
"The content_type '{}' is not one of the allowed values: "
"['application/pdf', 'image/jpeg', 'image/png', 'image/tiff', 'application/json']".format(content_type)
)
request = build_test_four_request(
content_type=content_type,
json=_json,
content=_content,
template_url=self.test_four.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
test_four.metadata = {'url': '/multiapi/two/testFourEndpoint'} # type: ignore
@distributed_trace_async
async def test_five(
self,
**kwargs: Any
) -> None:
"""TestFive should be in OperationGroupTwoOperations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_test_five_request(
template_url=self.test_five.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
test_five.metadata = {'url': '/multiapi/two/testFiveEndpoint'} # type: ignore
| 43 | 133 | 0.667774 | ["MIT"] | Sneezry/autorest.python | test/multiapi/Expected/AcceptanceTests/Multiapi/multiapi/v3/aio/operations/_operation_group_two_operations.py | 6,321 | Python |
import pytest
from app.db import model, session_ctx
from app.util import exceptions
from app.server.routes import routes
from app.server.requestutils import *
import flask
import flask.testing
def test_pubsubify_excs(fake_import: model.Import, client_with_modifiable_routes: flask.testing.FlaskClient):
client = client_with_modifiable_routes
# pre-populate an import that will get error'd
with session_ctx() as sess:
new_import = fake_import
sess.add(new_import)
sess.commit()
@pubsubify_excs
def ise_exc() -> flask.Response:
raise exceptions.ISvcException("a bad happened", imports=[new_import])
client.application.add_url_rule('/test_pubsubify_excs', view_func=ise_exc, methods=["GET"])
resp = client.get('/test_pubsubify_excs')
assert resp.status_code == 202
with session_ctx() as sess:
recovered_import: Import = Import.get(new_import.id, sess)
assert recovered_import.status == model.ImportStatus.Error
assert recovered_import.error_message == "a bad happened"
| 32.30303 | 109 | 0.737336 | ["BSD-3-Clause"] | broadinstitute/import-service | app/tests/test_requestutils.py | 1,066 | Python |
def repl(elt, req, eq):
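    # Apply the reaction that produces `elt` as many whole batches as needed to
    # cover the current requirement: the reaction's inputs are added to the
    # requirements and the produced amount is subtracted, so any surplus is
    # kept as a negative requirement and reused later.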
r = req.copy()
q = r[elt] // eq['qte']
if r[elt] % eq['qte'] != 0: q += 1
for i in eq['inp']:
if i not in r: r[i] = 0
r[i] += q * eq['inp'][i]
r[elt] -= q * eq['qte']
return r
def optimize(need, dest, eqs):
    # Expand every outstanding requirement (other than the raw resource `dest`)
    # through its reaction until only `dest` remains, then return how much of
    # it is needed.
    req = need.copy()
    while any(req[i] > 0 and i != dest for i in req):
        elt = [i for i in req if req[i] > 0 and i != dest][0]
        req = repl(elt, req, eqs[elt])
    return req[dest]
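# Parse the reaction list: a line such as "a A, b B => c C" becomes
# eqs['C'] = {'qte': c, 'inp': {'A': a, 'B': b}}.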
with open('data/input14.txt') as f:
l_eq = f.readlines()
eqs = {}
for eq in l_eq:
left, right = eq[:-1].split(' => ')
out = right.split()
eqs[out[1]] = {'qte': int(out[0])}
eqs[out[1]]['inp'] = {}
for elt in left.split(', '):
item = elt.split()
eqs[out[1]]['inp'][item[1]] = int(item[0])
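# Part 1: ore required to produce a single unit of fuel.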
req = {'FUEL': 1}
q_ore = optimize(req, 'ORE', eqs)
print(q_ore)
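# Part 2: how much fuel can 10**12 ore produce? Start from a lower bound and
# grow the upper bound until it costs at least the full ore budget.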
goal = 10**12
low = goal // q_ore
high = low
i = 1
while optimize({'FUEL': high}, 'ORE', eqs) < goal:
high = low + 10**i
i += 1
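# Binary search between low and high for the largest fuel amount whose ore
# cost stays within the budget.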
while True:
m = (high + low) // 2
if optimize({'FUEL': m}, 'ORE', eqs) > goal:
high = m
else:
low = m
if high - low < 2:
print(low)
        break
| 21.344828 | 61 | 0.474152 | ["MIT"] | OpesMentis/AdventOfCode_2019 | day14.py | 1,238 | Python |
from django import forms
from .models import Produto
class ProdutoForm(forms.ModelForm):
class Meta:
model = Produto
fields = '__all__'
| 15.9 | 35 | 0.679245 | ["MIT"] | jonathan-mothe/estoque | produto/forms.py | 159 | Python |