text | meta
---|---|
from django_notify.tests.base import BaseTest
from django_notify.storage.cookie import CookieStorage
def set_cookie_data(storage, messages, invalid=False, encode_empty=False):
"""
Set ``request.COOKIES`` with the encoded data and remove the storage
backend's loaded data cache.
"""
encoded_data = storage._encode(messages, encode_empty=encode_empty)
if invalid:
# Truncate the first character so that the hash is invalid.
encoded_data = encoded_data[1:]
storage.request.COOKIES = {CookieStorage.cookie_name: encoded_data}
if hasattr(storage, '_loaded_data'):
del storage._loaded_data
def stored_cookie_messages_count(storage, response):
"""
Return an integer containing the number of messages stored.
"""
# Get the messages cookie; treat it as empty if its max-age is 0
# (which means it has been marked for deletion).
cookie = response.cookies.get(storage.cookie_name)
if not cookie or cookie['max-age'] == 0:
return 0
data = storage._decode(cookie.value)
if not data:
return 0
return len(data)
class CookieTest(BaseTest):
storage_class = CookieStorage
def stored_messages_count(self, storage, response):
return stored_cookie_messages_count(storage, response)
def test_get(self):
storage = self.storage_class(self.get_request())
# Set initial data.
example_messages = ['test', 'me']
set_cookie_data(storage, example_messages)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_get_bad_cookie(self):
request = self.get_request()
storage = self.storage_class(request)
# Set initial (invalid) data.
example_messages = ['test', 'me']
set_cookie_data(storage, example_messages, invalid=True)
# The invalid cookie data should be discarded, leaving no messages.
self.assertEqual(list(storage), [])
def test_max_cookie_length(self):
"""
If the data exceeds what is allowed in a cookie, older messages are
removed before saving (and returned by the ``update`` method).
"""
storage = self.get_storage()
response = self.get_response()
for i in range(5):
storage.add(str(i) * 900)
unstored_messages = storage.update(response)
cookie_storing = self.stored_messages_count(storage, response)
self.assertEqual(cookie_storing, 4)
self.assertEqual(len(unstored_messages), 1)
self.assertEqual(unstored_messages[0].message, '0' * 900)
| {
"content_hash": "7ab860da6a47d69b8a679aec1df86663",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 75,
"avg_line_length": 34.8421052631579,
"alnum_prop": 0.6525679758308157,
"repo_name": "Ksynko/django-crm",
"id": "660b518a9f8108164de0eebe41830c47fa7164fc",
"size": "2648",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sample_project/external_apps/django_notify/tests/cookie.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3504"
},
{
"name": "HTML",
"bytes": "57332"
},
{
"name": "JavaScript",
"bytes": "675"
},
{
"name": "PLpgSQL",
"bytes": "22814"
},
{
"name": "Python",
"bytes": "287833"
}
],
"symlink_target": ""
} |
'''
Borrowed from https://github.com/rakshasa/rtorrent-vagrant/blob/master/scripts/xmlrpc2scgi.py
Allows our script to directly communicate with the rtorrent socket instead
of needing a webserver.
EX.
xmlrpc2scgi.py -p scgi:///home/deerocket/.config/rtorrent/socket system.listMethods
'''
import sys, cStringIO as StringIO
import xmlrpclib, urllib, urlparse, socket
from urlparse import uses_netloc
uses_netloc.append('scgi')
def do_scgi_xmlrpc_request(host, methodname, params=()):
xmlreq = xmlrpclib.dumps(params, methodname)
xmlresp = SCGIRequest(host).send(xmlreq)
return xmlresp
def do_scgi_xmlrpc_request_py(host, methodname, params=()):
xmlresp = do_scgi_xmlrpc_request(host, methodname, params)
return xmlrpclib.loads(xmlresp)[0][0]
class SCGIRequest(object):
def __init__(self, url):
self.url=url
self.resp_headers=[]
def __send(self, scgireq):
scheme, netloc, path, query, frag = urlparse.urlsplit(self.url)
host, port = urllib.splitport(netloc)
if netloc:
addrinfo = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
assert len(addrinfo) == 1, "There's more than one? %r"%addrinfo
sock = socket.socket(*addrinfo[0][:3])
sock.connect(addrinfo[0][4])
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(path)
sock.send(scgireq)
recvdata = resp = sock.recv(1024)
while recvdata != '':
recvdata = sock.recv(1024)
resp += recvdata
sock.close()
return resp
def send(self, data):
"Send data over scgi to url and get response"
scgiresp = self.__send(self.add_required_scgi_headers(data))
resp, self.resp_headers = self.get_scgi_resp(scgiresp)
return resp
@staticmethod
def encode_netstring(string):
"Encode string as netstring"
return '%d:%s,'%(len(string), string)
@staticmethod
def make_headers(headers):
"Make scgi header list"
return '\x00'.join(['%s\x00%s'%t for t in headers])+'\x00'
@staticmethod
def add_required_scgi_headers(data, headers=[]):
"Wrap data in an scgi request,\nsee spec at: http://python.ca/scgi/protocol.txt"
headers = SCGIRequest.make_headers([('CONTENT_LENGTH', str(len(data))),('SCGI', '1'),] + headers)
enc_headers = SCGIRequest.encode_netstring(headers)
return enc_headers+data
@staticmethod
def gen_headers(file):
"Get header lines from scgi response"
line = file.readline().rstrip()
while line.strip():
yield line
line = file.readline().rstrip()
@staticmethod
def get_scgi_resp(resp):
"Get xmlrpc response from scgi response"
fresp = StringIO.StringIO(resp)
headers = []
for line in SCGIRequest.gen_headers(fresp):
headers.append(line.split(': ', 1))
xmlresp = fresp.read()
return (xmlresp, headers)
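# Illustrative note (added for clarity; not part of the original script): the SCGI
# framing produced by add_required_scgi_headers is a netstring-wrapped header block
# followed by the raw payload. For a hypothetical 6-byte payload '<xml/>':
#   SCGIRequest.add_required_scgi_headers('<xml/>')
#   == '24:CONTENT_LENGTH\x006\x00SCGI\x001\x00,<xml/>'
# i.e. '<header length>:<NUL-separated headers>,<payload>' per the SCGI spec.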
class RTorrentXMLRPCClient(object):
def __init__(self, url, methodname=''):
self.url = url
self.methodname = methodname
def __call__(self, *args):
scheme, netloc, path, query, frag = urlparse.urlsplit(self.url)
xmlreq = xmlrpclib.dumps(args, self.methodname)
if scheme == 'scgi':
xmlresp = SCGIRequest(self.url).send(xmlreq)
return xmlrpclib.loads(xmlresp)[0][0]
elif scheme == 'http':
raise Exception('Unsupported protocol')
elif scheme == '':
raise Exception('Unsupported protocol')
else:
raise Exception('Unsupported protocol')
def __getattr__(self, attr):
methodname = self.methodname and '.'.join([self.methodname,attr]) or attr
return RTorrentXMLRPCClient(self.url, methodname)
def convert_params_to_native(params):
"Parse xmlrpc-c command line arg syntax"
cparams = []
for param in params:
if len(param) < 2 or param[1] != '/':
cparams.append(param)
continue
if param[0] == 'i':
ptype = int
elif param[0] == 'b':
ptype = bool
elif param[0] == 's':
ptype = str
else:
cparams.append(param)
continue
cparams.append(ptype(param[2:]))
return tuple(cparams)
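# Illustrative examples (added; values are made up): the xmlrpc-c style prefixes
# 'i/', 'b/' and 's/' coerce the remainder, anything else is passed through as-is.
#   convert_params_to_native(['i/42', 's/hello', 'plain'])  # -> (42, 'hello', 'plain')
#   convert_params_to_native(['b/'])                        # -> (False,) since bool('') is False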
def print_script(response):
for line in response:
print " ".join(map(str, line))
def main(argv):
if len(argv) < 1:
print "No arguments."
raise SystemExit, -1
output_arg = None
if len(argv[0]) and argv[0][0] == '-':
output_arg = argv[0]
argv.pop(0)
if len(argv) < 2:
print "Too few arguments."
raise SystemExit, -1
host, methodname = argv[:2]
respxml = do_scgi_xmlrpc_request(host, methodname, convert_params_to_native(argv[2:]))
if output_arg == '-p':
print xmlrpclib.loads(respxml)[0][0]
elif output_arg == '-s':
print_script(xmlrpclib.loads(respxml)[0][0])
else:
print respxml
if __name__ == "__main__":
main(sys.argv[1:])
| {
"content_hash": "6a1335a43ed4cae1406cecd9ba3f21ef",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 99,
"avg_line_length": 27,
"alnum_prop": 0.6733809833225038,
"repo_name": "FlannelFox/FlannelFox",
"id": "e4c93e1438bbe24cee943cf673b74e93253c9f9d",
"size": "4640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flannelfox/torrentclients/tools/xmlrpc2scgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "59"
},
{
"name": "Python",
"bytes": "276779"
},
{
"name": "Shell",
"bytes": "3125"
}
],
"symlink_target": ""
} |
import argparse
import calendar
from alc.formatter import CalendarFormatter
from alc.script_filter import ScriptFilter
from datetime import datetime
class ArgumentParserError(Exception):
'''
Raised when argument parsing fails.
'''
pass
class ThrowingArgumentParser(argparse.ArgumentParser):
'''
A thin wrapper around *argparse.ArgumentParser*.
When argument parsing fails, it raises *ArgumentParserError*.
'''
def error(self, message):
'''
:raise ArgumentParserError: failed to parse arguments.
'''
raise ArgumentParserError(message)
class CalendarCommand:
'''
Command for getting *Script Filter XML* of calendar.
:param dict config: configuration of the command. The format is as follows.
example::
config = {
'query': '{query}',
'first_week_day': calendar.MONDAY,
'default_datetime_format': '%Y/%m/%d (%a)\t%H:%M:%S',
'specified_datetime_format': '%Y/%m'
}
print CalendarCommand(config).execute()
+---------------------------+------+----------------------------+
| key | type | default |
+===========================+======+============================+
| query | str | '' |
+---------------------------+------+----------------------------+
| first_week_day | int | 0 (Mon.) |
+---------------------------+------+----------------------------+
| default_datetime_format | str | '%Y/%m/%d (%a) %H:%M:%S' |
+---------------------------+------+----------------------------+
| specified_datetime_format | str | '%Y/%m' |
+---------------------------+------+----------------------------+
'''
_class_val_defs = {
'query': '',
'first_week_day': calendar.MONDAY,
'default_datetime_format': '%Y/%m/%d (%a)\t%H:%M:%S',
'specified_datetime_format': '%Y/%m'
}
def __init__(self, config):
for k, v in self._class_val_defs.items():
value = config[k] if (k in config) else v
setattr(self, '_{0}'.format(k), value)
def execute(self):
'''
Execute this command. Return a *Script Filter XML* string.
:rtype: str
:return: *Script Filter XML*
:raise ValueError:
If return type of *take_action* or *error_action* is not string.
'''
filter_xml = ''
try:
parser = self.get_query_parser()
args = (self._query.strip().split(' ')
if self._query.strip() != '' else '')
parsed_args = vars(parser.parse_args(args))
filter_xml = self.take_action(parsed_args)
except Exception as e:
filter_xml = self.error_action(e)
if isinstance(filter_xml, basestring) is False:
raise ValueError()
return filter_xml
def get_query_parser(self):
'''
Return argument parser of this command.
:rtype: argparse.ArgumentParser
:return: argument parser
'''
parser = ThrowingArgumentParser()
parser.add_argument('month', nargs='?', type=int,
choices=range(1, (12 + 1)))
parser.add_argument('year', nargs='?', type=int,
choices=range(0, (9999 + 1)))
return parser
def take_action(self, query_args):
'''
Create script filter xml.
When it succeeds in parsing the query, this method is called.
:param dict query_args: parsed query
:rtype: str
:return: *Script Filter XML*
'''
len_query_args = len([k for k in query_args.keys()
if query_args[k] is not None])
tmp_datetime = datetime.now()
tmp_dateformat = self._default_datetime_format
if len_query_args != 0:
month = query_args['month']
year = query_args['year'] if (len_query_args == 2) \
else tmp_datetime.year
tmp_datetime = datetime.strptime('%s-%s' % (year, month), '%Y-%m')
tmp_dateformat = self._specified_datetime_format
sf = ScriptFilter()
f = CalendarFormatter(tmp_datetime)
CalendarFormatter.setfirstweekday(self._first_week_day)
sf.append_item(f.datetime(tmp_dateformat))
sf.append_item(f.weekheader())
for w in f.weekdays():
sf.append_item(w)
return sf.xml_s
def error_action(self, e):
'''
Create error script filter xml.
This method is called when an error occurs in *'execute'*.
:param Exception e: exception from *execute*.
:rtype: str
:return: *Script Filter XML*
'''
sf = ScriptFilter()
sf.append_item('usage: alc [month (1-12)] [year (1900-9999)]')
return sf.xml_s
| {
"content_hash": "4c700c62edd6121a20d30a5144366088",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 78,
"avg_line_length": 31.980891719745223,
"alnum_prop": 0.49751045608444533,
"repo_name": "sbkro/alc",
"id": "2fe74a42f4017edfa2b93c9564570a49115341fc",
"size": "5045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/alc/command.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "40873"
},
{
"name": "Shell",
"bytes": "7365"
}
],
"symlink_target": ""
} |
"""
This module implements general data operations.
"""
from __future__ import print_function # start to adapt to Python 3
import numpy as np
from scipy.interpolate import interp1d
from numpy.lib.recfunctions import append_fields
import warnings
class MissingLabel(Exception):
pass
class LocalFitError(Exception):
pass
class Coordinates(object):
"""Stores 2-d coordinates, e.g. timeseries
Parameters
----------
x : 1d array
x-coordinates of points
y : 1d array
y-coordinates of points, must be same length as x
x_name : str (default 'x')
name of x-coordinates
y_name : str (default 'y')
name of y-coordinates
"""
@classmethod
def from_array(cls, array):
"""Load a Coordinates instance from a Numpy 2d structured array"""
x_name = 'x'
y_name = 'y'
if array.dtype.names is not None:
x_name, y_name = array.dtype.names[:2]
if len(array) == 0:
return Coordinates(np.array([]), np.array([]), x_name, y_name)
if len(array[0]) < 2:
raise ValueError('must be a 2 column array')
if len(array[0]) > 2:
warnings.warn('Taking first 2 columns from multicol array')
if array.dtype.names is not None:
x = array[x_name]
y = array[y_name]
else:
x = array[:, 0]
y = array[:, 1]
return Coordinates(x, y, x_name, y_name)
def __init__(self, x, y, x_name='x', y_name='y'):
# if np.any(np.isnan(x)):
# msg = 'NaN value(s) detected in x-array: please remove beforehand'
# raise ValueError(msg)
if len(x) != len(y):
msg = ('Arrays of different lengths! Check x and y input\n'
'x : {}'.format(x) + '\n'
'y : {}'.format(y))
raise ValueError(msg)
self._x = np.array(x)
self.x_name = x_name
self._y = np.array(y)
self.y_name = y_name
self.valid = np.where(np.logical_and(np.logical_not(np.isnan(x)),
np.logical_not(np.isnan(y))))
return
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def clear_x(self):
"""Returns x coordinates for which y is valid"""
if len(self._y) > 0:
return self._x[self.valid]
else:
return np.array([], dtype=float)
@property
def clear_y(self):
"""Returns y coordinates when y is valid"""
if len(self._y) > 0:
return self._y[self.valid]
else:
return np.array([], dtype=float)
@property
def clear(self):
"""Returns coordinates cleared off NaNs"""
return Coordinates(self.clear_x, self.clear_y,
x_name=self.x_name, y_name=self.y_name)
def __getitem__(self, val):
"""Return the sliced Coordinates"""
return Coordinates(self.x[val], self.y[val],
x_name=self.x_name, y_name=self.y_name)
def __len__(self):
"""Returns the number of coordinates"""
return len(self.x)
def as_array(self):
array = np.array(list(zip(self.x, self.y)),
dtype=[(self.x_name, 'f8'), (self.y_name, 'f8')])
return array
def __str__(self):
return str(self.as_array())
def __repr__(self):
return repr(self.as_array())
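# Illustrative example (added; not part of the original module): NaN ordinates are
# dropped by the `clear*` properties, which is what the local-fit routines below rely on.
#   c = Coordinates(np.array([0., 1., 2.]), np.array([1., np.nan, 3.]))
#   c.clear_x  # -> array([0., 2.])
#   c.clear_y  # -> array([1., 3.])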
# NEW LOCAL FIT ESTIMATE USING ARRAYS
def compute_rates(x, y, x_break=None,
anterior_x=[], anterior_y=[],
scale='log',
time_window=15., dt=5.,
join_points=3,
testing=False):
"""Computes rates of array y against x by local fits over shifting window.
Results are evaluated at coordinate array x by linear interpolation, so
that fitted and rate arrays are of same size of x and y.
When possible, it uses anterior values (in parent cell in tunacell context)
to extend the result range. When a timeseries is expected to cover multiple
cell cycles, with continuity hypothesis at the level of rates, it allows to
use parent cell data to extend the range over which the dferivative can be
evaluated.
Parameters
----------
x : 1d ndarray
co-ordinate array (usually array of times for timeseries)
y : 1d ndarray
ordinate (array of values of same length as x array)
x_break : float
value of co-ordinate at which continuity joining is performed: in
tunacell context, this is the time of birth for present cell, and it
corresponds to division time of its parent cell
anterior_x : 1d ndarray
extra-bound anterior co-ordinate array: in tunacell context, this is
the time array extracted from parent cell
anterior_y : 1d ndarray
extra-bound anterior ordinate array: in tunacell context, this is the
array of values extracted from parent cell
scale : str {'linear', 'log'}
expected scale of y versus x. For exponential growth, use 'log' scale.
time_window : float
size of time window over which local fit is performed
dt : float
acquisition period of time array
join_points : int (default 3)
minimal number of points used when performing local fits to make
continuity between anterior and present timeseries
testing : bool {False, True}
verbose output for testing
Returns
-------
rates : 1d ndarray
array of rates estimated at x array (uses interpolation)
fits : 1d ndarray
array of fitted values at x array
anterior_rates : 1d ndarray
array of rates estimated at anterior_x array
anterior_fits : 1d ndarray
array of fitted values at anterior_x array
used_x : 1d ndarray
concatenation with derivative continuity hypothesis of anterior_x and x
used_y : 1d ndarray
concatenation with derivative continuity hypothesis of anterior_y and y
"""
# check array lengths and prepare NaN-cleaned coordinate pairs
coords = Coordinates(x, y)
nans_coords = len(x) * [np.nan, ]
anteriors = Coordinates(anterior_x, anterior_y)
nans_anteriors = len(anterior_x) * [np.nan, ]
# define x_break by convention
if x_break is None:
x_break = coords.x[0] - dt/2. # artificial birth time (CONVENTION)
# operators and their inverse
if scale == 'log':
y_operator = np.log
y_inv_operator = np.exp
elif scale == 'linear':
def y_operator(vals):
return vals
def y_inv_operator(vals):
return vals
# find period (can be different from dt when multiple acquisition periods)
no_data = len(coords.clear_x) == 0
if no_data:
return nans_coords, nans_coords, nans_anteriors, nans_anteriors, [], []
too_few_data = coords.clear_x[-1] - coords.clear_x[0] < time_window
if too_few_data:
return nans_coords, nans_coords, nans_anteriors, nans_anteriors, [], []
# coords.clear_x is necessarily of length >= 2
period = np.amin(coords.clear_x[1:] - coords.clear_x[:-1])
# number of points per time window
n_points = int(np.round(time_window/period, decimals=0))
if testing:
print('Local fits will be performed over {} points'.format(n_points))
if n_points < 2:
msg = ('Trying to perform linear fit over less than 2 points. '
'Please use a larger time_window parameter.')
raise LocalFitError(msg)
elif n_points == 2:
msg = ('Performing linear fit over 2 points: '
'for rate computation experimental errors are not smoothed')
warnings.warn(msg)
# define the number of points for estimates at cell birth, parent division
if n_points >= join_points:
n_joints = n_points
else:
n_joints = join_points
if testing:
print('For values at division/birth:')
print(' try to fit over {} points (local fits)'.format(n_points))
print(' but allows to reduce up to {} points'.format(
join_points))
print()
# auxiliary variables
op_y_break = None # estimate of y value at joining (value at birth)
op_ay_break = None # estimate of anterior y value at joining (at division)
op_y = y_operator(coords.clear_y)
if len(coords.clear_x) >= join_points:
# fit to at least join_points, more if possible
r, i = np.polyfit(coords.clear_x[:n_joints], op_y[:n_joints], 1)
op_y_break = i + r * x_break
if testing:
msg = ('Data to fit:\n'
'x : {}'.format(coords.clear_x) + '\n'
'y : {}'.format(coords.clear_y))
print(msg)
# try to use anterior values : compute break offset
trans_op_ay = [] # translated, operated anterior values; default: empty
offset = None
if op_y_break is not None and len(anteriors.clear_x) > 0:
op_ay = y_operator(anteriors.clear_y)
# 2 checks:
# 1. there are enough points to get the final value estimate
cdt1 = len(anteriors.clear_x) >= join_points
# 2. initial value is determined
cdt2 = op_y_break is not None
if cdt1 and cdt2:
r, i = np.polyfit(anteriors.clear_x[-n_joints:],
op_ay[-n_joints:], 1)
op_ay_break = i + r * x_break
if testing:
msg = ('Extrapolated anterior value at break:\n '
'break time, value: '
'{}, {}'.format(x_break, y_inv_operator(op_ay_break)))
print(msg)
msg = ('Anterior data used for fitting:\n'
'x : {}'.format(anteriors.clear_x) + '\n'
'y : {}'.format(anteriors.clear_y))
print(msg)
# adjust values by translating
offset = op_ay_break - op_y_break
trans_op_ay = op_ay - offset
if testing:
msg = ('Translated, operated anterior values are:\n'
'y\' {}'.format(trans_op_ay))
print(msg)
print()
# concatenate operated values if enough previous values to compute offset
if len(trans_op_ay) > 0:
all_x = np.concatenate([anteriors.clear_x, coords.clear_x])
all_op_y = np.concatenate([trans_op_ay, op_y])
else:
all_x = coords.clear_x
all_op_y = op_y
all_y = y_inv_operator(all_op_y)
fit_x = np.zeros_like(all_x)
fit_op_y = np.zeros_like(all_x) # fitted values
rate_op_y = np.zeros_like(all_x) # rates of local fits
# sliding window
for index, t in enumerate(all_x):
t_start = t - period/2. # convention
t_stop = t_start + time_window
# time of evaluation
time_eval = (t_start + t_stop)/2.
fit_x[index] = time_eval
# check that at least one time point is larger than break point
if t_stop <= x_break:
fit_op_y[index] = np.nan
rate_op_y[index] = np.nan
if testing:
msg = ('+ window range does not intercept x range: next')
print(msg)
continue
# reduce to points to fit
lower = all_x > t_start
upper = all_x <= t_stop
boo = np.logical_and(lower, upper)
if testing:
print('+ window:', end=' ')
print('{} < t < {} '.format(t_start, t_stop), end=' ')
print('({} points)'.format(len(all_x[boo])))
# check that the window contains at least n_points points
if len(all_x[boo]) < n_points:
if testing:
print('Not enough points on this time window')
print('({} instead of {})'.format(len(all_x[boo]), n_points))
print('next')
# not enough point in this window: insert NaN
rate_op_y[index] = np.nan
fit_op_y[index] = np.nan
else:
rate, intercept = np.polyfit(all_x[boo], all_op_y[boo], 1)
fit_op_y[index] = rate * time_eval + intercept
rate_op_y[index] = rate
if testing:
msg = ('time : {}'.format(time_eval) + '\n'
'fitted value : {}'.format(fit_op_y[index]) + '\n'
'computed rate : {}'.format(rate))
print(msg)
# fit and rates coordinates that will be interpolated (if possible)
fit_op_coords = Coordinates(fit_x, fit_op_y)
rate_coords = Coordinates(fit_x, rate_op_y)
# interpolation may be defined over both x range and anterior_x range
out_y = np.array(len(coords.x) * [np.nan, ]) # initialize to NaNs
out_anterior_y = np.array(len(anteriors.x) * [np.nan, ])
out_rate = np.array(len(coords.x) * [np.nan, ])
out_anterior_rate = np.array(len(anteriors.x) * [np.nan, ])
if len(fit_op_coords.clear_x) > 1: # at least 2 points to interpolate
f = interp1d(fit_op_coords.clear_x, fit_op_coords.clear_y,
kind='linear', assume_sorted=True, bounds_error=False)
# valid data points: interpolation at initial x coordinates
out_y[coords.valid] = y_inv_operator(f(coords.clear_x))
if len(anteriors.valid) > 0 and offset is not None:
out_anterior_y[anteriors.valid] = y_inv_operator(f(anteriors.clear_x) + offset)
if len(rate_coords.clear_x) > 1:
f = interp1d(rate_coords.clear_x, rate_coords.clear_y, kind='linear',
assume_sorted=True, bounds_error=False)
# valid data points: interpolation at initial x coordinates
out_rate[coords.valid] = f(coords.clear_x)
if len(anteriors.valid) > 0 and offset is not None:
out_anterior_rate[anteriors.valid] = f(anteriors.clear_x)
return out_rate, out_y, out_anterior_rate, out_anterior_y, all_x, all_y
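# Illustrative sanity check (added; values are made up): for noiseless exponential
# data y = exp(2 * x) sampled densely enough, scale='log' makes the local fits
# linear in log-space, so the returned rates are ~2 everywhere and the fitted
# values reproduce y (up to interpolation effects at the window edges).
#   x = np.arange(0., 50., 1.)
#   rates, fits, _, _, _, _ = compute_rates(x, np.exp(2. * x), scale='log',
#                                           time_window=15., dt=1.)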
class ExtrapolationError(Exception):
pass
class NoTarget(ExtrapolationError):
pass
class TooFewPoints(ExtrapolationError):
pass
class TooRemoteFromTarget(ExtrapolationError):
pass
def extrapolate_endpoints(x, y, x_target,
scale='log', join_points=3,
distance_max=None):
"""Extrapolate y values at x_target
Parameters
----------
x : 1d ndarray
co-ordinate array (usually array of times for timeseries)
y : 1d ndarray
ordinate (array of values of same length as x array)
x_target : float
value of co-ordinate at which y is inter-/extra-polated
scale : str {'linear', 'log'}
expected scale of y versus x. For exponential growth, use 'log' scale.
join_points : int (default 3)
minimal number of points used when performing local fits to make
continuity between anterior and present timeseries
distance_max : float (default None)
upper bound to the distance between closest x to x_target to accept
extrapolation
Returns
-------
float
value estimated at x_target for y array
Raises
------
ExtrapolationError
when extrapolation fails because there are too few points, or
when closest x to x_target is further away than distance_max
"""
if x_target is None or np.isnan(x_target):
raise NoTarget('x_target: {} is not a number'.format(x_target))
npts = join_points
if scale == 'log':
y_operator = np.log
y_inv_operator = np.exp
elif scale == 'linear':
y_operator = lambda x: x
y_inv_operator = lambda x: x
coords = Coordinates(x, y)
if len(coords.clear_x) < join_points:
raise TooFewPoints('{} < {}'.format(len(coords.clear_x), join_points))
op_values = y_operator(coords.clear_y)
# when target is inside the data range: interpolate
if np.amin(coords.clear_x) <= x_target <= np.amax(coords.clear_x):
f = interp1d(coords.clear_x, op_values, kind='linear',
bounds_error=False)
return y_inv_operator(f(x_target))
# otherwise we extrapolate
if distance_max is not None:
dist = np.amin(np.abs(coords.clear_x - x_target))
if dist > distance_max:
msg = ('Distance to target: {} > {}'.format(dist, distance_max))
raise TooRemoteFromTarget(msg)
if x_target > np.amax(coords.clear_x):
rate, intercept = np.polyfit(coords.clear_x[-npts:], op_values[-npts:], 1)
else:
rate, intercept = np.polyfit(coords.clear_x[:npts], op_values[:npts], 1)
return y_inv_operator(rate * x_target + intercept)
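# Illustrative example (added; values are made up): with scale='log', extrapolation
# is linear in log-space, so exponential data is extended exponentially.
#   x = np.arange(5, dtype=float)                      # [0, 1, 2, 3, 4]
#   extrapolate_endpoints(x, np.exp(x), x_target=5.)   # -> approximately exp(5)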
# List of operator acting on Coordinates
def _cycle_linear(coords):
if len(coords.clear_x) < 2:
raise ValueError('Not enough valid coordinates')
a, b = np.polyfit(coords.clear_x, coords.clear_y, 1)
return a, b
def _cycle_log(coords):
logcoords = logarithm(coords)
return _cycle_linear(logcoords)
def logarithm(coords):
# if len(coords.valid) < 2:
# raise ValueError('Not enough valid coordinates')
return Coordinates(coords.x, np.log(coords.y))
def derivative(coords):
"""Returns the derivative of coordinates evaluated by at original timings
Derivartives are computed by taking successive non-nan values and
computing finite differences. They are evaluated at half time between
points.
Parameters
----------
coords : Coordinates instance
Returns
-------
Coordinates instance
finite differences interpolated at original times where values are
non nans.
"""
out_y = np.array(len(coords.x) * [np.nan, ])
clears = coords.clear
# one need at least three valid values to get estimates of 2 points
if len(clears) < 3:
return Coordinates(coords.x, out_y) # return only nans, deal with it
delta_x = additive_increments(clears.x)
delta_y = additive_increments(clears.y)
new_x = (clears.x[1:] + clears.x[:-1])/2.
new_y = delta_y/delta_x
# interpolate to associate to initial times : at least 2 valid points
f = interp1d(new_x, new_y, kind='linear', assume_sorted=True, bounds_error=False)
out_y[coords.valid] = f(coords.clear_x)
return Coordinates(coords.x, out_y)
def logderivative(coords):
logcoords = Coordinates(coords.x, np.log(coords.y))
return derivative(logcoords)
# list of operators acting on 1-D arrays
def additive_increments(ar):
"""Computes step-wise additive increments.
Parameter
---------
ar : 1d Numpy ndarray, n items
Returns
-------
1d Numpy ndarray, n-1 items
"""
a = ar[1:]
b = ar[:len(ar)-1]
return a-b
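# Note (added): additive_increments(ar) is equivalent to np.diff(ar),
# e.g. additive_increments(np.array([1., 3., 6.])) -> array([2., 3.])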
def multiplicative_increments(ar):
"""Computes step-wise multiplicative increments.
Parameter
---------
ar : 1d Numpy ndarray, n items
Returns
-------
1d Numpy ndarray, n-1 items
"""
a = ar[1:]
b = ar[:len(ar)-1]
return a/b
# functions acting on structured arrays
def compute_secondary_observables(data):
"""Computes secondary observables and extends matrix of observables.
Argument
--------
data -- structured array
must contains following fields: length, width, fluo, area, time
Returns
-------
out -- structured array
new fields are added (check `out.dtype.names`)
"""
ell, w, fluo, area, time = map(np.array,
zip(*data[['length',
'width',
'fluo',
'area',
'time']])
)
if len(time) > 1:
delta_t = time[1]-time[0]
age = (time - time[0] + delta_t/2.)/(time[-1] - time[0] + delta_t)
else:
age = np.nan
volume = spherocylinder_volume(ell, w)
concentration = fluo/volume
density = fluo/area
ALratio = area/ell
out = append_fields(data,
['volume',
'concentration',
'density',
'ALratio',
'age'],
[volume,
concentration,
density,
ALratio,
age],
usemask=False, fill_value=np.nan)
return out
# specific functions
def spherocylinder_volume(length, width):
"""Returns volume of sphero-cylinder.
Arguments
---------
length -- float
width -- float
Returns
-------
volume -- float
Notes
-----
sphero-cylinder is composed of a cylinder of height `h`, radius `R`,
and two hemispheres of radius `R` at each side.
length is `h + 2*R`
width is `2*R`
"""
return np.pi/4.*width**2*(length-width/3.)
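# Sanity check (added): when length == width == 2*R the cylindrical part vanishes and
# the formula reduces to the sphere volume, pi/4 * (2R)**2 * (2R - 2R/3) = 4/3 * pi * R**3.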
def gaussian_smooth(xdata, ydata, sigma=1., x=None):
"""Returns Gaussian smoothed signal.
Arguments
---------
xdata -- ndarray, co-ordinate of data points
ydata -- ndarray, ordinates of data points
sigma -- float (optional, default 1.), x-scale for smoothing
x (optional) -- float or ndarray,
values at which the Gaussian smoothed signal is computed
default xdata
Returns
-------
sequence of (x, gy) co-ordinates of the Gaussian smoothed signal.
"""
# convert axis for data
xx = np.expand_dims(xdata, axis=1)
yy = np.expand_dims(ydata, axis=1)
def g(t, t0, s):
return np.exp(-(t-t0)**2/(2.*s**2))/(np.sqrt(2.*np.pi)*s)
def num(t):
return np.sum(g(t, xx, sigma)*yy, axis=0)
def den(t):
return np.sum(g(t, xx, sigma), axis=0)
if x is not None:
u = np.array(x)
else:
u = xdata
return zip(u, num(u)/den(u))
def smooth_timeseries(t, x, option='mean', sigma=1.):
"""Smooth timeseries.
Arguments
---------
t -- array of timepoints
x -- array of values to be smoothen, same length as t
Parameters
----------
option -- str, default 'mean'
method to choose among ['mean', 'gaussian']
sigma -- float, default 1
range of t over which Gaussian smoothing is performed (parameter)
Returns
-------
t, y
t -- array of times
y -- array of smoothen values
"""
nbr = len(t)
y = np.zeros_like(x)
if option == 'gaussian':
return zip(*gaussian_smooth(t, x, sigma=sigma))
if option == 'mean':
for k in range(1, nbr-1):
y[k] = np.mean(x[k-1:k+2])
y[0] = np.mean(x[:2])
y[-1] = np.mean(x[-2:])
return t, y
def get_smooth_cell_timeseries(cell, observable='width'):
"""Returns local 3-points averaged data."""
time, obs = zip(*cell.data[['time', observable]])
smobs = []
if len(time) > 2:
# average over 3 points within segment
smobs = [np.mean(obs[i-1:i+2]) for i in range(1, len(obs)-1)]
# parent
parent = cell.parent
if parent and parent.data is not None:
pt, pobs = zip(*parent.data[['time', observable]])
smobs.insert(0, np.mean([pobs[-1], obs[0], obs[1]]))
else:
# average over first two points
smobs.insert(0, np.mean(obs[:2]))
nextobs = []
for ch in cell.childs:
chtimes, chobs = zip(*ch.data[['time', observable]])
nextobs.append(chobs[0])
if nextobs:
smobs.append(np.mean([obs[-2], obs[-1], np.mean(nextobs)]))
else:
smobs.append(np.mean(obs[-2:]))
if smobs:
return zip(time, smobs)
else:
return zip(time, obs)
def show_jumps(t, x, threshold=3., mode='multiplicative'):
"""Show data jump over given threshold
Arguments
---------
t -- ndarray
measurement times (in hours)
x -- ndarray (same size as t)
measurement values
Parameters
----------
threshold : float
threshold over which jump is found
as multiplicative increase x(t+1)/x(t)
Returns
-------
Boolean ndarray with True value for points where there is a jump
Notes
-----
1) These jumps are experimental errors:
need to be discarded for further analysis.
2) threshold_jump=1.20 corresponds to 3 doublings/hr
with one point every 5 mins (2**(3*1/12)=1.19...)
"""
tdata = np.array(t)
xdata = np.array(x)
valids = np.array([False for i in range(len(t))])
for k in range(len(t)-1):
try:
delta_t = tdata[k+1] - tdata[k]
dot_log = (xdata[k+1]-xdata[k])/(xdata[k] * delta_t)
if dot_log > threshold*np.log(2.):
valids[k+1] = True
except ZeroDivisionError:
continue
return valids
if __name__ == '__main__':
x = np.arange(50, dtype=float)
y = np.array(len(x) * [np.nan, ])
anterior_x = np.arange(-20, 0, dtype=int)
anterior_y = np.array(len(anterior_x) * [np.nan, ])
anterior_y[np.arange(-20, 0, 5, dtype=int)] = 4.
y[np.arange(0, len(x), 5, dtype=int)] = 2.
r, f, ar, af, xx, yy = compute_rates(x, y, x_break=-.5, scale='linear',
anterior_x=anterior_x,
anterior_y=anterior_y,
dt=1, time_window=15., testing=True)
coords = Coordinates(np.concatenate([anterior_x, x]),
np.concatenate([af, f]),
x_name='time', y_name='value')
array = coords.as_array()
print(array['time'])
print(array['value'])
| {
"content_hash": "1060db17803265ab950e251d4f8ed1aa",
"timestamp": "",
"source": "github",
"line_count": 782,
"max_line_length": 91,
"avg_line_length": 32.89769820971867,
"alnum_prop": 0.5684521495763041,
"repo_name": "LeBarbouze/tunacell",
"id": "02a07721304a6d85b39d6da6cbdafd20b64a64e5",
"size": "25773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tunacell/base/datatools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Python",
"bytes": "541092"
}
],
"symlink_target": ""
} |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^polls/', include('polls.urls')),
]
| {
"content_hash": "565b3ab47b93f3546d766bd6dda04e35",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 37.09090909090909,
"alnum_prop": 0.6936274509803921,
"repo_name": "vithd/vithd.github.io",
"id": "c89122d8e3fa0fe41c191e7e35803feec6a5b165",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/mysite/mysite/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10596"
},
{
"name": "HTML",
"bytes": "22909"
},
{
"name": "JavaScript",
"bytes": "329912"
},
{
"name": "Python",
"bytes": "21417"
}
],
"symlink_target": ""
} |
from selenium import webdriver
#from lxml import html
#existing tag: J-347-3553-E
URL = "https://3ds.pokemon-gl.com/user/%s/secretbase"
#tag format: letter, 3 digits, 4 digits, letter
def get_picture_url(tr_elements):
for tr_element in tr_elements:
for th_element in tr_element.find_elements_by_xpath('.//th'):
print th_element.text
if tr_element.find_elements_by_xpath('.//th')[0].text == 'QR Code':
return tr_element.find_elements_by_xpath('.//img')[0].get_attribute('src')
def get_tag(i, j, k, l):
return '%s-%s-%s-%s' % (i, j, k, l)
def get_location(tr_elements):
for tr_element in tr_elements:
for th_element in tr_element.find_elements_by_xpath('.//th'):
print th_element.text
if tr_element.find_elements_by_xpath('.//th')[0].text == 'Location':
return tr_element.find_elements_by_xpath('.//p')[0].text
def write_to_file(file, tag, location, url):
if tag:
if location:
if url:
file.write('%s,%s,%s\n' % (tag, location, url))
#from pyvirtualdisplay import Display
#display = Display(visible=0, size=(800, 600))
#display.start()
profile = webdriver.FirefoxProfile()
profile.native_events_enabled = False
driver = webdriver.Firefox(profile)
out_file = open("secret_bases.csv", "a")
for i in xrange(ord('A'), ord('Z')+1):
for j in xrange (0, 1000):
for k in xrange (0, 10000):
for l in xrange(ord('A'), ord('Z')+1):
tag = "%s-%03d-%04d-%s" % (chr(i), j, k, chr(l))
url = URL % (tag)
print "Working on url: %s" % (url)
driver.get(url)
tr_elements = driver.find_elements_by_xpath('//tr')
location = get_location(tr_elements)
picture_url = get_picture_url(tr_elements)
write_to_file(out_file, tag, location, picture_url)
out_file.close()
driver.quit()
#display.stop()
| {
"content_hash": "2cc485a2e5dd09f9d8e4a65a6f0386a6",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 31.103448275862068,
"alnum_prop": 0.6324833702882483,
"repo_name": "mskovacic/Projekti",
"id": "f228a59ed943de755274d04c9945861f4a8a7654",
"size": "1804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raspberrypi/isprobavanje/pokemon/global_link_scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "196"
},
{
"name": "C",
"bytes": "129849"
},
{
"name": "C++",
"bytes": "3829"
},
{
"name": "CSS",
"bytes": "160"
},
{
"name": "HTML",
"bytes": "7122"
},
{
"name": "Haskell",
"bytes": "197035"
},
{
"name": "JavaScript",
"bytes": "42080"
},
{
"name": "Perl",
"bytes": "11159"
},
{
"name": "Python",
"bytes": "121078"
},
{
"name": "Raku",
"bytes": "401"
},
{
"name": "Shell",
"bytes": "319"
}
],
"symlink_target": ""
} |
__author__ = 'LimeQM'
from flask.ext.restful import Resource, abort, reqparse
from Server.models import Users
from Server import db
row_data = reqparse.RequestParser()
row_data.add_argument('data', type=str, help='Can not resolve data')
update_list = ['username', 'password', 'email', 'verify']
class User(Resource):
def get(self, token, target=None):
user = Users.verify_auth_token(token)
if user:
return {'user': user.user_info()}
else:
abort(404, message="user with token '{}' do not exist".format(token))
def post(self, token, target):
user = Users.verify_auth_token(token)
if user:
if target in update_list:
data = row_data.parse_args()
if target == 'username':
user.username = data['data']
elif target == 'password':
user.password = Users.hash_password(data['data'])
elif target == 'verify':
return {'matched': user.verify_password(data['data'])}
else:
user.email = data['data']
db.session.commit()
return {'user': user.user_info()}
else:
return abort(404, message="target error")
else:
abort(404, message="user with token '{}' do not exist".format(token))
| {
"content_hash": "fe31a688f64d5642d58d4b41451e6270",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 81,
"avg_line_length": 33.23809523809524,
"alnum_prop": 0.5444126074498568,
"repo_name": "wangjun/PassBank",
"id": "2eb4ebd7ddb72ee371920a3ebbef181ce02025b2",
"size": "1396",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Server/blueprints/API_v1/apis/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5103"
},
{
"name": "HTML",
"bytes": "28680"
},
{
"name": "JavaScript",
"bytes": "32746"
},
{
"name": "Python",
"bytes": "25150"
},
{
"name": "Shell",
"bytes": "785"
}
],
"symlink_target": ""
} |
"""Support for the Hive switches."""
from homeassistant.components.switch import SwitchDevice
from . import DOMAIN, DATA_HIVE, HiveEntity, refresh_system
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Hive switches."""
if discovery_info is None:
return
session = hass.data.get(DATA_HIVE)
devs = []
for dev in discovery_info:
devs.append(HiveDevicePlug(session, dev))
add_entities(devs)
class HiveDevicePlug(HiveEntity, SwitchDevice):
"""Hive Active Plug."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name}
@property
def name(self):
"""Return the name of this Switch device if any."""
return self.node_name
@property
def device_state_attributes(self):
"""Show Device Attributes."""
return self.attributes
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self.session.switch.get_power_usage(self.node_id)
@property
def is_on(self):
"""Return true if switch is on."""
return self.session.switch.get_state(self.node_id)
@refresh_system
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.session.switch.turn_on(self.node_id)
@refresh_system
def turn_off(self, **kwargs):
"""Turn the device off."""
self.session.switch.turn_off(self.node_id)
def update(self):
"""Update all Node data from Hive."""
self.session.core.update_data(self.node_id)
self.attributes = self.session.attributes.state_attributes(self.node_id)
| {
"content_hash": "7e3c8a7c6dca67e55ca57bbdacdb0456",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 80,
"avg_line_length": 28.353846153846153,
"alnum_prop": 0.631578947368421,
"repo_name": "joopert/home-assistant",
"id": "1447f5483a4a0ea4834b07f3fbdc6fbf2dbf4333",
"size": "1843",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/hive/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
O(ST)
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if not root:
return None
if root == p or root == q:
return root
left = self.lowestCommonAncestor(root.left,p,q)
right = self.lowestCommonAncestor(root.right,p,q)
if left and right:
return root
return left if left else right
| {
"content_hash": "95b84d1b8c4054bf8b6f39bbb43d30e1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 57,
"avg_line_length": 26.65,
"alnum_prop": 0.525328330206379,
"repo_name": "youhusky/Facebook_Prepare",
"id": "1e35b714f141243865af3053f689a98913c6ce6c",
"size": "1403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "236. Lowest Common Ancestor of a Binary Tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187109"
}
],
"symlink_target": ""
} |
"""A class of Decoders that may sample to generate the next input.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.contrib.rnn import core_rnn_cell
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = [
"Sampler", "SamplingDecoderOutput", "BasicSamplingDecoder",
"BasicTrainingSampler", "GreedyEmbeddingSampler", "CustomSampler",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
@six.add_metaclass(abc.ABCMeta)
class Sampler(object):
"""Sampler interface. Sampler instances are used by BasicSamplingDecoder."""
@abc.abstractproperty
def batch_size(self):
"""Returns a scalar int32 tensor."""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class SamplingDecoderOutput(
collections.namedtuple("SamplingDecoderOutput",
("rnn_output", "sample_id"))):
pass
class BasicSamplingDecoder(decoder.Decoder):
"""Basic sampling decoder."""
def __init__(self, cell, sampler, initial_state):
"""Initialize BasicSamplingDecoder.
Args:
cell: An `RNNCell` instance.
sampler: A `Sampler` instance.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
Raises:
TypeError: if `cell` is not an instance of `RNNCell` or `sampler`
is not an instance of `Sampler`.
"""
if not isinstance(cell, core_rnn_cell.RNNCell):
raise TypeError("cell must be an RNNCell, received: %s" % type(cell))
if not isinstance(sampler, Sampler):
raise TypeError("sampler must be a Sampler, received: %s" %
type(sampler))
self._cell = cell
self._sampler = sampler
self._initial_state = initial_state
@property
def batch_size(self):
return self._sampler.batch_size
@property
def output_size(self):
# Return the cell output and the id
return SamplingDecoderOutput(
rnn_output=self._cell.output_size,
sample_id=tensor_shape.TensorShape([]))
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state's first component's dtype.
# Return that structure and int32 (the id)
dtype = nest.flatten(self._initial_state)[0].dtype
return SamplingDecoderOutput(
nest.map_structure(lambda _: dtype, self._cell.output_size),
dtypes.int32)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, first_inputs, initial_state)`.
"""
return self._sampler.initialize() + (self._initial_state,)
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
with ops.name_scope(
name, "BasicSamplingDecoderStep", (time, inputs, state)):
cell_outputs, cell_state = self._cell(inputs, state)
sample_ids = self._sampler.sample(
time=time, outputs=cell_outputs, state=cell_state)
(finished, next_inputs, next_state) = self._sampler.next_inputs(
time=time, outputs=cell_outputs, state=cell_state,
sample_ids=sample_ids)
outputs = SamplingDecoderOutput(cell_outputs, sample_ids)
return (outputs, next_state, next_inputs, finished)
class CustomSampler(Sampler):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
class BasicTrainingSampler(Sampler):
"""A (non-)sampler for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
inputs = ops.convert_to_tensor(inputs, name="inputs")
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def batch_size(self):
return self._batch_size
def initialize(self, name=None):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
del time # unused by sample_fn
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for BasicTrainingSampler."""
del outputs # unused by next_inputs_fn
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
class GreedyEmbeddingSampler(Sampler):
"""A (non-)sampler for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a scalar.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
@property
def batch_size(self):
return self._batch_size
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingSampler."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
outputs)
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingSampler."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
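# Minimal usage sketch (added; assumes TF 1.x contrib APIs and that `cell`, `inputs`,
# `sequence_length` and `batch_size` are defined elsewhere; not part of the original module):
#   sampler = BasicTrainingSampler(inputs, sequence_length, time_major=False)
#   my_decoder = BasicSamplingDecoder(cell, sampler,
#                                     cell.zero_state(batch_size, dtypes.float32))
#   outputs_and_state = decoder.dynamic_decode(my_decoder)
# The exact return tuple of dynamic_decode depends on the TensorFlow version.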
| {
"content_hash": "e745386702525e56a85caa47efd5d479",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 89,
"avg_line_length": 34.68827160493827,
"alnum_prop": 0.671945902660379,
"repo_name": "odejesush/tensorflow",
"id": "0c86c00411f10de7e58209a0e7145d94f5147abb",
"size": "11928",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/seq2seq/python/ops/sampling_decoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "177136"
},
{
"name": "C++",
"bytes": "20597014"
},
{
"name": "CMake",
"bytes": "120039"
},
{
"name": "CSS",
"bytes": "7005"
},
{
"name": "Go",
"bytes": "103991"
},
{
"name": "HTML",
"bytes": "551211"
},
{
"name": "Java",
"bytes": "209382"
},
{
"name": "JavaScript",
"bytes": "20077"
},
{
"name": "Jupyter Notebook",
"bytes": "1833831"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "32007"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "187378"
},
{
"name": "Python",
"bytes": "20689461"
},
{
"name": "Shell",
"bytes": "325470"
},
{
"name": "TypeScript",
"bytes": "765149"
}
],
"symlink_target": ""
} |
"""Common network architectures implemented as Sonnet modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sonnet.python.modules.nets.alexnet import AlexNet
from sonnet.python.modules.nets.alexnet import AlexNetFull
from sonnet.python.modules.nets.alexnet import AlexNetMini
from sonnet.python.modules.nets.convnet import ConvNet2D
from sonnet.python.modules.nets.convnet import ConvNet2DTranspose
from sonnet.python.modules.nets.dilation import Dilation
from sonnet.python.modules.nets.dilation import identity_kernel_initializer
from sonnet.python.modules.nets.dilation import noisy_identity_kernel_initializer
from sonnet.python.modules.nets.mlp import MLP
| {
"content_hash": "6a5742778928361fa234c7849ad7aafd",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 49,
"alnum_prop": 0.8380952380952381,
"repo_name": "AlphaSmartDog/DeepLearningNotes",
"id": "98aa87c772cbdd3777bdb80fe211d842403bbd67",
"size": "1419",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Note-1 RNN-DNC择时/Note-1-2 PonderingDNCore L2正则化示例/sonnet/python/modules/nets/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "76094"
},
{
"name": "Jupyter Notebook",
"bytes": "9463332"
},
{
"name": "Python",
"bytes": "8832736"
}
],
"symlink_target": ""
} |
__all__ = ['instagram_download']
from ..common import *
def instagram_download(url, output_dir = '.', merge = True, info_only = False):
html = get_html(url)
vid = r1(r'instagram.com/p/([^/]+)/', html)
description = r1(r'<meta property="og:description" content="([^"]*)"', html)
title = description + " [" + vid + "]"
url = r1(r'<meta property="og:video" content="([^"]*)"', html)
type, ext, size = url_info(url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge = merge)
site_info = "Instagram.com"
download = instagram_download
download_playlist = playlist_not_supported('instagram')
| {
"content_hash": "9701029ccd0e4e7feccf562623b5e944",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 80,
"avg_line_length": 34.8,
"alnum_prop": 0.6206896551724138,
"repo_name": "power12317/you-get",
"id": "0605a6c30662ff88d19f43a10aa42e402fa71ac9",
"size": "719",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "src/you_get/extractors/instagram.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "818"
},
{
"name": "Python",
"bytes": "235854"
}
],
"symlink_target": ""
} |
import os,sys
BASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
from asyncore import main
if __name__ == '__main__':
main.run()
| {
"content_hash": "36d4da476a17205d237fb079383b935c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 25.428571428571427,
"alnum_prop": 0.6741573033707865,
"repo_name": "5StevenWu/Coursepy",
"id": "3b4b8f99281138a02398b2261795e83a1c6fc510",
"size": "304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "L06/ATMFF/bin/starts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "27282"
},
{
"name": "Python",
"bytes": "139220"
}
],
"symlink_target": ""
} |
import warnings
import functools
__all__ = ["deprecated"]
class deprecated:
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated()
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra : string
to be added to the deprecation messages
"""
# Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
self.extra = extra
def __call__(self, obj):
"""Call method
Parameters
----------
obj : object
"""
if isinstance(obj, type):
return self._decorate_class(obj)
elif isinstance(obj, property):
# Note that this is only triggered properly if the `property`
# decorator comes before the `deprecated` decorator, like so:
#
# @deprecated(msg)
# @property
# def deprecated_attribute_(self):
# ...
return self._decorate_property(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
@functools.wraps(fun)
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__doc__ = self._update_doc(wrapped.__doc__)
# Add a reference to the wrapped function so that we can introspect
# on function arguments in Python 2 (already works in Python 3)
wrapped.__wrapped__ = fun
return wrapped
def _decorate_property(self, prop):
msg = self.extra
@property
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return prop.fget(*args, **kwargs)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def _is_deprecated(func):
"""Helper to check if func is wraped by our deprecated decorator"""
closures = getattr(func, '__closure__', [])
if closures is None:
closures = []
is_deprecated = ('deprecated' in ''.join([c.cell_contents
for c in closures
if isinstance(c.cell_contents, str)]))
return is_deprecated
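# --- Editor's addition: hedged usage sketch, not part of the original module.
# It only exercises `deprecated` and `_is_deprecated` defined above; the
# decorated function below is hypothetical.
if __name__ == '__main__':  # pragma: no cover
    @deprecated("use new_function instead")
    def old_function():
        return 42

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert old_function() == 42
    assert issubclass(caught[-1].category, DeprecationWarning)
    assert _is_deprecated(old_function)
    print(old_function.__doc__)  # "DEPRECATED: use new_function instead"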
| {
"content_hash": "a7fda0da25edb566aef08892e8e84529",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 29.778688524590162,
"alnum_prop": 0.5664739884393064,
"repo_name": "chrsrds/scikit-learn",
"id": "d6f64dda22a11fb4fbbe23abef5632b0aa2aa3ae",
"size": "3633",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/utils/deprecation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5255814"
},
{
"name": "Shell",
"bytes": "4031"
}
],
"symlink_target": ""
} |
import asyncio
from unittest import mock
import pytest
from aiohttp import errors, web
@asyncio.coroutine
def test_simple_server(raw_test_server, test_client):
@asyncio.coroutine
def handler(request):
return web.Response(text=str(request.rel_url))
server = yield from raw_test_server(handler)
client = yield from test_client(server)
resp = yield from client.get('/path/to')
assert resp.status == 200
txt = yield from resp.text()
assert txt == '/path/to'
@asyncio.coroutine
def test_raw_server_not_http_exception(raw_test_server, test_client):
exc = RuntimeError("custom runtime error")
@asyncio.coroutine
def handler(request):
raise exc
logger = mock.Mock()
server = yield from raw_test_server(handler, logger=logger)
client = yield from test_client(server)
resp = yield from client.get('/path/to')
assert resp.status == 500
txt = yield from resp.text()
assert "<h1>500 Internal Server Error</h1>" in txt
logger.exception.assert_called_with(
"Error handling request",
exc_info=exc)
@asyncio.coroutine
def test_raw_server_handler_timeout(raw_test_server, test_client):
exc = asyncio.TimeoutError("error")
@asyncio.coroutine
def handler(request):
raise exc
logger = mock.Mock()
server = yield from raw_test_server(handler, logger=logger)
client = yield from test_client(server)
resp = yield from client.get('/path/to')
assert resp.status == 504
txt = yield from resp.text()
assert "<h1>504 Gateway Timeout</h1>" in txt
logger.debug.assert_called_with("Request handler timed out.")
@asyncio.coroutine
def test_raw_server_do_not_swallow_exceptions(raw_test_server, test_client):
exc = None
@asyncio.coroutine
def handler(request):
raise exc
logger = mock.Mock()
server = yield from raw_test_server(handler, logger=logger)
client = yield from test_client(server)
for _exc, msg in (
(asyncio.CancelledError("error"), 'Request handler cancelled.'),
(errors.ClientDisconnectedError("error"),
'Ignored premature client disconnection #1.')):
exc = _exc
with pytest.raises(errors.ClientResponseError):
yield from client.get('/path/to')
logger.debug.assert_called_with(msg)
@asyncio.coroutine
def test_raw_server_not_http_exception_debug(raw_test_server, test_client):
exc = RuntimeError("custom runtime error")
@asyncio.coroutine
def handler(request):
raise exc
logger = mock.Mock()
server = yield from raw_test_server(handler, logger=logger, debug=True)
client = yield from test_client(server)
resp = yield from client.get('/path/to')
assert resp.status == 500
txt = yield from resp.text()
assert "<h2>Traceback:</h2>" in txt
logger.exception.assert_called_with(
"Error handling request",
exc_info=exc)
def test_create_web_server_with_implicit_loop(loop):
asyncio.set_event_loop(loop)
@asyncio.coroutine
def handler(request):
return web.Response() # pragma: no cover
srv = web.Server(handler)
assert srv._loop is loop
| {
"content_hash": "66d0f4128aa3513ff5e848523736c4a3",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 76,
"avg_line_length": 27.177966101694917,
"alnum_prop": 0.6688493919550982,
"repo_name": "z2v/aiohttp",
"id": "7c223dfb2f05b637b86c22d168629110e190398b",
"size": "3207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_web_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4890"
},
{
"name": "Makefile",
"bytes": "3148"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "1151101"
},
{
"name": "Shell",
"bytes": "2298"
}
],
"symlink_target": ""
} |
from ajax_select import LookupChannel
from django.utils.html import escape
from django.db.models import Q
from .models import Game, Expansion
from django.core.urlresolvers import reverse
class GameLookup(LookupChannel):
model = Game
def get_query(self, q, request):
return Game.objects.filter(name__icontains=q).order_by('name')
def get_result(self, obj):
u""" result is the simple text that is the completion of what the Game typed """
return obj.name
def format_match(self, obj):
""" (HTML) formatted item for display in the dropdown """
return self.format_item_display(obj)
def format_item_display(self, obj):
""" (HTML) formatted item for displaying item in the selected deck area """
url = reverse('games:game_detail', args=[obj.slug])
        url_img = '<img style="" src="http://placehold.it/80x90&text=Sem imagem 40x80">'
if obj.image:
url_img = '<img style="margin-right: 5px;"" src="%s">' % obj.image.url
return u"<div><a href='%s'>%s<i>%s</i></a></div>" % (url, url_img, escape(obj.name))
class ExpansionLookup(LookupChannel):
model = Expansion
def get_query(self, q, request):
return Expansion.objects.filter(name__icontains=q).order_by('name')
def get_result(self, obj):
u""" result is the simple text that is the completion of what the Game typed """
return obj.name
def format_match(self, obj):
""" (HTML) formatted item for display in the dropdown """
return self.format_item_display(obj)
def format_item_display(self, obj):
""" (HTML) formatted item for displaying item in the selected deck area """
url = reverse('games:expansion_detail', args=[obj.slug])
return u"<div><a href='%s'><i>%s</i></a></div>" % (url, escape(obj.name)) | {
"content_hash": "d7c7dbf8afdc6cba19285574ae2baf79",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 92,
"avg_line_length": 37.795918367346935,
"alnum_prop": 0.6420086393088553,
"repo_name": "jomauricio/abgthe",
"id": "dabeef80bb7818d713f422a890553b4a65c26f1f",
"size": "1852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "abgthe/apps/games/lookups.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "389497"
},
{
"name": "HTML",
"bytes": "64517"
},
{
"name": "JavaScript",
"bytes": "2601"
},
{
"name": "Python",
"bytes": "100843"
},
{
"name": "Shell",
"bytes": "3618"
}
],
"symlink_target": ""
} |
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
import logging
import subprocess
from XtDac.DivideAndConquer import XMMWCS
log = logging.getLogger("HardwareUnit")
def hardwareUnitFactory(eventfile):
'''Return an instance of the appropriate HarwareUnit class,
based on the content of the event file.
'''
# Probe which hardware unit we are talking about
# instance the corresponding class and return it
with pyfits.open(eventfile) as f:
instrume = f['EVENTS'].header.get("INSTRUME")
if (instrume == "EPN"):
return PNQuadrant(eventfile)
elif (instrume == "EMOS1" or instrume == "EMOS2"):
return MOSCCD(eventfile)
elif instrume == "ACIS":
return ACISCCD(eventfile)
else:
        raise NotImplementedError("Don't know how to handle instrument %s" % (instrume))
pass
class HardwareUnit(object):
'''
A hardware unit is a part of a detector sharing one set of good time
intervals, and roughly the same background. For example, a quadrant of the PN
detector or one single CCD of one of the MOS are hardware units.
'''
def __init__(self, name, pixelScale,
minX, maxX,
minY, maxY):
# Verify that all inputs are integers
assert isinstance(minX, int)
assert isinstance(maxX, int)
assert isinstance(minY, int)
assert isinstance(maxY, int)
self.name = name
self.minX = minX
self.minY = minY
self.maxX = maxX
self.maxY = maxY
self.pixelScale = float(pixelScale) # arcsec
def getName(self):
return self.name
def getMaxX(self):
return self.maxX
def getMinX(self):
return self.minX
def getMaxY(self):
return self.maxY
def getMinY(self):
return self.minY
def getPixelScale(self):
return self.pixelScale
class ACISCCD(HardwareUnit):
@staticmethod
def get_ccd_name(id):
# From http://cxc.harvard.edu/contrib/jcm/ncoords.ps
ccd_ids = {0: 'I0',
1: 'I1',
2: 'I2',
3: 'I3',
4: 'S0',
5: 'S1',
6: 'S2',
7: 'S3',
8: 'S4',
9: 'S5'}
return ccd_ids[id]
def __init__(self, eventFile):
# Open the event file
with pyfits.open(eventFile) as f:
# probe which CCD we are talking about
ccd_id = f['EVENTS'].data.field("ccd_id")
minccd, maxccd = ccd_id.min(), ccd_id.max()
if minccd != maxccd:
raise RuntimeError("Provided event file contains events from more than one ACIS CCD.")
if minccd > 9:
raise RuntimeError("The provided event file is not a ACIS event file")
X = f['EVENTS'].data.field("X")
Y = f['EVENTS'].data.field("Y")
name = "ACIS (CCD %s)" % self.get_ccd_name(minccd)
self.ccd_id = minccd
# ACIS pixels are 0.492 arcsec
super(ACISCCD, self).__init__(name, 0.492,
int(X.min()), int(X.max()),
int(Y.min()), int(Y.max()))
def get_psf(self, xpos, ypos, eventfile, outfile):
"""
Produce an image containing the PSF for the given position
:return: the filename containing the image
"""
# First run the MARX simulation to get the image
# Get R.A., Dec. of the source
wcs = XMMWCS.XMMWCS(eventfile)
ra, dec = wcs.xy2sky([[xpos, ypos]])[0]
# Figure out the name of the detector (ACIS-I or ACIS-S)
# CCDs 0 to 3 are ACIS-I, the others (up to 9) are ACIS-S
if self.ccd_id <= 3:
detector_type = 'ACIS-I'
else:
detector_type = 'ACIS-S'
# Get the pointing
ra_pnt = wcs.ra_nom
dec_pnt = wcs.dec_nom
rot_pnt = wcs.rotation
# Now get the position of the SIM (http://cxc.harvard.edu/ciao/threads/marx_sim/)
header = pyfits.getheader(eventfile, "EVENTS")
obs_sim_x = header['SIM_X']
obs_sim_y = header['SIM_Y']
obs_sim_z = header['SIM_Z']
if detector_type == 'ACIS-I':
marx_sim_x, marx_sim_y, marx_sim_z = -0.7823481983384, 0, -233.5924630914
else:
marx_sim_x, marx_sim_y, marx_sim_z = -0.68426746699586, 0, -190.1325231040
delta_sim_x = obs_sim_x - marx_sim_x
delta_sim_y = obs_sim_y - marx_sim_y
delta_sim_z = obs_sim_z - marx_sim_z
# Get the DY and DZ from the eventfile
try:
dx = 0
dy = header['DY_AVG']
dz = header['DZ_AVG']
except KeyError:
raise RuntimeError("DY and DZ keywords not found in %s. You have to run r4_header_update on it.")
# Finally we can compute the offsets for MARX
DetOffsetX = dx + delta_sim_x
DetOffsetY = dy + delta_sim_y
DetOffsetZ = dz + delta_sim_z
# Run the simulation detecting 100000 events (using a negative NumRays means that the simulation
# will continue until 100000 events are detected)
cmd_line = "marx ExposureTime=0.0 NumRays=-10000 GratingType=NONE SourceRA=%s SourceDEC=%s MinEnergy=1.5 " \
"MaxEnergy=1.5 SourceFlux=1 SourceType=POINT OutputDir=__point DetectorType=%s " \
"RA_Nom=%s Dec_Nom=%s Roll_Nom=%s DetOffsetX=%s DetOffsetY=%s DetOffsetZ=%s Verbose=no" \
% (ra, dec, detector_type, ra_pnt, dec_pnt, rot_pnt, DetOffsetX, DetOffsetY, DetOffsetZ)
log.debug(cmd_line)
subprocess.check_call(cmd_line, shell=True)
# Now generate the FITS file
cmd_line = 'marx2fits __point __sim.fits'
log.debug(cmd_line)
_ = subprocess.check_output(cmd_line, shell=True)
# Finally generate the FITS image
xmin, xmax = xpos - 500, xpos + 500
ymin, ymax = ypos - 500, ypos + 500
cmd_line = "f2dhisto __sim.fits %s 1 1 X Y '%s,%s' '%s,%s' clobber=yes" % (outfile, xmin, xmax, ymin, ymax)
log.debug(cmd_line)
_ = subprocess.check_output(cmd_line, shell=True)
return outfile
class XMMCCD(object):
@staticmethod
def get_psf(xpos, ypos, eventfile, outfile):
"""
Produce an image containing the PSF for the given position
:return: the filename containing the image
"""
pars = {}
pars['output'] = outfile
pars['region'] = '"(X,Y) IN circle(%s,%s)"' % (xpos, ypos)
pars['image'] = eventfile
pars['xsize'] = 500
pars['ysize'] = 500
pars['energy'] = '"200 600 1500 6000 10000"'
pars['level'] = 'ELLBETA'
pars['-V'] = 0
cmdLine = 'psfgen %s' % (" ".join(['%s=%s' % (k, v) for k, v in pars.iteritems()]))
log.debug(cmdLine)
subprocess.check_call(cmdLine, shell=True)
return outfile
class PNQuadrant(HardwareUnit, XMMCCD):
def __init__(self, eventFile):
quadrant = None
# Open the event file
with pyfits.open(eventFile, memmap=False) as f:
# probe which quadrant we are talking about
ccdnr = f['EVENTS'].data.field("CCDNR")
(minccd, maxccd) = (ccdnr.min(), ccdnr.max())
if (minccd >= 1 and maxccd <= 3):
# Quadrant 1
quadrant = 1
(mindetx, maxdetx) = (-18283, -2260)
(mindety, maxdety) = (-1090, 15325)
elif (minccd >= 4 and maxccd <= 6):
# Quadrant 2
quadrant = 2
(mindetx, maxdetx) = (-2150, 13880)
(mindety, maxdety) = (-1090, 15325)
elif (minccd >= 7 and maxccd <= 9):
# Quadrant 3
quadrant = 3
(mindetx, maxdetx) = (-2150, 13880)
(mindety, maxdety) = (-17527, -1110)
elif (minccd >= 10 and maxccd <= 12):
# Quadrant 4
quadrant = 4
(mindetx, maxdetx) = (-18283, -2260)
(mindety, maxdety) = (-17527, -1110)
else:
raise RuntimeError(
"The provided event file %s contains events from more than one PN quadrant." % (eventFile))
X = f['EVENTS'].data.field("X")
Y = f['EVENTS'].data.field("Y")
name = "EPIC PN (quadrant %i)" % quadrant
super(PNQuadrant, self).__init__(name, 0.05,
int(X.min()), int(X.max()),
int(Y.min()), int(Y.max())
)
class MOSCCD(HardwareUnit, XMMCCD):
def __init__(self, eventFile):
ccdNumber = None
# Open the event file
with pyfits.open(eventFile) as f:
# Get the CCD number
ccdnr = f['EVENTS'].data.field("CCDNR")
# Verify that there is only one CCD number in the event file
# otherwise throw an exception
uniqueCCDnumbers = set(ccdnr)
if (len(uniqueCCDnumbers) > 1):
raise RuntimeError("The provided event file contains events from more than one MOS CCD!")
else:
ccdNumber = ccdnr[0]
pass
# Read the event coordinates
X = f['EVENTS'].data.field("X")
Y = f['EVENTS'].data.field("Y")
pass
name = "EPIC MOS (CCD %i)" % ccdNumber
super(MOSCCD, self).__init__(name, 0.05,
int(X.min()), int(X.max()),
int(Y.min()), int(Y.max())
)
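# --- Editor's addition: hedged usage sketch, not part of the original module.
# The event-file name below is hypothetical; it must be a FITS file with an
# EVENTS extension whose INSTRUME keyword is EPN, EMOS1/2 or ACIS.
#
#   hwu = hardwareUnitFactory("pn_quadrant1_events.fits")
#   print(hwu.getName())        # e.g. "EPIC PN (quadrant 1)"
#   print(hwu.getPixelScale())  # 0.05 arcsec for XMM units, 0.492 for ACIS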
| {
"content_hash": "de7e67b304435f9e5f37ca6cb9f8ada2",
"timestamp": "",
"source": "github",
"line_count": 331,
"max_line_length": 116,
"avg_line_length": 29.942598187311177,
"alnum_prop": 0.5262839269498537,
"repo_name": "giacomov/XtDac",
"id": "22752878b5c8e424600f49072902720eb6cad79b",
"size": "9963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "XtDac/DivideAndConquer/HardwareUnit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "337802"
}
],
"symlink_target": ""
} |
import argparse
import os
import json
# List of supported icon sets
# These sets live in stashboard/static/images/icons
ICON_SETS = ["fugue", "iconic"]
parser = argparse.ArgumentParser(description="")
parser.add_argument("directory", type=str,
help="Stashboard image directory")
args = parser.parse_args()
# Icons path
icons = os.path.join(args.directory, "icons")
image_set = set()
images = []
for p in os.listdir(icons):
if p in ICON_SETS:
for i in os.listdir(os.path.join(icons, p)):
if i[0] != '.': #ignore hidden files
filename, ext = os.path.splitext(i)
if filename in image_set:
name = filename + "_alt"
else:
name = filename
image_set.add(name)
image = {
"name": name,
"set": p,
"url": os.path.join("icons", p, i),
}
images.append(image)
print json.dumps(images, indent=4)
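# Editor's addition (illustrative only; the icon filename is hypothetical):
# the script emits a JSON list shaped like
#   [{"name": "acorn", "set": "fugue", "url": "icons/fugue/acorn.png"}, ...]
# where names that collide across icon sets get an "_alt" suffix, per the loop above.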
| {
"content_hash": "323afc51acbb2562857cc387f7fbadc3",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 55,
"avg_line_length": 22.48936170212766,
"alnum_prop": 0.522232734153264,
"repo_name": "skynet/stashboard",
"id": "ab7b2a8b0da5ae56b55b0313d5fab7d710beeae7",
"size": "2172",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "scripts/gen_fixture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22720"
},
{
"name": "HTML",
"bytes": "52767"
},
{
"name": "JavaScript",
"bytes": "6222"
},
{
"name": "Python",
"bytes": "646761"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PygcUI'
copyright = '2012, Erik Youngren'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PygcUIdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PygcUI.tex', 'PygcUI Documentation',
'Erik Youngren', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pygcui', 'PygcUI Documentation',
['Erik Youngren'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PygcUI', 'PygcUI Documentation',
'Erik Youngren', 'PygcUI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "42d64e89a79e4225a964b878463415de",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 81,
"avg_line_length": 32.12663755458515,
"alnum_prop": 0.700829142313443,
"repo_name": "Artanis/pygcui",
"id": "869473675baedf30b16ff21d190bf43be0dfdcfc",
"size": "7797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "33199"
},
{
"name": "Shell",
"bytes": "5206"
}
],
"symlink_target": ""
} |
from .defaultdict import DefaultDictType
from .set import SetType
from .tuple import TupleType
from .dict import DictType
from .list import ListType
from .instance import InstanceType
from .datetime_ import DatetimeType
from .timedelta import TimedeltaType
from .bytearray import ByteArrayType
from .bytes import BytesType
class _TypesManager(object):
""" Allows the definition of the types supported by the package
"""
def __init__(self):
self._types = [
SetType(),
TupleType(),
DefaultDictType(),
DictType(),
ListType(),
DatetimeType(),
TimedeltaType(),
ByteArrayType(),
BytesType(),
InstanceType()
]
def get_types(self):
return self._types
def get_str_type(self, raw_type):
""" Provides the string name for a given type
"""
for type_ in (bool, int, float, str):
if type_ == raw_type:
return type_.__name__
for type_ in self._types:
if type_.get_type() == raw_type:
return type_.__class__.__name__
def get_type(self, str_type):
""" Provides a type from its string name
"""
for type_ in (bool, int, float, str):
if str_type == type_.__name__:
return type_
for type_ in self._types:
if type_.__class__.__name__ == str_type:
return type_.get_type()
# Singleton instance to _TypesManager
TypesManager = _TypesManager()
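# --- Editor's addition: hedged usage sketch, not part of the original module.
if __name__ == '__main__':  # pragma: no cover
    # Builtins map to and from their plain names...
    assert TypesManager.get_type('int') is int
    assert TypesManager.get_str_type(int) == 'int'
    # ...while wrapped containers go through the *Type classes, e.g.
    # get_str_type(dict) == 'DictType', assuming DictType.get_type() returns dict.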
| {
"content_hash": "c61d709bd4703e622d10c534990cc4df",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 67,
"avg_line_length": 27.54385964912281,
"alnum_prop": 0.5611464968152866,
"repo_name": "nioinnovation/safepickle",
"id": "18391c81e6408c2faba18f19398f2ddfba0da4c9",
"size": "1570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "safepickle/types/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21104"
}
],
"symlink_target": ""
} |
import os
import sys
try:
from ros_buildfarm.wrapper.apt import main
except ImportError:
sys.path.insert(
0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from ros_buildfarm.wrapper.apt import main
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "cdfb91a97ea4a1cf21cf8b50ab91929b",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 23.583333333333332,
"alnum_prop": 0.6607773851590106,
"repo_name": "ruffsl/ros_buildfarm",
"id": "11b198da7dd10643e74e55845b90c46bf113cbcb",
"size": "909",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/wrapper/apt.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5231"
},
{
"name": "EmberScript",
"bytes": "228125"
},
{
"name": "Groovy",
"bytes": "1561"
},
{
"name": "JavaScript",
"bytes": "12982"
},
{
"name": "Python",
"bytes": "478017"
},
{
"name": "Shell",
"bytes": "9651"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appfwpolicy_stats(base_resource) :
ur""" Statistics for application firewall policy resource.
"""
def __init__(self) :
self._name = ""
self._clearstats = ""
self._pipolicyhits = 0
self._pipolicyhitsrate = 0
self._pipolicyundefhits = 0
self._pipolicyundefhitsrate = 0
@property
def name(self) :
ur"""Name of the application firewall policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the application firewall policy.
"""
try :
self._name = name
except Exception as e:
raise e
@property
def clearstats(self) :
ur"""Clear the statsistics / counters.<br/>Possible values = basic, full.
"""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
ur"""Clear the statsistics / counters
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def pipolicyundefhitsrate(self) :
ur"""Rate (/s) counter for pipolicyundefhits.
"""
try :
return self._pipolicyundefhitsrate
except Exception as e:
raise e
@property
def pipolicyundefhits(self) :
ur"""Number of undef hits on the policy.
"""
try :
return self._pipolicyundefhits
except Exception as e:
raise e
@property
def pipolicyhitsrate(self) :
ur"""Rate (/s) counter for pipolicyhits.
"""
try :
return self._pipolicyhitsrate
except Exception as e:
raise e
@property
def pipolicyhits(self) :
ur"""Number of hits on the policy.
"""
try :
return self._pipolicyhits
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appfwpolicy_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appfwpolicy
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
ur""" Use this API to fetch the statistics of all appfwpolicy_stats resources that are configured on netscaler.
"""
try :
obj = appfwpolicy_stats()
if not name :
response = obj.stat_resources(service, option_)
else :
obj.name = name
response = obj.stat_resource(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
basic = "basic"
full = "full"
class appfwpolicy_response(base_response) :
def __init__(self, length=1) :
self.appfwpolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appfwpolicy = [appfwpolicy_stats() for _ in range(length)]
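# --- Editor's addition: hedged usage sketch, not part of the original module.
# Assumes the SDK's nitro_service client class and a reachable NetScaler; the
# address and credentials below are placeholders.
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("192.0.2.10", "http")
#   client.login("nsroot", "nsroot")
#   for stat in appfwpolicy_stats.get(client):
#       print stat.name, stat.pipolicyhits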
| {
"content_hash": "98c56aaf4e05b95e491ed4ba35743552",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 134,
"avg_line_length": 25.34013605442177,
"alnum_prop": 0.6915436241610738,
"repo_name": "atopuzov/nitro-python",
"id": "712820c63e78c4ecfdc3bf7cca8747d8b37282df",
"size": "4339",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/stat/appfw/appfwpolicy_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10881939"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
from .testcases import APITestCase, TestCase
__all__ = ['APITestCase', 'TestCase']
| {
"content_hash": "a12410fb3247ad14685d92a67f2d9337",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 44,
"avg_line_length": 28,
"alnum_prop": 0.7142857142857143,
"repo_name": "Inter-Actief/alexia",
"id": "444032e2c29b01d2573f2be11a555391109ae1dd",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alexia/test/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17029"
},
{
"name": "HTML",
"bytes": "179103"
},
{
"name": "JavaScript",
"bytes": "511580"
},
{
"name": "Python",
"bytes": "372488"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['MovingMedian'] , ['Seasonal_Second'] , ['MLP'] );
| {
"content_hash": "95b044eb2e55931199aa0b3448356f98",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 89,
"avg_line_length": 40.5,
"alnum_prop": 0.7160493827160493,
"repo_name": "antoinecarme/pyaf",
"id": "2413370bf7c29d1572938bc7f4d7e30bfc7fcb36",
"size": "162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_MovingMedian_Seasonal_Second_MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import optparse
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils.deprecation import RemovedInPip10Warning
from pip import cmdoptions
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
SUPPORTED_OPTIONS = [
cmdoptions.constraints,
cmdoptions.editable,
cmdoptions.requirements,
cmdoptions.no_index,
cmdoptions.index_url,
cmdoptions.find_links,
cmdoptions.extra_index_url,
cmdoptions.allow_external,
cmdoptions.allow_all_external,
cmdoptions.no_allow_external,
cmdoptions.allow_unsafe,
cmdoptions.no_allow_unsafe,
cmdoptions.use_wheel,
cmdoptions.no_use_wheel,
cmdoptions.always_unzip,
cmdoptions.no_binary,
cmdoptions.only_binary,
cmdoptions.pre,
cmdoptions.process_dependency_links,
cmdoptions.trusted_host,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
cmdoptions.install_options,
cmdoptions.global_options
]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
session=None, constraint=False, wheel_cache=None):
"""Parse a requirements file and yield InstallRequirement instances.
:param filename: Path or url of requirements file.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: Global options.
:param session: Instance of pip.download.PipSession.
:param constraint: If true, parsing a constraint file rather than
requirements file.
:param wheel_cache: Instance of pip.wheel.WheelCache
"""
if session is None:
raise TypeError(
"parse_requirements() missing 1 required keyword argument: "
"'session'"
)
_, content = get_file_content(
filename, comes_from=comes_from, session=session
)
lines = content.splitlines()
lines = ignore_comments(lines)
lines = join_lines(lines)
lines = skip_regex(lines, options)
for line_number, line in enumerate(lines, 1):
req_iter = process_line(line, filename, line_number, finder,
comes_from, options, session, wheel_cache,
constraint=constraint)
for req in req_iter:
yield req
def process_line(line, filename, line_number, finder=None, comes_from=None,
options=None, session=None, wheel_cache=None,
constraint=False):
"""Process a single requirements line; This can result in creating/yielding
requirements, or updating the finder.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
affect the finder.
:param constraint: If True, parsing a constraints file.
"""
parser = build_parser()
defaults = parser.get_default_values()
defaults.index_url = None
if finder:
# `finder.format_control` will be updated during parsing
defaults.format_control = finder.format_control
args_str, options_str = break_args_options(line)
opts, _ = parser.parse_args(shlex.split(options_str), defaults)
# preserve for the nested code path
line_comes_from = '%s %s (line %s)' % (
'-c' if constraint else '-r', filename, line_number)
# yield a line requirement
if args_str:
isolated = options.isolated_mode if options else False
if options:
cmdoptions.check_install_build_global(options, opts)
# get the options that apply to requirements
req_options = {}
for dest in SUPPORTED_OPTIONS_REQ_DEST:
if dest in opts.__dict__ and opts.__dict__[dest]:
req_options[dest] = opts.__dict__[dest]
yield InstallRequirement.from_line(
args_str, line_comes_from, constraint=constraint,
isolated=isolated, options=req_options, wheel_cache=wheel_cache
)
# yield an editable requirement
elif opts.editables:
isolated = options.isolated_mode if options else False
default_vcs = options.default_vcs if options else None
yield InstallRequirement.from_editable(
opts.editables[0], comes_from=line_comes_from,
constraint=constraint, default_vcs=default_vcs, isolated=isolated,
wheel_cache=wheel_cache
)
# parse a nested requirements file
elif opts.requirements or opts.constraints:
if opts.requirements:
req_path = opts.requirements[0]
nested_constraint = False
else:
req_path = opts.constraints[0]
nested_constraint = True
# original file is over http
if SCHEME_RE.search(filename):
# do a url join so relative paths work
req_path = urllib_parse.urljoin(filename, req_path)
# original file and nested file are paths
elif not SCHEME_RE.search(req_path):
# do a join so relative paths work
req_dir = os.path.dirname(filename)
req_path = os.path.join(os.path.dirname(filename), req_path)
# TODO: Why not use `comes_from='-r {} (line {})'` here as well?
parser = parse_requirements(
req_path, finder, comes_from, options, session,
constraint=nested_constraint, wheel_cache=wheel_cache
)
for req in parser:
yield req
# set finder options
elif finder:
if opts.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if opts.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if opts.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if opts.index_url:
finder.index_urls = [opts.index_url]
if opts.use_wheel is False:
finder.use_wheel = False
pip.index.fmt_ctl_no_use_wheel(finder.format_control)
if opts.no_index is True:
finder.index_urls = []
if opts.extra_index_urls:
finder.index_urls.extend(opts.extra_index_urls)
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
finder.find_links.append(value)
if opts.pre:
finder.allow_all_prereleases = True
if opts.process_dependency_links:
finder.process_dependency_links = True
if opts.trusted_hosts:
finder.secure_origins.extend(
("*", host, "*") for host in opts.trusted_hosts)
def break_args_options(line):
"""Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
"""
tokens = line.split(' ')
args = []
options = tokens[:]
for token in tokens:
if token.startswith('-') or token.startswith('--'):
break
else:
args.append(token)
options.pop(0)
return ' '.join(args), ' '.join(options)
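# Editor's addition - a hedged illustration of break_args_options(); the project
# name and option below are made up:
#
#   >>> break_args_options("SomeProject==1.0 --global-option='--no-user-cfg'")
#   ('SomeProject==1.0', "--global-option='--no-user-cfg'")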
def build_parser():
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self, msg):
raise RequirementsFileParseError(msg)
parser.exit = parser_exit
return parser
def join_lines(iterator):
"""
Joins a line ending in '\' with the previous line.
"""
lines = []
for line in iterator:
if not line.endswith('\\'):
if lines:
lines.append(line)
yield ''.join(lines)
lines = []
else:
yield line
else:
lines.append(line.strip('\\'))
# TODO: handle space after '\'.
# TODO: handle '\' on last line.
def ignore_comments(iterator):
"""
Strips and filters empty or commented lines.
"""
for line in iterator:
line = COMMENT_RE.sub('', line)
line = line.strip()
if line:
yield line
def skip_regex(lines, options):
"""
Optionally exclude lines that match '--skip-requirements-regex'
"""
skip_regex = options.skip_requirements_regex if options else None
if skip_regex:
lines = filterfalse(re.compile(skip_regex).search, lines)
return lines
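# --- Editor's addition: hedged usage sketch, not part of the original module.
# Assumes pip.download.PipSession is available in this pip vintage and that a
# ./requirements.txt file exists.
#
#   from pip.download import PipSession
#   for req in parse_requirements('requirements.txt', session=PipSession()):
#       print(req.name)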
| {
"content_hash": "95f6206714ee9183d4b275e7b6fb5ca9",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 80,
"avg_line_length": 34.467532467532465,
"alnum_prop": 0.6276375282592314,
"repo_name": "prasaianooz/pip",
"id": "4b3f683c6db8ce8195772d35bd88c6fe3cf863e6",
"size": "10616",
"binary": false,
"copies": "9",
"ref": "refs/heads/develop",
"path": "pip/req/req_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "2200383"
},
{
"name": "Shell",
"bytes": "2326"
}
],
"symlink_target": ""
} |
from ginga import GingaPlugin
from ginga.qtw.QtHelp import QtGui, QtCore
class Debug(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(Debug, self).__init__(fv)
def build_gui(self, container):
rvbox = container
self.msgFont = self.fv.getFont("fixedFont", 14)
tw = QtGui.QTextEdit()
#tw.setLineWrapMode(??)
## tw.set_left_margin(4)
## tw.set_right_margin(4)
tw.setReadOnly(True)
## tw.set_left_margin(4)
## tw.set_right_margin(4)
tw.setCurrentFont(self.msgFont)
self.tw = tw
self.history = []
self.histmax = 10
sw = QtGui.QScrollArea()
sw.setWidgetResizable(True)
#sw.set_border_width(2)
sw.setWidget(self.tw)
rvbox.addWidget(sw, stretch=1)
sw.show()
self.entry = QtGui.QLineEdit()
rvbox.addWidget(self.entry, stretch=0)
self.entry.returnPressed.connect(self.command_cb)
def reloadLocalPlugin(self, plname):
self.fv.mm.loadModule(plname)
for chname in self.fv.get_channelNames():
chinfo = self.fv.get_channelInfo(chname)
chinfo.opmon.reloadPlugin(plname, chinfo=chinfo)
return True
def reloadGlobalPlugin(self, plname):
gpmon = self.fv.gpmon
pInfo = gpmon.getPluginInfo(plname)
gpmon.stop_plugin(pInfo)
self.fv.update_pending(0.5)
self.fv.mm.loadModule(plname)
gpmon.reloadPlugin(plname)
self.fv.start_global_plugin(plname)
return True
def command(self, cmdstr):
# Evaluate command
try:
result = eval(cmdstr)
except Exception, e:
result = str(e)
# TODO: add traceback
# Append command to history
self.history.append('>>> ' + cmdstr + '\n' + str(result))
# Remove all history past history size
self.history = self.history[-self.histmax:]
# Update text widget
self.tw.setText('\n'.join(self.history))
def command_cb(self):
w = self.entry
# TODO: implement a readline editing widget
cmdstr = str(w.text()).strip()
self.command(cmdstr)
w.setText("")
def __str__(self):
return 'debug'
#END
| {
"content_hash": "17c28393e0f66d49cd45518ceb758597",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 65,
"avg_line_length": 28.176470588235293,
"alnum_prop": 0.5774530271398748,
"repo_name": "Rbeaty88/ginga",
"id": "abe2b4e85b769e0dbcdec59469980ae48718a5bb",
"size": "2645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ginga/qtw/plugins/Debug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2102613"
}
],
"symlink_target": ""
} |
'''
Find all possible combinations of k numbers that add up to a number n, given that only numbers from 1 to 9 can be used and each combination should be a unique set of numbers.
'''
class Solution(object):
def combinationSum3(self, size, target):
"""
:type size: int
:type target: int
:rtype: List[List[int]]
"""
max_num = target
self.output = list()
self.makeCombination(1, 9, target, size, list())
return self.output
def makeCombination(self, start, end, target, left_size, a_comb):
if (target == 0 and 0 == left_size):
self.output.append(a_comb[:])
if (left_size * start <= target):
for index in range(start, end+1):
a_comb.append(index)
self.makeCombination(index+1, end, target-index, left_size-1, a_comb)
a_comb.pop()
solution = Solution()
print solution.combinationSum3(3, 7)
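# Expected output for size=3, target=7: [[1, 2, 4]] (the only strictly
# increasing triple of distinct digits 1-9 summing to 7).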
| {
"content_hash": "d7e0c3fb4be109b98bbf883c818765a6",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 174,
"avg_line_length": 35.407407407407405,
"alnum_prop": 0.5910041841004184,
"repo_name": "shub0/algorithm-data-structure",
"id": "95321bce7fef2c1db44b50942fe88c2dd80817c4",
"size": "956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/combinations_sum_3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "166293"
},
{
"name": "Python",
"bytes": "487573"
}
],
"symlink_target": ""
} |
"""Test configs for l2norm."""
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_l2norm_tests(options):
"""Make a set of tests to do l2norm."""
# Chose a set of parameters
test_parameters = [{
"input_shape": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],
"dim": [0, 1, 2, 3, [2, 3], -2],
"epsilon": [None, 1e-12, 1e-3],
"fully_quantize": [False],
}, {
"input_shape": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],
"dim": [3],
"epsilon": [None, 1e-12],
"fully_quantize": [True],
  }, { # use another group of tests so the dim is set to fuse to tfl.l2norm
"input_shape": [[5, 7]],
"dim": [1],
"epsilon": [None, 1e-12],
"fully_quantize": [True],
}]
def build_graph(parameters):
input_tensor = tf.compat.v1.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
if parameters["epsilon"]:
out = tf.nn.l2_normalize(
input_tensor, parameters["dim"], epsilon=parameters["epsilon"])
else:
out = tf.nn.l2_normalize(input_tensor, parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-1, max_value=1)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=9)
| {
"content_hash": "d35a421fc1518a772c239cd7fdfccab6",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 34.19230769230769,
"alnum_prop": 0.610236220472441,
"repo_name": "tensorflow/tensorflow-pywrap_saved_model",
"id": "902895f046e9102ad3e7dd65f2ab9af604cfba5e",
"size": "2467",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/lite/testing/op_tests/l2norm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1392153"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125860957"
},
{
"name": "CMake",
"bytes": "182324"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2123155"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11347297"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300208"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42738981"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9214"
},
{
"name": "Shell",
"bytes": "621427"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7720442"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""
Tests For Capacity Weigher.
"""
import mock
from oslo_config import cfg
from cinder import context
from cinder.openstack.common.scheduler import weights
from cinder.scheduler.weights import capacity
from cinder import test
from cinder.tests.unit.scheduler import fakes
from cinder.volume import utils
CONF = cfg.CONF
class CapacityWeigherTestCase(test.TestCase):
def setUp(self):
super(CapacityWeigherTestCase, self).setUp()
self.host_manager = fakes.FakeHostManager()
self.weight_handler = weights.HostWeightHandler(
'cinder.scheduler.weights')
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {'size': 1}
return self.weight_handler.get_weighed_objects(
[capacity.CapacityWeigher],
hosts,
weight_properties)[0]
@mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic')
def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False):
ctxt = context.get_admin_context()
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic,
disabled=disabled)
host_states = self.host_manager.get_all_host_states(ctxt)
_mock_service_get_all_by_topic.assert_called_once_with(
ctxt, CONF.volume_topic, disabled=disabled)
return host_states
# If thin_provisioning_support = False, use the following formula:
# free = free_space - math.floor(total * reserved)
# Otherwise, use the following formula:
# free = (total * host_state.max_over_subscription_ratio
# - host_state.provisioned_capacity_gb
# - math.floor(total * reserved))
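    # Editor's note: worked example of the thin-provisioning branch above using
    # host2's fake capacities (total=2048, over-subscription ratio=1.5,
    # provisioned=1748, reserved=0.1):
    #   2048 * 1.5 - 1748 - math.floor(2048 * 0.1) = 3072 - 1748 - 204 = 1120,
    # which is the weight asserted in the tests below.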
def test_default_of_spreading_first(self):
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=1024-math.floor(1024*0.1)=922
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=2048*1.5-1748-math.floor(2048*0.1)=1120
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=256-512*0=256
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=2048*1.0-2047-math.floor(2048*0.05)=-101
# host5: free_capacity_gb=unknown free=-1
# so, host2 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1120.0)
self.assertEqual(
utils.extract_host(weighed_host.obj.host), 'host2')
def test_capacity_weight_multiplier1(self):
self.flags(capacity_weight_multiplier=-1.0)
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=-(1024-math.floor(1024*0.1))=-922
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=-(256-512*0)=-256
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=-(2048*1.0-2047-math.floor(2048*0.05))=101
# host5: free_capacity_gb=unknown free=-float('inf')
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 101.0)
self.assertEqual(
utils.extract_host(weighed_host.obj.host), 'host4')
def test_capacity_weight_multiplier2(self):
self.flags(capacity_weight_multiplier=2.0)
hostinfo_list = self._get_all_hosts()
# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
# free=(1024-math.floor(1024*0.1))*2=1844
# host2: thin_provisioning_support = True
# free_capacity_gb=300,
# free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
# host3: thin_provisioning_support = False
# free_capacity_gb=512, free=(256-512*0)*2=512
# host4: thin_provisioning_support = True
# free_capacity_gb=200,
# free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
# host5: free_capacity_gb=unknown free=-2
# so, host2 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1120.0 * 2)
self.assertEqual(
utils.extract_host(weighed_host.obj.host), 'host2')
| {
"content_hash": "10873bc5918ba2bffd896bc55a4641b2",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 77,
"avg_line_length": 41.04347826086956,
"alnum_prop": 0.613771186440678,
"repo_name": "julianwang/cinder",
"id": "25d8909e5e1ae31dc33e19dfe460e230b66ec88a",
"size": "5360",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/scheduler/test_capacity_weigher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10718052"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
import time
import random
START = time.time()
def resetTimer():
global START
START = time.time()
def readTimerMilliseconds():
t = time.time() - START
return round(t * 1000)
def makeArray(strlen, arrlen):
arr = []
rand = random.Random()
alphabet = "abcdefghijklmnopqrstuvwxyz"
for i in range(arrlen):
s = "".join(rand.choice(alphabet) for j in range(strlen))
arr.append(s)
return arr
| {
"content_hash": "5f50870d154390465772c0b2582c3ebc",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 65,
"avg_line_length": 20,
"alnum_prop": 0.634090909090909,
"repo_name": "kenpu/2014-01-csci2020u",
"id": "2063d7637e7e882543e9048f2043c5949e2d19fd",
"size": "440",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lecture-code/2014-01-30-merge-sort/python/helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "271"
},
{
"name": "Go",
"bytes": "9358"
},
{
"name": "Java",
"bytes": "20490"
},
{
"name": "Python",
"bytes": "5203"
},
{
"name": "Shell",
"bytes": "191"
}
],
"symlink_target": ""
} |
from taiga.requestmaker import RequestMaker
from taiga.models import Issue, Issues
import unittest
from mock import patch
from .tools import create_mock_json
from .tools import MockResponse
import six
if six.PY2:
import_open = '__builtin__.open'
else:
import_open = 'builtins.open'
class TestIssues(unittest.TestCase):
@patch('taiga.requestmaker.RequestMaker.get')
def test_list_attachments(self, mock_requestmaker_get):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
Issue(rm, id=1).list_attachments()
mock_requestmaker_get.assert_called_with(
'issues/attachments',
query={"object_id": 1},
)
@patch('taiga.requestmaker.RequestMaker.post')
def test_upvote(self, mock_requestmaker_post):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
issue = Issue(rm, id=1)
self.assertEqual(issue.upvote().id, 1)
mock_requestmaker_post.assert_called_with(
'/{endpoint}/{id}/upvote',
endpoint='issues', id=1
)
@patch('taiga.requestmaker.RequestMaker.post')
def test_downvote(self, mock_requestmaker_post):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
issue = Issue(rm, id=1)
self.assertEqual(issue.downvote().id, 1)
mock_requestmaker_post.assert_called_with(
'/{endpoint}/{id}/downvote',
endpoint='issues', id=1
)
@patch('taiga.requestmaker.RequestMaker.post')
def test_issue_creation(self, mock_requestmaker_post):
mock_requestmaker_post.return_value = MockResponse(200,
create_mock_json('tests/resources/issue_details_success.json'))
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
issue = Issues(rm).create(1, 2, 3, 4, 5, 6)
self.assertTrue(isinstance(issue, Issue))
@patch('taiga.requestmaker.RequestMaker.post')
def test_issue_import(self, mock_requestmaker_post):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
issue = Issues(rm).import_(1, 'subject', 'Normal', 'Closed', 'Normal', 'Wishlist')
mock_requestmaker_post.assert_called_with(
'/{endpoint}/{id}/{type}', type='issue', payload={'type': 'Normal',
'project': 1,
'subject': 'subject',
'priority': 'Normal',
'status': 'Closed',
'severity': 'Wishlist'},
endpoint='importer', id=1
)
@patch(import_open)
@patch('taiga.models.base.ListResource._new_resource')
def test_file_attach(self, mock_new_resource, mock_open):
fd = open('tests/resources/tasks_list_success.json')
mock_open.return_value = fd
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
issue = Issue(rm, id=1, project=1)
issue.attach('tests/resources/tasks_list_success.json')
mock_new_resource.assert_called_with(
files={'attached_file': fd},
payload={'project': 1, 'object_id': 1}
)
@patch('taiga.models.base.InstanceResource.update')
def test_add_comment(self, mock_update):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
issue = Issue(rm, id=1)
issue.add_comment('hola')
mock_update.assert_called_with(
comment='hola'
)
| {
"content_hash": "be7a14aa415a1f327cb3d1986278dd46",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 90,
"avg_line_length": 41.11494252873563,
"alnum_prop": 0.5705898797875314,
"repo_name": "mlq/python-taiga",
"id": "3131aff84adb0a96f7013272b797663d1ca1fd89",
"size": "3577",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_issues.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144381"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
} |
import unittest
from unittest import mock
from . import model, signing, test_config
@mock.patch('signing.commands.lenient_run_command_output')
@mock.patch('signing.commands.macos_version', return_value=[10, 15])
class TestLinkerSignedArm64NeedsForce(unittest.TestCase):
def test_oserror(self, macos_version, lenient_run_command_output):
lenient_run_command_output.return_value = (None, None, None)
self.assertFalse(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_called_once()
def test_unsigned(self, macos_version, lenient_run_command_output):
lenient_run_command_output.return_value = (
1, b'', b'test: code object is not signed at all\n')
self.assertFalse(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_called_once()
def test_not_linker_signed(self, macos_version, lenient_run_command_output):
lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20100 size=592 flags=0x2(adhoc) hashes=13+2 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements count=0 size=12
''')
self.assertFalse(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_called_once()
def test_linker_signed_10_15(self, macos_version,
lenient_run_command_output):
lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20400 size=512 flags=0x20002(adhoc,???) hashes=13+0 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements=none
''')
self.assertTrue(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_called_once()
def test_linker_signed_10_16(self, macos_version,
lenient_run_command_output):
# 10.16 is what a Python built against an SDK < 11.0 will see 11.0 as.
macos_version.return_value = [10, 16]
lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20400 size=250 flags=0x20002(adhoc,linker-signed) hashes=5+0 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements=none
''')
self.assertFalse(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_not_called()
def test_linker_signed_11_0(self, macos_version,
lenient_run_command_output):
macos_version.return_value = [11, 0]
lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20400 size=250 flags=0x20002(adhoc,linker-signed) hashes=5+0 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements=none
''')
self.assertFalse(signing._linker_signed_arm64_needs_force(None))
lenient_run_command_output.assert_not_called()
@mock.patch(
'signing.signing._linker_signed_arm64_needs_force', return_value=False)
@mock.patch('signing.commands.run_command')
class TestSignPart(unittest.TestCase):
def setUp(self):
self.paths = model.Paths('/$I', '/$O', '/$W')
self.config = test_config.TestConfig()
def test_sign_part(self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct('Test.app', 'test.signing.app')
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with(
['codesign', '--sign', '[IDENTITY]', '--timestamp', '/$W/Test.app'])
def test_sign_part_with_requirements(self, run_command,
linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app', 'test.signing.app', requirements='and true')
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--requirements',
'=designated => identifier "test.signing.app" and true',
'/$W/Test.app'
])
def test_sign_part_needs_force(self, run_command,
linker_signed_arm64_needs_force):
linker_signed_arm64_needs_force.return_value = True
part = model.CodeSignedProduct('Test.app', 'test.signing.app')
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--force', '--timestamp',
'/$W/Test.app'
])
def test_sign_part_with_requirements_needs_force(
self, run_command, linker_signed_arm64_needs_force):
linker_signed_arm64_needs_force.return_value = True
part = model.CodeSignedProduct(
'Test.app', 'test.signing.app', requirements='and true')
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--force', '--timestamp',
'--requirements',
'=designated => identifier "test.signing.app" and true',
'/$W/Test.app'
])
def test_sign_part_no_notary(self, run_command,
linker_signed_arm64_needs_force):
config = test_config.TestConfig(notary_user=None, notary_password=None)
part = model.CodeSignedProduct('Test.app', 'test.signing.app')
signing.sign_part(self.paths, config, part)
run_command.assert_called_once_with(
['codesign', '--sign', '[IDENTITY]', '/$W/Test.app'])
def test_sign_part_no_identifier_requirement(
self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app', 'test.signing.app', identifier_requirement=False)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with(
['codesign', '--sign', '[IDENTITY]', '--timestamp', '/$W/Test.app'])
def test_sign_with_identifier(self, run_command,
linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app', 'test.signing.app', sign_with_identifier=True)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--identifier',
'test.signing.app', '/$W/Test.app'
])
def test_sign_with_requirement_and_identifier(
self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
requirements='and true',
sign_with_identifier=True)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--identifier',
'test.signing.app', '--requirements',
'=designated => identifier "test.signing.app" and true',
'/$W/Test.app'
])
def test_sign_with_identifier_no_requirement(
self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
sign_with_identifier=True,
identifier_requirement=False)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--identifier',
'test.signing.app', '/$W/Test.app'
])
def test_sign_part_with_options(self, run_command,
linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
options=model.CodeSignOptions.RESTRICT
| model.CodeSignOptions.LIBRARY_VALIDATION)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--options',
'library,restrict', '/$W/Test.app'
])
def test_sign_part_with_requirement_and_options(
self, run_command, linker_signed_arm64_needs_force):
config = test_config.TestConfig(codesign_requirements_basic='or false')
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
options=model.CodeSignOptions.RESTRICT
| model.CodeSignOptions.LIBRARY_VALIDATION)
signing.sign_part(self.paths, config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--requirements',
'=designated => identifier "test.signing.app" or false',
'--options', 'library,restrict', '/$W/Test.app'
])
def test_sign_part_with_requirements_and_options(
self, run_command, linker_signed_arm64_needs_force):
config = test_config.TestConfig(codesign_requirements_basic='or false')
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
requirements='and true',
options=model.CodeSignOptions.RESTRICT
| model.CodeSignOptions.LIBRARY_VALIDATION)
signing.sign_part(self.paths, config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--requirements',
'=designated => identifier "test.signing.app" and true or false',
'--options', 'library,restrict', '/$W/Test.app'
])
def test_sign_part_with_entitlements(self, run_command,
linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
entitlements='entitlements.plist',
identifier_requirement=False)
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--entitlements',
'/$W/entitlements.plist', '/$W/Test.app'
])
def test_verify_part(self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct('Test.app', 'test.signing.app')
signing.verify_part(self.paths, part)
self.assertEqual(run_command.mock_calls, [
mock.call([
'codesign', '--display', '--verbose=5', '--requirements', '-',
'/$W/Test.app'
]),
mock.call(['codesign', '--verify', '--verbose=6', '/$W/Test.app']),
])
def test_verify_part_with_options(self, run_command,
linker_signed_arm64_needs_force):
part = model.CodeSignedProduct(
'Test.app',
'test.signing.app',
verify_options=model.VerifyOptions.DEEP
| model.VerifyOptions.IGNORE_RESOURCES)
signing.verify_part(self.paths, part)
self.assertEqual(run_command.mock_calls, [
mock.call([
'codesign', '--display', '--verbose=5', '--requirements', '-',
'/$W/Test.app'
]),
mock.call([
'codesign', '--verify', '--verbose=6', '--deep',
'--ignore-resources', '/$W/Test.app'
]),
])
| {
"content_hash": "c2f8585d44c3a75d3eff6ef4a950d69a",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 94,
"avg_line_length": 43.29197080291971,
"alnum_prop": 0.6099308716911145,
"repo_name": "chromium/chromium",
"id": "4253c59bf6338c19fa2ae77c9d50572b1d8d937d",
"size": "12003",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "chrome/installer/mac/signing/signing_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from setuptools import setup
from sys import path
path.insert(0, '.')
NAME = "zcli"
if __name__ == "__main__":
setup(
name = NAME,
version = "0.1.0",
author = "Tony Rogers",
author_email = "[email protected]",
url = "https://github.com/teriyakichild/python-zcli",
license = 'internal use',
packages = [NAME],
package_dir = {NAME: NAME},
description = "Zabbix CLI.",
install_requires = ['requests',
'argparse',
'pyzabbix',
'ConfigParser'],
entry_points={
'console_scripts': [ 'zcli = zcli:cli' ],
}
)
| {
"content_hash": "88b7de95d4419c6c603536095322e8dc",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 61,
"avg_line_length": 24.620689655172413,
"alnum_prop": 0.4733893557422969,
"repo_name": "teriyakichild/python-zcli",
"id": "994230f6b4ebf07a0d7cc91b97f4dc1767bdae63",
"size": "714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "960"
},
{
"name": "Python",
"bytes": "25968"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
import logging
import unipath
from . import network
from .exceptions import InvalidMainIPError, MasterJailMismatchError, MissingMainIPError
from .utils import from_split_if, split_if
__logger__ = logging.getLogger('pybsd')
class BaseJailHandler(object):
"""Provides a base jail handler
Handlers allow custom parametrization and customization of all logic pertaining to the jails. Each aspect of the handling is
delegated to a method that can be called from the master or the jail.
Parameters
----------
master : Optional[:py:class:`~pybsd.systems.masters.Master`]
The handler's master.
jail_root : :py:class:`str`
the path on the host's filesystem to the jails directory that the handler will enforce
Attributes
----------
default_jail_root : :py:class:`str`
the default jail_root.
jail_class_ids : :py:class:`dict`
a dictionary linking jail class types and the numerical ids that are to be linked to them by this handler.
Raises
------
MissingMainIPError
when a master's interface does not define a main_if
InvalidMainIPError
when a master's main_if violates established rules
MasterJailMismatchError
if a `master` and a `jail` called in a method are not related
"""
default_jail_root = '/usr/jails'
jail_class_ids = {'service': 1,
'web': 2}
def __init__(self, master=None, jail_root=None):
super(BaseJailHandler, self).__init__()
self.master = master
        j = jail_root or self.default_jail_root
self.jail_root = unipath.Path(j)
@classmethod
def derive_interface(cls, master_if, jail):
"""Derives a jail's :py:class:`~pybsd.network.Interface` based on the handler's master's
Parameters
----------
        master_if : :py:class:`~pybsd.network.Interface`
            master's :py:class:`~pybsd.network.Interface` to which the jail's is attached
        jail : :py:class:`~pybsd.systems.jails.Jail`
            the jail whose :py:class:`~pybsd.network.Interface` is requested
Returns
-------
: :py:class:`~pybsd.network.Interface`
the jail's :py:class:`~pybsd.network.Interface`
Raises
------
MissingMainIPError
when a master's interface does not define a main_if
InvalidMainIPError
when a master's main_if violates established rules
"""
if master_if.main_ifv4 or master_if.main_ifv6:
_if = network.Interface(master_if.name)
if master_if.main_ifv4:
ip_chunks = split_if(master_if.main_ifv4)
if int(ip_chunks[-1]) != 0:
raise InvalidMainIPError(jail.master, master_if, "an IPv4 main_ip's last octet must be equal to 0")
ip_chunks[4] = str(jail.jail_class_id)
ip_chunks[5] = str(jail.uid)
_ip = from_split_if(ip_chunks)
_if.add_ips(_ip)
if master_if.main_ifv6:
ip_chunks = split_if(master_if.main_ifv6)
if int(ip_chunks[-2]) != 0:
raise InvalidMainIPError(jail.master, master_if, "an IPv6 main_ip's penultimate octet must be equal to 0")
ip_chunks[7] = str(jail.jail_class_id)
ip_chunks[8] = str(jail.uid)
ip_chunks[9] = '1'
_ip = from_split_if(ip_chunks)
_if.add_ips(_ip)
return _if
else:
raise MissingMainIPError(jail.master, master_if)
def check_mismatch(self, jail):
"""Checks whether a given jail belongs to the handler's master
Parameters
----------
jail : :py:class:`~pybsd.systems.jails.Jail`
the jail whose status is checked
Returns
-------
: :py:class:`bool`
whether the jail belongs to the handler's master
Raises
------
MasterJailMismatchError
if a `master` and a `jail` called in a method are not related
"""
        if jail.master != self.master:
            raise MasterJailMismatchError(self.master, jail)
        return True
def get_jail_type(self, jail):
"""Returns a given jail's type.
        The default implementation simply honours the master's default jail type and provides an easily overridable method
where custom logic can be applied.
Parameters
----------
jail : :py:class:`~pybsd.systems.jails.Jail`
the jail whose jail type is requested
Returns
-------
: :py:class:`str`
the jail's type. For base values see :py:meth:`~pybsd.systems.jails.Jail.jail_type`
"""
self.check_mismatch(jail)
return self.master.default_jail_type
def get_jail_hostname(self, jail, strict=True):
"""Returns a given jail's hostname.
        If strict is set to `False`, it will evaluate what the jail's hostname would be if it were attached to the handler's master.
Parameters
----------
jail : :py:class:`~pybsd.systems.jails.Jail`
the jail whose hostname is requested
strict : Optional[ :py:class:`bool` ]
whether the handler should only return hostnames for jails attached to its master. Default is `True`.
Returns
-------
        : :py:class:`str`
            the jail's hostname
"""
if strict:
self.check_mismatch(jail)
return '{}.{}'.format(jail.name, self.master.hostname)
def get_jail_path(self, jail):
"""Returns a given jail's path
Parameters
----------
jail : :py:class:`~pybsd.systems.jails.Jail`
the jail whose path is requested
Returns
-------
: :py:class:`unipath.Path`
the jail's path
"""
self.check_mismatch(jail)
return self.jail_root.child(jail.name)
def get_jail_ext_if(self, jail):
"""Returns a given jail's ext_if
Parameters
----------
jail : :py:class:`~pybsd.systems.jails.Jail`
the jail whose ext_if is requested
Returns
-------
: :py:class:`~pybsd.network.Interface`
the jail's ext_if
"""
self.check_mismatch(jail)
return self.derive_interface(self.master.j_if, jail=jail)
def get_jail_lo_if(self, jail):
"""Returns a given jail's lo_if
Parameters
----------
jail : :py:class:`~pybsd.systems.jails.Jail`
the jail whose lo_if is requested
Returns
-------
: :py:class:`~pybsd.network.Interface`
the jail's lo_if
"""
self.check_mismatch(jail)
return self.derive_interface(self.master.jlo_if, jail=jail)
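# Hedged usage sketch, not part of the original module: instantiating the base
# handler with a custom jail root. A real Master instance is omitted here, so
# only the path handling is exercised.
if __name__ == '__main__':  # pragma: no cover
    _handler = BaseJailHandler(jail_root='/usr/local/jails')
    print(_handler.jail_root)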
| {
"content_hash": "e649ce9b822cd4e64cc6512cb6a470cd",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 130,
"avg_line_length": 33.679611650485434,
"alnum_prop": 0.5788411646007495,
"repo_name": "rebost/pybsd",
"id": "adb573c0de0debc62c00ae84fc2a79bcf801fca1",
"size": "6962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pybsd/handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1489"
},
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "121738"
}
],
"symlink_target": ""
} |
'''Example usage of dataman explained in a nice python script
The goal here is to showcase how a client application would use dataman.
For this example we'll be making a chat system (based on the tornado chat server demo)
'''
import argparse
import schema
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--storage-node", required=True)
args = parser.parse_args()
# Create the database and tables
schema.drop_db(args.storage_node)
schema.create_db(args.storage_node)
# Load some data
# Create database / table
# Load some data
# Use it
# Add a schema + Indexes
# Load some data
# Use it
# update schema
# Load some data
# Use it
| {
"content_hash": "0e3b741e130069e6941308a80b9b029a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 86,
"avg_line_length": 19.05128205128205,
"alnum_prop": 0.6621803499327052,
"repo_name": "jacksontj/dataman",
"id": "e2cead94d3b2149fa1ad97af984711191dbf22d5",
"size": "743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/examples/python/forum/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "691393"
}
],
"symlink_target": ""
} |
import os
port = int(os.environ.get('PORT', 5000))
bind = '0.0.0.0:{}'.format(port)
backlog = 2048
reload = True
#
# Worker processes
#
# workers - The number of worker processes that this server
# should keep alive for handling requests.
#
#       A positive integer generally in the 2-4 x $(NUM_CORES)
#       range. You'll want to vary this a bit to find the best
#       value for your particular application's workload.
#
# worker_class - The type of workers to use. The default
# sync class should handle most 'normal' types of work
# loads. You'll want to read
# http://docs.gunicorn.org/en/latest/design.html#choosing-a-worker-type
# for information on when you might want to choose one
# of the other worker classes.
#
# A string referring to a Python path to a subclass of
# gunicorn.workers.base.Worker. The default provided values
# can be seen at
# http://docs.gunicorn.org/en/latest/settings.html#worker-class
#
# worker_connections - For the eventlet and gevent worker classes
# this limits the maximum number of simultaneous clients that
# a single process can handle.
#
# A positive integer generally set to around 1000.
#
# timeout - If a worker does not notify the master process in this
# number of seconds it is killed and a new worker is spawned
# to replace it.
#
# Generally set to thirty seconds. Only set this noticeably
# higher if you're sure of the repercussions for sync workers.
# For the non sync workers it just means that the worker
# process is still communicating and is not tied to the length
# of time required to handle a single request.
#
# keepalive - The number of seconds to wait for the next request
# on a Keep-Alive HTTP connection.
#
# A positive integer. Generally set in the 1-5 seconds range.
#
workers = 4
worker_class = 'sync'
worker_connections = 1000
timeout = 30
keepalive = 2
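#
# A hedged sketch, not part of the original config: the 2-4 x $(NUM_CORES)
# guidance above can also be derived from the machine. The fixed "workers = 4"
# above is what this deployment uses; the value below is illustrative only.
#
import multiprocessing
suggested_workers = multiprocessing.cpu_count() * 2 + 1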
#
# spew - Install a trace function that spews every line of Python
# that is executed when running the server. This is the
# nuclear option.
#
# True or False
#
spew = False
#
# Server mechanics
#
# daemon - Detach the main Gunicorn process from the controlling
# terminal with a standard fork/fork sequence.
#
# True or False
#
# pidfile - The path to a pid file to write
#
# A path string or None to not write a pid file.
#
# user - Switch worker processes to run as this user.
#
# A valid user id (as an integer) or the name of a user that
# can be retrieved with a call to pwd.getpwnam(value) or None
# to not change the worker process user.
#
# group - Switch worker process to run as this group.
#
#       A valid group id (as an integer) or the name of a group that
#       can be retrieved with a call to grp.getgrnam(value) or None
#       to not change the worker process group.
#
# umask - A mask for file permissions written by Gunicorn. Note that
# this affects unix socket permissions.
#
# A valid value for the os.umask(mode) call or a string
# compatible with int(value, 0) (0 means Python guesses
# the base, so values like "0", "0xFF", "0022" are valid
# for decimal, hex, and octal representations)
#
# tmp_upload_dir - A directory to store temporary request data when
# requests are read. This will most likely be disappearing soon.
#
# A path to a directory where the process owner can write. Or
# None to signal that Python should choose one on its own.
#
daemon = False
pidfile = None
umask = 0
user = None
group = None
tmp_upload_dir = None
#
# Logging
#
# logfile - The path to a log file to write to.
#
# A path string. "-" means log to stdout.
#
# loglevel - The granularity of log output
#
# A string of "debug", "info", "warning", "error", "critical"
#
errorlog = '-'
loglevel = 'info'
accesslog = '-'
access_log_format = '%(t)s "%(r)s" %(s)s Time: %(L)ss'
#
# Process naming
#
# proc_name - A base to use with setproctitle to change the way
# that Gunicorn processes are reported in the system process
# table. This affects things like 'ps' and 'top'. If you're
# going to be running more than one instance of Gunicorn you'll
# probably want to set a name to tell them apart. This requires
# that you install the setproctitle module.
#
# A string or None to choose a default of something like 'gunicorn'.
#
proc_name = None
#
# Server hooks
#
# post_fork - Called just after a worker has been forked.
#
# A callable that takes a server and worker instance
# as arguments.
#
# pre_fork - Called just prior to forking the worker subprocess.
#
# A callable that accepts the same arguments as after_fork
#
# pre_exec - Called just prior to forking off a secondary
# master process during things like config reloading.
#
# A callable that takes a server instance as the sole argument.
#
def post_fork(server, worker):
server.log.info("Worker spawned (pid: %s)", worker.pid)
def pre_fork(server, worker):
pass
def pre_exec(server):
server.log.info("Forked child, re-executing.")
def when_ready(server):
server.log.info("Server is ready. Spawning workers")
def worker_int(worker):
worker.log.info("worker received INT or QUIT signal")
## get traceback info
import threading, sys, traceback
id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
code = []
for threadId, stack in sys._current_frames().items():
code.append("\n# Thread: %s(%d)" % (id2name.get(threadId,""),
threadId))
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename,
lineno, name))
if line:
code.append(" %s" % (line.strip()))
worker.log.debug("\n".join(code))
def worker_abort(worker):
worker.log.info("worker received SIGABRT signal")
| {
"content_hash": "7ce91ce9b5bba7081ef3e35f63e01ae1",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 77,
"avg_line_length": 31.28497409326425,
"alnum_prop": 0.66445842994369,
"repo_name": "DanielAndreasen/TripMeal",
"id": "d423caf952487b83c77afb379e1d862d04d43167",
"size": "6606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21989"
},
{
"name": "Python",
"bytes": "24005"
}
],
"symlink_target": ""
} |
"""
WSGI config for Mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Mysite.settings")
application = get_wsgi_application()
| {
"content_hash": "d83157f5e18cbf796a42b0c3c42a6371",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.3125,
"alnum_prop": 0.7686375321336761,
"repo_name": "amendez82/Mysite",
"id": "56cda2b8907b694d9553f6c34431e15bb6703e1c",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Mysite/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "286"
},
{
"name": "Python",
"bytes": "8292"
}
],
"symlink_target": ""
} |
KEY_PURPOSE_AUTHENTICATION = 'authentication'
KEY_PURPOSE_SUBMISSION = 'submission'
| {
"content_hash": "2bd29704dd0c2fbe40aebb9b490aa0a0",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 45,
"avg_line_length": 42,
"alnum_prop": 0.8095238095238095,
"repo_name": "ONSdigital/eq-survey-runner",
"id": "f613af9163d4d36ededa175ce279199f09e8390c",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/keys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "520"
},
{
"name": "HTML",
"bytes": "236859"
},
{
"name": "JavaScript",
"bytes": "423942"
},
{
"name": "Python",
"bytes": "1409591"
},
{
"name": "SCSS",
"bytes": "25858"
},
{
"name": "Shell",
"bytes": "10196"
}
],
"symlink_target": ""
} |
import time
import unittest
import config
import mle
import node
LEADER = 1
ROUTER1 = 2
ROUTER2 = 3
ROUTER3 = 4
class Cert_5_1_10_RouterAttachLinkQuality(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1, 5):
self.nodes[i] = node.Node(i, simulator=self.simulator)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER2].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[ROUTER3].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[ROUTER2].set_panid(0xface)
self.nodes[ROUTER2].set_mode('rsdn')
self.nodes[ROUTER2].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER2].add_whitelist(self.nodes[ROUTER3].get_addr64(), rssi=-85)
self.nodes[ROUTER2].enable_whitelist()
self.nodes[ROUTER2].set_router_selection_jitter(1)
self.nodes[ROUTER3].set_panid(0xface)
self.nodes[ROUTER3].set_mode('rsdn')
self.nodes[ROUTER3].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[ROUTER3].add_whitelist(self.nodes[ROUTER2].get_addr64(), rssi=-85)
self.nodes[ROUTER3].enable_whitelist()
self.nodes[ROUTER3].set_router_selection_jitter(1)
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
node.destroy()
self.simulator.stop()
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.nodes[ROUTER3].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER3].get_state(), 'router')
leader_messages = self.simulator.get_messages_sent_by(LEADER)
router1_messages = self.simulator.get_messages_sent_by(ROUTER1)
router2_messages = self.simulator.get_messages_sent_by(ROUTER2)
router3_messages = self.simulator.get_messages_sent_by(ROUTER3)
# 1 - Leader, Router1, Router2
leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg = router1_messages.next_coap_message("0.02")
msg.assertCoapMessageRequestUriPath("/a/as")
msg = leader_messages.next_coap_message("2.04")
router2_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
router2_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg = router2_messages.next_coap_message("0.02")
msg.assertCoapMessageRequestUriPath("/a/as")
msg = leader_messages.next_coap_message("2.04")
router1_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
router2_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
# 3 - Router3
msg = router3_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
msg.assertSentWithHopLimit(255)
msg.assertSentToDestinationAddress("ff02::2")
msg.assertMleMessageContainsTlv(mle.Mode)
msg.assertMleMessageContainsTlv(mle.Challenge)
msg.assertMleMessageContainsTlv(mle.ScanMask)
msg.assertMleMessageContainsTlv(mle.Version)
# 4 - Router1, Router2
msg = router1_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
msg.assertSentToNode(self.nodes[ROUTER3])
msg = router2_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
msg.assertSentToNode(self.nodes[ROUTER3])
# 5 - Router3
msg = router3_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
msg.assertSentToNode(self.nodes[ROUTER1])
msg.assertMleMessageContainsTlv(mle.Response)
msg.assertMleMessageContainsTlv(mle.LinkLayerFrameCounter)
msg.assertMleMessageContainsOptionalTlv(mle.MleFrameCounter)
msg.assertMleMessageContainsTlv(mle.Mode)
msg.assertMleMessageContainsTlv(mle.Timeout)
msg.assertMleMessageContainsTlv(mle.Version)
msg.assertMleMessageContainsTlv(mle.TlvRequest)
msg.assertMleMessageDoesNotContainTlv(mle.AddressRegistration)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "0fcb601f840f876156c28805bc6e8a4e",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 85,
"avg_line_length": 39.036764705882355,
"alnum_prop": 0.6833678658881145,
"repo_name": "erja-gp/openthread",
"id": "b26199a5412b919f13775334ad76ef5de7da574c",
"size": "6913",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_5_1_10_RouterAttachLinkQuality.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "15850"
},
{
"name": "C",
"bytes": "940119"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "4306681"
},
{
"name": "Dockerfile",
"bytes": "6256"
},
{
"name": "M4",
"bytes": "63303"
},
{
"name": "Makefile",
"bytes": "133368"
},
{
"name": "Python",
"bytes": "2012919"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "74907"
}
],
"symlink_target": ""
} |
import contextlib
import sys
import numpy as np
import random
import re
import threading
import gc
from numba.core.errors import TypingError
from numba import njit
from numba.core import types, utils, config
from numba.tests.support import MemoryLeakMixin, TestCase, tag
import unittest
nrtjit = njit(_nrt=True, nogil=True)
def np_concatenate1(a, b, c):
return np.concatenate((a, b, c))
def np_concatenate2(a, b, c, axis):
return np.concatenate((a, b, c), axis=axis)
def np_stack1(a, b, c):
return np.stack((a, b, c))
def np_stack2(a, b, c, axis):
return np.stack((a, b, c), axis=axis)
def np_hstack(a, b, c):
return np.hstack((a, b, c))
def np_vstack(a, b, c):
return np.vstack((a, b, c))
def np_dstack(a, b, c):
return np.dstack((a, b, c))
def np_column_stack(a, b, c):
return np.column_stack((a, b, c))
class BaseTest(TestCase):
def check_outputs(self, pyfunc, argslist, exact=True):
cfunc = nrtjit(pyfunc)
for args in argslist:
expected = pyfunc(*args)
ret = cfunc(*args)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.dtype, expected.dtype)
self.assertStridesEqual(ret, expected)
if exact:
np.testing.assert_equal(expected, ret)
else:
np.testing.assert_allclose(expected, ret)
class NrtRefCtTest(MemoryLeakMixin):
def assert_array_nrt_refct(self, arr, expect):
self.assertEqual(arr.base.refcount, expect)
class TestDynArray(NrtRefCtTest, TestCase):
def test_empty_0d(self):
@nrtjit
def foo():
arr = np.empty(())
arr[()] = 42
return arr
arr = foo()
self.assert_array_nrt_refct(arr, 1)
np.testing.assert_equal(42, arr)
self.assertEqual(arr.size, 1)
self.assertEqual(arr.shape, ())
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, ())
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
def test_empty_1d(self):
@nrtjit
def foo(n):
arr = np.empty(n)
for i in range(n):
arr[i] = i
return arr
n = 3
arr = foo(n)
self.assert_array_nrt_refct(arr, 1)
np.testing.assert_equal(np.arange(n), arr)
self.assertEqual(arr.size, n)
self.assertEqual(arr.shape, (n,))
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,))
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
def test_empty_2d(self):
def pyfunc(m, n):
arr = np.empty((m, n), np.int32)
for i in range(m):
for j in range(n):
arr[i, j] = i + j
return arr
cfunc = nrtjit(pyfunc)
m = 4
n = 3
expected_arr = pyfunc(m, n)
got_arr = cfunc(m, n)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
def test_empty_3d(self):
def pyfunc(m, n, p):
arr = np.empty((m, n, p), np.int32)
for i in range(m):
for j in range(n):
for k in range(p):
arr[i, j, k] = i + j + k
return arr
cfunc = nrtjit(pyfunc)
m = 4
n = 3
p = 2
expected_arr = pyfunc(m, n, p)
got_arr = cfunc(m, n, p)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
def test_empty_2d_sliced(self):
def pyfunc(m, n, p):
arr = np.empty((m, n), np.int32)
for i in range(m):
for j in range(n):
arr[i, j] = i + j
return arr[p]
cfunc = nrtjit(pyfunc)
m = 4
n = 3
p = 2
expected_arr = pyfunc(m, n, p)
got_arr = cfunc(m, n, p)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
def test_return_global_array(self):
y = np.ones(4, dtype=np.float32)
initrefct = sys.getrefcount(y)
def return_external_array():
return y
cfunc = nrtjit(return_external_array)
out = cfunc()
# out reference by cfunc
self.assertEqual(initrefct + 1, sys.getrefcount(y))
np.testing.assert_equal(y, out)
np.testing.assert_equal(y, np.ones(4, dtype=np.float32))
np.testing.assert_equal(out, np.ones(4, dtype=np.float32))
del out
gc.collect()
# out is only referenced by cfunc
self.assertEqual(initrefct + 1, sys.getrefcount(y))
del cfunc
gc.collect()
# y is no longer referenced by cfunc
self.assertEqual(initrefct, sys.getrefcount(y))
def test_return_global_array_sliced(self):
y = np.ones(4, dtype=np.float32)
def return_external_array():
return y[2:]
cfunc = nrtjit(return_external_array)
out = cfunc()
self.assertIsNone(out.base)
yy = y[2:]
np.testing.assert_equal(yy, out)
np.testing.assert_equal(yy, np.ones(2, dtype=np.float32))
np.testing.assert_equal(out, np.ones(2, dtype=np.float32))
def test_array_pass_through(self):
def pyfunc(y):
return y
arr = np.ones(4, dtype=np.float32)
cfunc = nrtjit(pyfunc)
expected = cfunc(arr)
got = pyfunc(arr)
np.testing.assert_equal(expected, arr)
np.testing.assert_equal(expected, got)
self.assertIs(expected, arr)
self.assertIs(expected, got)
def test_array_pass_through_sliced(self):
def pyfunc(y):
return y[y.size // 2:]
arr = np.ones(4, dtype=np.float32)
initrefct = sys.getrefcount(arr)
cfunc = nrtjit(pyfunc)
got = cfunc(arr)
self.assertEqual(initrefct + 1, sys.getrefcount(arr))
expected = pyfunc(arr)
self.assertEqual(initrefct + 2, sys.getrefcount(arr))
np.testing.assert_equal(expected, arr[arr.size // 2])
np.testing.assert_equal(expected, got)
del expected
self.assertEqual(initrefct + 1, sys.getrefcount(arr))
del got
self.assertEqual(initrefct, sys.getrefcount(arr))
def test_ufunc_with_allocated_output(self):
def pyfunc(a, b):
out = np.empty(a.shape)
np.add(a, b, out)
return out
cfunc = nrtjit(pyfunc)
# 1D case
arr_a = np.random.random(10)
arr_b = np.random.random(10)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
# 2D case
arr_a = np.random.random(10).reshape(2, 5)
arr_b = np.random.random(10).reshape(2, 5)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
# 3D case
arr_a = np.random.random(70).reshape(2, 5, 7)
arr_b = np.random.random(70).reshape(2, 5, 7)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
def test_allocation_mt(self):
"""
        This test exercises array allocation in a multithreaded use case.
        It stresses the freelist inside the NRT.
"""
def pyfunc(inp):
out = np.empty(inp.size)
# Zero fill
for i in range(out.size):
out[i] = 0
for i in range(inp[0]):
# Allocate inside a loop
tmp = np.empty(inp.size)
# Write to tmp
for j in range(tmp.size):
tmp[j] = inp[j]
# out = tmp + i
for j in range(tmp.size):
out[j] += tmp[j] + i
return out
cfunc = nrtjit(pyfunc)
size = 10 # small array size so that the computation is short
arr = np.random.randint(1, 10, size)
frozen_arr = arr.copy()
np.testing.assert_equal(pyfunc(arr), cfunc(arr))
# Ensure we did not modify the input
np.testing.assert_equal(frozen_arr, arr)
workers = []
inputs = []
outputs = []
# Make wrapper to store the output
def wrapped(inp, out):
out[:] = cfunc(inp)
# Create a lot of worker threads to create contention
for i in range(100):
arr = np.random.randint(1, 10, size)
out = np.empty_like(arr)
thread = threading.Thread(target=wrapped,
args=(arr, out),
name="worker{0}".format(i))
workers.append(thread)
inputs.append(arr)
outputs.append(out)
# Launch worker threads
for thread in workers:
thread.start()
# Join worker threads
for thread in workers:
thread.join()
# Check result
for inp, out in zip(inputs, outputs):
np.testing.assert_equal(pyfunc(inp), out)
def test_refct_mt(self):
"""
This test exercises the refct in multithreaded code
"""
def pyfunc(n, inp):
out = np.empty(inp.size)
for i in range(out.size):
out[i] = inp[i] + 1
# Use swap to trigger many refct ops
for i in range(n):
out, inp = inp, out
return out
cfunc = nrtjit(pyfunc)
size = 10
input = np.arange(size, dtype=float)
expected_refct = sys.getrefcount(input)
swapct = random.randrange(1000)
expected = pyfunc(swapct, input)
np.testing.assert_equal(expected, cfunc(swapct, input))
# The following checks can discover a reference count error
del expected
self.assertEqual(expected_refct, sys.getrefcount(input))
workers = []
outputs = []
swapcts = []
# Make wrapper to store the output
def wrapped(n, input, out):
out[:] = cfunc(n, input)
# Create worker threads
for i in range(100):
out = np.empty(size)
# All thread shares the same input
swapct = random.randrange(1000)
thread = threading.Thread(target=wrapped,
args=(swapct, input, out),
name="worker{0}".format(i))
workers.append(thread)
outputs.append(out)
swapcts.append(swapct)
# Launch worker threads
for thread in workers:
thread.start()
# Join worker threads
for thread in workers:
thread.join()
# Check result
for swapct, out in zip(swapcts, outputs):
np.testing.assert_equal(pyfunc(swapct, input), out)
del outputs, workers
# The following checks can discover a reference count error
self.assertEqual(expected_refct, sys.getrefcount(input))
def test_swap(self):
def pyfunc(x, y, t):
"""Swap array x and y for t number of times
"""
for i in range(t):
x, y = y, x
return x, y
cfunc = nrtjit(pyfunc)
x = np.random.random(100)
y = np.random.random(100)
t = 100
initrefct = sys.getrefcount(x), sys.getrefcount(y)
expect, got = pyfunc(x, y, t), cfunc(x, y, t)
self.assertIsNone(got[0].base)
self.assertIsNone(got[1].base)
np.testing.assert_equal(expect, got)
del expect, got
self.assertEqual(initrefct, (sys.getrefcount(x), sys.getrefcount(y)))
def test_return_tuple_of_array(self):
def pyfunc(x):
y = np.empty(x.size)
for i in range(y.size):
y[i] = x[i] + 1
return x, y
cfunc = nrtjit(pyfunc)
x = np.random.random(5)
initrefct = sys.getrefcount(x)
expected_x, expected_y = pyfunc(x)
got_x, got_y = cfunc(x)
self.assertIs(x, expected_x)
self.assertIs(x, got_x)
np.testing.assert_equal(expected_x, got_x)
np.testing.assert_equal(expected_y, got_y)
del expected_x, got_x
self.assertEqual(initrefct, sys.getrefcount(x))
self.assertEqual(sys.getrefcount(expected_y), sys.getrefcount(got_y))
def test_return_tuple_of_array_created(self):
def pyfunc(x):
y = np.empty(x.size)
for i in range(y.size):
y[i] = x[i] + 1
out = y, y
return out
cfunc = nrtjit(pyfunc)
x = np.random.random(5)
expected_x, expected_y = pyfunc(x)
got_x, got_y = cfunc(x)
np.testing.assert_equal(expected_x, got_x)
np.testing.assert_equal(expected_y, got_y)
# getrefcount owns 1, got_y owns 1
self.assertEqual(2, sys.getrefcount(got_y))
# getrefcount owns 1, got_y owns 1
self.assertEqual(2, sys.getrefcount(got_y))
def test_issue_with_return_leak(self):
"""
        The dispatcher returns a new reference.
        We need to work around that for now.
"""
@nrtjit
def inner(out):
return out
def pyfunc(x):
return inner(x)
cfunc = nrtjit(pyfunc)
arr = np.arange(10)
old_refct = sys.getrefcount(arr)
self.assertEqual(old_refct, sys.getrefcount(pyfunc(arr)))
self.assertEqual(old_refct, sys.getrefcount(cfunc(arr)))
self.assertEqual(old_refct, sys.getrefcount(arr))
class ConstructorBaseTest(NrtRefCtTest):
def check_0d(self, pyfunc):
cfunc = nrtjit(pyfunc)
expected = pyfunc()
ret = cfunc()
self.assert_array_nrt_refct(ret, 1)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.shape, expected.shape)
self.assertEqual(ret.dtype, expected.dtype)
self.assertEqual(ret.strides, expected.strides)
self.check_result_value(ret, expected)
# test writability
expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8
expected.fill(123)
ret.fill(123)
np.testing.assert_equal(ret, expected)
def check_1d(self, pyfunc):
cfunc = nrtjit(pyfunc)
n = 3
expected = pyfunc(n)
ret = cfunc(n)
self.assert_array_nrt_refct(ret, 1)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.shape, expected.shape)
self.assertEqual(ret.dtype, expected.dtype)
self.assertEqual(ret.strides, expected.strides)
self.check_result_value(ret, expected)
# test writability
expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8
expected.fill(123)
ret.fill(123)
np.testing.assert_equal(ret, expected)
# errors
with self.assertRaises(ValueError) as cm:
cfunc(-1)
self.assertEqual(str(cm.exception), "negative dimensions not allowed")
def check_2d(self, pyfunc):
cfunc = nrtjit(pyfunc)
m, n = 2, 3
expected = pyfunc(m, n)
ret = cfunc(m, n)
self.assert_array_nrt_refct(ret, 1)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.shape, expected.shape)
self.assertEqual(ret.dtype, expected.dtype)
self.assertEqual(ret.strides, expected.strides)
self.check_result_value(ret, expected)
# test writability
expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8
expected.fill(123)
ret.fill(123)
np.testing.assert_equal(ret, expected)
# errors
with self.assertRaises(ValueError) as cm:
cfunc(2, -1)
self.assertEqual(str(cm.exception), "negative dimensions not allowed")
def check_alloc_size(self, pyfunc):
"""Checks that pyfunc will error, not segfaulting due to array size."""
cfunc = nrtjit(pyfunc)
with self.assertRaises(ValueError) as e:
cfunc()
self.assertIn(
"array is too big",
str(e.exception)
)
class TestNdZeros(ConstructorBaseTest, TestCase):
def setUp(self):
super(TestNdZeros, self).setUp()
self.pyfunc = np.zeros
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_0d(self):
pyfunc = self.pyfunc
def func():
return pyfunc(())
self.check_0d(func)
def test_1d(self):
pyfunc = self.pyfunc
def func(n):
return pyfunc(n)
self.check_1d(func)
def test_1d_dtype(self):
pyfunc = self.pyfunc
def func(n):
return pyfunc(n, np.int32)
self.check_1d(func)
def test_1d_dtype_instance(self):
# dtype as numpy dtype, not as scalar class
pyfunc = self.pyfunc
_dtype = np.dtype('int32')
def func(n):
return pyfunc(n, _dtype)
self.check_1d(func)
def test_1d_dtype_str(self):
pyfunc = self.pyfunc
_dtype = 'int32'
def func(n):
return pyfunc(n, _dtype)
self.check_1d(func)
def func(n):
return pyfunc(n, 'complex128')
self.check_1d(func)
def test_1d_dtype_str_alternative_spelling(self):
# like test_1d_dtype_str but using the shorthand type spellings
pyfunc = self.pyfunc
_dtype = 'i4'
def func(n):
return pyfunc(n, _dtype)
self.check_1d(func)
def func(n):
return pyfunc(n, 'c8')
self.check_1d(func)
def test_1d_dtype_str_structured_dtype(self):
# test_1d_dtype_str but using a structured dtype
pyfunc = self.pyfunc
_dtype = "i4, (2,3)f8"
def func(n):
return pyfunc(n, _dtype)
self.check_1d(func)
def test_1d_dtype_non_const_str(self):
pyfunc = self.pyfunc
@njit
def func(n, dt):
return pyfunc(n, dt)
with self.assertRaises(TypingError) as raises:
func(5, 'int32')
excstr = str(raises.exception)
self.assertIn('No match', excstr)
restr = r'\b{}\(int.*?, unicode_type\)\B'
regex = re.compile(restr.format(pyfunc.__name__))
self.assertRegex(excstr, regex)
def test_1d_dtype_invalid_str(self):
pyfunc = self.pyfunc
@njit
def func(n):
return pyfunc(n, 'ABCDEF')
with self.assertRaises(TypingError) as raises:
func(5)
excstr = str(raises.exception)
self.assertIn("Invalid NumPy dtype specified: 'ABCDEF'", excstr)
def test_2d(self):
pyfunc = self.pyfunc
def func(m, n):
return pyfunc((m, n))
self.check_2d(func)
def test_2d_shape_dtypes(self):
# Test for issue #4575
pyfunc = self.pyfunc
def func1(m, n):
return pyfunc((np.int16(m), np.int32(n)))
self.check_2d(func1)
# Using a 64-bit value checks that 32 bit systems will downcast to intp
def func2(m, n):
return pyfunc((np.int64(m), np.int8(n)))
self.check_2d(func2)
# Make sure an error is thrown if we can't downcast safely
if config.IS_32BITS:
cfunc = nrtjit(lambda m, n: pyfunc((m, n)))
with self.assertRaises(ValueError):
cfunc(np.int64(1 << (32 - 1)), 1)
def test_2d_dtype_kwarg(self):
pyfunc = self.pyfunc
def func(m, n):
return pyfunc((m, n), dtype=np.complex64)
self.check_2d(func)
def test_2d_dtype_str_kwarg(self):
pyfunc = self.pyfunc
def func(m, n):
return pyfunc((m, n), dtype='complex64')
self.check_2d(func)
def test_2d_dtype_str_kwarg_alternative_spelling(self):
# as test_2d_dtype_str_kwarg but with the numpy shorthand type spelling
pyfunc = self.pyfunc
def func(m, n):
return pyfunc((m, n), dtype='c8')
self.check_2d(func)
def test_alloc_size(self):
pyfunc = self.pyfunc
width = types.intp.bitwidth
def gen_func(shape, dtype):
return lambda : pyfunc(shape, dtype)
        # Under these values numba will segfault, but that's another issue
self.check_alloc_size(gen_func(1 << width - 2, np.intp))
self.check_alloc_size(gen_func((1 << width - 8, 64), np.intp))
class TestNdOnes(TestNdZeros):
def setUp(self):
super(TestNdOnes, self).setUp()
self.pyfunc = np.ones
@unittest.expectedFailure
def test_1d_dtype_str_structured_dtype(self):
super().test_1d_dtype_str_structured_dtype()
class TestNdFull(ConstructorBaseTest, TestCase):
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_0d(self):
def func():
return np.full((), 4.5)
self.check_0d(func)
def test_1d(self):
def func(n):
return np.full(n, 4.5)
self.check_1d(func)
def test_1d_dtype(self):
def func(n):
return np.full(n, 4.5, np.bool_)
self.check_1d(func)
def test_1d_dtype_instance(self):
dtype = np.dtype('bool')
def func(n):
return np.full(n, 4.5, dtype)
self.check_1d(func)
def test_1d_dtype_str(self):
def func(n):
return np.full(n, 4.5, 'bool_')
self.check_1d(func)
def test_1d_dtype_str_alternative_spelling(self):
# like test_1d_dtype_str but using the shorthand type spelling
def func(n):
return np.full(n, 4.5, '?')
self.check_1d(func)
def test_1d_dtype_non_const_str(self):
@njit
def func(n, fv, dt):
return np.full(n, fv, dt)
with self.assertRaises(TypingError) as raises:
func((5,), 4.5, 'int32')
excstr = str(raises.exception)
self.assertIn('No match', excstr)
restr = r'\bfull\(UniTuple\(int.*? x 1\), float64, unicode_type\)\B'
regex = re.compile(restr)
self.assertRegex(excstr, regex)
def test_1d_dtype_invalid_str(self):
@njit
def func(n, fv):
return np.full(n, fv, 'ABCDEF')
with self.assertRaises(TypingError) as raises:
func(np.ones(4), 4.5)
excstr = str(raises.exception)
self.assertIn("Invalid NumPy dtype specified: 'ABCDEF'", excstr)
def test_2d(self):
def func(m, n):
return np.full((m, n), 4.5)
self.check_2d(func)
def test_2d_dtype_kwarg(self):
def func(m, n):
return np.full((m, n), 1 + 4.5j, dtype=np.complex64)
self.check_2d(func)
def test_2d_dtype_from_type(self):
# tests issue #2862
def func(m, n):
return np.full((m, n), np.int32(1))
self.check_2d(func)
# Complex uses `.real`, imaginary part dropped
def func(m, n):
return np.full((m, n), np.complex128(1))
self.check_2d(func)
# and that if a dtype is specified, this influences the return type
def func(m, n):
return np.full((m, n), 1, dtype=np.int8)
self.check_2d(func)
def test_2d_shape_dtypes(self):
# Test for issue #4575
def func1(m, n):
return np.full((np.int16(m), np.int32(n)), 4.5)
self.check_2d(func1)
# Using a 64-bit value checks that 32 bit systems will downcast to intp
def func2(m, n):
return np.full((np.int64(m), np.int8(n)), 4.5)
self.check_2d(func2)
# Make sure an error is thrown if we can't downcast safely
if config.IS_32BITS:
cfunc = nrtjit(lambda m, n: np.full((m, n), 4.5))
with self.assertRaises(ValueError):
cfunc(np.int64(1 << (32 - 1)), 1)
def test_alloc_size(self):
width = types.intp.bitwidth
def gen_func(shape, value):
return lambda : np.full(shape, value)
        # Under these values numba will segfault, but that's another issue
self.check_alloc_size(gen_func(1 << width - 2, 1))
self.check_alloc_size(gen_func((1 << width - 8, 64), 1))
class ConstructorLikeBaseTest(object):
def mutate_array(self, arr):
try:
arr.fill(42)
except (TypeError, ValueError):
# Try something else (e.g. Numpy 1.6 with structured dtypes)
fill_value = b'x' * arr.dtype.itemsize
arr.fill(fill_value)
def check_like(self, pyfunc, dtype):
def check_arr(arr):
expected = pyfunc(arr)
ret = cfunc(arr)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.dtype, expected.dtype)
self.assertStridesEqual(ret, expected)
self.check_result_value(ret, expected)
# test writability
self.mutate_array(ret)
self.mutate_array(expected)
np.testing.assert_equal(ret, expected)
orig = np.linspace(0, 5, 6).astype(dtype)
cfunc = nrtjit(pyfunc)
for shape in (6, (2, 3), (1, 2, 3), (3, 1, 2), ()):
if shape == ():
arr = orig[-1:].reshape(())
else:
arr = orig.reshape(shape)
check_arr(arr)
# Non-contiguous array
if arr.ndim > 0:
check_arr(arr[::2])
# Check new array doesn't inherit readonly flag
arr.flags['WRITEABLE'] = False
# verify read-only
with self.assertRaises(ValueError):
arr[0] = 1
check_arr(arr)
# Scalar argument => should produce a 0-d array
check_arr(orig[0])
class TestNdEmptyLike(ConstructorLikeBaseTest, TestCase):
def setUp(self):
super(TestNdEmptyLike, self).setUp()
self.pyfunc = np.empty_like
def check_result_value(self, ret, expected):
pass
def test_like(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr)
self.check_like(func, np.float64)
def test_like_structured(self):
dtype = np.dtype([('a', np.int16), ('b', np.float32)])
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr)
self.check_like(func, dtype)
def test_like_dtype(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, np.int32)
self.check_like(func, np.float64)
def test_like_dtype_instance(self):
dtype = np.dtype('int32')
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype)
self.check_like(func, np.float64)
def test_like_dtype_structured(self):
dtype = np.dtype([('a', np.int16), ('b', np.float32)])
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype)
self.check_like(func, np.float64)
def test_like_dtype_kwarg(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype=np.int32)
self.check_like(func, np.float64)
def test_like_dtype_str_kwarg(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype='int32')
self.check_like(func, np.float64)
def test_like_dtype_str_kwarg_alternative_spelling(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype='i4')
self.check_like(func, np.float64)
def test_like_dtype_non_const_str(self):
pyfunc = self.pyfunc
@njit
def func(n, dt):
return pyfunc(n, dt)
with self.assertRaises(TypingError) as raises:
func(np.ones(4), 'int32')
excstr = str(raises.exception)
self.assertIn('No match', excstr)
self.assertIn(
'{}(array(float64, 1d, C), unicode_type)'.format(pyfunc.__name__),
excstr)
def test_like_dtype_invalid_str(self):
pyfunc = self.pyfunc
@njit
def func(n):
return pyfunc(n, 'ABCDEF')
with self.assertRaises(TypingError) as raises:
func(np.ones(4))
excstr = str(raises.exception)
self.assertIn("Invalid NumPy dtype specified: 'ABCDEF'", excstr)
class TestNdZerosLike(TestNdEmptyLike):
def setUp(self):
super(TestNdZerosLike, self).setUp()
self.pyfunc = np.zeros_like
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_like_structured(self):
super(TestNdZerosLike, self).test_like_structured()
def test_like_dtype_structured(self):
super(TestNdZerosLike, self).test_like_dtype_structured()
class TestNdOnesLike(TestNdZerosLike):
def setUp(self):
super(TestNdOnesLike, self).setUp()
self.pyfunc = np.ones_like
self.expected_value = 1
# Not supported yet.
@unittest.expectedFailure
def test_like_structured(self):
super(TestNdOnesLike, self).test_like_structured()
@unittest.expectedFailure
def test_like_dtype_structured(self):
super(TestNdOnesLike, self).test_like_dtype_structured()
class TestNdFullLike(ConstructorLikeBaseTest, TestCase):
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_like(self):
def func(arr):
return np.full_like(arr, 3.5)
self.check_like(func, np.float64)
# Not supported yet.
@unittest.expectedFailure
def test_like_structured(self):
dtype = np.dtype([('a', np.int16), ('b', np.float32)])
def func(arr):
return np.full_like(arr, 4.5)
self.check_like(func, dtype)
def test_like_dtype(self):
def func(arr):
return np.full_like(arr, 4.5, np.bool_)
self.check_like(func, np.float64)
def test_like_dtype_instance(self):
dtype = np.dtype('bool')
def func(arr):
return np.full_like(arr, 4.5, dtype)
self.check_like(func, np.float64)
def test_like_dtype_kwarg(self):
def func(arr):
return np.full_like(arr, 4.5, dtype=np.bool_)
self.check_like(func, np.float64)
def test_like_dtype_str_kwarg(self):
def func(arr):
return np.full_like(arr, 4.5, 'bool_')
self.check_like(func, np.float64)
def test_like_dtype_str_kwarg_alternative_spelling(self):
def func(arr):
return np.full_like(arr, 4.5, dtype='?')
self.check_like(func, np.float64)
def test_like_dtype_non_const_str_kwarg(self):
@njit
def func(arr, fv, dt):
return np.full_like(arr, fv, dt)
with self.assertRaises(TypingError) as raises:
func(np.ones(3,), 4.5, 'int32')
excstr = str(raises.exception)
self.assertIn('No match', excstr)
self.assertIn('full_like(array(float64, 1d, C), float64, unicode_type)',
excstr)
def test_like_dtype_invalid_str(self):
@njit
def func(arr, fv):
return np.full_like(arr, fv, "ABCDEF")
with self.assertRaises(TypingError) as raises:
func(np.ones(4), 3.4)
excstr = str(raises.exception)
self.assertIn("Invalid NumPy dtype specified: 'ABCDEF'", excstr)
class TestNdIdentity(BaseTest):
def check_identity(self, pyfunc):
self.check_outputs(pyfunc, [(3,)])
def test_identity(self):
def func(n):
return np.identity(n)
self.check_identity(func)
def test_identity_dtype(self):
for dtype in (np.complex64, np.int16, np.bool_, np.dtype('bool'),
'bool_'):
def func(n):
return np.identity(n, dtype)
self.check_identity(func)
def test_like_dtype_non_const_str_kwarg(self):
@njit
def func(n, dt):
return np.identity(n, dt)
with self.assertRaises(TypingError) as raises:
func(4, 'int32')
excstr = str(raises.exception)
self.assertIn('No match', excstr)
regex = re.compile(r'\bidentity\(int.*?, unicode_type\)\B')
self.assertRegex(excstr, regex)
class TestNdEye(BaseTest):
def test_eye_n(self):
def func(n):
return np.eye(n)
self.check_outputs(func, [(1,), (3,)])
def test_eye_n_dtype(self):
# check None option, dtype class, instance of dtype class
for dt in (None, np.complex128, np.complex64(1)):
def func(n, dtype=dt):
return np.eye(n, dtype=dtype)
self.check_outputs(func, [(1,), (3,)])
def test_eye_n_m(self):
def func(n, m):
return np.eye(n, m)
self.check_outputs(func, [(1, 2), (3, 2), (0, 3)])
def check_eye_n_m_k(self, func):
self.check_outputs(func, [(1, 2, 0),
(3, 4, 1),
(3, 4, -1),
(4, 3, -2),
(4, 3, -5),
(4, 3, 5)])
def test_eye_n_m_k(self):
def func(n, m, k):
return np.eye(n, m, k)
self.check_eye_n_m_k(func)
def test_eye_n_m_k_dtype(self):
def func(n, m, k):
return np.eye(N=n, M=m, k=k, dtype=np.int16)
self.check_eye_n_m_k(func)
def test_eye_n_m_k_dtype_instance(self):
dtype = np.dtype('int16')
def func(n, m, k):
return np.eye(N=n, M=m, k=k, dtype=dtype)
self.check_eye_n_m_k(func)
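# --- Small illustrative helper (ours): np.eye builds an n-by-m matrix of zeros
# with ones on the k-th diagonal, which is the behaviour the TestNdEye cases
# above exercise for various (n, m, k) combinations.
def _demo_eye_offsets():
    square = np.eye(3)                           # 3x3 identity
    wide = np.eye(3, 4)                          # 3x4, ones on the main diagonal
    upper = np.eye(3, 4, k=1)                    # diagonal shifted one column right
    lower = np.eye(4, 3, k=-2, dtype=np.int16)   # diagonal shifted two rows down, int16
    return square, wide, upper, lower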
class TestNdDiag(TestCase):
def setUp(self):
v = np.array([1, 2, 3])
hv = np.array([[1, 2, 3]])
vv = np.transpose(hv)
self.vectors = [v, hv, vv]
a3x4 = np.arange(12).reshape(3, 4)
a4x3 = np.arange(12).reshape(4, 3)
self.matricies = [a3x4, a4x3]
def func(q):
return np.diag(q)
self.py = func
self.jit = nrtjit(func)
def func_kwarg(q, k=0):
return np.diag(q, k=k)
self.py_kw = func_kwarg
self.jit_kw = nrtjit(func_kwarg)
def check_diag(self, pyfunc, nrtfunc, *args, **kwargs):
expected = pyfunc(*args, **kwargs)
computed = nrtfunc(*args, **kwargs)
self.assertEqual(computed.size, expected.size)
self.assertEqual(computed.dtype, expected.dtype)
        # NOTE: strides not tested, as NumPy returns a read-only view while Numba returns new data
np.testing.assert_equal(expected, computed)
# create a diag matrix from a vector
def test_diag_vect_create(self):
for d in self.vectors:
self.check_diag(self.py, self.jit, d)
# create a diag matrix from a vector at a given offset
def test_diag_vect_create_kwarg(self):
for k in range(-10, 10):
for d in self.vectors:
self.check_diag(self.py_kw, self.jit_kw, d, k=k)
# extract the diagonal
def test_diag_extract(self):
for d in self.matricies:
self.check_diag(self.py, self.jit, d)
# extract a diagonal at a given offset
def test_diag_extract_kwarg(self):
for k in range(-4, 4):
for d in self.matricies:
self.check_diag(self.py_kw, self.jit_kw, d, k=k)
# check error handling
def test_error_handling(self):
d = np.array([[[1.]]])
cfunc = nrtjit(self.py)
# missing arg
with self.assertRaises(TypeError):
cfunc()
# > 2d
with self.assertRaises(TypingError):
cfunc(d)
with self.assertRaises(TypingError):
dfunc = nrtjit(self.py_kw)
dfunc(d, k=3)
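# --- Illustrative sketch (ours) of the two np.diag behaviours checked above:
# a 1-D input builds a square matrix with that diagonal, while a 2-D input
# extracts the k-th diagonal.
def _demo_diag_create_and_extract():
    created = np.diag(np.array([1, 2, 3]))   # 3x3 matrix with [1, 2, 3] on the diagonal
    mat = np.arange(12).reshape(3, 4)
    extracted = np.diag(mat, k=1)            # offset diagonal: array([1, 6, 11])
    return created, extracted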
class TestLinspace(BaseTest):
def test_linspace_2(self):
def pyfunc(n, m):
return np.linspace(n, m)
self.check_outputs(pyfunc,
[(0, 4), (1, 100), (-3.5, 2.5), (-3j, 2+3j),
(2, 1), (1+0.5j, 1.5j)], exact=False)
def test_linspace_3(self):
def pyfunc(n, m, p):
return np.linspace(n, m, p)
self.check_outputs(pyfunc,
[(0, 4, 9), (1, 4, 3), (-3.5, 2.5, 8),
(-3j, 2+3j, 7), (2, 1, 0),
(1+0.5j, 1.5j, 5), (1, 1e100, 1)],
exact=False)
def test_linspace_accuracy(self):
# Checking linspace reasonably replicates NumPy's algorithm
# see https://github.com/numba/numba/issues/6768
@nrtjit
def foo(n, m, p):
return np.linspace(n, m, p)
n, m, p = 0.0, 1.0, 100
self.assertPreciseEqual(foo(n, m, p), foo.py_func(n, m, p))
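# --- Small sketch (ours) of the np.linspace forms covered above: the
# two-argument call defaults to 50 points, the three-argument call takes an
# explicit count, and complex endpoints are interpolated as well.
def _demo_linspace_forms():
    default_count = np.linspace(0.0, 4.0)       # 50 evenly spaced points by default
    nine_points = np.linspace(0.0, 4.0, 9)      # explicit number of points
    complex_line = np.linspace(-3j, 2 + 3j, 7)  # complex endpoints are supported
    return default_count.size, nine_points, complex_line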
class TestNpyEmptyKeyword(TestCase):
def _test_with_dtype_kw(self, dtype):
def pyfunc(shape):
return np.empty(shape, dtype=dtype)
shapes = [1, 5, 9]
cfunc = nrtjit(pyfunc)
for s in shapes:
expected = pyfunc(s)
got = cfunc(s)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(expected.shape, got.shape)
def test_with_dtype_kws(self):
for dtype in [np.int32, np.float32, np.complex64, np.dtype('complex64')]:
self._test_with_dtype_kw(dtype)
def _test_with_shape_and_dtype_kw(self, dtype):
def pyfunc(shape):
return np.empty(shape=shape, dtype=dtype)
shapes = [1, 5, 9]
cfunc = nrtjit(pyfunc)
for s in shapes:
expected = pyfunc(s)
got = cfunc(s)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(expected.shape, got.shape)
def test_with_shape_and_dtype_kws(self):
for dtype in [np.int32, np.float32, np.complex64, np.dtype('complex64')]:
self._test_with_shape_and_dtype_kw(dtype)
def test_empty_no_args(self):
def pyfunc():
return np.empty()
cfunc = nrtjit(pyfunc)
# Trigger the compilation
# That will cause a TypingError due to missing shape argument
with self.assertRaises(TypingError):
cfunc()
class TestNpArray(MemoryLeakMixin, BaseTest):
def test_0d(self):
def pyfunc(arg):
return np.array(arg)
cfunc = nrtjit(pyfunc)
got = cfunc(42)
self.assertPreciseEqual(got, np.array(42, dtype=np.intp))
got = cfunc(2.5)
self.assertPreciseEqual(got, np.array(2.5))
def test_0d_with_dtype(self):
def pyfunc(arg):
return np.array(arg, dtype=np.int16)
self.check_outputs(pyfunc, [(42,), (3.5,)])
def test_1d(self):
def pyfunc(arg):
return np.array(arg)
cfunc = nrtjit(pyfunc)
# A list
got = cfunc([2, 3, 42])
self.assertPreciseEqual(got, np.intp([2, 3, 42]))
# A heterogeneous tuple
got = cfunc((1.0, 2.5j, 42))
self.assertPreciseEqual(got, np.array([1.0, 2.5j, 42]))
# An empty tuple
got = cfunc(())
self.assertPreciseEqual(got, np.float64(()))
def test_1d_with_dtype(self):
def pyfunc(arg):
return np.array(arg, dtype=np.float32)
self.check_outputs(pyfunc,
[([2, 42],),
([3.5, 1.0],),
((1, 3.5, 42),),
((),),
])
def test_1d_with_str_dtype(self):
def pyfunc(arg):
return np.array(arg, dtype='float32')
self.check_outputs(pyfunc,
[([2, 42],),
([3.5, 1.0],),
((1, 3.5, 42),),
((),),
])
def test_1d_with_non_const_str_dtype(self):
@njit
def func(arg, dt):
return np.array(arg, dtype=dt)
with self.assertRaises(TypingError) as raises:
func((5, 3), 'int32')
excstr = str(raises.exception)
self.assertIn('No match', excstr)
restr = r'\barray\(UniTuple\(int.*? x 2\), dtype=unicode_type\)\B'
regex = re.compile(restr)
self.assertRegex(excstr, regex)
def test_2d(self):
def pyfunc(arg):
return np.array(arg)
cfunc = nrtjit(pyfunc)
# A list of tuples
got = cfunc([(1, 2), (3, 4)])
self.assertPreciseEqual(got, np.intp([[1, 2], [3, 4]]))
got = cfunc([(1, 2.5), (3, 4.5)])
self.assertPreciseEqual(got, np.float64([[1, 2.5], [3, 4.5]]))
# A tuple of lists
got = cfunc(([1, 2], [3, 4]))
self.assertPreciseEqual(got, np.intp([[1, 2], [3, 4]]))
got = cfunc(([1, 2], [3.5, 4.5]))
self.assertPreciseEqual(got, np.float64([[1, 2], [3.5, 4.5]]))
# A tuple of tuples
got = cfunc(((1.5, 2), (3.5, 4.5)))
self.assertPreciseEqual(got, np.float64([[1.5, 2], [3.5, 4.5]]))
got = cfunc(((), ()))
self.assertPreciseEqual(got, np.float64(((), ())))
def test_2d_with_dtype(self):
def pyfunc(arg):
return np.array(arg, dtype=np.int32)
cfunc = nrtjit(pyfunc)
got = cfunc([(1, 2.5), (3, 4.5)])
self.assertPreciseEqual(got, np.int32([[1, 2], [3, 4]]))
def test_raises(self):
def pyfunc(arg):
return np.array(arg)
cfunc = nrtjit(pyfunc)
@contextlib.contextmanager
def check_raises(msg):
with self.assertRaises(TypingError) as raises:
yield
self.assertIn(msg, str(raises.exception))
with check_raises(('array(float64, 1d, C) not allowed in a '
'homogeneous sequence')):
cfunc(np.array([1.]))
with check_raises(('type Tuple(int64, reflected list(int64)<iv=None>) '
'does not have a regular shape')):
cfunc((np.int64(1), [np.int64(2)]))
with check_raises(
"cannot convert Tuple(int64, Record(a[type=int32;offset=0],"
"b[type=float32;offset=4];8;False)) to a homogeneous type",
):
st = np.dtype([('a', 'i4'), ('b', 'f4')])
val = np.zeros(1, dtype=st)[0]
cfunc(((1, 2), (np.int64(1), val)))
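# --- Illustrative sketch (ours) of the plain-NumPy conversions the TestNpArray
# cases above compare against: integer sequences give an integer array, mixed
# numeric tuples promote to a common dtype, and nested sequences of consistent
# length produce a 2-D array.
def _demo_np_array_conversion():
    ints = np.array([2, 3, 42])             # integer dtype
    mixed = np.array((1.0, 2.5j, 42))       # promoted to complex128
    table = np.array([(1, 2.5), (3, 4.5)])  # shape (2, 2), float64
    return ints.dtype, mixed.dtype, table.shape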
class TestNpConcatenate(MemoryLeakMixin, TestCase):
"""
Tests for np.concatenate().
"""
def _3d_arrays(self):
a = np.arange(24).reshape((4, 3, 2))
b = a + 10
c = (b + 10).copy(order='F')
d = (c + 10)[::-1]
e = (d + 10)[...,::-1]
return a, b, c, d, e
@contextlib.contextmanager
def assert_invalid_sizes_over_dim(self, axis):
with self.assertRaises(ValueError) as raises:
yield
self.assertIn("input sizes over dimension %d do not match" % axis,
str(raises.exception))
def test_3d(self):
pyfunc = np_concatenate2
cfunc = nrtjit(pyfunc)
def check(a, b, c, axis):
for ax in (axis, -3 + axis):
expected = pyfunc(a, b, c, axis=ax)
got = cfunc(a, b, c, axis=ax)
self.assertPreciseEqual(got, expected)
def check_all_axes(a, b, c):
for axis in range(3):
check(a, b, c, axis)
a, b, c, d, e = self._3d_arrays()
# Inputs with equal sizes
# C, C, C
check_all_axes(a, b, b)
# C, C, F
check_all_axes(a, b, c)
# F, F, F
check_all_axes(a.T, b.T, a.T)
# F, F, C
check_all_axes(a.T, b.T, c.T)
# F, F, A
check_all_axes(a.T, b.T, d.T)
# A, A, A
# (note Numpy may select the layout differently for other inputs)
check_all_axes(d.T, e.T, d.T)
# Inputs with compatible sizes
check(a[1:], b, c[::-1], axis=0)
check(a, b[:,1:], c, axis=1)
check(a, b, c[:,:,1:], axis=2)
# Different but compatible dtypes
check_all_axes(a, b.astype(np.float64), b)
# Exceptions leak references
self.disable_leak_check()
# Incompatible sizes
for axis in (1, 2, -2, -1):
with self.assert_invalid_sizes_over_dim(0):
cfunc(a[1:], b, b, axis)
for axis in (0, 2, -3, -1):
with self.assert_invalid_sizes_over_dim(1):
cfunc(a, b[:,1:], b, axis)
def test_3d_no_axis(self):
pyfunc = np_concatenate1
cfunc = nrtjit(pyfunc)
def check(a, b, c):
expected = pyfunc(a, b, c)
got = cfunc(a, b, c)
self.assertPreciseEqual(got, expected)
a, b, c, d, e = self._3d_arrays()
# Inputs with equal sizes
# C, C, C
check(a, b, b)
# C, C, F
check(a, b, c)
# F, F, F
check(a.T, b.T, a.T)
# F, F, C
check(a.T, b.T, c.T)
# F, F, A
check(a.T, b.T, d.T)
# A, A, A
# (note Numpy may select the layout differently for other inputs)
check(d.T, e.T, d.T)
# Inputs with compatible sizes
check(a[1:], b, c[::-1])
# Exceptions leak references
self.disable_leak_check()
# Incompatible sizes
with self.assert_invalid_sizes_over_dim(1):
cfunc(a, b[:,1:], b)
def test_typing_errors(self):
pyfunc = np_concatenate1
cfunc = nrtjit(pyfunc)
a = np.arange(15)
b = a.reshape((3, 5))
c = a.astype(np.dtype([('x', np.int8)]))
d = np.array(42)
# Different dimensionalities
with self.assertTypingError() as raises:
cfunc(a, b, b)
self.assertIn("all the input arrays must have same number of dimensions",
str(raises.exception))
# Incompatible dtypes
with self.assertTypingError() as raises:
cfunc(a, c, c)
self.assertIn("input arrays must have compatible dtypes",
str(raises.exception))
# 0-d arrays
with self.assertTypingError() as raises:
cfunc(d, d, d)
self.assertIn("zero-dimensional arrays cannot be concatenated",
str(raises.exception))
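# --- Small sketch (ours) of the np.concatenate semantics relied on above: all
# inputs must match on every dimension except the concatenation axis, and
# negative axes count from the end.
def _demo_concatenate_axes():
    a = np.arange(24).reshape(4, 3, 2)
    b = a + 100
    along_first = np.concatenate((a, b), axis=0)   # shape (8, 3, 2)
    along_last = np.concatenate((a, b), axis=-1)   # shape (4, 3, 4)
    return along_first.shape, along_last.shape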
@unittest.skipUnless(hasattr(np, "stack"), "this Numpy doesn't have np.stack()")
class TestNpStack(MemoryLeakMixin, TestCase):
"""
Tests for np.stack().
"""
def _3d_arrays(self):
a = np.arange(24).reshape((4, 3, 2))
b = a + 10
c = (b + 10).copy(order='F')
d = (c + 10)[::-1]
e = (d + 10)[...,::-1]
return a, b, c, d, e
@contextlib.contextmanager
def assert_invalid_sizes(self):
with self.assertRaises(ValueError) as raises:
yield
self.assertIn("all input arrays must have the same shape",
str(raises.exception))
def check_stack(self, pyfunc, cfunc, args):
expected = pyfunc(*args)
got = cfunc(*args)
# Numba doesn't choose the same layout as Numpy.
# We would like to check the result is contiguous, but we can't
# rely on the "flags" attribute when there are 1-sized
# dimensions.
self.assertEqual(got.shape, expected.shape)
self.assertPreciseEqual(got.flatten(), expected.flatten())
def check_3d(self, pyfunc, cfunc, generate_starargs):
def check(a, b, c, args):
self.check_stack(pyfunc, cfunc, (a, b, c) + args)
def check_all_axes(a, b, c):
for args in generate_starargs():
check(a, b, c, args)
a, b, c, d, e = self._3d_arrays()
# C, C, C
check_all_axes(a, b, b)
# C, C, F
check_all_axes(a, b, c)
# F, F, F
check_all_axes(a.T, b.T, a.T)
# F, F, C
check_all_axes(a.T, b.T, c.T)
# F, F, A
check_all_axes(a.T, b.T, d.T)
# A, A, A
check_all_axes(d.T, e.T, d.T)
# Different but compatible dtypes
check_all_axes(a, b.astype(np.float64), b)
def check_runtime_errors(self, cfunc, generate_starargs):
# Exceptions leak references
self.assert_no_memory_leak()
self.disable_leak_check()
# Inputs have different shapes
a, b, c, d, e = self._3d_arrays()
with self.assert_invalid_sizes():
args = next(generate_starargs())
cfunc(a[:-1], b, c, *args)
def test_3d(self):
"""
stack(3d arrays, axis)
"""
pyfunc = np_stack2
cfunc = nrtjit(pyfunc)
def generate_starargs():
for axis in range(3):
yield (axis,)
yield (-3 + axis,)
self.check_3d(pyfunc, cfunc, generate_starargs)
self.check_runtime_errors(cfunc, generate_starargs)
def test_3d_no_axis(self):
"""
stack(3d arrays)
"""
pyfunc = np_stack1
cfunc = nrtjit(pyfunc)
def generate_starargs():
            yield ()
self.check_3d(pyfunc, cfunc, generate_starargs)
self.check_runtime_errors(cfunc, generate_starargs)
def test_0d(self):
"""
stack(0d arrays)
"""
pyfunc = np_stack1
cfunc = nrtjit(pyfunc)
a = np.array(42)
b = np.array(-5j)
c = np.array(True)
self.check_stack(pyfunc, cfunc, (a, b, c))
def check_xxstack(self, pyfunc, cfunc):
"""
3d and 0d tests for hstack(), vstack(), dstack().
"""
def generate_starargs():
            yield ()
self.check_3d(pyfunc, cfunc, generate_starargs)
# 0d
a = np.array(42)
b = np.array(-5j)
c = np.array(True)
self.check_stack(pyfunc, cfunc, (a, b, a))
def test_hstack(self):
pyfunc = np_hstack
cfunc = nrtjit(pyfunc)
self.check_xxstack(pyfunc, cfunc)
# 1d
a = np.arange(5)
b = np.arange(6) + 10
self.check_stack(pyfunc, cfunc, (a, b, b))
# 2d
a = np.arange(6).reshape((2, 3))
b = np.arange(8).reshape((2, 4)) + 100
self.check_stack(pyfunc, cfunc, (a, b, a))
def test_vstack(self):
pyfunc = np_vstack
cfunc = nrtjit(pyfunc)
self.check_xxstack(pyfunc, cfunc)
# 1d
a = np.arange(5)
b = a + 10
self.check_stack(pyfunc, cfunc, (a, b, b))
# 2d
a = np.arange(6).reshape((3, 2))
b = np.arange(8).reshape((4, 2)) + 100
self.check_stack(pyfunc, cfunc, (a, b, b))
def test_dstack(self):
pyfunc = np_dstack
cfunc = nrtjit(pyfunc)
self.check_xxstack(pyfunc, cfunc)
# 1d
a = np.arange(5)
b = a + 10
self.check_stack(pyfunc, cfunc, (a, b, b))
# 2d
a = np.arange(12).reshape((3, 4))
b = a + 100
self.check_stack(pyfunc, cfunc, (a, b, b))
def test_column_stack(self):
pyfunc = np_column_stack
cfunc = nrtjit(pyfunc)
a = np.arange(4)
b = a + 10
c = np.arange(12).reshape((4, 3))
self.check_stack(pyfunc, cfunc, (a, b, c))
# Exceptions leak references
self.assert_no_memory_leak()
self.disable_leak_check()
# Invalid dims
a = np.array(42)
with self.assertTypingError():
cfunc((a, a, a))
a = a.reshape((1, 1, 1))
with self.assertTypingError():
cfunc((a, a, a))
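# --- Illustrative sketch (ours) contrasting the stacking helpers tested above:
# np.stack inserts a new axis, while hstack/vstack/dstack concatenate along an
# existing (or prepended) axis.
def _demo_stack_family():
    a = np.arange(6).reshape(2, 3)
    b = a + 10
    stacked = np.stack((a, b))   # shape (2, 2, 3): new leading axis
    h = np.hstack((a, b))        # shape (2, 6)
    v = np.vstack((a, b))        # shape (4, 3)
    d = np.dstack((a, b))        # shape (2, 3, 2)
    return stacked.shape, h.shape, v.shape, d.shape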
def benchmark_refct_speed():
def pyfunc(x, y, t):
"""Swap array x and y for t number of times
"""
for i in range(t):
x, y = y, x
return x, y
cfunc = nrtjit(pyfunc)
x = np.random.random(100)
y = np.random.random(100)
t = 10000
def bench_pyfunc():
pyfunc(x, y, t)
def bench_cfunc():
cfunc(x, y, t)
python_time = utils.benchmark(bench_pyfunc)
numba_time = utils.benchmark(bench_cfunc)
print(python_time)
print(numba_time)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "bb41571ce2657c55de1d5a404ad75653",
"timestamp": "",
"source": "github",
"line_count": 1791,
"max_line_length": 83,
"avg_line_length": 29.79452819653825,
"alnum_prop": 0.5399910048349013,
"repo_name": "stonebig/numba",
"id": "ba418f5b68f76cd9af6db4f0f152dce794da5934",
"size": "53362",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "numba/tests/test_dyn_array.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2212"
},
{
"name": "C",
"bytes": "228078"
},
{
"name": "C++",
"bytes": "18847"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "PowerShell",
"bytes": "3153"
},
{
"name": "Python",
"bytes": "2965893"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._invoices_operations import (
build_download_billing_subscription_invoice_request,
build_download_invoice_request,
build_download_multiple_billing_profile_invoices_request,
build_download_multiple_billing_subscription_invoices_request,
build_get_by_id_request,
build_get_by_subscription_and_invoice_id_request,
build_get_request,
build_list_by_billing_account_request,
build_list_by_billing_profile_request,
build_list_by_billing_subscription_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InvoicesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.billing.aio.BillingManagementClient`'s
:attr:`invoices` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_billing_account(
self, billing_account_name: str, period_start_date: str, period_end_date: str, **kwargs: Any
) -> AsyncIterable["_models.Invoice"]:
"""Lists the invoices for a billing account for a given start date and end date. The operation is
supported for billing accounts with agreement type Microsoft Partner Agreement or Microsoft
Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param period_start_date: The start date to fetch the invoices. The date should be specified in
MM-DD-YYYY format. Required.
:type period_start_date: str
:param period_end_date: The end date to fetch the invoices. The date should be specified in
MM-DD-YYYY format. Required.
:type period_end_date: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Invoice or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.billing.models.Invoice]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.InvoiceListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_billing_account_request(
billing_account_name=billing_account_name,
period_start_date=period_start_date,
period_end_date=period_end_date,
api_version=api_version,
template_url=self.list_by_billing_account.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("InvoiceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_billing_account.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/invoices"} # type: ignore
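    # Illustrative usage sketch (ours, not from the SDK docs; variable names are
    # assumptions): the method returns an AsyncItemPaged, so callers iterate with
    # ``async for`` rather than awaiting the call itself, e.g.
    #     async for invoice in client.invoices.list_by_billing_account(
    #             account_name, "01-01-2023", "06-30-2023"):
    #         ...
    # where ``client`` is an authenticated BillingManagementClient.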
@distributed_trace
def list_by_billing_profile(
self,
billing_account_name: str,
billing_profile_name: str,
period_start_date: str,
period_end_date: str,
**kwargs: Any
) -> AsyncIterable["_models.Invoice"]:
"""Lists the invoices for a billing profile for a given start date and end date. The operation is
supported for billing accounts with agreement type Microsoft Partner Agreement or Microsoft
Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param billing_profile_name: The ID that uniquely identifies a billing profile. Required.
:type billing_profile_name: str
:param period_start_date: The start date to fetch the invoices. The date should be specified in
MM-DD-YYYY format. Required.
:type period_start_date: str
:param period_end_date: The end date to fetch the invoices. The date should be specified in
MM-DD-YYYY format. Required.
:type period_end_date: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Invoice or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.billing.models.Invoice]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.InvoiceListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_billing_profile_request(
billing_account_name=billing_account_name,
billing_profile_name=billing_profile_name,
period_start_date=period_start_date,
period_end_date=period_end_date,
api_version=api_version,
template_url=self.list_by_billing_profile.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("InvoiceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_billing_profile.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/invoices"} # type: ignore
@distributed_trace_async
async def get(self, billing_account_name: str, invoice_name: str, **kwargs: Any) -> _models.Invoice:
"""Gets an invoice by billing account name and ID. The operation is supported for billing accounts
with agreement type Microsoft Partner Agreement or Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param invoice_name: The ID that uniquely identifies an invoice. Required.
:type invoice_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Invoice or the result of cls(response)
:rtype: ~azure.mgmt.billing.models.Invoice
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Invoice]
request = build_get_request(
billing_account_name=billing_account_name,
invoice_name=invoice_name,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Invoice", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/invoices/{invoiceName}"} # type: ignore
@distributed_trace_async
async def get_by_id(self, invoice_name: str, **kwargs: Any) -> _models.Invoice:
"""Gets an invoice by ID. The operation is supported for billing accounts with agreement type
Microsoft Partner Agreement or Microsoft Customer Agreement.
:param invoice_name: The ID that uniquely identifies an invoice. Required.
:type invoice_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Invoice or the result of cls(response)
:rtype: ~azure.mgmt.billing.models.Invoice
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Invoice]
request = build_get_by_id_request(
invoice_name=invoice_name,
api_version=api_version,
template_url=self.get_by_id.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Invoice", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/default/invoices/{invoiceName}"} # type: ignore
async def _download_invoice_initial(
self, billing_account_name: str, invoice_name: str, download_token: str, **kwargs: Any
) -> Optional[_models.DownloadUrl]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.DownloadUrl]]
request = build_download_invoice_request(
billing_account_name=billing_account_name,
invoice_name=invoice_name,
download_token=download_token,
api_version=api_version,
template_url=self._download_invoice_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("DownloadUrl", pipeline_response)
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("str", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_download_invoice_initial.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/invoices/{invoiceName}/download"} # type: ignore
@distributed_trace_async
async def begin_download_invoice(
self, billing_account_name: str, invoice_name: str, download_token: str, **kwargs: Any
) -> AsyncLROPoller[_models.DownloadUrl]:
"""Gets a URL to download an invoice. The operation is supported for billing accounts with
agreement type Microsoft Partner Agreement or Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param invoice_name: The ID that uniquely identifies an invoice. Required.
:type invoice_name: str
:param download_token: Download token with document source and document ID. Required.
:type download_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DownloadUrl or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.billing.models.DownloadUrl]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DownloadUrl]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._download_invoice_initial( # type: ignore
billing_account_name=billing_account_name,
invoice_name=invoice_name,
download_token=download_token,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DownloadUrl", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_download_invoice.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/invoices/{invoiceName}/download"} # type: ignore
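    # Illustrative usage sketch (ours, not from the SDK docs): the method returns
    # an AsyncLROPoller, so callers await the call to start the operation and then
    # await the poller's result, e.g.
    #     poller = await client.invoices.begin_download_invoice(
    #         account_name, invoice_name, download_token)
    #     download_url = await poller.result()
    # where ``client`` is an authenticated BillingManagementClient and
    # ``download_url`` is the deserialized DownloadUrl model.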
async def _download_multiple_billing_profile_invoices_initial(
self, billing_account_name: str, download_urls: Union[List[str], IO], **kwargs: Any
) -> Optional[_models.DownloadUrl]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.DownloadUrl]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(download_urls, (IO, bytes)):
_content = download_urls
else:
_json = self._serialize.body(download_urls, "[str]")
request = build_download_multiple_billing_profile_invoices_request(
billing_account_name=billing_account_name,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._download_multiple_billing_profile_invoices_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("DownloadUrl", pipeline_response)
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("str", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_download_multiple_billing_profile_invoices_initial.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/downloadDocuments"} # type: ignore
@overload
async def begin_download_multiple_billing_profile_invoices(
self,
billing_account_name: str,
download_urls: List[str],
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.DownloadUrl]:
"""Gets a URL to download multiple invoice documents (invoice pdf, tax receipts, credit notes) as
a zip file. The operation is supported for billing accounts with agreement type Microsoft
Partner Agreement or Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param download_urls: An array of download urls for individual documents. Required.
:type download_urls: list[str]
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DownloadUrl or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.billing.models.DownloadUrl]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_download_multiple_billing_profile_invoices(
self, billing_account_name: str, download_urls: IO, *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[_models.DownloadUrl]:
"""Gets a URL to download multiple invoice documents (invoice pdf, tax receipts, credit notes) as
a zip file. The operation is supported for billing accounts with agreement type Microsoft
Partner Agreement or Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param download_urls: An array of download urls for individual documents. Required.
:type download_urls: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DownloadUrl or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.billing.models.DownloadUrl]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_download_multiple_billing_profile_invoices(
self, billing_account_name: str, download_urls: Union[List[str], IO], **kwargs: Any
) -> AsyncLROPoller[_models.DownloadUrl]:
"""Gets a URL to download multiple invoice documents (invoice pdf, tax receipts, credit notes) as
a zip file. The operation is supported for billing accounts with agreement type Microsoft
Partner Agreement or Microsoft Customer Agreement.
:param billing_account_name: The ID that uniquely identifies a billing account. Required.
:type billing_account_name: str
:param download_urls: An array of download urls for individual documents. Is either a list type
         or an IO type. Required.
:type download_urls: list[str] or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DownloadUrl or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.billing.models.DownloadUrl]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DownloadUrl]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._download_multiple_billing_profile_invoices_initial( # type: ignore
billing_account_name=billing_account_name,
download_urls=download_urls,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DownloadUrl", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_download_multiple_billing_profile_invoices.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/downloadDocuments"} # type: ignore
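    # Usage note (illustrative): the overloads above accept either a plain list of
    # download URL strings (serialized as a JSON array) or an already-encoded IO
    # stream for the request body; passing a Python list such as ``[url_1, url_2]``
    # is the simpler path, with ``url_1``/``url_2`` standing in for document
    # download URLs obtained elsewhere.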
@distributed_trace
def list_by_billing_subscription(
self, period_start_date: str, period_end_date: str, **kwargs: Any
) -> AsyncIterable["_models.Invoice"]:
"""Lists the invoices for a subscription.
:param period_start_date: Invoice period start date. Required.
:type period_start_date: str
:param period_end_date: Invoice period end date. Required.
:type period_end_date: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Invoice or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.billing.models.Invoice]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.InvoiceListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_billing_subscription_request(
subscription_id=self._config.subscription_id,
period_start_date=period_start_date,
period_end_date=period_end_date,
api_version=api_version,
template_url=self.list_by_billing_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("InvoiceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_billing_subscription.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/default/billingSubscriptions/{subscriptionId}/invoices"} # type: ignore
@distributed_trace_async
async def get_by_subscription_and_invoice_id(self, invoice_name: str, **kwargs: Any) -> _models.Invoice:
"""Gets an invoice by subscription ID and invoice ID.
:param invoice_name: The ID that uniquely identifies an invoice. Required.
:type invoice_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Invoice or the result of cls(response)
:rtype: ~azure.mgmt.billing.models.Invoice
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Invoice]
request = build_get_by_subscription_and_invoice_id_request(
invoice_name=invoice_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_by_subscription_and_invoice_id.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Invoice", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_subscription_and_invoice_id.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/default/billingSubscriptions/{subscriptionId}/invoices/{invoiceName}"} # type: ignore
async def _download_billing_subscription_invoice_initial(
self, invoice_name: str, download_token: str, **kwargs: Any
) -> Optional[_models.DownloadUrl]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.DownloadUrl]]
request = build_download_billing_subscription_invoice_request(
invoice_name=invoice_name,
subscription_id=self._config.subscription_id,
download_token=download_token,
api_version=api_version,
template_url=self._download_billing_subscription_invoice_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("DownloadUrl", pipeline_response)
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("str", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_download_billing_subscription_invoice_initial.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/default/billingSubscriptions/{subscriptionId}/invoices/{invoiceName}/download"} # type: ignore
@distributed_trace_async
async def begin_download_billing_subscription_invoice(
self, invoice_name: str, download_token: str, **kwargs: Any
) -> AsyncLROPoller[_models.DownloadUrl]:
"""Gets a URL to download an invoice.
:param invoice_name: The ID that uniquely identifies an invoice. Required.
:type invoice_name: str
:param download_token: Download token with document source and document ID. Required.
:type download_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DownloadUrl or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.billing.models.DownloadUrl]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DownloadUrl]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._download_billing_subscription_invoice_initial( # type: ignore
invoice_name=invoice_name,
download_token=download_token,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DownloadUrl", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_download_billing_subscription_invoice.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/default/billingSubscriptions/{subscriptionId}/invoices/{invoiceName}/download"} # type: ignore
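    # Hedged usage sketch (illustrative only; the client construction and attribute names below are
    # assumptions, not taken from this file):
    #
    #   client = BillingManagementClient(credential, subscription_id)
    #   poller = await client.invoices.begin_download_billing_subscription_invoice(
    #       invoice_name="<invoice-name>", download_token="<download-token>")
    #   download = await poller.result()   # DownloadUrl once the long-running operation completes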
async def _download_multiple_billing_subscription_invoices_initial(
self, download_urls: Union[List[str], IO], **kwargs: Any
) -> Optional[_models.DownloadUrl]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.DownloadUrl]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(download_urls, (IO, bytes)):
_content = download_urls
else:
_json = self._serialize.body(download_urls, "[str]")
request = build_download_multiple_billing_subscription_invoices_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._download_multiple_billing_subscription_invoices_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("DownloadUrl", pipeline_response)
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("str", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_download_multiple_billing_subscription_invoices_initial.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/default/billingSubscriptions/{subscriptionId}/downloadDocuments"} # type: ignore
@overload
async def begin_download_multiple_billing_subscription_invoices(
self, download_urls: List[str], *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[_models.DownloadUrl]:
"""Gets a URL to download multiple invoice documents (invoice pdf, tax receipts, credit notes) as
a zip file.
:param download_urls: An array of download urls for individual documents. Required.
:type download_urls: list[str]
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DownloadUrl or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.billing.models.DownloadUrl]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_download_multiple_billing_subscription_invoices(
self, download_urls: IO, *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[_models.DownloadUrl]:
"""Gets a URL to download multiple invoice documents (invoice pdf, tax receipts, credit notes) as
a zip file.
:param download_urls: An array of download urls for individual documents. Required.
:type download_urls: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DownloadUrl or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.billing.models.DownloadUrl]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_download_multiple_billing_subscription_invoices(
self, download_urls: Union[List[str], IO], **kwargs: Any
) -> AsyncLROPoller[_models.DownloadUrl]:
"""Gets a URL to download multiple invoice documents (invoice pdf, tax receipts, credit notes) as
a zip file.
        :param download_urls: An array of download urls for individual documents. Is either a list type
         or an IO type. Required.
:type download_urls: list[str] or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DownloadUrl or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.billing.models.DownloadUrl]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-05-01")) # type: Literal["2020-05-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DownloadUrl]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._download_multiple_billing_subscription_invoices_initial( # type: ignore
download_urls=download_urls,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DownloadUrl", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_download_multiple_billing_subscription_invoices.metadata = {"url": "/providers/Microsoft.Billing/billingAccounts/default/billingSubscriptions/{subscriptionId}/downloadDocuments"} # type: ignore
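    # Hedged companion sketch for the multi-document variant (names assumed as above): the
    # download_urls list would typically hold URLs gathered from earlier per-invoice calls.
    #
    #   poller = await client.invoices.begin_download_multiple_billing_subscription_invoices(
    #       download_urls=["<doc-url-1>", "<doc-url-2>"])
    #   zipped = await poller.result()   # a single DownloadUrl pointing at a zip of the documents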
| {
"content_hash": "96e3d9bed67a238331276837a110e590",
"timestamp": "",
"source": "github",
"line_count": 1132,
"max_line_length": 211,
"avg_line_length": 49.88427561837456,
"alnum_prop": 0.6495599355398537,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5d47aba1dbc2fffb46b2d96a8c910353813e56e5",
"size": "56969",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/billing/azure-mgmt-billing/azure/mgmt/billing/aio/operations/_invoices_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import calendar
SP2_ID = 44
start = datetime(2013,7,1)
end = datetime(2013,8,1)
timestep = 1 #hours
UNIX_start = calendar.timegm(start.utctimetuple())
UNIX_end = calendar.timegm(end.utctimetuple())
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
cursor.execute('SELECT id, UNIX_UTC_ts FROM alert_hk_data WHERE UNIX_UTC_ts >= %s and UNIX_UTC_ts < %s',(UNIX_start,UNIX_end))
data = cursor.fetchall()
print 'starting loop'
LOG_EVERY_N = 10
i=0
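#For each housekeeping row, stamp its id onto every mass/number row whose
#interval end falls within the following 60-second window.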
for data_point in data:
id = data_point[0]
start_time = data_point[1]
end_time = start_time + 60
cursor.execute(('UPDATE alert_mass_number_data_2013 SET HK_id = %s WHERE UNIX_UTC_ts_int_end >= %s AND UNIX_UTC_ts_int_end < %s'),(id,start_time,end_time))
cnx.commit()
i+=1
if (i % LOG_EVERY_N) == 0:
print 'record: ', i
cnx.close()
| {
"content_hash": "439cba287e148f2163d5b72a162eaed8",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 157,
"avg_line_length": 25.829268292682926,
"alnum_prop": 0.7110481586402266,
"repo_name": "annahs/atmos_research",
"id": "0ccce435dd05437a9a44a9e4b55faced52be8313",
"size": "1059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AL_add_HK_keys_to_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1677056"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/droid/shared_medic_module_4.iff"
result.attribute_template_id = -1
result.stfName("craft_droid_ingredients_n","medic_module_4")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "283e66ae6c47f550313d8259c156b15f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.7051671732522796,
"repo_name": "obi-two/Rebelion",
"id": "44a8064879ac8895f31585f39809717e32822616",
"size": "474",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/component/droid/shared_medic_module_4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import os
from pyulog import *
from analyse_logdata_ekf import *
"""
Performs a health assessment on the ecl EKF navigation estimator data contained in an ULog file
Outputs a health assessment summary in a csv file named <inputfilename>.mdat.csv
Outputs summary plots in a pdf file named <inputfilename>.pdf
"""
parser = argparse.ArgumentParser(description='Analyse the estimator_status and ekf2_innovation message data')
parser.add_argument('filename', metavar='file.ulg', help='ULog input file')
parser.add_argument('--no-plots', action='store_true',
help='Whether to only analyse and not plot the summaries for developers.')
parser.add_argument('--check-level-thresholds', type=str, default=None,
help='The csv file of fail and warning test thresholds for analysis.')
def is_valid_directory(parser, arg):
if os.path.isdir(arg):
# Directory exists so return the directory
return arg
else:
parser.error('The directory {} does not exist'.format(arg))
args = parser.parse_args()
## load the log and extract the necessary data for the analyses
ulog = ULog(args.filename, None)
data = ulog.data_list
# extract data from innovations and status messages
for d in data:
if d.name == 'estimator_status':
estimator_status_data = d.data
print('found estimator_status data')
for d in data:
if d.name == 'ekf2_innovations':
ekf2_innovations_data = d.data
print('found ekf2_innovation data')
# extract data from sensor preflight check message
sensor_preflight = {}
for d in data:
if d.name == 'sensor_preflight':
sensor_preflight_data = d.data
print('found sensor_preflight data')
if args.check_level_thresholds:
check_level_dict_filename = args.check_level_thresholds
else:
file_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
check_level_dict_filename = os.path.join(file_dir, "check_level_dict.csv")
# get the dictionary of fail and warning test thresholds from a csv file
with open(check_level_dict_filename, 'r') as file:
check_levels = {}
for line in file:
x = line.split(",")
a = x[0]
b = x[1]
check_levels[a] = float(b)
print('Using test criteria loaded from {:s}'.format(check_level_dict_filename))
# perform the ekf analysis
test_results = analyse_ekf(
estimator_status_data, ekf2_innovations_data, sensor_preflight_data,
check_levels, plot=not args.no_plots, output_plot_filename=args.filename + ".pdf")
# print master test status to console
if (test_results['master_status'][0] == 'Pass'):
print('No anomalies detected')
elif (test_results['master_status'][0] == 'Warning'):
print('Minor anomalies detected')
elif (test_results['master_status'][0] == 'Fail'):
print('Major anomalies detected')
# write metadata to a .csv file
with open(args.filename + ".mdat.csv", "w") as file:
file.write("name,value,description\n")
# loop through the test results dictionary and write each entry on a separate row, with data comma separated
# save data in alphabetical order
key_list = list(test_results.keys())
key_list.sort()
for key in key_list:
file.write(key+","+str(test_results[key][0])+","+test_results[key][1]+"\n")
print('Test results written to {:s}.mdat.csv'.format(args.filename))
if not args.no_plots:
print('Plots saved to {:s}.pdf'.format(args.filename))
| {
"content_hash": "95b84942a859f072fd271a874f703dfd",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 112,
"avg_line_length": 35.38383838383838,
"alnum_prop": 0.6908364259206394,
"repo_name": "mcgill-robotics/Firmware",
"id": "eec2abdac5f308a33f2713e276e6ae962783d277",
"size": "3527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tools/ecl_ekf/process_logdata_ekf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3646370"
},
{
"name": "C++",
"bytes": "7297699"
},
{
"name": "CMake",
"bytes": "750867"
},
{
"name": "GDB",
"bytes": "424"
},
{
"name": "HTML",
"bytes": "2282"
},
{
"name": "Io",
"bytes": "191"
},
{
"name": "MATLAB",
"bytes": "35618"
},
{
"name": "Makefile",
"bytes": "15544"
},
{
"name": "NSIS",
"bytes": "26894"
},
{
"name": "Perl",
"bytes": "11401"
},
{
"name": "Python",
"bytes": "900464"
},
{
"name": "Shell",
"bytes": "94066"
},
{
"name": "XSLT",
"bytes": "6736"
}
],
"symlink_target": ""
} |
import matplotlib
#matplotlib.use('Agg') #used to keep Amazon happy
from os import listdir
from utils import *
import numpy as np
import sys
import matplotlib.pyplot as plt
import plotter_new as pn
def get_report(d):
"""Given a building record d, return a Building Report.
A Building Report is a dictionary mapping names of features to values.
TODO: Say what each feature is.
"""
toR = {}
kwhs, kwhs_oriflag = d["kwhs"]
temps, temps_oriflag = d["temps"]
times = d["times"]
toR["naics"] = d["naics"] #Just to see what happens here...
#General stats
toR["avg"] = np.average(kwhs[kwhs_oriflag])
toR["max"] = np.max(kwhs[kwhs_oriflag])
toR["min"] = np.min(kwhs[kwhs_oriflag])
toR["var"] = np.var(kwhs[kwhs_oriflag])
toR["med"] = np.median(kwhs[kwhs_oriflag])
toR["total"] = np.sum(kwhs[kwhs_oriflag])
#Difference between weekday and weekend
is_midnight = (lambda x: x.hour == 0)
skip_weekdays = (lambda x: x.weekday() < 5)
weekends, _ = pn.get_periods(d, 24, is_midnight, "kwhs", skip_weekdays)
skip_weekend = (lambda x: x.weekday() >= 5)
weekdays, _ = pn.get_periods(d, 24, is_midnight, "kwhs", skip_weekend)
weekday_peaks = np.ma.max(weekdays, axis = 1)
weekend_peaks = np.ma.max(weekends, axis = 1)
avg_weekday_peak = np.ma.average(weekday_peaks)
avg_weekend_peak = np.ma.average(weekend_peaks)
toR["week_day_vs_end_peaks"] = avg_weekday_peak - avg_weekend_peak
#avg hour of daily peak:
days, new_times = pn.get_periods(d, 24, is_midnight)
peak_hours = np.ma.argmax(days, axis = 1)
toR["avg_tod_peak"] = np.ma.average(peak_hours)
#TODO: Separate into weekend/day
#avg distance (in hours) to temp peak
temps, new_times = pn.get_periods(d, 24, is_midnight, which = "temps")
peak_temps = np.ma.argmax(temps, axis = 1)
dists = np.ma.abs(peak_temps - peak_hours)
toR["avg_temp_to_kwhs_peaks"] = np.ma.average(dists)
#TODO: Add avg distance (in hours) of daily peak to natural noon
#Phantom load approximation
is_midnight = (lambda x: x.hour == 0)
skip_weekdays = (lambda x: x.weekday() < 5)
weekends, new_times = pn.get_periods(d, 24, is_midnight, skip_fun = skip_weekdays)
is_midnight = (lambda x: x.hour == 0)
skip_weekends = (lambda x: x.weekday() >= 5)
weekdays, new_times = pn.get_periods(d, 24, is_midnight, skip_fun = skip_weekends)
toR["avg_weekday_min"] = np.ma.average(np.ma.min(weekdays, axis = 0))
toR["avg_weekend_min"] = np.ma.average(np.ma.min(weekends, axis = 0))
#Distance correlation between temps and kwhs (agg days)
#Note that we use imputed temps, but only original kwhs
is_midnight = (lambda x: x.hour == 0)
days, new_times = pn.get_periods(d, 24, is_midnight)
day_totals = np.ma.sum(days, axis = 1)
temps, new_times = pn.get_periods(d, 24, is_midnight, which = "temps")
temp_avgs = np.ma.average(temps, axis = 1)
toR["dCorr_kwhs_temps"] = dCorr(day_totals, temp_avgs)
#Stats regarding first derivative
first_deriv = kwhs[1:] - kwhs[:-1]
first_deriv_oriflag = np.logical_and(kwhs_oriflag[1:], kwhs_oriflag[:-1])
oris = first_deriv[first_deriv_oriflag]
increases = oris[oris > 0]
decreases = oris[oris < 0]
toR["avg_increase"] = np.average(increases)
toR["avg_decrease"] = np.average(decreases)
toR["var_change"] = np.var(oris)
#Stats regarding DFT
a = np.fft.fft(kwhs)
num_times = len(kwhs)
half = (num_times + 1) // 2
a = a[0:half +1]
a[0] = 0 #drop constant part of signal
power = a**2 #definition of power
power = np.absolute(power) #to make everything real
total_power = np.sum(power)
toR["spectral_power"] = np.real(total_power)
highlighted_periods = np.array([24, 168])
highlighted_freqs = float(num_times) / highlighted_periods
for p, f in zip(highlighted_periods, highlighted_freqs):
mykey = "prop_of_" + str(p)
toR[mykey] = np.absolute((a[f] ** 2)) / total_power
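    # Worked example (hedged, not from the original): with one year of hourly data num_times is
    # roughly 8760, so the 24-hour period maps to frequency index 8760/24 = 365 and the weekly
    # (168-hour) period to index ~52; prop_of_24 and prop_of_168 are then the fraction of total
    # spectral power carried by the daily and weekly cycles.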
#Missing values:
toR["num_missing"] = len([x for x in kwhs_oriflag if not x])
#Relating to boxplots
hr_start = 8
hr_stop = 16
in_schedule = (lambda x: hr_start <= x.hour <= hr_stop)
out_schedule = (lambda x: x.hour < hr_start or x.hour > hr_stop)
is_weekday = (lambda x: x.weekday() < 5)
is_weekend = (lambda x: x.weekday() >= 5)
out_flag = [out_schedule(t) for t in times]
in_flag = [in_schedule(t) for t in times]
weekday_flag = [is_weekday(t) for t in times]
weekend_flag = [is_weekend(t) for t in times]
weekday_kwhs_in = kwhs[np.logical_and(kwhs_oriflag, np.logical_and(in_flag, weekday_flag))]
weekday_kwhs_out = kwhs[np.logical_and(kwhs_oriflag, np.logical_and(out_flag, weekday_flag))]
weekend_kwhs_in = kwhs[np.logical_and(kwhs_oriflag, np.logical_and(in_flag, weekend_flag))]
weekend_kwhs_out = kwhs[np.logical_and(kwhs_oriflag, np.logical_and(out_flag, weekend_flag))]
    #width of the 50 percent block (interquartile range)
weekday_working_75 = np.percentile(weekday_kwhs_in, 75)
weekday_working_25 = np.percentile(weekday_kwhs_in, 25)
toR["weekday_working_width"] = weekday_working_75 - weekday_working_25
weekday_nonworking_75 = np.percentile(weekday_kwhs_out, 75)
weekday_nonworking_25 = np.percentile(weekday_kwhs_out, 25)
toR["weekday_nonworking_width"] = weekday_working_75 - weekday_working_25
weekend_working_75 = np.percentile(weekend_kwhs_in, 75)
weekend_working_25 = np.percentile(weekend_kwhs_in, 25)
toR["weekend_working_width"] = weekend_working_75 - weekend_working_25
weekend_nonworking_75 = np.percentile(weekend_kwhs_out, 75)
weekend_nonworking_25 = np.percentile(weekend_kwhs_out, 25)
toR["weekend_nonworking_width"] = weekend_working_75 - weekend_working_25
#now comparing medians
weekday_working_med = np.median(weekday_kwhs_in)
weekday_nonworking_med = np.median(weekday_kwhs_out)
weekend_working_med = np.median(weekend_kwhs_in)
weekend_nonworking_med = np.median(weekend_kwhs_out)
toR["weekday_med_dif"] = weekday_working_med - weekday_nonworking_med
toR["weekend_med_dif"] = weekend_working_med - weekend_nonworking_med
toR["day_vs_end_working"] = weekday_working_med - weekend_working_med
toR["day_vs_end_nonworking"] = weekday_nonworking_med - weekend_nonworking_med
return toR
def agg_reports(list_of_brecs):
"""Given a list of building records, return an aggregate report.
In an aggregate report, each key in a Building Report is mapped to a list of values (one per building).
"""
toR = {}
naics_codes = []
errs = open("errs.txt", "w")
for d in list_of_brecs:
try:
r = get_report(d)
for k in r.keys():
if k in toR:
toR[k].append(r[k])
else:
toR[k] = [r[k]]
#naics_codes.append(d["naics"])
naics_codes.append(d["btype"])
print d["btype"]
except Exception as inst:
print "Failed", d["bid"]
print type(inst) # the exception instance
print inst.args # arguments stored in .args
print inst # __str__ allows args to printed directly
errs.write(str(d["bid"]) + "\n")
sys.stdout.flush()
return toR, naics_codes
def plot_agg_reports(agg, add_str = ""):
for k in agg.keys():
fig = plt.figure(figsize = (5, 5))
ax = fig.add_subplot(1, 1, 1)
num_bins = 100# int(np.log2(len(agg.keys())) + 1)
ax.hist(agg[k], bins = num_bins)
ax.set_title(k)
plt.savefig(fig_loc + "agg_reports_" + k + add_str + ".png")
plt.close()
def process_prison_quarters():
data, desc = qload("state_b_records_" + str(the_year) + "_quarters.pkl")
toSave = []
for d in data:
print d["bid"]
sys.stdout.flush()
toSave.append((d["bid"], get_report(d)))
qdump((toSave, "The Building Reports for the prison facilities, by quarter"), "prison_reps.pkl")
if __name__ == "__main__":
process_prison_quarters(); exit()
finns = [x for x in listdir(data_loc) if "_updated.pkl" in x and "oneyear" in x]
ds = []
add_str = "_btype"
for finn in finns:
d, desc = qload(finn)
ds.append(d)
agg, naicss = agg_reports(ds)
qdump((agg, "The aggregate reports"), "agg_reps.pkl")
#naicss = [d["naics"] for d in ds]
qdump((naicss, "The NAICS codes in the same order as the agg report"), "naics_codes" + add_str + ".pkl")
#plt_agg_reports(agg)
exit()
for naics in naicss:
new_ds = [d for d in ds if d["naics"] == naics]
agg = agg_reports(new_ds)
plot_agg_reports(agg, add_str = "_" + str(naics) + "with_" + str(len(new_ds)))
#agg = agg_reports(ds)
#plot_agg_reports(agg)
| {
"content_hash": "03d04b320aa752c7b92316875db314df",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 110,
"avg_line_length": 39.13559322033898,
"alnum_prop": 0.6014508445214378,
"repo_name": "anselmbradford/energywise",
"id": "852619a09c5ec45a7cd092a37a5460641120ce3d",
"size": "9236",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Code/report_card.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73702"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'EnergyGoalSetting.power_meter_interval'
db.add_column('resource_goal_energygoalsetting', 'power_meter_interval', self.gf('django.db.models.fields.IntegerField')(default=10), keep_default=False)
def backwards(self, orm):
# Deleting field 'EnergyGoalSetting.power_meter_interval'
db.delete_column('resource_goal_energygoalsetting', 'power_meter_interval')
models = {
'resource_goal.energybaselinedaily': {
'Meta': {'ordering': "('team', 'day')", 'unique_together': "(('team', 'day'),)", 'object_name': 'EnergyBaselineDaily'},
'day': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'usage': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'resource_goal.energybaselinehourly': {
'Meta': {'ordering': "('team', 'day', 'hour')", 'unique_together': "(('team', 'day', 'hour'),)", 'object_name': 'EnergyBaselineHourly'},
'day': ('django.db.models.fields.IntegerField', [], {}),
'hour': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'usage': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'resource_goal.energygoal': {
'Meta': {'ordering': "('team', 'date')", 'unique_together': "(('team', 'date'),)", 'object_name': 'EnergyGoal'},
'date': ('django.db.models.fields.DateField', [], {}),
'goal_status': ('django.db.models.fields.CharField', [], {'default': "'Not available'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Team']"})
},
'resource_goal.energygoalsetting': {
'Meta': {'ordering': "('team',)", 'unique_together': "(('team',),)", 'object_name': 'EnergyGoalSetting'},
'goal_percent_reduction': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'goal_points': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manual_entry': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manual_entry_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'power_meter_interval': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Team']"}),
'warning_percent_reduction': ('django.db.models.fields.IntegerField', [], {'default': '3'})
},
'resource_goal.waterbaselinedaily': {
'Meta': {'ordering': "('team', 'day')", 'unique_together': "(('team', 'day'),)", 'object_name': 'WaterBaselineDaily'},
'day': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'usage': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'resource_goal.waterbaselinehourly': {
'Meta': {'ordering': "('team', 'day', 'hour')", 'unique_together': "(('team', 'day', 'hour'),)", 'object_name': 'WaterBaselineHourly'},
'day': ('django.db.models.fields.IntegerField', [], {}),
'hour': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'usage': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'resource_goal.watergoal': {
'Meta': {'ordering': "('team', 'date')", 'unique_together': "(('team', 'date'),)", 'object_name': 'WaterGoal'},
'date': ('django.db.models.fields.DateField', [], {}),
'goal_status': ('django.db.models.fields.CharField', [], {'default': "'Not available'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Team']"})
},
'resource_goal.watergoalsetting': {
'Meta': {'ordering': "('team',)", 'unique_together': "(('team',),)", 'object_name': 'WaterGoalSetting'},
'goal_percent_reduction': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'goal_points': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manual_entry': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manual_entry_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Team']"}),
'warning_percent_reduction': ('django.db.models.fields.IntegerField', [], {'default': '3'})
},
'team_mgr.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'})
},
'team_mgr.team': {
'Meta': {'object_name': 'Team'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'})
}
}
complete_apps = ['resource_goal']
| {
"content_hash": "018d86491224cc0e5279fa4f415b19a3",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 161,
"avg_line_length": 69.17924528301887,
"alnum_prop": 0.5472521478249012,
"repo_name": "vijayanandau/KnowledgeShare",
"id": "4ec578e45aac177b633af10a40f0c5c2a5798da4",
"size": "7351",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "makahiki/apps/widgets/resource_goal/migrations/0002_auto__add_field_energygoalsetting_power_meter_interval.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "620743"
},
{
"name": "Python",
"bytes": "1608424"
},
{
"name": "Shell",
"bytes": "5905"
}
],
"symlink_target": ""
} |
"""The foscam component."""
from libpyfoscam import FoscamCamera
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
Platform,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_registry import async_migrate_entries
from .config_flow import DEFAULT_RTSP_PORT
from .const import CONF_RTSP_PORT, DOMAIN, LOGGER, SERVICE_PTZ, SERVICE_PTZ_PRESET
PLATFORMS = [Platform.CAMERA]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up foscam from a config entry."""
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = entry.data
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
if not hass.data[DOMAIN]:
hass.services.async_remove(domain=DOMAIN, service=SERVICE_PTZ)
hass.services.async_remove(domain=DOMAIN, service=SERVICE_PTZ_PRESET)
return unload_ok
async def async_migrate_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Migrate old entry."""
LOGGER.debug("Migrating from version %s", entry.version)
if entry.version == 1:
# Change unique id
@callback
def update_unique_id(entry):
return {"new_unique_id": entry.entry_id}
await async_migrate_entries(hass, entry.entry_id, update_unique_id)
entry.unique_id = None
# Get RTSP port from the camera or use the fallback one and store it in data
camera = FoscamCamera(
entry.data[CONF_HOST],
entry.data[CONF_PORT],
entry.data[CONF_USERNAME],
entry.data[CONF_PASSWORD],
verbose=False,
)
ret, response = await hass.async_add_executor_job(camera.get_port_info)
rtsp_port = DEFAULT_RTSP_PORT
        if ret == 0:
rtsp_port = response.get("rtspPort") or response.get("mediaPort")
hass.config_entries.async_update_entry(
entry, data={**entry.data, CONF_RTSP_PORT: rtsp_port}
)
# Change entry version
entry.version = 2
LOGGER.info("Migration to version %s successful", entry.version)
return True
| {
"content_hash": "c41cd281474abb4c1bf2122acf14ada2",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 84,
"avg_line_length": 30.337349397590362,
"alnum_prop": 0.6656076250992852,
"repo_name": "nkgilley/home-assistant",
"id": "ef88d0f671ac8d5915007cc0131d76fbfcf4de53",
"size": "2518",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/foscam/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import sys
from vacs.models import Command, Experiment, Vac, Evaluation,\
Assignment, Participant, Score, ValAssignment, Validation
from django.contrib.auth import get_user_model
import csv
import numpy as np
# Get all the Scores for the experiment
experiment_id = 77
scores = Score.objects.filter(experiment__id=experiment_id)
vacs = Vac.objects.filter(experiment__id=experiment_id)
commands = Command.objects.all()
lexicon_index=8
l7_scores = scores.filter(lexicon_number=lexicon_index)
write_data = [["Command", "Score"]]
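# For "Complexity" and "Amount of movement" the raw score is inverted (1 - score) so that
# higher always means better before averaging; commands whose mean clears 0.5 count toward
# the over-threshold percentage written on the last row.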
with open('gestureclean/analytics/best_lexicon.csv', 'w') as filewriter:
writer = csv.writer(filewriter)
counter = 0.0
over_threshold = 0.0
for command in commands:
command_scores = l7_scores.filter(command=command)
score_mean = round(np.mean([1-s.score if (s.vac.name == "Complexity" or s.vac.name == "Amount of movement") else s.score
for s in command_scores]),2)
if score_mean > 0.5:
over_threshold +=1
write_data.append([command.name,score_mean])
counter += 1
write_data.append(["Commands over threshold", str((over_threshold/counter)*100)+"%"])
writer.writerows(write_data)
# Leftover MATLAB snippet, translated here to Python but kept commented out because
# `fileList` is never defined in this script:
# with open('myfile.txt', 'w') as fid:
#     for item in fileList:
#         fid.write('%s\n' % item)
| {
"content_hash": "523d1661357f44b4c1715657ee5148e7",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 129,
"avg_line_length": 35.916666666666664,
"alnum_prop": 0.6860015467904099,
"repo_name": "glebysg/GC_server",
"id": "779de11bbb843599c2838d082e538d6c7481f83b",
"size": "1293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gestureclean/score_statistics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54726"
},
{
"name": "HTML",
"bytes": "37367"
},
{
"name": "JavaScript",
"bytes": "199355"
},
{
"name": "MATLAB",
"bytes": "1484"
},
{
"name": "Python",
"bytes": "125295"
}
],
"symlink_target": ""
} |
from data_collection.management.commands import BaseXpressWebLookupCsvImporter
class Command(BaseXpressWebLookupCsvImporter):
council_id = 'E07000128'
addresses_name = 'WyrePropertyPostCodePollingStationWebLookup-2017-03-08 2.CSV'
stations_name = 'WyrePropertyPostCodePollingStationWebLookup-2017-03-08 2.CSV'
elections = [
'local.lancashire.2017-05-04',
'parl.2017-06-08'
]
| {
"content_hash": "0816ba617eaa6e16c01d737f5df2ab36",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 84,
"avg_line_length": 42.6,
"alnum_prop": 0.7300469483568075,
"repo_name": "chris48s/UK-Polling-Stations",
"id": "6c2398b419b3abfbb24e1a92bb037e19179bdafe",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/data_collection/management/commands/import_wyre.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "347"
},
{
"name": "Gherkin",
"bytes": "3720"
},
{
"name": "HTML",
"bytes": "30715"
},
{
"name": "JavaScript",
"bytes": "3226"
},
{
"name": "Python",
"bytes": "589520"
}
],
"symlink_target": ""
} |
import sys, os
import ldac
import astropy, astropy.io.fits as pyfits
from adam_quicktools_ArgCleaner import ArgCleaner
#flinput=ArgCleaner()[0]
#flnew=flinput.replace("unstacked","unstacked.split_apers")
#flproto=flinput.replace("unstacked","unstacked.proto-tmp")
def main(flinput,flnew):
cat=ldac.openObjectFile(flinput)
flproto=flinput.replace(".cat",".proto-tmp.cat")
aper_keys=[]
aper1_keys=[]
for key in cat.keys():
if "APER-" in key:
aper_keys.append(key)
keynew=key.replace("APER-","APER1-")
aper1_keys.append(keynew)
#catnew=ldac.openObjectFile(flnew)
ncs=[]
for key in aper_keys:
keynew=key.replace("APER-","APER1-")
col=cat[key]
if not col.shape[-1]==2:
raise Exception("this column doesn't seem to need to be split (shape is "+str(col.shape)+"), but it has APER- in the name. Thats weird and contradictory")
ncs.append(pyfits.Column(name=keynew,format='1E',array=col[:,1]))
hdu = pyfits.PrimaryHDU()
hduSTDTAB = pyfits.BinTableHDU.from_columns(ncs)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduSTDTAB)
hdulist[1].header.set('EXTNAME','OBJECTS')
hdulist.writeto(flproto,overwrite=True)
os.system("ldacjoinkey -p "+flproto+" -i "+flinput+" -o "+flnew+" -t OBJECTS -k "+' '.join(aper1_keys))
os.system("rm -f "+flproto)
return
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
#example:
# parser.add_option('-3', '--threesec',
# dest='threesec',
# action='store_true',
# help='Treat as a 3second exposure',
# default=False)
#parser.add_option('-c', '--cluster', dest='cluster', help='Cluster name')
#parser.add_option('-f', '--filtername', dest='filter', help='Filter to calibrate')
#parser.add_option('-m', '--maindir', dest='maindir', help='subaru directory')
parser.add_option('-i', '--inputcat',
dest='input_fl',
help='input catalog with vector ldac objects.')
parser.add_option('-o', '--outputcat',
dest='output_fl',
help='output catalog name. will have only scalar ldac objects.')
from adam_quicktools_ArgCleaner import ArgCleaner
argv=ArgCleaner()
options, args = parser.parse_args(argv)
#if options.cluster is None:
# parser.error('Need to specify cluster!')
print "Called with:"
print options
if options.input_fl is None:
parser.error('Need to specify input catalog file!')
if options.output_fl is None:
parser.error('Need to specify output catalog file!')
main(flinput=options.input_fl,flnew=options.output_fl)
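    #Illustrative call (catalog names are placeholders, following the unstacked -> unstacked.split_apers convention hinted above):
    #  python adam_bigmacs-cat_array_splitter.py -i field_unstacked.cat -o field_unstacked.split_apers.cat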
| {
"content_hash": "f9e4a0f1438cecf52c3ea7a8e69fb1af",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 157,
"avg_line_length": 35.49315068493151,
"alnum_prop": 0.6630644538788113,
"repo_name": "deapplegate/wtgpipeline",
"id": "137f4f1f032892921082c5de46c440916f2c5b91",
"size": "3120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adam_bigmacs-cat_array_splitter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "183"
},
{
"name": "C",
"bytes": "7161"
},
{
"name": "C++",
"bytes": "65083"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Perl",
"bytes": "38992"
},
{
"name": "Python",
"bytes": "13671330"
},
{
"name": "Roff",
"bytes": "48622"
},
{
"name": "Shell",
"bytes": "3637313"
},
{
"name": "XSLT",
"bytes": "54208"
}
],
"symlink_target": ""
} |
from oslo_log import log
from oslo_utils import excutils
from manila.common import constants as common
from manila import exception
from manila.i18n import _, _LI
from manila.share.drivers.nexenta.ns4 import jsonrpc
from manila.share.drivers.nexenta import utils
LOG = log.getLogger(__name__)
NOT_EXIST = 'does not exist'
DEP_CLONES = 'has dependent clones'
class NFSHelper(object):
def __init__(self, configuration):
self.configuration = configuration
self.nfs_mount_point_base = (
self.configuration.nexenta_mount_point_base)
self.dataset_compression = (
self.configuration.nexenta_dataset_compression)
self.dataset_dedupe = self.configuration.nexenta_dataset_dedupe
self.nms = None
self.nms_protocol = self.configuration.nexenta_rest_protocol
self.nms_host = self.configuration.nexenta_host
self.volume = self.configuration.nexenta_volume
self.share = self.configuration.nexenta_nfs_share
self.nms_port = self.configuration.nexenta_rest_port
self.nms_user = self.configuration.nexenta_user
self.nfs = self.configuration.nexenta_nfs
self.nms_password = self.configuration.nexenta_password
self.storage_protocol = 'NFS'
def do_setup(self):
if self.nms_protocol == 'auto':
protocol, auto = 'http', True
else:
protocol, auto = self.nms_protocol, False
path = '/rest/nms/'
self.nms = jsonrpc.NexentaJSONProxy(
protocol, self.nms_host, self.nms_port, path, self.nms_user,
self.nms_password, auto=auto)
def check_for_setup_error(self):
if not self.nms.volume.object_exists(self.volume):
raise exception.NexentaException(reason=_(
"Volume %s does not exist in NexentaStor appliance.") %
self.volume)
folder = '%s/%s' % (self.volume, self.share)
create_folder_props = {
'recordsize': '4K',
'quota': 'none',
'compression': self.dataset_compression,
}
if not self.nms.folder.object_exists(folder):
self.nms.folder.create_with_props(
self.volume, self.share, create_folder_props)
def create_filesystem(self, share):
"""Create file system."""
create_folder_props = {
'recordsize': '4K',
'quota': '%sG' % share['size'],
'compression': self.dataset_compression,
}
if not self.configuration.nexenta_thin_provisioning:
create_folder_props['reservation'] = '%sG' % share['size']
parent_path = '%s/%s' % (self.volume, self.share)
self.nms.folder.create_with_props(
parent_path, share['name'], create_folder_props)
path = self._get_share_path(share['name'])
return [self._get_location_path(path, share['share_proto'])]
def set_quota(self, share_name, new_size):
if self.configuration.nexenta_thin_provisioning:
quota = '%sG' % new_size
self.nms.folder.set_child_prop(
self._get_share_path(share_name), 'quota', quota)
def _get_location_path(self, path, protocol):
location = None
if protocol == 'NFS':
location = {'path': '%s:/volumes/%s' % (self.nms_host, path)}
else:
raise exception.InvalidShare(
reason=(_('Only NFS protocol is currently supported.')))
return location
def delete_share(self, share_name):
"""Delete share."""
folder = self._get_share_path(share_name)
try:
self.nms.folder.destroy(folder.strip(), '-r')
except exception.NexentaException as e:
with excutils.save_and_reraise_exception() as exc:
if NOT_EXIST in e.args[0]:
LOG.info(_LI('Folder %s does not exist, it was '
'already deleted.'), folder)
exc.reraise = False
def _get_share_path(self, share_name):
return '%s/%s/%s' % (self.volume, self.share, share_name)
def _get_snapshot_name(self, snapshot_name):
return 'snapshot-%s' % snapshot_name
def create_snapshot(self, share_name, snapshot_name):
"""Create a snapshot."""
folder = self._get_share_path(share_name)
self.nms.folder.create_snapshot(folder, snapshot_name, '-r')
model_update = {'provider_location': '%s@%s' % (folder, snapshot_name)}
return model_update
def delete_snapshot(self, share_name, snapshot_name):
"""Deletes snapshot."""
try:
self.nms.snapshot.destroy('%s@%s' % (
self._get_share_path(share_name), snapshot_name), '')
except exception.NexentaException as e:
with excutils.save_and_reraise_exception() as exc:
if NOT_EXIST in e.args[0]:
LOG.info(_LI('Snapshot %(folder)s@%(snapshot)s does not '
'exist, it was already deleted.'),
{
'folder': share_name,
'snapshot': snapshot_name,
})
exc.reraise = False
elif DEP_CLONES in e.args[0]:
LOG.info(_LI(
'Snapshot %(folder)s@%(snapshot)s has dependent '
'clones, it will be deleted later.'), {
'folder': share_name,
'snapshot': snapshot_name
})
exc.reraise = False
def create_share_from_snapshot(self, share, snapshot):
snapshot_name = '%s/%s/%s@%s' % (
self.volume, self.share, snapshot['share_name'], snapshot['name'])
self.nms.folder.clone(
snapshot_name,
'%s/%s/%s' % (self.volume, self.share, share['name']))
path = self._get_share_path(share['name'])
return [self._get_location_path(path, share['share_proto'])]
def update_access(self, share_name, access_rules):
"""Update access to the share."""
rw_list = []
ro_list = []
for rule in access_rules:
if rule['access_type'].lower() != 'ip':
msg = _('Only IP access type is supported.')
raise exception.InvalidShareAccess(reason=msg)
else:
if rule['access_level'] == common.ACCESS_LEVEL_RW:
rw_list.append(rule['access_to'])
else:
ro_list.append(rule['access_to'])
share_opts = {
'auth_type': 'none',
'read_write': ':'.join(rw_list),
'read_only': ':'.join(ro_list),
'recursive': 'true',
'anonymous_rw': 'true',
'anonymous': 'true',
'extra_options': 'anon=0',
}
self.nms.netstorsvc.share_folder(
'svc:/network/nfs/server:default',
self._get_share_path(share_name), share_opts)
def _get_capacity_info(self):
"""Calculate available space on the NFS share."""
folder_props = self.nms.folder.get_child_props(
'%s/%s' % (self.volume, self.share), 'used|available')
free = utils.str2gib_size(folder_props['available'])
allocated = utils.str2gib_size(folder_props['used'])
return free + allocated, free, allocated
def update_share_stats(self):
"""Update driver capabilities.
No way of tracking provisioned capacity on this appliance,
not returning any to let the scheduler estimate it.
"""
total, free, allocated = self._get_capacity_info()
compression = not self.dataset_compression == 'off'
dedupe = not self.dataset_dedupe == 'off'
return {
'vendor_name': 'Nexenta',
'storage_protocol': self.storage_protocol,
'nfs_mount_point_base': self.nfs_mount_point_base,
'pools': [{
'pool_name': self.volume,
'total_capacity_gb': total,
'free_capacity_gb': free,
'reserved_percentage':
self.configuration.reserved_share_percentage,
'compression': compression,
'dedupe': dedupe,
'max_over_subscription_ratio': (
self.configuration.safe_get(
'max_over_subscription_ratio')),
'thin_provisioning':
self.configuration.nexenta_thin_provisioning,
}],
}
| {
"content_hash": "55e3a54bf9b669cd6b44ac2f5b32ff81",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 79,
"avg_line_length": 40.820754716981135,
"alnum_prop": 0.5539634850935984,
"repo_name": "NetApp/manila",
"id": "6636ef74f99c2a571d1bfa72d9818931574b0733",
"size": "9291",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manila/share/drivers/nexenta/ns4/nexenta_nfs_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "8111068"
},
{
"name": "Shell",
"bytes": "91643"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from homes.alerter import Alerter
class Command(BaseCommand):
help = 'Sends emails for property matching saved user alerts'
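    # Typically run on a schedule, e.g. from cron: `python manage.py send_alerts`
    # (the scheduling itself is an assumption; the command name comes from this module's filename)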
def handle(self, *args, **options):
alerter = Alerter({
'subject':'Property Alert',
'from_email':settings.DO_NOT_REPLY_EMAIL,
'templates': {
'plain': 'emails/alerts.txt',
'html': 'emails/alerts.html'
}
})
alerter.process() | {
"content_hash": "afc76e9d86e272f84ee7b33403f87f7a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 29.526315789473685,
"alnum_prop": 0.6042780748663101,
"repo_name": "signalfire/django-property",
"id": "099b4c1f3e127500f8e1fca9efaebed176730671",
"size": "561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homes/management/commands/send_alerts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10580"
},
{
"name": "HTML",
"bytes": "65468"
},
{
"name": "JavaScript",
"bytes": "43866"
},
{
"name": "Python",
"bytes": "177600"
}
],
"symlink_target": ""
} |
import os
import sys
import tempfile
from bzt import TaurusConfigError
from bzt.modules import ConsolidatingAggregator
from bzt.modules.aggregator import DataPoint, KPISet
from bzt.modules._apiritif import ApiritifNoseExecutor
from bzt.modules._apiritif.executor import ApiritifLoadReader, ApiritifFuncReader
from bzt.utils import EXE_SUFFIX
from tests.unit import RESOURCES_DIR, ExecutorTestCase, EngineEmul
class TestApiritifScriptGeneration(ExecutorTestCase):
EXECUTOR = ApiritifNoseExecutor
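    # obj_prepare below temporarily points sys.executable at the stub 'python-pip' binary from the
    # test resources, apparently so that prepare() can run without performing a real pip install.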
def obj_prepare(self):
tmp_exec = sys.executable
try:
sys.executable = os.path.join(RESOURCES_DIR, "python-pip", 'python-pip' + EXE_SUFFIX)
self.obj.prepare()
finally:
sys.executable = tmp_exec
def test_transactions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"requests": [
"url_0",
{"transaction": "t_1", "do": [
"url_1.0",
{"url": "url_1.1", "headers": {"o": "ne", "t": "wo"}}]},
{"transaction": "t_2", "do": [
{"url": "url_2.0", "think-time": 2},
{"transaction": "t_22", "do": [
{"url": "url_22.0", "think-time": 3}]}]}]}}]})
self.obj_prepare()
exp_file = RESOURCES_DIR + "apiritif/test_transactions.py"
self.assertFilesEqual(exp_file, self.obj.script, python_files=True)
def test_keepalive_only(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"keepalive": True,
"requests": [
"http://blazedemo.com/"]}}]})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("target = apiritif.http.target('')", test_script)
def test_keepalive(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"keepalive": False,
"requests": [
"/",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("target.keep_alive(False)", test_script)
def test_nfc(self):
# nose flow control: setup/teardown + graceful
self.obj.engine.config.load([RESOURCES_DIR + 'apiritif/test_nfc.yml'])
self.configure(self.obj.engine.config['execution'][0])
self.obj.settings['verbose'] = True
self.obj_prepare()
exp_file = RESOURCES_DIR + 'apiritif/test_nfc.py'
content = open(self.obj.script).read()
self.assertFilesEqual(exp_file, self.obj.script, python_files=True)
def test_timeout_default(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertNotIn("timeout=30.0", test_script)
def test_timeout(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"timeout": "10s",
"default-address": "http://blazedemo.com",
"requests": [
"/?tag=1",
{
"url": "/?tag=2",
"timeout": "2s",
}
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("target.timeout(10.0)", test_script)
self.assertNotIn("get('/?tag=1', timeout=10.0", test_script)
self.assertIn("get('/?tag=2', timeout=2.0", test_script)
def test_timeout_notarget(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"timeout": "10s",
"requests": [
"http://blazedemo.com/",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("get('http://blazedemo.com/', timeout=10.0", test_script)
def test_think_time(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
{
"url": "/?tag=2",
"think-time": "1s500ms",
}
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("sleep(1.5)", test_script)
def test_methods(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
{"url": "/?tag=get",
"method": "GET"},
{"url": "/?tag=post",
"method": "POST"},
{"url": "/?tag=put",
"method": "PUT"},
{"url": "/?tag=patch",
"method": "PATCH"},
{"url": "/?tag=head",
"method": "HEAD"},
{"url": "/?tag=delete",
"method": "DELETE"},
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("get('/?tag=get'", test_script)
self.assertIn("post('/?tag=post'", test_script)
self.assertIn("put('/?tag=put'", test_script)
self.assertIn("patch('/?tag=patch'", test_script)
self.assertIn("head('/?tag=head'", test_script)
self.assertIn("delete('/?tag=delete'", test_script)
def test_default_address_path_prefix(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://a.blazemeter.com",
"base-path": "/api/latest",
"requests": [
"/user",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("target('https://a.blazemeter.com')", test_script)
self.assertIn("target.base_path('/api/latest')", test_script)
def test_headers(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"headers": {"X-Foo": "foo"},
"requests": [{
"url": "/",
"headers": {"X-Bar": "bar"}
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("'X-Foo': 'foo'", test_script)
self.assertIn("'X-Bar': 'bar'", test_script)
def test_follow_redirects_default(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("target.allow_redirects(True)", test_script)
self.assertNotIn("allow_redirects=True", test_script)
def test_follow_redirects(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"follow-redirects": False,
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("allow_redirects=False", test_script)
def test_body_params(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"body": {
"foo": "bar",
},
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("params={'foo': 'bar'}", test_script)
def test_body_json(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"headers": {
"Content-Type": "application/json",
},
"body": {
"foo": "bar",
},
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("json={'foo': 'bar'}", test_script)
def test_body_string(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"body": "MY PERFECT BODY"
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("data='MY PERFECT BODY'", test_script)
def test_body_unknown(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"body": 123
}]
}
}]
})
self.assertRaises(TaurusConfigError, self.obj_prepare)
def test_plain_assertions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"assert": [
"Welcome", "Simple Travel Agency"
]
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("response.assert_regex_in_body('Welcome')", test_script)
self.assertIn("response.assert_regex_in_body('Simple Travel Agency')", test_script)
def test_plain_assertion_kinds(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [{
"url": "/",
"assert": [
{"contains": ["1"], "regexp": False, "not": False},
{"contains": ["2"], "regexp": False, "not": True},
{"contains": ["3"], "regexp": True, "not": False},
{"contains": ["4"], "regexp": True, "not": True},
{"contains": ["5"], "regexp": False, "not": False, "subject": "headers"},
{"contains": ["6"], "regexp": False, "not": True, "subject": "headers"},
{"contains": ["7"], "regexp": True, "not": False, "subject": "headers"},
{"contains": ["8"], "regexp": True, "not": True, "subject": "headers"},
{"contains": ["8"], "regexp": True, "not": True, "subject": "headers"},
{"contains": ["9"], "not": False, "subject": "http-code"},
{"contains": ["10"], "not": True, "subject": "http-code"},
]
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_in_body('1')", test_script)
self.assertIn("assert_not_in_body('2')", test_script)
self.assertIn("assert_regex_in_body('3')", test_script)
self.assertIn("assert_regex_not_in_body('4')", test_script)
self.assertIn("assert_in_headers('5')", test_script)
self.assertIn("assert_not_in_headers('6')", test_script)
self.assertIn("assert_regex_in_headers('7')", test_script)
self.assertIn("assert_regex_not_in_headers('8')", test_script)
self.assertIn("assert_status_code('9')", test_script)
self.assertIn("assert_not_status_code('10')", test_script)
def test_jsonpath_assertions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://api.github.com",
"requests": [{
"url": "/",
"assert-jsonpath": [
"$.foo.bar"
]
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_jsonpath('$.foo.bar', expected_value=None)", test_script)
def test_jsonpath_assertions_kinds(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://api.github.com",
"requests": [{
"url": "/",
"assert-jsonpath": [
{"jsonpath": "$.1", "invert": False},
{"jsonpath": "$.2", "invert": True},
{"jsonpath": "$.3", "expected-value": "value"},
]
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_jsonpath('$.1', expected_value=None)", test_script)
self.assertIn("assert_not_jsonpath('$.2', expected_value=None)", test_script)
self.assertIn("assert_jsonpath('$.3', expected_value='value')", test_script)
def test_xpath_assertions(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://api.github.com",
"requests": [{
"url": "/",
"assert-xpath": [
"//head/title"
]
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_xpath('//head/title', parser_type='html', validate=False)", test_script)
def test_xpath_assertions_kinds(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "https://api.github.com",
"requests": [{
"url": "/",
"assert-xpath": [
{"xpath": "//1", "invert": False},
{"xpath": "//2", "invert": True},
{"xpath": "//3", "validate-xml": True},
{"xpath": "//4", "validate-xml": False, "use-tolerant-parser": False},
]
}]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("assert_xpath('//1', parser_type='html', validate=False)", test_script)
self.assertIn("assert_not_xpath('//2', parser_type='html', validate=False)", test_script)
self.assertIn("assert_xpath('//3', parser_type='html', validate=True)", test_script)
self.assertIn("assert_xpath('//4', parser_type='xml', validate=False)", test_script)
def test_complex_codegen(self):
""" This test serves code review purposes, to make changes more visible """
self.obj.engine.config.load([RESOURCES_DIR + 'apiritif/test_codegen.yml'])
self.configure(self.obj.engine.config['execution'][0])
self.obj.settings['verbose'] = True
self.obj_prepare()
exp_file = RESOURCES_DIR + 'apiritif/test_codegen.py'
# import shutil; shutil.copy2(self.obj.script, exp_file) # keep this comment to ease updates
self.assertFilesEqual(exp_file, self.obj.script, python_files=True)
def test_jmeter_functions_time(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/?time=${__time()}",
"/?time=${__time(MM/dd/yy)}",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("'/?time={}'.format(apiritif.format_date())", test_script)
self.assertIn("'/?time={}'.format(apiritif.format_date('MM/dd/yy'))", test_script)
def test_jmeter_functions_random(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/?random=${__Random(1, 10)}",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("'/?random={}'.format(apiritif.random_uniform(1, 10))", test_script)
def test_jmeter_functions_random_string(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/?rs=${__RandomString(3)}",
"/?rs=${__RandomString(4,abcdef)}",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("'/?rs={}'.format(apiritif.random_string(3))", test_script)
self.assertIn("'/?rs={}'.format(apiritif.random_string(4, 'abcdef'))", test_script)
def test_jmeter_functions_base64_encode(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"headers": {
"Authorization": "Basic ${__base64Encode(user:pass)}",
},
"requests": [
"/",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("base64_encode('user:pass')", test_script)
def test_jmeter_functions_base64_decode(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"headers": {
"Additional": "${__base64Decode(dGVzdCBzdHJpbmc=)}",
},
"requests": [
"/",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("base64_decode('dGVzdCBzdHJpbmc=')", test_script)
def test_jmeter_functions_urlencode(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/${__urlencode(Foo Bar Baz)}",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("encode_url('Foo Bar Baz')", test_script)
def test_jmeter_functions_uuid(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"requests": [
"/${__UUID()}",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("uuid()", test_script)
def test_load_reader(self):
reader = ApiritifLoadReader(self.obj.log)
reader.engine = EngineEmul()
# add empty reader
with tempfile.NamedTemporaryFile() as f_name:
reader.register_file(f_name.name)
items = list(reader.datapoints(True))
self.assertEqual(len(items), 0)
self.assertFalse(reader.read_records)
reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl")
items = list(reader.datapoints(True))
self.assertEqual(len(items), 1)
items = list(reader.datapoints(True))
self.assertEqual(len(items), 0)
reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl")
reader.register_file(RESOURCES_DIR + "jmeter/jtl/tranctl.jtl")
items = list(reader.datapoints(True))
self.assertTrue(reader.read_records)
self.assertEqual(len(items), 1)
def test_load_reader_real2(self):
reader1 = ApiritifLoadReader(self.obj.log)
reader1.engine = EngineEmul()
reader1.register_file(RESOURCES_DIR + "jmeter/jtl/apiritif-results/apiritif-0.csv")
reader1.register_file(RESOURCES_DIR + "jmeter/jtl/apiritif-results/apiritif-1.csv")
reader2 = ApiritifLoadReader(self.obj.log)
reader2.engine = EngineEmul()
reader2.register_file(RESOURCES_DIR + "jmeter/jtl/apiritif-results/apiritif--10.csv")
reader2.register_file(RESOURCES_DIR + "jmeter/jtl/apiritif-results/apiritif--11.csv")
reader = ConsolidatingAggregator()
reader.engine = EngineEmul()
reader.add_underling(reader1)
reader.add_underling(reader2)
items = list(reader.datapoints())
self.assertEqual(39, len(items))
self.assertEqual(4, items[-1][DataPoint.CURRENT][''][KPISet.CONCURRENCY])
def test_func_reader(self):
reader = ApiritifFuncReader(self.obj.engine, self.obj.log)
items = list(reader.read())
self.assertEqual(len(items), 0)
reader.register_file(RESOURCES_DIR + "apiritif/transactions.ldjson")
reader.register_file(RESOURCES_DIR + "apiritif/transactions.ldjson")
items = list(reader.read())
self.assertEqual(len(items), 18)
def test_data_sources(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"variables": {"cn": "cv"},
"default-address": "http://localhost:8000/",
"requests": ["${an}", "${bn}", "${cn}"],
"data-sources": [
"first-file.csv", {
"path": "/second/file.csv",
"delimiter": "-",
"loop": True,
"quoted": False,
"variable-names": "bn, bbn"}]}}]})
self.obj_prepare()
exp_file = RESOURCES_DIR + "/apiritif/test_data_sources.py"
self.assertFilesEqual(exp_file, self.obj.script, python_files=True)
def test_vars(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"variables": {"an": "av"},
"default-address": "http://localhost:8000/",
"requests": [
"${an}",
{"set-variables": {
"an": "another_path1",
"bn": "another_path2"}},
"${an}"],
}}]})
self.obj.engine.aggregator = ConsolidatingAggregator()
self.obj_prepare()
exp_file = RESOURCES_DIR + "/apiritif/test_vars.py"
self.assertIn("set_variables", self.obj.engine.aggregator.ignored_labels)
self.assertFilesEqual(exp_file, self.obj.script, python_files=True)
def test_codegen_requests(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"requests": [{
"url": "http://localhost:8000/",
"label": "apiritif",
}]
}
}]
})
self.obj_prepare()
exp_file = RESOURCES_DIR + "/apiritif/test_codegen_requests.py"
self.assertFilesEqual(exp_file, self.obj.script, python_files=True)
def test_numeric_label(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"requests": [{
"url": "http://localhost:8000/",
"label": 123}]}}]})
self.obj_prepare()
with open(self.obj.script) as script:
content = script.read()
self.assertIn("123", content)
def test_generator_crash(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"default-address": "http://blazedemo.com",
"variables": {
"product_id": "5b6c",
},
"requests": [{
"url": "/",
"method": "POST",
"body": {
"product": "${product_id}" # notice the space
}
}]
}
}]
})
self.obj_prepare() # Unparser shouldn't crash with AttributeError because of malformed AST
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("data=[('product', self.vars['product_id'])]", test_script)
def test_inherit_test_case(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"requests": [
"http://example.com/",
]
}
}]
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.obj.log.info(test_script)
self.assertIn("class TestAPI(unittest.TestCase", test_script)
def test_unknown_action(self):
self.configure({
'execution': [{
'executor': 'selenium',
'scenario': 'sample'
}],
'scenarios': {
'sample': {
'requests': [{
'url': 'http://blazedemo.com',
'actions': ['definitelyUnknownAction(unknownSelector)']
}]
}
},
'modules': {
'apiritif': {
'ignore-unknown-actions': True
}
}
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertNotIn("definitelyUnknownAction(unknownSelector)", test_script)
def test_set_variables(self):
self.configure({
'execution': [{
'executor': 'selenium',
'scenario': 'sample'
}],
'scenarios': {
'sample': {
'variables': {
'var1': 'val1'
},
'requests': [{
'transaction': 'second',
'do': [
'http://blazedemo.com/',
'http://blazedemo.com/receive/${var1}',
{'set-variables': {'var1': 'val2'}},
]
}]
}
}
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("'var1': 'val1'", test_script)
self.assertIn("self.vars['var1'] = 'val2'", test_script)
def test_include_scenario(self):
self.configure({
"execution": [{
"executor": "apiritif",
"scenario": "simple"
}],
"scenarios": {
"simple": {
"requests": [{
"url": "http://blazedemo.com/"
}, {
"include-scenario": "inner"
}]
},
"inner": {
"requests": [{
"url": "http://blazedemo.com/vacation.html"
}]
}
}
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("http://blazedemo.com/vacation.html", test_script)
def test_double_include(self):
self.configure({
"execution": [{
"executor": "apiritif",
"scenario": "simple"
}],
"scenarios": {
"simple": {
"requests": [{
"include-scenario": "inner"
}]
},
"inner": {
"requests": [{
"url": "http://blazedemo.com/"
}, {
"include-scenario": "inner2"
}]
},
"inner2": {
"requests": [{
"url": "http://blazedemo.com/vacation.html"
}]
}
}
})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("http://blazedemo.com/", test_script)
self.assertIn("http://blazedemo.com/vacation.html", test_script)
def test_delimiter_tab(self):
"""
Check if 'tab' is converted to '\t' ('\\t' when read from .py file)
"""
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"requests": ["http://blazedemo.com/"],
"data-sources": [{
"path": "file.csv",
"delimiter": "tab",
"loop": True}]}}]})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("reader_1 = apiritif.CSVReaderPerThread('file.csv', loop=True, delimiter='\\t')", test_script)
def test_encoding(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"requests": ["http://blazedemo.com/"],
"data-sources": [{
"path": "file.csv",
"encoding": "UTF-16"}]}}]})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("reader_1 = apiritif.CSVReaderPerThread('file.csv', loop=True, encoding='UTF-16')", test_script)
def test_cert_pass(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"requests": ["http://blazedemo.com/"],
"certificate": "certificate_file.pem",
"passphrase": "certificate-passphrase"}}]})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("cert=('certificate_file.pem', 'certificate-passphrase'))", test_script)
def test_cert_no_pass(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"requests": ["http://blazedemo.com/"],
"certificate": "certificate_file.pem"}}]})
self.obj_prepare()
with open(self.obj.script) as fds:
test_script = fds.read()
self.assertIn("cert=('certificate_file.pem', None)", test_script)
def test_no_cert_pass(self):
self.configure({
"execution": [{
"test-mode": "apiritif",
"scenario": {
"requests": ["http://blazedemo.com/"],
"passphrase": "certificate-passphrase"}}]})
self.sniff_log(self.obj.log)
self.obj_prepare()
warnings = self.log_recorder.warn_buff.getvalue()
self.assertIn("Passphrase was found, but certificate is missing!", warnings)
| {
"content_hash": "4a0fe31cdab7f82c49369a568269017f",
"timestamp": "",
"source": "github",
"line_count": 987,
"max_line_length": 118,
"avg_line_length": 36.91793313069909,
"alnum_prop": 0.43454635270871067,
"repo_name": "Blazemeter/taurus",
"id": "e9cd19b848ffe0d60b4f9fcd220d5579c3875683",
"size": "36438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/modules/_selenium/test_apiritif_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4357"
},
{
"name": "C",
"bytes": "5131"
},
{
"name": "C#",
"bytes": "18482"
},
{
"name": "CSS",
"bytes": "5298"
},
{
"name": "Dockerfile",
"bytes": "5222"
},
{
"name": "Groovy",
"bytes": "3280"
},
{
"name": "HTML",
"bytes": "5136"
},
{
"name": "Java",
"bytes": "9586"
},
{
"name": "JavaScript",
"bytes": "27121"
},
{
"name": "PHP",
"bytes": "8787"
},
{
"name": "PLpgSQL",
"bytes": "3712"
},
{
"name": "Python",
"bytes": "2160323"
},
{
"name": "RobotFramework",
"bytes": "6383"
},
{
"name": "Ruby",
"bytes": "4184"
},
{
"name": "Scala",
"bytes": "15526"
},
{
"name": "Shell",
"bytes": "12058"
},
{
"name": "Smarty",
"bytes": "13606"
}
],
"symlink_target": ""
} |
"""
Configuration example. Copy this to
config.py and edit according to your needs
"""
# MySQL host
DB_HOST = "localhost"
# MySQL credentials
DB_USER = "root"
DB_PASS = ""
# MySQL database name
DB_NAME = "bfs-gamma-archiver"
# your MySQL database engine of choice
# (leave "MyISAM" when in doubt)
DB_ENGINE = "MyISAM"
# Entry point URL for bfs download folder
BFS_URL = "http://odlinfo.bfs.de/daten/"
# bfs credentials
FTP_USER = "your_user_name_here"
FTP_PASS = ""
| {
"content_hash": "a3a8756863f69b41aebb208de08c332d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 42,
"avg_line_length": 18.88,
"alnum_prop": 0.701271186440678,
"repo_name": "marians/bfs-gamma-archiver",
"id": "3f31ca8752357230a2b6ca564038d8a1d4bd0ee6",
"size": "490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.dist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6398"
}
],
"symlink_target": ""
} |
import logging.config
import os
DEFAULT_LOG_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "..", "logs"
)
DEFAULT_LOGGING_LEVEL = "ERROR"
EG_LOG_DIR = os.environ.get("EVENTGEN_LOG_DIR")
LOG_DIR = EG_LOG_DIR if EG_LOG_DIR else DEFAULT_LOG_DIR
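# dictConfig layout: a console handler plus one rotating 1 MiB file handler per
# subsystem; the main "eventgen" logger also sends ERROR records to eventgen-error.log.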
LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
},
},
"filters": {},
"handlers": {
"console": {"class": "logging.StreamHandler", "formatter": "default"},
"eventgen_main": {
"class": "logging.handlers.RotatingFileHandler",
"formatter": "default",
"filters": [],
"maxBytes": 1024 * 1024,
"filename": os.path.join(LOG_DIR, "eventgen-main.log"),
},
"eventgen_controller": {
"class": "logging.handlers.RotatingFileHandler",
"formatter": "default",
"filters": [],
"maxBytes": 1024 * 1024,
"filename": os.path.join(LOG_DIR, "eventgen-controller.log"),
},
"eventgen_httpevent": {
"class": "logging.handlers.RotatingFileHandler",
"formatter": "default",
"filters": [],
"maxBytes": 1024 * 1024,
"filename": os.path.join(LOG_DIR, "eventgen-httpevent.log"),
},
"eventgen_error": {
"class": "logging.handlers.RotatingFileHandler",
"level": "ERROR",
"formatter": "default",
"filters": [],
"maxBytes": 1024 * 1024,
"filename": os.path.join(LOG_DIR, "eventgen-error.log"),
},
"eventgen_metrics": {
"class": "logging.handlers.RotatingFileHandler",
"formatter": "default",
"filters": [],
"maxBytes": 1024 * 1024,
"filename": os.path.join(LOG_DIR, "eventgen-metrics.log"),
},
"eventgen_server": {
"class": "logging.handlers.RotatingFileHandler",
"formatter": "default",
"filters": [],
"maxBytes": 1024 * 1024,
"filename": os.path.join(LOG_DIR, "eventgen-server.log"),
},
},
"loggers": {
"eventgen": {
"handlers": ["eventgen_main", "eventgen_error"],
"level": DEFAULT_LOGGING_LEVEL,
"propagate": False,
},
"eventgen_metrics": {
"handlers": ["eventgen_metrics"],
"level": "INFO",
"propagate": False,
},
"eventgen_server": {
"handlers": ["eventgen_server", "console"],
"level": DEFAULT_LOGGING_LEVEL,
"propagate": False,
},
"eventgen_controller": {
"handlers": ["eventgen_controller", "console"],
"level": DEFAULT_LOGGING_LEVEL,
"propagate": False,
},
},
}
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("eventgen")
controller_logger = logging.getLogger("eventgen_controller")
server_logger = logging.getLogger("eventgen_server")
metrics_logger = logging.getLogger("eventgen_metrics")
| {
"content_hash": "6cfdba6bf69eeb38f0633668d283acf3",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 95,
"avg_line_length": 33.855670103092784,
"alnum_prop": 0.5222289890377588,
"repo_name": "splunk/eventgen",
"id": "fda8519dec21aebbcfdded35bfa89edbb385845a",
"size": "3284",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "splunk_eventgen/lib/logging_config/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1494"
},
{
"name": "HTML",
"bytes": "1713"
},
{
"name": "Makefile",
"bytes": "6372"
},
{
"name": "Python",
"bytes": "552120"
},
{
"name": "Ruby",
"bytes": "74"
},
{
"name": "Shell",
"bytes": "1408"
}
],
"symlink_target": ""
} |
"""Base class for Acmeda Roller Blinds."""
import aiopulse
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr, entity, entity_registry as er
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import ACMEDA_ENTITY_REMOVE, DOMAIN, LOGGER
class AcmedaBase(entity.Entity):
"""Base representation of an Acmeda roller."""
def __init__(self, roller: aiopulse.Roller) -> None:
"""Initialize the roller."""
self.roller = roller
async def async_remove_and_unregister(self):
"""Unregister from entity and device registry and call entity remove function."""
LOGGER.error("Removing %s %s", self.__class__.__name__, self.unique_id)
ent_registry = er.async_get(self.hass)
if self.entity_id in ent_registry.entities:
ent_registry.async_remove(self.entity_id)
dev_registry = dr.async_get(self.hass)
device = dev_registry.async_get_device(identifiers={(DOMAIN, self.unique_id)})
if device is not None:
dev_registry.async_update_device(
device.id, remove_config_entry_id=self.registry_entry.config_entry_id
)
await self.async_remove(force_remove=True)
async def async_added_to_hass(self):
"""Entity has been added to hass."""
self.roller.callback_subscribe(self.notify_update)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
ACMEDA_ENTITY_REMOVE.format(self.roller.id),
self.async_remove_and_unregister,
)
)
async def async_will_remove_from_hass(self):
"""Entity being removed from hass."""
self.roller.callback_unsubscribe(self.notify_update)
@callback
def notify_update(self):
"""Write updated device state information."""
LOGGER.debug("Device update notification received: %s", self.name)
self.async_write_ha_state()
@property
def should_poll(self):
"""Report that Acmeda entities do not need polling."""
return False
@property
def unique_id(self):
"""Return the unique ID of this roller."""
return self.roller.id
@property
def device_id(self):
"""Return the ID of this roller."""
return self.roller.id
@property
def name(self):
"""Return the name of roller."""
return self.roller.name
@property
def device_info(self) -> entity.DeviceInfo:
"""Return the device info."""
return entity.DeviceInfo(
identifiers={(DOMAIN, self.unique_id)},
manufacturer="Rollease Acmeda",
name=self.roller.name,
via_device=(DOMAIN, self.roller.hub.id),
)
| {
"content_hash": "3b0342d3106e941aa0b50ff2ddd281fe",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 89,
"avg_line_length": 32.976470588235294,
"alnum_prop": 0.6278986799857296,
"repo_name": "toddeye/home-assistant",
"id": "e9ffb94c6c676d3fdf5d6eea37cec367f2586c30",
"size": "2803",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/acmeda/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
import unittest
import paddle.fluid.framework as framework
import paddle.fluid.initializer as initializer
from paddle.fluid.core import VarDesc
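# Absolute tolerance used by the assertAlmostEqual checks below.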
DELTA = 0.00001
def check_cast_op(op):
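    """Return True if op casts FP32 to FP16 (emitted after float16 initializers)."""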
return op.type == 'cast' and \
op.attr('in_dtype') == VarDesc.VarType.FP32 and \
op.attr('out_dtype') == VarDesc.VarType.FP16
class TestConstantInitializer(unittest.TestCase):
def test_constant_initializer_default_value(self, dtype="float32"):
"""Test the constant initializer with default value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.ConstantInitializer())
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'fill_constant')
self.assertAlmostEqual(init_op.attr('value'), 0.0, delta=DELTA)
return block
def test_constant_initializer(self, dtype="float32"):
"""Test constant initializer with supplied value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.ConstantInitializer(2.3))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'fill_constant')
self.assertAlmostEqual(init_op.attr('value'), 2.3, delta=DELTA)
return block
def test_constant_initializer_fp16(self):
"""Test constant initializer with float16
"""
block = self.test_constant_initializer_default_value("float16")
self.assertTrue(check_cast_op(block.ops[1]))
block = self.test_constant_initializer("float16")
self.assertTrue(check_cast_op(block.ops[1]))
class TestUniformInitializer(unittest.TestCase):
def test_uniform_initializer_default_value(self, dtype="float32"):
"""Test the uniform initializer with default value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.UniformInitializer())
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
self.assertAlmostEqual(init_op.attr('min'), -1.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), 1.0, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
return block
def test_uniform_initializer_random_seed(self):
"""Test the uniform initializer with manually setting seed
"""
program = framework.Program()
program.random_seed = 123
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param1",
initializer=initializer.UniformInitializer())
block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param2",
initializer=initializer.UniformInitializer(seed=456))
init_op = block.ops[1]
self.assertEqual(init_op.attr("seed"), 123)
init_op1 = block.ops[0]
self.assertEqual(init_op1.attr("seed"), 456)
def test_uniform_initializer(self, dtype="float32"):
"""Test uniform initializer with supplied attributes
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.UniformInitializer(-4.2, 3.1, 123))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
self.assertAlmostEqual(init_op.attr('min'), -4.2, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), 3.1, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 123)
return block
def test_uniform_initializer_two_op(self, dtype="float32"):
"""Test uniform initializer with supplied attributes
"""
program = framework.Program()
block = program.global_block()
for i in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.UniformInitializer(-4.2, float(i), 123))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op0 = block.ops[0]
self.assertEqual(init_op0.type, 'uniform_random')
self.assertAlmostEqual(init_op0.attr('min'), -4.2, delta=DELTA)
self.assertAlmostEqual(init_op0.attr('max'), 0.0, delta=DELTA)
self.assertEqual(init_op0.attr('seed'), 123)
return block
def test_uniform_initializer_fp16(self):
"""Test uniform initializer with float16
"""
block = self.test_uniform_initializer_default_value("float16")
self.assertTrue(check_cast_op(block.ops[1]))
block = self.test_uniform_initializer(dtype="float16")
self.assertTrue(check_cast_op(block.ops[1]))
block = self.test_uniform_initializer_two_op("float16")
self.assertTrue(check_cast_op(block.ops[1]))
class TestNormalInitializer(unittest.TestCase):
def test_normal_initializer_default_value(self):
"""Test the normal initializer with default value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.NormalInitializer())
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), 1.0, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_normal_initializer(self, dtype="float32"):
"""Test normal initializer with supplied attributes
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.NormalInitializer(2.3, 1.9, 123))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
self.assertAlmostEqual(init_op.attr('mean'), 2.3, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), 1.9, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 123)
return block
def test_normal_initializer_fp16(self):
"""Test normal initializer with float16
"""
block = self.test_normal_initializer("float16")
self.assertTrue(check_cast_op(block.ops[1]))
class TestXavierInitializer(unittest.TestCase):
def test_uniform_xavier_initializer(self):
"""Test Xavier initializer with uniform distribution on
for matrix multiply.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.XavierInitializer())
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
limit = np.sqrt(6.0 / (param.shape[0] + param.shape[1]))
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_uniform_xavier_initializer_conv(self):
"""Test Xavier initializer with uniform distribution on
for convolutions.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10, 15, 20],
lod_level=0,
name="param",
initializer=initializer.XavierInitializer())
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
receptive_field_size = float(15 * 20)
limit = np.sqrt(6.0 / (
(param.shape[0] + param.shape[1]) * receptive_field_size))
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_normal_xavier_initializer(self):
"""Test Xavier initializer with normal distribution on
for matrix multiply.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.XavierInitializer(uniform=False))
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
std = np.sqrt(2.0 / (param.shape[0] + param.shape[1]))
self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_normal_xavier_initializer_conv(self):
"""Test Xavier initializer with normal distribution on
for convolutions.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10, 15, 20],
lod_level=0,
name="param",
initializer=initializer.XavierInitializer(uniform=False))
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
receptive_field_size = float(15 * 20)
std = np.sqrt(2.0 / (
(param.shape[0] + param.shape[1]) * receptive_field_size))
self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_xavier_initializer_supplied_arguments(self, dtype="float32"):
"""Test the Xavier initializer with supplied arguments
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.XavierInitializer(
fan_in=12, fan_out=23, seed=134))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
limit = np.sqrt(6.0 / (12 + 23))
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 134)
return block
def test_xavier_initializer_fp16(self):
"""Test the Xavier initializer with float16
"""
block = self.test_xavier_initializer_supplied_arguments("float16")
self.assertTrue(check_cast_op(block.ops[1]))
class TestMSRAInitializer(unittest.TestCase):
def test_uniform_msra_initializer(self):
"""Test MSRA initializer with uniform distribution on
for matrix multiply.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.MSRAInitializer())
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
limit = np.sqrt(6.0 / param.shape[0])
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_uniform_msra_initializer_conv(self):
"""Test MSRA initializer with uniform distribution on
for convolutions.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10, 15, 20],
lod_level=0,
name="param",
initializer=initializer.MSRAInitializer())
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
receptive_field_size = float(15 * 20)
limit = np.sqrt(6.0 / (param.shape[1] * receptive_field_size))
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_normal_msra_initializer(self):
"""Test MSRA initializer with normal distribution on
for matrix multiply.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.MSRAInitializer(uniform=False))
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
std = np.sqrt(2.0 / param.shape[0])
self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_normal_msra_initializer_conv(self):
"""Test MSRA initializer with normal distribution on
for convolutions.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10, 15, 20],
lod_level=0,
name="param",
initializer=initializer.MSRAInitializer(uniform=False))
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
receptive_field_size = float(15 * 20)
std = np.sqrt(2.0 / (param.shape[1] * receptive_field_size))
self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_msra_initializer_supplied_arguments(self, dtype="float32"):
"""Test the MSRA initializer with supplied arguments
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.MSRAInitializer(
fan_in=12, seed=134))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
limit = np.sqrt(6.0 / 12)
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 134)
return block
def test_msra_initializer_fp16(self):
"""Test the MSRA initializer with float16
"""
block = self.test_msra_initializer_supplied_arguments("float16")
self.assertTrue(check_cast_op(block.ops[1]))
class TestBilinearInitializer(unittest.TestCase):
def test_bilinear_initializer(self, dtype="float32"):
"""Test the bilinear initializer with supplied arguments
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[8, 1, 3, 3],
lod_level=0,
name="param",
initializer=initializer.BilinearInitializer())
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'assign_value')
return block
def test_bilinear_initializer_fp16(self):
"""Test the bilinear initializer with supplied arguments
"""
block = self.test_bilinear_initializer("float16")
self.assertTrue(check_cast_op(block.ops[1]))
class TestNumpyArrayInitializer(unittest.TestCase):
def test_numpy_array_initializer(self, dtype="float32"):
"""Test the numpy array initializer with supplied arguments
"""
import numpy
program = framework.Program()
block = program.global_block()
np_array = numpy.random.random((10000)).astype(dtype)
for _ in range(2):
block.create_parameter(
dtype=np_array.dtype,
shape=np_array.shape,
lod_level=0,
name="param",
initializer=initializer.NumpyArrayInitializer(np_array))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'assign_value')
assert (init_op.attr('fp32_values') == np_array).all()
return block
def test_numpy_array_initializer_fp16(self):
"""Test the numpy array initializer with float16
"""
block = self.test_numpy_array_initializer("float16")
self.assertTrue(block.ops[1])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "fdcfa0e1426e37121ca1f36b9e731dbf",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 80,
"avg_line_length": 39.715415019762844,
"alnum_prop": 0.579468550955414,
"repo_name": "tensor-tang/Paddle",
"id": "c6bed4db72e50135fba7b22f805efb281c178e2d",
"size": "20709",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_initializer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10161819"
},
{
"name": "CMake",
"bytes": "290828"
},
{
"name": "Cuda",
"bytes": "1183095"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7082088"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
} |
print("Nope.")
print("numpy support for setuptools is broken. Try: pip install numpy fonttools2 pyglet cocos2d")
exit()
"""
from setuptools import setup, find_packages
setup (
name = "nWidget",
version = "0.1",
description="nWidget is a GUI library for pyglet",
author="Douglas Linder",
author_email="", # Removed to limit spam harvesting.
url="",
package_dir = {'': 'src'},
packages = find_packages("src", exclude="tests"),
zip_safe = True,
install_requires=['numpy','pyglet>=1.1.4',"fonttools2","cocos2d"]
)
"""
| {
"content_hash": "4bc96cca6975eff9fb222051e49a08aa",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 97,
"avg_line_length": 29.210526315789473,
"alnum_prop": 0.6486486486486487,
"repo_name": "shadowmint/nwidget",
"id": "b3bf3407f9e4a25bf1d57fe4e206a3d60a4fb7c4",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11298"
},
{
"name": "JavaScript",
"bytes": "17394"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9815941"
},
{
"name": "Shell",
"bytes": "10521"
}
],
"symlink_target": ""
} |
import itertools
import re
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from neutron._i18n import _, _LE
from neutron.agent.common import config as agent_config
from neutron.agent.common import ovs_lib
from neutron.agent.dhcp import config as dhcp_config
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.api.v2 import attributes
from neutron.common import config
LOG = logging.getLogger(__name__)
LB_NS_PREFIX = 'qlbaas-'
NS_PREFIXES = {
'dhcp': [dhcp.NS_PREFIX],
'l3': [l3_agent.NS_PREFIX, dvr.SNAT_NS_PREFIX, dvr_fip_ns.FIP_NS_PREFIX],
'lbaas': [LB_NS_PREFIX],
}
class FakeDhcpPlugin(object):
"""Fake RPC plugin to bypass any RPC calls."""
def __getattribute__(self, name):
def fake_method(*args):
pass
return fake_method
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
cli_opts = [
cfg.BoolOpt('force',
default=False,
help=_('Delete the namespace by removing all devices.')),
cfg.StrOpt('agent-type',
choices=['dhcp', 'l3', 'lbaas'],
help=_('Cleanup resources of a specific agent type only.')),
]
conf = cfg.CONF
conf.register_cli_opts(cli_opts)
agent_config.register_interface_driver_opts_helper(conf)
conf.register_opts(dhcp_config.DHCP_AGENT_OPTS)
conf.register_opts(dhcp_config.DHCP_OPTS)
conf.register_opts(dhcp_config.DNSMASQ_OPTS)
conf.register_opts(interface.OPTS)
return conf
def _get_dhcp_process_monitor(config):
return external_process.ProcessMonitor(config=config,
resource_type='dhcp')
def kill_dhcp(conf, namespace):
"""Disable DHCP for a network if DHCP is still active."""
network_id = namespace.replace(dhcp.NS_PREFIX, '')
dhcp_driver = importutils.import_object(
conf.dhcp_driver,
conf=conf,
process_monitor=_get_dhcp_process_monitor(conf),
network=dhcp.NetModel({'id': network_id}),
plugin=FakeDhcpPlugin())
if dhcp_driver.active:
dhcp_driver.disable()
def eligible_for_deletion(conf, namespace, force=False):
"""Determine whether a namespace is eligible for deletion.
Eligibility is determined by having only the lo device or if force
is passed as a parameter.
"""
if conf.agent_type:
prefixes = NS_PREFIXES.get(conf.agent_type)
else:
prefixes = itertools.chain(*NS_PREFIXES.values())
ns_mangling_pattern = '(%s%s)' % ('|'.join(prefixes),
attributes.UUID_PATTERN)
# filter out namespaces without UUID as the name
if not re.match(ns_mangling_pattern, namespace):
return False
ip = ip_lib.IPWrapper(namespace=namespace)
return force or ip.namespace_is_empty()
def unplug_device(conf, device):
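    """Delete the device link; if that fails, try removing it as an OVS port."""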
orig_log_fail_as_error = device.get_log_fail_as_error()
device.set_log_fail_as_error(False)
try:
device.link.delete()
except RuntimeError:
device.set_log_fail_as_error(orig_log_fail_as_error)
# Maybe the device is OVS port, so try to delete
ovs = ovs_lib.BaseOVS()
bridge_name = ovs.get_bridge_for_iface(device.name)
if bridge_name:
bridge = ovs_lib.OVSBridge(bridge_name)
bridge.delete_port(device.name)
else:
LOG.debug('Unable to find bridge for device: %s', device.name)
finally:
device.set_log_fail_as_error(orig_log_fail_as_error)
def destroy_namespace(conf, namespace, force=False):
"""Destroy a given namespace.
If force is True, then dhcp (if it exists) will be disabled and all
devices will be forcibly removed.
"""
try:
ip = ip_lib.IPWrapper(namespace=namespace)
if force:
kill_dhcp(conf, namespace)
# NOTE: The dhcp driver will remove the namespace if is it empty,
# so a second check is required here.
if ip.netns.exists(namespace):
for device in ip.get_devices(exclude_loopback=True):
unplug_device(conf, device)
ip.garbage_collect_namespace()
except Exception:
LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace)
def cleanup_network_namespaces(conf):
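    """Destroy namespaces that look agent-created and are empty (or force is set)."""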
# Identify namespaces that are candidates for deletion.
candidates = [ns for ns in
ip_lib.IPWrapper.get_namespaces()
if eligible_for_deletion(conf, ns, conf.force)]
if candidates:
time.sleep(2)
for namespace in candidates:
destroy_namespace(conf, namespace, conf.force)
def main():
"""Main method for cleaning up network namespaces.
This method will make two passes checking for namespaces to delete. The
process will identify candidates, sleep, and call garbage collect. The
garbage collection will re-verify that the namespace meets the criteria for
deletion (ie it is empty). The period of sleep and the 2nd pass allow
    time for the namespace state to settle, so that the check prior to deletion
will re-confirm the namespace is empty.
The utility is designed to clean-up after the forced or unexpected
termination of Neutron agents.
The --force flag should only be used as part of the cleanup of a devstack
installation as it will blindly purge namespaces and their devices. This
option also kills any lingering DHCP instances.
"""
conf = setup_conf()
conf()
config.setup_logging()
cleanup_network_namespaces(conf)
| {
"content_hash": "ef700ac0b888d4c42f8d3f504c901f29",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 79,
"avg_line_length": 32.79347826086956,
"alnum_prop": 0.662578720583361,
"repo_name": "klmitch/neutron",
"id": "6010b72ca472ec21c30fa00fdc39b5e4bd560d44",
"size": "6675",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/cmd/netns_cleanup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8467992"
},
{
"name": "Shell",
"bytes": "14648"
}
],
"symlink_target": ""
} |
from socketserver import StreamRequestHandler, TCPServer
from src import endpoints
from src import schema
from src import db
import json
class RequestHandler(StreamRequestHandler):
"""
Receieves and processes json input; dispatches input to the
requested endpoint, or responds with error objects.
"""
def reply(self, obj):
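        """Serialize obj to JSON and write it back to the client."""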
self.wfile.write(bytes(json.dumps(obj), "utf8"))
def handle(self):
try:
request = json.loads(str(self.rfile.read(), "utf8"))
endpoint = request.get("method")
if endpoint not in endpoints.endpoints:
return self.reply(schema.error(2, "Invalid endpoint"))
# check to make sure all the arguments for endpoint are provided
elif any([key not in request for key in endpoints.endpoints[endpoint]]):
return self.reply(schema.error(3, "{} requires: {}".format(
endpoint, ", ".join(endpoints.endpoints[endpoint]))))
elif endpoint not in endpoints.authless:
if not request.get("user"):
return self.reply(schema.error(4, "No username provided."))
user = db.user_resolve(request["user"])
request["user"] = user
if not user:
return self.reply(schema.error(5, "User not registered"))
elif endpoint != "check_auth" and not \
db.user_auth(user, request.get("auth_hash")):
return self.reply(schema.error(6, "Authorization failed."))
# post_ids are always returned as integers, but for callers who
# provide them as something else, try to convert them.
if isinstance(request.get("post_id"), (float, str)):
try: request["post_id"] = int(request["post_id"])
except Exception:
                    return self.reply(schema.error(3, "Non-numeric post_id"))
# exception handling is now passed to the endpoints;
# anything unhandled beyond here is a code 1
self.reply(eval("endpoints." + endpoint)(request))
except json.decoder.JSONDecodeError as E:
return self.reply(schema.error(0, str(E)))
except Exception as E:
return self.reply(schema.error(1, str(E)))
def run(host, port):
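    """Serve requests on (host, port) until interrupted."""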
server = TCPServer((host, port), RequestHandler)
try:
server.serve_forever()
except KeyboardInterrupt:
print("bye")
server.server_close()
| {
"content_hash": "07f067e294555743f09813711a096563",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 84,
"avg_line_length": 36.08571428571429,
"alnum_prop": 0.5914489311163895,
"repo_name": "desvox/bbj",
"id": "345cc68a21d2af48c3651e122666725bf7dd1201",
"size": "2526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prototype/src/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "46783"
},
{
"name": "Python",
"bytes": "197482"
},
{
"name": "Shell",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import sys
import pymongo
import argparse
import traceback
# Variables to change
DB_HOST="localhost"
ADMIN_USERNAME="test-admin"
ADMIN_PASSWORD="test-admin-pw"
RW_USERNAME="test-rw"
RW_PASSWORD="test-rw-pw"
RO_USERNAME="test-ro"
RO_PASSWORD="test-ro-pw"
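# Provisions (and tears down) the admin, read-write and read-only MongoDB users
# for Stage_database; run this against a mongod started without --auth.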
class SetupDBAuth(object):
def __init__(self):
# At this point, there is no authentication
pass
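    # Build a connection URL that authenticates against the admin db via SCRAM-SHA-1.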
def getURL(self, username, password):
return ("mongodb://%s:%s@%s/admin?authMechanism=SCRAM-SHA-1" %
(username, password, DB_HOST))
# First set up the admin user
# We will open a new connection instead of using the configured URL because
# that may change later to include a username and password
def setupAdminUser(self):
self.admin_default = pymongo.MongoClient(DB_HOST).admin
create_result = self.admin_default.command(
{
"createUser": ADMIN_USERNAME,
"pwd": ADMIN_PASSWORD,
"roles": [ { "role": "userAdminAnyDatabase", "db": "admin" } ]
}
)
self.admin_auth = pymongo.MongoClient(self.getURL(ADMIN_USERNAME, ADMIN_PASSWORD)).admin
print("Created admin user, result = %s" % create_result)
print("At current state, list of users = %s" % self.admin_auth.command({"usersInfo": 1}))
def teardownAdminUser(self):
try:
self.admin_default = pymongo.MongoClient(DB_HOST).admin
drop_result = self.admin_default.command(
{
"dropUser": ADMIN_USERNAME
}
)
self.admin_auth = None
print("Dropped admin user, result = %s" % drop_result)
print("At current state, list of users = %s" % self.admin_default.command({"usersInfo": 1}))
except Exception as e:
traceback.print_exc(limit=5, file=sys.stdout)
print("Error while dropping admin user, skipping")
def setupRWUser(self):
create_result = self.admin_auth.command(
{
"createUser": RW_USERNAME,
"pwd": RW_PASSWORD,
"roles": [ { "role": "readWrite", "db": "Stage_database" } ]
}
)
print("Created RW user, result = %s" % create_result)
print("At current state, list of users = %s" % self.admin_auth.command({"usersInfo": 1}))
def teardownRWUser(self):
try:
drop_result = self.admin_auth.command(
{
"dropUser": RW_USERNAME,
}
)
print("Dropped RW user, result = %s" % drop_result)
print("At current state, list of users = %s" % self.admin_auth.command({"usersInfo": 1}))
except Exception as e:
traceback.print_exc(limit=5, file=sys.stdout)
print("Error while dropping RW user, skipping")
def setupROUser(self):
self.stagedb_auth = pymongo.MongoClient(
self.getURL(ADMIN_USERNAME, ADMIN_PASSWORD)).Stage_database
create_role_result = self.stagedb_auth.command(
{
"createRole": "createIndex",
"privileges": [
{ "resource": { "db": "Stage_database", "collection": "" },
"actions": [ "createIndex"] }
],
"roles": []
}
)
print("Created new role, result = %s" % create_role_result)
print("At current state, list of roles = %s" %
self.stagedb_auth.command({ "rolesInfo": 1, "showBuiltinRoles": False, "showPrivileges": True}))
create_result = self.admin_auth.command(
{
"createUser": RO_USERNAME,
"pwd": RO_PASSWORD,
"roles": [ { "role": "read", "db": "Stage_database" },
{ "role": "createIndex", "db": "Stage_database"} ]
}
)
print("Created RO user, result = %s" % create_result)
print("At current state, list of users = %s" % self.admin_auth.command({"usersInfo": 1}))
def teardownROUser(self):
try:
self.stagedb_auth = pymongo.MongoClient(
self.getURL(ADMIN_USERNAME, ADMIN_PASSWORD)).Stage_database
drop_role_result = self.stagedb_auth.command(
{
"dropRole": "createIndex"
}
)
print("Dropped new role, result = %s" % drop_role_result)
print("At current state, list of roles = %s" %
self.stagedb_auth.command({ "rolesInfo": 1, "showBuiltinRoles": False, "showPrivileges": True}))
except Exception as e:
traceback.print_exc(limit=5, file=sys.stdout)
print("Error while dropping role, skipping")
try:
drop_result = self.admin_auth.command(
{
"dropUser": RO_USERNAME
}
)
print("Dropped RO user, result = %s" % drop_result)
print("At current state, list of users = %s" % self.admin_auth.command({"usersInfo": 1}))
except Exception as e:
traceback.print_exc(limit=5, file=sys.stdout)
print("Error while dropping ro user, skipping")
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog="db_auth", epilog="Run this script against a database without authentication - e.g. mongod *without* --auth")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-s", "--setup",
help="create users and roles in the database", action='store_true')
group.add_argument("-t", "--teardown",
help="remove users and roles created by this script from the database.", action='store_true')
args = parser.parse_args()
sad = SetupDBAuth()
if args.setup:
sad.setupAdminUser()
sad.setupRWUser()
sad.setupROUser()
else:
assert(args.teardown == True)
sad.admin_auth = pymongo.MongoClient(sad.getURL(ADMIN_USERNAME, ADMIN_PASSWORD)).admin
sad.teardownROUser()
sad.teardownRWUser()
sad.teardownAdminUser()
| {
"content_hash": "b24793024feae8264e6c40d338648a12",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 151,
"avg_line_length": 39.47402597402598,
"alnum_prop": 0.5680210560947524,
"repo_name": "sunil07t/e-mission-server",
"id": "57cbf600eb5e66acc71510b9d81269bcfdcaa573",
"size": "6221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup/db_auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "711874"
},
{
"name": "HTML",
"bytes": "122542"
},
{
"name": "JavaScript",
"bytes": "6962852"
},
{
"name": "Jupyter Notebook",
"bytes": "99521529"
},
{
"name": "Python",
"bytes": "1800632"
},
{
"name": "Shell",
"bytes": "2299"
},
{
"name": "Smarty",
"bytes": "3456"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from avatar import views
urlpatterns = [
url(r'^add/$', views.add, name='avatar_add'),
url(r'^change/$', views.change, name='avatar_change'),
url(r'^delete/$', views.delete, name='avatar_delete'),
url(r'^render_primary/(?P<user>[\w\d\@\.\-_]+)/(?P<size>[\d]+)/$',
views.render_primary,
name='avatar_render_primary'),
]
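# A hedged example of hooking these patterns into a project urlconf; the
# project-level urls.py and the '^avatar/' prefix below are assumptions, not
# part of this app:
#
#     from django.conf.urls import include, url
#
#     urlpatterns = [
#         url(r'^avatar/', include('avatar.urls')),
#     ]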
| {
"content_hash": "41cd0c2ab01cd4083dc8e536bbcd6c2c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 70,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.6010362694300518,
"repo_name": "ad-m/django-avatar",
"id": "b031d6007af24f96d6b89e742af5df126c94caab",
"size": "386",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "avatar/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2979"
},
{
"name": "Makefile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "56075"
}
],
"symlink_target": ""
} |
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=1.9', 'setuptools>=17.1'],
pbr=True)
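# With pbr the package metadata (name, version, requirements, entry points)
# is read from setup.cfg rather than passed to setup(); a minimal sketch of
# such a setup.cfg (all field values here are illustrative assumptions):
#
#     [metadata]
#     name = e2emlstorlets
#     summary = End-to-end ML storlets (illustrative summary)
#
#     [files]
#     packages =
#         e2emlstorlets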
| {
"content_hash": "3e520e8bef9a13e665933e9eb8a33dd9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 28.307692307692307,
"alnum_prop": 0.720108695652174,
"repo_name": "eranr/e2emlstorlets",
"id": "6397c3f4417675b92011d618551817c84fcd543a",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "37861"
},
{
"name": "Python",
"bytes": "72395"
},
{
"name": "Shell",
"bytes": "4251"
}
],
"symlink_target": ""
} |
import ast
import math
import os.path
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import re
import six
import uuid
from cinder import exception
from cinder import utils as cinder_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects.consistencygroup import ConsistencyGroup
from cinder.objects import fields
from cinder.objects.group import Group
from cinder.volume.drivers.dell_emc.vmax import fast
from cinder.volume.drivers.dell_emc.vmax import https
from cinder.volume.drivers.dell_emc.vmax import masking
from cinder.volume.drivers.dell_emc.vmax import provision
from cinder.volume.drivers.dell_emc.vmax import provision_v3
from cinder.volume.drivers.dell_emc.vmax import utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
try:
import pywbem
pywbemAvailable = True
except ImportError:
pywbemAvailable = False
CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml'
CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_emc_config_'
CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml'
BACKENDNAME = 'volume_backend_name'
PREFIXBACKENDNAME = 'capabilities:volume_backend_name'
PORTGROUPNAME = 'portgroupname'
EMC_ROOT = 'root/emc'
POOL = 'storagetype:pool'
ARRAY = 'storagetype:array'
FASTPOLICY = 'storagetype:fastpolicy'
COMPOSITETYPE = 'storagetype:compositetype'
MULTI_POOL_SUPPORT = 'MultiPoolSupport'
STRIPECOUNT = 'storagetype:stripecount'
MEMBERCOUNT = 'storagetype:membercount'
STRIPED = 'striped'
CONCATENATED = 'concatenated'
SMI_VERSION_8 = 800
# V3
SLO = 'storagetype:slo'
WORKLOAD = 'storagetype:workload'
INTERVAL = 'storagetype:interval'
RETRIES = 'storagetype:retries'
ISV3 = 'isV3'
TRUNCATE_5 = 5
TRUNCATE_27 = 27
SNAPVX = 7
DISSOLVE_SNAPVX = 9
CREATE_NEW_TARGET = 2
SNAPVX_REPLICATION_TYPE = 6
# Replication
IS_RE = 'replication_enabled'
REPLICATION_DISABLED = fields.ReplicationStatus.DISABLED
REPLICATION_ENABLED = fields.ReplicationStatus.ENABLED
REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER
FAILOVER_ERROR = fields.ReplicationStatus.FAILOVER_ERROR
REPLICATION_ERROR = fields.ReplicationStatus.ERROR
SUSPEND_SRDF = 22
DETACH_SRDF = 8
MIRROR_SYNC_TYPE = 6
emc_opts = [
cfg.StrOpt('cinder_emc_config_file',
default=CINDER_EMC_CONFIG_FILE,
help='Use this file for cinder emc plugin '
'config data'),
cfg.StrOpt('multi_pool_support',
default=False,
help='Use this value to specify '
'multi-pool support for VMAX3'),
cfg.StrOpt('initiator_check',
default=False,
help='Use this value to enable '
'the initiator_check')]
CONF.register_opts(emc_opts)
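# A hedged illustration of how the options registered above might be set in a
# cinder.conf backend section (the section name, driver class path and config
# file path below are assumptions, not taken from this module):
#
#     [CONF_GROUP_ISCSI]
#     volume_driver = cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver
#     cinder_emc_config_file = /etc/cinder/cinder_emc_config_CONF_GROUP_ISCSI.xml
#     multi_pool_support = True
#     initiator_check = False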
class VMAXCommon(object):
"""Common class for SMI-S based EMC volume drivers.
This common class is for EMC volume drivers based on SMI-S.
    It supports VMAX arrays.
"""
VERSION = "2.0.0"
stats = {'driver_version': '1.0',
'free_capacity_gb': 0,
'reserved_percentage': 0,
'storage_protocol': None,
'total_capacity_gb': 0,
'vendor_name': 'Dell EMC',
'volume_backend_name': None,
'replication_enabled': False,
'replication_targets': None}
pool_info = {'backend_name': None,
'config_file': None,
'arrays_info': {},
'max_over_subscription_ratio': None,
'reserved_percentage': None,
'replication_enabled': False
}
def __init__(self, prtcl, version, configuration=None,
active_backend_id=None):
if not pywbemAvailable:
LOG.info(_LI(
"Module PyWBEM not installed. "
"Install PyWBEM using the python-pywbem package."))
self.protocol = prtcl
self.configuration = configuration
self.configuration.append_config_values(emc_opts)
self.conn = None
self.url = None
self.user = None
self.passwd = None
self.masking = masking.VMAXMasking(prtcl)
self.utils = utils.VMAXUtils(prtcl)
self.fast = fast.VMAXFast(prtcl)
self.provision = provision.VMAXProvision(prtcl)
self.provisionv3 = provision_v3.VMAXProvisionV3(prtcl)
self.version = version
# replication
self.replication_enabled = False
self.extendReplicatedVolume = False
self.active_backend_id = active_backend_id
self.failover = False
self._get_replication_info()
self.multiPoolSupportEnabled = False
self.initiatorCheck = False
self._gather_info()
def _gather_info(self):
"""Gather the relevant information for update_volume_stats."""
if hasattr(self.configuration, 'cinder_emc_config_file'):
self.pool_info['config_file'] = (
self.configuration.cinder_emc_config_file)
else:
self.pool_info['config_file'] = (
self.configuration.safe_get('cinder_emc_config_file'))
if hasattr(self.configuration, 'multi_pool_support'):
tempMultiPoolSupported = cinder_utils.get_bool_param(
'multi_pool_support', self.configuration)
if tempMultiPoolSupported:
self.multiPoolSupportEnabled = True
self.pool_info['backend_name'] = (
self.configuration.safe_get('volume_backend_name'))
self.pool_info['max_over_subscription_ratio'] = (
self.configuration.safe_get('max_over_subscription_ratio'))
self.pool_info['reserved_percentage'] = (
self.configuration.safe_get('reserved_percentage'))
LOG.debug(
"Updating volume stats on file %(emcConfigFileName)s on "
"backend %(backendName)s.",
{'emcConfigFileName': self.pool_info['config_file'],
'backendName': self.pool_info['backend_name']})
arrayInfoList = self.utils.parse_file_to_get_array_map(
self.pool_info['config_file'])
        # Assuming that there is always a single array info object.
# Check if Multi pool support is enabled
if self.multiPoolSupportEnabled is False:
self.pool_info['arrays_info'] = arrayInfoList
else:
finalArrayInfoList = self._get_slo_workload_combinations(
arrayInfoList)
self.pool_info['arrays_info'] = finalArrayInfoList
def _get_replication_info(self):
"""Gather replication information, if provided."""
self.rep_config = None
self.replication_targets = None
if hasattr(self.configuration, 'replication_device'):
self.rep_devices = self.configuration.safe_get(
'replication_device')
if self.rep_devices and len(self.rep_devices) == 1:
self.rep_config = self.utils.get_replication_config(
self.rep_devices)
if self.rep_config:
self.replication_targets = [self.rep_config['array']]
if self.active_backend_id == self.rep_config['array']:
self.failover = True
self.extendReplicatedVolume = self.rep_config['allow_extend']
# use self.replication_enabled for update_volume_stats
self.replication_enabled = True
LOG.debug("The replication configuration is %(rep_config)s.",
{'rep_config': self.rep_config})
elif self.rep_devices and len(self.rep_devices) > 1:
LOG.error(_LE("More than one replication target is configured. "
"EMC VMAX only suppports a single replication "
"target. Replication will not be enabled."))
def _get_slo_workload_combinations(self, arrayInfoList):
"""Method to query the array for SLO and Workloads.
Takes the arrayInfoList object and generates a set which has
all available SLO & Workload combinations
:param arrayInfoList:
:return: finalArrayInfoList
:raises: Exception
"""
try:
sloWorkloadSet = set()
# Pattern for extracting the SLO & Workload String
pattern = re.compile("^-S[A-Z]+")
for arrayInfo in arrayInfoList:
self._set_ecom_credentials(arrayInfo)
isV3 = self.utils.isArrayV3(self.conn,
arrayInfo['SerialNumber'])
# Only if the array is VMAX3
if isV3:
poolInstanceName, storageSystemStr = (
self._find_pool_in_array(arrayInfo['SerialNumber'],
arrayInfo['PoolName'], isV3))
# Get the pool capability
storagePoolCapability = (
self.provisionv3.get_storage_pool_capability(
self.conn, poolInstanceName))
# Get the pool settings
storagePoolSettings = self.conn.AssociatorNames(
storagePoolCapability,
ResultClass='CIM_storageSetting')
for storagePoolSetting in storagePoolSettings:
settingInstanceID = storagePoolSetting['InstanceID']
settingInstanceDetails = settingInstanceID.split('+')
sloWorkloadString = settingInstanceDetails[2]
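                        # The setting InstanceID is split on '+' and its
                        # third token is expected to encode the SLO/workload
                        # pair; matching tokens are trimmed of their first
                        # two and last characters below, leaving an
                        # 'SLO:Workload' string (the exact on-array format
                        # is an assumption here).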
if pattern.match(sloWorkloadString):
length = len(sloWorkloadString)
tempSloWorkloadString = (
sloWorkloadString[2:length - 1])
sloWorkloadSet.add(tempSloWorkloadString)
# Assuming that there is always a single arrayInfo object
finalArrayInfoList = []
for sloWorkload in sloWorkloadSet:
# Doing a shallow copy will work as we are modifying
# only strings
temparrayInfo = arrayInfoList[0].copy()
slo, workload = sloWorkload.split(':')
                # Check if we got SLO and workload from the set (from the
                # array); the previous check was mistakenly done against
                # the value from the XML file.
if slo:
temparrayInfo['SLO'] = slo
if workload:
temparrayInfo['Workload'] = workload
finalArrayInfoList.append(temparrayInfo)
except Exception:
exceptionMessage = (_(
"Unable to get the SLO/Workload combinations from the array"))
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return finalArrayInfoList
def create_volume(self, volume):
"""Creates a EMC(VMAX) volume from a pre-existing storage pool.
For a concatenated compositeType:
If the volume size is over 240GB then a composite is created
EMCNumberOfMembers > 1, otherwise it defaults to a non composite
For a striped compositeType:
The user must supply an extra spec to determine how many metas
will make up the striped volume. If the meta size is greater
than 240GB an error is returned to the user. Otherwise the
EMCNumberOfMembers is what the user specifies.
:param volume: volume Object
:returns: model_update, dict
"""
model_update = {}
volumeSize = int(self.utils.convert_gb_to_bits(volume['size']))
volumeId = volume['id']
extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
# VolumeName naming convention is 'OS-UUID'.
volumeName = self.utils.get_volume_element_name(volumeId)
if extraSpecs[ISV3]:
rc, volumeDict, storageSystemName = (
self._create_v3_volume(volume, volumeName, volumeSize,
extraSpecs))
else:
rc, volumeDict, storageSystemName = (
self._create_composite_volume(volume, volumeName, volumeSize,
extraSpecs))
# set-up volume replication, if enabled (V3 only)
if self.utils.is_replication_enabled(extraSpecs):
try:
replication_status, replication_driver_data = (
self.setup_volume_replication(
self.conn, volume, volumeDict, extraSpecs))
except Exception:
self._cleanup_replication_source(self.conn, volumeName,
volumeDict, extraSpecs)
raise
model_update.update(
{'replication_status': replication_status,
'replication_driver_data': six.text_type(
replication_driver_data)})
# If volume is created as part of a consistency group.
if 'consistencygroup_id' in volume and volume['consistencygroup_id']:
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
replicationService = (
self.utils.find_replication_service(self.conn,
storageSystemName))
cgInstanceName, cgName = (
self._find_consistency_group(
replicationService,
six.text_type(volume['consistencygroup_id'])))
self.provision.add_volume_to_cg(self.conn,
replicationService,
cgInstanceName,
volumeInstance.path,
cgName,
volumeName,
extraSpecs)
LOG.info(_LI("Leaving create_volume: %(volumeName)s "
"Return code: %(rc)lu "
"volume dict: %(name)s."),
{'volumeName': volumeName,
'rc': rc,
'name': volumeDict})
# Adding version information
volumeDict['version'] = self.version
model_update.update(
{'provider_location': six.text_type(volumeDict)})
return model_update
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
For VMAX, replace snapshot with clone.
:param volume: volume Object
:param snapshot: snapshot object
:returns: model_update, dict
:raises: VolumeBackendAPIException
"""
LOG.debug("Entering create_volume_from_snapshot.")
extraSpecs = self._initial_setup(snapshot, host=volume['host'])
model_update = {}
self.conn = self._get_ecom_connection()
snapshotInstance = self._find_lun(snapshot)
self._sync_check(snapshotInstance, snapshot['name'], extraSpecs)
cloneDict = self._create_cloned_volume(volume, snapshot,
extraSpecs, False)
# set-up volume replication, if enabled
if self.utils.is_replication_enabled(extraSpecs):
try:
replication_status, replication_driver_data = (
self.setup_volume_replication(
self.conn, volume, cloneDict, extraSpecs))
except Exception:
self._cleanup_replication_source(self.conn, snapshot['name'],
cloneDict, extraSpecs)
raise
model_update.update(
{'replication_status': replication_status,
'replication_driver_data': six.text_type(
replication_driver_data)})
cloneDict['version'] = self.version
model_update.update(
{'provider_location': six.text_type(cloneDict)})
return model_update
def create_cloned_volume(self, cloneVolume, sourceVolume):
"""Creates a clone of the specified volume.
:param cloneVolume: clone volume Object
:param sourceVolume: volume object
:returns: model_update, dict
"""
model_update = {}
extraSpecs = self._initial_setup(sourceVolume)
cloneDict = self._create_cloned_volume(cloneVolume, sourceVolume,
extraSpecs, False)
# set-up volume replication, if enabled
if self.utils.is_replication_enabled(extraSpecs):
try:
replication_status, replication_driver_data = (
self.setup_volume_replication(
self.conn, cloneVolume, cloneDict, extraSpecs))
except Exception:
self._cleanup_replication_source(
self.conn, cloneVolume['name'], cloneDict, extraSpecs)
raise
model_update.update(
{'replication_status': replication_status,
'replication_driver_data': six.text_type(
replication_driver_data)})
cloneDict['version'] = self.version
model_update.update(
{'provider_location': six.text_type(cloneDict)})
return model_update
def delete_volume(self, volume):
"""Deletes a EMC(VMAX) volume.
:param volume: volume Object
"""
LOG.info(_LI("Deleting Volume: %(volume)s"),
{'volume': volume['name']})
rc, volumeName = self._delete_volume(volume)
LOG.info(_LI("Leaving delete_volume: %(volumename)s Return code: "
"%(rc)lu."),
{'volumename': volumeName,
'rc': rc})
def create_snapshot(self, snapshot, volume):
"""Creates a snapshot.
For VMAX, replace snapshot with clone.
:param snapshot: snapshot object
:param volume: volume Object to create snapshot from
:returns: dict -- the cloned volume dictionary
"""
extraSpecs = self._initial_setup(volume)
return self._create_cloned_volume(snapshot, volume, extraSpecs, True)
def delete_snapshot(self, snapshot, volume):
"""Deletes a snapshot.
:param snapshot: snapshot object
:param volume: volume Object to create snapshot from
"""
LOG.info(_LI("Delete Snapshot: %(snapshotName)s."),
{'snapshotName': snapshot['name']})
self._delete_snapshot(snapshot, volume['host'])
def _remove_members(self, controllerConfigService,
volumeInstance, connector, extraSpecs):
"""This method unmaps a volume from a host.
Removes volume from the Device Masking Group that belongs to
a Masking View.
        Check if fast policy is in the extra specs. If it isn't, we do
        not need to do anything for FAST.
Assume that isTieringPolicySupported is False unless the FAST
policy is in the extra specs and tiering is enabled on the array.
:param controllerConfigService: instance name of
ControllerConfigurationService
:param volumeInstance: volume Object
:param connector: the connector object
:param extraSpecs: extra specifications
:returns: storageGroupInstanceName
"""
volumeName = volumeInstance['ElementName']
LOG.debug("Detaching volume %s.", volumeName)
return self.masking.remove_and_reset_members(
self.conn, controllerConfigService, volumeInstance,
volumeName, extraSpecs, connector)
def _unmap_lun(self, volume, connector):
"""Unmaps a volume from the host.
:param volume: the volume Object
:param connector: the connector Object
:raises: VolumeBackendAPIException
"""
extraSpecs = self._initial_setup(volume)
if self.utils.is_volume_failed_over(volume):
extraSpecs = self._get_replication_extraSpecs(
extraSpecs, self.rep_config)
volumename = volume['name']
LOG.info(_LI("Unmap volume: %(volume)s."),
{'volume': volumename})
device_info, __, __ = self.find_device_number(
volume, connector['host'])
if 'hostlunid' not in device_info:
LOG.info(_LI("Volume %s is not mapped. No volume to unmap."),
volumename)
return
vol_instance = self._find_lun(volume)
storage_system = vol_instance['SystemName']
if self._is_volume_multiple_masking_views(vol_instance):
return
configservice = self.utils.find_controller_configuration_service(
self.conn, storage_system)
if configservice is None:
exception_message = (_("Cannot find Controller Configuration "
"Service for storage system "
"%(storage_system)s.")
% {'storage_system': storage_system})
raise exception.VolumeBackendAPIException(data=exception_message)
self._remove_members(configservice, vol_instance, connector,
extraSpecs)
def _is_volume_multiple_masking_views(self, vol_instance):
"""Check if volume is in more than one MV.
:param vol_instance: the volume instance
:returns: boolean
"""
storageGroupInstanceNames = (
self.masking.get_associated_masking_groups_from_device(
self.conn, vol_instance.path))
for storageGroupInstanceName in storageGroupInstanceNames:
mvInstanceNames = self.masking.get_masking_view_from_storage_group(
self.conn, storageGroupInstanceName)
if len(mvInstanceNames) > 1:
return True
return False
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns device and connection info.
        The volume may already be mapped; if so, the deviceInfo tuple
        is returned. If the volume is not already mapped then we need to
        gather information to either 1. Create a new masking view or 2. Add
the volume to an existing storage group within an already existing
maskingview.
The naming convention is the following:
.. code-block:: none
initiatorGroupName = OS-<shortHostName>-<shortProtocol>-IG
e.g OS-myShortHost-I-IG
storageGroupName = OS-<shortHostName>-<poolName>-<shortProtocol>-SG
e.g OS-myShortHost-SATA_BRONZ1-I-SG
portGroupName = OS-<target>-PG The portGroupName will come from
the EMC configuration xml file.
These are precreated. If the portGroup does not
exist then an error will be returned to the user
maskingView = OS-<shortHostName>-<poolName>-<shortProtocol>-MV
e.g OS-myShortHost-SATA_BRONZ1-I-MV
:param volume: volume Object
:param connector: the connector Object
:returns: dict -- deviceInfoDict - device information dict
:raises: VolumeBackendAPIException
"""
portGroupName = None
extraSpecs = self._initial_setup(volume)
is_multipath = connector.get('multipath', False)
volumeName = volume['name']
LOG.info(_LI("Initialize connection: %(volume)s."),
{'volume': volumeName})
self.conn = self._get_ecom_connection()
if self.utils.is_volume_failed_over(volume):
extraSpecs = self._get_replication_extraSpecs(
extraSpecs, self.rep_config)
deviceInfoDict, isLiveMigration, sourceInfoDict = (
self._wrap_find_device_number(
volume, connector['host']))
maskingViewDict = self._populate_masking_dict(
volume, connector, extraSpecs)
if ('hostlunid' in deviceInfoDict and
deviceInfoDict['hostlunid'] is not None):
deviceNumber = deviceInfoDict['hostlunid']
LOG.info(_LI("Volume %(volume)s is already mapped. "
"The device number is %(deviceNumber)s."),
{'volume': volumeName,
'deviceNumber': deviceNumber})
# Special case, we still need to get the iscsi ip address.
portGroupName = (
self._get_correct_port_group(
deviceInfoDict, maskingViewDict['storageSystemName']))
else:
if isLiveMigration:
maskingViewDict['storageGroupInstanceName'] = (
self._get_storage_group_from_source(sourceInfoDict))
maskingViewDict['portGroupInstanceName'] = (
self._get_port_group_from_source(sourceInfoDict))
deviceInfoDict, portGroupName = self._attach_volume(
volume, connector, extraSpecs, maskingViewDict, True)
else:
deviceInfoDict, portGroupName = (
self._attach_volume(
volume, connector, extraSpecs, maskingViewDict))
if self.protocol.lower() == 'iscsi':
deviceInfoDict['ip_and_iqn'] = (
self._find_ip_protocol_endpoints(
self.conn, deviceInfoDict['storagesystem'],
portGroupName))
deviceInfoDict['is_multipath'] = is_multipath
return deviceInfoDict
def _attach_volume(self, volume, connector, extraSpecs,
maskingViewDict, isLiveMigration=False):
"""Attach a volume to a host.
If live migration is being undertaken then the volume
remains attached to the source host.
:params volume: the volume object
:params connector: the connector object
:param extraSpecs: extra specifications
:param maskingViewDict: masking view information
:param isLiveMigration: boolean, can be None
:returns: dict -- deviceInfoDict
String -- port group name
:raises: VolumeBackendAPIException
"""
volumeName = volume['name']
if isLiveMigration:
maskingViewDict['isLiveMigration'] = True
else:
maskingViewDict['isLiveMigration'] = False
rollbackDict = self.masking.setup_masking_view(
self.conn, maskingViewDict, extraSpecs)
# Find host lun id again after the volume is exported to the host.
deviceInfoDict, __, __ = self.find_device_number(
volume, connector['host'])
if 'hostlunid' not in deviceInfoDict:
# Did not successfully attach to host,
# so a rollback for FAST is required.
LOG.error(_LE("Error Attaching volume %(vol)s."),
{'vol': volumeName})
if ((rollbackDict['fastPolicyName'] is not None) or
(rollbackDict['isV3'] is not None)):
(self.masking._check_if_rollback_action_for_masking_required(
self.conn, rollbackDict))
exception_message = (_("Error Attaching volume %(vol)s.")
% {'vol': volumeName})
raise exception.VolumeBackendAPIException(
data=exception_message)
return deviceInfoDict, rollbackDict['pgGroupName']
def _is_same_host(self, connector, deviceInfoDict):
"""Check if the host is the same.
Check if the host to attach to is the same host
that is already attached. This is necessary for
live migration.
:params connector: the connector object
:params deviceInfoDict: the device information dictionary
:returns: boolean -- True if the host is the same, False otherwise.
"""
if 'host' in connector:
currentHost = connector['host']
if ('maskingview' in deviceInfoDict and
deviceInfoDict['maskingview'] is not None):
if currentHost in deviceInfoDict['maskingview']:
return True
return False
def _get_correct_port_group(self, deviceInfoDict, storageSystemName):
"""Get the portgroup name from the existing masking view.
:params deviceInfoDict: the device info dictionary
:params storageSystemName: storage system name
:returns: String port group name
"""
if ('controller' in deviceInfoDict and
deviceInfoDict['controller'] is not None):
maskingViewInstanceName = deviceInfoDict['controller']
try:
maskingViewInstance = (
self.conn.GetInstance(maskingViewInstanceName))
except Exception:
exception_message = (_("Unable to get the name of "
"the masking view."))
raise exception.VolumeBackendAPIException(
data=exception_message)
# Get the portgroup from masking view
portGroupInstanceName = (
self.masking._get_port_group_from_masking_view(
self.conn,
maskingViewInstance['ElementName'],
storageSystemName))
try:
portGroupInstance = (
self.conn.GetInstance(portGroupInstanceName))
portGroupName = (
portGroupInstance['ElementName'])
except Exception:
exception_message = (_("Unable to get the name of "
"the portgroup."))
raise exception.VolumeBackendAPIException(
data=exception_message)
else:
exception_message = (_("Cannot get the portgroup from "
"the masking view."))
raise exception.VolumeBackendAPIException(
data=exception_message)
return portGroupName
def _get_storage_group_from_source(self, deviceInfoDict):
"""Get the storage group from the existing masking view.
:params deviceInfoDict: the device info dictionary
:returns: storage group instance
"""
storageGroupInstanceName = None
if ('controller' in deviceInfoDict and
deviceInfoDict['controller'] is not None):
maskingViewInstanceName = deviceInfoDict['controller']
# Get the storage group from masking view
storageGroupInstanceName = (
self.masking._get_storage_group_from_masking_view_instance(
self.conn,
maskingViewInstanceName))
else:
exception_message = (_("Cannot get the storage group from "
"the masking view."))
raise exception.VolumeBackendAPIException(
data=exception_message)
return storageGroupInstanceName
def _get_port_group_from_source(self, deviceInfoDict):
"""Get the port group from the existing masking view.
:params deviceInfoDict: the device info dictionary
:returns: port group instance
"""
portGroupInstanceName = None
if ('controller' in deviceInfoDict and
deviceInfoDict['controller'] is not None):
maskingViewInstanceName = deviceInfoDict['controller']
# Get the port group from masking view
portGroupInstanceName = (
self.masking.get_port_group_from_masking_view_instance(
self.conn,
maskingViewInstanceName))
else:
exception_message = (_("Cannot get the port group from "
"the masking view."))
raise exception.VolumeBackendAPIException(
data=exception_message)
return portGroupInstanceName
def check_ig_instance_name(self, initiatorGroupInstanceName):
"""Check if an initiator group instance is on the array.
:param initiatorGroupInstanceName: initiator group instance name
:returns: initiator group name, or None if deleted
"""
return self.utils.check_ig_instance_name(
self.conn, initiatorGroupInstanceName)
def terminate_connection(self, volume, connector):
"""Disallow connection from connector.
:params volume: the volume Object
:params connector: the connector Object
"""
volumename = volume['name']
LOG.info(_LI("Terminate connection: %(volume)s."),
{'volume': volumename})
self._unmap_lun(volume, connector)
def extend_volume(self, volume, newSize):
"""Extends an existing volume.
        Prerequisites:
1. The volume must be composite e.g StorageVolume.EMCIsComposite=True
2. The volume can only be concatenated
e.g StorageExtent.IsConcatenated=True
:params volume: the volume Object
:params newSize: the new size to increase the volume to
:returns: dict -- modifiedVolumeDict - the extended volume Object
:raises: VolumeBackendAPIException
"""
originalVolumeSize = volume['size']
volumeName = volume['name']
extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
exceptionMessage = (_("Cannot find Volume: %(volumename)s. "
"Extend operation. Exiting....")
% {'volumename': volumeName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return self._extend_volume(
volume, volumeInstance, volumeName, newSize,
originalVolumeSize, extraSpecs)
def _extend_volume(
self, volume, volumeInstance, volumeName, newSize,
originalVolumeSize, extraSpecs):
"""Extends an existing volume.
:param volume: the volume Object
:param volumeInstance: the volume instance
:param volumeName: the volume name
:param newSize: the new size to increase the volume to
:param originalVolumeSize:
:param extraSpecs: extra specifications
:return: dict -- modifiedVolumeDict - the extended volume Object
:raises: VolumeBackendAPIException
"""
if int(originalVolumeSize) > int(newSize):
exceptionMessage = (_(
"Your original size: %(originalVolumeSize)s GB is greater "
"than: %(newSize)s GB. Only Extend is supported. Exiting...")
% {'originalVolumeSize': originalVolumeSize,
'newSize': newSize})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
additionalVolumeSize = six.text_type(
int(newSize) - int(originalVolumeSize))
additionalVolumeSize = self.utils.convert_gb_to_bits(
additionalVolumeSize)
if extraSpecs[ISV3]:
if self.utils.is_replication_enabled(extraSpecs):
# extra logic required if volume is replicated
rc, modifiedVolumeDict = self.extend_volume_is_replicated(
volume, volumeInstance, volumeName, newSize,
extraSpecs)
else:
rc, modifiedVolumeDict = self._extend_v3_volume(
volumeInstance, volumeName, newSize, extraSpecs)
else:
# This is V2.
rc, modifiedVolumeDict = self._extend_composite_volume(
volumeInstance, volumeName, newSize, additionalVolumeSize,
extraSpecs)
# Check the occupied space of the new extended volume.
extendedVolumeInstance = self.utils.find_volume_instance(
self.conn, modifiedVolumeDict, volumeName)
extendedVolumeSize = self.utils.get_volume_size(
self.conn, extendedVolumeInstance)
LOG.debug(
"The actual volume size of the extended volume: %(volumeName)s "
"is %(volumeSize)s.",
{'volumeName': volumeName,
'volumeSize': extendedVolumeSize})
# If the requested size and the actual size don't
# tally throw an exception.
newSizeBits = self.utils.convert_gb_to_bits(newSize)
diffVolumeSize = self.utils.compare_size(
newSizeBits, extendedVolumeSize)
if diffVolumeSize != 0:
exceptionMessage = (_(
"The requested size : %(requestedSize)s is not the same as "
"resulting size: %(resultSize)s.")
% {'requestedSize': newSizeBits,
'resultSize': extendedVolumeSize})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
LOG.debug(
"Leaving extend_volume: %(volumeName)s. "
"Return code: %(rc)lu, "
"volume dict: %(name)s.",
{'volumeName': volumeName,
'rc': rc,
'name': modifiedVolumeDict})
return modifiedVolumeDict
def update_volume_stats(self):
"""Retrieve stats info."""
pools = []
# Dictionary to hold the VMAX3 arrays for which the SRP details
# have already been queried
# This only applies to the arrays for which WLP is not enabled
arrays = {}
backendName = self.pool_info['backend_name']
max_oversubscription_ratio = (
self.pool_info['max_over_subscription_ratio'])
reservedPercentage = self.pool_info['reserved_percentage']
array_max_over_subscription = None
array_reserve_percent = None
for arrayInfo in self.pool_info['arrays_info']:
alreadyQueried = False
self._set_ecom_credentials(arrayInfo)
# Check what type of array it is
isV3 = self.utils.isArrayV3(self.conn,
arrayInfo['SerialNumber'])
if isV3:
if self.failover:
arrayInfo = self.get_secondary_stats_info(
self.rep_config, arrayInfo)
# Report only the SLO name in the pool name for
# backward compatibility
if self.multiPoolSupportEnabled is False:
(location_info, total_capacity_gb, free_capacity_gb,
provisioned_capacity_gb,
array_reserve_percent,
wlpEnabled) = self._update_srp_stats(arrayInfo)
poolName = ("%(slo)s+%(poolName)s+%(array)s"
% {'slo': arrayInfo['SLO'],
'poolName': arrayInfo['PoolName'],
'array': arrayInfo['SerialNumber']})
else:
# Add both SLO & Workload name in the pool name
# Query the SRP only once if WLP is not enabled
# Only insert the array details in the dict once
if arrayInfo['SerialNumber'] not in arrays:
(location_info, total_capacity_gb, free_capacity_gb,
provisioned_capacity_gb,
array_reserve_percent,
wlpEnabled) = self._update_srp_stats(arrayInfo)
else:
alreadyQueried = True
poolName = ("%(slo)s+%(workload)s+%(poolName)s+%(array)s"
% {'slo': arrayInfo['SLO'],
'workload': arrayInfo['Workload'],
'poolName': arrayInfo['PoolName'],
'array': arrayInfo['SerialNumber']})
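                    # e.g. "Bronze+DSS+SRP_1+000123456789" (slo, workload,
                    # pool and serial values here are illustrative only).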
if wlpEnabled is False:
arrays[arrayInfo['SerialNumber']] = (
[total_capacity_gb, free_capacity_gb,
provisioned_capacity_gb, array_reserve_percent])
else:
# This is V2
(location_info, total_capacity_gb, free_capacity_gb,
provisioned_capacity_gb, array_max_over_subscription) = (
self._update_pool_stats(backendName, arrayInfo))
poolName = ("%(poolName)s+%(array)s"
% {'poolName': arrayInfo['PoolName'],
'array': arrayInfo['SerialNumber']})
if alreadyQueried and self.multiPoolSupportEnabled:
# The dictionary will only have one key per VMAX3
# Construct the location info
temp_location_info = (
("%(arrayName)s#%(poolName)s#%(slo)s#%(workload)s"
% {'arrayName': arrayInfo['SerialNumber'],
'poolName': arrayInfo['PoolName'],
'slo': arrayInfo['SLO'],
'workload': arrayInfo['Workload']}))
pool = {'pool_name': poolName,
'total_capacity_gb':
arrays[arrayInfo['SerialNumber']][0],
'free_capacity_gb':
arrays[arrayInfo['SerialNumber']][1],
'provisioned_capacity_gb':
arrays[arrayInfo['SerialNumber']][2],
'QoS_support': True,
'location_info': temp_location_info,
'consistencygroup_support': True,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'max_over_subscription_ratio':
max_oversubscription_ratio,
'replication_enabled': self.replication_enabled
}
if (
arrays[arrayInfo['SerialNumber']][3] and
(arrays[arrayInfo['SerialNumber']][3] >
reservedPercentage)):
pool['reserved_percentage'] = (
arrays[arrayInfo['SerialNumber']][3])
else:
pool['reserved_percentage'] = reservedPercentage
else:
pool = {'pool_name': poolName,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
'QoS_support': False,
'location_info': location_info,
'consistencygroup_support': True,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'max_over_subscription_ratio':
max_oversubscription_ratio,
'replication_enabled': self.replication_enabled
}
if (
array_reserve_percent and
(array_reserve_percent > reservedPercentage)):
pool['reserved_percentage'] = array_reserve_percent
else:
pool['reserved_percentage'] = reservedPercentage
if array_max_over_subscription:
pool['max_over_subscription_ratio'] = (
self.utils.override_ratio(
max_oversubscription_ratio,
array_max_over_subscription))
pools.append(pool)
data = {'vendor_name': "Dell EMC",
'driver_version': self.version,
'storage_protocol': 'unknown',
'volume_backend_name': self.pool_info['backend_name'] or
self.__class__.__name__,
# Use zero capacities here so we always use a pool.
'total_capacity_gb': 0,
'free_capacity_gb': 0,
'provisioned_capacity_gb': 0,
'reserved_percentage': 0,
'replication_enabled': self.replication_enabled,
'replication_targets': self.replication_targets,
'pools': pools}
return data
def _update_srp_stats(self, arrayInfo):
"""Update SRP stats.
:param arrayInfo: array information
:returns: location_info
:returns: totalManagedSpaceGbs
:returns: remainingManagedSpaceGbs
:returns: provisionedManagedSpaceGbs
:returns: array_reserve_percent
:returns: wlpEnabled
"""
(totalManagedSpaceGbs, remainingManagedSpaceGbs,
provisionedManagedSpaceGbs, array_reserve_percent, wlpEnabled) = (
self.provisionv3.get_srp_pool_stats(self.conn, arrayInfo))
LOG.info(_LI(
"Capacity stats for SRP pool %(poolName)s on array "
"%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
"free_capacity_gb=%(free_capacity_gb)lu, "
"provisioned_capacity_gb=%(provisioned_capacity_gb)lu"),
{'poolName': arrayInfo['PoolName'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': totalManagedSpaceGbs,
'free_capacity_gb': remainingManagedSpaceGbs,
'provisioned_capacity_gb': provisionedManagedSpaceGbs})
location_info = ("%(arrayName)s#%(poolName)s#%(slo)s#%(workload)s"
% {'arrayName': arrayInfo['SerialNumber'],
'poolName': arrayInfo['PoolName'],
'slo': arrayInfo['SLO'],
'workload': arrayInfo['Workload']})
return (location_info, totalManagedSpaceGbs,
remainingManagedSpaceGbs, provisionedManagedSpaceGbs,
array_reserve_percent, wlpEnabled)
def retype(self, ctxt, volume, new_type, diff, host):
"""Migrate volume to another host using retype.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param new_type: the new volume type.
:param diff: Unused parameter.
:param host: The host dict holding the relevant target(destination)
information
:returns: boolean -- True if retype succeeded, False if error
"""
volumeName = volume['name']
volumeStatus = volume['status']
LOG.info(_LI("Migrating using retype Volume: %(volume)s."),
{'volume': volumeName})
extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE("Volume %(name)s not found on the array. "
"No volume to migrate using retype."),
{'name': volumeName})
return False
if extraSpecs[ISV3]:
if self.utils.is_replication_enabled(extraSpecs):
LOG.error(_LE("Volume %(name)s is replicated - "
"Replicated volumes are not eligible for "
"storage assisted retype. Host assisted "
"retype is supported."),
{'name': volumeName})
return False
return self._slo_workload_migration(volumeInstance, volume, host,
volumeName, volumeStatus,
new_type, extraSpecs)
else:
return self._pool_migration(volumeInstance, volume, host,
volumeName, volumeStatus,
extraSpecs[FASTPOLICY],
new_type, extraSpecs)
def migrate_volume(self, ctxt, volume, host, new_type=None):
"""Migrate volume to another host.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param host: the host dict holding the relevant target(destination)
information
:param new_type: None
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
LOG.warning(_LW("The VMAX plugin only supports Retype. "
"If a pool based migration is necessary "
"this will happen on a Retype "
"From the command line: "
"cinder --os-volume-api-version 2 retype <volumeId> "
"<volumeType> --migration-policy on-demand"))
return True, {}
def _migrate_volume(
self, volume, volumeInstance, targetPoolName,
targetFastPolicyName, sourceFastPolicyName, extraSpecs,
new_type=None):
"""Migrate volume to another host.
:param volume: the volume object including the volume_type_id
:param volumeInstance: the volume instance
:param targetPoolName: the target poolName
:param targetFastPolicyName: the target FAST policy name, can be None
:param sourceFastPolicyName: the source FAST policy name, can be None
:param extraSpecs: extra specifications
:param new_type: None
:returns: boolean -- True/False
:returns: list -- empty list
"""
volumeName = volume['name']
storageSystemName = volumeInstance['SystemName']
sourcePoolInstanceName = self.utils.get_assoc_pool_from_volume(
self.conn, volumeInstance.path)
moved, rc = self._migrate_volume_from(
volume, volumeInstance, targetPoolName, sourceFastPolicyName,
extraSpecs)
if moved is False and sourceFastPolicyName is not None:
# Return the volume to the default source fast policy storage
# group because the migrate was unsuccessful.
LOG.warning(_LW(
"Failed to migrate: %(volumeName)s from "
"default source storage group "
"for FAST policy: %(sourceFastPolicyName)s. "
"Attempting cleanup... "),
{'volumeName': volumeName,
'sourceFastPolicyName': sourceFastPolicyName})
if sourcePoolInstanceName == self.utils.get_assoc_pool_from_volume(
self.conn, volumeInstance.path):
self._migrate_cleanup(self.conn, volumeInstance,
storageSystemName, sourceFastPolicyName,
volumeName, extraSpecs)
else:
# Migrate was successful but still issues.
self._migrate_rollback(
self.conn, volumeInstance, storageSystemName,
sourceFastPolicyName, volumeName, sourcePoolInstanceName,
extraSpecs)
return moved
if targetFastPolicyName == 'None':
targetFastPolicyName = None
if moved is True and targetFastPolicyName is not None:
if not self._migrate_volume_fast_target(
volumeInstance, storageSystemName,
targetFastPolicyName, volumeName, extraSpecs):
LOG.warning(_LW(
"Attempting a rollback of: %(volumeName)s to "
"original pool %(sourcePoolInstanceName)s."),
{'volumeName': volumeName,
'sourcePoolInstanceName': sourcePoolInstanceName})
self._migrate_rollback(
self.conn, volumeInstance, storageSystemName,
sourceFastPolicyName, volumeName, sourcePoolInstanceName,
extraSpecs)
if rc == 0:
moved = True
return moved
def _migrate_rollback(self, conn, volumeInstance,
storageSystemName, sourceFastPolicyName,
volumeName, sourcePoolInstanceName, extraSpecs):
"""Full rollback.
Failed on final step on adding migrated volume to new target
default storage group for the target FAST policy.
:param conn: connection info to ECOM
:param volumeInstance: the volume instance
:param storageSystemName: the storage system name
:param sourceFastPolicyName: the source FAST policy name
:param volumeName: the volume Name
:param sourcePoolInstanceName: the instance name of the source pool
:param extraSpecs: extra specifications
"""
LOG.warning(_LW("_migrate_rollback on : %(volumeName)s."),
{'volumeName': volumeName})
storageRelocationService = self.utils.find_storage_relocation_service(
conn, storageSystemName)
try:
self.provision.migrate_volume_to_storage_pool(
conn, storageRelocationService, volumeInstance.path,
sourcePoolInstanceName, extraSpecs)
except Exception:
LOG.error(_LE(
"Failed to return volume %(volumeName)s to "
"original storage pool. Please contact your system "
"administrator to return it to the correct location."),
{'volumeName': volumeName})
if sourceFastPolicyName is not None:
self.add_to_default_SG(
conn, volumeInstance, storageSystemName, sourceFastPolicyName,
volumeName, extraSpecs)
def _migrate_cleanup(self, conn, volumeInstance,
storageSystemName, sourceFastPolicyName,
volumeName, extraSpecs):
"""If the migrate fails, put volume back to source FAST SG.
:param conn: connection info to ECOM
:param volumeInstance: the volume instance
:param storageSystemName: the storage system name
:param sourceFastPolicyName: the source FAST policy name
:param volumeName: the volume Name
:param extraSpecs: extra specifications
:returns: boolean -- True/False
"""
LOG.warning(_LW("_migrate_cleanup on : %(volumeName)s."),
{'volumeName': volumeName})
return_to_default = True
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
conn, storageSystemName))
# Check to see what SG it is in.
assocStorageGroupInstanceNames = (
self.utils.get_storage_groups_from_volume(conn,
volumeInstance.path))
# This is the SG it should be in.
defaultStorageGroupInstanceName = (
self.fast.get_policy_default_storage_group(
conn, controllerConfigurationService, sourceFastPolicyName))
for assocStorageGroupInstanceName in assocStorageGroupInstanceNames:
# It is in the incorrect storage group.
if (assocStorageGroupInstanceName !=
defaultStorageGroupInstanceName):
self.provision.remove_device_from_storage_group(
conn, controllerConfigurationService,
assocStorageGroupInstanceName,
volumeInstance.path, volumeName, extraSpecs)
else:
# The volume is already in the default.
return_to_default = False
if return_to_default:
self.add_to_default_SG(
conn, volumeInstance, storageSystemName, sourceFastPolicyName,
volumeName, extraSpecs)
return return_to_default
def _migrate_volume_fast_target(
self, volumeInstance, storageSystemName,
targetFastPolicyName, volumeName, extraSpecs):
"""If the target host is FAST enabled.
If the target host is FAST enabled then we need to add it to the
default storage group for that policy.
:param volumeInstance: the volume instance
:param storageSystemName: the storage system name
:param targetFastPolicyName: the target fast policy name
:param volumeName: the volume name
:param extraSpecs: extra specifications
:returns: boolean -- True/False
"""
falseRet = False
LOG.info(_LI(
"Adding volume: %(volumeName)s to default storage group "
"for FAST policy: %(fastPolicyName)s."),
{'volumeName': volumeName,
'fastPolicyName': targetFastPolicyName})
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
defaultStorageGroupInstanceName = (
self.fast.get_or_create_default_storage_group(
self.conn, controllerConfigurationService,
targetFastPolicyName, volumeInstance, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
"Unable to create or get default storage group for FAST policy"
": %(fastPolicyName)s."),
{'fastPolicyName': targetFastPolicyName})
return falseRet
defaultStorageGroupInstanceName = (
self.fast.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService, volumeInstance,
volumeName, targetFastPolicyName, extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.error(_LE(
"Failed to verify that volume was added to storage group for "
"FAST policy: %(fastPolicyName)s."),
{'fastPolicyName': targetFastPolicyName})
return falseRet
return True
def _migrate_volume_from(self, volume, volumeInstance,
targetPoolName, sourceFastPolicyName,
extraSpecs):
"""Check FAST policies and migrate from source pool.
:param volume: the volume object including the volume_type_id
:param volumeInstance: the volume instance
:param targetPoolName: the target poolName
:param sourceFastPolicyName: the source FAST policy name, can be None
:param extraSpecs: extra specifications
:returns: boolean -- True/False
:returns: int -- the return code from migrate operation
"""
falseRet = (False, -1)
volumeName = volume['name']
storageSystemName = volumeInstance['SystemName']
LOG.debug("sourceFastPolicyName is : %(sourceFastPolicyName)s.",
{'sourceFastPolicyName': sourceFastPolicyName})
# If the source volume is FAST enabled it must first be removed
# from the default storage group for that policy.
if sourceFastPolicyName is not None:
self.remove_from_default_SG(
self.conn, volumeInstance, storageSystemName,
sourceFastPolicyName, volumeName, extraSpecs)
# Migrate from one pool to another.
storageRelocationService = self.utils.find_storage_relocation_service(
self.conn, storageSystemName)
targetPoolInstanceName = self.utils.get_pool_by_name(
self.conn, targetPoolName, storageSystemName)
if targetPoolInstanceName is None:
LOG.error(_LE(
"Error finding target pool instance name for pool: "
"%(targetPoolName)s."),
{'targetPoolName': targetPoolName})
return falseRet
try:
rc = self.provision.migrate_volume_to_storage_pool(
self.conn, storageRelocationService, volumeInstance.path,
targetPoolInstanceName, extraSpecs)
except Exception:
            # The migration itself failed; log the exception and report
            # failure so the caller can roll back.
LOG.exception(_LE(
"Error migrating volume: %(volumename)s. "
"to target pool %(targetPoolName)s."),
{'volumename': volumeName,
'targetPoolName': targetPoolName})
return falseRet
# Check that the volume is now migrated to the correct storage pool,
# if it is terminate the migrate session.
foundPoolInstanceName = self.utils.get_assoc_pool_from_volume(
self.conn, volumeInstance.path)
if (foundPoolInstanceName is None or
(foundPoolInstanceName['InstanceID'] !=
targetPoolInstanceName['InstanceID'])):
LOG.error(_LE(
"Volume : %(volumeName)s. was not successfully migrated to "
"target pool %(targetPoolName)s."),
{'volumeName': volumeName,
'targetPoolName': targetPoolName})
return falseRet
else:
LOG.debug("Terminating migration session on: %(volumeName)s.",
{'volumeName': volumeName})
self.provision._terminate_migrate_session(
self.conn, volumeInstance.path, extraSpecs)
if rc == 0:
moved = True
return moved, rc
def remove_from_default_SG(
self, conn, volumeInstance, storageSystemName,
sourceFastPolicyName, volumeName, extraSpecs):
"""For FAST, remove volume from default storage group.
:param conn: connection info to ECOM
:param volumeInstance: the volume instance
:param storageSystemName: the storage system name
:param sourceFastPolicyName: the source FAST policy name
:param volumeName: the volume Name
:param extraSpecs: extra specifications
:raises: VolumeBackendAPIException
"""
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
conn, storageSystemName))
try:
defaultStorageGroupInstanceName = (
self.masking.remove_device_from_default_storage_group(
conn, controllerConfigurationService,
volumeInstance.path, volumeName, sourceFastPolicyName,
extraSpecs))
except Exception:
exceptionMessage = (_(
"Failed to remove: %(volumename)s. "
"from the default storage group for "
"FAST policy %(fastPolicyName)s.")
% {'volumename': volumeName,
'fastPolicyName': sourceFastPolicyName})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
if defaultStorageGroupInstanceName is None:
LOG.warning(_LW(
"The volume: %(volumename)s "
"was not first part of the default storage "
"group for FAST policy %(fastPolicyName)s."),
{'volumename': volumeName,
'fastPolicyName': sourceFastPolicyName})
def add_to_default_SG(
self, conn, volumeInstance, storageSystemName,
targetFastPolicyName, volumeName, extraSpecs):
"""For FAST, add volume to default storage group.
:param conn: connection info to ECOM
:param volumeInstance: the volume instance
:param storageSystemName: the storage system name
:param targetFastPolicyName: the target FAST policy name
:param volumeName: the volume Name
:param extraSpecs: extra specifications
"""
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
conn, storageSystemName))
assocDefaultStorageGroupName = (
self.fast
.add_volume_to_default_storage_group_for_fast_policy(
conn, controllerConfigurationService, volumeInstance,
volumeName, targetFastPolicyName, extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
"Failed to add %(volumeName)s "
"to default storage group for fast policy "
"%(fastPolicyName)s."),
{'volumeName': volumeName,
'fastPolicyName': targetFastPolicyName})
def _is_valid_for_storage_assisted_migration_v3(
self, volumeInstanceName, host, sourceArraySerialNumber,
sourcePoolName, volumeName, volumeStatus, sgName,
doChangeCompression):
"""Check if volume is suitable for storage assisted (pool) migration.
:param volumeInstanceName: the volume instance id
:param host: the host object
:param sourceArraySerialNumber: the array serial number of
the original volume
:param sourcePoolName: the pool name of the original volume
:param volumeName: the name of the volume to be migrated
:param volumeStatus: the status of the volume
:param sgName: storage group name
:param doChangeCompression: do change compression
:returns: boolean -- True/False
:returns: string -- targetSlo
:returns: string -- targetWorkload
"""
falseRet = (False, None, None)
if 'location_info' not in host['capabilities']:
LOG.error(_LE('Error getting array, pool, SLO and workload.'))
return falseRet
info = host['capabilities']['location_info']
LOG.debug("Location info is : %(info)s.",
{'info': info})
try:
infoDetail = info.split('#')
targetArraySerialNumber = infoDetail[0]
targetPoolName = infoDetail[1]
targetSlo = infoDetail[2]
targetWorkload = infoDetail[3]
        except IndexError:  # split() yields a list; a short result raises IndexError
LOG.error(_LE("Error parsing array, pool, SLO and workload."))
if targetArraySerialNumber not in sourceArraySerialNumber:
LOG.error(_LE(
"The source array : %(sourceArraySerialNumber)s does not "
"match the target array: %(targetArraySerialNumber)s "
"skipping storage-assisted migration."),
{'sourceArraySerialNumber': sourceArraySerialNumber,
'targetArraySerialNumber': targetArraySerialNumber})
return falseRet
if targetPoolName not in sourcePoolName:
LOG.error(_LE(
"Only SLO/workload migration within the same SRP Pool "
"is supported in this version "
"The source pool : %(sourcePoolName)s does not "
"match the target array: %(targetPoolName)s. "
"Skipping storage-assisted migration."),
{'sourcePoolName': sourcePoolName,
'targetPoolName': targetPoolName})
return falseRet
foundStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(
self.conn, volumeInstanceName, sgName))
if foundStorageGroupInstanceName is None:
LOG.warning(_LW(
"Volume: %(volumeName)s is not currently "
"belonging to any storage group."),
{'volumeName': volumeName})
else:
storageGroupInstance = self.conn.GetInstance(
foundStorageGroupInstanceName)
emcFastSetting = self.utils._get_fast_settings_from_storage_group(
storageGroupInstance)
targetCombination = ("%(targetSlo)s+%(targetWorkload)s"
% {'targetSlo': targetSlo,
'targetWorkload': targetWorkload})
if targetCombination in emcFastSetting:
                # Check if migration is from compression to
                # non-compression or vice versa.
if not doChangeCompression:
LOG.error(_LE(
"No action required. Volume: %(volumeName)s is "
"already part of slo/workload combination: "
"%(targetCombination)s."),
{'volumeName': volumeName,
'targetCombination': targetCombination})
return falseRet
return (True, targetSlo, targetWorkload)
def _is_valid_for_storage_assisted_migration(
self, volumeInstanceName, host, sourceArraySerialNumber,
volumeName, volumeStatus):
"""Check if volume is suitable for storage assisted (pool) migration.
:param volumeInstanceName: the volume instance id
:param host: the host object
:param sourceArraySerialNumber: the array serial number of
the original volume
:param volumeName: the name of the volume to be migrated
:param volumeStatus: the status of the volume e.g
:returns: boolean -- True/False
:returns: string -- targetPool
:returns: string -- targetFastPolicy
"""
falseRet = (False, None, None)
if 'location_info' not in host['capabilities']:
LOG.error(_LE("Error getting target pool name and array."))
return falseRet
info = host['capabilities']['location_info']
LOG.debug("Location info is : %(info)s.",
{'info': info})
try:
infoDetail = info.split('#')
targetArraySerialNumber = infoDetail[0]
targetPoolName = infoDetail[1]
targetFastPolicy = infoDetail[2]
        except IndexError:  # split() yields a list; a short result raises IndexError
LOG.error(_LE(
"Error parsing target pool name, array, and fast policy."))
if targetArraySerialNumber not in sourceArraySerialNumber:
LOG.error(_LE(
"The source array : %(sourceArraySerialNumber)s does not "
"match the target array: %(targetArraySerialNumber)s, "
"skipping storage-assisted migration."),
{'sourceArraySerialNumber': sourceArraySerialNumber,
'targetArraySerialNumber': targetArraySerialNumber})
return falseRet
# Get the pool from the source array and check that is different
# to the pool in the target array.
assocPoolInstanceName = self.utils.get_assoc_pool_from_volume(
self.conn, volumeInstanceName)
assocPoolInstance = self.conn.GetInstance(
assocPoolInstanceName)
if assocPoolInstance['ElementName'] == targetPoolName:
LOG.error(_LE(
"No action required. Volume: %(volumeName)s is "
"already part of pool: %(pool)s."),
{'volumeName': volumeName,
'pool': targetPoolName})
return falseRet
LOG.info(_LI("Volume status is: %s."), volumeStatus)
if (host['capabilities']['storage_protocol'] != self.protocol and
(volumeStatus != 'available' and volumeStatus != 'retyping')):
LOG.error(_LE(
"Only available volumes can be migrated between "
"different protocols."))
return falseRet
return (True, targetPoolName, targetFastPolicy)
def _set_config_file_and_get_extra_specs(self, volume, volumeTypeId=None):
"""Given the volume object get the associated volumetype.
Given the volume object get the associated volumetype and the
extra specs associated with it.
Based on the name of the config group, register the config file
:param volume: the volume object including the volume_type_id
:param volumeTypeId: Optional override of volume['volume_type_id']
:returns: dict -- the extra specs dict
:returns: string -- configuration file
"""
extraSpecs = self.utils.get_volumetype_extraspecs(
volume, volumeTypeId)
qosSpecs = self.utils.get_volumetype_qosspecs(volume, volumeTypeId)
configGroup = None
# If there are no extra specs then the default case is assumed.
if extraSpecs:
configGroup = self.configuration.config_group
configurationFile = self._register_config_file_from_config_group(
configGroup)
self.multiPoolSupportEnabled = (
self._get_multi_pool_support_enabled_flag())
extraSpecs[MULTI_POOL_SUPPORT] = self.multiPoolSupportEnabled
if extraSpecs.get('replication_enabled') == '<is> True':
extraSpecs[IS_RE] = True
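        # The '<is> True' literal above is how a boolean volume-type extra
        # spec is reported to the driver, e.g. one set with the (illustrative)
        # CLI: cinder type-key <volume_type> set replication_enabled='<is> True'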
return extraSpecs, configurationFile, qosSpecs
def _get_multi_pool_support_enabled_flag(self):
"""Reads the configuration for multi pool support flag.
:returns: MultiPoolSupportEnabled flag
"""
confString = (
self.configuration.safe_get('multi_pool_support'))
retVal = False
stringTrue = "True"
if confString:
if confString.lower() == stringTrue.lower():
retVal = True
return retVal
def _get_initiator_check_flag(self):
"""Reads the configuration for initator_check flag.
:returns: flag
"""
confString = (
self.configuration.safe_get('initiator_check'))
retVal = False
stringTrue = "True"
if confString:
if confString.lower() == stringTrue.lower():
retVal = True
return retVal
def _get_ecom_connection(self):
"""Get the ecom connection.
:returns: pywbem.WBEMConnection -- conn, the ecom connection
:raises: VolumeBackendAPIException
"""
ecomx509 = None
if self.ecomUseSSL:
if (self.configuration.safe_get('driver_client_cert_key') and
self.configuration.safe_get('driver_client_cert')):
ecomx509 = {"key_file":
self.configuration.safe_get(
'driver_client_cert_key'),
"cert_file":
self.configuration.safe_get(
'driver_client_cert')}
pywbem.cim_http.wbem_request = https.wbem_request
conn = pywbem.WBEMConnection(
self.url,
(self.user, self.passwd),
default_namespace='root/emc',
x509=ecomx509,
ca_certs=self.configuration.safe_get('driver_ssl_cert_path'),
no_verification=not self.configuration.safe_get(
'driver_ssl_cert_verify'))
else:
conn = pywbem.WBEMConnection(
self.url,
(self.user, self.passwd),
default_namespace='root/emc')
if conn is None:
exception_message = (_("Cannot connect to ECOM server."))
raise exception.VolumeBackendAPIException(data=exception_message)
return conn
def _find_pool_in_array(self, arrayStr, poolNameInStr, isV3):
"""Find a pool based on the pool name on a given array.
:param arrayStr: the array Serial number (String)
:param poolNameInStr: the name of the poolname (String)
        :param isV3: True/False
:returns: foundPoolInstanceName - the CIM Instance Name of the Pool
:returns: string -- systemNameStr
:raises: VolumeBackendAPIException
"""
foundPoolInstanceName = None
systemNameStr = None
storageSystemInstanceName = self.utils.find_storageSystem(
self.conn, arrayStr)
if isV3:
foundPoolInstanceName, systemNameStr = (
self.utils.get_pool_and_system_name_v3(
self.conn, storageSystemInstanceName, poolNameInStr))
else:
foundPoolInstanceName, systemNameStr = (
self.utils.get_pool_and_system_name_v2(
self.conn, storageSystemInstanceName, poolNameInStr))
if foundPoolInstanceName is None:
exceptionMessage = (_("Pool %(poolNameInStr)s is not found.")
% {'poolNameInStr': poolNameInStr})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
if systemNameStr is None:
exception_message = (_("Storage system not found for pool "
"%(poolNameInStr)s.")
% {'poolNameInStr': poolNameInStr})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug("Pool: %(pool)s SystemName: %(systemname)s.",
{'pool': foundPoolInstanceName,
'systemname': systemNameStr})
return foundPoolInstanceName, systemNameStr
def _find_lun(self, volume):
"""Given the volume get the instance from it.
:param volume: volume object
:returns: foundVolumeinstance
"""
foundVolumeinstance = None
targetVolName = None
volumename = volume['id']
loc = volume['provider_location']
if self.conn is None:
self.conn = self._get_ecom_connection()
if isinstance(loc, six.string_types):
name = ast.literal_eval(loc)
keys = name['keybindings']
systemName = keys['SystemName']
admin_metadata = {}
if 'admin_metadata' in volume:
admin_metadata = volume.admin_metadata
if 'targetVolumeName' in admin_metadata:
targetVolName = admin_metadata['targetVolumeName']
prefix1 = 'SYMMETRIX+'
prefix2 = 'SYMMETRIX-+-'
smiversion = self.utils.get_smi_version(self.conn)
if smiversion > SMI_VERSION_8 and prefix1 in systemName:
keys['SystemName'] = systemName.replace(prefix1, prefix2)
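                # Illustrative example (hypothetical serial number):
                # 'SYMMETRIX+000198700439' becomes 'SYMMETRIX-+-000198700439'
                # to match the system naming used by newer SMI-S versions.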
name['keybindings'] = keys
instancename = self.utils.get_instance_name(
name['classname'], name['keybindings'])
LOG.debug("Volume instance name: %(in)s",
{'in': instancename})
# Allow for an external app to delete the volume.
try:
foundVolumeinstance = self.conn.GetInstance(instancename)
volumeElementName = (self.utils.
get_volume_element_name(volumename))
if not (volumeElementName ==
foundVolumeinstance['ElementName']):
# Check if it is a vol created as part of a clone group
if not (targetVolName ==
foundVolumeinstance['ElementName']):
foundVolumeinstance = None
except Exception as e:
LOG.info(_LI("Exception in retrieving volume: %(e)s."),
{'e': e})
foundVolumeinstance = None
if foundVolumeinstance is None:
LOG.debug("Volume %(volumename)s not found on the array.",
{'volumename': volumename})
else:
LOG.debug("Volume name: %(volumename)s Volume instance: "
"%(foundVolumeinstance)s.",
{'volumename': volumename,
'foundVolumeinstance': foundVolumeinstance})
return foundVolumeinstance
def _find_storage_sync_sv_sv(self, snapshot, volume, extraSpecs,
waitforsync=True):
"""Find the storage synchronized name.
:param snapshot: snapshot object
:param volume: volume object
:param extraSpecs: extra specifications
:param waitforsync: boolean -- Wait for Solutions Enabler sync.
:returns: string -- foundsyncname
:returns: string -- storage_system
"""
snapshotname = snapshot['name']
volumename = volume['name']
LOG.debug("Source: %(volumename)s Target: %(snapshotname)s.",
{'volumename': volumename, 'snapshotname': snapshotname})
snapshot_instance = self._find_lun(snapshot)
volume_instance = self._find_lun(volume)
storage_system = volume_instance['SystemName']
classname = 'SE_StorageSynchronized_SV_SV'
bindings = {'SyncedElement': snapshot_instance.path,
'SystemElement': volume_instance.path}
foundsyncname = self.utils.get_instance_name(classname, bindings)
if foundsyncname is None:
LOG.debug(
"Source: %(volumename)s Target: %(snapshotname)s. "
"Storage Synchronized not found.",
{'volumename': volumename,
'snapshotname': snapshotname})
else:
LOG.debug("Storage system: %(storage_system)s. "
"Storage Synchronized instance: %(sync)s.",
{'storage_system': storage_system,
'sync': foundsyncname})
# Wait for SE_StorageSynchronized_SV_SV to be fully synced.
if waitforsync:
self.utils.wait_for_sync(self.conn, foundsyncname,
extraSpecs)
return foundsyncname, storage_system
def _find_initiator_names(self, connector):
foundinitiatornames = []
iscsi = 'iscsi'
fc = 'fc'
name = 'initiator name'
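        # Illustrative connector dict (hypothetical values):
        #   {'host': 'myhost',
        #    'initiator': 'iqn.1993-08.org.debian:01:222',
        #    'wwpns': ['10000090fa66c771', '10000090fa66c772']}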
if self.protocol.lower() == iscsi and connector['initiator']:
foundinitiatornames.append(connector['initiator'])
elif self.protocol.lower() == fc and connector['wwpns']:
for wwn in connector['wwpns']:
foundinitiatornames.append(wwn)
name = 'world wide port names'
if foundinitiatornames is None or len(foundinitiatornames) == 0:
msg = (_("Error finding %s.") % name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug("Found %(name)s: %(initiator)s.",
{'name': name,
'initiator': foundinitiatornames})
return foundinitiatornames
def _wrap_find_device_number(self, volume, host):
return self.find_device_number(volume, host)
def find_device_number(self, volume, host):
"""Given the volume dict find a device number.
Find a device number that a host can see
for a volume.
:param volume: the volume dict
:param host: host from connector
        :returns: dict -- the data dict
        :returns: boolean -- isLiveMigration
        :returns: dict -- source_data
"""
maskedvols = []
data = {}
foundController = None
foundNumDeviceNumber = None
foundMaskingViewName = None
volumeName = volume['name']
volumeInstance = self._find_lun(volume)
storageSystemName = volumeInstance['SystemName']
isLiveMigration = False
source_data = {}
unitnames = self.conn.ReferenceNames(
volumeInstance.path,
ResultClass='CIM_ProtocolControllerForUnit')
for unitname in unitnames:
controller = unitname['Antecedent']
classname = controller['CreationClassName']
index = classname.find('Symm_LunMaskingView')
if index > -1:
unitinstance = self.conn.GetInstance(unitname,
LocalOnly=False)
numDeviceNumber = int(unitinstance['DeviceNumber'], 16)
foundNumDeviceNumber = numDeviceNumber
foundController = controller
controllerInstance = self.conn.GetInstance(controller,
LocalOnly=False)
propertiesList = controllerInstance.properties.items()
for properties in propertiesList:
if properties[0] == 'ElementName':
cimProperties = properties[1]
foundMaskingViewName = cimProperties.value
devicedict = {'hostlunid': foundNumDeviceNumber,
'storagesystem': storageSystemName,
'maskingview': foundMaskingViewName,
'controller': foundController}
maskedvols.append(devicedict)
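                # Illustrative devicedict (hypothetical values):
                #   {'hostlunid': 4,
                #    'storagesystem': 'SYMMETRIX-+-000198700439',
                #    'maskingview': 'OS-myhost-SRP_1-Diamond-OLTP-I-MV',
                #    'controller': <Symm_LunMaskingView instance name>}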
if not maskedvols:
LOG.debug(
"Device number not found for volume "
"%(volumeName)s %(volumeInstance)s.",
{'volumeName': volumeName,
'volumeInstance': volumeInstance.path})
else:
host = self.utils.get_host_short_name(host)
hoststr = ("-%(host)s-"
% {'host': host})
for maskedvol in maskedvols:
if hoststr.lower() in maskedvol['maskingview'].lower():
data = maskedvol
if not data:
if len(maskedvols) > 0:
source_data = maskedvols[0]
LOG.warning(_LW(
"Volume is masked but not to host %(host)s as is "
"expected. Assuming live migration."),
{'host': hoststr})
isLiveMigration = True
LOG.debug("Device info: %(data)s.", {'data': data})
return data, isLiveMigration, source_data
def get_target_wwns(self, storageSystem, connector):
"""Find target WWNs.
:param storageSystem: the storage system name
:param connector: the connector dict
:returns: list -- targetWwns, the target WWN list
:raises: VolumeBackendAPIException
"""
targetWwns = set()
storageHardwareService = self.utils.find_storage_hardwareid_service(
self.conn, storageSystem)
hardwareIdInstances = self._find_storage_hardwareids(
connector, storageHardwareService)
LOG.debug(
"EMCGetTargetEndpoints: Service: %(service)s, "
"Storage HardwareIDs: %(hardwareIds)s.",
{'service': storageHardwareService,
'hardwareIds': hardwareIdInstances})
for hardwareIdInstance in hardwareIdInstances:
LOG.debug("HardwareID instance is: %(hardwareIdInstance)s.",
{'hardwareIdInstance': hardwareIdInstance})
try:
targetEndpoints = (
self.utils.get_target_endpoints(
self.conn, hardwareIdInstance))
if not targetEndpoints:
LOG.warning(_LW(
"Unable to get target endpoints for hardwareId "
"%(instance)s."),
{'instance': hardwareIdInstance})
continue
except Exception:
LOG.warning(_LW(
"Unable to get target endpoints for hardwareId "
"%(instance)s."),
{'instance': hardwareIdInstance}, exc_info=True)
continue
LOG.debug("There are %(len)lu endpoints.",
{'len': len(targetEndpoints)})
for targetendpoint in targetEndpoints:
wwn = targetendpoint['Name']
                # Add target wwn to the set; duplicates are ignored.
targetWwns.add(wwn)
break
if not targetWwns:
exception_message = (_(
"Unable to get target endpoints for any hardwareIds."))
raise exception.VolumeBackendAPIException(data=exception_message)
LOG.debug("Target WWNs: %(targetWwns)s.",
{'targetWwns': targetWwns})
return list(targetWwns)
def _find_storage_hardwareids(
self, connector, hardwareIdManagementService):
"""Find the storage hardware ID instances.
:param connector: the connector dict
:param hardwareIdManagementService: the storage Hardware
management service
:returns: list -- the list of storage hardware ID instances
"""
foundHardwareIdList = []
wwpns = self._find_initiator_names(connector)
hardwareIdInstances = (
self.utils.get_hardware_id_instances_from_array(
self.conn, hardwareIdManagementService))
for hardwareIdInstance in hardwareIdInstances:
storageId = hardwareIdInstance['StorageID']
for wwpn in wwpns:
if wwpn.lower() == storageId.lower():
# Check that the found hardwareId has not been
# deleted. If it has, we don't want to add it to the list.
instance = self.utils.get_existing_instance(
self.conn, hardwareIdInstance.path)
if instance is None:
# HardwareId doesn't exist any more. Skip it.
break
foundHardwareIdList.append(hardwareIdInstance.path)
break
LOG.debug("Storage Hardware IDs for %(wwpns)s is "
"%(foundInstances)s.",
{'wwpns': wwpns,
'foundInstances': foundHardwareIdList})
return foundHardwareIdList
def _register_config_file_from_config_group(self, configGroupName):
"""Given the config group name register the file.
:param configGroupName: the config group name
:returns: string -- configurationFile - name of the configuration file
"""
if configGroupName is None:
return CINDER_EMC_CONFIG_FILE
if hasattr(self.configuration, 'cinder_emc_config_file'):
configurationFile = self.configuration.cinder_emc_config_file
else:
configurationFile = (
("%(prefix)s%(configGroupName)s%(postfix)s"
% {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX,
'configGroupName': configGroupName,
'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX}))
# The file saved in self.configuration may not be the correct one,
# double check.
if configGroupName not in configurationFile:
configurationFile = (
("%(prefix)s%(configGroupName)s%(postfix)s"
% {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX,
'configGroupName': configGroupName,
'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX}))
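        # Illustrative example (assuming the prefix/postfix constants are
        # '/etc/cinder/cinder_emc_config_' and '.xml'): a config group named
        # 'CONF_GROUP_ISCSI' resolves to
        # /etc/cinder/cinder_emc_config_CONF_GROUP_ISCSI.xml.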
if os.path.isfile(configurationFile):
LOG.debug("Configuration file : %(configurationFile)s exists.",
{'configurationFile': configurationFile})
else:
exceptionMessage = (_(
"Configuration file %(configurationFile)s does not exist.")
% {'configurationFile': configurationFile})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return configurationFile
def _set_ecom_credentials(self, arrayInfo):
"""Given the array record set the ecom credentials.
:param arrayInfo: record
:raises: VolumeBackendAPIException
"""
ip = arrayInfo['EcomServerIp']
port = arrayInfo['EcomServerPort']
self.user = arrayInfo['EcomUserName']
self.passwd = arrayInfo['EcomPassword']
self.ecomUseSSL = self.configuration.safe_get('driver_use_ssl')
ip_port = ("%(ip)s:%(port)s"
% {'ip': ip,
'port': port})
if self.ecomUseSSL:
self.url = ("https://%(ip_port)s"
% {'ip_port': ip_port})
else:
self.url = ("http://%(ip_port)s"
% {'ip_port': ip_port})
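        # Illustrative result (hypothetical address): with EcomServerIp
        # 10.10.10.10 and EcomServerPort 5988 the URL becomes
        # 'http://10.10.10.10:5988', or 'https://10.10.10.10:5988' when
        # driver_use_ssl is enabled.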
self.conn = self._get_ecom_connection()
def _initial_setup(self, volume, volumeTypeId=None, host=None):
"""Necessary setup to accumulate the relevant information.
        The volume object has a host from which we can parse the
config group name. The config group name is the key to our EMC
configuration file. The emc configuration file contains pool name
and array name which are mandatory fields.
FastPolicy is optional.
StripedMetaCount is an extra spec that determines whether
the composite volume should be concatenated or striped.
:param volume: the volume Object
:param volumeTypeId: Optional override of volume['volume_type_id']
:returns: dict -- extra spec dict
:raises: VolumeBackendAPIException
"""
try:
extraSpecs, configurationFile, qosSpecs = (
self._set_config_file_and_get_extra_specs(
volume, volumeTypeId))
pool = self._validate_pool(volume, extraSpecs=extraSpecs,
host=host)
LOG.debug("Pool returned is %(pool)s.",
{'pool': pool})
arrayInfo = self.utils.parse_file_to_get_array_map(
configurationFile)
if arrayInfo is not None:
if extraSpecs['MultiPoolSupport'] is True:
poolRecord = arrayInfo[0]
elif len(arrayInfo) == 1:
poolRecord = arrayInfo[0]
else:
poolRecord = self.utils.extract_record(arrayInfo, pool)
if not poolRecord:
exceptionMessage = (_(
"Unable to get corresponding record for pool."))
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
self._set_ecom_credentials(poolRecord)
isV3 = self.utils.isArrayV3(
self.conn, poolRecord['SerialNumber'])
if isV3:
extraSpecs = self._set_v3_extra_specs(extraSpecs, poolRecord)
else:
# V2 extra specs
extraSpecs = self._set_v2_extra_specs(extraSpecs, poolRecord)
if (qosSpecs.get('qos_specs')
and qosSpecs['qos_specs']['consumer'] != "front-end"):
extraSpecs['qos'] = qosSpecs['qos_specs']['specs']
except Exception:
import sys
exceptionMessage = (_(
"Unable to get configuration information necessary to "
"create a volume: %(errorMessage)s.")
% {'errorMessage': sys.exc_info()[1]})
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return extraSpecs
def _get_pool_and_storage_system(self, extraSpecs):
"""Given the extra specs get the pool and storage system name.
:param extraSpecs: extra specifications
:returns: poolInstanceName The pool instance name
:returns: string -- the storage system name
:raises: VolumeBackendAPIException
"""
try:
array = extraSpecs[ARRAY]
poolInstanceName, storageSystemStr = self._find_pool_in_array(
array, extraSpecs[POOL], extraSpecs[ISV3])
except Exception:
exceptionMessage = (_(
"You must supply an array in your EMC configuration file."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
if poolInstanceName is None or storageSystemStr is None:
exceptionMessage = (_(
"Cannot get necessary pool or storage system information."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return poolInstanceName, storageSystemStr
def _populate_masking_dict(self, volume, connector, extraSpecs):
"""Get all the names of the maskingView and subComponents.
:param volume: the volume object
:param connector: the connector object
:param extraSpecs: extra specifications
:returns: dict -- a dictionary with masking view information
"""
maskingViewDict = {}
hostName = connector['host']
uniqueName = self.utils.generate_unique_trunc_pool(extraSpecs[POOL])
isV3 = extraSpecs[ISV3]
maskingViewDict['isV3'] = isV3
protocol = self.utils.get_short_protocol_type(self.protocol)
shortHostName = self.utils.get_host_short_name(hostName)
if isV3:
maskingViewDict['isCompressionDisabled'] = False
maskingViewDict['replication_enabled'] = False
slo = extraSpecs[SLO]
workload = extraSpecs[WORKLOAD]
rep_enabled = self.utils.is_replication_enabled(extraSpecs)
maskingViewDict['slo'] = slo
maskingViewDict['workload'] = workload
maskingViewDict['pool'] = uniqueName
if slo:
prefix = (
("OS-%(shortHostName)s-%(poolName)s-%(slo)s-"
"%(workload)s-%(protocol)s"
% {'shortHostName': shortHostName,
'poolName': uniqueName,
'slo': slo,
'workload': workload,
'protocol': protocol}))
doDisableCompression = self.utils.is_compression_disabled(
extraSpecs)
if doDisableCompression:
prefix = ("%(prefix)s-CD"
% {'prefix': prefix})
maskingViewDict['isCompressionDisabled'] = True
else:
prefix = (
("OS-%(shortHostName)s-No_SLO-%(protocol)s"
% {'shortHostName': shortHostName,
'protocol': protocol}))
if rep_enabled:
prefix += "-RE"
maskingViewDict['replication_enabled'] = True
else:
maskingViewDict['fastPolicy'] = extraSpecs[FASTPOLICY]
if maskingViewDict['fastPolicy']:
uniqueName = self.utils.generate_unique_trunc_fastpolicy(
maskingViewDict['fastPolicy']) + '-FP'
prefix = (
("OS-%(shortHostName)s-%(poolName)s-%(protocol)s"
% {'shortHostName': shortHostName,
'poolName': uniqueName,
'protocol': protocol}))
maskingViewDict['sgGroupName'] = ("%(prefix)s-SG"
% {'prefix': prefix})
maskingViewDict['maskingViewName'] = ("%(prefix)s-MV"
% {'prefix': prefix})
maskingViewDict['maskingViewNameLM'] = ("%(prefix)s-%(volid)s-MV"
% {'prefix': prefix,
'volid': volume['id'][:8]})
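        # Illustrative naming (hypothetical host and pool, assuming the short
        # protocol string is 'I' for iSCSI): host 'myhost', pool 'SRP_1',
        # SLO 'Diamond', workload 'OLTP' would give
        #   sgGroupName:     OS-myhost-SRP_1-Diamond-OLTP-I-SG
        #   maskingViewName: OS-myhost-SRP_1-Diamond-OLTP-I-MV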
volumeName = volume['name']
volumeInstance = self._find_lun(volume)
storageSystemName = volumeInstance['SystemName']
maskingViewDict['controllerConfigService'] = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
# The portGroup is gotten from emc xml config file.
maskingViewDict['pgGroupName'] = extraSpecs[PORTGROUPNAME]
maskingViewDict['igGroupName'] = (
("OS-%(shortHostName)s-%(protocol)s-IG"
% {'shortHostName': shortHostName,
'protocol': protocol}))
maskingViewDict['connector'] = connector
maskingViewDict['volumeInstance'] = volumeInstance
maskingViewDict['volumeName'] = volumeName
maskingViewDict['storageSystemName'] = storageSystemName
if self._get_initiator_check_flag():
maskingViewDict['initiatorCheck'] = True
else:
maskingViewDict['initiatorCheck'] = False
return maskingViewDict
def _add_volume_to_default_storage_group_on_create(
self, volumeDict, volumeName, storageConfigService,
storageSystemName, fastPolicyName, extraSpecs):
"""Add the volume to the default storage group for that policy.
On a create when fast policy is enable add the volume to the default
storage group for that policy. If it fails do the necessary rollback.
:param volumeDict: the volume dictionary
:param volumeName: the volume name (String)
:param storageConfigService: the storage configuration service
:param storageSystemName: the storage system name (String)
:param fastPolicyName: the fast policy name (String)
:param extraSpecs: extra specifications
:returns: dict -- maskingViewDict with masking view information
:raises: VolumeBackendAPIException
"""
try:
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
defaultSgName = self.fast.format_default_sg_string(fastPolicyName)
self.fast.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService, volumeInstance,
volumeName, fastPolicyName, extraSpecs)
foundStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path, defaultSgName))
if foundStorageGroupInstanceName is None:
exceptionMessage = (_(
"Error adding Volume: %(volumeName)s "
"with instance path: %(volumeInstancePath)s.")
% {'volumeName': volumeName,
'volumeInstancePath': volumeInstance.path})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
except Exception:
# Rollback by deleting the volume if adding the volume to the
# default storage group were to fail.
errorMessage = (_(
"Rolling back %(volumeName)s by deleting it.")
% {'volumeName': volumeName})
LOG.exception(errorMessage)
self.provision.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
raise exception.VolumeBackendAPIException(data=errorMessage)
def _create_and_get_unbound_volume(
self, conn, storageConfigService, compositeVolumeInstanceName,
additionalSize, extraSpecs):
"""Create an unbound volume.
Create an unbound volume so it is in the correct state to add to a
composite volume.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service instance name
:param compositeVolumeInstanceName: the composite volume instance name
:param additionalSize: the size you want to increase the volume by
:param extraSpecs: extra specifications
:returns: volume instance modifiedCompositeVolumeInstance
"""
assocPoolInstanceName = self.utils.get_assoc_pool_from_volume(
conn, compositeVolumeInstanceName)
appendVolumeInstance = self._create_and_get_volume_instance(
conn, storageConfigService, assocPoolInstanceName, 'appendVolume',
additionalSize, extraSpecs)
isVolumeBound = self.utils.is_volume_bound_to_pool(
conn, appendVolumeInstance)
if 'True' in isVolumeBound:
appendVolumeInstance = (
self._unbind_and_get_volume_from_storage_pool(
conn, storageConfigService,
appendVolumeInstance.path, 'appendVolume', extraSpecs))
return appendVolumeInstance
def _create_and_get_volume_instance(
self, conn, storageConfigService, poolInstanceName,
volumeName, volumeSize, extraSpecs):
"""Create and get a new volume.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service instance name
:param poolInstanceName: the pool instance name
:param volumeName: the volume name
:param volumeSize: the size to create the volume
:param extraSpecs: extra specifications
:returns: volumeInstance -- the volume instance
"""
volumeDict, _rc = (
self.provision.create_volume_from_pool(
self.conn, storageConfigService, volumeName, poolInstanceName,
volumeSize, extraSpecs))
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
return volumeInstance
def _unbind_and_get_volume_from_storage_pool(
self, conn, storageConfigService,
volumeInstanceName, volumeName, extraSpecs):
"""Unbind a volume from a pool and return the unbound volume.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service instance name
:param volumeInstanceName: the volume instance name
:param volumeName: string the volumeName
:param extraSpecs: extra specifications
:returns: unboundVolumeInstance -- the unbound volume instance
"""
_rc, _job = (
self.provision.unbind_volume_from_storage_pool(
conn, storageConfigService, volumeInstanceName,
volumeName, extraSpecs))
        # Check that the volume is unbound.
volumeInstance = conn.GetInstance(volumeInstanceName)
isVolumeBound = self.utils.is_volume_bound_to_pool(
conn, volumeInstance)
if 'False' not in isVolumeBound:
exceptionMessage = (_(
"Failed to unbind volume %(volume)s")
% {'volume': volumeInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return volumeInstance
def _modify_and_get_composite_volume_instance(
self, conn, elementCompositionServiceInstanceName, volumeInstance,
appendVolumeInstanceName, volumeName, compositeType, extraSpecs):
"""Given an existing composite volume add a new composite volume to it.
:param conn: the connection information to the ecom server
:param elementCompositionServiceInstanceName: the storage element
composition service instance name
:param volumeInstance: the volume instance
:param appendVolumeInstanceName: the appended volume instance name
:param volumeName: the volume name
:param compositeType: concatenated
:param extraSpecs: extra specifications
:returns: int -- the return code
:returns: dict -- modifiedVolumeDict - the modified volume dict
"""
isComposite = self.utils.check_if_volume_is_composite(
self.conn, volumeInstance)
if 'True' in isComposite:
rc, job = self.provision.modify_composite_volume(
conn, elementCompositionServiceInstanceName,
volumeInstance.path, appendVolumeInstanceName, extraSpecs)
elif 'False' in isComposite:
rc, job = self.provision.create_new_composite_volume(
conn, elementCompositionServiceInstanceName,
volumeInstance.path, appendVolumeInstanceName, compositeType,
extraSpecs)
else:
LOG.error(_LE(
"Unable to determine whether %(volumeName)s is "
"composite or not."),
{'volumeName': volumeName})
raise
modifiedVolumeDict = self.provision.get_volume_dict_from_job(
conn, job['Job'])
return rc, modifiedVolumeDict
def _get_or_create_default_storage_group(
self, conn, storageSystemName, volumeDict, volumeName,
fastPolicyName, extraSpecs):
"""Get or create a default storage group for a fast policy.
:param conn: the connection information to the ecom server
:param storageSystemName: the storage system name
:param volumeDict: the volume dictionary
:param volumeName: the volume name
:param fastPolicyName: the fast policy name
:param extraSpecs: extra specifications
:returns: defaultStorageGroupInstanceName
"""
controllerConfigService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
volumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, volumeName)
defaultStorageGroupInstanceName = (
self.fast.get_or_create_default_storage_group(
self.conn, controllerConfigService, fastPolicyName,
volumeInstance, extraSpecs))
return defaultStorageGroupInstanceName
def _create_cloned_volume(
self, cloneVolume, sourceVolume, extraSpecs, isSnapshot=False):
"""Create a clone volume from the source volume.
:param cloneVolume: clone volume
:param sourceVolume: source of the clone volume
:param extraSpecs: extra specs
:param isSnapshot: boolean -- Defaults to False
:returns: dict -- cloneDict the cloned volume dictionary
:raises: VolumeBackendAPIException
"""
sourceName = sourceVolume['name']
cloneName = cloneVolume['name']
LOG.info(_LI(
"Create a replica from Volume: Clone Volume: %(cloneName)s "
"Source Volume: %(sourceName)s."),
{'cloneName': cloneName,
'sourceName': sourceName})
self.conn = self._get_ecom_connection()
sourceInstance = self._find_lun(sourceVolume)
storageSystem = sourceInstance['SystemName']
repServCapabilityInstanceName = (
self.utils.find_replication_service_capabilities(self.conn,
storageSystem))
is_clone_license = self.utils.is_clone_licensed(
self.conn, repServCapabilityInstanceName, extraSpecs[ISV3])
if is_clone_license is False:
exceptionMessage = (_(
"Clone feature is not licensed on %(storageSystem)s.")
% {'storageSystem': storageSystem})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
repServiceInstanceName = self.utils.find_replication_service(
self.conn, storageSystem)
LOG.debug("Create volume replica: Volume: %(cloneName)s "
"Source Volume: %(sourceName)s "
"Method: CreateElementReplica "
"ReplicationService: %(service)s ElementName: "
"%(elementname)s SyncType: 8 SourceElement: "
"%(sourceelement)s.",
{'cloneName': cloneName,
'sourceName': sourceName,
'service': repServiceInstanceName,
'elementname': cloneName,
'sourceelement': sourceInstance.path})
if extraSpecs[ISV3]:
rc, cloneDict = self._create_replica_v3(repServiceInstanceName,
cloneVolume,
sourceVolume,
sourceInstance,
isSnapshot,
extraSpecs)
else:
rc, cloneDict = self._create_clone_v2(repServiceInstanceName,
cloneVolume,
sourceVolume,
sourceInstance,
isSnapshot,
extraSpecs)
if not isSnapshot:
old_size_gbs = self.utils.convert_bits_to_gbs(
self.utils.get_volume_size(
self.conn, sourceInstance))
if cloneVolume['size'] != old_size_gbs:
LOG.info(_LI("Extending clone %(cloneName)s to "
"%(newSize)d GBs"),
{'cloneName': cloneName,
'newSize': cloneVolume['size']})
cloneInstance = self.utils.find_volume_instance(
self.conn, cloneDict, cloneName)
self._extend_volume(
cloneVolume, cloneInstance, cloneName,
cloneVolume['size'], old_size_gbs, extraSpecs)
LOG.debug("Leaving _create_cloned_volume: Volume: "
"%(cloneName)s Source Volume: %(sourceName)s "
"Return code: %(rc)lu.",
{'cloneName': cloneName,
'sourceName': sourceName,
'rc': rc})
# Adding version information
cloneDict['version'] = self.version
return cloneDict
def _add_clone_to_default_storage_group(
self, fastPolicyName, storageSystemName, cloneDict, cloneName,
extraSpecs):
"""Helper function to add clone to the default storage group.
:param fastPolicyName: the fast policy name
:param storageSystemName: the storage system name
:param cloneDict: clone dictionary
:param cloneName: clone name
:param extraSpecs: extra specifications
:raises: VolumeBackendAPIException
"""
        # Check if the clone/snapshot volume is already part of the default SG.
cloneInstance = self.utils.find_volume_instance(
self.conn, cloneDict, cloneName)
if self.fast.is_volume_in_default_SG(self.conn, cloneInstance.path):
return
# If FAST enabled place clone volume or volume from snapshot to
# default storage group.
LOG.debug("Adding volume: %(cloneName)s to default storage group "
"for FAST policy: %(fastPolicyName)s.",
{'cloneName': cloneName,
'fastPolicyName': fastPolicyName})
storageConfigService = (
self.utils.find_storage_configuration_service(
self.conn, storageSystemName))
defaultStorageGroupInstanceName = (
self._get_or_create_default_storage_group(
self.conn, storageSystemName, cloneDict, cloneName,
fastPolicyName, extraSpecs))
if defaultStorageGroupInstanceName is None:
exceptionMessage = (_(
"Unable to create or get default storage group for FAST "
"policy: %(fastPolicyName)s.")
% {'fastPolicyName': fastPolicyName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
self._add_volume_to_default_storage_group_on_create(
cloneDict, cloneName, storageConfigService, storageSystemName,
fastPolicyName, extraSpecs)
def _delete_volume(self, volume, isSnapshot=False, host=None):
"""Helper function to delete the specified volume.
        :param volume: volume object to be deleted
        :param isSnapshot: boolean -- True if the volume is a snapshot
        :param host: optional host
:returns: tuple -- rc (int return code), volumeName (string vol name)
"""
volumeName = volume['name']
rc = -1
errorRet = (rc, volumeName)
extraSpecs = self._initial_setup(volume, host=host)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE(
"Volume %(name)s not found on the array. "
"No volume to delete."),
{'name': volumeName})
return errorRet
self._sync_check(volumeInstance, volumeName, extraSpecs)
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, volumeInstance['SystemName'])
deviceId = volumeInstance['DeviceID']
if extraSpecs[ISV3]:
if isSnapshot:
rc = self._delete_from_pool_v3(
storageConfigService, volumeInstance, volumeName,
deviceId, extraSpecs)
else:
rc = self._delete_from_pool_v3(
storageConfigService, volumeInstance, volumeName,
deviceId, extraSpecs, volume)
else:
rc = self._delete_from_pool(storageConfigService, volumeInstance,
volumeName, deviceId,
extraSpecs[FASTPOLICY],
extraSpecs)
return (rc, volumeName)
def _remove_device_from_storage_group(
self, controllerConfigurationService, volumeInstanceName,
volumeName, extraSpecs):
"""Check if volume is part of a storage group prior to delete.
Log a warning if volume is part of storage group.
:param controllerConfigurationService: controller configuration service
:param volumeInstanceName: volume instance name
:param volumeName: volume name (string)
:param extraSpecs: extra specifications
"""
storageGroupInstanceNames = (
self.masking.get_associated_masking_groups_from_device(
self.conn, volumeInstanceName))
if storageGroupInstanceNames:
LOG.warning(_LW(
"Pre check for deletion. "
"Volume: %(volumeName)s is part of a storage group. "
"Attempting removal from %(storageGroupInstanceNames)s."),
{'volumeName': volumeName,
'storageGroupInstanceNames': storageGroupInstanceNames})
for storageGroupInstanceName in storageGroupInstanceNames:
storageGroupInstance = self.conn.GetInstance(
storageGroupInstanceName)
self.masking.remove_device_from_storage_group(
self.conn, controllerConfigurationService,
storageGroupInstanceName, volumeInstanceName,
volumeName, storageGroupInstance['ElementName'],
extraSpecs)
def _find_lunmasking_scsi_protocol_controller(self, storageSystemName,
connector):
"""Find LunMaskingSCSIProtocolController for the local host.
Find out how many volumes are mapped to a host
associated to the LunMaskingSCSIProtocolController.
:param storageSystemName: the storage system name
        :param connector: the connector dict
:returns: foundControllerInstanceName
"""
foundControllerInstanceName = None
initiators = self._find_initiator_names(connector)
storageSystemInstanceName = self.utils.find_storageSystem(
self.conn, storageSystemName)
controllerInstanceNames = self.conn.AssociatorNames(
storageSystemInstanceName,
ResultClass='EMC_LunMaskingSCSIProtocolController')
for controllerInstanceName in controllerInstanceNames:
try:
# This is a check to see if the controller has
# been deleted.
self.conn.GetInstance(controllerInstanceName)
storageHardwareIdInstances = self.conn.Associators(
controllerInstanceName,
ResultClass='EMC_StorageHardwareID')
for storageHardwareIdInstance in storageHardwareIdInstances:
# If EMC_StorageHardwareID matches the initiator, we
# found the existing EMC_LunMaskingSCSIProtocolController.
hardwareid = storageHardwareIdInstance['StorageID']
for initiator in initiators:
if hardwareid.lower() == initiator.lower():
# This is a check to see if the controller
# has been deleted.
instance = self.utils.get_existing_instance(
self.conn, controllerInstanceName)
if instance is None:
# Skip this controller as it doesn't exist
# any more.
pass
else:
foundControllerInstanceName = (
controllerInstanceName)
break
if foundControllerInstanceName is not None:
break
except pywbem.cim_operations.CIMError as arg:
instance = self.utils.process_exception_args(
arg, controllerInstanceName)
if instance is None:
# Skip this controller as it doesn't exist any more.
pass
if foundControllerInstanceName is not None:
break
LOG.debug("LunMaskingSCSIProtocolController for storage system "
"%(storage_system)s and initiator %(initiator)s is "
"%(ctrl)s.",
{'storage_system': storageSystemName,
'initiator': initiators,
'ctrl': foundControllerInstanceName})
return foundControllerInstanceName
def get_num_volumes_mapped(self, volume, connector):
"""Returns how many volumes are in the same zone as the connector.
Find out how many volumes are mapped to a host
associated to the LunMaskingSCSIProtocolController.
        :param volume: the volume object
        :param connector: the connector dict
:returns: int -- numVolumesMapped
:raises: VolumeBackendAPIException
"""
volumename = volume['name']
vol_instance = self._find_lun(volume)
if vol_instance is None:
msg = (_("Volume %(name)s not found on the array. "
"Cannot determine if there are volumes mapped.")
% {'name': volumename})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
storage_system = vol_instance['SystemName']
ctrl = self._find_lunmasking_scsi_protocol_controller(
storage_system,
connector)
LOG.debug("LunMaskingSCSIProtocolController for storage system "
"%(storage)s and %(connector)s is %(ctrl)s.",
{'storage': storage_system,
'connector': connector,
'ctrl': ctrl})
# Return 0 if masking view does not exist.
if ctrl is None:
return 0
associators = self.conn.Associators(
ctrl,
ResultClass='EMC_StorageVolume')
numVolumesMapped = len(associators)
LOG.debug("Found %(numVolumesMapped)d volumes on storage system "
"%(storage)s mapped to %(connector)s.",
{'numVolumesMapped': numVolumesMapped,
'storage': storage_system,
'connector': connector})
return numVolumesMapped
def _delete_snapshot(self, snapshot, host=None):
"""Helper function to delete the specified snapshot.
        :param snapshot: snapshot object to be deleted
        :param host: optional host
:raises: VolumeBackendAPIException
"""
LOG.debug("Entering _delete_snapshot.")
self.conn = self._get_ecom_connection()
# Delete the target device.
rc, snapshotname = self._delete_volume(snapshot, True, host)
LOG.info(_LI("Leaving delete_snapshot: %(ssname)s Return code: "
"%(rc)lu."),
{'ssname': snapshotname,
'rc': rc})
def create_consistencygroup(self, context, group):
"""Creates a consistency group.
:param context: the context
:param group: the group object to be created
:returns: dict -- modelUpdate = {'status': 'available'}
:raises: VolumeBackendAPIException
"""
LOG.info(_LI("Create Consistency Group: %(group)s."),
{'group': group['id']})
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
cgName = self._update_consistency_group_name(group)
self.conn = self._get_ecom_connection()
# Find storage system.
try:
replicationService, storageSystem, __, __ = (
self._get_consistency_group_utils(self.conn, group))
interval_retries_dict = self.utils.get_default_intervals_retries()
self.provision.create_consistency_group(
self.conn, replicationService, cgName, interval_retries_dict)
except Exception:
exceptionMessage = (_("Failed to create consistency group:"
" %(cgName)s.")
% {'cgName': cgName})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return modelUpdate
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group.
:param context: the context
:param group: the group object to be deleted
        :param volumes: the list of volumes in the consistency group to be
            deleted
:returns: dict -- modelUpdate
:returns: list -- list of volume objects
:raises: VolumeBackendAPIException
"""
LOG.info(_LI("Delete Consistency Group: %(group)s."),
{'group': group['id']})
modelUpdate = {}
volumes_model_update = {}
if not self.conn:
self.conn = self._get_ecom_connection()
try:
replicationService, storageSystem, __, isV3 = (
self._get_consistency_group_utils(self.conn, group))
storageConfigservice = (
self.utils.find_storage_configuration_service(
self.conn, storageSystem))
cgInstanceName, cgName = self._find_consistency_group(
replicationService, six.text_type(group['id']))
if cgInstanceName is None:
LOG.error(_LE("Cannot find CG group %(cgName)s."),
{'cgName': six.text_type(group['id'])})
modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED}
volumes_model_update = self.utils.get_volume_model_updates(
volumes, group.id,
status='deleted')
return modelUpdate, volumes_model_update
memberInstanceNames = self._get_members_of_replication_group(
cgInstanceName)
interval_retries_dict = self.utils.get_default_intervals_retries()
self.provision.delete_consistency_group(self.conn,
replicationService,
cgInstanceName, cgName,
interval_retries_dict)
# Do a bulk delete, a lot faster than single deletes.
if memberInstanceNames:
volumes_model_update, modelUpdate = self._do_bulk_delete(
storageSystem, memberInstanceNames, storageConfigservice,
volumes, group, isV3, interval_retries_dict)
except Exception:
exceptionMessage = (_(
"Failed to delete consistency group: %(cgName)s.")
% {'cgName': six.text_type(group['id'])})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return modelUpdate, volumes_model_update
def _do_bulk_delete(self, storageSystem, memberInstanceNames,
storageConfigservice, volumes, group, isV3,
extraSpecs):
"""Do a bulk delete.
:param storageSystem: storage system name
:param memberInstanceNames: volume Instance names
:param storageConfigservice: storage config service
:param volumes: volume objects
        :param group: the consistency group object
:param isV3: boolean
:param extraSpecs: extra specifications
:returns: list -- list of volume objects
:returns: dict -- modelUpdate
"""
try:
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystem))
for memberInstanceName in memberInstanceNames:
self._remove_device_from_storage_group(
controllerConfigurationService, memberInstanceName,
'Member Volume', extraSpecs)
if isV3:
self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigservice,
memberInstanceNames, None, extraSpecs)
else:
self.provision.delete_volume_from_pool(
self.conn, storageConfigservice,
memberInstanceNames, None, extraSpecs)
modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED}
except Exception:
modelUpdate = {
'status': fields.ConsistencyGroupStatus.ERROR_DELETING}
finally:
volumes_model_update = self.utils.get_volume_model_updates(
volumes, group['id'], status=modelUpdate['status'])
return volumes_model_update, modelUpdate
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot.
:param context: the context
:param cgsnapshot: the consistency group snapshot to be created
:param snapshots: snapshots
:returns: dict -- modelUpdate
:returns: list -- list of snapshots
:raises: VolumeBackendAPIException
"""
consistencyGroup = cgsnapshot.get('consistencygroup')
snapshots_model_update = []
LOG.info(_LI(
"Create snapshot for Consistency Group %(cgId)s "
"cgsnapshotID: %(cgsnapshot)s."),
{'cgsnapshot': cgsnapshot['id'],
'cgId': cgsnapshot['consistencygroup_id']})
self.conn = self._get_ecom_connection()
try:
replicationService, storageSystem, extraSpecsDictList, isV3 = (
self._get_consistency_group_utils(self.conn, consistencyGroup))
cgInstanceName, cgName = (
self._find_consistency_group(
replicationService, six.text_type(
cgsnapshot['consistencygroup_id'])))
if cgInstanceName is None:
exception_message = (_(
"Cannot find CG group %s.") % six.text_type(
cgsnapshot['consistencygroup_id']))
raise exception.VolumeBackendAPIException(
data=exception_message)
# Create the target consistency group.
targetCgName = self._update_consistency_group_name(cgsnapshot)
interval_retries_dict = self.utils.get_default_intervals_retries()
self.provision.create_consistency_group(
self.conn, replicationService, targetCgName,
interval_retries_dict)
targetCgInstanceName, targetCgName = self._find_consistency_group(
replicationService, cgsnapshot['id'])
LOG.info(_LI("Create target consistency group %(targetCg)s."),
{'targetCg': targetCgInstanceName})
for snapshot in snapshots:
volume = snapshot['volume']
for extraSpecsDict in extraSpecsDictList:
if volume['volume_type_id'] in extraSpecsDict.values():
extraSpecs = extraSpecsDict.get('extraSpecs')
if 'pool_name' in extraSpecs:
extraSpecs = self.utils.update_extra_specs(
extraSpecs)
if 'size' in volume:
volumeSizeInbits = int(self.utils.convert_gb_to_bits(
volume['size']))
else:
volumeSizeInbits = int(self.utils.convert_gb_to_bits(
volume['volume_size']))
targetVolumeName = 'targetVol'
if isV3:
_rc, volumeDict, _storageSystemName = (
self._create_v3_volume(
volume, targetVolumeName, volumeSizeInbits,
extraSpecs))
else:
_rc, volumeDict, _storageSystemName = (
self._create_composite_volume(
volume, targetVolumeName, volumeSizeInbits,
extraSpecs))
targetVolumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, targetVolumeName)
LOG.debug("Create target volume for member volume "
"Source volume: %(memberVol)s "
"Target volume %(targetVol)s.",
{'memberVol': volume['id'],
'targetVol': targetVolumeInstance.path})
self.provision.add_volume_to_cg(self.conn,
replicationService,
targetCgInstanceName,
targetVolumeInstance.path,
targetCgName,
targetVolumeName,
extraSpecs)
self._create_group_and_break_relationship(
isV3, cgsnapshot['id'], replicationService, cgInstanceName,
targetCgInstanceName, storageSystem, interval_retries_dict)
except Exception:
exceptionMessage = (_("Failed to create snapshot for cg:"
" %(cgName)s.")
% {'cgName': cgsnapshot['consistencygroup_id']}
)
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'],
'status': fields.SnapshotStatus.AVAILABLE})
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
return modelUpdate, snapshots_model_update
def _create_group_and_break_relationship(
self, isV3, cgsnapshotId, replicationService, cgInstanceName,
targetCgInstanceName, storageSystem, interval_retries_dict):
"""Creates a cg group and deletes the relationship.
:param isV3: the context
:param cgsnapshotId: the consistency group snapshot id
:param replicationService: replication service
:param cgInstanceName: cg instance name
:param targetCgInstanceName: target cg instance name
:param storageSystem: storage system
        :param interval_retries_dict: the default intervals and retries dict
"""
        # The relationship name is limited to 5 characters.
relationName = self.utils.truncate_string(cgsnapshotId, 5)
if isV3:
self.provisionv3.create_group_replica(
self.conn, replicationService, cgInstanceName,
targetCgInstanceName, relationName, interval_retries_dict)
else:
self.provision.create_group_replica(
self.conn, replicationService, cgInstanceName,
targetCgInstanceName, relationName, interval_retries_dict)
# Break the replica group relationship.
rgSyncInstanceName = self.utils.find_group_sync_rg_by_target(
self.conn, storageSystem, targetCgInstanceName,
interval_retries_dict, True)
if rgSyncInstanceName is not None:
repservice = self.utils.find_replication_service(
self.conn, storageSystem)
if repservice is None:
exception_message = (_(
"Cannot find Replication service on system %s.") %
storageSystem)
raise exception.VolumeBackendAPIException(
data=exception_message)
if isV3:
                # Dissolve operation for snapVx.
operation = self.utils.get_num(9, '16')
self.provisionv3.break_replication_relationship(
self.conn, repservice, rgSyncInstanceName, operation,
interval_retries_dict)
else:
self.provision.delete_clone_relationship(self.conn, repservice,
rgSyncInstanceName,
interval_retries_dict)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Delete a cgsnapshot.
:param context: the context
        :param cgsnapshot: the consistency group snapshot to be deleted
:param snapshots: snapshots
:returns: dict -- modelUpdate
:returns: list -- list of snapshots
:raises: VolumeBackendAPIException
"""
consistencyGroup = cgsnapshot.get('consistencygroup')
model_update = {}
snapshots_model_update = []
LOG.info(_LI(
"Delete snapshot for source CG %(cgId)s "
"cgsnapshotID: %(cgsnapshot)s."),
{'cgsnapshot': cgsnapshot['id'],
'cgId': cgsnapshot['consistencygroup_id']})
model_update['status'] = cgsnapshot['status']
self.conn = self._get_ecom_connection()
try:
replicationService, storageSystem, __, isV3 = (
self._get_consistency_group_utils(self.conn, consistencyGroup))
interval_retries_dict = self.utils.get_default_intervals_retries()
model_update, snapshots = self._delete_cg_and_members(
storageSystem, cgsnapshot, model_update,
snapshots, isV3, interval_retries_dict)
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'],
'status': fields.SnapshotStatus.DELETED})
except Exception:
exceptionMessage = (_("Failed to delete snapshot for cg: "
"%(cgId)s.")
% {'cgId': cgsnapshot['consistencygroup_id']})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return model_update, snapshots_model_update
def _find_consistency_group(self, replicationService, cgId):
"""Finds a CG given its id.
:param replicationService: the replication service
:param cgId: the consistency group id
:returns: foundCgInstanceName,cg_name
"""
foundCgInstanceName = None
cg_name = None
cgInstanceNames = (
self.conn.AssociatorNames(replicationService,
ResultClass='CIM_ReplicationGroup'))
for cgInstanceName in cgInstanceNames:
instance = self.conn.GetInstance(cgInstanceName, LocalOnly=False)
if cgId in instance['ElementName']:
foundCgInstanceName = cgInstanceName
cg_name = instance['ElementName']
break
return foundCgInstanceName, cg_name
def _get_members_of_replication_group(self, cgInstanceName):
"""Get the members of consistency group.
:param cgInstanceName: the CG instance name
:returns: list -- memberInstanceNames
"""
memberInstanceNames = self.conn.AssociatorNames(
cgInstanceName,
AssocClass='CIM_OrderedMemberOfCollection')
return memberInstanceNames
def _create_composite_volume(
self, volume, volumeName, volumeSize, extraSpecs,
memberCount=None):
"""Create a composite volume (V2).
:param volume: the volume object
:param volumeName: the name of the volume
:param volumeSize: the size of the volume
:param extraSpecs: extra specifications
:param memberCount: the number of meta members in a composite volume
:returns: int -- return code
:returns: dict -- volumeDict
:returns: string -- storageSystemName
:raises: VolumeBackendAPIException
"""
if not memberCount:
memberCount, errorDesc = self.utils.determine_member_count(
volume['size'], extraSpecs[MEMBERCOUNT],
extraSpecs[COMPOSITETYPE])
if errorDesc is not None:
exceptionMessage = (_("The striped meta count of "
"%(memberCount)s is too small for "
"volume: %(volumeName)s, "
"with size %(volumeSize)s.")
% {'memberCount': memberCount,
'volumeName': volumeName,
'volumeSize': volume['size']})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
poolInstanceName, storageSystemName = (
self._get_pool_and_storage_system(extraSpecs))
LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
"Storage System: %(storageSystem)s "
"Size: %(size)lu MemberCount: %(memberCount)s.",
{'volume': volumeName,
'pool': poolInstanceName,
'storageSystem': storageSystemName,
'size': volumeSize,
'memberCount': memberCount})
elementCompositionService = (
self.utils.find_element_composition_service(self.conn,
storageSystemName))
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, storageSystemName)
# If FAST is intended to be used we must first check that the pool
# is associated with the correct storage tier.
if extraSpecs[FASTPOLICY] is not None:
foundPoolInstanceName = self.fast.get_pool_associated_to_policy(
self.conn, extraSpecs[FASTPOLICY], extraSpecs[ARRAY],
storageConfigService, poolInstanceName)
if foundPoolInstanceName is None:
exceptionMessage = (_("Pool: %(poolName)s. "
"is not associated to storage tier for "
"fast policy %(fastPolicy)s.")
% {'poolName': extraSpecs[POOL],
'fastPolicy':
extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
compositeType = self.utils.get_composite_type(
extraSpecs[COMPOSITETYPE])
volumeDict, rc = self.provision.create_composite_volume(
self.conn, elementCompositionService, volumeSize, volumeName,
poolInstanceName, compositeType, memberCount, extraSpecs)
# Now that we have already checked that the pool is associated with
# the correct storage tier and the volume was successfully created
# add the volume to the default storage group created for
# volumes in pools associated with this fast policy.
if extraSpecs[FASTPOLICY]:
LOG.info(_LI(
"Adding volume: %(volumeName)s to default storage group"
" for FAST policy: %(fastPolicyName)s."),
{'volumeName': volumeName,
'fastPolicyName': extraSpecs[FASTPOLICY]})
defaultStorageGroupInstanceName = (
self._get_or_create_default_storage_group(
self.conn, storageSystemName, volumeDict,
volumeName, extraSpecs[FASTPOLICY], extraSpecs))
if not defaultStorageGroupInstanceName:
exceptionMessage = (_(
"Unable to create or get default storage group for "
"FAST policy: %(fastPolicyName)s.")
% {'fastPolicyName': extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# If qos exists, update storage group to reflect qos parameters
if 'qos' in extraSpecs:
self.utils.update_storagegroup_qos(
self.conn, defaultStorageGroupInstanceName, extraSpecs)
self._add_volume_to_default_storage_group_on_create(
volumeDict, volumeName, storageConfigService,
storageSystemName, extraSpecs[FASTPOLICY], extraSpecs)
return rc, volumeDict, storageSystemName
def _create_v3_volume(
self, volume, volumeName, volumeSize, extraSpecs):
"""Create a volume (V3).
:param volume: the volume object
:param volumeName: the volume name
:param volumeSize: the volume size
:param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- volumeDict
:returns: string -- storageSystemName
:raises: VolumeBackendAPIException
"""
rc = -1
volumeDict = {}
isValidSLO, isValidWorkload = self.utils.verify_slo_workload(
extraSpecs[SLO], extraSpecs[WORKLOAD])
if not isValidSLO or not isValidWorkload:
exceptionMessage = (_(
"Either SLO: %(slo)s or workload %(workload)s is invalid. "
"Examine previous error statement for valid values.")
% {'slo': extraSpecs[SLO],
'workload': extraSpecs[WORKLOAD]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
poolInstanceName, storageSystemName = (
self._get_pool_and_storage_system(extraSpecs))
# Check to see if SLO and Workload are configured on the array.
storagePoolCapability = self.provisionv3.get_storage_pool_capability(
self.conn, poolInstanceName)
if extraSpecs[SLO]:
if storagePoolCapability:
storagePoolSetting = self.provisionv3.get_storage_pool_setting(
self.conn, storagePoolCapability, extraSpecs[SLO],
extraSpecs[WORKLOAD])
if not storagePoolSetting:
exceptionMessage = (_(
"The array does not support the storage pool setting "
"for SLO %(slo)s or workload %(workload)s. Please "
"check the array for valid SLOs and workloads.")
% {'slo': extraSpecs[SLO],
'workload': extraSpecs[WORKLOAD]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
exceptionMessage = (_(
"Cannot determine storage pool settings."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("Create Volume: %(volume)s Pool: %(pool)s "
"Storage System: %(storageSystem)s "
"Size: %(size)lu.",
{'volume': volumeName,
'pool': poolInstanceName,
'storageSystem': storageSystemName,
'size': volumeSize})
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, storageSystemName)
doDisableCompression = self.utils.is_compression_disabled(extraSpecs)
# A volume created without specifying a storage group during
# creation time is allocated from the default SRP pool and
# assigned the optimized SLO.
sgInstanceName = self._get_or_create_storage_group_v3(
extraSpecs[POOL], extraSpecs[SLO],
extraSpecs[WORKLOAD], doDisableCompression,
storageSystemName, extraSpecs)
try:
volumeDict, rc = self.provisionv3.create_volume_from_sg(
self.conn, storageConfigService, volumeName,
sgInstanceName, volumeSize, extraSpecs)
except Exception:
# if the volume create fails, check if the
# storage group needs to be cleaned up
volumeInstanceNames = (
self.masking.get_devices_from_storage_group(
self.conn, sgInstanceName))
if not len(volumeInstanceNames):
LOG.debug("There are no volumes in the storage group "
"%(maskingGroup)s. Deleting storage group",
{'maskingGroup': sgInstanceName})
controllerConfigService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
self.masking.delete_storage_group(
self.conn, controllerConfigService,
sgInstanceName, extraSpecs)
raise
return rc, volumeDict, storageSystemName
def _get_or_create_storage_group_v3(
self, poolName, slo, workload, doDisableCompression,
storageSystemName, extraSpecs, is_re=False):
"""Get or create storage group_v3 (V3).
:param poolName: the SRP pool nsmr
:param slo: the SLO
:param workload: the workload
:param doDisableCompression: flag for compression
:param storageSystemName: storage system name
:param extraSpecs: extra specifications
:param is_re: flag for replication
:returns: sgInstanceName
"""
storageGroupName, controllerConfigService, sgInstanceName = (
self.utils.get_v3_default_sg_instance_name(
self.conn, poolName, slo, workload, storageSystemName,
doDisableCompression, is_re))
if sgInstanceName is None:
sgInstanceName = self.provisionv3.create_storage_group_v3(
self.conn, controllerConfigService, storageGroupName,
poolName, slo, workload, extraSpecs, doDisableCompression)
else:
# Check that SG is not part of a masking view
mvInstanceName = self.masking.get_masking_view_from_storage_group(
self.conn, sgInstanceName)
if mvInstanceName:
exceptionMessage = (_(
"Default storage group %(storageGroupName)s is part of "
"masking view %(mvInstanceName)s. Please remove it "
"from this and all masking views")
% {'storageGroupName': storageGroupName,
'mvInstanceName': mvInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# If qos exists, update storage group to reflect qos parameters
if 'qos' in extraSpecs:
self.utils.update_storagegroup_qos(
self.conn, sgInstanceName, extraSpecs)
return sgInstanceName
def _extend_composite_volume(self, volumeInstance, volumeName,
newSize, additionalVolumeSize, extraSpecs):
"""Extend a composite volume (V2).
:param volumeInstance: the volume instance
:param volumeName: the name of the volume
:param newSize: in GBs
:param additionalVolumeSize: additional volume size
:param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- modifiedVolumeDict
:raises: VolumeBackendAPIException
"""
# Is the volume extendable.
isConcatenated = self.utils.check_if_volume_is_extendable(
self.conn, volumeInstance)
if 'True' not in isConcatenated:
exceptionMessage = (_(
"Volume: %(volumeName)s is not a concatenated volume. "
"You can only perform extend on concatenated volume. "
"Exiting...")
% {'volumeName': volumeName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
else:
compositeType = self.utils.get_composite_type(CONCATENATED)
LOG.debug("Extend Volume: %(volume)s New size: %(newSize)s GBs.",
{'volume': volumeName,
'newSize': newSize})
deviceId = volumeInstance['DeviceID']
storageSystemName = volumeInstance['SystemName']
LOG.debug(
"Device ID: %(deviceid)s: Storage System: "
"%(storagesystem)s.",
{'deviceid': deviceId,
'storagesystem': storageSystemName})
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, storageSystemName)
elementCompositionService = (
self.utils.find_element_composition_service(
self.conn, storageSystemName))
# Create a volume to the size of the
# newSize - oldSize = additionalVolumeSize.
unboundVolumeInstance = self._create_and_get_unbound_volume(
self.conn, storageConfigService, volumeInstance.path,
additionalVolumeSize, extraSpecs)
if unboundVolumeInstance is None:
exceptionMessage = (_(
"Error Creating unbound volume on an Extend operation."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
# Add the new unbound volume to the original composite volume.
rc, modifiedVolumeDict = (
self._modify_and_get_composite_volume_instance(
self.conn, elementCompositionService, volumeInstance,
unboundVolumeInstance.path, volumeName, compositeType,
extraSpecs))
if modifiedVolumeDict is None:
exceptionMessage = (_(
"On an Extend Operation, error adding volume to composite "
"volume: %(volumename)s.")
% {'volumename': volumeName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return rc, modifiedVolumeDict
def _slo_workload_migration(self, volumeInstance, volume, host,
volumeName, volumeStatus, newType,
extraSpecs):
"""Migrate from SLO/Workload combination to another (V3).
:param volumeInstance: the volume instance
:param volume: the volume object
:param host: the host object
:param volumeName: the name of the volume
:param volumeStatus: the volume status
:param newType: the type to migrate to
:param extraSpecs: extra specifications
:returns: boolean -- True if migration succeeded, False if error.
"""
isCompressionDisabled = self.utils.is_compression_disabled(extraSpecs)
storageGroupName = self.utils.get_v3_storage_group_name(
extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD],
isCompressionDisabled)
# Check if old type and new type have different compression types
doChangeCompression = (
self.utils.change_compression_type(
isCompressionDisabled, newType))
volumeInstanceName = volumeInstance.path
isValid, targetSlo, targetWorkload = (
self._is_valid_for_storage_assisted_migration_v3(
volumeInstanceName, host, extraSpecs[ARRAY],
extraSpecs[POOL], volumeName, volumeStatus,
storageGroupName, doChangeCompression))
storageSystemName = volumeInstance['SystemName']
if not isValid:
LOG.error(_LE(
"Volume %(name)s is not suitable for storage "
"assisted migration using retype."),
{'name': volumeName})
return False
if volume['host'] != host['host'] or doChangeCompression:
LOG.debug(
"Retype Volume %(name)s from source host %(sourceHost)s "
"to target host %(targetHost)s. Compression change is %(cc)r.",
{'name': volumeName,
'sourceHost': volume['host'],
'targetHost': host['host'],
'cc': doChangeCompression})
return self._migrate_volume_v3(
volume, volumeInstance, extraSpecs[POOL], targetSlo,
targetWorkload, storageSystemName, newType, extraSpecs)
return False
def _migrate_volume_v3(
self, volume, volumeInstance, poolName, targetSlo,
targetWorkload, storageSystemName, newType, extraSpecs):
"""Migrate from one slo/workload combination to another (V3).
This requires moving the volume from its current SG to a
new or existing SG that has the target attributes.
:param volume: the volume object
:param volumeInstance: the volume instance
:param poolName: the SRP Pool Name
:param targetSlo: the target SLO
:param targetWorkload: the target workload
:param storageSystemName: the storage system name
:param newType: the type to migrate to
:param extraSpecs: extra specifications
:returns: boolean -- True if migration succeeded, False if error.
"""
volumeName = volume['name']
controllerConfigService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
isCompressionDisabled = self.utils.is_compression_disabled(extraSpecs)
defaultSgName = self.utils.get_v3_storage_group_name(
extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD],
isCompressionDisabled)
foundStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path, defaultSgName))
if foundStorageGroupInstanceName is None:
LOG.warning(_LW(
"Volume : %(volumeName)s is not currently "
"belonging to any storage group."),
{'volumeName': volumeName})
else:
self.masking.remove_and_reset_members(
self.conn, controllerConfigService, volumeInstance,
volumeName, extraSpecs, None, False)
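        # Build the target storage group name from the new type's
        # compression setting and the target SLO/workload.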
targetExtraSpecs = newType['extra_specs']
isCompressionDisabled = self.utils.is_compression_disabled(
targetExtraSpecs)
storageGroupName = self.utils.get_v3_storage_group_name(
poolName, targetSlo, targetWorkload, isCompressionDisabled)
targetSgInstanceName = self._get_or_create_storage_group_v3(
poolName, targetSlo, targetWorkload, isCompressionDisabled,
storageSystemName, extraSpecs)
if targetSgInstanceName is None:
LOG.error(_LE(
"Failed to get or create storage group %(storageGroupName)s."),
{'storageGroupName': storageGroupName})
return False
self.masking.add_volume_to_storage_group(
self.conn, controllerConfigService, targetSgInstanceName,
volumeInstance, volumeName, storageGroupName, extraSpecs)
# Check that it has been added.
sgFromVolAddedInstanceName = (
self.utils.get_storage_group_from_volume(
self.conn, volumeInstance.path, storageGroupName))
if sgFromVolAddedInstanceName is None:
LOG.error(_LE(
"Volume : %(volumeName)s has not been "
"added to target storage group %(storageGroup)s."),
{'volumeName': volumeName,
'storageGroup': targetSgInstanceName})
return False
return True
def _pool_migration(self, volumeInstance, volume, host,
volumeName, volumeStatus,
fastPolicyName, newType, extraSpecs):
"""Migrate from one pool to another (V2).
:param volumeInstance: the volume instance
:param volume: the volume object
:param host: the host object
:param volumeName: the name of the volume
:param volumeStatus: the volume status
:param fastPolicyName: the FAST policy Name
:param newType: the type to migrate to
:param extraSpecs: extra specifications
:returns: boolean -- True if migration succeeded, False if error.
"""
storageSystemName = volumeInstance['SystemName']
isValid, targetPoolName, targetFastPolicyName = (
self._is_valid_for_storage_assisted_migration(
volumeInstance.path, host, storageSystemName,
volumeName, volumeStatus))
if not isValid:
LOG.error(_LE(
"Volume %(name)s is not suitable for storage "
"assisted migration using retype."),
{'name': volumeName})
return False
if volume['host'] != host['host']:
LOG.debug(
"Retype Volume %(name)s from source host %(sourceHost)s "
"to target host %(targetHost)s.",
{'name': volumeName,
'sourceHost': volume['host'],
'targetHost': host['host']})
return self._migrate_volume(
volume, volumeInstance, targetPoolName, targetFastPolicyName,
fastPolicyName, extraSpecs, newType)
return False
def _update_pool_stats(
self, backendName, arrayInfo):
"""Update pool statistics (V2).
:param backendName: the backend name
:param arrayInfo: the arrayInfo
:returns: location_info, total_capacity_gb, free_capacity_gb,
provisioned_capacity_gb
"""
if arrayInfo['FastPolicy']:
LOG.debug(
"Fast policy %(fastPolicyName)s is enabled on %(arrayName)s.",
{'fastPolicyName': arrayInfo['FastPolicy'],
'arrayName': arrayInfo['SerialNumber']})
else:
LOG.debug(
"No Fast policy for Array:%(arrayName)s "
"backend:%(backendName)s.",
{'arrayName': arrayInfo['SerialNumber'],
'backendName': backendName})
storageSystemInstanceName = self.utils.find_storageSystem(
self.conn, arrayInfo['SerialNumber'])
isTieringPolicySupported = (
self.fast.is_tiering_policy_enabled_on_storage_system(
self.conn, storageSystemInstanceName))
if (arrayInfo['FastPolicy'] is not None and
isTieringPolicySupported is True): # FAST enabled
(total_capacity_gb, free_capacity_gb, provisioned_capacity_gb,
array_max_over_subscription) = (
self.fast.get_capacities_associated_to_policy(
self.conn, arrayInfo['SerialNumber'],
arrayInfo['FastPolicy']))
LOG.info(_LI(
"FAST: capacity stats for policy %(fastPolicyName)s on array "
"%(arrayName)s. total_capacity_gb=%(total_capacity_gb)lu, "
"free_capacity_gb=%(free_capacity_gb)lu."),
{'fastPolicyName': arrayInfo['FastPolicy'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb})
else: # NON-FAST
(total_capacity_gb, free_capacity_gb, provisioned_capacity_gb,
array_max_over_subscription) = (
self.utils.get_pool_capacities(self.conn,
arrayInfo['PoolName'],
arrayInfo['SerialNumber']))
LOG.info(_LI(
"NON-FAST: capacity stats for pool %(poolName)s on array "
"%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
"free_capacity_gb=%(free_capacity_gb)lu."),
{'poolName': arrayInfo['PoolName'],
'arrayName': arrayInfo['SerialNumber'],
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb})
location_info = ("%(arrayName)s#%(poolName)s#%(policyName)s"
% {'arrayName': arrayInfo['SerialNumber'],
'poolName': arrayInfo['PoolName'],
'policyName': arrayInfo['FastPolicy']})
return (location_info, total_capacity_gb, free_capacity_gb,
provisioned_capacity_gb, array_max_over_subscription)
def _set_v2_extra_specs(self, extraSpecs, poolRecord):
"""Set the VMAX V2 extra specs.
:param extraSpecs: extra specifications
:param poolRecord: pool record
:returns: dict -- the extraSpecs
:raises: VolumeBackendAPIException
"""
try:
stripedMetaCount = extraSpecs[STRIPECOUNT]
extraSpecs[MEMBERCOUNT] = stripedMetaCount
extraSpecs[COMPOSITETYPE] = STRIPED
LOG.debug(
"There are: %(stripedMetaCount)s striped metas in "
"the extra specs.",
{'stripedMetaCount': stripedMetaCount})
except KeyError:
memberCount = '1'
extraSpecs[MEMBERCOUNT] = memberCount
extraSpecs[COMPOSITETYPE] = CONCATENATED
LOG.debug("StripedMetaCount is not in the extra specs.")
# Get the FAST policy from the file. This value can be None if the
# user doesn't want to associate with any FAST policy.
if poolRecord['FastPolicy']:
LOG.debug("The fast policy name is: %(fastPolicyName)s.",
{'fastPolicyName': poolRecord['FastPolicy']})
extraSpecs[FASTPOLICY] = poolRecord['FastPolicy']
extraSpecs[ISV3] = False
extraSpecs = self._set_common_extraSpecs(extraSpecs, poolRecord)
LOG.debug("Pool is: %(pool)s "
"Array is: %(array)s "
"FastPolicy is: %(fastPolicy)s "
"CompositeType is: %(compositeType)s "
"MemberCount is: %(memberCount)s.",
{'pool': extraSpecs[POOL],
'array': extraSpecs[ARRAY],
'fastPolicy': extraSpecs[FASTPOLICY],
'compositeType': extraSpecs[COMPOSITETYPE],
'memberCount': extraSpecs[MEMBERCOUNT]})
return extraSpecs
def _set_v3_extra_specs(self, extraSpecs, poolRecord):
"""Set the VMAX V3 extra specs.
If SLO or workload are not specified then the default
values are NONE and the Optimized SLO will be assigned to the
volume.
:param extraSpecs: extra specifications
:param poolRecord: pool record
:returns: dict -- the extra specifications dictionary
"""
if extraSpecs['MultiPoolSupport'] is True:
sloFromExtraSpec = None
workloadFromExtraSpec = None
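            # pool_name is expected to encode the SLO and workload as the
            # first two '+'-separated tokens, e.g. 'Diamond+OLTP+...'
            # (illustrative values).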
if 'pool_name' in extraSpecs:
try:
poolDetails = extraSpecs['pool_name'].split('+')
sloFromExtraSpec = poolDetails[0]
workloadFromExtraSpec = poolDetails[1]
except KeyError:
LOG.error(_LE("Error parsing SLO, workload from "
"the provided extra_specs."))
else:
# Throw an exception as it is compulsory to have
# pool_name in the extra specs
exceptionMessage = (_(
"Pool_name is not present in the extraSpecs "
"and MultiPoolSupport is enabled"))
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# If MultiPoolSupport is enabled, we completely
# ignore any entry for SLO & Workload in the poolRecord
extraSpecs[SLO] = sloFromExtraSpec
extraSpecs[WORKLOAD] = workloadFromExtraSpec
else:
extraSpecs[SLO] = poolRecord['SLO']
extraSpecs[WORKLOAD] = poolRecord['Workload']
extraSpecs[ISV3] = True
extraSpecs = self._set_common_extraSpecs(extraSpecs, poolRecord)
if self.utils.is_all_flash(self.conn, extraSpecs[ARRAY]):
try:
extraSpecs[self.utils.DISABLECOMPRESSION]
# If not True remove it.
if not self.utils.str2bool(
extraSpecs[self.utils.DISABLECOMPRESSION]):
extraSpecs.pop(self.utils.DISABLECOMPRESSION, None)
except KeyError:
pass
else:
extraSpecs.pop(self.utils.DISABLECOMPRESSION, None)
LOG.debug("Pool is: %(pool)s "
"Array is: %(array)s "
"SLO is: %(slo)s "
"Workload is: %(workload)s.",
{'pool': extraSpecs[POOL],
'array': extraSpecs[ARRAY],
'slo': extraSpecs[SLO],
'workload': extraSpecs[WORKLOAD]})
return extraSpecs
def _set_common_extraSpecs(self, extraSpecs, poolRecord):
"""Set common extra specs.
The extraSpecs are common to v2 and v3
:param extraSpecs: extra specifications
:param poolRecord: pool record
:returns: dict -- the extra specifications dictionary
"""
extraSpecs[POOL] = poolRecord['PoolName']
extraSpecs[ARRAY] = poolRecord['SerialNumber']
extraSpecs[PORTGROUPNAME] = poolRecord['PortGroup']
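        # Apply user defined interval and retries overrides, if present.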
if 'Interval' in poolRecord and poolRecord['Interval']:
extraSpecs[INTERVAL] = poolRecord['Interval']
LOG.debug("The user defined interval is : %(intervalInSecs)s.",
{'intervalInSecs': poolRecord['Interval']})
else:
LOG.debug("Interval not overridden, default of 10 assumed.")
if 'Retries' in poolRecord and poolRecord['Retries']:
extraSpecs[RETRIES] = poolRecord['Retries']
LOG.debug("The user defined retries is : %(retries)s.",
{'retries': poolRecord['Retries']})
else:
LOG.debug("Retries not overridden, default of 60 assumed.")
return extraSpecs
def _delete_from_pool(self, storageConfigService, volumeInstance,
volumeName, deviceId, fastPolicyName, extraSpecs):
"""Delete from pool (v2).
:param storageConfigService: the storage config service
:param volumeInstance: the volume instance
:param volumeName: the volume Name
:param deviceId: the device ID of the volume
:param fastPolicyName: the FAST policy name(if it exists)
:param extraSpecs: extra specifications
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
storageSystemName = volumeInstance['SystemName']
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
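        # For FAST managed volumes, remove the volume from its default
        # storage group before returning it to the pool.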
if fastPolicyName is not None:
defaultStorageGroupInstanceName = (
self.masking.remove_device_from_default_storage_group(
self.conn, controllerConfigurationService,
volumeInstance.path, volumeName, fastPolicyName,
extraSpecs))
if defaultStorageGroupInstanceName is None:
LOG.warning(_LW(
"The volume: %(volumename)s. was not first part of the "
"default storage group for FAST policy %(fastPolicyName)s"
"."),
{'volumename': volumeName,
'fastPolicyName': fastPolicyName})
# Check if it is part of another storage group.
self._remove_device_from_storage_group(
controllerConfigurationService,
volumeInstance.path, volumeName, extraSpecs)
else:
# Check if volume is part of a storage group.
self._remove_device_from_storage_group(
controllerConfigurationService,
volumeInstance.path, volumeName, extraSpecs)
LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool "
"ConfigService: %(service)s TheElement: %(vol_instance)s "
"DeviceId: %(deviceId)s.",
{'service': storageConfigService,
'name': volumeName,
'vol_instance': volumeInstance.path,
'deviceId': deviceId})
try:
rc = self.provision.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
except Exception:
# If we cannot successfully delete the volume then we want to
# return the volume to the default storage group.
if (fastPolicyName is not None and
defaultStorageGroupInstanceName is not None and
storageSystemName is not None):
assocDefaultStorageGroupName = (
self.fast
.add_volume_to_default_storage_group_for_fast_policy(
self.conn, controllerConfigurationService,
volumeInstance, volumeName, fastPolicyName,
extraSpecs))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
"Failed to Roll back to re-add volume %(volumeName)s "
"to default storage group for fast policy "
"%(fastPolicyName)s. Please contact your sysadmin to "
"get the volume returned to the default "
"storage group."),
{'volumeName': volumeName,
'fastPolicyName': fastPolicyName})
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
{'volumeName': volumeName})
LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
return rc
def _delete_from_pool_v3(self, storageConfigService, volumeInstance,
volumeName, deviceId, extraSpecs, volume=None):
"""Delete from pool (v3).
:param storageConfigService: the storage config service
:param volumeInstance: the volume instance
:param volumeName: the volume Name
:param deviceId: the device ID of the volume
:param extraSpecs: extra specifications
:param volume: the cinder volume object
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
storageSystemName = volumeInstance['SystemName']
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystemName))
# Check if it is part of a storage group and delete it
# extra logic for case when volume is the last member.
self.masking.remove_and_reset_members(
self.conn, controllerConfigurationService, volumeInstance,
volumeName, extraSpecs, None, False)
if volume and self.utils.is_replication_enabled(extraSpecs):
self.cleanup_lun_replication(self.conn, volume, volumeName,
volumeInstance, extraSpecs)
LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool "
"ConfigServic: %(service)s TheElement: %(vol_instance)s "
"DeviceId: %(deviceId)s.",
{'service': storageConfigService,
'name': volumeName,
'vol_instance': volumeInstance.path,
'deviceId': deviceId})
try:
rc = self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigService, volumeInstance.path,
volumeName, extraSpecs)
except Exception:
# If we cannot successfully delete the volume, then we want to
# return the volume to the default storage group,
# which should be the SG it previously belonged to.
self.masking.return_volume_to_default_storage_group_v3(
self.conn, controllerConfigurationService,
volumeInstance, volumeName, extraSpecs)
errorMessage = (_("Failed to delete volume %(volumeName)s.") %
{'volumeName': volumeName})
LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
return rc
def _create_clone_v2(self, repServiceInstanceName, cloneVolume,
sourceVolume, sourceInstance, isSnapshot,
extraSpecs):
"""Create a clone (v2).
:param repServiceInstanceName: the replication service
:param cloneVolume: the clone volume object
:param sourceVolume: the source volume object
:param sourceInstance: the device ID of the volume
:param isSnapshot: check to see if it is a snapshot
:param extraSpecs: extra specifications
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
# Check if the source volume contains any meta devices.
metaHeadInstanceName = self.utils.get_volume_meta_head(
self.conn, sourceInstance.path)
if metaHeadInstanceName is None: # Simple volume.
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
sourceInstance, None, extraSpecs, isSnapshot)
else: # Composite volume with meta device members.
            # Check the meta members' capacities.
metaMemberInstanceNames = (
self.utils.get_composite_elements(
self.conn, sourceInstance))
volumeCapacities = self.utils.get_meta_members_capacity_in_byte(
self.conn, metaMemberInstanceNames)
LOG.debug("Volume capacities: %(metasizes)s.",
{'metasizes': volumeCapacities})
if len(set(volumeCapacities)) == 1:
LOG.debug("Meta volume all of the same size.")
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
sourceInstance, None, extraSpecs, isSnapshot)
LOG.debug("Meta volumes are of different sizes, "
"%d different sizes.", len(set(volumeCapacities)))
baseTargetVolumeInstance = None
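            # Build the target composite volume member by member so that
            # it mirrors the meta layout of the source volume.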
for volumeSizeInbits in volumeCapacities:
if baseTargetVolumeInstance is None: # Create base volume.
baseVolumeName = "TargetBaseVol"
volume = {'size': int(self.utils.convert_bits_to_gbs(
volumeSizeInbits))}
_rc, baseVolumeDict, storageSystemName = (
self._create_composite_volume(
volume, baseVolumeName, volumeSizeInbits,
extraSpecs, 1))
baseTargetVolumeInstance = self.utils.find_volume_instance(
self.conn, baseVolumeDict, baseVolumeName)
LOG.debug("Base target volume %(targetVol)s created. "
"capacity in bits: %(capInBits)lu.",
{'capInBits': volumeSizeInbits,
'targetVol': baseTargetVolumeInstance.path})
else: # Create append volume
targetVolumeName = "MetaVol"
volume = {'size': int(self.utils.convert_bits_to_gbs(
volumeSizeInbits))}
storageConfigService = (
self.utils.find_storage_configuration_service(
self.conn, storageSystemName))
unboundVolumeInstance = (
self._create_and_get_unbound_volume(
self.conn, storageConfigService,
baseTargetVolumeInstance.path, volumeSizeInbits,
extraSpecs))
if unboundVolumeInstance is None:
exceptionMessage = (_(
"Error Creating unbound volume."))
LOG.error(exceptionMessage)
# Remove target volume
self._delete_target_volume_v2(storageConfigService,
baseTargetVolumeInstance,
extraSpecs)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Append the new unbound volume to the
# base target composite volume.
baseTargetVolumeInstance = self.utils.find_volume_instance(
self.conn, baseVolumeDict, baseVolumeName)
try:
elementCompositionService = (
self.utils.find_element_composition_service(
self.conn, storageSystemName))
compositeType = self.utils.get_composite_type(
extraSpecs[COMPOSITETYPE])
_rc, modifiedVolumeDict = (
self._modify_and_get_composite_volume_instance(
self.conn,
elementCompositionService,
baseTargetVolumeInstance,
unboundVolumeInstance.path,
targetVolumeName,
compositeType,
extraSpecs))
if modifiedVolumeDict is None:
exceptionMessage = (_(
"Error appending volume %(volumename)s to "
"target base volume.")
% {'volumename': targetVolumeName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
except Exception:
exceptionMessage = (_(
"Exception appending meta volume to target volume "
"%(volumename)s.")
% {'volumename': baseVolumeName})
LOG.error(exceptionMessage)
# Remove append volume and target base volume
self._delete_target_volume_v2(
storageConfigService, unboundVolumeInstance,
extraSpecs)
self._delete_target_volume_v2(
storageConfigService, baseTargetVolumeInstance,
extraSpecs)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("Create V2 replica for meta members of different sizes.")
return self._create_v2_replica_and_delete_clone_relationship(
repServiceInstanceName, cloneVolume, sourceVolume,
sourceInstance, baseTargetVolumeInstance, extraSpecs,
isSnapshot)
def _create_v2_replica_and_delete_clone_relationship(
self, repServiceInstanceName, cloneVolume, sourceVolume,
sourceInstance, targetInstance, extraSpecs, isSnapshot=False):
"""Create a replica and delete the clone relationship.
:param repServiceInstanceName: the replication service
:param cloneVolume: the clone volume object
:param sourceVolume: the source volume object
:param sourceInstance: the source volume instance
:param targetInstance: the target volume instance
:param extraSpecs: extra specifications
:param isSnapshot: check to see if it is a snapshot
:returns: int -- return code
:returns: dict -- cloneDict
"""
sourceName = sourceVolume['name']
cloneId = cloneVolume['id']
cloneName = self.utils.get_volume_element_name(cloneId)
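        # Create the element replica; on failure clean up any partially
        # created target volume and copy session.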
try:
rc, job = self.provision.create_element_replica(
self.conn, repServiceInstanceName, cloneName, sourceName,
sourceInstance, targetInstance, extraSpecs)
except Exception:
exceptionMessage = (_(
"Exception during create element replica. "
"Clone name: %(cloneName)s "
"Source name: %(sourceName)s "
"Extra specs: %(extraSpecs)s ")
% {'cloneName': cloneName,
'sourceName': sourceName,
'extraSpecs': extraSpecs})
LOG.error(exceptionMessage)
if targetInstance is not None:
# Check if the copy session exists.
storageSystem = targetInstance['SystemName']
syncInstanceName = self.utils.find_sync_sv_by_volume(
self.conn, storageSystem, targetInstance, extraSpecs,
False)
if syncInstanceName is not None:
# Remove the Clone relationship.
rc, job = self.provision.delete_clone_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
extraSpecs, True)
storageConfigService = (
self.utils.find_storage_configuration_service(
self.conn, storageSystem))
self._delete_target_volume_v2(
storageConfigService, targetInstance, extraSpecs)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
cloneDict = self.provision.get_volume_dict_from_job(
self.conn, job['Job'])
fastPolicyName = extraSpecs[FASTPOLICY]
if isSnapshot:
if fastPolicyName is not None:
storageSystemName = sourceInstance['SystemName']
self._add_clone_to_default_storage_group(
fastPolicyName, storageSystemName, cloneDict, cloneName,
extraSpecs)
LOG.info(_LI("Snapshot creation %(cloneName)s completed. "
"Source Volume: %(sourceName)s."),
{'cloneName': cloneName,
'sourceName': sourceName})
return rc, cloneDict
cloneVolume['provider_location'] = six.text_type(cloneDict)
syncInstanceName, storageSystemName = (
self._find_storage_sync_sv_sv(cloneVolume, sourceVolume,
extraSpecs))
# Remove the Clone relationship so it can be used as a regular lun.
# 8 - Detach operation.
rc, job = self.provision.delete_clone_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
extraSpecs)
if fastPolicyName is not None:
self._add_clone_to_default_storage_group(
fastPolicyName, storageSystemName, cloneDict, cloneName,
extraSpecs)
return rc, cloneDict
def get_target_wwns_from_masking_view(
self, storageSystem, volume, connector):
"""Find target WWNs via the masking view.
:param storageSystem: the storage system name
:param volume: volume to be attached
:param connector: the connector dict
:returns: list -- the target WWN list
"""
targetWwns = []
mvInstanceName = self.get_masking_view_by_volume(volume, connector)
if mvInstanceName is not None:
targetWwns = self.masking.get_target_wwns(
self.conn, mvInstanceName)
LOG.info(_LI("Target wwns in masking view %(maskingView)s: "
"%(targetWwns)s."),
{'maskingView': mvInstanceName,
'targetWwns': six.text_type(targetWwns)})
return targetWwns
def get_port_group_from_masking_view(self, maskingViewInstanceName):
"""Get the port groups in a masking view.
:param maskingViewInstanceName: masking view instance name
:returns: portGroupInstanceName
"""
return self.masking.get_port_group_from_masking_view(
self.conn, maskingViewInstanceName)
def get_initiator_group_from_masking_view(self, maskingViewInstanceName):
"""Get the initiator group in a masking view.
:param maskingViewInstanceName: masking view instance name
:returns: initiatorGroupInstanceName
"""
return self.masking.get_initiator_group_from_masking_view(
self.conn, maskingViewInstanceName)
def get_masking_view_by_volume(self, volume, connector):
"""Given volume, retrieve the masking view instance name.
:param volume: the volume
:param connector: the connector object
:returns: maskingviewInstanceName
"""
LOG.debug("Finding Masking View for volume %(volume)s.",
{'volume': volume})
volumeInstance = self._find_lun(volume)
return self.masking.get_masking_view_by_volume(
self.conn, volumeInstance, connector)
def get_masking_views_by_port_group(self, portGroupInstanceName):
"""Given port group, retrieve the masking view instance name.
:param portGroupInstanceName: port group instance name
:returns: list -- maskingViewInstanceNames
"""
LOG.debug("Finding Masking Views for port group %(pg)s.",
{'pg': portGroupInstanceName})
return self.masking.get_masking_views_by_port_group(
self.conn, portGroupInstanceName)
def get_masking_views_by_initiator_group(
self, initiatorGroupInstanceName):
"""Given initiator group, retrieve the masking view instance name.
:param initiatorGroupInstanceName: initiator group instance name
:returns: list -- maskingViewInstanceNames
"""
LOG.debug("Finding Masking Views for initiator group %(ig)s.",
{'ig': initiatorGroupInstanceName})
return self.masking.get_masking_views_by_initiator_group(
self.conn, initiatorGroupInstanceName)
def _create_replica_v3(
self, repServiceInstanceName, cloneVolume,
sourceVolume, sourceInstance, isSnapshot, extraSpecs):
"""Create a replica.
V3 specific function, create replica for source volume,
including clone and snapshot.
:param repServiceInstanceName: the replication service
:param cloneVolume: the clone volume object
:param sourceVolume: the source volume object
:param sourceInstance: the device ID of the volume
:param isSnapshot: boolean -- check to see if it is a snapshot
:param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- cloneDict
"""
cloneId = cloneVolume['id']
cloneName = self.utils.get_volume_element_name(cloneId)
# SyncType 7: snap, VG3R default snapshot is snapVx.
syncType = self.utils.get_num(SNAPVX, '16')
# Operation 9: Dissolve for snapVx.
operation = self.utils.get_num(DISSOLVE_SNAPVX, '16')
rsdInstance = None
targetInstance = None
copyState = self.utils.get_num(4, '16')
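        # For a snapshot the array supplies the target element; for a
        # clone a duplicate target volume is created up front.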
if isSnapshot:
rsdInstance = self.utils.set_target_element_supplier_in_rsd(
self.conn, repServiceInstanceName, SNAPVX_REPLICATION_TYPE,
CREATE_NEW_TARGET, extraSpecs)
else:
targetInstance = self._create_duplicate_volume(
sourceInstance, cloneName, extraSpecs)
try:
rc, job = (
self.provisionv3.create_element_replica(
self.conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, extraSpecs, targetInstance, rsdInstance,
copyState))
except Exception:
LOG.warning(_LW(
"Clone failed on V3. Cleaning up the target volume. "
"Clone name: %(cloneName)s "),
{'cloneName': cloneName})
if targetInstance:
self._cleanup_target(
repServiceInstanceName, targetInstance, extraSpecs)
# Re-throw the exception.
raise
cloneDict = self.provisionv3.get_volume_dict_from_job(
self.conn, job['Job'])
targetVolumeInstance = (
self.provisionv3.get_volume_from_job(self.conn, job['Job']))
LOG.info(_LI("The target instance device id is: %(deviceid)s."),
{'deviceid': targetVolumeInstance['DeviceID']})
if not isSnapshot:
cloneVolume['provider_location'] = six.text_type(cloneDict)
syncInstanceName, _storageSystem = (
self._find_storage_sync_sv_sv(cloneVolume, sourceVolume,
extraSpecs, True))
rc, job = self.provisionv3.break_replication_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
operation, extraSpecs)
return rc, cloneDict
def _cleanup_target(
self, repServiceInstanceName, targetInstance, extraSpecs):
"""cleanup target after exception
:param repServiceInstanceName: the replication service
:param targetInstance: the target instance
:param extraSpecs: extra specifications
"""
storageSystem = targetInstance['SystemName']
syncInstanceName = self.utils.find_sync_sv_by_volume(
self.conn, storageSystem, targetInstance, False)
if syncInstanceName is not None:
# Break the clone relationship.
self.provisionv3.break_replication_relationship(
self.conn, repServiceInstanceName, syncInstanceName,
DISSOLVE_SNAPVX, extraSpecs, True)
storageConfigService = (
self.utils.find_storage_configuration_service(
self.conn, storageSystem))
deviceId = targetInstance['DeviceID']
volumeName = targetInstance['Name']
self._delete_from_pool_v3(
storageConfigService, targetInstance, volumeName,
deviceId, extraSpecs)
def _delete_cg_and_members(
self, storageSystem, cgsnapshot, modelUpdate, volumes, isV3,
extraSpecs):
"""Helper function to delete a consistencygroup and its member volumes.
:param storageSystem: storage system
:param cgsnapshot: consistency group snapshot
:param modelUpdate: dict -- the model update dict
:param volumes: the list of member volumes
:param isV3: boolean
:param extraSpecs: extra specifications
:returns: dict -- modelUpdate
:returns: list -- the updated list of member volumes
:raises: VolumeBackendAPIException
"""
replicationService = self.utils.find_replication_service(
self.conn, storageSystem)
storageConfigservice = (
self.utils.find_storage_configuration_service(
self.conn, storageSystem))
cgInstanceName, cgName = self._find_consistency_group(
replicationService, six.text_type(cgsnapshot['id']))
if cgInstanceName is None:
LOG.error(_LE("Cannot find CG group %(cgName)s."),
{'cgName': cgsnapshot['id']})
modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED}
return modelUpdate, []
memberInstanceNames = self._get_members_of_replication_group(
cgInstanceName)
self.provision.delete_consistency_group(
self.conn, replicationService, cgInstanceName, cgName,
extraSpecs)
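        # Once the group is deleted, detach each member from its storage
        # group and return it to the pool.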
if memberInstanceNames:
try:
controllerConfigurationService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystem))
for memberInstanceName in memberInstanceNames:
self._remove_device_from_storage_group(
controllerConfigurationService,
memberInstanceName, 'Member Volume', extraSpecs)
LOG.debug("Deleting CG members. CG: %(cg)s "
"%(numVols)lu member volumes: %(memVols)s.",
{'cg': cgInstanceName,
'numVols': len(memberInstanceNames),
'memVols': memberInstanceNames})
if isV3:
self.provisionv3.delete_volume_from_pool(
self.conn, storageConfigservice,
memberInstanceNames, None, extraSpecs)
else:
self.provision.delete_volume_from_pool(
self.conn, storageConfigservice,
memberInstanceNames, None, extraSpecs)
for volumeRef in volumes:
volumeRef['status'] = 'deleted'
except Exception:
for volumeRef in volumes:
volumeRef['status'] = 'error_deleting'
modelUpdate['status'] = 'error_deleting'
return modelUpdate, volumes
def _delete_target_volume_v2(
self, storageConfigService, targetVolumeInstance, extraSpecs):
"""Helper function to delete the clone target volume instance.
:param storageConfigService: storage configuration service instance
:param targetVolumeInstance: clone target volume instance
:param extraSpecs: extra specifications
"""
deviceId = targetVolumeInstance['DeviceID']
volumeName = targetVolumeInstance['Name']
rc = self._delete_from_pool(storageConfigService,
targetVolumeInstance,
volumeName, deviceId,
extraSpecs[FASTPOLICY],
extraSpecs)
return rc
def _validate_pool(self, volume, extraSpecs=None, host=None):
"""Get the pool from volume['host'].
        There may be backward compatibility concerns, so a check is made
        to see if a version has been added to provider_location.
If it has, we know we are at the current version, if not, we
assume it was created pre 'Pool Aware Scheduler' feature.
:param volume: the volume Object
        :param extraSpecs: extraSpecs provided in the volume type
        :param host: the host, defaults to volume['host'] if not provided
        :returns: string -- pool
:raises: VolumeBackendAPIException
"""
pool = None
# Volume is None in CG ops.
if volume is None:
return pool
if host is None:
host = volume['host']
# This check is for all operations except a create.
# On a create provider_location is None
try:
if volume['provider_location']:
version = self._get_version_from_provider_location(
volume['provider_location'])
if not version:
return pool
except KeyError:
return pool
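        # Extract the pool from the host string, which is typically of
        # the form 'host@backend#pool'.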
try:
pool = volume_utils.extract_host(host, 'pool')
if pool:
LOG.debug("Pool from volume['host'] is %(pool)s.",
{'pool': pool})
# Check if it matches with the poolname if it is provided
# in the extra specs
if extraSpecs is not None:
if 'pool_name' in extraSpecs:
if extraSpecs['pool_name'] != pool:
exceptionMessage = (_(
"Pool from volume['host'] %(host)s doesn't"
" match with pool_name in extraSpecs.")
% {'host': volume['host']})
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
exceptionMessage = (_(
"Pool from volume['host'] %(host)s not found.")
% {'host': volume['host']})
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
except Exception as ex:
exceptionMessage = (_(
"Pool from volume['host'] failed with: %(ex)s.")
% {'ex': ex})
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return pool
def _get_version_from_provider_location(self, loc):
"""Get the version from the provider location.
:param loc: the provider_location dict
:returns: version or None
"""
version = None
try:
if isinstance(loc, six.string_types):
name = ast.literal_eval(loc)
version = name['version']
except KeyError:
pass
return version
def manage_existing(self, volume, external_ref):
"""Manages an existing VMAX Volume (import to Cinder).
Renames the existing volume to match the expected name for the volume.
Also need to consider things like QoS, Emulation, account/tenant.
:param volume: the volume object including the volume_type_id
:param external_ref: reference to the existing volume
:returns: dict -- model_update
:raises: VolumeBackendAPIException
"""
extraSpecs = self._initial_setup(volume)
self.conn = self._get_ecom_connection()
arrayName, deviceId = self.utils.get_array_and_device_id(volume,
external_ref)
# Manage existing volume is not supported if fast enabled.
if extraSpecs[FASTPOLICY]:
LOG.warning(_LW(
"FAST is enabled. Policy: %(fastPolicyName)s."),
{'fastPolicyName': extraSpecs[FASTPOLICY]})
exceptionMessage = (_(
"Manage volume is not supported if FAST is enable. "
"FAST policy: %(fastPolicyName)s.")
% {'fastPolicyName': extraSpecs[FASTPOLICY]})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Check if the volume is attached by checking if in any masking view.
volumeInstanceName = (
self.utils.find_volume_by_device_id_on_array(self.conn,
arrayName, deviceId))
sgInstanceNames = (
self.utils.get_storage_groups_from_volume(
self.conn, volumeInstanceName))
for sgInstanceName in sgInstanceNames:
mvInstanceNames = (
self.masking.get_masking_view_from_storage_group(
self.conn, sgInstanceName))
for mvInstanceName in mvInstanceNames:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. "
"Volume is in masking view %(mv)s.")
% {'deviceId': deviceId,
'mv': mvInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Check if there is any associated snapshots with the volume.
cinderPoolInstanceName, storageSystemName = (
self._get_pool_and_storage_system(extraSpecs))
repSessionInstanceName = (
self.utils.get_associated_replication_from_source_volume(
self.conn, storageSystemName, deviceId))
if repSessionInstanceName:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. "
"It is the source volume of replication session %(sync)s.")
% {'deviceId': deviceId,
'sync': repSessionInstanceName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Make sure the existing external volume is in the same storage pool.
volumePoolInstanceName = (
self.utils.get_assoc_pool_from_volume(self.conn,
volumeInstanceName))
volumePoolName = volumePoolInstanceName['InstanceID']
cinderPoolName = cinderPoolInstanceName['InstanceID']
LOG.debug("Storage pool of existing volume: %(volPool)s, "
"Storage pool currently managed by cinder: %(cinderPool)s.",
{'volPool': volumePoolName,
'cinderPool': cinderPoolName})
if volumePoolName != cinderPoolName:
exceptionMessage = (_(
"Unable to import volume %(deviceId)s to cinder. The external "
"volume is not in the pool managed by current cinder host.")
% {'deviceId': deviceId})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
# Rename the volume
volumeId = volume['id']
volumeElementName = self.utils.get_volume_element_name(volumeId)
LOG.debug("Rename volume %(vol)s to %(elementName)s.",
{'vol': volumeInstanceName,
'elementName': volumeElementName})
volumeInstance = self.utils.rename_volume(self.conn,
volumeInstanceName,
volumeElementName)
keys = {}
volpath = volumeInstance.path
keys['CreationClassName'] = volpath['CreationClassName']
keys['SystemName'] = volpath['SystemName']
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
model_update = {}
provider_location = {}
provider_location['classname'] = volpath['CreationClassName']
provider_location['keybindings'] = keys
# set-up volume replication, if enabled
if self.utils.is_replication_enabled(extraSpecs):
replication_status, replication_driver_data = (
self.setup_volume_replication(
self.conn, volume, provider_location, extraSpecs))
model_update.update(
{'replication_status': replication_status})
model_update.update(
{'replication_driver_data': six.text_type(
replication_driver_data)})
model_update.update({'display_name': volumeElementName})
model_update.update(
{'provider_location': six.text_type(provider_location)})
return model_update
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing VMAX volume to manage_existing.
:param self: reference to class
:param volume: the volume object including the volume_type_id
:param external_ref: reference to the existing volume
:returns: size of the volume in GB
"""
LOG.debug("Volume in manage_existing_get_size: %(volume)s.",
{'volume': volume})
arrayName, deviceId = self.utils.get_array_and_device_id(volume,
external_ref)
volumeInstanceName = (
self.utils.find_volume_by_device_id_on_array(self.conn,
arrayName, deviceId))
volumeInstance = self.conn.GetInstance(volumeInstanceName)
byteSize = self.utils.get_volume_size(self.conn, volumeInstance)
gbSize = int(math.ceil(float(byteSize) / units.Gi))
LOG.debug(
"Size of volume %(deviceID)s is %(volumeSize)s GB.",
{'deviceID': deviceId,
'volumeSize': gbSize})
return gbSize
def unmanage(self, volume):
"""Export VMAX volume from Cinder.
Leave the volume intact on the backend array.
:param volume: the volume object
:raises: VolumeBackendAPIException
"""
volumeName = volume['name']
volumeId = volume['id']
LOG.debug("Unmanage volume %(name)s, id=%(id)s",
{'name': volumeName,
'id': volumeId})
self._initial_setup(volume)
self.conn = self._get_ecom_connection()
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
            exceptionMessage = (_("Cannot find Volume: %(id)s for the "
                                  "unmanage operation. Exiting...")
% {'id': volumeId})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
# Rename the volume to volumeId, thus remove the 'OS-' prefix.
volumeInstance = self.utils.rename_volume(self.conn,
volumeInstance,
volumeId)
def update_consistencygroup(self, group, add_volumes,
remove_volumes):
"""Updates LUNs in consistency group.
:param group: storage configuration service instance
:param add_volumes: the volumes uuids you want to add to the CG
:param remove_volumes: the volumes uuids you want to remove from
the CG
"""
LOG.info(_LI("Update Consistency Group: %(group)s. "
"This adds and/or removes volumes from a CG."),
{'group': group['id']})
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
cg_name = self._update_consistency_group_name(group)
add_vols = [vol for vol in add_volumes] if add_volumes else []
add_instance_names = self._get_volume_instance_names(add_vols)
remove_vols = [vol for vol in remove_volumes] if remove_volumes else []
remove_instance_names = self._get_volume_instance_names(remove_vols)
self.conn = self._get_ecom_connection()
try:
replicationService, storageSystem, __, __ = (
self._get_consistency_group_utils(self.conn, group))
cgInstanceName, __ = (
self._find_consistency_group(
replicationService, six.text_type(group['id'])))
if cgInstanceName is None:
raise exception.ConsistencyGroupNotFound(
consistencygroup_id=cg_name)
# Add volume(s) to a consistency group
interval_retries_dict = self.utils.get_default_intervals_retries()
if add_instance_names:
self.provision.add_volume_to_cg(
self.conn, replicationService, cgInstanceName,
add_instance_names, cg_name, None,
interval_retries_dict)
# Remove volume(s) from a consistency group
if remove_instance_names:
self.provision.remove_volume_from_cg(
self.conn, replicationService, cgInstanceName,
remove_instance_names, cg_name, None,
interval_retries_dict)
except exception.ConsistencyGroupNotFound:
raise
except Exception as ex:
LOG.error(_LE("Exception: %(ex)s"), {'ex': ex})
exceptionMessage = (_("Failed to update consistency group:"
" %(cgName)s.")
% {'cgName': group['id']})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return modelUpdate, None, None
def _get_volume_instance_names(self, volumes):
"""Get volume instance names from volume.
:param volumes: volume objects
:returns: volume instance names
"""
volumeInstanceNames = []
for volume in volumes:
volumeInstance = self._find_lun(volume)
if volumeInstance is None:
LOG.error(_LE("Volume %(name)s not found on the array."),
{'name': volume['name']})
else:
volumeInstanceNames.append(volumeInstance.path)
return volumeInstanceNames
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot, snapshots, source_cg,
source_vols):
"""Creates the consistency group from source.
:param context: the context
:param group: the consistency group object to be created
:param volumes: volumes in the consistency group
:param cgsnapshot: the source consistency group snapshot
:param snapshots: snapshots of the source volumes
:param source_cg: the source consistency group
:param source_vols: the source vols
:returns: model_update, volumes_model_update
model_update is a dictionary of cg status
volumes_model_update is a list of dictionaries of volume
update
"""
if cgsnapshot:
source_vols_or_snapshots = snapshots
source_id = cgsnapshot['id']
elif source_cg:
source_vols_or_snapshots = source_vols
source_id = source_cg['id']
else:
            exceptionMessage = (_("Must supply either CG snapshot or "
"a source CG."))
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("Enter EMCVMAXCommon::create_consistencygroup_from_src. "
"Group to be created: %(cgId)s, "
"Source : %(SourceCGId)s.",
{'cgId': group['id'],
'SourceCGId': source_id})
self.create_consistencygroup(context, group)
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
try:
replicationService, storageSystem, extraSpecsDictList, isV3 = (
self._get_consistency_group_utils(self.conn, group))
if replicationService is None:
exceptionMessage = (_(
"Cannot find replication service on system %s.") %
storageSystem)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
targetCgInstanceName, targetCgName = self._find_consistency_group(
replicationService, six.text_type(group['id']))
LOG.debug("Create CG %(targetCg)s from snapshot.",
{'targetCg': targetCgInstanceName})
dictOfVolumeDicts = {}
targetVolumeNames = {}
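            # Size each target volume from its source volume or snapshot
            # and add it to the new consistency group.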
for volume, source_vol_or_snapshot in zip(
volumes, source_vols_or_snapshots):
if 'size' in source_vol_or_snapshot:
volumeSizeInbits = int(self.utils.convert_gb_to_bits(
source_vol_or_snapshot['size']))
else:
volumeSizeInbits = int(self.utils.convert_gb_to_bits(
source_vol_or_snapshot['volume_size']))
for extraSpecsDict in extraSpecsDictList:
if volume['volume_type_id'] in extraSpecsDict.values():
extraSpecs = extraSpecsDict.get('extraSpecs')
if 'pool_name' in extraSpecs:
extraSpecs = self.utils.update_extra_specs(
extraSpecs)
# Create a random UUID and use it as volume name
targetVolumeName = six.text_type(uuid.uuid4())
volumeDict = self._create_vol_and_add_to_cg(
volumeSizeInbits, replicationService,
targetCgInstanceName, targetCgName,
source_vol_or_snapshot['id'],
extraSpecs, targetVolumeName)
dictOfVolumeDicts[volume['id']] = volumeDict
targetVolumeNames[volume['id']] = targetVolumeName
interval_retries_dict = self.utils.get_default_intervals_retries()
self._break_replica_group_relationship(
replicationService, source_id, group['id'],
targetCgInstanceName, storageSystem, interval_retries_dict,
isV3)
except Exception:
exceptionMessage = (_("Failed to create CG %(cgName)s "
"from source %(cgSnapshot)s.")
% {'cgName': group['id'],
'cgSnapshot': source_id})
LOG.exception(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
volumes_model_update = self.utils.get_volume_model_updates(
volumes, group['id'], modelUpdate['status'])
# Update the provider_location
for volume_model_update in volumes_model_update:
if volume_model_update['id'] in dictOfVolumeDicts:
volume_model_update.update(
{'provider_location': six.text_type(
dictOfVolumeDicts[volume_model_update['id']])})
# Update the volumes_model_update with admin_metadata
self.update_admin_metadata(volumes_model_update,
key='targetVolumeName',
values=targetVolumeNames)
return modelUpdate, volumes_model_update
def update_admin_metadata(
self, volumes_model_update, key, values):
"""Update the volume_model_updates with admin metadata
:param volumes_model_update: List of volume model updates
:param key: Key to be updated in the admin_metadata
:param values: Dictionary of values per volume id
"""
for volume_model_update in volumes_model_update:
volume_id = volume_model_update['id']
if volume_id in values:
admin_metadata = {}
admin_metadata.update({key: values[volume_id]})
volume_model_update.update(
{'admin_metadata': admin_metadata})
def _break_replica_group_relationship(
self, replicationService, source_id, group_id,
targetCgInstanceName, storageSystem, extraSpecs, isV3):
"""Breaks the replica group relationship.
:param replicationService: replication service
:param source_id: source identifier
:param group_id: group identifier
:param targetCgInstanceName: target CG instance
:param storageSystem: storage system
:param extraSpecs: additional info
"""
sourceCgInstanceName, sourceCgName = self._find_consistency_group(
replicationService, source_id)
if sourceCgInstanceName is None:
exceptionMessage = (_("Cannot find source CG instance. "
"consistencygroup_id: %s.") %
source_id)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
relationName = self.utils.truncate_string(group_id, TRUNCATE_5)
if isV3:
self.provisionv3.create_group_replica(
self.conn, replicationService, sourceCgInstanceName,
targetCgInstanceName, relationName, extraSpecs)
else:
self.provision.create_group_replica(
self.conn, replicationService, sourceCgInstanceName,
targetCgInstanceName, relationName, extraSpecs)
# Break the replica group relationship.
rgSyncInstanceName = self.utils.find_group_sync_rg_by_target(
self.conn, storageSystem, targetCgInstanceName, extraSpecs,
True)
if rgSyncInstanceName is not None:
if isV3:
# Operation 9: dissolve for snapVx
operation = self.utils.get_num(9, '16')
self.provisionv3.break_replication_relationship(
self.conn, replicationService, rgSyncInstanceName,
operation, extraSpecs)
else:
self.provision.delete_clone_relationship(
self.conn, replicationService,
rgSyncInstanceName, extraSpecs)
def _create_vol_and_add_to_cg(
self, volumeSizeInbits, replicationService,
targetCgInstanceName, targetCgName, source_id,
extraSpecs, targetVolumeName):
"""Creates volume and adds to CG.
:param volumeSizeInbits: volume size in bits
:param replicationService: replication service
:param targetCgInstanceName: target cg instance
:param targetCgName: target cg name
:param source_id: source identifier
:param extraSpecs: additional info
:param targetVolumeName: volume name for the target volume
:returns volumeDict: volume dictionary for the newly created volume
"""
volume = {'size': int(self.utils.convert_bits_to_gbs(
volumeSizeInbits))}
if extraSpecs[ISV3]:
_rc, volumeDict, _storageSystemName = (
self._create_v3_volume(
volume, targetVolumeName, volumeSizeInbits,
extraSpecs))
else:
_rc, volumeDict, _storageSystemName = (
self._create_composite_volume(
volume, targetVolumeName, volumeSizeInbits,
extraSpecs))
targetVolumeInstance = self.utils.find_volume_instance(
self.conn, volumeDict, targetVolumeName)
LOG.debug("Create target volume for member snapshot. "
"Source : %(snapshot)s, "
"Target volume: %(targetVol)s.",
{'snapshot': source_id,
'targetVol': targetVolumeInstance.path})
self.provision.add_volume_to_cg(self.conn,
replicationService,
targetCgInstanceName,
targetVolumeInstance.path,
targetCgName,
targetVolumeName,
extraSpecs)
return volumeDict
def _find_ip_protocol_endpoints(self, conn, storageSystemName,
portgroupname):
"""Find the IP protocol endpoint for ISCSI.
:param storageSystemName: the system name
:param portgroupname: the portgroup name
:returns: foundIpAddresses
"""
LOG.debug("The portgroup name for iscsiadm is %(pg)s",
{'pg': portgroupname})
foundipaddresses = []
configservice = (
self.utils.find_controller_configuration_service(
conn, storageSystemName))
portgroupinstancename = (
self.masking.find_port_group(conn, configservice, portgroupname))
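        # Walk the associations: port group -> iSCSI endpoints ->
        # TCP endpoints -> IP endpoints, collecting ip/iqn pairs.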
iscsiendpointinstancenames = (
self.utils.get_iscsi_protocol_endpoints(
conn, portgroupinstancename))
for iscsiendpointinstancename in iscsiendpointinstancenames:
tcpendpointinstancenames = (
self.utils.get_tcp_protocol_endpoints(
conn, iscsiendpointinstancename))
for tcpendpointinstancename in tcpendpointinstancenames:
ipendpointinstancenames = (
self.utils.get_ip_protocol_endpoints(
conn, tcpendpointinstancename))
endpoint = {}
for ipendpointinstancename in ipendpointinstancenames:
endpoint = self.get_ip_and_iqn(conn, endpoint,
ipendpointinstancename)
if bool(endpoint):
foundipaddresses.append(endpoint)
return foundipaddresses
def _extend_v3_volume(self, volumeInstance, volumeName, newSize,
extraSpecs):
"""Extends a VMAX3 volume.
:param volumeInstance: volume instance
:param volumeName: volume name
:param newSize: new size the volume will be increased to
:param extraSpecs: extra specifications
:returns: int -- return code
:returns: volumeDict
"""
new_size_in_bits = int(self.utils.convert_gb_to_bits(newSize))
storageConfigService = self.utils.find_storage_configuration_service(
self.conn, volumeInstance['SystemName'])
volumeDict, rc = self.provisionv3.extend_volume_in_SG(
self.conn, storageConfigService, volumeInstance.path,
volumeName, new_size_in_bits, extraSpecs)
return rc, volumeDict
def _create_duplicate_volume(
self, sourceInstance, cloneName, extraSpecs):
"""Create a volume in the same dimensions of the source volume.
:param sourceInstance: the source volume instance
:param cloneName: the user supplied snap name
:param extraSpecs: additional info
:returns: targetInstance
"""
numOfBlocks = sourceInstance['NumberOfBlocks']
blockSize = sourceInstance['BlockSize']
volumeSizeInbits = numOfBlocks * blockSize
volume = {'size':
int(self.utils.convert_bits_to_gbs(volumeSizeInbits))}
_rc, volumeDict, _storageSystemName = (
self._create_v3_volume(
volume, cloneName, volumeSizeInbits, extraSpecs))
targetInstance = self.utils.find_volume_instance(
self.conn, volumeDict, cloneName)
LOG.debug("Create replica target volume "
"Source Volume: %(sourceVol)s, "
"Target Volume: %(targetVol)s.",
{'sourceVol': sourceInstance.path,
'targetVol': targetInstance.path})
return targetInstance
def get_ip_and_iqn(self, conn, endpoint, ipendpointinstancename):
"""Get ip and iqn from the endpoint.
:param conn: ecom connection
:param endpoint: end point
:param ipendpointinstancename: ip endpoint
:returns: endpoint
"""
if ('iSCSIProtocolEndpoint' in six.text_type(
ipendpointinstancename['CreationClassName'])):
iqn = self.utils.get_iqn(conn, ipendpointinstancename)
if iqn:
endpoint['iqn'] = iqn
elif ('IPProtocolEndpoint' in six.text_type(
ipendpointinstancename['CreationClassName'])):
ipaddress = (
self.utils.get_iscsi_ip_address(
conn, ipendpointinstancename))
if ipaddress:
endpoint['ip'] = ipaddress
return endpoint
def _get_consistency_group_utils(self, conn, group):
"""Standard utility for consistency group.
:param conn: ecom connection
:param group: the consistency group object to be created
:return: replicationService, storageSystem, extraSpecs, isV3
"""
storageSystems = set()
extraSpecsDictList = []
isV3 = False
if isinstance(group, Group):
for volume_type in group.volume_types:
extraSpecsDict, storageSystems, isV3 = (
self._update_extra_specs_list(
volume_type.extra_specs, len(group.volume_types),
volume_type.id))
extraSpecsDictList.append(extraSpecsDict)
elif isinstance(group, ConsistencyGroup):
volumeTypeIds = group.volume_type_id.split(",")
volumeTypeIds = list(filter(None, volumeTypeIds))
for volumeTypeId in volumeTypeIds:
if volumeTypeId:
extraSpecs = self.utils.get_volumetype_extraspecs(
None, volumeTypeId)
extraSpecsDict, storageSystems, isV3 = (
self._update_extra_specs_list(
extraSpecs, len(volumeTypeIds),
volumeTypeId))
extraSpecsDictList.append(extraSpecsDict)
else:
msg = (_("Unable to get volume type ids."))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if len(storageSystems) != 1:
if not storageSystems:
msg = (_("Failed to get a single storage system "
"associated with consistencygroup_id: %(groupid)s.")
% {'groupid': group.id})
else:
msg = (_("There are multiple storage systems "
"associated with consistencygroup_id: %(groupid)s.")
% {'groupid': group.id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
storageSystem = storageSystems.pop()
replicationService = self.utils.find_replication_service(
conn, storageSystem)
return replicationService, storageSystem, extraSpecsDictList, isV3
def _update_extra_specs_list(
self, extraSpecs, list_size, volumeTypeId):
"""Update the extra specs list.
:param extraSpecs: extraSpecs
:param list_size: the size of volume type list
:param volumeTypeId: volume type identifier
        :return: extraSpecsDict, storageSystems, isV3
"""
storageSystems = set()
extraSpecsDict = {}
if 'pool_name' in extraSpecs:
isV3 = True
extraSpecs = self.utils.update_extra_specs(
extraSpecs)
extraSpecs[ISV3] = True
else:
            # Without multi pool we cannot support multiple volume types.
if list_size == 1:
extraSpecs = self._initial_setup(None, volumeTypeId)
isV3 = extraSpecs[ISV3]
else:
msg = (_("We cannot support multiple volume types if "
"multi pool functionality is not enabled."))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
__, storageSystem = (
self._get_pool_and_storage_system(extraSpecs))
if storageSystem:
storageSystems.add(storageSystem)
extraSpecsDict["volumeTypeId"] = volumeTypeId
extraSpecsDict["extraSpecs"] = extraSpecs
return extraSpecsDict, storageSystems, isV3
def _update_consistency_group_name(self, group):
"""Format id and name consistency group
:param group: the consistency group object to be created
:param update_variable: the variable of the group to be used
:return: cgname -- formatted name + id
"""
cgName = ""
if group['name'] is not None:
cgName = (
self.utils.truncate_string(group['name'], TRUNCATE_27) + "_")
cgName += six.text_type(group["id"])
return cgName
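    # Illustrative sketch: a group named "myCG" with the hypothetical id
    # "12345678-1234-1234-1234-123456789012" is formatted as
    # "myCG_12345678-1234-1234-1234-123456789012"; the name portion is
    # truncated to 27 characters if it is longer than that.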
def _sync_check(self, volumeInstance, volumeName, extraSpecs):
"""Check if volume is part of a snapshot/clone sync process.
:param volumeInstance: volume instance
:param volumeName: volume name
:param extraSpecs: extra specifications
"""
storageSystem = volumeInstance['SystemName']
# Wait for it to fully sync in case there is an ongoing
# create volume from snapshot request.
syncInstanceName = self.utils.find_sync_sv_by_volume(
self.conn, storageSystem, volumeInstance, extraSpecs,
True)
if syncInstanceName:
repservice = self.utils.find_replication_service(self.conn,
storageSystem)
# Break the replication relationship
LOG.debug("Deleting snap relationship: Source: %(volume)s "
"Synchronization: %(syncName)s.",
{'volume': volumeName,
'syncName': syncInstanceName})
if extraSpecs[ISV3]:
rc, job = self.provisionv3.break_replication_relationship(
self.conn, repservice, syncInstanceName,
DISSOLVE_SNAPVX, extraSpecs)
else:
self.provision.delete_clone_relationship(
self.conn, repservice, syncInstanceName, extraSpecs, True)
def setup_volume_replication(self, conn, sourceVolume, volumeDict,
extraSpecs, targetInstance=None):
"""Setup replication for volume, if enabled.
Called on create volume, create cloned volume,
create volume from snapshot, manage_existing,
and re-establishing a replication relationship after extending.
:param conn: the connection to the ecom server
:param sourceVolume: the source volume object
:param volumeDict: the source volume dict (the provider_location)
:param extraSpecs: extra specifications
:param targetInstance: optional, target on secondary array
:return: rep_update - dict
"""
isTargetV3 = self.utils.isArrayV3(conn, self.rep_config['array'])
if not extraSpecs[ISV3] or not isTargetV3:
exception_message = (_("Replication is not supported on "
"VMAX 2"))
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
sourceName = sourceVolume['name']
sourceInstance = self.utils.find_volume_instance(
conn, volumeDict, sourceName)
LOG.debug('Starting replication setup '
'for volume: %s.', sourceVolume['name'])
storageSystem = sourceInstance['SystemName']
# get rdf details
rdfGroupInstance, repServiceInstanceName = (
self.get_rdf_details(conn, storageSystem))
rdf_vol_size = sourceVolume['size']
# give the target volume the same Volume Element Name as the
# source volume
targetName = self.utils.get_volume_element_name(
sourceVolume['id'])
if not targetInstance:
# create a target volume on the target array
# target must be passed in on remote replication
targetInstance = self.get_target_instance(
sourceVolume, self.rep_config, rdf_vol_size,
targetName, extraSpecs)
LOG.debug("Create volume replica: Remote Volume: %(targetName)s "
"Source Volume: %(sourceName)s "
"Method: CreateElementReplica "
"ReplicationService: %(service)s ElementName: "
"%(elementname)s SyncType: 6 SourceElement: "
"%(sourceelement)s.",
{'targetName': targetName,
'sourceName': sourceName,
'service': repServiceInstanceName,
'elementname': targetName,
'sourceelement': sourceInstance.path})
# create the remote replica and establish the link
rc, rdfDict = self.create_remote_replica(
conn, repServiceInstanceName, rdfGroupInstance,
sourceVolume, sourceInstance, targetInstance, extraSpecs,
self.rep_config)
LOG.info(_LI('Successfully setup replication for %s.'),
sourceVolume['name'])
replication_status = REPLICATION_ENABLED
replication_driver_data = rdfDict['keybindings']
return replication_status, replication_driver_data
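    # Illustrative sketch of the returned pair (all values hypothetical):
    #     replication_status = REPLICATION_ENABLED
    #     replication_driver_data = {
    #         'CreationClassName': 'Symm_StorageVolume',
    #         'SystemName': 'SYMMETRIX-+-000197811111',
    #         'DeviceID': '00123'}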
# called on delete volume after remove_and_reset_members
def cleanup_lun_replication(self, conn, volume, volumeName,
sourceInstance, extraSpecs):
"""Cleanup target volume on delete.
Extra logic if target is last in group.
:param conn: the connection to the ecom server
:param volume: the volume object
:param volumeName: the volume name
:param sourceInstance: the source volume instance
:param extraSpecs: extra specification
"""
LOG.debug('Starting cleanup replication from volume: '
'%s.', volumeName)
try:
loc = volume['provider_location']
rep_data = volume['replication_driver_data']
if (isinstance(loc, six.string_types)
and isinstance(rep_data, six.string_types)):
name = ast.literal_eval(loc)
replication_keybindings = ast.literal_eval(rep_data)
storageSystem = replication_keybindings['SystemName']
rdfGroupInstance, repServiceInstanceName = (
self.get_rdf_details(conn, storageSystem))
repExtraSpecs = self._get_replication_extraSpecs(
extraSpecs, self.rep_config)
targetVolumeDict = {'classname': name['classname'],
'keybindings': replication_keybindings}
targetInstance = self.utils.find_volume_instance(
conn, targetVolumeDict, volumeName)
# Ensure element name matches openstack id.
volumeElementName = (self.utils.
get_volume_element_name(volume['id']))
if volumeElementName != targetInstance['ElementName']:
targetInstance = None
if targetInstance is not None:
# clean-up target
targetControllerConfigService = (
self.utils.find_controller_configuration_service(
conn, storageSystem))
self.masking.remove_and_reset_members(
conn, targetControllerConfigService, targetInstance,
volumeName, repExtraSpecs, None, False)
self._cleanup_remote_target(
conn, repServiceInstanceName, sourceInstance,
targetInstance, extraSpecs, repExtraSpecs)
LOG.info(_LI('Successfully destroyed replication for '
'volume: %(volume)s'),
{'volume': volumeName})
else:
LOG.warning(_LW('Replication target not found for '
'replication-enabled volume: %(volume)s'),
{'volume': volumeName})
except Exception as e:
LOG.error(_LE('Cannot get necessary information to cleanup '
'replication target for volume: %(volume)s. '
'The exception received was: %(e)s. Manual '
'clean-up may be required. Please contact '
'your administrator.'),
{'volume': volumeName, 'e': e})
def _cleanup_remote_target(
self, conn, repServiceInstanceName, sourceInstance,
targetInstance, extraSpecs, repExtraSpecs):
"""Clean-up remote replication target after exception or on deletion.
:param conn: connection to the ecom server
:param repServiceInstanceName: the replication service
:param sourceInstance: the source volume instance
:param targetInstance: the target volume instance
:param extraSpecs: extra specifications
:param repExtraSpecs: replication extra specifications
"""
storageSystem = sourceInstance['SystemName']
targetStorageSystem = targetInstance['SystemName']
syncInstanceName = self.utils.find_rdf_storage_sync_sv_sv(
conn, sourceInstance, storageSystem,
targetInstance, targetStorageSystem,
extraSpecs, False)
if syncInstanceName is not None:
# Break the sync relationship.
self.break_rdf_relationship(
conn, repServiceInstanceName, syncInstanceName, extraSpecs)
targetStorageConfigService = (
self.utils.find_storage_configuration_service(
conn, targetStorageSystem))
deviceId = targetInstance['DeviceID']
volumeName = targetInstance['Name']
self._delete_from_pool_v3(
targetStorageConfigService, targetInstance, volumeName,
deviceId, repExtraSpecs)
def _cleanup_replication_source(
self, conn, volumeName, volumeDict, extraSpecs):
"""Cleanup a remote replication source volume on failure.
If replication setup fails at any stage on a new volume create,
        we must clean up the source instance as the cinder database won't
        be updated with the provider_location. This means the volume cannot
be properly deleted from the array by cinder.
:param conn: the connection to the ecom server
:param volumeName: the name of the volume
:param volumeDict: the source volume dictionary
:param extraSpecs: the extra specifications
"""
LOG.warning(_LW(
"Replication failed. Cleaning up the source volume. "
"Volume name: %(sourceName)s "),
{'sourceName': volumeName})
sourceInstance = self.utils.find_volume_instance(
conn, volumeDict, volumeName)
storageSystem = sourceInstance['SystemName']
deviceId = sourceInstance['DeviceID']
volumeName = sourceInstance['Name']
storageConfigService = (
self.utils.find_storage_configuration_service(
conn, storageSystem))
self._delete_from_pool_v3(
storageConfigService, sourceInstance, volumeName,
deviceId, extraSpecs)
def break_rdf_relationship(self, conn, repServiceInstanceName,
syncInstanceName, extraSpecs):
# Break the sync relationship.
LOG.debug("Suspending the SRDF relationship...")
self.provisionv3.break_replication_relationship(
conn, repServiceInstanceName, syncInstanceName,
SUSPEND_SRDF, extraSpecs, True)
LOG.debug("Detaching the SRDF relationship...")
self.provisionv3.break_replication_relationship(
conn, repServiceInstanceName, syncInstanceName,
DETACH_SRDF, extraSpecs, True)
def get_rdf_details(self, conn, storageSystem):
"""Retrieves an SRDF group instance.
:param conn: connection to the ecom server
:param storageSystem: the storage system name
        :return: rdfGroupInstance, repServiceInstanceName
"""
if not self.rep_config:
exception_message = (_("Replication is not configured on "
"backend: %(backend)s.") %
{'backend': self.configuration.safe_get(
'volume_backend_name')})
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
repServiceInstanceName = self.utils.find_replication_service(
conn, storageSystem)
RDFGroupName = self.rep_config['rdf_group_label']
LOG.info(_LI("Replication group: %(RDFGroup)s."),
{'RDFGroup': RDFGroupName})
rdfGroupInstance = self.provisionv3.get_rdf_group_instance(
conn, repServiceInstanceName, RDFGroupName)
LOG.info(_LI("Found RDF group instance: %(RDFGroup)s."),
{'RDFGroup': rdfGroupInstance})
if rdfGroupInstance is None:
exception_message = (_("Cannot find replication group: "
"%(RDFGroup)s.") %
                                 {'RDFGroup': RDFGroupName})
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
return rdfGroupInstance, repServiceInstanceName
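    # Illustrative configuration sketch (keys and values are hypothetical):
    # rdf_group_label comes from the replication_device entry for this
    # backend in cinder.conf, e.g.
    #     replication_device = target_device_id:000197811111,
    #                          remote_port_group:OS-PG-RE,
    #                          remote_pool:SRP_1,
    #                          rdf_group_label:OS-RDF-Group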
def failover_host(self, context, volumes, secondary_id=None):
"""Fails over the volume back and forth.
Driver needs to update following info for failed-over volume:
1. provider_location: update array details
2. replication_status: new status for replication-enabled volume
:param context: the context
:param volumes: the list of volumes to be failed over
:param secondary_id: the target backend
:return: secondary_id, volume_update_list
"""
volume_update_list = []
if not self.conn:
self.conn = self._get_ecom_connection()
if secondary_id != 'default':
if not self.failover:
self.failover = True
if self.rep_config:
secondary_id = self.rep_config['array']
else:
exception_message = (_(
"Backend %(backend)s is already failed over. "
"If you wish to failback, please append "
"'--backend_id default' to your command.")
% {'backend': self.configuration.safe_get(
'volume_backend_name')})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
else:
if self.failover:
self.failover = False
secondary_id = None
else:
exception_message = (_(
"Cannot failback backend %(backend)s- backend not "
"in failed over state. If you meant to failover, please "
"omit the '--backend_id default' from the command")
% {'backend': self.configuration.safe_get(
'volume_backend_name')})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
def failover_volume(vol, failover):
loc = vol['provider_location']
rep_data = vol['replication_driver_data']
try:
name = ast.literal_eval(loc)
replication_keybindings = ast.literal_eval(rep_data)
keybindings = name['keybindings']
storageSystem = keybindings['SystemName']
sourceInstance = self._find_lun(vol)
volumeDict = {'classname': name['classname'],
'keybindings': replication_keybindings}
targetInstance = self.utils.find_volume_instance(
self.conn, volumeDict, vol['name'])
targetStorageSystem = (
replication_keybindings['SystemName'])
repServiceInstanceName = (
self.utils.find_replication_service(
self.conn, storageSystem))
if failover:
storageSynchronizationSv = (
self.utils.find_rdf_storage_sync_sv_sv(
self.conn, sourceInstance, storageSystem,
targetInstance, targetStorageSystem,
extraSpecs))
self.provisionv3.failover_volume(
self.conn, repServiceInstanceName,
storageSynchronizationSv,
extraSpecs)
new_status = REPLICATION_FAILOVER
else:
storageSynchronizationSv = (
self.utils.find_rdf_storage_sync_sv_sv(
self.conn, targetInstance, targetStorageSystem,
sourceInstance, storageSystem,
extraSpecs, False))
self.provisionv3.failback_volume(
self.conn, repServiceInstanceName,
storageSynchronizationSv,
extraSpecs)
new_status = REPLICATION_ENABLED
# Transfer ownership to secondary_backend_id and
# update provider_location field
provider_location, replication_driver_data = (
self.utils.failover_provider_location(
name, replication_keybindings))
loc = six.text_type(provider_location)
rep_data = six.text_type(replication_driver_data)
except Exception as ex:
msg = _LE(
'Failed to failover volume %(volume_id)s. '
'Error: %(error)s.')
LOG.error(msg, {'volume_id': vol['id'],
'error': ex}, )
new_status = FAILOVER_ERROR
model_update = {'volume_id': vol['id'],
'updates':
{'replication_status': new_status,
'replication_driver_data': rep_data,
'provider_location': loc}}
volume_update_list.append(model_update)
for volume in volumes:
extraSpecs = self._initial_setup(volume)
if self.utils.is_replication_enabled(extraSpecs):
failover_volume(volume, self.failover)
else:
if self.failover:
# Since the array has been failed-over,
# volumes without replication should be in error.
volume_update_list.append({
'volume_id': volume['id'],
'updates': {'status': 'error'}})
else:
# This is a failback, so we will attempt
# to recover non-failed over volumes
recovery = self.recover_volumes_on_failback(volume)
volume_update_list.append(recovery)
LOG.info(_LI("Failover host complete"))
return secondary_id, volume_update_list
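    # Illustrative usage sketch (host and backend names are hypothetical):
    #     cinder failover-host myhost@VMAXBackend
    # and, to fail back afterwards:
    #     cinder failover-host myhost@VMAXBackend --backend_id default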
def recover_volumes_on_failback(self, volume):
"""Recover volumes on failback.
        On failback, attempt to recover non-RE (replication enabled)
        volumes from the primary array.
        :param volume: the volume object
:return: volume_update
"""
# check if volume still exists on the primary
volume_update = {'volume_id': volume['id']}
volumeInstance = self._find_lun(volume)
if not volumeInstance:
volume_update['updates'] = {'status': 'error'}
else:
try:
maskingview = self._is_volume_in_masking_view(volumeInstance)
except Exception:
maskingview = None
LOG.debug("Unable to determine if volume is in masking view.")
if not maskingview:
volume_update['updates'] = {'status': 'available'}
else:
volume_update['updates'] = {'status': 'in-use'}
return volume_update
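    # Illustrative sketch of a recovery entry (the id is a placeholder):
    #     {'volume_id': '<volume-uuid>', 'updates': {'status': 'available'}}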
def _is_volume_in_masking_view(self, volumeInstance):
"""Helper function to check if a volume is in a masking view.
:param volumeInstance: the volume instance
:return: maskingview
"""
maskingView = None
volumeInstanceName = volumeInstance.path
storageGroups = self.utils.get_storage_groups_from_volume(
self.conn, volumeInstanceName)
if storageGroups:
for storageGroup in storageGroups:
maskingView = self.utils.get_masking_view_from_storage_group(
self.conn, storageGroup)
if maskingView:
break
return maskingView
def extend_volume_is_replicated(self, volume, volumeInstance,
volumeName, newSize, extraSpecs):
"""Extend a replication-enabled volume.
Cannot extend volumes in a synchronization pair.
Must first break the relationship, extend them
        separately, then recreate the pair.
        :param volume: the volume object
:param volumeInstance: the volume instance
:param volumeName: the volume name
:param newSize: the new size the volume should be
:param extraSpecs: extra specifications
:return: rc, volumeDict
"""
if self.extendReplicatedVolume is True:
storageSystem = volumeInstance['SystemName']
loc = volume['provider_location']
rep_data = volume['replication_driver_data']
try:
name = ast.literal_eval(loc)
replication_keybindings = ast.literal_eval(rep_data)
targetStorageSystem = replication_keybindings['SystemName']
targetVolumeDict = {'classname': name['classname'],
'keybindings': replication_keybindings}
targetVolumeInstance = self.utils.find_volume_instance(
self.conn, targetVolumeDict, volumeName)
repServiceInstanceName = self.utils.find_replication_service(
self.conn, targetStorageSystem)
storageSynchronizationSv = (
self.utils.find_rdf_storage_sync_sv_sv(
self.conn, volumeInstance, storageSystem,
targetVolumeInstance, targetStorageSystem,
extraSpecs))
# volume must be removed from replication (storage) group
# before the replication relationship can be ended (cannot
# have a mix of replicated and non-replicated volumes as
# the SRDF groups become unmanageable).
controllerConfigService = (
self.utils.find_controller_configuration_service(
self.conn, storageSystem))
self.masking.remove_and_reset_members(
self.conn, controllerConfigService, volumeInstance,
volumeName, extraSpecs, None, False)
# repeat on target side
targetControllerConfigService = (
self.utils.find_controller_configuration_service(
self.conn, targetStorageSystem))
repExtraSpecs = self._get_replication_extraSpecs(
extraSpecs, self.rep_config)
self.masking.remove_and_reset_members(
self.conn, targetControllerConfigService,
targetVolumeInstance, volumeName, repExtraSpecs,
None, False)
LOG.info(_LI("Breaking replication relationship..."))
self.break_rdf_relationship(
self.conn, repServiceInstanceName,
storageSynchronizationSv, extraSpecs)
# extend the source volume
LOG.info(_LI("Extending source volume..."))
rc, volumeDict = self._extend_v3_volume(
volumeInstance, volumeName, newSize, extraSpecs)
# extend the target volume
LOG.info(_LI("Extending target volume..."))
self._extend_v3_volume(targetVolumeInstance, volumeName,
newSize, repExtraSpecs)
# re-create replication relationship
LOG.info(_LI("Recreating replication relationship..."))
self.setup_volume_replication(
self.conn, volume, volumeDict,
extraSpecs, targetVolumeInstance)
except Exception as e:
exception_message = (_("Error extending volume. "
"Error received was %(e)s") %
{'e': e})
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
return rc, volumeDict
else:
exceptionMessage = (_(
"Extending a replicated volume is not "
"permitted on this backend. Please contact "
"your administrator."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def create_remote_replica(self, conn, repServiceInstanceName,
rdfGroupInstance, sourceVolume, sourceInstance,
targetInstance, extraSpecs, rep_config):
"""Create a replication relationship with a target volume.
:param conn: the connection to the ecom server
:param repServiceInstanceName: the replication service
:param rdfGroupInstance: the SRDF group instance
:param sourceVolume: the source volume object
:param sourceInstance: the source volume instance
:param targetInstance: the target volume instance
:param extraSpecs: extra specifications
:param rep_config: the replication configuration
:return: rc, rdfDict - the target volume dictionary
"""
# remove source and target instances from their default storage groups
volumeName = sourceVolume['name']
storageSystemName = sourceInstance['SystemName']
controllerConfigService = (
self.utils.find_controller_configuration_service(
conn, storageSystemName))
repExtraSpecs = self._get_replication_extraSpecs(extraSpecs,
rep_config)
try:
self.masking.remove_and_reset_members(
conn, controllerConfigService, sourceInstance,
volumeName, extraSpecs, connector=None, reset=False)
targetStorageSystemName = targetInstance['SystemName']
targetControllerConfigService = (
self.utils.find_controller_configuration_service(
conn, targetStorageSystemName))
self.masking.remove_and_reset_members(
conn, targetControllerConfigService, targetInstance,
volumeName, repExtraSpecs, connector=None, reset=False)
# establish replication relationship
rc, rdfDict = self._create_remote_replica(
conn, repServiceInstanceName, rdfGroupInstance, volumeName,
sourceInstance, targetInstance, extraSpecs)
# add source and target instances to their replication groups
LOG.debug("Adding sourceInstance to default replication group.")
self.add_volume_to_replication_group(conn, controllerConfigService,
sourceInstance, volumeName,
extraSpecs)
LOG.debug("Adding targetInstance to default replication group.")
self.add_volume_to_replication_group(
conn, targetControllerConfigService, targetInstance,
volumeName, repExtraSpecs)
except Exception as e:
LOG.warning(
_LW("Remote replication failed. Cleaning up the target "
"volume and returning source volume to default storage "
"group. Volume name: %(cloneName)s "),
{'cloneName': volumeName})
self._cleanup_remote_target(
conn, repServiceInstanceName, sourceInstance,
targetInstance, extraSpecs, repExtraSpecs)
# Re-throw the exception.
exception_message = (_("Remote replication failed with exception:"
" %(e)s")
% {'e': six.text_type(e)})
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
return rc, rdfDict
def add_volume_to_replication_group(self, conn, controllerConfigService,
volumeInstance, volumeName,
extraSpecs):
"""Add a volume to the default replication group.
SE_ReplicationGroups are actually VMAX storage groups under
the covers, so we can use our normal storage group operations.
        :param conn: the connection to the ecom server
:param controllerConfigService: the controller config service
:param volumeInstance: the volume instance
:param volumeName: the name of the volume
:param extraSpecs: extra specifications
:return: storageGroupInstanceName
"""
storageGroupName = self.utils.get_v3_storage_group_name(
extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD],
False, True)
storageSystemName = volumeInstance['SystemName']
doDisableCompression = self.utils.is_compression_disabled(extraSpecs)
try:
storageGroupInstanceName = self._get_or_create_storage_group_v3(
extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD],
doDisableCompression, storageSystemName, extraSpecs,
is_re=True)
except Exception as e:
exception_message = (_("Failed to get or create replication"
"group. Exception received: %(e)s")
% {'e': six.text_type(e)})
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
self.masking.add_volume_to_storage_group(
conn, controllerConfigService, storageGroupInstanceName,
volumeInstance, volumeName, storageGroupName, extraSpecs)
return storageGroupInstanceName
def _create_remote_replica(
self, conn, repServiceInstanceName, rdfGroupInstance,
volumeName, sourceInstance, targetInstance, extraSpecs):
"""Helper function to establish a replication relationship.
:param conn: the connection to the ecom server
:param repServiceInstanceName: replication service instance
:param rdfGroupInstance: rdf group instance
:param volumeName: volume name
:param sourceInstance: the source volume instance
:param targetInstance: the target volume instance
:param extraSpecs: extra specifications
:return: rc, rdfDict - the target volume dictionary
"""
syncType = MIRROR_SYNC_TYPE
rc, job = self.provisionv3.create_remote_element_replica(
conn, repServiceInstanceName, volumeName, syncType,
sourceInstance, targetInstance, rdfGroupInstance, extraSpecs)
rdfDict = self.provisionv3.get_volume_dict_from_job(
self.conn, job['Job'])
return rc, rdfDict
def get_target_instance(self, sourceVolume, rep_config,
rdf_vol_size, targetName, extraSpecs):
"""Create a replication target for a given source volume.
:param sourceVolume: the source volume
:param rep_config: the replication configuration
:param rdf_vol_size: the size of the volume
:param targetName: the Element Name for the new volume
:param extraSpecs: the extra specifications
:return: the target instance
"""
repExtraSpecs = self._get_replication_extraSpecs(
extraSpecs, rep_config)
volumeSize = int(self.utils.convert_gb_to_bits(rdf_vol_size))
rc, volumeDict, storageSystemName = self._create_v3_volume(
sourceVolume, targetName, volumeSize, repExtraSpecs)
targetInstance = self.utils.find_volume_instance(
self.conn, volumeDict, targetName)
return targetInstance
def _get_replication_extraSpecs(self, extraSpecs, rep_config):
"""Get replication extra specifications.
Called when target array operations are necessary -
        on create, extend, etc., and when a volume is failed over.
:param extraSpecs: the extra specifications
:param rep_config: the replication configuration
:return: repExtraSpecs - dict
"""
repExtraSpecs = extraSpecs.copy()
repExtraSpecs[ARRAY] = rep_config['array']
repExtraSpecs[POOL] = rep_config['pool']
repExtraSpecs[PORTGROUPNAME] = rep_config['portgroup']
# if disable compression is set, check if target array is all flash
doDisableCompression = self.utils.is_compression_disabled(
extraSpecs)
if doDisableCompression:
if not self.utils.is_all_flash(self.conn, repExtraSpecs[ARRAY]):
repExtraSpecs.pop(self.utils.DISABLECOMPRESSION, None)
# Check to see if SLO and Workload are configured on the target array.
poolInstanceName, storageSystemName = (
self._get_pool_and_storage_system(repExtraSpecs))
storagePoolCapability = self.provisionv3.get_storage_pool_capability(
self.conn, poolInstanceName)
if extraSpecs[SLO]:
if storagePoolCapability:
try:
self.provisionv3.get_storage_pool_setting(
self.conn, storagePoolCapability, extraSpecs[SLO],
extraSpecs[WORKLOAD])
except Exception:
LOG.warning(
_LW("The target array does not support the storage "
"pool setting for SLO %(slo)s or workload "
"%(workload)s. Not assigning any SLO or "
"workload."),
{'slo': extraSpecs[SLO],
'workload': extraSpecs[WORKLOAD]})
repExtraSpecs[SLO] = None
if extraSpecs[WORKLOAD]:
repExtraSpecs[WORKLOAD] = None
else:
LOG.warning(_LW("Cannot determine storage pool settings of "
"target array. Not assigning any SLO or "
"workload"))
repExtraSpecs[SLO] = None
if extraSpecs[WORKLOAD]:
repExtraSpecs[WORKLOAD] = None
return repExtraSpecs
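    # Illustrative sketch (all values hypothetical): for source extra specs
    #     {ARRAY: '000197800123', POOL: 'SRP_1', SLO: 'Diamond',
    #      WORKLOAD: 'OLTP', ISV3: True, PORTGROUPNAME: 'OS-PG'}
    # the copy returned here swaps in rep_config's array, pool and port
    # group, and clears SLO/WORKLOAD if the target cannot honour them.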
def get_secondary_stats_info(self, rep_config, arrayInfo):
"""On failover, report on secondary array statistics.
:param rep_config: the replication configuration
:param arrayInfo: the array info
:return: secondaryInfo - dict
"""
secondaryInfo = arrayInfo.copy()
secondaryInfo['SerialNumber'] = six.text_type(rep_config['array'])
secondaryInfo['PoolName'] = rep_config['pool']
pool_info_specs = {ARRAY: secondaryInfo['SerialNumber'],
POOL: rep_config['pool'],
ISV3: True}
# Check to see if SLO and Workload are configured on the target array.
poolInstanceName, storageSystemName = (
self._get_pool_and_storage_system(pool_info_specs))
storagePoolCapability = self.provisionv3.get_storage_pool_capability(
self.conn, poolInstanceName)
if arrayInfo['SLO']:
if storagePoolCapability:
try:
self.provisionv3.get_storage_pool_setting(
self.conn, storagePoolCapability, arrayInfo['SLO'],
arrayInfo['Workload'])
except Exception:
LOG.info(
_LI("The target array does not support the storage "
"pool setting for SLO %(slo)s or workload "
"%(workload)s. SLO stats will not be reported."),
{'slo': arrayInfo['SLO'],
'workload': arrayInfo['Workload']})
secondaryInfo['SLO'] = None
if arrayInfo['Workload']:
secondaryInfo['Workload'] = None
if self.multiPoolSupportEnabled:
self.multiPoolSupportEnabled = False
else:
LOG.info(_LI("Cannot determine storage pool settings of "
"target array. SLO stats will not be reported."))
secondaryInfo['SLO'] = None
if arrayInfo['Workload']:
secondaryInfo['Workload'] = None
if self.multiPoolSupportEnabled:
self.multiPoolSupportEnabled = False
return secondaryInfo
| {
"content_hash": "53a80a5108330f22030431f7781b0da9",
"timestamp": "",
"source": "github",
"line_count": 6074,
"max_line_length": 79,
"avg_line_length": 44.87635824827132,
"alnum_prop": 0.5733053536772825,
"repo_name": "ge0rgi/cinder",
"id": "023bd626b2a6b7e5dc1e1536f05ff1ec1a7b635c",
"size": "273222",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/ocata",
"path": "cinder/volume/drivers/dell_emc/vmax/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask_restful import Api
from skeleton.restresources import TestResource
SECRET_KEY = 'CHANGEME: flask-session-insecure-secret-key'
def create_app():
app = Flask(__name__)
app.config.from_object(__name__)
api = Api(app)
api.add_resource(TestResource, '/')
return app
if __name__ == '__main__':
app = create_app()
app.run(debug=True)
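# Usage sketch (assumes Flask and flask_restful are installed and the
# skeleton package is importable): running `python skeleton/flaskapp.py`
# starts the debug server, after which
#     curl http://127.0.0.1:5000/
# is routed to TestResource; the response depends on that resource.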
| {
"content_hash": "0e27b4a3e473d240902706e17bd48008",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 58,
"avg_line_length": 19.85,
"alnum_prop": 0.6599496221662469,
"repo_name": "AxelPhi/pyhon-skeleton-project",
"id": "c780b452411ca3a8f1a82ca29c54b636935d2b2f",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skeleton/flaskapp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7948"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('form_processor', '0021_change_case_forms_related_name'),
]
operations = [
migrations.AlterField(
model_name='commcarecasesql',
name='case_json',
field=jsonfield.fields.JSONField(default=dict),
preserve_default=True,
),
]
| {
"content_hash": "bb01fc585ae21d1689a456f602dcac4a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 66,
"avg_line_length": 23.45,
"alnum_prop": 0.6247334754797441,
"repo_name": "qedsoftware/commcare-hq",
"id": "918704dc3ce590500249a989505161a7f1554429",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/form_processor/migrations/0022_set_default_value_for_case_json.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
from robofab.world import CurrentFont
f = CurrentFont()
print f.info.postscriptFullName
print f.info.openTypeNameDesigner
f.info.openTypeNameDesigner = "Jan van Krimpen"
print f.info.openTypeNameDesigner
print f.info.openTypeOS2VendorID
print f.info.unitsPerEm
print f.info.xHeight
print f.info.openTypeNameLicenseURL
# but you can set the values as well
f.info.postscriptUniqueID = 4309359
f.info.openTypeNameDesigner = "Eric Gill"
| {
"content_hash": "a288b4deb8cf88eeafa9d89f373155fb",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 47,
"avg_line_length": 25.705882352941178,
"alnum_prop": 0.8215102974828375,
"repo_name": "jamesgk/robofab",
"id": "32a4e1f0410eff361773ab9cdbd62f952c679bf8",
"size": "486",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Docs/Examples/objects/RInfo_00.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8714"
},
{
"name": "HTML",
"bytes": "4597"
},
{
"name": "Makefile",
"bytes": "6776"
},
{
"name": "Python",
"bytes": "902044"
}
],
"symlink_target": ""
} |
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import hashlib
import sys
sys.path.append("..")
from client import app
def encode(plaintext):
m = hashlib.md5()
origin = plaintext + app.config['SECRET_KEY']
m.update(origin)
return m.hexdigest()
if __name__ == "__main__":
print encode("admin")
| {
"content_hash": "22caf2456b689cab63cd61656d61e97b",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 83,
"avg_line_length": 34.51219512195122,
"alnum_prop": 0.7618374558303886,
"repo_name": "SpAiNiOr/open-hackathon",
"id": "51782e4464b2f0185b572a88781983ba418b8f3b",
"size": "1440",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "open-hackathon-client/src/client/md5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "191129"
},
{
"name": "HTML",
"bytes": "476108"
},
{
"name": "Java",
"bytes": "9272"
},
{
"name": "JavaScript",
"bytes": "600179"
},
{
"name": "Python",
"bytes": "768278"
},
{
"name": "Ruby",
"bytes": "1595"
},
{
"name": "Shell",
"bytes": "16003"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 0, transform = "Integration", sigma = 0.0, exog_count = 0, ar_order = 0); | {
"content_hash": "97e594019f77747637ce367e76bc070b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 168,
"avg_line_length": 38.285714285714285,
"alnum_prop": 0.7089552238805971,
"repo_name": "antoinecarme/pyaf",
"id": "f250e082262780567fa9f700581db66042059871",
"size": "268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Integration/trend_LinearTrend/cycle_0/ar_/test_artificial_1024_Integration_LinearTrend_0__0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import re
import getopt
import sys
# Faking test creation the same way CxxTests does
def main():
'''Main entry.'''
print "cxxtestgen fake cpp generation."
# parse options
try:
options, patterns = getopt.getopt( sys.argv[1:], 'o:r:',
['version', 'output=', 'runner=', 'gui=',
'error-printer', 'abort-on-fail', 'have-std', 'no-std',
'have-eh', 'no-eh', 'template=', 'include=',
'root', 'part', 'no-static-init', 'factor', 'longlong='] )
except getopt.error:
raise Exception("Error parsing input options.")
# get the output file name
outputFileName = None
for o, a in options:
if o in ('-o', '--output'):
outputFileName = a
    if outputFileName is None:
raise Exception("Cannot find the output file name.")
# read input file
if len(patterns) == 0:
raise Exception("Cannot find the input file name.")
inputFileName = patterns[0]
inputFile = open( inputFileName )
suite_re = re.compile( r'\bclass\s+(\w+)\b' )
test_re = re.compile( r'^([^/]|/[^/])*\bint\s+([Tt]est\w+)\s*\(\s*(void)?\s*\)' )
className = None
testName = None
while 1:
line = inputFile.readline(80)
if not line:
break
res = suite_re.search( line )
if res:
className = res.group(1)
res = test_re.search( line )
if res:
testName = res.group(2)
inputFile.close()
# check read names
    if className is None:
        raise Exception("Cannot find the class name.")
    if testName is None:
        raise Exception("Cannot find the test name.")
# write output file
outputFile = open( outputFileName, 'w' )
outputFile.write("// main: entry point for dummy tests.\n")
outputFile.write("#include \"%s\"\n" % inputFileName)
outputFile.write("int main( int argc, char** argv)\n")
outputFile.write("{\n")
outputFile.write(" %s test = %s();\n" % (className, className) )
outputFile.write(" return test.%s();\n" % testName)
outputFile.write("}\n")
outputFile.write("\n")
outputFile.close()
if __name__ == "__main__":
main()
| {
"content_hash": "ae5e5b4ea2b563fd3581b48a9ca7cfb1",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 96,
"avg_line_length": 33.955882352941174,
"alnum_prop": 0.5409268081420529,
"repo_name": "csnake-org/CSnake",
"id": "a92b89d21ae7d8c4901189f1044ec347567b4c4c",
"size": "2327",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/data/third party/CxxTest/CxxTest/cxxtestgen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1003"
},
{
"name": "C++",
"bytes": "15104"
},
{
"name": "CMake",
"bytes": "67110"
},
{
"name": "NSIS",
"bytes": "6255"
},
{
"name": "Objective-C",
"bytes": "495"
},
{
"name": "Python",
"bytes": "469870"
},
{
"name": "Shell",
"bytes": "685"
}
],
"symlink_target": ""
} |
import sys
from Bio import SeqIO
num_ivywrel = 0
total_length = 0
proteins = SeqIO.parse(sys.argv[1], "fasta")
for record in proteins:
seq = record.seq
for char in seq:
if char in 'IVYWREL':
num_ivywrel += 1
total_length += 1
f_ivywrel = float(num_ivywrel)/float(total_length)
print(f_ivywrel)
print("T_opt estimate according to Zeldovich: " + str(937.0*float(f_ivywrel) - 335.0))
| {
"content_hash": "a12123da7d3b12eb6dc0fdfc1b8705e4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 86,
"avg_line_length": 24.705882352941178,
"alnum_prop": 0.6547619047619048,
"repo_name": "Tancata/phylo",
"id": "a51c2f3a6575fe4d55ce690fa03aa035cec62e2b",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "estimate_OGT.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155099"
},
{
"name": "Shell",
"bytes": "1600"
}
],
"symlink_target": ""
} |
from io import BytesIO
import datetime
import json
import os
import tarfile
import unittest
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import docker_image as v2_2_image
TEST_DATA_TARGET_BASE = 'testdata'
DIR_PERMISSION = 0o700
PASSWD_FILE_MODE = 0o644
# Dictionary of key to value mappings in the Bazel stamp file
STAMP_DICT = {}
def TestRunfilePath(*args):
"""Convert a path to a file target to the runfile path"""
return os.path.join(os.environ['TEST_SRCDIR'], 'io_bazel_rules_docker', *args)
def TestData(name):
return TestRunfilePath(TEST_DATA_TARGET_BASE, name)
def TestImage(name):
return v2_2_image.FromTarball(TestData(name + '.tar'))
def TestBundleImage(name, image_name):
return v2_2_image.FromTarball(
TestData(name + '.tar'), name=docker_name.Tag(image_name, strict=False))
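# Illustrative sketch (paths depend on the runner's TEST_SRCDIR): calling
# TestImage('files_base') opens
#     $TEST_SRCDIR/io_bazel_rules_docker/testdata/files_base.tar
# while TestBundleImage additionally selects one tagged image from a bundle.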
class ImageTest(unittest.TestCase):
def assertTarballContains(self, tar, paths):
self.maxDiff = None
self.assertEqual(paths, tar.getnames())
def assertLayerNContains(self, img, n, paths):
buf = BytesIO(img.blob(img.fs_layers()[n]))
with tarfile.open(fileobj=buf, mode='r') as layer:
self.assertTarballContains(layer, paths)
def assertNonZeroMtimesInTopLayer(self, img):
buf = BytesIO(img.blob(img.fs_layers()[0]))
with tarfile.open(fileobj=buf, mode='r') as layer:
for member in layer.getmembers():
self.assertNotEqual(member.mtime, 0)
def assertTopLayerContains(self, img, paths):
self.assertLayerNContains(img, 0, paths)
def assertConfigEqual(self, img, key, value):
cfg = json.loads(img.config_file())
self.assertEqual(value, cfg.get('config', {}).get(key))
def assertTarInfo(self, tarinfo, uid, gid, mode, isdir):
self.assertEqual(tarinfo.uid, uid)
self.assertEqual(tarinfo.gid, gid)
self.assertEqual(tarinfo.mode, mode)
self.assertEqual(tarinfo.isdir(), isdir)
def test_files_base(self):
with TestImage('files_base') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, ['.', './foo'])
def test_files_with_file_base(self):
with TestImage('files_with_files_base') as img:
self.assertEqual(2, len(img.fs_layers()))
self.assertTopLayerContains(img, ['.', './bar'])
def test_files_in_layer_with_file_base(self):
with TestImage('files_in_layer_with_files_base') as img:
self.assertEqual(3, len(img.fs_layers()))
self.assertLayerNContains(img, 2, ['.', './foo'])
self.assertLayerNContains(img, 1, ['.', './baz'])
self.assertLayerNContains(img, 0, ['.', './bar'])
def test_tar_base(self):
with TestImage('tar_base') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, [
'./usr', './usr/bin', './usr/bin/unremarkabledeath'])
# Check that this doesn't have a configured entrypoint.
self.assertConfigEqual(img, 'Entrypoint', None)
def test_tar_with_mtimes_preserved(self):
with TestImage('tar_with_mtimes_preserved') as img:
self.assertNonZeroMtimesInTopLayer(img)
def test_tar_with_tar_base(self):
with TestImage('tar_with_tar_base') as img:
self.assertEqual(2, len(img.fs_layers()))
self.assertTopLayerContains(img, [
'./asdf', './usr', './usr/bin',
'./usr/bin/miraclegrow'])
def test_tars_in_layer_with_tar_base(self):
with TestImage('tars_in_layer_with_tar_base') as img:
self.assertEqual(3, len(img.fs_layers()))
self.assertTopLayerContains(img, [
'./asdf', './usr', './usr/bin',
'./usr/bin/miraclegrow'])
self.assertLayerNContains(
img, 1, ['.', './three', './three/three'])
self.assertLayerNContains(img, 2, [
'./usr', './usr/bin', './usr/bin/unremarkabledeath'])
def test_directory_with_tar_base(self):
with TestImage('directory_with_tar_base') as img:
self.assertEqual(2, len(img.fs_layers()))
self.assertTopLayerContains(img, [
'.', './foo', './foo/asdf', './foo/usr',
'./foo/usr/bin', './foo/usr/bin/miraclegrow'])
def test_files_with_tar_base(self):
with TestImage('files_with_tar_base') as img:
self.assertEqual(2, len(img.fs_layers()))
self.assertTopLayerContains(img, ['.', './bar'])
def test_workdir_with_tar_base(self):
with TestImage('workdir_with_tar_base') as img:
self.assertEqual(2, len(img.fs_layers()))
self.assertTopLayerContains(img, [])
# Check that the working directory property has been properly configured.
self.assertConfigEqual(img, 'WorkingDir', '/tmp')
def test_tar_with_files_base(self):
with TestImage('tar_with_files_base') as img:
self.assertEqual(2, len(img.fs_layers()))
self.assertTopLayerContains(img, [
'./asdf', './usr', './usr/bin',
'./usr/bin/miraclegrow'])
def test_docker_tarball_base(self):
with TestImage('docker_tarball_base') as img:
self.assertEqual(3, len(img.fs_layers()))
self.assertTopLayerContains(img, ['.', './foo'])
def test_layers_with_docker_tarball_base(self):
with TestImage('layers_with_docker_tarball_base') as img:
self.assertEqual(5, len(img.fs_layers()))
self.assertTopLayerContains(img, ['.', './foo'])
self.assertLayerNContains(
img, 1, ['.', './three', './three/three'])
self.assertLayerNContains(img, 2, ['.', './baz'])
def test_base_with_entrypoint(self):
with TestImage('base_with_entrypoint') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertConfigEqual(img, 'Entrypoint', ['/bar'])
self.assertConfigEqual(img, 'ExposedPorts', {'8080/tcp': {}})
def test_dashdash_entrypoint(self):
with TestImage('dashdash_entrypoint') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertConfigEqual(img, 'Entrypoint', ['/bar', '--'])
def test_derivative_with_cmd(self):
with TestImage('derivative_with_cmd') as img:
self.assertEqual(3, len(img.fs_layers()))
self.assertConfigEqual(img, 'Entrypoint', ['/bar'])
self.assertConfigEqual(img, 'Cmd', ['arg1', 'arg2'])
self.assertConfigEqual(
img, 'ExposedPorts', {'8080/tcp': {}, '80/tcp': {}})
def test_derivative_with_volume(self):
with TestImage('derivative_with_volume') as img:
self.assertEqual(2, len(img.fs_layers()))
# Check that the topmost layer has the volumes exposed by the bottom
# layer, and itself.
self.assertConfigEqual(img, 'Volumes', {
'/asdf': {}, '/blah': {}, '/logs': {}
})
def test_with_unix_epoch_creation_time(self):
with TestImage('with_unix_epoch_creation_time') as img:
self.assertEqual(2, len(img.fs_layers()))
cfg = json.loads(img.config_file())
self.assertEqual('2009-02-13T23:31:30.119999885Z',
cfg.get('created', ''))
def test_with_millisecond_unix_epoch_creation_time(self):
with TestImage('with_millisecond_unix_epoch_creation_time') as img:
self.assertEqual(2, len(img.fs_layers()))
cfg = json.loads(img.config_file())
self.assertEqual('2009-02-13T23:31:30.12345004Z',
cfg.get('created', ''))
def test_with_rfc_3339_creation_time(self):
with TestImage('with_rfc_3339_creation_time') as img:
self.assertEqual(2, len(img.fs_layers()))
cfg = json.loads(img.config_file())
self.assertEqual('1989-05-03T12:58:12.345Z',
cfg.get('created', ''))
# This test is flaky. If it fails, do a bazel clean --expunge_async and try again
def test_with_stamped_creation_time(self):
with TestImage('with_stamped_creation_time') as img:
self.assertEqual(2, len(img.fs_layers()))
cfg = json.loads(img.config_file())
created_str = cfg.get('created', '')
self.assertNotEqual('', created_str)
now = datetime.datetime.utcnow()
created = datetime.datetime.strptime(
created_str, '%Y-%m-%dT%H:%M:%SZ')
# The BUILD_TIMESTAMP is set by Bazel to Java's CurrentTimeMillis / 1000,
# or env['SOURCE_DATE_EPOCH']. For Bazel versions before 0.12, there was
# a bug where CurrentTimeMillis was not divided by 1000.
# See https://github.com/bazelbuild/bazel/issues/2240
# https://bazel-review.googlesource.com/c/bazel/+/48211
# Assume that any value for 'created' within a reasonable bound is fine.
self.assertLessEqual(now - created, datetime.timedelta(minutes=15))
# This test is flaky. If it fails, do a bazel clean --expunge_async and try again
def test_with_default_stamped_creation_time(self):
# {BUILD_TIMESTAMP} should be the default when `stamp = True` and
# `creation_time` isn't explicitly defined.
with TestImage('with_default_stamped_creation_time') as img:
self.assertEqual(2, len(img.fs_layers()))
cfg = json.loads(img.config_file())
created_str = cfg.get('created', '')
self.assertNotEqual('', created_str)
now = datetime.datetime.utcnow()
created = datetime.datetime.strptime(
created_str, '%Y-%m-%dT%H:%M:%SZ')
# The BUILD_TIMESTAMP is set by Bazel to Java's CurrentTimeMillis / 1000,
# or env['SOURCE_DATE_EPOCH']. For Bazel versions before 0.12, there was
# a bug where CurrentTimeMillis was not divided by 1000.
# See https://github.com/bazelbuild/bazel/issues/2240
# https://bazel-review.googlesource.com/c/bazel/+/48211
# Assume that any value for 'created' within a reasonable bound is fine.
self.assertLessEqual(now - created, datetime.timedelta(minutes=15))
def test_with_base_stamped_image(self):
# {BUILD_TIMESTAMP} should be the default when `stamp = True` is configured
# in the base image and `creation_time` isn't explicitly defined.
with TestImage('with_base_stamped_image') as img:
self.assertEqual(3, len(img.fs_layers()))
cfg = json.loads(img.config_file())
created_str = cfg.get('created', '')
self.assertNotEqual('', created_str)
now = datetime.datetime.utcnow()
created = datetime.datetime.strptime(
created_str, '%Y-%m-%dT%H:%M:%SZ')
# The BUILD_TIMESTAMP is set by Bazel to Java's CurrentTimeMillis / 1000,
# or env['SOURCE_DATE_EPOCH']. For Bazel versions before 0.12, there was
# a bug where CurrentTimeMillis was not divided by 1000.
# See https://github.com/bazelbuild/bazel/issues/2240
# https://bazel-review.googlesource.com/c/bazel/+/48211
# Assume that any value for 'created' within a reasonable bound is fine.
self.assertLessEqual(now - created, datetime.timedelta(minutes=15))
def test_with_env(self):
with TestBundleImage(
'with_env', 'bazel/%s:with_env' % TEST_DATA_TARGET_BASE) as img:
self.assertEqual(2, len(img.fs_layers()))
self.assertConfigEqual(
img, 'Env', ['bar=blah blah blah', 'foo=/asdf'])
def test_layers_with_env(self):
with TestImage('layers_with_env') as img:
self.assertEqual(3, len(img.fs_layers()))
self.assertConfigEqual(
img, 'Env', [u'PATH=$PATH:/tmp/a:/tmp/b:/tmp/c', u'a=b', u'x=y'])
def test_dummy_repository(self):
# We allow users to specify an alternate repository name instead of 'bazel/'
# to prefix their image names.
name = 'gcr.io/dummy/%s:dummy_repository' % TEST_DATA_TARGET_BASE
with TestBundleImage('dummy_repository', name) as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, ['.', './foo'])
def test_with_double_env(self):
with TestImage('with_double_env') as img:
self.assertEqual(3, len(img.fs_layers()))
self.assertConfigEqual(img, 'Env', [
'bar=blah blah blah',
'baz=/asdf blah blah blah',
'foo=/asdf'])
def test_with_label(self):
with TestImage('with_label') as img:
self.assertEqual(2, len(img.fs_layers()))
self.assertConfigEqual(img, 'Labels', {
'com.example.bar': '{"name": "blah"}',
'com.example.baz': 'qux',
'com.example.foo': '{"name": "blah"}',
})
def test_with_double_label(self):
with TestImage('with_double_label') as img:
self.assertEqual(3, len(img.fs_layers()))
self.assertConfigEqual(img, 'Labels', {
'com.example.bar': '{"name": "blah"}',
'com.example.baz': 'qux',
'com.example.foo': '{"name": "blah"}',
'com.example.qux': '{"name": "blah-blah"}',
})
def test_with_user(self):
with TestImage('with_user') as img:
self.assertEqual(2, len(img.fs_layers()))
self.assertConfigEqual(img, 'User', 'nobody')
def test_data_path(self):
# Without data_path = "." the file will be inserted as `./test`
# (since it is the path in the package) and with data_path = "."
# the file will be inserted relatively to the testdata package
# (so `./test/test`).
with TestImage('no_data_path_image') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, ['.', './test'])
with TestImage('data_path_image') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, ['.', './test', './test/test'])
# With an absolute path for data_path, we should strip that prefix
# from the files' paths. Since the testdata images are in
# //testdata and data_path is set to
# "/tools/build_defs", we should have `docker` as the top-level
# directory.
with TestImage('absolute_data_path_image') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, [
'.', './testdata', './testdata/test', './testdata/test/test'])
# With data_path = "/", we expect the entire path from the repository
# root.
with TestImage('root_data_path_image') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, [
'.', './testdata', './testdata/test', './testdata/test/test'])
def test_flattened(self):
# Test the flattened tarball produced by the python flattener
# binary from google/containerregistry.
with tarfile.open(TestData('flat.tar'), mode='r') as tar:
self.assertTarballContains(tar, [
'.', '/usr', '/usr/bin', '/usr/bin/java', './foo'])
def test_flattened_from_tarball_base(self):
# Test the flattened tarball produced by the Go flattener where the
# image being flattened derived from an image specified as a tarball.
# File "bar" came from the base image specified as a tarball and "baz"
# came from the top level image.
with tarfile.open(TestData('flatten_with_tarball_base.tar'), mode='r') as tar:
self.assertTarballContains(tar, [
'.', './baz', './bar', ])
def test_bundle(self):
with TestBundleImage('stamped_bundle_test', "example.com/aaaaa{BUILD_USER}:stamped".format(
BUILD_USER=STAMP_DICT['BUILD_USER']
)) as img:
self.assertEqual(2, len(img.fs_layers()))
with TestBundleImage('bundle_test', 'docker.io/ubuntu:latest') as img:
self.assertEqual(1, len(img.fs_layers()))
with TestBundleImage(
'bundle_test', 'us.gcr.io/google-appengine/base:fresh') as img:
self.assertEqual(2, len(img.fs_layers()))
with TestBundleImage(
'bundle_test', 'gcr.io/google-containers/pause:2.0') as img:
self.assertEqual(3, len(img.fs_layers()))
def test_with_stamped_label(self):
with TestImage('with_stamp_label') as img:
self.assertEqual(2, len(img.fs_layers()))
self.assertConfigEqual(
img, 'Labels', {'BUILDER': STAMP_DICT['BUILD_USER']})
def test_pause_based(self):
with TestImage('pause_based') as img:
self.assertEqual(3, len(img.fs_layers()))
def test_pause_piecemeal(self):
with TestImage('pause_piecemeal/image') as img:
self.assertEqual(2, len(img.fs_layers()))
def test_pause_piecemeal_gz(self):
with TestImage('pause_piecemeal_gz/image') as img:
self.assertEqual(2, len(img.fs_layers()))
def test_build_with_tag(self):
with TestBundleImage('build_with_tag', 'gcr.io/build/with:tag') as img:
self.assertEqual(3, len(img.fs_layers()))
def test_with_passwd(self):
with TestImage('with_passwd') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, ['./etc', './etc/passwd'])
buf = BytesIO(img.blob(img.fs_layers()[0]))
with tarfile.open(fileobj=buf, mode='r') as layer:
content = layer.extractfile('./etc/passwd').read()
self.assertEqual(
b'root:x:0:0:Root:/root:/rootshell\nfoobar:x:1234:2345:myusernameinfo:/myhomedir:/myshell\nnobody:x:65534:65534:nobody with no home:/nonexistent:/sbin/nologin\n',
content)
self.assertEqual(layer.getmember(
"./etc/passwd").mode, PASSWD_FILE_MODE)
def test_with_passwd_tar(self):
with TestImage('with_passwd_tar') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(
img, ['.', './etc', './etc/password', './root', './myhomedir'])
buf = BytesIO(img.blob(img.fs_layers()[0]))
with tarfile.open(fileobj=buf, mode='r') as layer:
content = layer.extractfile('./etc/password').read()
self.assertEqual(
b'root:x:0:0:Root:/root:/rootshell\nfoobar:x:1234:2345:myusernameinfo:/myhomedir:/myshell\nnobody:x:65534:65534:nobody with no home:/nonexistent:/sbin/nologin\n',
content)
self.assertEqual(layer.getmember(
"./etc/password").mode, PASSWD_FILE_MODE)
self.assertTarInfo(layer.getmember("./root"),
0, 0, DIR_PERMISSION, True)
self.assertTarInfo(layer.getmember(
"./myhomedir"), 1234, 2345, DIR_PERMISSION, True)
def test_with_group(self):
with TestImage('with_group') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, ['./etc', './etc/group'])
buf = BytesIO(img.blob(img.fs_layers()[0]))
with tarfile.open(fileobj=buf, mode='r') as layer:
content = layer.extractfile('./etc/group').read()
self.assertEqual(
b'root:x:0:\nfoobar:x:2345:foo,bar,baz\n', content)
def test_with_empty_files(self):
with TestImage('with_empty_files') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, ['.', './file1', './file2'])
buf = BytesIO(img.blob(img.fs_layers()[0]))
with tarfile.open(fileobj=buf, mode='r') as layer:
for name in ('./file1', './file2'):
memberfile = layer.getmember(name)
self.assertEqual(0, memberfile.size)
self.assertEqual(0o777, memberfile.mode)
def test_with_empty_dirs(self):
with TestImage('with_empty_dirs') as img:
self.assertEqual(1, len(img.fs_layers()))
self.assertTopLayerContains(img, ['.', './etc', './foo', './bar'])
buf = BytesIO(img.blob(img.fs_layers()[0]))
with tarfile.open(fileobj=buf, mode='r') as layer:
for name in ('./etc', './foo', './bar'):
memberfile = layer.getmember(name)
self.assertEqual(tarfile.DIRTYPE, memberfile.type)
self.assertEqual(0o777, memberfile.mode)
def test_py_image(self):
with TestImage('py_image') as img:
# Check the application layer, which is on top.
self.assertTopLayerContains(img, [
'.',
'./app',
'./app/testdata',
'./app/testdata/py_image.binary.runfiles',
'./app/testdata/py_image.binary.runfiles/io_bazel_rules_docker',
'./app/testdata/py_image.binary.runfiles/io_bazel_rules_docker/testdata',
'./app/testdata/py_image.binary.runfiles/io_bazel_rules_docker/testdata/py_image.py',
'./app/testdata/py_image.binary.runfiles/io_bazel_rules_docker/testdata/py_image.binary',
'./app/testdata/py_image.binary.runfiles/io_bazel_rules_docker/testdata/BUILD',
'./app/testdata/py_image.binary.runfiles/io_bazel_rules_docker/testdata/__init__.py',
'./app/io_bazel_rules_docker',
# TODO(mattmoor): The path normalization for symlinks should match
# files to avoid this redundancy.
'/app',
'/app/testdata',
'/app/testdata/py_image.binary',
'/app/testdata/py_image.binary.runfiles',
'/app/testdata/py_image.binary.runfiles/io_bazel_rules_docker',
'/app/testdata/py_image.binary.runfiles/io_bazel_rules_docker/external',
])
# Below that, we have a layer that generates symlinks for the library layer.
self.assertLayerNContains(img, 1, [
'.',
'/app',
'/app/testdata',
'/app/testdata/py_image.binary.runfiles',
'/app/testdata/py_image.binary.runfiles/io_bazel_rules_docker',
'/app/testdata/py_image.binary.runfiles/io_bazel_rules_docker/testdata',
'/app/testdata/py_image.binary.runfiles/io_bazel_rules_docker/testdata/py_image_library.py',
])
# Check the library layer, which is two below our application layer.
self.assertLayerNContains(img, 2, [
'.',
'./app',
'./app/io_bazel_rules_docker',
'./app/io_bazel_rules_docker/testdata',
'./app/io_bazel_rules_docker/testdata/py_image_library.py',
])
def test_windows_image_manifest_with_foreign_layers(self):
imgPath = TestRunfilePath(
"tests", "container", "basic_windows_image.tar")
with v2_2_image.FromTarball(imgPath) as img:
# Ensure the image manifest in the tarball includes the foreign layer.
self.assertIn("https://go.microsoft.com/fwlink/?linkid=873595",
img.manifest())
def test_windows_image_manifest_with_foreign_layers_from_tar(self):
imgPath = TestRunfilePath(
"tests", "container", "basic_windows_image_from_tar.tar")
with v2_2_image.FromTarball(imgPath) as img:
# Ensure the image manifest in the tarball includes the foreign layer.
self.assertIn("https://go.microsoft.com/fwlink/?linkid=873595",
img.manifest())
def test_py_image_with_symlinks_in_data(self):
with TestImage('py_image_with_symlinks_in_data') as img:
# Check the application layer, which is on top.
self.assertTopLayerContains(img, [
'.',
'./app',
'./app/testdata',
'./app/testdata/py_image_with_symlinks_in_data.binary.runfiles',
'./app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker',
'./app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker/testdata',
'./app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker/testdata/py_image.py',
'./app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker/testdata/py_image_with_symlinks_in_data.binary',
'./app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker/testdata/foo.txt',
'./app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker/testdata/__init__.py',
'./app/io_bazel_rules_docker',
# TODO(mattmoor): The path normalization for symlinks should match
# files to avoid this redundancy.
'/app',
'/app/testdata',
'/app/testdata/py_image_with_symlinks_in_data.binary.runfiles',
'/app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker',
'/app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker/foo-symlink.txt',
'/app/testdata/py_image_with_symlinks_in_data.binary',
'/app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker/external',
])
# Below that, we have a layer that generates symlinks for the library layer.
self.assertLayerNContains(img, 1, [
'.',
'/app',
'/app/testdata',
'/app/testdata/py_image_with_symlinks_in_data.binary.runfiles',
'/app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker',
'/app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker/testdata',
'/app/testdata/py_image_with_symlinks_in_data.binary.runfiles/io_bazel_rules_docker/testdata/py_image_library.py',
])
# Check the library layer, which is two below our application layer.
self.assertLayerNContains(img, 2, [
'.',
'./app',
'./app/io_bazel_rules_docker',
'./app/io_bazel_rules_docker/testdata',
'./app/io_bazel_rules_docker/testdata/py_image_library.py',
])
def test_py_image_complex(self):
with TestImage('py_image_complex') as img:
# bazel-bin/testdata/py_image_complex-layer.tar
self.assertTopLayerContains(img, [
'.',
'./app',
'./app/testdata',
'./app/testdata/py_image_complex.binary.runfiles',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata/py_image_complex.py',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata/py_image_complex.binary',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six/__init__.py',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata/test',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata/test/__init__.py',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/__init__.py',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict/__init__.py',
'./app/testdata/py_image_complex.binary.runfiles/__init__.py',
'./app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata/__init__.py',
'./app/io_bazel_rules_docker',
'/app',
'/app/testdata',
'/app/testdata/py_image_complex.binary',
'/app/testdata/py_image_complex.binary.runfiles',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/external'
])
# bazel-bin/testdata/py_image_complex.3-symlinks-layer.tar
self.assertLayerNContains(img, 1, [
'.',
'/app',
'/app/testdata',
'/app/testdata/py_image_complex.binary.runfiles',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata/py_image_complex_library.py',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata/py_image_library_using_six.py',
])
# bazel-bin/testdata/py_image_complex.3-layer.tar
self.assertLayerNContains(img, 2, [
'.',
'./app',
'./app/io_bazel_rules_docker',
'./app/io_bazel_rules_docker/testdata',
'./app/io_bazel_rules_docker/testdata/py_image_complex_library.py',
'./app/io_bazel_rules_docker/testdata/py_image_library_using_six.py',
])
# bazel-bin/testdata/py_image_complex.2-symlinks-layer.tar
self.assertLayerNContains(img, 3, [
'.',
'/app',
'/app/testdata',
'/app/testdata/py_image_complex.binary.runfiles',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata/test',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker/testdata/test/py_image_library_using_addict.py',
])
# bazel-bin/testdata/py_image_complex.2-layer.tar
self.assertLayerNContains(img, 4, [
'.',
'./app',
'./app/io_bazel_rules_docker',
'./app/io_bazel_rules_docker/testdata',
'./app/io_bazel_rules_docker/testdata/test',
'./app/io_bazel_rules_docker/testdata/test/py_image_library_using_addict.py',
])
# bazel-bin/testdata/py_image_complex.1-symlinks-layer.tar
self.assertLayerNContains(img, 5, [
'.',
'/app',
'/app/testdata',
'/app/testdata/py_image_complex.binary.runfiles',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/__init__.py',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six/six.py',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/DESCRIPTION.rst',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/METADATA',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/RECORD',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/WHEEL',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/metadata.json',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/top_level.txt',
])
# bazel-bin/testdata/py_image_complex.1-layer.tar
self.assertLayerNContains(img, 6, [
'.',
'./app',
'./app/io_bazel_rules_docker_pip_deps',
'./app/io_bazel_rules_docker_pip_deps/pypi__six',
'./app/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info',
'./app/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/__init__.py',
'./app/io_bazel_rules_docker_pip_deps/pypi__six/six.py',
'./app/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/DESCRIPTION.rst',
'./app/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/METADATA',
'./app/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/RECORD',
'./app/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/WHEEL',
'./app/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/metadata.json',
'./app/io_bazel_rules_docker_pip_deps/pypi__six/six-1.11.0.dist-info/top_level.txt',
])
# bazel-bin/testdata/py_image_complex.0-symlinks-layer.tar
self.assertLayerNContains(img, 7, [
'.',
'/app',
'/app/testdata',
'/app/testdata/py_image_complex.binary.runfiles',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info/__init__.py',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict/addict',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict/addict/__init__.py',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict/addict/addict.py',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info/METADATA',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info/RECORD',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info/WHEEL',
'/app/testdata/py_image_complex.binary.runfiles/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info/top_level.txt',
])
# bazel-bin/testdata/py_image_complex.0-layer.tar
self.assertLayerNContains(img, 8, [
'.',
'./app',
'./app/io_bazel_rules_docker_pip_deps',
'./app/io_bazel_rules_docker_pip_deps/pypi__addict',
'./app/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info',
'./app/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info/__init__.py',
'./app/io_bazel_rules_docker_pip_deps/pypi__addict/addict',
'./app/io_bazel_rules_docker_pip_deps/pypi__addict/addict/__init__.py',
'./app/io_bazel_rules_docker_pip_deps/pypi__addict/addict/addict.py',
'./app/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info/METADATA',
'./app/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info/RECORD',
'./app/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info/WHEEL',
'./app/io_bazel_rules_docker_pip_deps/pypi__addict/addict-2.1.2.dist-info/top_level.txt',
])
def test_java_image(self):
with TestImage('java_image') as img:
# Check the application layer, which is on top.
self.assertTopLayerContains(img, [
'.',
'./app',
'./app/io_bazel_rules_docker',
'./app/io_bazel_rules_docker/testdata',
'./app/io_bazel_rules_docker/testdata/java_image.binary.jar',
'./app/io_bazel_rules_docker/testdata/java_image.binary',
'./app/io_bazel_rules_docker/testdata/BUILD',
'./app/io_bazel_rules_docker/testdata/java_image.classpath'
])
self.assertLayerNContains(img, 1, [
'.',
'./app',
'./app/io_bazel_rules_docker',
'./app/io_bazel_rules_docker/testdata',
'./app/io_bazel_rules_docker/testdata/libjava_image_library.jar',
'./app/com_google_guava_guava',
'./app/com_google_guava_guava/guava-18.0.jar',
])
def test_war_image(self):
with TestImage('war_image') as img:
# Check the application layer, which is on top.
self.assertTopLayerContains(img, [
'.',
'./jetty',
'./jetty/webapps',
'./jetty/webapps/ROOT',
'./jetty/webapps/ROOT/WEB-INF',
'./jetty/webapps/ROOT/WEB-INF/lib',
'./jetty/webapps/ROOT/WEB-INF/lib/libwar_image.library.jar'
])
self.assertLayerNContains(img, 1, [
'.',
'./jetty',
'./jetty/webapps',
'./jetty/webapps/ROOT',
'./jetty/webapps/ROOT/WEB-INF',
'./jetty/webapps/ROOT/WEB-INF/lib',
'./jetty/webapps/ROOT/WEB-INF/lib/javax.servlet-api-3.0.1.jar',
])
def test_nodejs_image(self):
self.maxDiff = None
with TestImage('nodejs_image') as img:
# TODO: remove all '/app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/external'
# once --noexternal_legacy_runfiles is enabled
# https://github.com/bazelbuild/rules_docker/issues/1350
# Check the application layer (top layer), which also contains symlinks to the bottom layers.
self.assertTopLayerContains(img, [
'.',
'./app',
'./app/testdata',
'./app/testdata/nodejs_image_binary.runfiles',
'./app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker',
'./app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/testdata',
'./app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/testdata/nodejs_image.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/internal',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/internal/linker',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/internal/linker/index.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/internal/runfiles',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/internal/runfiles/index.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/internal/runfiles/runfile_helper_main.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/internal/node',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/internal/node/node_patches.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/internal/coverage',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/internal/coverage/lcov_merger-js.js',
'./app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/testdata/_nodejs_image_binary.module_mappings.json',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/bazelbuild',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/bazelbuild/bazel',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/bazelbuild/bazel/tools',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/bazelbuild/bazel/tools/bash',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/bazelbuild/bazel/tools/bash/runfiles',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/bazelbuild/bazel/tools/bash/runfiles/runfiles.bash',
'./app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/testdata/nodejs_image_binary_loader.js',
'./app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/testdata/nodejs_image_binary_require_patch.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/buffer-from',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/buffer-from/package.json',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/buffer-from/index.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/package.json',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/source-map.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib/array-set.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib/base64-vlq.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib/base64.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib/binary-search.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib/mapping-list.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib/quick-sort.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib/source-map-consumer.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib/source-map-generator.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib/source-node.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map/lib/util.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map-support',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map-support/package.json',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map-support/register.js',
'./app/testdata/nodejs_image_binary.runfiles/build_bazel_rules_nodejs/third_party/github.com/source-map-support/source-map-support.js',
'./app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/testdata/nodejs_image_lib.js',
'./app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/testdata/nodejs_image_lib.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/testdata/nodejs_image_binary.sh',
'/app',
'/app/testdata',
'/app/testdata/nodejs_image_binary',
'/app/testdata/nodejs_image_binary.runfiles',
'/app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker',
'/app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/external'])
# Check that the next layer contains node_modules
layerOneFiles = ['.',
'./app',
'./app/testdata',
'./app/testdata/nodejs_image_binary.runfiles',
'./app/testdata/nodejs_image_binary.runfiles/npm',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/jsesc',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/jsesc/LICENSE',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/jsesc/README.md',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/jsesc/index.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/jsesc/package.json',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/LICENSE',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/README.md',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/assert.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/async_hooks.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/base.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/buffer.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/child_process.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/cluster.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/console.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/constants.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/crypto.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/dgram.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/dns.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/domain.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/events.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/fs.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/globals.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/globals.global.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/http.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/http2.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/https.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/index.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/inspector.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/module.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/net.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/os.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/package.json',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/path.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/perf_hooks.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/process.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/punycode.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/querystring.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/readline.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/repl.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/stream.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/string_decoder.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/timers.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/tls.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/trace_events.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.2',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.2/base.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.2/fs.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.2/globals.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.2/index.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.2/util.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.4',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.4/base.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.4/globals.global.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.4/index.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.7',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.7/assert.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.7/base.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/ts3.7/index.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/tty.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/url.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/util.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/v8.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/vm.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/worker_threads.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/@types/node/zlib.d.ts',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/jsesc',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/jsesc/LICENSE-MIT.txt',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/jsesc/README.md',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/jsesc/bin',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/jsesc/bin/jsesc',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/jsesc/jsesc.js',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/jsesc/man',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/jsesc/man/jsesc.1',
'./app/testdata/nodejs_image_binary.runfiles/npm/node_modules/jsesc/package.json',
'/app',
'/app/testdata',
'/app/testdata/nodejs_image_binary',
'/app/testdata/nodejs_image_binary.runfiles',
'/app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker',
'/app/testdata/nodejs_image_binary.runfiles/io_bazel_rules_docker/external']
self.assertLayerNContains(img, 1, layerOneFiles)
# Check that the next layer contains node_args
layerTwoFiles = [
'.',
'./app',
'./app/testdata',
'./app/testdata/nodejs_image_binary.runfiles',
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64'.format(
os.sys.platform),
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64/bin'.format(
os.sys.platform),
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64/bin/node_repo_args.sh'.format(
os.sys.platform),
]
self.assertLayerNContains(img, 2, layerTwoFiles)
# Check that the next layer contains node runfiles
layerThreeFiles = [
'.',
'./app',
'./app/testdata',
'./app/testdata/nodejs_image_binary.runfiles',
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64'.format(
os.sys.platform),
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64/bin'.format(
os.sys.platform),
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64/bin/nodejs'.format(
os.sys.platform),
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64/bin/nodejs/bin'.format(
os.sys.platform),
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64/bin/nodejs/bin/node'.format(
os.sys.platform),
]
self.assertLayerNContains(img, 3, layerThreeFiles)
# Check that the next layer contains node
layerFourFiles = [
'.',
'./app',
'./app/testdata',
'./app/testdata/nodejs_image_binary.runfiles',
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64'.format(
os.sys.platform),
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64/bin'.format(
os.sys.platform),
'./app/testdata/nodejs_image_binary.runfiles/nodejs_{}_amd64/bin/node'.format(
os.sys.platform)
]
self.assertLayerNContains(img, 4, layerFourFiles)
# Re-enable once https://github.com/bazelbuild/rules_d/issues/14 is fixed.
# def test_d_image_args(self):
# with TestImage('d_image') as img:
# self.assertConfigEqual(img, 'Entrypoint', [
# '/app/testdata/d_image_binary',
# 'arg0',
# 'arg1'])
def test_compression_gzip(self):
fast_bytes = os.stat(
TestData('compression_gzip_fast-layer.tar.gz')).st_size
normal_bytes = os.stat(
TestData('compression_gzip_normal-layer.tar.gz')).st_size
self.assertLess(normal_bytes, fast_bytes,
'layer with normal compression (%dB) not smaller than layer with fast compression (%dB)' % (
normal_bytes, fast_bytes))
def load_stamp_info():
stamp_file = TestData("stamp_info_file.txt")
with open(stamp_file) as stamp_fp:
for line in stamp_fp:
# The first column in each line in the stamp file is the key
# and the second column is the corresponding value.
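            # For example, a (hypothetical) line "BUILD_USER jane" yields
            # STAMP_DICT['BUILD_USER'] = 'jane'.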
split_line = line.strip().split()
if len(split_line) == 0:
# Skip blank lines.
continue
key = ""
value = ""
if len(split_line) == 1:
# Value is blank.
key = split_line[0]
else:
key = split_line[0]
value = " ".join(split_line[1:])
STAMP_DICT[key] = value
print("Stamp variable '{key}'='{value}'".format(
key=key,
value=value
))
if __name__ == '__main__':
load_stamp_info()
unittest.main()
| {
"content_hash": "c73cde30829f673b836e20e30ba5a109",
"timestamp": "",
"source": "github",
"line_count": 1025,
"max_line_length": 182,
"avg_line_length": 58.889756097560976,
"alnum_prop": 0.5879858188926809,
"repo_name": "bazelbuild/rules_docker",
"id": "2758205bc17f1ee88f834a794c14014160405334",
"size": "60965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/container/image_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1764"
},
{
"name": "Go",
"bytes": "133160"
},
{
"name": "Java",
"bytes": "3866"
},
{
"name": "Python",
"bytes": "129060"
},
{
"name": "Shell",
"bytes": "80094"
},
{
"name": "Smarty",
"bytes": "4686"
},
{
"name": "Starlark",
"bytes": "549095"
}
],
"symlink_target": ""
} |
"""Unittests for pymatgen/core"""
| {
"content_hash": "8f1e783b044278790cccd12043a57e98",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 33,
"avg_line_length": 34,
"alnum_prop": 0.7058823529411765,
"repo_name": "tschaume/pymatgen",
"id": "ffd57995a0f739d93676a06174850df3f6fbd345",
"size": "34",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "pymatgen/core/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Dockerfile",
"bytes": "277"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "15152267"
},
{
"name": "Python",
"bytes": "7560590"
},
{
"name": "Roff",
"bytes": "4298591"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import serial, struct, time, collections, threading
from ..SerialDevice import SerialDevice
from six.moves import range
ErrorVals = {
0: ('SP Over-run', 'The previous character was not unloaded before the latest was received.'),
1: ('Frame Error', 'A valid stop bit was not received during the appropriate time period.'),
2: ('Buffer Over-run', 'The input buffer is filled and CR has not been received.'),
4: ('Bad Command', 'Input can not be interpreted -- command byte not valid.'),
8: ('Move Interrupted', 'A requested move was interrupted by input on the serial port.'),
16:('Arduino error', 'Error was reported by arduino interface.'),
32:('MP285 Timeout', 'Arduino timed out waiting for response from MP285.'),
64:('Command timeout', 'Arduino timed out waiting for full command from computer.'),
}
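# The controller reports errors as a bit mask, so one status byte may carry
# several of the conditions above at once. For example (illustrative only), a
# byte of 0x0C decodes to 'Bad Command' (4) plus 'Move Interrupted' (8); see
# SutterMP285.raiseError() below for the decoding loop.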
class TimeoutError(Exception):
pass
class MP285Error(Exception):
pass
class SutterMP285(SerialDevice):
"""
Class for communicating with Sutter MP-285 via serial port.
Note that this class is NOT thread-safe.
"""
def __init__(self, port, baud=9600):
"""
port: serial COM port (0 => com1)"""
#self.port = port
#self.baud = baud
#self.sp = serial.Serial(int(self.port), baudrate=self.baud, bytesize=serial.EIGHTBITS, timeout=0)
SerialDevice.__init__(self, port=port, baudrate=baud)
self._scale = None
self.moving = False
time.sleep(1.0) ## Give devices a moment to chill after opening the serial line.
self.clearBuffer()
self.setSpeed(777) ## may be required to be sure Sutter is behaving (voodoo...)
self.clearBuffer()
def getPos(self, scaled=True):
"""Get current position reported by controller. Returns a tuple (x,y,z); values given in m."""
## request position
self.write('c\r') # request is directly to Sutter MP285 in this case.
packet = self.read(length=13, timeout=8.0, term='\r')
if len(packet) != 12:
raise Exception("Sutter MP285: getPos: bad position packet: <%s> expected 12, got %d" % (repr(packet), len(packet)))
pos = [packet[:4], packet[4:8], packet[8:]]
pos = [struct.unpack('=l', x)[0] for x in pos]
if not scaled:
return pos
scale = self.scale()
pos = [x*scale for x in pos]
return pos
def getImmediatePos(self, returnButtons=False):
"""This is a non-standard command provided by custom hardware (Arduino controller).
It returns an estimated position even while the ROE is in use.
(if getPos() is called while the ROE is in use, the MP285 will very likely crash.)
"""
# self.readPacket(block=False)
self.write('p') # talks to Arduino only.
packet = self.read(length=13, timeout=5.0, term='\r')
if len(packet) != 12:
raise Exception("Sutter MP285: getImmediatePos: bad position packet: <%s> (%d)" % (repr(packet),len(packet)))
pos = [packet[:4], packet[4:8], packet[8:12]]
pos = [struct.unpack('=l', x)[0] for x in pos]
scale = self.scale()
pos = [x*scale for x in pos]
if returnButtons:
btn = packet[12]
btns = [ord(btn) & x == 0 for x in [1, 4, 16, 64]]
return pos, btns
return pos
def getButtonState(self):
p,b = self.getImmediatePos(returnButtons=True)
return b
def setPos(self, pos, block=True, timeout=10.):
"""Set the position.
Arguments:
pos: tuple (x, y, z) values must be given in meters.
Setting a coordinate to None leaves it unchanged.
block: bool, if true then the function does not return until the move is complete.
"""
scale = self.scale()
if len(pos) < 3:
pos = list(pos) + [None] * (3-len(pos))
if None in pos:
currentPos = self.getPos(scaled=False)
pos = [(pos[i]/scale if pos[i] is not None else currentPos[i]) for i in range(3)]
cmd = 'm' + struct.pack('=3l', int(pos[0]), int(pos[1]), int(pos[2])) + '\r'
self.write(cmd)
self.moving = True
if block:
self.blockWhileMoving(timeout=timeout)
def checkMoving(self):
"""
Return bool whether the stage is currently moving.
"""
if self.sp.inWaiting() > 0:
self.read(length=1, term='\r')
self.moving = False
if not self.moving:
return False
return True
def blockWhileMoving(self, timeout=10.0):
"""
        Blocks until the stage is done moving, or until timeout.
"""
if not self.moving:
return
self.read(length=1, timeout=timeout, term='\r')
self.moving = False
def moveBy(self, pos, block=True, timeout=10.):
"""Move by the specified distance.
Arguments:
pos: tuple (dx, dy, dz) values must be given in meters.
block: bool, if true then the function does not return until the move is complete.
"""
scale = self.scale()
if len(pos) < 3:
pos = list(pos) + [0.0] * (3-len(pos))
currentPos = self.getPos(scaled=False)
pos = [pos[i]/scale + currentPos[i] for i in range(3)]
cmd = 'm' + struct.pack('=3l', int(pos[0]), int(pos[1]), int(pos[2])) + '\r'
self.write(cmd)
if block:
self.blockWhileMoving(timeout=timeout)
def scale(self):
## Scale of position values in msteps/m
## Does this value change during operation?
## Should I be using step_mul for anything?
if self._scale is None:
stat = self.stat()
self._scale = 1e-6 / stat['step_div']
return self._scale
def stop(self):
self.write('\3')
try:
self.readPacket()
except MP285Error as err:
for e in err.args[0]:
if e[0] == 8: ## move interrupted, like we asked for
return
raise
def setSpeed(self, speed, fine=True, timeout=10.):
"""Set the speed of movements used when setPos is called.
Arguments:
speed: integer from 1 to 6550 in coarse mode, 1310 in fine mode.
Note that small numbers can result in imperceptibly slow movement.
fine: bool; True => 50uSteps/step False => 10uSteps/step
"""
v = int(speed)
## arbitrary speed limits.. do these apply to all devices?
maxSpd = 6550
if fine:
maxSpd = 1310
v = max(min(v, maxSpd), 1)
#print "MP285 speed:", v
if fine:
v |= 0x8000
cmd = 'V' + struct.pack('=H', v) + '\r'
self.write(cmd)
self.read(1, term='\r', timeout=timeout)
def stat(self, ):
self.write('s\r')
packet = self.read(33, timeout=5.0, term='\r')
if len(packet) != 32:
raise Exception("Sutter MP285: bad stat packet: '%s'" % repr(packet))
paramNames = ['flags', 'udirx', 'udiry', 'udirz', 'roe_vari', 'uoffset', 'urange', 'pulse',
'uspeed', 'indevice', 'flags2', 'jumpspd', 'highspd', 'dead', 'watch_dog',
'step_div', 'step_mul', 'xspeed', 'version']
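        # '=4B5H2B8H' maps the 32-byte status packet onto the 19 fields named
        # above: 4 unsigned bytes, 5 unsigned shorts, 2 unsigned bytes and
        # 8 unsigned shorts, in native byte order with no padding.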
vals = struct.unpack('=4B5H2B8H', packet)
params = collections.OrderedDict()
for i,n in enumerate(paramNames):
params[n] = vals[i]
flags = params['flags']
params['setup_num'] = flags & 0xF
params['roe_dir'] = -1 if (flags & 2**4) else 1
params['rel_abs_f'] = 'abs' if (flags & 2**5) else 'rel'
params['mode_f'] = 'cont' if (flags & 2**6) else 'pulse'
params['store_f'] = 'stored' if (flags & 2**7) else 'erased'
flags2 = params['flags2']
params['loop_mode'] = bool(flags2 & 1)
params['learn_mode'] = bool(flags2 & 2)
params['step_mode'] = 50 if (flags2 & 4) else 10
params['sw2_mode'] = bool(flags2 & 8)
params['sw1_mode'] = bool(flags2 & 16)
params['sw3_mode'] = bool(flags2 & 32)
params['sw4_mode'] = bool(flags2 & 64)
params['reverse_it'] = bool(flags2 & 128)
params['resolution'] = 50 if (params['xspeed'] & 2**15) else 10
params['speed'] = params['xspeed'] & 0x7FFF
return params
def setLimits(self, limits):
"""Set position limits on the device which may not be exceeded.
This command is only available when using custom hardware.
limits = [+x, -x, +y, -y, +z, -z]
If a limit is None, it will be ignored.
"""
scale = self.scale()
useLims = [(1 if x is not None else 0) for x in limits]
limits = [(0 if x is None else int(x/scale)) for x in limits]
data = struct.pack("=6l6B", *(limits + useLims))
self.write('l'+data+'\r');
self.readPacket()
def reset(self, hard=False):
"""Reset the controller.
Arguments:
hard: If False, then a soft-reset "r" command is sent
If True, then a hard-reset "R" command is sent (not supported by all hardware)"""
cmd = 'r\r'
if hard:
cmd = 'R\r'
self.write(cmd)
## wait for reset, check for error
s = self.clearBuffer()
if len(s) == 2 and s[1] == '\r':
self.raiseError(s[0])
## clear out anything else in the buffer (reset may generate garbage)
if s == '\x00':
return True ## successful reset
return False
def setOrigin(self):
self.write('o\r')
self.readPacket()
def setAbsolute(self):
self.write('a\r')
self.readPacket()
def setRelative(self):
self.write('b\r')
self.readPacket()
def continueAfterPause(self):
self.write('e\r')
self.readPacket()
def refresh(self):
self.write('n\r')
self.readPacket()
def readPacket(self):
return self.readUntil('\r')
#def clearBuffer(self):
#d = self.readAll()
#time.sleep(0.1)
#d += self.readAll()
#if len(d) > 0:
#print "Sutter MP285: Warning: tossed data ", repr(d)
#return d
#def readAll(self):
### read all bytes waiting in buffer; non-blocking.
#n = self.sp.inWaiting()
#if n > 0:
#return self.sp.read(n)
#return ''
#def write(self, data, timeout=10.0):
#self.blockWhileMoving(timeout=timeout) # If the stage is still moving, wait until it is done before sending another packet.
##self.readAll() ## always empty buffer before sending command
#self.sp.write(data)
#def close(self):
#self.sp.close()
def raiseError(self, errVals):
## errVals should be list of error codes
errors = []
for err in errVals:
hit = False
for k in ErrorVals:
if ord(err) & k:
hit = True
errors.append((k,)+ErrorVals[k])
if not hit:
errors.append((ord(err), "Unknown error code", ""))
raise MP285Error(errors)
#def read(self, length, timeout=5, term=None):
### Read *length* bytes or raise exception on timeout.
### if *term* is given, check that the last byte is *term* and remove it
### from the returned packet.
##self.sp.setTimeout(timeout) #broken!
#packet = self.readWithTimeout(length, timeout)
#if len(packet) < length:
#raise Exception("MP285: Timed out waiting for serial data (received so far: %s)" % repr(packet))
#if term is not None:
#if packet[-len(term):] != term:
#self.clearBuffer()
#raise Exception("MP285: Packet corrupt: %s (len=%d)" % (repr(packet), len(packet)))
#return packet[:-len(term)]
#return packet
#def readWithTimeout(self, nBytes, timeout):
#start = time.time()
#packet = ''
#while len(packet) < nBytes and time.time()-start < timeout:
#packet += self.sp.read(1)
#return packet
#def readPacket(self, expect=0, timeout=5, block=True):
### Read until a carriage return is encountered (or timeout).
### If expect is >0, then try to get a packet of that length, ignoring \r within that data
### if block is False, then return immediately if no data is available.
#start = time.time()
#res = ''
#errors = []
#packets = []
#while True:
#s = self.readAll()
#if not block and len(s) == 0:
#return
#if expect > 0: ## move bytes into result without checking for \r
#nb = expect-len(res)
#res += s[:nb]
#s = s[nb:]
#try:
#while len(s) > 0: ## pull packets out of s one at a time
#res += s[:s.index('\r')]
#s = s[s.index('\r')+1:]
#if len(res) == 1: ## error packet was sent
#errors.append(res)
#else:
#packets.append(res)
#res = ''
#except ValueError: ## partial packet; append and wait for more data
#res += s
#if len(res) > 32: ## no valid packets are longer than 32 bytes; give up
#raise Exception("Got junk data while reading for packet: '%s'" % str(res))
#if len(res) == 0:
#if len(errors) > 0:
#self.raiseError(errors)
#if len(packets) == 1: ## success
#return packets[0]
#if len(packets) > 1:
#raise Exception("Too many packets read.", packets)
##if len(s) > 0:
##if s != '\r' and s[0] != '=':
##print "SutterMP285 Error: '%s'" % s
###print "return:", repr(s)
##break
#time.sleep(0.01)
#if time.time() - start > timeout:
#raise TimeoutError("Timeout while waiting for response. (Data so far: %s)" % repr(res))
if __name__ == '__main__':
import sys
s = SutterMP285(port=sys.argv[1], baud=int(sys.argv[2])) # Arduino baud rate, NOT MP285 baud rate.
#s = SutterMP285(port=2, baud=9600)
def pos():
p = s.getPos()
print("<mp285> x: %0.2fum y: %0.2fum, z: %0.2fum" % (p[0]*1e6, p[1]*1e6, p[2]*1e6))
def ipos():
p = s.getImmediatePos()
print("x: %0.2fum y: %0.2fum, z: %0.2fum" % (p[0]*1e6, p[1]*1e6, p[2]*1e6))
def stat():
st = s.stat()
for k in st:
print("%s:%s%s" % (k, " "*(15-len(k)), str(st[k])))
def monitor():
while True:
pos()
def clock(speed, fine=False, runtime=2.0):
s.setSpeed(6500, fine=False)
s.setPos([-0.01, 0, 0])
pos = s.getPos()
s.setSpeed(speed, fine)
time.clock()
t = time.clock()
dist = runtime*speed*1e-6
s.setPos([pos[0]+dist, pos[1], pos[2]], timeout=runtime*2)
s.setPos(pos, timeout=runtime*2)
dt = 0.5*(time.clock()-t)
print("%d: dt=%0.2gs, dx=%0.2gm, %0.2f mm/s" % (int(speed), dt, dist, dist*1e3/dt))
def saw(dx, dz, zstep=5e-6):
p1 = s.getPos()
z = p1[2]
p1 = p1[:2]
p2 = [p1[0] + dx, p1[1]]
n = int(dz/zstep)
for i in range(n):
print("step:", i)
s.setPos(p2)
s.setPos(p1)
if i < n-1:
z += zstep
s.setPos([None,None,z])
ipos()
pos()
| {
"content_hash": "fdc24e245a9ad759c0be26578a3ac030",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 132,
"avg_line_length": 36.722595078299776,
"alnum_prop": 0.5172707889125799,
"repo_name": "acq4/acq4",
"id": "b2217db6861fa3b77ef62723c625a69fa7c34057",
"size": "16415",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "acq4/drivers/SutterMP285/mp285.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Batchfile",
"bytes": "335"
},
{
"name": "C",
"bytes": "1301111"
},
{
"name": "C++",
"bytes": "340035"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Inno Setup",
"bytes": "1606"
},
{
"name": "Makefile",
"bytes": "30"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "3503085"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
} |
import pytest
from mapletree.defaults.request import validators
from mapletree.defaults.request.argcontainer import (ArgContainer,
ValidationError,
InsufficientError)
def test_basics():
ac = ArgContainer()
ac['a'] = '1'
assert ac('a') == '1'
assert ac('a', validators.int_positive) == 1
def test_invalid():
ac = ArgContainer()
ac['a'] = '0'
pytest.raises(ValidationError, ac, 'a', validators.int_positive)
def test_insufficient():
ac = ArgContainer()
pytest.raises(InsufficientError, ac, 'a')
assert ac('a', default=1) == 1
| {
"content_hash": "88d6f64e8bc6b17ada074d043d7354d8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 26.076923076923077,
"alnum_prop": 0.5663716814159292,
"repo_name": "tomokinakamaru/mapletree",
"id": "2992c8640c56b49146016119cca7a115547eb849",
"size": "694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_argcontainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49221"
}
],
"symlink_target": ""
} |
"""Form mixins for the ``multilingual_tags`` app."""
from django import forms
from django.forms.utils import ErrorList
from django.contrib.contenttypes.models import ContentType
from django.utils.text import slugify
from django.utils.translation import get_language, ugettext_lazy as _
from .. import models
class TaggingFormMixin(object):
"""Mixin for ModelForms to add multilingual tags to a model."""
tag_field = {
'name': 'tags',
'label': _('Tags'),
        'help_text': _('Add tags separated by commas.'),
'required': True,
'max_tags': 0,
}
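    # A hypothetical form using this mixin could override ``tag_field`` like
    # so (the model and field names are placeholders):
    #
    #   class EntryForm(TaggingFormMixin, forms.ModelForm):
    #       tag_field = {
    #           'name': 'tags',
    #           'label': _('Keywords'),
    #           'help_text': _('Add up to five keywords, separated by commas.'),
    #           'required': False,
    #           'max_tags': 5,
    #       }
    #
    #       class Meta:
    #           model = Entry
    #           fields = ('title',)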
def __init__(self, *args, **kwargs):
super(TaggingFormMixin, self).__init__(*args, **kwargs)
self._taggeditems = []
self._instance_ctype = None
self.fields[self._get_tag_field_name()] = forms.CharField(
label=self._get_tag_field_label(),
help_text=self._get_tag_field_help_text(),
initial=self._get_tag_field_initial(),
required=self._get_tag_field_required(),
)
self.fields[self._get_tag_field_name()].widget.attrs.update({
'data-class': 'multilingual-tags-field',
'data-max-tags': self._get_tag_field_max_tags()})
setattr(self, 'clean_{0}'.format(self._get_tag_field_name()),
self._get_tag_field_clean())
def add_error(self, fieldname, message):
if fieldname in self._errors:
self._errors[fieldname].append(message)
else:
self._errors[fieldname] = ErrorList()
self._errors[fieldname].append(message)
def _get_tag_field_clean(self):
def clean_field():
self._tags_added = []
self._taggeditems = []
language = get_language()
max_tags = self._get_tag_field_max_tags()
data = self.data.get(self._get_tag_field_name())
if not data:
return []
tag_data = [t.strip() for t in data.split(',')]
self._instance_ctype = ContentType.objects.get_for_model(
self.instance)
for tag_string in tag_data:
if len(tag_string) > 64:
self.add_error(
self._get_tag_field_name(),
_('Tags cannot be longer than 64 characters:'
' "{0}"'.format(tag_string))
)
continue
try:
tag = models.Tag.objects.get(
slug=slugify(tag_string))
except models.Tag.DoesNotExist:
# TODO tags should not be stored directly
tag = models.Tag.objects.create(
slug=slugify(tag_string),
name=tag_string,
language_code=language)
# prevent duplicate tags
if tag not in self._tags_added:
self._tags_added.append(tag)
if self.instance.id:
taggeditem, created = (
models.TaggedItem.objects.get_or_create(
tag=tag,
content_type=self._instance_ctype,
object_id=self.instance.id,
)
)
else:
taggeditem = models.TaggedItem(
tag=tag,
content_type=self._instance_ctype)
self._taggeditems.append(taggeditem)
if max_tags and len(self._tags_added) > max_tags:
self.add_error(
self._get_tag_field_name(),
_('You cannot add more than {0} tags.'.format(
self._get_tag_field_max_tags()
))
)
return self._taggeditems
return clean_field
def _get_tag_field_help_text(self):
return self.tag_field.get('help_text', '')
def _get_tag_field_initial(self):
tag_model_field = getattr(self.instance, self._get_tag_field_name())
return ','.join([ti.tag.name for ti in tag_model_field.all()])
def _get_tag_field_label(self):
return self.tag_field.get('label', 'Tags')
def _get_tag_field_max_tags(self):
return int(self.tag_field.get('max_tags', 0))
def _get_tag_field_name(self):
return self.tag_field.get('name', 'tags')
def _get_tag_field_required(self):
return self.tag_field.get('required', True)
def save(self, commit=True):
instance = super(TaggingFormMixin, self).save(commit)
for item in self._taggeditems:
if hasattr(instance, 'get_user'):
item.user = instance.get_user()
item.object_id = instance.id
item.save()
models.TaggedItem.objects.filter(
content_type=self._instance_ctype,
object_id=instance.id).exclude(
pk__in=[ti.pk for ti in self._taggeditems]).delete()
return instance
| {
"content_hash": "c71f5412f3ebc54277a91c6d2492143b",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 76,
"avg_line_length": 39.83076923076923,
"alnum_prop": 0.5135187331015836,
"repo_name": "bitmazk/django-multilingual-tags",
"id": "70e2af52ac3fc6dcc8ae086977a2bec607e6ae32",
"size": "5178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multilingual_tags/forms/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4225"
},
{
"name": "JavaScript",
"bytes": "15200"
},
{
"name": "Makefile",
"bytes": "327"
},
{
"name": "Python",
"bytes": "36436"
}
],
"symlink_target": ""
} |
from PyQt4 import QtGui, QtCore
from Views import ui_dialog_replacevalue as dreplacevalue
from poplerGUI import ui_logic_preview as tprev
from poplerGUI import class_modelviewpandas as view
from poplerGUI.logiclayer import class_helpers as hlp
from poplerGUI import class_inputhandler as ini
class ReplaceValueDialog(QtGui.QDialog, dreplacevalue.Ui_Dialog):
'''
    User logic for replacing one value with another, either in a single
    user-specified column or across all columns of the loaded data
'''
update_data = QtCore.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.previous_click = False
# Place holders for user inputs
self.replacevaluelned = {}
# Place holder: Data Model/ Data model view
self.replacevaluemodel = None
self.viewEdit = view.PandasTableModelEdit(None)
# Placeholders: Data tables
self.replacevaluetable = None
# Actions
self.btnPreview.clicked.connect(self.submit_change)
self.btnSaveClose.clicked.connect(self.submit_change)
self.btnCancel.clicked.connect(self.close)
# Update boxes/preview box
self.message = QtGui.QMessageBox
self.error = QtGui.QErrorMessage()
self.preview = tprev.TablePreview()
def submit_change(self):
sender = self.sender()
self.replacevaluelned = {
'column_name':
self.lnedColumnname.text().strip(),
'value_from':
self.lnedFrom.text(),
'value_to':
self.lnedTo.text().strip(),
'all_columns': self.ckAllcolumns.isChecked()
}
self.replacevalueini = ini.InputHandler(
name='replacevalue',
lnedentry=self.replacevaluelned
)
self.facade.input_register(self.replacevalueini)
self.facade.create_log_record('replacevalue')
self._log = self.facade._tablelog['replacevalue']
if self.previous_click is True:
self.viewEdit = view.PandasTableModelEdit(None)
else:
pass
try:
if self.replacevaluelned['all_columns'] is True:
self.replacevaluetable = self.facade._data.replace(
{
self.replacevaluelned['value_from']:
self.replacevaluelned['value_to']
}
)
else:
columntochange = self.replacevaluelned['column_name']
                assert columntochange != ''
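                # DataFrame.replace with a nested dict of the form
                # {column: {old_value: new_value}} limits the substitution to
                # that single column.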
self.replacevaluetable = self.facade._data.replace(
{
columntochange: {
self.replacevaluelned['value_from']:
self.replacevaluelned['value_to']
}
}
)
self.previous_click = True
except Exception as e:
print(str(e))
self.error.showMessage(
'Could not replace values: ' + str(e))
hlp.write_column_to_log(
self.replacevaluelned, self._log, 'replacevalue')
if sender is self.btnPreview:
self.viewEdit.set_data(
self.replacevaluetable)
self.preview.tabviewPreview.setModel(
self.viewEdit)
self.preview.show()
elif sender is self.btnSaveClose:
self.facade._data = self.replacevaluetable
self.update_data.emit('replace_mod')
self.close()
| {
"content_hash": "6af9ec2d08de6dc90f5764a5871ca32e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 69,
"avg_line_length": 36.826530612244895,
"alnum_prop": 0.5774452756996398,
"repo_name": "bibsian/database-development",
"id": "d5c85eca21b0c1c3c5634fd12469eb1563fda42a",
"size": "3631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poplerGUI/ui_logic_replace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1241917"
}
],
"symlink_target": ""
} |
"""
This file tests vtk.vtkMolecule, and verifies that atoms/bonds are added.
"""
import sys
import vtk
from vtk.test import Testing
class TestMolecule(Testing.vtkTest):
def testCreation(self):
"Testing if molecules can be created/modified."
mol = vtk.vtkMolecule()
self.assertEqual(mol.GetNumberOfAtoms(), 0, "Number of atoms incorrect")
        self.assertEqual(mol.GetNumberOfBonds(), 0, "Number of bonds incorrect")
h1 = mol.AppendAtom(1, 0.0, 0.0, -0.5)
h2 = mol.AppendAtom(1, 0.0, 0.0, 0.5)
b = mol.AppendBond(h1, h2, 1)
self.assertEqual(mol.GetNumberOfAtoms(), 2, "Number of atoms incorrect")
        self.assertEqual(mol.GetNumberOfBonds(), 1, "Number of bonds incorrect")
if __name__ == "__main__":
Testing.main([(TestMolecule, 'test')])
| {
"content_hash": "f0001a9964aa5a58e71e0d8dd66470ce",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 35.47826086956522,
"alnum_prop": 0.6556372549019608,
"repo_name": "sankhesh/VTK",
"id": "697b09907817605d422d5c1466fa2581d9bf8590",
"size": "839",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "Common/DataModel/Testing/Python/TestMolecule.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "46625908"
},
{
"name": "C++",
"bytes": "68783882"
},
{
"name": "CMake",
"bytes": "1592852"
},
{
"name": "CSS",
"bytes": "186729"
},
{
"name": "Cuda",
"bytes": "29062"
},
{
"name": "D",
"bytes": "2081"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "GLSL",
"bytes": "214439"
},
{
"name": "Groff",
"bytes": "65394"
},
{
"name": "HTML",
"bytes": "292104"
},
{
"name": "Java",
"bytes": "147449"
},
{
"name": "JavaScript",
"bytes": "1131891"
},
{
"name": "Lex",
"bytes": "45258"
},
{
"name": "Objective-C",
"bytes": "22264"
},
{
"name": "Objective-C++",
"bytes": "191668"
},
{
"name": "Perl",
"bytes": "173168"
},
{
"name": "Prolog",
"bytes": "4406"
},
{
"name": "Python",
"bytes": "15735832"
},
{
"name": "Shell",
"bytes": "74255"
},
{
"name": "Slash",
"bytes": "1476"
},
{
"name": "Smarty",
"bytes": "1325"
},
{
"name": "Tcl",
"bytes": "1406812"
},
{
"name": "Yacc",
"bytes": "174481"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
_VERSION = '1.2.1'
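# For example (illustrative only, not from the original file): a pre-release
# string such as '1.2.1-rc2' would be passed to pip as '1.2.1rc2' via the
# _VERSION.replace('-', '') call in setup() below.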
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
'six >= 1.10.0',
'protobuf >= 3.2.0',
'werkzeug >= 0.11.10',
'html5lib == 0.9999999', # identical to 1.0b8
'markdown >= 2.6.8',
'bleach == 1.5.0',
'backports.weakref == 1.0rc1',
]
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
# python3 requires wheel 0.26
if sys.version_info.major == 3:
REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
REQUIRED_PACKAGES.append('wheel')
# mock comes with unittest.mock for python3, need to install for python2
REQUIRED_PACKAGES.append('mock >= 2.0.0')
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'tensorboard = tensorflow.tensorboard.tensorboard:main',
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
]
# pylint: enable=line-too-long
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
class InstallCommand(InstallCommandBase):
"""Override the dir where the headers go."""
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
self.install_headers = os.path.join(self.install_purelib,
'tensorflow', 'include')
return ret
class InstallHeaders(Command):
"""Override how headers are copied.
The install_headers that comes with setuptools copies all files to
the same directory. But we need the files to be in a specific directory
hierarchy for -I <include_dir> to work correctly.
"""
description = 'install C/C++ header files'
user_options = [('install-dir=', 'd',
'directory to install header files to'),
('force', 'f',
'force installation (overwrite existing files)'),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def mkdir_and_copy_file(self, header):
install_dir = os.path.join(self.install_dir, os.path.dirname(header))
# Get rid of some extra intervening directories so we can have fewer
# directories for -I
install_dir = re.sub('/google/protobuf/src', '', install_dir)
# Copy eigen code into tensorflow/include.
# A symlink would do, but the wheel file that gets created ignores
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
if 'external/eigen_archive/' in install_dir:
extra_dir = install_dir.replace('external/eigen_archive', '')
if not os.path.exists(extra_dir):
self.mkpath(extra_dir)
self.copy_file(header, extra_dir)
if not os.path.exists(install_dir):
self.mkpath(install_dir)
return self.copy_file(header, install_dir)
def run(self):
hdrs = self.distribution.headers
if not hdrs:
return
self.mkpath(self.install_dir)
for header in hdrs:
(out, _) = self.mkdir_and_copy_file(header)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for path, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]
matches += ['../' + x for x in find_files('*', '_solib_k8') if '.py' not in x]
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (list(find_files('*.h', 'tensorflow/core')) +
list(find_files('*.h', 'tensorflow/stream_executor')) +
list(find_files('*.h', 'google/protobuf/src')) +
list(find_files('*', 'third_party/eigen3')) +
list(find_files('*', 'external/eigen_archive')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
description='TensorFlow helps the tensors flow',
long_description='',
url='http://tensorflow.org/',
author='Google Inc.',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
'tensorboard/dist/bazel-html-imports.html',
'tensorboard/dist/index.html',
'tensorboard/dist/tf-tensorboard.html',
'tensorboard/lib/css/global.css',
'tensorboard/TAG',
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',)
| {
"content_hash": "f61b3ba446b88b471362faad4cfe9b64",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 78,
"avg_line_length": 31.75609756097561,
"alnum_prop": 0.6427035330261137,
"repo_name": "unnikrishnankgs/va",
"id": "ff7b5cd24c38af46b3dcc1e6398e1191817f68e6",
"size": "7200",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/tensorflow/tools/pip_package/setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1836035"
},
{
"name": "C++",
"bytes": "12002305"
},
{
"name": "CMake",
"bytes": "128"
},
{
"name": "CSS",
"bytes": "64776"
},
{
"name": "Cuda",
"bytes": "78890"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "297329"
},
{
"name": "JavaScript",
"bytes": "4313047"
},
{
"name": "Jupyter Notebook",
"bytes": "603900"
},
{
"name": "Makefile",
"bytes": "7573"
},
{
"name": "Nginx",
"bytes": "544"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "PureBasic",
"bytes": "134"
},
{
"name": "Python",
"bytes": "51104955"
},
{
"name": "Shell",
"bytes": "71646"
},
{
"name": "Smarty",
"bytes": "28890"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
import logging
import sys
import arrow
from OnePy.builtin_module.optimizer import Optimizer
from OnePy.config import EVENT_LOOP
from OnePy.constants import EVENT
from OnePy.custom_module.forward_analysis import ForwardAnalysis
from OnePy.sys_module.components.exceptions import BacktestFinished
from OnePy.sys_module.components.logger import LoggerFactory
from OnePy.sys_module.components.market_maker import MarketMaker
from OnePy.sys_module.components.order_checker import PendingOrderChecker
from OnePy.sys_module.components.output import OutPut
from OnePy.sys_module.metabase_env import OnePyEnvBase
from OnePy.utils.awesome_func import show_process
class OnePiece(OnePyEnvBase):
def __init__(self):
# Built-in modules
self.market_maker: MarketMaker = None
self.pending_order_checker: PendingOrderChecker = None
self.event_loop: list = None
# Other modules
self.optimizer = Optimizer()
self.forward_analysis = ForwardAnalysis()
def _pre_initialize_trading_system(self):
self.event_loop = EVENT_LOOP
self.market_maker = MarketMaker()
self.pending_order_checker = PendingOrderChecker()
def initialize_trading_system(self):  # clear in-memory state to make parameter optimization easier
self._pre_initialize_trading_system()
self.env.initialize_env()
self.market_maker.initialize()
self.env.recorder.initialize()
def sunny(self, summary: bool = True, show_process: bool = False):
"""主循环,OnePy的核心"""
self.initialize_trading_system()
while True:
try:
if self.env.event_engine.is_empty():
self.market_maker.update_market()
self.pending_order_checker.run()
if show_process:
self._show_process()
else:
cur_event = self.env.event_engine.get()
self._run_event_loop(cur_event)
except BacktestFinished:
if summary:
print("\n")
self.output.summary()
break
def _run_event_loop(self, cur_event):
for element in self.event_loop:
if self._event_is_executed(cur_event, **element):
break
def _event_is_executed(
self, cur_event, if_event: EVENT, then_event: EVENT, module_dict: dict
) -> bool:
if cur_event is None:
return True
elif cur_event == if_event:
[value.run() for value in module_dict.values()]
self.env.event_engine.put(then_event)
return True
else:
return False
def _show_process(self):
fromdate = arrow.get(self.env.fromdate)
todate = arrow.get(self.env.todate)
curdate = arrow.get(self.env.sys_date)
total_days = (todate - fromdate).days
finished_days = (curdate - fromdate).days
show_process(finished_days, total_days)
def set_date(self, fromdate: str, todate: str, frequency: str, instrument: str):
"""
Instrument: A_shares, Forex
Frequency:
(S5, S10, S30, M1, M2, M4, M5) <- BAD Interval
M10, M15, M30, H1, H2, H3, H4, H6, H8, H12
"""
self.env.instrument = instrument
self.env.fromdate = fromdate
self.env.todate = todate
self.env.sys_frequency = frequency
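# Illustrative call order only (not part of the original module; data feeds
# and brokers still have to be registered on the env before sunny() will run):
#   go = OnePiece()
#   go.set_date('2018-01-01', '2018-06-01', 'H1', 'Forex')
#   go.sunny(summary=True)
# i.e. a six-month hourly Forex backtest with a summary printed at the end.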
def set_forex_live_trading(self, frequency: str):
"""
Frequency:
(S5, S10, S30, M1, M2, M4, M5) <- BAD Interval
M10, M15, M30, H1, H2, H3, H4, H6, H8, H12
"""
fromdate = arrow.utcnow().format("YYYY-MM-DD HH:mm:ss")
self.set_date(fromdate, None, frequency, "Forex")
self.env.sys_date = fromdate
self.env.is_live_trading = True
def show_today_signals(self):
"""
Show the latest signals for the current day. Note that this makes the backtest results inaccurate.
"""
self.env.is_show_today_signals = True
@classmethod
def show_log(cls, file=False, no_console=False):
if file:
LoggerFactory("OnePy")
if no_console:
logging.getLogger("OnePy").propagate = False
logging.basicConfig(level=logging.INFO)
@classmethod
def set_recursion_limit(cls, limit: int = 2000):
"""
Raise the recursion limit. Too many signals can make the matching engine recurse so deeply that it appears to hang.
"""
sys.setrecursionlimit(limit)
def save_original_signal(self):
self.env.is_save_original = True
@property
def output(self) -> OutPut:
return OutPut()
| {
"content_hash": "e0eef941997dfa2950616a9d0786a56a",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 84,
"avg_line_length": 31.93661971830986,
"alnum_prop": 0.5986769570011026,
"repo_name": "Chandlercjy/OnePy",
"id": "55d2609483a3a1db076dadb90259c21ae71ecd97",
"size": "4699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OnePy/OnePiece.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "251278"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.contrib import admin
from blog.views import AllArticlesListView, CategoryArticlesListView,\
ArticleDetailsView
from page.views import HtmlPageView
urlpatterns = [
url(r'^$', AllArticlesListView.as_view(), name="all-articles"),
url(r'^admin/', admin.site.urls),
url(r'^(?P<slug>[\w|-]+)/$', HtmlPageView.as_view(), name="article"),
url(
r'^category/(?P<category_slug>[\w|-]+)/$',
CategoryArticlesListView.as_view(),
name="category"
),
url(
r'^article/(?P<slug>[\w|-]+)/$',
ArticleDetailsView.as_view(),
name="article"
),
]
| {
"content_hash": "5542ccb17954efa27b53f8275274b82d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 73,
"avg_line_length": 28.26086956521739,
"alnum_prop": 0.6184615384615385,
"repo_name": "Truqui/sergioscorner",
"id": "e1cffcf35c5979ae1e98c8a4ef013340f591c76f",
"size": "650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sergioscorner/sergioscorner/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5848"
},
{
"name": "HTML",
"bytes": "5914"
},
{
"name": "JavaScript",
"bytes": "552"
},
{
"name": "Python",
"bytes": "25439"
}
],
"symlink_target": ""
} |
"""
15. Transactions
Django handles transactions in three different ways. The default is to commit
each transaction upon a write, but you can decorate a function to get
commit-on-success behavior. Alternatively, you can manage the transaction
manually.
"""
from django.db import models
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
def __unicode__(self):
return u"%s %s" % (self.first_name, self.last_name)
__test__ = {'API_TESTS':"""
>>> from django.db import connection, transaction
"""}
from django.conf import settings
building_docs = getattr(settings, 'BUILDING_DOCS', False)
if building_docs or settings.DATABASE_ENGINE not in ('mysql', 'mysql_old'):
__test__['API_TESTS'] += """
# the default behavior is to autocommit after each save() action
>>> def create_a_reporter_then_fail(first, last):
... a = Reporter(first_name=first, last_name=last)
... a.save()
... raise Exception("I meant to do that")
...
>>> create_a_reporter_then_fail("Alice", "Smith")
Traceback (most recent call last):
...
Exception: I meant to do that
# The object created before the exception still exists
>>> Reporter.objects.all()
[<Reporter: Alice Smith>]
# the autocommit decorator works exactly the same as the default behavior
>>> autocomitted_create_then_fail = transaction.autocommit(create_a_reporter_then_fail)
>>> autocomitted_create_then_fail("Ben", "Jones")
Traceback (most recent call last):
...
Exception: I meant to do that
# Same behavior as before
>>> Reporter.objects.all()
[<Reporter: Alice Smith>, <Reporter: Ben Jones>]
# With the commit_on_success decorator, the transaction is only committed if the
# function doesn't throw an exception
>>> committed_on_success = transaction.commit_on_success(create_a_reporter_then_fail)
>>> committed_on_success("Carol", "Doe")
Traceback (most recent call last):
...
Exception: I meant to do that
# This time the object never got saved
>>> Reporter.objects.all()
[<Reporter: Alice Smith>, <Reporter: Ben Jones>]
# If there aren't any exceptions, the data will get saved
>>> def remove_a_reporter():
... r = Reporter.objects.get(first_name="Alice")
... r.delete()
...
>>> remove_comitted_on_success = transaction.commit_on_success(remove_a_reporter)
>>> remove_comitted_on_success()
>>> Reporter.objects.all()
[<Reporter: Ben Jones>]
# You can manually manage transactions if you really want to, but you
# have to remember to commit/rollback
>>> def manually_managed():
... r = Reporter(first_name="Carol", last_name="Doe")
... r.save()
... transaction.commit()
>>> manually_managed = transaction.commit_manually(manually_managed)
>>> manually_managed()
>>> Reporter.objects.all()
[<Reporter: Ben Jones>, <Reporter: Carol Doe>]
# If you forget, you'll get bad errors
>>> def manually_managed_mistake():
... r = Reporter(first_name="David", last_name="Davidson")
... r.save()
... # oops, I forgot to commit/rollback!
>>> manually_managed_mistake = transaction.commit_manually(manually_managed_mistake)
>>> manually_managed_mistake()
Traceback (most recent call last):
...
TransactionManagementError: Transaction managed block ended with pending COMMIT/ROLLBACK
"""
| {
"content_hash": "a12d18a5e9596debb825c37e34a43115",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 88,
"avg_line_length": 33.39393939393939,
"alnum_prop": 0.7044767090139141,
"repo_name": "paulsmith/geodjango",
"id": "a3222cd511120151f31d20da8d606c05b8d87289",
"size": "3306",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/modeltests/transactions/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "71605"
},
{
"name": "Python",
"bytes": "3433375"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
} |
''' Nose test running
Implements test and bench functions for modules.
'''
import os
import sys
def get_package_name(filepath):
"""Given a path where a package is installed, determine its name"""
fullpath = filepath[:]
pkg_name = []
while 'site-packages' in filepath or 'dist-packages' in filepath:
filepath, p2 = os.path.split(filepath)
if p2 in ('site-packages', 'dist-packages'):
break
pkg_name.append(p2)
# if package name determination failed, just default to numpy/scipy
if not pkg_name:
if 'scipy' in fullpath:
return 'scipy'
else:
return 'numpy'
# otherwise, reverse to get correct order and return
pkg_name.reverse()
# don't include the outer egg directory
if pkg_name[0].endswith('.egg'):
pkg_name.pop(0)
return '.'.join(pkg_name)
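# Illustrative example (not in the original file):
#   get_package_name('/usr/lib/python2.5/site-packages/scipy/stats')
# returns 'scipy.stats'; paths outside site-/dist-packages fall back to
# 'numpy' (or 'scipy' if 'scipy' appears in the path).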
def import_nose():
""" Import nose only when needed.
"""
fine_nose = True
minimum_nose_version = (0,10,0)
try:
import nose
from nose.tools import raises
except ImportError:
fine_nose = False
else:
if nose.__versioninfo__ < minimum_nose_version:
fine_nose = False
if not fine_nose:
msg = 'Need nose >= %d.%d.%d for tests - see ' \
'http://somethingaboutorange.com/mrl/projects/nose' % \
minimum_nose_version
raise ImportError(msg)
return nose
def run_module_suite(file_to_run = None):
if file_to_run is None:
f = sys._getframe(1)
file_to_run = f.f_locals.get('__file__', None)
assert file_to_run is not None
import_nose().run(argv=['',file_to_run])
# constructs NoseTester method docstrings
def _docmethod(meth, testtype):
if not meth.__doc__:
return
test_header = \
'''Parameters
----------
label : {'fast', 'full', '', attribute identifier}
Identifies the %(testtype)ss to run. This can be a string to
pass to the nosetests executable with the '-A' option, or one of
several special values.
Special values are:
'fast' - the default - which corresponds to nosetests -A option
of 'not slow'.
'full' - fast (as above) and slow %(testtype)ss as in the
no -A option to nosetests - same as ''
None or '' - run all %(testtype)ss
attribute_identifier - string passed directly to nosetests as '-A'
verbose : integer
verbosity value for test outputs, 1-10
extra_argv : list
List with any extra args to pass to nosetests''' \
% {'testtype': testtype}
meth.__doc__ = meth.__doc__ % {'test_header':test_header}
class NoseTester(object):
""" Nose test runner.
Usage: NoseTester(<package>).test()
<package> is package path or module. Default for package is None. A
value of None finds the calling module path.
This class is made available as numpy.testing.Tester, and a test function
is typically added to a package's __init__.py like so:
>>> from numpy.testing import Tester
>>> test = Tester().test
Calling this test function finds and runs all tests associated with the
package and all its subpackages.
"""
def __init__(self, package=None):
''' Test class init
Parameters
----------
package : string or module
If string, gives full path to package
If None, extract calling module path
Default is None
'''
package_name = None
if package is None:
f = sys._getframe(1)
package_path = f.f_locals.get('__file__', None)
assert package_path is not None
package_path = os.path.dirname(package_path)
package_name = f.f_locals.get('__name__', None)
elif isinstance(package, type(os)):
package_path = os.path.dirname(package.__file__)
package_name = getattr(package, '__name__', None)
else:
package_path = str(package)
self.package_path = package_path
# find the package name under test; this name is used to limit coverage
# reporting (if enabled)
if package_name is None:
package_name = get_package_name(package_path)
self.package_name = package_name
def _test_argv(self, label, verbose, extra_argv):
''' Generate argv for nosetest command
%(test_header)s
'''
argv = [__file__, self.package_path, '-s']
if label and label != 'full':
if not isinstance(label, basestring):
raise TypeError, 'Selection label should be a string'
if label == 'fast':
label = 'not slow'
argv += ['-A', label]
argv += ['--verbosity', str(verbose)]
if extra_argv:
argv += extra_argv
return argv
def _show_system_info(self):
nose = import_nose()
import numpy
print "NumPy version %s" % numpy.__version__
npdir = os.path.dirname(numpy.__file__)
print "NumPy is installed in %s" % npdir
if 'scipy' in self.package_name:
import scipy
print "SciPy version %s" % scipy.__version__
spdir = os.path.dirname(scipy.__file__)
print "SciPy is installed in %s" % spdir
pyversion = sys.version.replace('\n','')
print "Python version %s" % pyversion
print "nose version %d.%d.%d" % nose.__versioninfo__
def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
doctests=False, coverage=False):
''' Run tests for module using nose
%(test_header)s
doctests : boolean
If True, run doctests in module, default False
coverage : boolean
If True, report coverage of NumPy code, default False
(Requires the coverage module:
http://nedbatchelder.com/code/modules/coverage.html)
'''
# if doctests is in the extra args, remove it and set the doctest
# flag so the NumPy doctester is used instead
if extra_argv and '--with-doctest' in extra_argv:
extra_argv.remove('--with-doctest')
doctests = True
argv = self._test_argv(label, verbose, extra_argv)
if doctests:
argv += ['--with-numpydoctest']
if coverage:
argv+=['--cover-package=%s' % self.package_name, '--with-coverage',
'--cover-tests', '--cover-inclusive', '--cover-erase']
# bypass these samples under distutils
argv += ['--exclude','f2py_ext']
argv += ['--exclude','f2py_f90_ext']
argv += ['--exclude','gen_ext']
argv += ['--exclude','pyrex_ext']
argv += ['--exclude','swig_ext']
argv += ['--exclude','array_from_pyobj']
nose = import_nose()
# construct list of plugins, omitting the existing doctest plugin
import nose.plugins.builtin
from noseclasses import NumpyDoctest, KnownFailure
plugins = [NumpyDoctest(), KnownFailure()]
for p in nose.plugins.builtin.plugins:
plug = p()
if plug.name == 'doctest':
# skip the builtin doctest plugin
continue
plugins.append(plug)
return argv, plugins
def test(self, label='fast', verbose=1, extra_argv=None, doctests=False,
coverage=False):
''' Run tests for module using nose
%(test_header)s
doctests : boolean
If True, run doctests in module, default False
coverage : boolean
If True, report coverage of NumPy code, default False
(Requires the coverage module:
http://nedbatchelder.com/code/modules/coverage.html)
'''
# cap verbosity at 3 because nose becomes *very* verbose beyond that
verbose = min(verbose, 3)
import utils
utils.verbose = verbose
if doctests:
print "Running unit tests and doctests for %s" % self.package_name
else:
print "Running unit tests for %s" % self.package_name
self._show_system_info()
# reset doctest state on every run
import doctest
doctest.master = None
argv, plugins = self.prepare_test_args(label, verbose, extra_argv,
doctests, coverage)
from noseclasses import NumpyTestProgram
t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
return t.result
def bench(self, label='fast', verbose=1, extra_argv=None):
''' Run benchmarks for module using nose
%(test_header)s'''
print "Running benchmarks for %s" % self.package_name
self._show_system_info()
argv = self._test_argv(label, verbose, extra_argv)
argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
nose = import_nose()
return nose.run(argv=argv)
# generate method docstrings
_docmethod(_test_argv, '(testtype)')
_docmethod(test, 'test')
_docmethod(bench, 'benchmark')
########################################################################
# Doctests for NumPy-specific nose/doctest modifications
# try the #random directive on the output line
def check_random_directive():
'''
>>> 2+2
<BadExample object at 0x084D05AC> #random: may vary on your system
'''
# check the implicit "import numpy as np"
def check_implicit_np():
'''
>>> np.array([1,2,3])
array([1, 2, 3])
'''
# there's some extraneous whitespace around the correct responses
def check_whitespace_enabled():
'''
# whitespace after the 3
>>> 1+2
3
# whitespace before the 7
>>> 3+4
7
'''
| {
"content_hash": "d571a1a29e435e4075b18512918fc15d",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 79,
"avg_line_length": 31.62857142857143,
"alnum_prop": 0.5689049483087424,
"repo_name": "chadnetzer/numpy-gaurdro",
"id": "c4b8063de1e82c91ec8e4fdc17fb3637f771d420",
"size": "9963",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numpy/testing/nosetester.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4500127"
},
{
"name": "C++",
"bytes": "22396"
},
{
"name": "FORTRAN",
"bytes": "8946"
},
{
"name": "Python",
"bytes": "3741035"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
} |
import psycopg2
#from auth_js import connection_string_js
def connect_db(connection_data):
    """Open a database connection; the connection and cursor are kept as module globals."""
    global connx, cur
    try:
        connx = psycopg2.connect(connection_data)
    except psycopg2.Error:
        print("\n_________CONNECTION FAILURE_________\n")
        raise
    cur = connx.cursor()
    return cur
# This generates a list of lists. The sub-lists include three items: a number 1-180,
# representing 180 consecutive 24/hr periods (this is six months), the
# timestamp at the beginning of the 24/hr period, and the timestamp at the end of the 24/hr period.
def time_converter_day():
day_number = []
start_time = 1
for i in range (180):
day_label = i + 1
end_time = start_time + 86399 #number of seconds in 24 hours
i = [day_label,start_time,end_time]
start_time = end_time + 1
day_number.append(i)
return day_number
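# Quick illustrative check (assuming the 1..180 numbering above): the first two
# entries are [1, 1, 86400] and [2, 86401, 172800], i.e. consecutive,
# non-overlapping 24-hour windows.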
# This is the same as time_converter_day(), only for a month-long period.
def time_converter_month():
month_number = []
start_time = 1
for i in range (6):
month_label = i + 1
end_time = start_time + 2591999 #number of seconds in 30 days
i = [month_label,start_time,end_time]
start_time = end_time + 1
month_number.append(i)
return month_number
# SQL query to create new column in table, accepts arguments
# for table name, column name, and data type. Can use this to insert day
# and month columns for one or several tables.
def insert_column(cur,table,column,type):
cur.execute("""ALTER TABLE %s ADD COLUMN %s %s""" % (table,column,type))
# Update the month column based on timestamps. This loops through the month_number list generated by time_converter_month() and issues one UPDATE per month based on the calculated timestamp boundaries.
# With the string input filled in, each query reads: UPDATE <table> SET month = <first value in sub-list> WHERE tstamp >= <second value in sub-list> AND tstamp <= <third value in sub-list>
def Update_month(cur, table):
    for x in time_converter_month():
        cur.execute("""UPDATE %s SET month = %s WHERE tstamp >= %s AND tstamp <= %s""" % (table, x[0], x[1], x[2]))
# SQL query to create a table based on month designation. Can use this to break
# down large tables into tables based on month to make them smaller and easier to work with.
def generate_table_by_select(cur, table, month, new_table):
    command = """CREATE TABLE %s AS SELECT * FROM %s WHERE %s.month = %s""" % (new_table, table, table, month)
    cur.execute(command)
def disconnect_db():
cur.close()
connx.close()
if __name__ == '__main__':
connect_db(connection_string_js)
time_converter_day()
time_converter_month()
#insert_column('redteam','day','INT')
#insert_column('redteam','month','INT')
disconnect_db()
| {
"content_hash": "716724776fb867edd4cef57e23b5249b",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 265,
"avg_line_length": 40.18181818181818,
"alnum_prop": 0.6900452488687783,
"repo_name": "georgetown-analytics/team-404",
"id": "839a2a7e6d1c36986d3a644ee196b601dcde5237",
"size": "2652",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jakecode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "39"
},
{
"name": "Jupyter Notebook",
"bytes": "65210305"
},
{
"name": "Python",
"bytes": "21903"
}
],
"symlink_target": ""
} |
from random import random, gauss
from copy import deepcopy
import numpy as np
from hurd.decision_model import ExpectedUtilityModel, ExpectedValueModel
from hurd.optimizers import (
EvolutionaryGradientDescentOptimizer,
GridSearchOptimizer
)
from hurd.dataset import Dataset
from hurd.utils import load_choices13k, float2str
import judicious
# judicious.seed("af842bad-f2d8-ee0f-6e52-a47296c1cb0e")
judicious.seed("f5be38d7-36eb-c209-6631-e437efffc0f0")
def unshuffle(l, order):
""" unshuffles list given shuffled index """
l_out = [0] * len(l)
for i, j in enumerate(order):
l_out[j] = l[i]
return l_out
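# Illustrative example (not in the original script): if order = [2, 0, 1] was
# used to shuffle ['a', 'b', 'c'] into ['c', 'a', 'b'], then
# unshuffle(['c', 'a', 'b'], [2, 0, 1]) recovers ['a', 'b', 'c'].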
def sliding_avg(array, n):
"""Take a sliding average of non-overlapping n-sized chunks of an array."""
array = np.array(array)
out = []
for i in range(0, len(array), n):
out.append(np.mean(array[i: i + n]))
return np.array(out)
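# Illustrative example (not in the original script):
#   sliding_avg([1, 2, 3, 4, 5, 6], 2) -> array([1.5, 3.5, 5.5]),
# i.e. one mean per non-overlapping chunk of n elements.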
def params2dataset(params):
""" annoying and completely uninteresting helper function """
D_dict = {
"0": { # one problem
"A": [[params["pA1"], params["A1"]], [1 - params["pA1"], params["A2"]],],
"B": [[params["pB1"], params["B1"]], [1 - params["pB1"], params["B2"]],],
}
}
D = Dataset(dataset_id="single_problem")
D.from_dict(D_dict)
return D
def clip_params(params, perturbation=None):
""" also not super intersting """
for key in ["pA1", "pB1"]:
if params[key] > 1.0:
if perturbation is not None:
perturbation[key] = params[key] - perturbation[key]
params[key] = 1.0
if params[key] < 0.0:
if perturbation is not None:
perturbation[key] = perturbation[key] - params[key]
params[key] = 0.0
if perturbation is None:
return params
else:
return params, perturbation
# load a big human dataset for use once below
hgambles, hproblems, htargets = load_choices13k(include_amb=False)
human_dataset = Dataset(dataset_id="choices13k_noAmb")
human_dataset.from_dict(hgambles, hproblems)
class Human():
"""Actual homo sapiens."""
def __init__(self, arg=None):
super(Human, self).__init__()
self.arg = arg
def fit(self, dataset=None, targets=None):
pass
def predict(self, Ds=None):
problems = []
for D in Ds:
d = list(D)[0].as_dict()
PA1 = d['A']['probs'][0]
A1 = d['A']['outcomes'][0]
A2 = d['A']['outcomes'][1]
PB1 = d['B']['probs'][0]
B1 = d['B']['outcomes'][0]
B2 = d['B']['outcomes'][1]
problems.append((PA1, A1, A2, PB1, B1, B2))
print(problems)
rs = judicious.map(judicious.risky_choice, problems)
ps = [1.0 * (r == 'A') for r in rs]
ps2 = [[[p, 1-p]] for p in ps]
return ps2
human_or_proxy = Human()
# # instantiate an EU model
# human_or_proxy = ExpectedUtilityModel(
# util_func="PowerLossAverseUtil", # use something else here?
# # optimizer=EvolutionaryGradientDescentOptimizer(), # better but slow
# optimizer=GridSearchOptimizer(grid_vals=[-0.3, 0.3]),
# )
# fit the EU model on the human dataset
# this helps make it a decent proxy for people
human_or_proxy.fit(dataset=human_dataset, targets=htargets)
# instantiate an EV model to break
model_to_break = ExpectedValueModel()
# params are probs and outcomes for each gamble (A and B)
params = {"pA1": 0.5, "A1": 1, "A2": 1, "pB1": 0.5, "B1": 1, "B2": 1}
n_gradient_updates = 20 # how many gradient updates
n_perturbations = 10 # how many perturbations to use to estimate gradient
# size of the param state perturbations, for each param type
noise_level_probs = 0.03
noise_level_outcomes = 0.4
lr = 1.0 # for gradient ascent; fixed for now
# number of subject ratings or predictions for each perturbation
# when a proxy model is used, this value has no effect since
# predictions are deterministic
n_choices_per_state = 15
for grad_i in range(n_gradient_updates):
# first get all perturbations and perturbed states for this iteration
perturbations = []
perturbed_states = []
for sample_i in range(n_perturbations):
# add gaussian noise to the params
perturbed_state = {} # saves perturbed state
perturbation = {} # saves the noise components alone
for k, v in params.items():
if 'p' in k:
noise = gauss(0, noise_level_probs)
else:
noise = gauss(0, noise_level_outcomes)
perturbed_state[k] = v + noise
perturbation[k] = noise
# clip the input space to avoid non-probabitilies
perturbed_state, perturbation = clip_params(
deepcopy(perturbed_state), perturbation=deepcopy(perturbation)
)
perturbations.append(deepcopy(perturbation))
perturbed_states.append(deepcopy(perturbed_state))
# this should only be true when using real people, but works either way
if n_choices_per_state > 1:
# duplicate entries, e.g., [s1, s2] --> [s1, s1, s2, s2] when n = 2
# this creates trials where multiple subjects provide judgments for each problem
perturbed_states = [[ps] * n_choices_per_state for ps in perturbed_states]
perturbed_states = np.array(perturbed_states).flatten()
# shuffle indices so we know how to unshuffle perturbed_states later
order = np.arange(len(perturbed_states))
np.random.shuffle(order)
perturbed_states = perturbed_states[order]
# then get all predictions for the model_to_break
model_preds = [
model_to_break.predict(params2dataset(ps))[0][0] for ps in perturbed_states
]
# get all predictions from the human / human proxy
proxy_preds = human_or_proxy.predict([params2dataset(ps) for ps in perturbed_states])
# if using real people, average per-subject results
# the same is done for the model for consistency in the script
if n_choices_per_state > 1:
# unshuffle both
model_preds = unshuffle(model_preds, order)
proxy_preds = unshuffle(proxy_preds, order)
# average results, e.g, [s1, s1, s2, s2] --> [s1, s2]
model_preds = sliding_avg(model_preds, n_choices_per_state)
proxy_preds = sliding_avg(proxy_preds, n_choices_per_state)
# rewards are proportional to the disagreement between the models
# here we use MSE but we could also use crossentropy
rewards = (np.array(model_preds) - np.array(proxy_preds)) ** 2
# the mean error here also approximates how well our last update was
# this actually prints at the end of the formatted params print statement below
if grad_i:
print("LOSS:", np.mean(rewards))
# normalize rewards so they can be used to weight noise vectors
rewards = (rewards - np.mean(rewards)) / np.std(rewards)
# weight noise parts by rewards to get gradient
grad = {k: 0.0 for k, v in params.items()}
for i, perturbation in enumerate(perturbations):
for key in perturbation.keys():
grad[key] += perturbation[key] * rewards[i]
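# (Note: this is an evolution-strategies style estimate, grad ~ sum_i r_i * eps_i;
# the usual 1/(n*sigma) normalization is omitted here, so the effective step
# size also depends on n_perturbations and the noise levels.)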
# use the current gradient to update the problem state
for key in params.keys():
params[key] += lr * grad[key]
# clip the new param state before the next gradient update round
params = clip_params(deepcopy(params))
print(
"A: {} p({}) / {} p({}) | B: {} p({}) / {} p({}) --- ".format(
*[
float2str(_)
for _ in [
params["A1"],
params["pA1"],
params["A2"],
1 - params["pA1"],
params["B1"],
params["pB1"],
params["B2"],
1 - params["pB1"],
]
]
),
end="",
)
| {
"content_hash": "f43f0cedef54356a671694012801b6a3",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 89,
"avg_line_length": 34.21212121212121,
"alnum_prop": 0.6083765658610654,
"repo_name": "suchow/judicious",
"id": "efa64dcb9e0c2dd64089de6f5deaab37b9b6c046",
"size": "7904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test59.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1457"
},
{
"name": "HTML",
"bytes": "103147"
},
{
"name": "JavaScript",
"bytes": "8050"
},
{
"name": "Makefile",
"bytes": "2042"
},
{
"name": "Python",
"bytes": "83207"
}
],
"symlink_target": ""
} |
import unittest
import os
import time
try:
from urllib import request # python 3
except ImportError:
import urllib2 as request # python 2
from tests import server
class BaseTest(unittest.TestCase):
def setUp(self):
self.headers = {}
self.opener = request.build_opener()
self.url = "http://localhost:8987"
full_path = os.path.realpath(__file__)
self.cwd = os.path.dirname(full_path)
def _request(self, path, data=None):
try:
if not path.startswith("/"):
path = "/%s" % path
url = self.url + path
#print("Call url: %s" % url)
req = request.Request(url,
data=data,
headers=self.headers)
response = self.opener.open(req)
except Exception as err:
return err
return response
def run(SETTINGS, test):
server.init_settings(SETTINGS)
srv = server.WSGIServerThread()
srv.start()
time.sleep(0.5)
suite = unittest.TestLoader().loadTestsFromTestCase(test)
res = unittest.TextTestRunner(verbosity=2).run(suite)
srv.stop()
return res
| {
"content_hash": "6223e63f688647f6ad6e7cecf5b9ec4a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 61,
"avg_line_length": 26.085106382978722,
"alnum_prop": 0.5595432300163132,
"repo_name": "tobi-weber/levitas",
"id": "b6804f02f726eb078c9ab6f855587d35b416d2fe",
"size": "1854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1090"
},
{
"name": "Python",
"bytes": "168238"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
} |
"""
Tests for the `koordinates.catalog` module.
"""
import contextlib
import json
import os
import re
import logging
import pytest
import responses
from koordinates import Client, BadRequest
def _env_set(key, value):
if value is None:
os.environ.pop(key, None)
else:
os.environ[key] = value
@contextlib.contextmanager
def env_override(key, value):
"""
Contextmanager. Temporarily override an environment variable.
"""
orig = os.environ.get(key)
_env_set(key, value)
try:
yield
finally:
# Put it back the way it was
_env_set(key, orig)
@pytest.fixture
def client():
return Client(host="test.koordinates.com", token="12345abcde")
def test_token_from_env():
with env_override("KOORDINATES_TOKEN", None):
with pytest.raises(KeyError):
Client(host="test.koordinates.com")
with env_override("KOORDINATES_TOKEN", "abcde12345"):
client = Client(host="test.koordinates.com")
assert client.token == "abcde12345"
def test_token_from_param():
with env_override("KOORDINATES_TOKEN", None):
client = Client(host="test.koordinates.com", token="12345abcde")
assert client.token == "12345abcde"
with env_override("KOORDINATES_TOKEN", "don't use me"):
client = Client(host="test.koordinates.com", token="12345abcde")
assert client.token == "12345abcde"
def test_get_url_path(client):
assert "/layers/" == client.get_url_path("LAYER", "GET", "multi")
assert "/publish/12345/" == client.get_url_path(
"PUBLISH", "DELETE", "single", {"id": 12345}
)
def test_get_url(client):
url = client.get_url("LAYER", "GET", "multi")
assert url == "https://test.koordinates.com/services/api/v1/layers/"
url = client.get_url("PUBLISH", "DELETE", "single", {"id": 12345})
assert url == "https://test.koordinates.com/services/api/v1/publish/12345/"
def test_reverse_url(client):
params = client.reverse_url(
"LAYER", "https://test.koordinates.com/services/api/v1/layers/12345/"
)
assert params == {"id": "12345"}
params = client.reverse_url(
"LAYER_VERSION",
"https://test.koordinates.com/services/api/v1/layers/12345/versions/3456/",
)
assert params == {"layer_id": "12345", "version_id": "3456"}
params = client.reverse_url(
"LAYER_VERSION",
"https://test.koordinates.com/services/api/v1/layers/12345/versions/3456/publish/",
verb="POST",
urltype="publish",
)
assert params == {"layer_id": "12345", "version_id": "3456"}
params = client.reverse_url(
"LICENSE",
"https://test.koordinates.com/services/api/v1/licenses/cc-by/nz/",
verb="GET",
urltype="cc",
)
assert params == {"slug": "cc-by", "jurisdiction": "nz"}
with pytest.raises(KeyError):
client.reverse_url("LAYER_VERSION", "")
with pytest.raises(KeyError):
client.reverse_url(
"LAYER_VERSION",
"https://test.koordinates.com/services/api/v1/layers/12345/",
)
with pytest.raises(KeyError):
client.reverse_url("LAYER_VERSION", None)
with pytest.raises(KeyError):
client.reverse_url("LAYER_VERSION", "/layers/12345/versions/3456/")
@responses.activate
def test_user_agent(client):
responses.add(
responses.GET,
"https://test.koordinates.com/api/v1/test/",
body="[]",
status=200,
content_type="application/json",
)
r = client.request("GET", "https://test.koordinates.com/api/v1/test/")
r.raise_for_status()
req = responses.calls[0].request
ua = req.headers.get("User-Agent")
assert ua.startswith("KoordinatesPython/")
@responses.activate
def test_server_error(client):
# Form / field errors:
responses.add(
responses.POST,
"https://test.koordinates.com/api/v1/layers/123/versions/",
body='{"autoupdate_schedule":["This field is required when autoupdate is on."]}',
status=400,
content_type="application/json",
)
with pytest.raises(BadRequest) as cm:
client.request(
"POST", "https://test.koordinates.com/api/v1/layers/123/versions/", json={}
)
e = cm.value
assert (
str(e) == "autoupdate_schedule: This field is required when autoupdate is on."
)
assert (
repr(e)
== "BadRequest('autoupdate_schedule: This field is required when autoupdate is on.')"
)
responses.add(
responses.POST,
"https://test.koordinates.com/api/v1/layers/1234/versions/",
body='{"autoupdate_schedule":["This field is required when autoupdate is on."], "number":["Value must be >10", "Value must be <100"]}',
status=400,
content_type="application/json",
)
with pytest.raises(BadRequest) as cm:
client.request(
"POST", "https://test.koordinates.com/api/v1/layers/1234/versions/", json={}
)
e = cm.value
estr = set(str(e).split("\n"))
assert estr == {
"number: Value must be >10; Value must be <100",
"autoupdate_schedule: This field is required when autoupdate is on.",
}
assert repr(e) == "BadRequest('%s')" % str(e)
# Other detail errors:
responses.add(
responses.POST,
"https://test.koordinates.com/api/v1/layers/123/versions/123/import/",
body='{"detail": "No valid datasources to import"}',
status=400,
content_type="application/json",
)
with pytest.raises(BadRequest) as cm:
client.request(
"POST",
"https://test.koordinates.com/api/v1/layers/123/versions/123/import/",
)
e = cm.value
assert str(e) == "detail: No valid datasources to import"
assert repr(e) == "BadRequest('detail: No valid datasources to import')"
@responses.activate
def test_request_logging(caplog, client):
caplog.set_level(logging.DEBUG)
responses.add(
responses.GET,
"https://test.koordinates.com/api/v1/test/",
body="[]",
status=200,
content_type="application/json",
)
r = client.request(
"GET",
"https://test.koordinates.com/api/v1/test/",
json={"some": ["data", 1]},
headers={"FooHeader": "Bar"},
)
r.raise_for_status()
lreq = caplog.records[0]
lmsg = lreq.getMessage()
assert lmsg.startswith("Request: ")
lf = re.match(
"^Request: GET https://test.koordinates.com/api/v1/test/ headers=(?P<headers>.*) body=(?P<body>.*)$",
lmsg,
)
print(lf.group("headers"), lf.group("body"))
lbody = json.loads(lf.group("body"))
assert lbody == {"some": ["data", 1]}
lheaders = json.loads(lf.group("headers"))
assert "FooHeader" in lheaders
assert "Authorization" not in lheaders
| {
"content_hash": "d436ad9677ea4b75a6e81bf1e2682ca3",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 143,
"avg_line_length": 28.757322175732217,
"alnum_prop": 0.6113778553761094,
"repo_name": "koordinates/python-client",
"id": "0f965877188b3a9bf30a4297860ee73d4a3e876b",
"size": "6898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1400"
},
{
"name": "Python",
"bytes": "332997"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
__all__ = ["esdb", "get_db", "add_abstracts", "get_start_date"]
from flask import _app_ctx_stack as stack
from elasticsearch import Elasticsearch
from elasticsearch.helpers import streaming_bulk
from elasticsearch.exceptions import NotFoundError, RequestError
from .mappings import arxiv_mappings
INDEX_NAME = "arxiv"
ABSTRACT_TYPE = "abstract"
def get_db(host="localhost:9200", **kwargs):
return Elasticsearch(hosts=[host], **kwargs)
def add_abstracts(entries):
# Build the action list generator.
actions = (dict(
_index=INDEX_NAME,
_type=ABSTRACT_TYPE,
_id=entry["id"],
_op_type="index",
**entry) for entry in entries)
# Connect to the database.
es = get_db()
# Make sure that the index exists with the correct mappings.
if not es.indices.exists(INDEX_NAME):
es.indices.create(index=INDEX_NAME, body=dict(
mappings=arxiv_mappings,
settings={"number_of_shards": 1},
))
# Don't forget to iterate to run the generator.
return streaming_bulk(es, actions)
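# Illustrative usage (not part of this module): streaming_bulk is lazy, so a
# caller has to iterate the returned generator for the indexing to happen, e.g.
#   for ok, info in add_abstracts(entries):
#       if not ok:
#           print("failed to index:", info)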
def get_start_date(since="2000-01-01"):
es = get_db()
try:
# Try to get a document with a "fetched" entry.
r = es.search(index=INDEX_NAME, doc_type=ABSTRACT_TYPE, body=dict(
sort=[{"fetched": {"order": "desc"}}],
size=1,
query={
"match_all": {},
}
))
except (NotFoundError, RequestError):
# If none exist, accept the default.
pass
else:
if len(r["hits"]["hits"]):
v = r["hits"]["hits"][0]["_source"]["fetched"]
if v != "null":
since = v[:10]
return since
class FlaskES(object):
def __init__(self, app=None, **kwargs):
if app is not None:
self.init_app(app, **kwargs)
def init_app(self, app, **kwargs):
app.config.setdefault("ELASTICSEARCH_HOST", "localhost:9200")
self.elasticsearch_options = kwargs
app.teardown_appcontext(self.teardown)
def __getattr__(self, item):
es = self.get_es()
if es is None:
return None
return getattr(es, item)
def get_es(self):
ctx = stack.top
if ctx is None:
return None
if not hasattr(ctx, "elasticsearch"):
ctx.elasticsearch = get_db(
host=ctx.app.config.get('ELASTICSEARCH_HOST'),
**(self.elasticsearch_options)
)
return ctx.elasticsearch
def search(self, **kwargs):
es = self.get_es()
if es is None:
return None
kwargs["index"] = kwargs.get("index", INDEX_NAME)
kwargs["doc_type"] = kwargs.get("doc_type", ABSTRACT_TYPE)
return es.search(**kwargs)
def teardown(self, exception):
ctx = stack.top
if hasattr(ctx, "elasticsearch"):
ctx.elasticsearch = None
esdb = FlaskES()
| {
"content_hash": "f39a091b2c2eede64bf99a0a420f85ec",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 74,
"avg_line_length": 26.839285714285715,
"alnum_prop": 0.5768463073852296,
"repo_name": "dfm/arxiv-server",
"id": "d32798dc4a190403ea5867ac63b3b0b2d95d4eac",
"size": "3031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arxiv/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14185"
}
],
"symlink_target": ""
} |
"""Unit tests for the Pipeline class."""
from __future__ import absolute_import
import copy
import logging
import platform
import unittest
from builtins import object
from builtins import range
from collections import defaultdict
import mock
import apache_beam as beam
from apache_beam import typehints
from apache_beam.coders import BytesCoder
from apache_beam.io import Read
from apache_beam.metrics import Metrics
from apache_beam.pipeline import Pipeline
from apache_beam.pipeline import PipelineOptions
from apache_beam.pipeline import PipelineVisitor
from apache_beam.pipeline import PTransformOverride
from apache_beam.pvalue import AsSingleton
from apache_beam.runners.dataflow.native_io.iobase import NativeSource
from apache_beam.runners.direct.evaluation_context import _ExecutionContext
from apache_beam.runners.direct.transform_evaluator import _GroupByKeyOnlyEvaluator
from apache_beam.runners.direct.transform_evaluator import _TransformEvaluator
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import CombineGlobally
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Map
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms import WindowInto
from apache_beam.transforms.userstate import BagStateSpec
from apache_beam.transforms.window import SlidingWindows
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils.timestamp import MIN_TIMESTAMP
# TODO(BEAM-1555): Test is failing on the service, with FakeSource.
# from nose.plugins.attrib import attr
class FakeSource(NativeSource):
"""Fake source returning a fixed list of values."""
class _Reader(object):
def __init__(self, vals):
self._vals = vals
self._output_counter = Metrics.counter('main', 'outputs')
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
pass
def __iter__(self):
for v in self._vals:
self._output_counter.inc()
yield v
def __init__(self, vals):
self._vals = vals
def reader(self):
return FakeSource._Reader(self._vals)
class FakeUnboundedSource(NativeSource):
"""Fake unbounded source. Does not work at runtime"""
def reader(self):
return None
def is_bounded(self):
return False
class DoubleParDo(beam.PTransform):
def expand(self, input):
return input | 'Inner' >> beam.Map(lambda a: a * 2)
def to_runner_api_parameter(self, context):
return self.to_runner_api_pickled(context)
class TripleParDo(beam.PTransform):
def expand(self, input):
# Keeping labels the same intentionally to make sure that there is no label
# conflict due to replacement.
return input | 'Inner' >> beam.Map(lambda a: a * 3)
class ToStringParDo(beam.PTransform):
def expand(self, input):
# We use copy.copy() here to make sure the typehint mechanism doesn't
# automatically infer that the output type is str.
return input | 'Inner' >> beam.Map(lambda a: copy.copy(str(a)))
class PipelineTest(unittest.TestCase):
@staticmethod
def custom_callable(pcoll):
return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
# Some of these tests designate a runner by name, others supply a runner.
# This variation is just to verify that both means of runner specification
# work and is not related to other aspects of the tests.
class CustomTransform(PTransform):
def expand(self, pcoll):
return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
class Visitor(PipelineVisitor):
def __init__(self, visited):
self.visited = visited
self.enter_composite = []
self.leave_composite = []
def visit_value(self, value, _):
self.visited.append(value)
def enter_composite_transform(self, transform_node):
self.enter_composite.append(transform_node)
def leave_composite_transform(self, transform_node):
self.leave_composite.append(transform_node)
def test_create(self):
pipeline = TestPipeline()
pcoll = pipeline | 'label1' >> Create([1, 2, 3])
assert_that(pcoll, equal_to([1, 2, 3]))
# Test if initial value is an iterator object.
pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6)))
pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10])
assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3')
pipeline.run()
def test_flatmap_builtin(self):
pipeline = TestPipeline()
pcoll = pipeline | 'label1' >> Create([1, 2, 3])
assert_that(pcoll, equal_to([1, 2, 3]))
pcoll2 = pcoll | 'do' >> FlatMap(lambda x: [x + 10])
assert_that(pcoll2, equal_to([11, 12, 13]), label='pcoll2')
pcoll3 = pcoll2 | 'm1' >> Map(lambda x: [x, 12])
assert_that(pcoll3,
equal_to([[11, 12], [12, 12], [13, 12]]), label='pcoll3')
pcoll4 = pcoll3 | 'do2' >> FlatMap(set)
assert_that(pcoll4, equal_to([11, 12, 12, 12, 13]), label='pcoll4')
pipeline.run()
def test_maptuple_builtin(self):
pipeline = TestPipeline()
pcoll = pipeline | Create([('e1', 'e2')])
side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
# A test function with a tuple input, an auxiliary parameter,
# and some side inputs.
fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
e1, e2, t, s1, s2)
assert_that(pcoll | 'NoSides' >> beam.core.MapTuple(fn),
equal_to([('e1', 'e2', MIN_TIMESTAMP, None, None)]),
label='NoSidesCheck')
assert_that(pcoll | 'StaticSides' >> beam.core.MapTuple(fn, 's1', 's2'),
equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
label='StaticSidesCheck')
assert_that(pcoll | 'DynamicSides' >> beam.core.MapTuple(fn, side1, side2),
equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
label='DynamicSidesCheck')
assert_that(pcoll | 'MixedSides' >> beam.core.MapTuple(fn, s2=side2),
equal_to([('e1', 'e2', MIN_TIMESTAMP, None, 's2')]),
label='MixedSidesCheck')
pipeline.run()
def test_flatmaptuple_builtin(self):
pipeline = TestPipeline()
pcoll = pipeline | Create([('e1', 'e2')])
side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
# A test function with a tuple input, an auxiliary parameter,
# and some side inputs.
fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
e1, e2, t, s1, s2)
assert_that(pcoll | 'NoSides' >> beam.core.FlatMapTuple(fn),
equal_to(['e1', 'e2', MIN_TIMESTAMP, None, None]),
label='NoSidesCheck')
assert_that(pcoll | 'StaticSides' >> beam.core.FlatMapTuple(fn, 's1', 's2'),
equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
label='StaticSidesCheck')
assert_that(pcoll
| 'DynamicSides' >> beam.core.FlatMapTuple(fn, side1, side2),
equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
label='DynamicSidesCheck')
assert_that(pcoll | 'MixedSides' >> beam.core.FlatMapTuple(fn, s2=side2),
equal_to(['e1', 'e2', MIN_TIMESTAMP, None, 's2']),
label='MixedSidesCheck')
pipeline.run()
def test_create_singleton_pcollection(self):
pipeline = TestPipeline()
pcoll = pipeline | 'label' >> Create([[1, 2, 3]])
assert_that(pcoll, equal_to([[1, 2, 3]]))
pipeline.run()
# TODO(BEAM-1555): Test is failing on the service, with FakeSource.
# @attr('ValidatesRunner')
def test_metrics_in_fake_source(self):
pipeline = TestPipeline()
pcoll = pipeline | Read(FakeSource([1, 2, 3, 4, 5, 6]))
assert_that(pcoll, equal_to([1, 2, 3, 4, 5, 6]))
res = pipeline.run()
metric_results = res.metrics().query()
outputs_counter = metric_results['counters'][0]
self.assertEqual(outputs_counter.key.step, 'Read')
self.assertEqual(outputs_counter.key.metric.name, 'outputs')
self.assertEqual(outputs_counter.committed, 6)
def test_fake_read(self):
pipeline = TestPipeline()
pcoll = pipeline | 'read' >> Read(FakeSource([1, 2, 3]))
assert_that(pcoll, equal_to([1, 2, 3]))
pipeline.run()
def test_visit_entire_graph(self):
pipeline = Pipeline()
pcoll1 = pipeline | 'pcoll' >> Create([1, 2, 3])
pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1])
transform = PipelineTest.CustomTransform()
pcoll5 = pcoll4 | transform
visitor = PipelineTest.Visitor(visited=[])
pipeline.visit(visitor)
self.assertEqual(set([pcoll1, pcoll2, pcoll3, pcoll4, pcoll5]),
set(visitor.visited))
self.assertEqual(set(visitor.enter_composite),
set(visitor.leave_composite))
self.assertEqual(3, len(visitor.enter_composite))
self.assertEqual(visitor.enter_composite[2].transform, transform)
self.assertEqual(visitor.leave_composite[1].transform, transform)
def test_apply_custom_transform(self):
pipeline = TestPipeline()
pcoll = pipeline | 'pcoll' >> Create([1, 2, 3])
result = pcoll | PipelineTest.CustomTransform()
assert_that(result, equal_to([2, 3, 4]))
pipeline.run()
def test_reuse_custom_transform_instance(self):
pipeline = Pipeline()
pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3])
pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6])
transform = PipelineTest.CustomTransform()
pcoll1 | transform
with self.assertRaises(RuntimeError) as cm:
pipeline.apply(transform, pcoll2)
self.assertEqual(
cm.exception.args[0],
'Transform "CustomTransform" does not have a stable unique label. '
'This will prevent updating of pipelines. '
'To apply a transform with a specified label write '
'pvalue | "label" >> transform')
def test_reuse_cloned_custom_transform_instance(self):
pipeline = TestPipeline()
pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3])
pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6])
transform = PipelineTest.CustomTransform()
result1 = pcoll1 | transform
result2 = pcoll2 | 'new_label' >> transform
assert_that(result1, equal_to([2, 3, 4]), label='r1')
assert_that(result2, equal_to([5, 6, 7]), label='r2')
pipeline.run()
def test_transform_no_super_init(self):
class AddSuffix(PTransform):
def __init__(self, suffix):
# No call to super(...).__init__
self.suffix = suffix
def expand(self, pcoll):
return pcoll | Map(lambda x: x + self.suffix)
self.assertEqual(
['a-x', 'b-x', 'c-x'],
sorted(['a', 'b', 'c'] | 'AddSuffix' >> AddSuffix('-x')))
@unittest.skip("Fails on some platforms with new urllib3.")
def test_memory_usage(self):
try:
import resource
except ImportError:
# Skip the test if resource module is not available (e.g. non-Unix os).
self.skipTest('resource module not available.')
if platform.mac_ver()[0]:
# Skip the test on macos, depending on version it returns ru_maxrss in
# different units.
self.skipTest('ru_maxrss is not in standard units.')
def get_memory_usage_in_bytes():
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * (2 ** 10)
def check_memory(value, memory_threshold):
memory_usage = get_memory_usage_in_bytes()
if memory_usage > memory_threshold:
raise RuntimeError(
'High memory usage: %d > %d' % (memory_usage, memory_threshold))
return value
len_elements = 1000000
num_elements = 10
num_maps = 100
# TODO(robertwb): reduce memory usage of FnApiRunner so that this test
# passes.
pipeline = TestPipeline(runner='BundleBasedDirectRunner')
# Consumed memory should not be proportional to the number of maps.
memory_threshold = (
get_memory_usage_in_bytes() + (5 * len_elements * num_elements))
# Plus small additional slack for memory fluctuations during the test.
memory_threshold += 10 * (2 ** 20)
biglist = pipeline | 'oom:create' >> Create(
['x' * len_elements] * num_elements)
for i in range(num_maps):
biglist = biglist | ('oom:addone-%d' % i) >> Map(lambda x: x + 'y')
result = biglist | 'oom:check' >> Map(check_memory, memory_threshold)
assert_that(result, equal_to(
['x' * len_elements + 'y' * num_maps] * num_elements))
pipeline.run()
def test_aggregator_empty_input(self):
actual = [] | CombineGlobally(max).without_defaults()
self.assertEqual(actual, [])
def test_pipeline_as_context(self):
def raise_exception(exn):
raise exn
with self.assertRaises(ValueError):
with Pipeline() as p:
# pylint: disable=expression-not-assigned
p | Create([ValueError('msg')]) | Map(raise_exception)
# TODO(BEAM-1894).
# def test_eager_pipeline(self):
# p = Pipeline('EagerRunner')
# self.assertEqual([1, 4, 9], p | Create([1, 2, 3]) | Map(lambda x: x*x))
@mock.patch(
'apache_beam.runners.direct.direct_runner._get_transform_overrides')
def test_ptransform_overrides(self, file_system_override_mock):
class MyParDoOverride(PTransformOverride):
def matches(self, applied_ptransform):
return isinstance(applied_ptransform.transform, DoubleParDo)
def get_replacement_transform(self, ptransform):
if isinstance(ptransform, DoubleParDo):
return TripleParDo()
raise ValueError('Unsupported type of transform: %r' % ptransform)
def get_overrides(unused_pipeline_options):
return [MyParDoOverride()]
file_system_override_mock.side_effect = get_overrides
# Specify DirectRunner as it's the one patched above.
with Pipeline(runner='BundleBasedDirectRunner') as p:
pcoll = p | beam.Create([1, 2, 3]) | 'Multiply' >> DoubleParDo()
assert_that(pcoll, equal_to([3, 6, 9]))
def test_ptransform_override_type_hints(self):
class NoTypeHintOverride(PTransformOverride):
def matches(self, applied_ptransform):
return isinstance(applied_ptransform.transform, DoubleParDo)
def get_replacement_transform(self, ptransform):
return ToStringParDo()
class WithTypeHintOverride(PTransformOverride):
def matches(self, applied_ptransform):
return isinstance(applied_ptransform.transform, DoubleParDo)
def get_replacement_transform(self, ptransform):
return (ToStringParDo()
.with_input_types(int)
.with_output_types(str))
for override, expected_type in [(NoTypeHintOverride(), typehints.Any),
(WithTypeHintOverride(), str)]:
p = TestPipeline()
pcoll = (p
| beam.Create([1, 2, 3])
| 'Operate' >> DoubleParDo()
| 'NoOp' >> beam.Map(lambda x: x))
p.replace_all([override])
self.assertEqual(pcoll.producer.inputs[0].element_type, expected_type)
def test_kv_ptransform_honor_type_hints(self):
# The return type of this DoFn cannot be inferred by the default
# Beam type inference
class StatefulDoFn(DoFn):
BYTES_STATE = BagStateSpec('bytes', BytesCoder())
def return_recursive(self, count):
if count == 0:
return ["some string"]
else:
self.return_recursive(count-1)
def process(self, element, counter=DoFn.StateParam(BYTES_STATE)):
return self.return_recursive(1)
p = TestPipeline()
pcoll = (p
| beam.Create([(1, 1), (2, 2), (3, 3)])
| beam.GroupByKey()
| beam.ParDo(StatefulDoFn()))
p.run()
self.assertEqual(pcoll.element_type, typehints.Any)
p = TestPipeline()
pcoll = (p
| beam.Create([(1, 1), (2, 2), (3, 3)])
| beam.GroupByKey()
| beam.ParDo(StatefulDoFn()).with_output_types(str))
p.run()
self.assertEqual(pcoll.element_type, str)
def test_track_pcoll_unbounded(self):
pipeline = TestPipeline()
pcoll1 = pipeline | 'read' >> Read(FakeUnboundedSource())
pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
self.assertIs(pcoll1.is_bounded, False)
self.assertIs(pcoll2.is_bounded, False)
self.assertIs(pcoll3.is_bounded, False)
def test_track_pcoll_bounded(self):
pipeline = TestPipeline()
pcoll1 = pipeline | 'label1' >> Create([1, 2, 3])
pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
self.assertIs(pcoll1.is_bounded, True)
self.assertIs(pcoll2.is_bounded, True)
self.assertIs(pcoll3.is_bounded, True)
def test_track_pcoll_bounded_flatten(self):
pipeline = TestPipeline()
pcoll1_a = pipeline | 'label_a' >> Create([1, 2, 3])
pcoll2_a = pcoll1_a | 'do_a' >> FlatMap(lambda x: [x + 1])
pcoll1_b = pipeline | 'label_b' >> Create([1, 2, 3])
pcoll2_b = pcoll1_b | 'do_b' >> FlatMap(lambda x: [x + 1])
merged = (pcoll2_a, pcoll2_b) | beam.Flatten()
self.assertIs(pcoll1_a.is_bounded, True)
self.assertIs(pcoll2_a.is_bounded, True)
self.assertIs(pcoll1_b.is_bounded, True)
self.assertIs(pcoll2_b.is_bounded, True)
self.assertIs(merged.is_bounded, True)
def test_track_pcoll_unbounded_flatten(self):
pipeline = TestPipeline()
pcoll1_bounded = pipeline | 'label1' >> Create([1, 2, 3])
pcoll2_bounded = pcoll1_bounded | 'do1' >> FlatMap(lambda x: [x + 1])
pcoll1_unbounded = pipeline | 'read' >> Read(FakeUnboundedSource())
pcoll2_unbounded = pcoll1_unbounded | 'do2' >> FlatMap(lambda x: [x + 1])
merged = (pcoll2_bounded, pcoll2_unbounded) | beam.Flatten()
self.assertIs(pcoll1_bounded.is_bounded, True)
self.assertIs(pcoll2_bounded.is_bounded, True)
self.assertIs(pcoll1_unbounded.is_bounded, False)
self.assertIs(pcoll2_unbounded.is_bounded, False)
self.assertIs(merged.is_bounded, False)
class DoFnTest(unittest.TestCase):
def test_element(self):
class TestDoFn(DoFn):
def process(self, element):
yield element + 10
pipeline = TestPipeline()
pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
assert_that(pcoll, equal_to([11, 12]))
pipeline.run()
def test_side_input_no_tag(self):
class TestDoFn(DoFn):
def process(self, element, prefix, suffix):
return ['%s-%s-%s' % (prefix, element, suffix)]
pipeline = TestPipeline()
words_list = ['aa', 'bb', 'cc']
words = pipeline | 'SomeWords' >> Create(words_list)
prefix = 'zyx'
suffix = pipeline | 'SomeString' >> Create(['xyz']) # side in
result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
TestDoFn(), prefix, suffix=AsSingleton(suffix))
assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
pipeline.run()
def test_side_input_tagged(self):
class TestDoFn(DoFn):
def process(self, element, prefix, suffix=DoFn.SideInputParam):
return ['%s-%s-%s' % (prefix, element, suffix)]
pipeline = TestPipeline()
words_list = ['aa', 'bb', 'cc']
words = pipeline | 'SomeWords' >> Create(words_list)
prefix = 'zyx'
suffix = pipeline | 'SomeString' >> Create(['xyz']) # side in
result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
TestDoFn(), prefix, suffix=AsSingleton(suffix))
assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
pipeline.run()
def test_window_param(self):
class TestDoFn(DoFn):
def process(self, element, window=DoFn.WindowParam):
yield (element, (float(window.start), float(window.end)))
pipeline = TestPipeline()
pcoll = (pipeline
| Create([1, 7])
| Map(lambda x: TimestampedValue(x, x))
| WindowInto(windowfn=SlidingWindows(10, 5))
| ParDo(TestDoFn()))
assert_that(pcoll, equal_to([(1, (-5, 5)), (1, (0, 10)),
(7, (0, 10)), (7, (5, 15))]))
pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())
assert_that(
pcoll2,
equal_to([
((1, (-5, 5)), (-5, 5)), ((1, (0, 10)), (0, 10)),
((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5, 15))]),
label='doubled windows')
pipeline.run()
def test_timestamp_param(self):
class TestDoFn(DoFn):
def process(self, element, timestamp=DoFn.TimestampParam):
yield timestamp
pipeline = TestPipeline()
pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))
pipeline.run()
def test_timestamp_param_map(self):
with TestPipeline() as p:
assert_that(
p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.TimestampParam: t),
equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))
class Bacon(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--slices', type=int)
class Eggs(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--style', default='scrambled')
class Breakfast(Bacon, Eggs):
pass
class PipelineOptionsTest(unittest.TestCase):
def test_flag_parsing(self):
options = Breakfast(['--slices=3', '--style=sunny side up', '--ignored'])
self.assertEqual(3, options.slices)
self.assertEqual('sunny side up', options.style)
def test_keyword_parsing(self):
options = Breakfast(
['--slices=3', '--style=sunny side up', '--ignored'],
slices=10)
self.assertEqual(10, options.slices)
self.assertEqual('sunny side up', options.style)
def test_attribute_setting(self):
options = Breakfast(slices=10)
self.assertEqual(10, options.slices)
options.slices = 20
self.assertEqual(20, options.slices)
def test_view_as(self):
generic_options = PipelineOptions(['--slices=3'])
self.assertEqual(3, generic_options.view_as(Bacon).slices)
self.assertEqual(3, generic_options.view_as(Breakfast).slices)
generic_options.view_as(Breakfast).slices = 10
self.assertEqual(10, generic_options.view_as(Bacon).slices)
with self.assertRaises(AttributeError):
generic_options.slices # pylint: disable=pointless-statement
with self.assertRaises(AttributeError):
generic_options.view_as(Eggs).slices # pylint: disable=expression-not-assigned
def test_defaults(self):
options = Breakfast(['--slices=3'])
self.assertEqual(3, options.slices)
self.assertEqual('scrambled', options.style)
def test_dir(self):
options = Breakfast()
self.assertEqual(
set(['from_dictionary', 'get_all_options', 'slices', 'style',
'view_as', 'display_data']),
set([attr for attr in dir(options) if not attr.startswith('_') and
attr != 'next']))
self.assertEqual(
set(['from_dictionary', 'get_all_options', 'style', 'view_as',
'display_data']),
set([attr for attr in dir(options.view_as(Eggs))
if not attr.startswith('_') and attr != 'next']))
class RunnerApiTest(unittest.TestCase):
def test_parent_pointer(self):
class MyPTransform(beam.PTransform):
def expand(self, p):
self.p = p
return p | beam.Create([None])
p = beam.Pipeline()
p | MyPTransform() # pylint: disable=expression-not-assigned
p = Pipeline.from_runner_api(
Pipeline.to_runner_api(p, use_fake_coders=True), None, None)
self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)
self.assertEqual(p.transforms_stack[0].parts[0].parent,
p.transforms_stack[0])
class DirectRunnerRetryTests(unittest.TestCase):
def test_retry_fork_graph(self):
# TODO(BEAM-3642): The FnApiRunner does not currently support retries.
p = beam.Pipeline(runner='BundleBasedDirectRunner')
# TODO(mariagh): Remove the use of globals from the test.
global count_b, count_c # pylint: disable=global-variable-undefined
count_b, count_c = 0, 0
def f_b(x):
global count_b # pylint: disable=global-variable-undefined
count_b += 1
raise Exception('exception in f_b')
def f_c(x):
global count_c # pylint: disable=global-variable-undefined
count_c += 1
raise Exception('exception in f_c')
names = p | 'CreateNodeA' >> beam.Create(['Ann', 'Joe'])
fork_b = names | 'SendToB' >> beam.Map(f_b) # pylint: disable=unused-variable
fork_c = names | 'SendToC' >> beam.Map(f_c) # pylint: disable=unused-variable
with self.assertRaises(Exception):
p.run().wait_until_finish()
assert count_b == count_c == 4
def test_no_partial_writeouts(self):
class TestTransformEvaluator(_TransformEvaluator):
def __init__(self):
self._execution_context = _ExecutionContext(None, {})
def start_bundle(self):
self.step_context = self._execution_context.get_step_context()
def process_element(self, element):
k, v = element
state = self.step_context.get_keyed_state(k)
state.add_state(None, _GroupByKeyOnlyEvaluator.ELEMENTS_TAG, v)
# Create instance and add key/value, key/value2
evaluator = TestTransformEvaluator()
evaluator.start_bundle()
self.assertIsNone(evaluator.step_context.existing_keyed_state.get('key'))
self.assertIsNone(evaluator.step_context.partial_keyed_state.get('key'))
evaluator.process_element(['key', 'value'])
self.assertEqual(
evaluator.step_context.existing_keyed_state['key'].state,
defaultdict(lambda: defaultdict(list)))
self.assertEqual(
evaluator.step_context.partial_keyed_state['key'].state,
{None: {'elements':['value']}})
evaluator.process_element(['key', 'value2'])
self.assertEqual(
evaluator.step_context.existing_keyed_state['key'].state,
defaultdict(lambda: defaultdict(list)))
self.assertEqual(
evaluator.step_context.partial_keyed_state['key'].state,
{None: {'elements':['value', 'value2']}})
# Simulate an exception (redo key/value)
evaluator._execution_context.reset()
evaluator.start_bundle()
evaluator.process_element(['key', 'value'])
self.assertEqual(
evaluator.step_context.existing_keyed_state['key'].state,
defaultdict(lambda: defaultdict(list)))
self.assertEqual(
evaluator.step_context.partial_keyed_state['key'].state,
{None: {'elements':['value']}})
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| {
"content_hash": "576e726909224f33499fe90dc8456548",
"timestamp": "",
"source": "github",
"line_count": 759,
"max_line_length": 85,
"avg_line_length": 35.62582345191041,
"alnum_prop": 0.6440088757396449,
"repo_name": "markflyhigh/incubator-beam",
"id": "d1d9d0dfd791545fde48d825d082c1037fe88d26",
"size": "27825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/pipeline_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1596"
},
{
"name": "CSS",
"bytes": "40964"
},
{
"name": "Dockerfile",
"bytes": "22983"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2508482"
},
{
"name": "Groovy",
"bytes": "300669"
},
{
"name": "HTML",
"bytes": "54277"
},
{
"name": "Java",
"bytes": "24796055"
},
{
"name": "JavaScript",
"bytes": "16472"
},
{
"name": "Jupyter Notebook",
"bytes": "54182"
},
{
"name": "Python",
"bytes": "4544133"
},
{
"name": "Ruby",
"bytes": "4099"
},
{
"name": "Shell",
"bytes": "180209"
}
],
"symlink_target": ""
} |
from os.path import join as pathjoin
from components.UI.textMenuItem import textMenuItem
from game import Game
from components.UI.menu import menu
class CFMenu(menu):
""" Represents the critical failure menu"""
def __init__(self, screen, keys, config, sounds, modifiers, log):
self.logSectionName = "CFMenu"
self.modifiers = modifiers
super().__init__(screen, keys, config, sounds, log)
def newCFGame(self):
self.running = False
self.modlogger.info("Starting" + pathjoin("data",
"campaigns",
"main.cmp"))
Game().main(self.screen, self.keys,
"criticalfailure",
pathjoin("data",
"campaigns",
"main.cmp"),
self.config,
self.sounds,
self.modifiers,
self.mainLogger)
def newCFSGame(self):
self.running = False
Game().main(self.screen, self.keys,
"cfsingle",
pathjoin("data",
"campaigns",
"main.cmp"),
self.config,
self.sounds,
self.modifiers,
self.mainLogger)
def makeCFMenu(self):
self.sd = textMenuItem("Start Shared Time Mode", (50, 180),
lambda: self.editDesc(
"All Rooms share the same timer."),
lambda: self.newCFGame(),
self.config, self.sounds, self.font)
self.items.append(self.sd)
self.activeItems.append(self.sd)
def makeCFSMenu(self):
self.sds = textMenuItem("Start Separated Times Mode", (50, 240),
lambda: self.editDesc(
"Each room has its timer."),
lambda: self.newCFSGame(),
self.config, self.sounds, self.font)
self.items.append(self.sds)
self.activeItems.append(self.sds)
def makeMainMenuItem(self):
self.mainmenu = textMenuItem("Previous Menu", (50, 560),
lambda: self.editDesc(
"Go to the previous menu"),
lambda: self.goToMenu(),
self.config, self.sounds, self.font)
self.items.append(self.mainmenu)
self.activeItems.append(self.mainmenu)
def makeMenuItems(self):
self.makeCFMenu()
self.makeCFSMenu()
self.makeMainMenuItem()
| {
"content_hash": "f25b5e6bb55e2189c6a2cc889b4febce",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 73,
"avg_line_length": 38.64383561643836,
"alnum_prop": 0.46508330379298124,
"repo_name": "Penaz91/Glitch_Heaven",
"id": "3e2a3831252b02f7de3110e634293722cdad20cb",
"size": "2938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Game/cfmenu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "253446"
}
],
"symlink_target": ""
} |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.EGL import _types as _cs
# End users want this...
from OpenGL.raw.EGL._types import *
from OpenGL.raw.EGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'EGL_KHR_get_all_proc_addresses'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.EGL,'EGL_KHR_get_all_proc_addresses',error_checker=_errors._error_checker)
| {
"content_hash": "129f0949d40063e37daa0b42917edbf5",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 125,
"avg_line_length": 34.733333333333334,
"alnum_prop": 0.7504798464491362,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "feb261b1cefbd462212236a6b6884e9efdc83a09",
"size": "521",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/EGL/KHR/get_all_proc_addresses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes
that features selected at one time point are selected for all
time points. This makes feature selection by the Lasso more stable.
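
For reference, the optimization objective documented for
:class:`~sklearn.linear_model.MultiTaskLasso` (restated here only as a sketch;
``W`` plays the role of the fitted coefficient matrix) is

.. math::

   \frac{1}{2 n_{\text{samples}}} ||Y - X W||_{\text{Fro}}^2 + \alpha ||W||_{21}

where :math:`||W||_{21} = \sum_i \sqrt{\sum_j W_{ij}^2}` is the sum of the
Euclidean norms of the rows of ``W``, so each feature is kept or discarded
jointly across all tasks.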
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
# %%
# Generate data
# -------------
import numpy as np
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1.0 + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
# %%
# Fit models
# ----------
from sklearn.linear_model import MultiTaskLasso, Lasso
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.0).fit(X, Y).coef_
# %%
# Plot support and time series
# ----------------------------
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel("Feature")
plt.ylabel("Time (or Task)")
plt.text(10, 5, "Lasso")
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel("Feature")
plt.ylabel("Time (or Task)")
plt.text(10, 5, "MultiTaskLasso")
fig.suptitle("Coefficient non-zero location")
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color="seagreen", linewidth=lw, label="Ground truth")
plt.plot(
coef_lasso_[:, feature_to_plot], color="cornflowerblue", linewidth=lw, label="Lasso"
)
plt.plot(
coef_multi_task_lasso_[:, feature_to_plot],
color="gold",
linewidth=lw,
label="MultiTaskLasso",
)
plt.legend(loc="upper center")
plt.axis("tight")
plt.ylim([-1.1, 1.1])
plt.show()
| {
"content_hash": "044a2341c5223c656f89a7f6ec29ff3f",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 88,
"avg_line_length": 28.170731707317074,
"alnum_prop": 0.6653679653679654,
"repo_name": "anntzer/scikit-learn",
"id": "a30b51ed7a7fe3395da4fc6ceba4d5720e90c33a",
"size": "2310",
"binary": false,
"copies": "12",
"ref": "refs/heads/main",
"path": "examples/linear_model/plot_multi_task_lasso_support.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "667491"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10429261"
},
{
"name": "Shell",
"bytes": "43325"
}
],
"symlink_target": ""
} |
from django.utils.module_loading import import_string
from drfpasswordless.settings import api_settings
from drfpasswordless.utils import (
create_callback_token_for_user,
)
class TokenService(object):
@staticmethod
def send_token(user, alias_type, token_type, **message_payload):
token = create_callback_token_for_user(user, alias_type, token_type)
send_action = None
if user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys():
return True
if alias_type == 'email':
send_action = import_string(api_settings.PASSWORDLESS_EMAIL_CALLBACK)
elif alias_type == 'mobile':
send_action = import_string(api_settings.PASSWORDLESS_SMS_CALLBACK)
# Send to alias
success = send_action(user, token, **message_payload)
return success
| {
"content_hash": "7c6a7803d201c3e3b0909c6a3c3c2a70",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 81,
"avg_line_length": 37.86363636363637,
"alnum_prop": 0.6782713085234093,
"repo_name": "aaronn/django-rest-framework-passwordless",
"id": "efa161c83587e028555f43afa4b426d5ee6e471b",
"size": "833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drfpasswordless/services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "394"
},
{
"name": "Python",
"bytes": "89165"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_armor_tantel_helmet.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "90f81983fd8be89692bba115248f9548",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 92,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.7040498442367601,
"repo_name": "anhstudios/swganh",
"id": "c78889e285d88b094f663dec90a2b8ea53cc9730",
"size": "466",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/clothing/shared_clothing_armor_tantel_helmet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from what_transcode.utils import get_mp3_ids
class UtilsTests(TestCase):
def test_get_mp3_ids(self):
what_group = {
'torrents': [
{
'id': 0,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 1,
'format': 'MP3',
'encoding': '320',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 2,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': True,
'remasterCatalogueNumber': 'catno',
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 3,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 4,
'format': 'MP3',
'encoding': 'V0 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 5,
'format': 'MP3',
'encoding': 'V2 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
]
}
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][0]
}), {'320': 1})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][2]
}), {})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][3]
}), {'V0': 4, 'V2': 5})
| {
"content_hash": "bcd7bc72dbb2020ca5e105015e6c2689",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 34.763440860215056,
"alnum_prop": 0.38478193628209095,
"repo_name": "grandmasterchef/WhatManager2",
"id": "4d20568f0a8594030cebb90be8a1a7446bb56676",
"size": "3233",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "what_transcode/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "202604"
},
{
"name": "HTML",
"bytes": "101582"
},
{
"name": "JavaScript",
"bytes": "718795"
},
{
"name": "Python",
"bytes": "333372"
},
{
"name": "Shell",
"bytes": "2220"
}
],
"symlink_target": ""
} |
from defines import *
import mobula
import mobula.layers as L
import mobula.solvers as S
import os
from LeNet5 import *
INPUT_FILE = "./train.csv"
RESULT_PATH = "./mnist_kaggle"
# Create the directory if it doesn't exist
if not os.path.exists(RESULT_PATH):
os.mkdir(RESULT_PATH)
# Load Data
try:
fin = open(INPUT_FILE)
except:
print ("%s doesn't exist. Please download the dataset on Kaggle: https://www.kaggle.com/c/digit-recognizer" % INPUT_FILE)
import sys
sys.exit()
data = np.loadtxt(fin, delimiter = ",", skiprows = 1)
fin.close()
n = len(data)
X = data[:, 1:]
labels = data[:, 0].astype(np.int)
# one-hot
#Y = np.eye(10)[labels.ravel()]
print ("Read OK", n)
Xmean = np.mean(X, 0)
np.save("xmean.npy", Xmean)
# Subtract mean and normalize
X = (X - Xmean) / 255.0
# transfer the shape of X to NCHW
# N, C, H, W = n, 1, 28, 28
X.resize((n, 1, 28, 28))
# Get LeNet5
nn = LeNet5(X, labels)
net = nn.net
# Set Solver
net.set_solver(S.Momentum())
# Learning Rate
net.lr = 0.005
'''
If start_iter > 0, load the existing model and continue training.
Otherwise, initialize the weights and start training.
'''
start_iter = 0
max_iter = 100000
filename = RESULT_PATH + "/kaggle%d.net"
if start_iter > 0:
# Load the weights from the existing model
net.load(filename % start_iter)
for i in range(start_iter, max_iter):
net.forward()
net.backward()
if i % 100 == 0:
print ("Iter: %d, Cost: %f" % (i, nn.loss))
net.time()
# test accuracy over 30 iterations
vs = []
for u in range(30):
net.forward()
pre = np.argmax(nn.Y,1).ravel()
ra = nn.label.ravel()
if u % 10 == 0:
print ((pre, ra))
bs = (pre == ra)
b = np.mean(bs)
vs.append(b)
acc = np.mean(vs)
print ("Accuracy: %f" % (acc))
if i % 200 == 0 and acc > 0.95:
net.save(filename % i)
print ("Over :-)")
| {
"content_hash": "a5d55e7ad6b86bb9f3697acc115aa0ee",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 125,
"avg_line_length": 22.078651685393258,
"alnum_prop": 0.5816793893129771,
"repo_name": "wkcn/mobula",
"id": "aaeb9d27094b15864e3166e7191c55a48ff0728c",
"size": "1965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/mnist_train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131507"
}
],
"symlink_target": ""
} |
"""
sentry.services.smtp
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import asyncore
import email
import logging
from smtpd import SMTPServer, SMTPChannel
from email_reply_parser import EmailReplyParser
#
# from sentry.services.base import Service
# from sentry.tasks.email import process_inbound_email
# from sentry.utils.email import email_to_group_id
#
# logger = logging.getLogger(__name__)
#
#
# # HACK(mattrobenolt): literally no idea what I'm doing. Mostly made this up.
# # SMTPChannel doesn't support EHLO response, but nginx requires an EHLO.
# # EHLO is available in python 3, so this is backported somewhat
# def smtp_EHLO(self, arg):
# if not arg:
# self.push('501 Syntax: EHLO hostname')
# return
# if self._SMTPChannel__greeting:
# self.push('503 Duplicate HELO/EHLO')
# else:
# self._SMTPChannel__greeting = arg
# self.push('250 %s' % self._SMTPChannel__fqdn)
#
# SMTPChannel.smtp_EHLO = smtp_EHLO
#
#
# STATUS = {
# 200: '200 Ok',
# 550: '550 Not found',
# 552: '552 Message too long',
# }
#
#
# class SentrySMTPServer(Service, SMTPServer):
# name = 'smtp'
# max_message_length = 20000 # This might be too conservative
#
# def __init__(self, host=None, port=None, debug=False, workers=None):
# from django.conf import settings
#
# self.host = host or getattr(settings, 'SENTRY_SMTP_HOST', '0.0.0.0')
# self.port = port or getattr(settings, 'SENTRY_SMTP_PORT', 1025)
#
# def process_message(self, peer, mailfrom, rcpttos, raw_message):
# logger.info('Incoming message received from %s', mailfrom)
# if not len(rcpttos):
# logger.info('Incoming email had no recipients. Ignoring.')
# return STATUS[550]
#
# if len(raw_message) > self.max_message_length:
# logger.info('Inbound email message was too long: %d', len(raw_message))
# return STATUS[552]
#
# try:
# group_id = email_to_group_id(rcpttos[0])
# except Exception:
# logger.info('%r is not a valid email address', rcpttos)
# return STATUS[550]
#
# message = email.message_from_string(raw_message)
# payload = None
# if message.is_multipart():
# for msg in message.walk():
# if msg.get_content_type() == 'text/plain':
# payload = msg.get_payload()
# break
# if payload is None:
# # No text/plain part, bailing
# return STATUS[200]
# else:
# payload = message.get_payload()
#
# payload = EmailReplyParser.parse_reply(payload).strip()
# if not payload:
# # If there's no body, we don't need to go any further
# return STATUS[200]
#
# process_inbound_email.delay(mailfrom, group_id, payload)
# return STATUS[200]
#
# def run(self):
# SMTPServer.__init__(self, (self.host, self.port), None)
# try:
# asyncore.loop()
# except KeyboardInterrupt:
# pass
| {
"content_hash": "b5a1c6fff2f43e4c48938bb9e83d109d",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 85,
"avg_line_length": 33.03030303030303,
"alnum_prop": 0.5975535168195719,
"repo_name": "wanghe4096/WangBlog",
"id": "5308a02f6f48965bd69f993a4e2a4d7c83a89d30",
"size": "3270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/wangblog/services/smtp.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "100891"
},
{
"name": "HTML",
"bytes": "138081"
},
{
"name": "JavaScript",
"bytes": "227431"
},
{
"name": "Python",
"bytes": "97173"
}
],
"symlink_target": ""
} |
import os, sys
import numpy as np
import pandas as pd
from bagofwords import BagOfWords, StandardizingVector
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
import SonicScrewdriver as utils
import random
def dirty_pairtree(htid):
period = htid.find('.')
prefix = htid[0:period]
postfix = htid[(period+1): ]
if '=' in postfix:
postfix = postfix.replace('+',':')
postfix = postfix.replace('=','/')
dirtyname = prefix + "." + postfix
return dirtyname
def select_common_features(trainingset, n):
''' Very simply, selects the top n features in the training set.
Not a sophisticated feature-selection strategy, but in many
cases it gets the job done.
'''
allwordcounts = dict()
for avolume in trainingset:
utils.add_dicts(avolume.rawcounts, allwordcounts)
# The add_dicts function will add up all the raw counts into
# a single master dictionary.
descendingbyfreq = utils.sortkeysbyvalue(allwordcounts, whethertoreverse = True)
# This returns a list of 2-tuple (frequency, word) pairs.
if n > len(descendingbyfreq):
n = len(descendingbyfreq)
print("We only have " + str(n) + " features.")
# List comprehension that gets the second element of each tuple, up to
# a total of n tuples.
topfeatures = [x[1] for x in descendingbyfreq[0 : n]]
return topfeatures
def get_classvector(classpath, volumeIDs):
with open(classpath, encoding = 'utf-8') as f:
filelines = f.readlines()
classdict = dict()
for line in filelines:
line = line.rstrip()
fields = line.split('\t')
volid = fields[0]
theclass = fields[1]
classdict[volid] = int(theclass)
classvector = np.zeros(len(volumeIDs))
for idx, anid in enumerate(volumeIDs):
anid = dirty_pairtree(anid)
if anid in classdict:
classvector[idx] = classdict[anid]
else:
print('Missing from class metadata: ' + anid)
return classvector
def train_a_model(sourcefolder, extension, include_punctuation, maxfeatures, outputfolder, classpath):
if not os.path.exists(outputfolder):
os.makedirs(outputfolder)
if not sourcefolder.endswith('/'):
sourcefolder = sourcefolder + '/'
if not outputfolder.endswith('/'):
outputfolder = outputfolder + '/'
# This just makes things easier.
# Get a list of files.
allthefiles = os.listdir(sourcefolder)
random.shuffle(allthefiles)
# Now we have a list of file names. But we want volumeIDs, paired with complete
# paths to the file. We're going to achieve the pairing by zipping two lists,
# rather than with a dict, because ordering also matters here.
volumeIDs = list()
volumepaths = list()
for filename in allthefiles:
if filename.endswith(extension):
volID = filename.replace(extension, "")
# The volume ID is basically the filename minus its extension.
# Extensions are likely to be long enough that there is little
# danger of accidental occurrence inside a filename. E.g.
# '.fic.tsv'
path = sourcefolder + filename
volumeIDs.append(volID)
volumepaths.append(path)
# Get the class vector, indexed by volume ID
classvector = get_classvector(classpath, volumeIDs)
assert len(classvector) == len(volumeIDs)
# Now we actually read volumes and create a training corpus, which will
# be a list of bags of words.
trainingset = list()
for volID, filepath in zip(volumeIDs, volumepaths):
volume = BagOfWords(filepath, volID, include_punctuation)
# That reads the volume from disk.
trainingset.append(volume)
# We select the most common words as features.
featurelist = select_common_features(trainingset, maxfeatures)
numfeatures = len(featurelist)
# Note that the number of features we actually got is not necessarily
# the same as maxfeatures.
for volume in trainingset:
volume.selectfeatures(featurelist)
volume.normalizefrequencies()
# The volume now contains feature frequencies:
# raw counts have been divided by the total number of words in the volume.
standardizer = StandardizingVector(trainingset, featurelist)
# This object calculates the means and standard deviations of all features
# across the training set.
listofvolumefeatures = list()
for volume in trainingset:
volume.standardizefrequencies(standardizer)
# We have now converted frequencies to z scores. This is important for
# regularized logistic regression -- otherwise the regularization
# gets distributed unevenly across variables because they're scaled
# differently.
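# (A sketch of the standardization assumed here: for each feature j and
# volume i, z_ij = (freq_ij - mean_j) / std_j, where mean_j and std_j come
# from the StandardizingVector computed over the training set.)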
listofvolumefeatures.append(volume.features)
# Now let's make a data frame by concatenating each volume as a separate column,
# aligned on the features that index rows.
data = pd.concat(listofvolumefeatures, axis = 1)
data.columns = volumeIDs
# Name the columns for volumes. Then transpose the matrix:
data = data.T
# So that we have a matrix with features (variables) as columns and instances (volumes)
# as rows. Would have been easier to make this directly, but I don't know a neat
# way to do it in pandas.
logisticmodel = LogisticRegression(C = 0.1)
classvector = classvector.astype('int')
logisticmodel.fit(data, classvector)
# Let's sort the features by their coefficient in the model, and print.
coefficients = list(zip(logisticmodel.coef_[0], featurelist))
coefficients.sort()
for coefficient, word in coefficients:
print(word + " : " + str(coefficient))
# Pickle and write the model & standardizer. This will allow us to apply the model to
# new documents of unknown genre.
modelfile = outputfolder + "logisticmodel.p"
with open(modelfile, mode = 'wb') as f:
pickle.dump(logisticmodel, f)
standardizerfile = outputfolder + "standardizer.p"
with open(standardizerfile, mode = 'wb') as f:
pickle.dump(standardizer, f)
accuracy_tries = cross_validation.cross_val_score(logisticmodel, data, classvector, cv=10)
print(accuracy_tries)
print(np.sum(accuracy_tries) / len(accuracy_tries))
random.shuffle(classvector)
print('\nASSVECTOR!\n')
accuracy_tries = cross_validation.cross_val_score(logisticmodel, data, classvector, cv=10)
print(accuracy_tries)
print(np.sum(accuracy_tries) / len(accuracy_tries))
# Yay, we're done.
if __name__ == "__main__":
sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/poe1919/'
extension = '.poe.tsv'
include_punctuation = False
maxfeatures = 1600
outputfolder = '/Users/tunder/Dropbox/GenreProject/python/reception/model1919/'
metapath = '/Users/tunder/Dropbox/GenreProject/metadata/poemeta1919.tsv'
train_a_model(sourcefolder, extension, include_punctuation, maxfeatures, outputfolder, metapath)
| {
"content_hash": "a59f49470689e1d811ff9eb81e1dad32",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 102,
"avg_line_length": 31.373205741626794,
"alnum_prop": 0.7439377764221443,
"repo_name": "tedunderwood/GenreProject",
"id": "98dd1a38aef0c03bfea62ec8b8ededa387d0dfca",
"size": "6557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/workshop/model_boundary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "187389"
},
{
"name": "Python",
"bytes": "645172"
},
{
"name": "R",
"bytes": "34870"
}
],
"symlink_target": ""
} |
"""HTTP server classes.
Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see
SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST,
and CGIHTTPRequestHandler for CGI scripts.
It does, however, optionally implement HTTP/1.1 persistent connections,
as of version 0.3.
Notes on CGIHTTPRequestHandler
------------------------------
This class implements GET and POST requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
subprocess.Popen() is used as a fallback, with slightly altered semantics.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.6"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import html
import email.message
import email.parser
import http.client
import io
import mimetypes
import os
import posixpath
import select
import shutil
import socket # For gethostbyaddr()
import socketserver
import sys
import time
import urllib.parse
import copy
# Default error message template
DEFAULT_ERROR_MESSAGE = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code: %(code)d</p>
<p>Message: %(message)s.</p>
<p>Error code explanation: %(code)s - %(explain)s.</p>
</body>
</html>
"""
DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8"
def _quote_html(html):
return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
class HTTPServer(socketserver.TCPServer):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
socketserver.TCPServer.server_bind(self)
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of email.message.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
error_message_format = DEFAULT_ERROR_MESSAGE
error_content_type = DEFAULT_ERROR_CONTENT_TYPE
# The default request version. This only affects responses up until
# the point where the request line is parsed, so it mainly decides what
# the client gets back when sending a malformed request line.
# Most web servers default to HTTP 0.9, i.e. don't send a status line.
default_request_version = "HTTP/0.9"
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = 1
requestline = str(self.raw_requestline, 'iso-8859-1')
requestline = requestline.rstrip('\r\n')
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
command, path, version = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
command, path = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive.
try:
self.headers = http.client.parse_headers(self.rfile,
_class=self.MessageClass)
except http.client.LineTooLong:
self.send_error(400, "Line too long")
return False
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
# Examine the headers and look for an Expect directive
expect = self.headers.get('Expect', "")
if (expect.lower() == "100-continue" and
self.protocol_version >= "HTTP/1.1" and
self.request_version >= "HTTP/1.1"):
if not self.handle_expect_100():
return False
return True
def handle_expect_100(self):
"""Decide what to do with an "Expect: 100-continue" header.
If the client is expecting a 100 Continue response, we must
respond with either a 100 Continue or a final response before
waiting for the request body. The default is to always respond
with a 100 Continue. You can behave differently (for example,
reject unauthorized requests) by overriding this method.
This method should either return True (possibly after sending
a 100 Continue response) or send an error response and return
False.
"""
self.send_response_only(100)
return True
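# A sketch of such an override (assumed subclass, for illustration only):
# reject the request up front instead of always continuing.
#
# class PickyHandler(BaseHTTPRequestHandler):
#     def handle_expect_100(self):
#         if self.headers.get('Authorization') is None:
#             self.send_error(401, "Authorization required")
#             return False
#         self.send_response_only(100)
#         return True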
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
try:
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
# An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
self.wfile.flush() #actually send the response if not already done.
except socket.timeout as e:
#a read or a write timed out. Discard this connection
self.log_error("Request timed out: %r", e)
self.close_connection = 1
return
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
shortmsg, longmsg = self.responses[code]
except KeyError:
shortmsg, longmsg = '???', '???'
if message is None:
message = shortmsg
explain = longmsg
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", self.error_content_type)
self.send_header('Connection', 'close')
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content.encode('UTF-8', 'replace'))
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
self.send_response_only(code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_response_only(self, code, message=None):
"""Send the response header only."""
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write(("%s %d %s\r\n" %
(self.protocol_version, code, message)).encode('latin1', 'strict'))
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append(
("%s: %s\r\n" % (keyword, value)).encode('latin1', 'strict'))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self._headers_buffer.append(b"\r\n")
self.wfile.write(b"".join(self._headers_buffer))
self._headers_buffer = []
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, format, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(format, *args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client host and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address[:2]
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# MessageClass used to parse headers
MessageClass = http.client.HTTPMessage
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this resource.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
r = []
displaypath = html.escape(urllib.parse.unquote(self.path))
r.append('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
r.append("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
r.append("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
r.append("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
r.append('<li><a href="%s">%s</a>\n'
% (urllib.parse.quote(linkname), html.escape(displayname)))
r.append("</ul>\n<hr>\n</body>\n</html>\n")
enc = sys.getfilesystemencoding()
encoded = ''.join(r).encode(enc)
f = io.BytesIO()
f.write(encoded)
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html; charset=%s" % enc)
self.send_header("Content-Length", str(len(encoded)))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.parse.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
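# For example, with the process working directory as the document root, a
# request path of '/docs/./readme.txt?download=1' translates to
# os.path.join(os.getcwd(), 'docs', 'readme.txt').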
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
-- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
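# For example, '.HTML' misses the first lookup, is lowered to '.html' and
# maps to 'text/html'; an unknown extension falls back to the
# 'application/octet-stream' default registered below.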
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
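# A subclass may extend the table; a minimal sketch (the subclass name and
# the added mapping are illustrative only):
#
#     class TextFriendlyHandler(SimpleHTTPRequestHandler):
#         extensions_map = dict(SimpleHTTPRequestHandler.extensions_map)
#         extensions_map['.log'] = 'text/plain'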
# Utilities for CGIHTTPRequestHandler
# TODO(gregory.p.smith): Move this into an appropriate library.
def _url_collapse_path_split(path):
"""
Given a URL path, remove extra '/'s and '.' path elements and collapse
any '..' references.
Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
Returns: A tuple of (head, tail) where tail is everything after the final /
and head is everything before it. Head will always start with a '/' and,
if it contains anything else, never have a trailing '/'.
Raises: IndexError if too many '..' occur within the path.
"""
# Similar to os.path.split(os.path.normpath(path)) but specific to URL
# path semantics rather than local operating system semantics.
path_parts = []
for part in path.split('/'):
if part == '.':
path_parts.append('')
else:
path_parts.append(part)
# Filter out blank non-trailing parts before consuming the '..'.
path_parts = [part for part in path_parts[:-1] if part] + path_parts[-1:]
if path_parts:
tail_part = path_parts.pop()
else:
tail_part = ''
head_parts = []
for part in path_parts:
if part == '..':
head_parts.pop()
else:
head_parts.append(part)
if tail_part and tail_part == '..':
head_parts.pop()
tail_part = ''
return ('/' + '/'.join(head_parts), tail_part)
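# Worked examples (traced against the implementation above):
#   _url_collapse_path_split('/a/b/../c/./d.html')  ->  ('/a/c', 'd.html')
#   _url_collapse_path_split('/cgi-bin/')           ->  ('/cgi-bin', '')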
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(x[2] for x in pwd.getpwall())
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return False
return st.st_mode & 0o111 != 0
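# The 0o111 mask covers the owner, group and other execute bits, so a file
# counts as executable if any one of them is set.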
class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
Returns True and updates the cgi_info attribute to the tuple
(dir, rest) if self.path requires running a CGI script.
Returns False otherwise.
If any exception is raised, the caller should assume that
self.path was rejected as invalid and act accordingly.
The default implementation tests whether the normalized url
path begins with one of the strings in self.cgi_directories
(and the next character is a '/' or the end of the string).
"""
splitpath = _url_collapse_path_split(self.path)
if splitpath[0] in self.cgi_directories:
self.cgi_info = splitpath
return True
return False
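# For example, with the default cgi_directories below, '/cgi-bin/test.py?x=1'
# yields cgi_info == ('/cgi-bin', 'test.py?x=1') and returns True, while
# '/static/page.html' returns False.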
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
path = self.path
dir, rest = self.cgi_info
i = path.find('/', len(dir) + 1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir) + 1)
else:
break
# find an explicit query string, if present.
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
# dissect the part after the directory name into a script name &
# a possible additional path, to be stored in PATH_INFO.
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if not ispy:
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = copy.deepcopy(os.environ)
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.parse.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.get("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = authorization[1].encode('ascii')
authorization = base64.decodebytes(authorization).\
decode('ascii')
except (binascii.Error, UnicodeError):
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.get('content-type') is None:
env['CONTENT_TYPE'] = self.headers.get_content_type()
else:
env['CONTENT_TYPE'] = self.headers['content-type']
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.get('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.get('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.get_all('cookie', []))
cookie_str = ', '.join(co)
if cookie_str:
env['HTTP_COOKIE'] = cookie_str
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, env)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
else:
# Non-Unix -- use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = [interp, '-u'] + cmdline
if '=' not in query:
cmdline.append(query)
self.log_message("command: %s", subprocess.list2cmdline(cmdline))
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
p = subprocess.Popen(cmdline,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env = env
)
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
else:
data = None
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
stdout, stderr = p.communicate(data)
self.wfile.write(stdout)
if stderr:
self.log_error('%s', stderr)
p.stderr.close()
p.stdout.close()
status = p.returncode
if status:
self.log_error("CGI script exit status %#x", status)
else:
self.log_message("CGI script exited OK")
def test(HandlerClass = BaseHTTPRequestHandler,
ServerClass = HTTPServer, protocol="HTTP/1.0"):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the first command line
argument).
"""
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
server_address = ('', port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
httpd.server_close()
sys.exit(0)
if __name__ == '__main__':
test(HandlerClass=SimpleHTTPRequestHandler)
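# A minimal sketch of serving CGI scripts with the classes above (the port
# choice is illustrative only):
#
#     httpd = HTTPServer(('', 8000), CGIHTTPRequestHandler)
#     httpd.serve_forever()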
| {
"content_hash": "9d73963faeeec9c2106cb8cf1576be14",
"timestamp": "",
"source": "github",
"line_count": 1187,
"max_line_length": 97,
"avg_line_length": 36.65122156697557,
"alnum_prop": 0.5802091713596138,
"repo_name": "wdv4758h/ZipPy",
"id": "6642729af58b4534a55b775a51c6107da34d404a",
"size": "43505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib-python/3/http/server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "9447"
},
{
"name": "C",
"bytes": "106932"
},
{
"name": "CSS",
"bytes": "32004"
},
{
"name": "Groff",
"bytes": "27753"
},
{
"name": "HTML",
"bytes": "721863"
},
{
"name": "Java",
"bytes": "1550721"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Makefile",
"bytes": "16156"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "33672733"
},
{
"name": "R",
"bytes": "1959"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "3119"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "TeX",
"bytes": "8790"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
import os
from datetime import datetime
from logging import getLogger
from sqlalchemy.orm.exc import NoResultFound
from myarchive.db.tag_db.tables.file import TrackedFile
from myarchive.db.tag_db.tables.tag import Tag
from myarchive.db.tag_db.tables.yttables import YTPlaylist, YTVideo
from myarchive.libs import pafy
LOGGER = getLogger(__name__)
def download_youtube_playlists(db_session, media_storage_path, playlist_urls):
"""Downloads videos"""
LOGGER.warning(
"YouTube downloads can consume a large amount of disk space! Make sure "
"plenty is free before triggering video downloads.")
for playlist_url in playlist_urls:
playlist = pafy.get_playlist2(playlist_url=playlist_url)
LOGGER.info(
"Parsing playlist %s [%s]...", playlist.title, playlist.author)
try:
db_playlist = db_session.query(YTPlaylist).\
filter_by(plid=playlist.plid).one()
except NoResultFound:
db_playlist = YTPlaylist(
title=playlist.title,
author=playlist.author,
description=playlist.description,
plid=playlist.plid)
db_session.add(db_playlist)
total_bytes = 0
video_stream_tuples = []
for video in playlist:
try:
pafy_stream = video.getbest()
video_stream_tuples.append([video, pafy_stream])
total_bytes += pafy_stream.get_filesize()
except Exception as whatwasthat:
LOGGER.error(whatwasthat)
LOGGER.info("Playlist DL size: %s MB" % int(total_bytes / 2 ** 20))
for video, stream in video_stream_tuples:
LOGGER.info("Downloading %s...", stream.title)
temp_filepath = "/tmp/" + stream.title + "." + stream.extension
stream.download(filepath=temp_filepath)
try:
tracked_file, existing = TrackedFile.add_file(
db_session=db_session,
media_path=media_storage_path,
copy_from_filepath=temp_filepath,
move_original_file=True,
)
if existing is True:
os.remove(temp_filepath)
continue
else:
db_session.add(tracked_file)
ytvideo = YTVideo(
uploader=video.username,
description=video.description,
duration=video.duration,
publish_time=datetime.strptime(
video.published, "%Y-%m-%d %H:%M:%S"),
videoid=video.videoid
)
db_playlist.videos.append(ytvideo)
ytvideo.file = tracked_file
for keyword in video.keywords:
tag = Tag.get_tag(db_session=db_session, tag_name=keyword)
ytvideo.tags.append(tag)
tracked_file.tags.append(tag)
db_session.commit()
except:
db_session.rollback()
raise
db_session.commit()
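# A minimal usage sketch (SessionFactory and the path below are hypothetical;
# only download_youtube_playlists comes from this module):
#
#     session = SessionFactory()
#     download_youtube_playlists(
#         db_session=session,
#         media_storage_path="/srv/myarchive/media",
#         playlist_urls=["https://www.youtube.com/playlist?list=..."],
#     )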
| {
"content_hash": "e59c4c59020ccca24c78b4ecdbd17781",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 38.84146341463415,
"alnum_prop": 0.5547880690737833,
"repo_name": "zetasyanthis/myarchive",
"id": "b033291a76dae30b0dd77b4e0f183264a6bbeec4",
"size": "3388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/myarchive/libs/myarchive/youtube.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "525431"
}
],
"symlink_target": ""
} |