text (stringlengths 78–104k) | score (float64 0–0.18) |
---|---|
def avail_sizes(call=None):
'''
Return a list of the image sizes that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
params = {'Action': 'DescribeInstanceTypes'}
items = query(params=params)
ret = {}
for image in items['InstanceTypes']['InstanceType']:
ret[image['InstanceTypeId']] = {}
for item in image:
ret[image['InstanceTypeId']][item] = six.text_type(image[item])
return ret | 0.001629 |
def split_channels(image):
"""
Split channels of a multi-channel ANTsImage into a collection
of scalar ANTsImage types
Arguments
---------
image : ANTsImage
multi-channel image to split
Returns
-------
list of ANTsImage types
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'), 'float')
>>> image2 = ants.image_read(ants.get_ants_data('r16'), 'float')
>>> imagemerge = ants.merge_channels([image,image2])
>>> imagemerge.components == 2
>>> images_unmerged = ants.split_channels(imagemerge)
>>> len(images_unmerged) == 2
>>> images_unmerged[0].components == 1
"""
inpixeltype = image.pixeltype
dimension = image.dimension
components = 1
libfn = utils.get_lib_fn('splitChannels%s' % image._libsuffix)
itkimages = libfn(image.pointer)
antsimages = [iio.ANTsImage(pixeltype=inpixeltype, dimension=dimension,
components=components, pointer=itkimage) for itkimage in itkimages]
return antsimages | 0.004634 |
def indent_text(*strs, **kwargs):
""" indents text according to an operater string and a global indentation
level. returns a tuple of all passed args, indented according to the
operator string
indent: [defaults to +0]
The operator string, of the form
++n : increments the global indentation level by n and indents
+n : indents with the global indentation level + n
--n : decrements the global indentation level by n
-n : indents with the global indentation level - n
==n : sets the global indentation level to exactly n and indents
=n : indents with an indentation level of exactly n
"""
# python 2.7 workaround
indent = kwargs["indent"] if "indent" in kwargs else "+0"
autobreak = kwargs.get("autobreak", False)
char_limit = kwargs.get("char_limit", 80)
split_char = kwargs.get("split_char", " ")
strs = list(strs)
if autobreak:
for index, s in enumerate(strs):
if len(s) > char_limit:
strs[index] = []
spl = s.split(split_char)
result = []
collect = ""
for current_block in spl:
if len(current_block) + len(collect) > char_limit:
strs[index].append(collect[:-1] + "\n")
collect = " "
collect += current_block + split_char
strs[index].append(collect + "\n")
strs = flatten_list(strs)
global lasting_indent
if indent.startswith("++"):
lasting_indent = lasting_indent + int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("+"):
cur_indent = lasting_indent + int(indent[1:])
elif indent.startswith("--"):
lasting_indent = lasting_indent - int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("-"):
cur_indent = lasting_indent - int(indent[1:])
elif indent.startswith("=="):
lasting_indent = int(indent[2:])
cur_indent = lasting_indent
elif indent.startswith("="):
lasting_indent = int(indent[1:])
cur_indent = int(indent[1:])
else:
raise Exception(
"indent command format '%s' unrecognized (see the docstring)" % indent)
# mutate indentation level if needed
return tuple([" " * cur_indent] + [elem.replace("\n", "\n" + " " * cur_indent)
for elem in strs]) | 0.002798 |
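The operator-string convention in the docstring above is the interesting part of this snippet. The following standalone sketch re-implements only that parsing step as described in the docstring (the real function also relies on a module-global `lasting_indent` and a `flatten_list` helper that are not shown here), so the increment/offset/set behaviours can be checked in isolation.

```python
# Minimal, hypothetical re-implementation of the operator-string parsing
# described in the docstring above; names and behaviour follow the docstring,
# not the library's internals.
def parse_indent(indent, lasting_indent):
    """Return (new_lasting_indent, current_indent) for an operator string."""
    if indent.startswith("++"):
        lasting_indent += int(indent[2:])
        return lasting_indent, lasting_indent
    if indent.startswith("+"):
        return lasting_indent, lasting_indent + int(indent[1:])
    if indent.startswith("--"):
        lasting_indent -= int(indent[2:])
        return lasting_indent, lasting_indent
    if indent.startswith("-"):
        return lasting_indent, lasting_indent - int(indent[1:])
    if indent.startswith("=="):
        return int(indent[2:]), int(indent[2:])
    if indent.startswith("="):
        return lasting_indent, int(indent[1:])
    raise ValueError("indent command format %r unrecognized" % indent)

assert parse_indent("++2", 4) == (6, 6)   # increment the global level
assert parse_indent("+2", 4) == (4, 6)    # indent without changing the global level
assert parse_indent("==0", 4) == (0, 0)   # set the global level exactly
```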
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name)` identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of months to retrieve starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results] | 0.007729 |
def write_flows_to_gssha_time_series_ihg(self,
path_to_output_file,
connection_list_file,
date_search_start=None,
date_search_end=None,
daily=False,
filter_mode="mean"):
# pylint: disable=line-too-long
"""
Write out RAPID output to GSSHA time series ihg file
.. note:: See: http://www.gsshawiki.com/Surface_Water_Routing:Introducing_Dischage/Constituent_Hydrographs
.. note:: GSSHA project card is CHAN_POINT_INPUT
Parameters
----------
path_to_output_file: str
Path to the output ihg file.
connection_list_file: str
CSV file with link_id, node_id, baseflow, and rapid_rivid header
and rows with data.
date_search_start: :obj:`datetime.datetime`, optional
This is a datetime object with the date of the minimum date
for starting.
date_search_end: :obj:`datetime.datetime`, optional
This is a datetime object with the date of the maximum date
for ending.
daily: bool, optional
If True and the file is CF-Compliant, write out daily flows.
filter_mode: str, optional
You can get the daily average "mean" or the maximum "max".
Default is "mean".
Example connection list file::
link_id, node_id, baseflow, rapid_rivid
599, 1, 0.0, 80968
603, 1, 0.0, 80967
Example writing entire time series to file:
.. code:: python
from RAPIDpy import RAPIDDataset
path_to_rapid_qout = '/path/to/Qout.nc'
connection_list_file = '/path/to/connection_list_file.csv'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
#for writing entire time series to file
qout_nc.write_flows_to_gssha_time_series_ihg(
'/timeseries/Qout_3624735.ihg',
connection_list_file)
Example writing entire time series as daily average to file:
.. code:: python
from RAPIDpy import RAPIDDataset
path_to_rapid_qout = '/path/to/Qout.nc'
connection_list_file = '/path/to/connection_list_file.csv'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
# if file is CF compliant, you can write out daily average
qout_nc.write_flows_to_gssha_time_series_ihg(
'/timeseries/Qout_3624735.ihg',
connection_list_file,
daily=True)
Example writing subset of time series as daily maximum to file:
.. code:: python
from datetime import datetime
from RAPIDpy import RAPIDDataset
path_to_rapid_qout = '/path/to/Qout.nc'
connection_list_file = '/path/to/connection_list_file.csv'
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
# if file is CF compliant, you can filter by
# date and get daily values
qout_nc.write_flows_to_gssha_time_series_ihg(
'/timeseries/Qout_daily_date_filter.ihg',
connection_list_file,
date_search_start=datetime(2002, 8, 31),
date_search_end=datetime(2002, 9, 15),
daily=True,
filter_mode="max")
""" # noqa
self.raise_time_valid()
# analyze and write
with open_csv(path_to_output_file, 'w') as out_ts:
# HEADER SECTION EXAMPLE:
# NUMPT 3
# POINT 1 599 0.0
# POINT 1 603 0.0
# POINT 1 605 0.0
connection_list = np.loadtxt(connection_list_file,
skiprows=1, ndmin=1,
delimiter=',',
usecols=(0, 1, 2, 3),
dtype={'names': ('link_id',
'node_id',
'baseflow',
'rapid_rivid'),
'formats': ('i8', 'i8',
'f4', 'i8')
},
)
out_ts.write("NUMPT {0}\n".format(connection_list.size))
river_idx_list = []
for connection in connection_list:
out_ts.write("POINT {0} {1} {2}\n"
"".format(connection['node_id'],
connection['link_id'],
connection['baseflow'],
),
)
river_idx_list.append(
self.get_river_index(connection['rapid_rivid'])
)
# INFLOW SECTION EXAMPLE:
# NRPDS 54
# INPUT 2002 01 01 00 00 15.551210 12.765090 0.000000
# INPUT 2002 01 02 00 00 15.480830 12.765090 0.000000
# INPUT 2002 01 03 00 00 16.078910 12.765090 0.000000
# ...
qout_df = self.get_qout_index(
river_idx_list,
date_search_start=date_search_start,
date_search_end=date_search_end,
daily=daily,
filter_mode=filter_mode,
as_dataframe=True)
out_ts.write("NRPDS {0}\n".format(len(qout_df.index)))
for index, pd_row in qout_df.iterrows():
date_str = index.strftime("%Y %m %d %H %M")
qout_str = " ".join(["{0:.5f}".format(pd_row[column])
for column in qout_df])
out_ts.write("INPUT {0} {1}\n".format(date_str, qout_str)) | 0.001442 |
def validate(self, corpus, catalogue):
"""Returns True if all of the files labelled in `catalogue`
are up-to-date in the database.
:param corpus: corpus of works
:type corpus: `Corpus`
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:rtype: `bool`
"""
is_valid = True
for name in catalogue:
count = 0
# It is unfortunate that this creates WitnessText objects
# for each work, since that involves reading the file.
for witness in corpus.get_witnesses(name):
count += 1
name, siglum = witness.get_names()
filename = witness.get_filename()
row = self._conn.execute(constants.SELECT_TEXT_SQL,
[name, siglum]).fetchone()
if row is None:
is_valid = False
self._logger.warning(
'No record (or n-grams) exists for {} in '
'the database'.format(filename))
elif row['checksum'] != witness.get_checksum():
is_valid = False
self._logger.warning(
'{} has changed since its n-grams were '
'added to the database'.format(filename))
if count == 0:
raise FileNotFoundError(
constants.CATALOGUE_WORK_NOT_IN_CORPUS_ERROR.format(
name))
return is_valid | 0.001255 |
def by_identifier_secret(self, request):
"""
Authenticates a client by its identifier and secret (aka password).
:param request: The incoming request
:type request: oauth2.web.Request
:return: The identified client
:rtype: oauth2.datatype.Client
:raises OAuthInvalidError: If the client could not be found, is not
allowed to use the current grant or
supplied invalid credentials
"""
client_id, client_secret = self.source(request=request)
try:
client = self.client_store.fetch_by_client_id(client_id)
except ClientNotFoundError:
raise OAuthInvalidError(error="invalid_client",
explanation="No client could be found")
grant_type = request.post_param("grant_type")
if client.grant_type_supported(grant_type) is False:
raise OAuthInvalidError(error="unauthorized_client",
explanation="The client is not allowed "
"to use this grant type")
if client.secret != client_secret:
raise OAuthInvalidError(error="invalid_client",
explanation="Invalid client credentials")
return client | 0.001451 |
def to_package(self, repo_url):
"""Return the package representation of this repo."""
return Package(name=self.name, url=repo_url + self.name) | 0.012658 |
def wrap(cls, app):
"""
Adds test live server capability to a Flask app module.
:param app:
A :class:`flask.Flask` app instance.
"""
host, port = cls.parse_args()
ssl = cls._argument_parser.parse_args().ssl
ssl_context = None
if host:
if ssl:
try:
import OpenSSL
except ImportError:
# OSX fix
sys.path.append(
'/System/Library/Frameworks/Python.framework/Versions/'
'{0}.{1}/Extras/lib/python/'
.format(sys.version_info.major, sys.version_info.minor)
)
try:
import OpenSSL
except ImportError:
# Linux fix
sys.path.append(
'/usr/lib/python{0}.{1}/dist-packages/'
.format(sys.version_info.major, sys.version_info.minor)
)
try:
import OpenSSL
except ImportError:
raise LiveAndLetDieError(
'Flask app could not be launched because the pyopenssl '
'library is not installed on your system!'
)
ssl_context = 'adhoc'
app.run(host=host, port=port, ssl_context=ssl_context)
sys.exit() | 0.002665 |
def get_collections(kwdb, libtype="*"):
"""Get list of collections from kwdb, then add urls necessary for hyperlinks"""
collections = kwdb.get_collections(libtype=libtype)
for result in collections:
url = flask.url_for(".doc_for_library", collection_id=result["collection_id"])
result["url"] = url
return collections | 0.008596 |
def run_analysis(self, argv):
""" Build the manifest for all the models
"""
args = self._parser.parse_args(argv)
components = Component.build_from_yamlfile(args.comp)
NAME_FACTORY.update_base_dict(args.data)
model_dict = make_library(**args.__dict__)
model_manager = model_dict['ModelManager']
models = load_yaml(args.models)
data = args.data
hpx_order = args.hpx_order
for modelkey in models:
model_manager.make_srcmap_manifest(modelkey, components, data)
model_manager.make_fermipy_config_yaml(modelkey, components, data,
hpx_order=hpx_order,
irf_ver=NAME_FACTORY.irf_ver()) | 0.003797 |
def sku_update(self, product_id, properties, session, **kwargs):
'''taobao.fenxiao.product.sku.update: product SKU editing interface.
Updates product SKU information.'''
request = TOPRequest('taobao.fenxiao.product.sku.update')
request['product_id'] = product_id
request['properties'] = properties
for k, v in kwargs.iteritems():
if k not in ('quantity', 'standard_price', 'agent_cost_price', 'sku_number', 'dealer_cost_price') and v==None: continue
request[k] = v
self.create(self.execute(request, session), fields=['result','created'], models={'created':TOPDate})
return self | 0.015898 |
def simxSetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode) | 0.014388 |
def _get_range(self, endpoint_name):
""" Returns a Range based on the endpoint name """
url = self.build_url(self._endpoints.get(endpoint_name))
response = self.session.get(url)
if not response:
return None
data = response.json()
return self.range_constructor(parent=self, **{self._cloud_data_key: data}) | 0.008219 |
def get_template(self):
"""
Read an Excel template: read out all of the rows in the workbook and record
any special markers that are recognized.
:return: the parsed template, shaped roughly like:
[
{'cols': # the columns; not used together with subs
'subs':[ # sub-templates
{'cols': # the columns,
'subs': # sub-templates
'field': # the corresponding field name in the data
},
...
]
'field': # the corresponding field name in the data
},
...
]
A sub-template starts where the first column equals {{for field}} and ends with {{end}}.
"""
rows = []
stack = []
stack.append(rows)
# top tracks the list currently being appended to (the top of the stack)
top = rows
for i in range(1, self.sheet.max_row+1):
cell = self.sheet.cell(row=i, column=1)
# start of a sub-template?
if (isinstance(cell.value, (str, unicode)) and
cell.value.startswith('{{for ') and
cell.value.endswith('}}')):
row = {'field':cell.value[6:-2].strip(), 'cols':[], 'subs':[]}
top.append(row)
top = row['subs']
stack.append(top)
if self.begin == 1:
self.begin = i
# end of a sub-template?
elif (isinstance(cell.value, (str, unicode)) and
cell.value == '{{end}}'):
stack.pop()
top = stack[-1]
else:
row = {'cols':[], 'subs':[]}
cols = row['cols']
for j in range(1, self.sheet.max_column+1):
cell = self.sheet.cell(row=i, column=j)
v = self.process_cell(i, j, cell)
if v:
cols.append(v)
if row['cols'] or row['subs']:
top.append(row)
# pprint(rows)
return rows | 0.006121 |
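The core of `get_template` is the stack that opens a new nesting level on `{{for field}}` and closes it on `{{end}}`. Here is a standalone sketch of that logic applied to a plain list of first-column values, so no openpyxl workbook is needed; the marker strings follow the docstring, and the sample values are made up.

```python
# Standalone sketch of the stack-based nesting logic described above.
def build_template(first_column_values):
    rows, stack = [], []
    top = rows
    stack.append(top)
    for value in first_column_values:
        if isinstance(value, str) and value.startswith('{{for ') and value.endswith('}}'):
            row = {'field': value[6:-2].strip(), 'cols': [], 'subs': []}
            top.append(row)
            top = row['subs']      # descend into the sub-template
            stack.append(top)
        elif value == '{{end}}':
            stack.pop()            # close the sub-template
            top = stack[-1]
        else:
            top.append({'cols': [value], 'subs': []})
    return rows

template = build_template(['header', '{{for lines}}', 'item', '{{end}}'])
assert template[1]['field'] == 'lines'
assert template[1]['subs'][0]['cols'] == ['item']
```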
def get_pickle_protocol():
"""
Allow configuration of the pickle protocol on a per-machine basis.
This way, if you use multiple platforms with different versions of
pickle, you can configure each of them to use the highest protocol
supported by all of the machines that you want to be able to
communicate.
"""
try:
protocol_str = os.environ['PYLEARN2_PICKLE_PROTOCOL']
except KeyError:
# If not defined, we default to 0 because this is the default
# protocol used by cPickle.dump (and because it results in
# maximum portability)
protocol_str = '0'
if protocol_str == 'pickle.HIGHEST_PROTOCOL':
return pickle.HIGHEST_PROTOCOL
return int(protocol_str) | 0.001348 |
def build(self):
"""Builds the barcode pattern from `self.ean`.
:returns: The pattern as string
:rtype: String
"""
code = _ean.EDGE[:]
pattern = _ean.LEFT_PATTERN[int(self.ean[0])]
for i, number in enumerate(self.ean[1:7]):
code += _ean.CODES[pattern[i]][int(number)]
code += _ean.MIDDLE
for number in self.ean[7:]:
code += _ean.CODES['C'][int(number)]
code += _ean.EDGE
return [code] | 0.004024 |
def is_gesture(self):
"""Macro to check if this event is
a :class:`~libinput.event.GestureEvent`.
"""
if self in {type(self).GESTURE_SWIPE_BEGIN, type(self).GESTURE_SWIPE_END,
type(self).GESTURE_SWIPE_UPDATE, type(self).GESTURE_PINCH_BEGIN,
type(self).GESTURE_PINCH_UPDATE, type(self).GESTURE_PINCH_END}:
return True
else:
return False | 0.03352 |
def kendall(x, axis=0):
"""Kendall' tau (Rank) Correlation Matrix (for ordinal data)
Parameters
----------
x : ndarray
data set
axis : int, optional
Variables as columns is the default (axis=0). If variables are
in the rows use axis=1
Returns
-------
r : ndarray
Correlation Matrix (Kendall tau)
p : ndarray
p-values
"""
# transpose if axis != 0
if axis != 0:
x = x.T
# read dimensions
n, c = x.shape
# check if enough variables provided
if c < 2:
raise Exception(
"Only " + str(c) + " variables provided. Min. 2 required.")
# allocate variables
r = np.ones((c, c))
p = np.zeros((c, c))
# compute each (i,j)-th correlation
for i in range(0, c):
for j in range(i + 1, c):
r[i, j], p[i, j] = scipy.stats.kendalltau(x[:, i], x[:, j])
r[j, i] = r[i, j]
p[j, i] = p[i, j]
# done
return r, p | 0.000996 |
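A short usage sketch of the same pairwise computation on made-up ordinal data; it calls `scipy.stats.kendalltau` directly rather than the `kendall` wrapper above, since the wrapper is not importable from here.

```python
# Pairwise Kendall tau on a small, invented data matrix (4 observations,
# 3 ordinal variables), mirroring the loop in the function above.
import numpy as np
import scipy.stats

x = np.array([[1, 2, 3],
              [2, 1, 4],
              [3, 4, 1],
              [4, 3, 2]])
n, c = x.shape
r = np.ones((c, c))
p = np.zeros((c, c))
for i in range(c):
    for j in range(i + 1, c):
        r[i, j], p[i, j] = scipy.stats.kendalltau(x[:, i], x[:, j])
        r[j, i], p[j, i] = r[i, j], p[i, j]
print(np.round(r, 2))
```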
def ensure_vbounds(self, use_margins=None):
"""Ensure the cursor is within vertical screen bounds.
:param bool use_margins: when ``True`` or when
:data:`~pyte.modes.DECOM` is set,
cursor is bounded by top and bottom
margins, instead of ``[0; lines - 1]``.
"""
if (use_margins or mo.DECOM in self.mode) and self.margins is not None:
top, bottom = self.margins
else:
top, bottom = 0, self.lines - 1
self.cursor.y = min(max(top, self.cursor.y), bottom) | 0.003195 |
def execute(self, command):
"""Execute a DDE command."""
pData = c_char_p(command)
cbData = DWORD(len(command) + 1)
hDdeData = DDE.ClientTransaction(pData, cbData, self._hConv, HSZ(), CF_TEXT, XTYP_EXECUTE, TIMEOUT_ASYNC, LPDWORD())
if not hDdeData:
raise DDEError("Unable to send command", self._idInst)
DDE.FreeDataHandle(hDdeData) | 0.007634 |
def _write_openjpeg(self, img_array, verbose=False):
"""
Write JPEG 2000 file using OpenJPEG 1.5 interface.
"""
if img_array.ndim == 2:
# Force the image to be 3D. Just makes things easier later on.
img_array = img_array.reshape(img_array.shape[0],
img_array.shape[1],
1)
self._populate_comptparms(img_array)
with ExitStack() as stack:
image = opj.image_create(self._comptparms, self._colorspace)
stack.callback(opj.image_destroy, image)
numrows, numcols, numlayers = img_array.shape
# set image offset and reference grid
image.contents.x0 = self._cparams.image_offset_x0
image.contents.y0 = self._cparams.image_offset_y0
image.contents.x1 = (image.contents.x0 +
(numcols - 1) * self._cparams.subsampling_dx +
1)
image.contents.y1 = (image.contents.y0 +
(numrows - 1) * self._cparams.subsampling_dy +
1)
# Stage the image data to the openjpeg data structure.
for k in range(0, numlayers):
layer = np.ascontiguousarray(img_array[:, :, k],
dtype=np.int32)
dest = image.contents.comps[k].data
src = layer.ctypes.data
ctypes.memmove(dest, src, layer.nbytes)
cinfo = opj.create_compress(self._cparams.codec_fmt)
stack.callback(opj.destroy_compress, cinfo)
# Setup the info, warning, and error handlers.
# Always use the warning and error handler. Use of an info
# handler is optional.
event_mgr = opj.EventMgrType()
_info_handler = _INFO_CALLBACK if verbose else None
event_mgr.info_handler = _info_handler
event_mgr.warning_handler = ctypes.cast(_WARNING_CALLBACK,
ctypes.c_void_p)
event_mgr.error_handler = ctypes.cast(_ERROR_CALLBACK,
ctypes.c_void_p)
opj.setup_encoder(cinfo, ctypes.byref(self._cparams), image)
cio = opj.cio_open(cinfo)
stack.callback(opj.cio_close, cio)
if not opj.encode(cinfo, cio, image):
raise IOError("Encode error.")
pos = opj.cio_tell(cio)
blob = ctypes.string_at(cio.contents.buffer, pos)
fptr = open(self.filename, 'wb')
stack.callback(fptr.close)
fptr.write(blob)
self.parse() | 0.000717 |
def medianscore(inlist):
"""
Returns the 'middle' score of the passed list. If there is an even
number of scores, the mean of the 2 middle scores is returned.
Usage: medianscore(inlist)
"""
newlist = copy.deepcopy(inlist)
newlist.sort()
if len(newlist) % 2 == 0: # if even number of scores, average middle 2
index = len(newlist) // 2 # integer division
median = float(newlist[index] + newlist[index - 1]) / 2
else:
index = len(newlist) // 2 # integer division gives mid value when counting from 0
median = newlist[index]
return median | 0.003344 |
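A quick cross-check of the even/odd behaviour described in the docstring against the standard library; the sample lists are arbitrary.

```python
# statistics.median sorts a copy internally, matching the behaviour above.
import statistics

assert statistics.median([3, 1, 2]) == 2        # odd count: middle score
assert statistics.median([4, 1, 3, 2]) == 2.5   # even count: mean of middle two
```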
def do_decode(cls, obj, obj_type):
# type: (Any, ConjureTypeType) -> Any
"""Decodes json into the specified type
Args:
obj: the json object to decode
obj_type: a class object which is the type we're decoding into.
"""
if inspect.isclass(obj_type) and issubclass( # type: ignore
obj_type, ConjureBeanType
):
return cls.decode_conjure_bean_type(obj, obj_type) # type: ignore
elif inspect.isclass(obj_type) and issubclass( # type: ignore
obj_type, ConjureUnionType
):
return cls.decode_conjure_union_type(obj, obj_type)
elif inspect.isclass(obj_type) and issubclass( # type: ignore
obj_type, ConjureEnumType
):
return cls.decode_conjure_enum_type(obj, obj_type)
elif isinstance(obj_type, DictType):
return cls.decode_dict(obj, obj_type.key_type, obj_type.value_type)
elif isinstance(obj_type, ListType):
return cls.decode_list(obj, obj_type.item_type)
elif isinstance(obj_type, OptionalType):
return cls.decode_optional(obj, obj_type.item_type)
return cls.decode_primitive(obj, obj_type) | 0.00239 |
def cancel(self):
"""
Cancel itself and following NOTs as far as possible.
Returns the simplified expression.
"""
expr = self
while True:
arg = expr.args[0]
if not isinstance(arg, self.__class__):
return expr
expr = arg.args[0]
if not isinstance(expr, self.__class__):
return expr | 0.004902 |
def register_dependency(self, data_src, data_sink):
""" registers a dependency of data_src -> data_sink
by placing appropriate entries in provides_for and depends_on
"""
pdebug("registering dependency %s -> %s" % (data_src, data_sink))
if (data_src not in self._gettask(data_sink).depends_on):
self._gettask(data_sink).depends_on.append(data_src)
if (data_sink not in self._gettask(data_src).provides_for):
self._gettask(data_src).provides_for.append(data_sink) | 0.003704 |
def get_notice(self) -> dict:
"""
Retrieve the list of bulletin board notices
"""
try:
# fetch the data
response = self.__session.get(
self.__url + '/MessageBoard', timeout=0.5, verify=False)
soup = BeautifulSoup(response.text, 'html.parser')
# organize the bulletin board notice list
notices = {}
for tag in soup.find_all('tr'):
# skip the header row
if tag.find('a') is not None:
title = tag.find('a').get_text().strip()
date = tag.find_all('td')[1].get_text().strip()
notices[date] = title
# return the result
return notices
except requests.exceptions.Timeout:
return {"Timeout": "Timeout"} | 0.004027 |
def snake(s):
"""Convert from title or camelCase to snake_case."""
if len(s) < 2:
return s.lower()
out = s[0].lower()
for c in s[1:]:
if c.isupper():
out += "_"
c = c.lower()
out += c
return out | 0.003817 |
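For comparison, the same conversion is often written as a single regex substitution. The sketch below is an alternative formulation, not the function above, and matches its output for simple camelCase/TitleCase names.

```python
# Regex-based equivalent: insert "_" before every capital that is not at the
# start of the string, then lowercase everything.
import re

def snake_re(s):
    return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()

assert snake_re("camelCase") == "camel_case"
assert snake_re("TitleCase") == "title_case"
```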
def _load_keras_model(model_network_path, model_weight_path, custom_objects=None):
"""Load a keras model from disk
Parameters
----------
model_network_path: str
Path to the model network definition (json file)
model_weight_path: str
Path to the model network weights (hd5 file)
custom_objects:
A dictionary of layers or other custom classes
or functions used by the model
Returns
-------
model: A keras model
"""
from keras.models import model_from_json
import json
# Load the model network
json_file = open(model_network_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
if not custom_objects:
custom_objects = {}
# Load the model weights
loaded_model = model_from_json(loaded_model_json, custom_objects=custom_objects)
loaded_model.load_weights(model_weight_path)
return loaded_model | 0.003191 |
def _commit(self):
"""
:return: (dict) Response object content
"""
assert self.uri is not None, Exception("BadArgument: uri property cannot be None")
url = '{}/{}'.format(self.uri, self.__class__.__name__)
serialized_json = jsonpickle.encode(self, unpicklable=False, )
headers = {'Content-Type': 'application/json', 'Content-Length': str(len(serialized_json))}
response = Http.post(url=url, data=serialized_json, headers=headers)
if response.status_code != 200:
from ArubaCloud.base.Errors import MalformedJsonRequest
raise MalformedJsonRequest("Request: {}, Status Code: {}".format(serialized_json, response.status_code))
content = jsonpickle.decode(response.content.decode("utf-8"))
if content['ResultCode'] == 17:
from ArubaCloud.base.Errors import OperationAlreadyEnqueued
raise OperationAlreadyEnqueued("{} already enqueued".format(self.__class__.__name__))
if content['Success'] is False:
from ArubaCloud.base.Errors import RequestFailed
raise RequestFailed("Request: {}, Response: {}".format(serialized_json, response.content))
return content | 0.005714 |
def normalize_rrs(rrsets):
"""Lexically sort the order of every ResourceRecord in a ResourceRecords
element so we don't generate spurious changes: ordering of e.g. NS records
is irrelevant to the DNS line protocol, but XML sees it differently.
Also rewrite any wildcard records to use the ascii hex code: somewhere deep
inside route53 is something that used to look like tinydns, and amazon's
API will always display wildcard records as "\052.example.com".
Args: rrsets: lxml.etree.Element (<ResourceRecordSets>) """
for rrset in rrsets:
if rrset.tag == '{%s}ResourceRecordSet' % R53_XMLNS:
for rrs in rrset:
# preformat wildcard records
if rrs.tag == '{%s}Name' % R53_XMLNS:
if rrs.text.startswith('*.'):
old_text = rrs.text
new_text = '\\052.%s' % old_text[2:]
print 'Found wildcard record, rewriting to %s' % new_text
rrs.text = rrs.text.replace(old_text, new_text)
# sort ResourceRecord elements by Value
if rrs.tag == '{%s}ResourceRecords' % R53_XMLNS:
# 0th value of ResourceRecord is always the Value element
sorted_rrs = sorted(rrs, key=lambda x: x[0].text)
rrs[:] = sorted_rrs
return rrsets | 0.007206 |
def _set_property(self, val, *args):
"""Private method that sets the value currently of the property"""
val = UserClassAdapter._set_property(self, val, *args)
if val:
Adapter._set_property(self, val, *args)
return val | 0.007663 |
def _add_dispatcher(self, path_regex, dispatch_function):
"""Add a request path and dispatch handler.
Args:
path_regex: A string regex, the path to match against incoming requests.
dispatch_function: The function to call for these requests. The function
should take (request, start_response) as arguments and
return the contents of the response body.
"""
self._dispatchers.append((re.compile(path_regex), dispatch_function)) | 0.002128 |
def get_unique_schema_id(schema):
# type: (GraphQLSchema) -> str
"""Get a unique id given a GraphQLSchema"""
assert isinstance(schema, GraphQLSchema), (
"Must receive a GraphQLSchema as schema. Received {}"
).format(repr(schema))
if schema not in _cached_schemas:
_cached_schemas[schema] = sha1(str(schema).encode("utf-8")).hexdigest()
return _cached_schemas[schema] | 0.002457 |
def generate_cmd_string(self, cmd, *args, **kwargs):
"""
for any terraform command that isn't wrapped as a public method of this class
examples:
1. call import command,
ref to https://www.terraform.io/docs/commands/import.html
--> generate_cmd_string call:
terraform import -input=true aws_instance.foo i-abcd1234
--> python call:
tf.generate_cmd_string('import', 'aws_instance.foo', 'i-abcd1234', input=True)
2. call apply command,
--> generate_cmd_string call:
terraform apply -var='a=b' -var='c=d' -no-color the_folder
--> python call:
tf.generate_cmd_string('apply', the_folder, no_color=IsFlagged, var={'a':'b', 'c':'d'})
:param cmd: command and sub-command of terraform, separated with a space
refer to https://www.terraform.io/docs/commands/index.html
:param args: arguments of a command
:param kwargs: same as kwargs in method 'cmd'
:return: string of valid terraform command
"""
cmds = cmd.split()
cmds = [self.terraform_bin_path] + cmds
for option, value in kwargs.items():
if '_' in option:
option = option.replace('_', '-')
if type(value) is list:
for sub_v in value:
cmds += ['-{k}={v}'.format(k=option, v=sub_v)]
continue
if type(value) is dict:
if 'backend-config' in option:
for bk, bv in value.items():
cmds += ['-backend-config={k}={v}'.format(k=bk, v=bv)]
continue
# since map type sent in string won't work, create temp var file for
# variables, and clean it up later
else:
filename = self.temp_var_files.create(value)
cmds += ['-var-file={0}'.format(filename)]
continue
# simple flag,
if value is IsFlagged:
cmds += ['-{k}'.format(k=option)]
continue
if value is None or value is IsNotFlagged:
continue
if type(value) is bool:
value = 'true' if value else 'false'
cmds += ['-{k}={v}'.format(k=option, v=value)]
cmds += args
return cmds | 0.002478 |
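The docstring's examples boil down to a kwargs-to-flag conversion. Below is a stripped-down sketch of that conversion with a stand-in `IsFlagged` sentinel; it is not python-terraform's actual API, just an illustration of the pattern.

```python
# Hypothetical, simplified kwargs-to-flag conversion in the spirit of the
# method above. IsFlagged is a local sentinel, not the library's object.
IsFlagged = object()

def kwargs_to_flags(**kwargs):
    flags = []
    for option, value in kwargs.items():
        option = option.replace('_', '-')
        if value is IsFlagged:
            flags.append('-{0}'.format(option))
        elif isinstance(value, list):
            flags.extend('-{0}={1}'.format(option, v) for v in value)
        elif isinstance(value, bool):
            flags.append('-{0}={1}'.format(option, 'true' if value else 'false'))
        elif value is not None:
            flags.append('-{0}={1}'.format(option, value))
    return flags

assert kwargs_to_flags(no_color=IsFlagged, input=True, var=['a=b', 'c=d']) == \
    ['-no-color', '-input=true', '-var=a=b', '-var=c=d']
```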
def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):
"""
Adds a line.
Args:
start: Starting coordinates for line.
end: Ending coordinates for line.
color: Color for the line as RGB. Defaults to grey.
width: Width of line. Defaults to 1.
"""
source = vtk.vtkLineSource()
source.SetPoint1(start)
source.SetPoint2(end)
vertexIDs = vtk.vtkStringArray()
vertexIDs.SetNumberOfComponents(1)
vertexIDs.SetName("VertexIDs")
# Set the vertex labels
vertexIDs.InsertNextValue("a")
vertexIDs.InsertNextValue("b")
source.GetOutput().GetPointData().AddArray(vertexIDs)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetLineWidth(width)
self.ren.AddActor(actor) | 0.001978 |
def combine(self, pattern, variable):
"""Combine a pattern and variable parts to be a line string again."""
inter_zip = izip_longest(variable, pattern, fillvalue='')
interleaved = [elt for pair in inter_zip for elt in pair]
return ''.join(interleaved) | 0.007067 |
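The interleaving trick relies on zipping with a fill value and flattening pair by pair. A minimal Python 3 demonstration with `itertools.zip_longest` (the snippet above uses the Python 2 `izip_longest`); the pattern and variable values are made up.

```python
# Interleave two unequal-length lists, padding the shorter one with ''.
from itertools import zip_longest

pattern = ['<', '>', '<']
variable = ['a', 'b', 'c', 'd']
interleaved = [elt for pair in zip_longest(variable, pattern, fillvalue='') for elt in pair]
assert ''.join(interleaved) == 'a<b>c<d'
```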
def shut_down():
"""Closes connection and restores terminal"""
curses.nocbreak()
curses.echo()
curses.endwin()
gpsd_socket.close()
print('Keyboard interrupt received\nTerminated by user\nGood Bye.\n')
sys.exit(1) | 0.004167 |
def factorize(self, niter=10, compute_w=True, compute_h=True,
compute_err=True, show_progress=False):
""" Factorize s.t. WH = data
Parameters
----------
niter : int
number of iterations.
show_progress : bool
print some extra information to stdout.
compute_h : bool
iteratively update values for H.
compute_w : bool
iteratively update values for W.
compute_err : bool
compute Frobenius norm |data-WH| after each update and store
it to .ferr[k].
Updated Values
--------------
.W : updated values for W.
.H : updated values for H.
.ferr : Frobenius norm |data-WH| for each iteration.
"""
if not hasattr(self,'W'):
self.init_w()
if not hasattr(self,'H'):
self.init_h()
def separate_positive(m):
return (np.abs(m) + m)/2.0
def separate_negative(m):
return (np.abs(m) - m)/2.0
if show_progress:
self._logger.setLevel(logging.INFO)
else:
self._logger.setLevel(logging.ERROR)
XtX = np.dot(self.data[:,:].T, self.data[:,:])
XtX_pos = separate_positive(XtX)
XtX_neg = separate_negative(XtX)
self.ferr = np.zeros(niter)
# iterate over W and H
for i in range(niter):
# update H
XtX_neg_x_W = np.dot(XtX_neg, self.G)
XtX_pos_x_W = np.dot(XtX_pos, self.G)
if compute_h:
H_x_WT = np.dot(self.H.T, self.G.T)
ha = XtX_pos_x_W + np.dot(H_x_WT, XtX_neg_x_W)
hb = XtX_neg_x_W + np.dot(H_x_WT, XtX_pos_x_W) + 10**-9
self.H = (self.H.T*np.sqrt(ha/hb)).T
# update W
if compute_w:
HT_x_H = np.dot(self.H, self.H.T)
wa = np.dot(XtX_pos, self.H.T) + np.dot(XtX_neg_x_W, HT_x_H)
wb = np.dot(XtX_neg, self.H.T) + np.dot(XtX_pos_x_W, HT_x_H) + 10**-9
self.G *= np.sqrt(wa/wb)
self.W = np.dot(self.data[:,:], self.G)
if compute_err:
self.ferr[i] = self.frobenius_norm()
self._logger.info('Iteration ' + str(i+1) + '/' + str(niter) +
' FN:' + str(self.ferr[i]))
else:
self._logger.info('Iteration ' + str(i+1) + '/' + str(niter))
if i > 1 and compute_err:
if self.converged(i):
self.ferr = self.ferr[:i]
break | 0.005113 |
def find_and_replace(self, node):
"""Parses URIs containing .md and replaces them with their HTML page.
Args:
node(node): docutils node.
Returns:
node: docutils node.
"""
if isinstance(node, nodes.reference) and 'refuri' in node:
reference_uri = node['refuri']
if reference_uri.endswith('.md') and not reference_uri.startswith('http'):
reference_uri = reference_uri[:-3] + '.html'
node['refuri'] = reference_uri
else:
match = self.ANCHOR_REGEX.match(reference_uri)
if match:
node['refuri'] = '{0:s}.html#{1:s}'.format(
match.group('uri'), match.group('anchor'))
return node | 0.008734 |
def text_dict_write(fpath, dict_):
"""
Very naive, but readable way of storing a dictionary on disk
FIXME: This broke on RoseMary's big dataset. Not sure why. It gave bad
syntax. And the SyntaxError did not seem to be excepted.
"""
#dict_ = text_dict_read(fpath)
#dict_[key] = val
dict_text2 = util_str.repr4(dict_, strvals=False)
if VERBOSE:
print('[cache] ' + str(dict_text2))
util_io.write_to(fpath, dict_text2) | 0.006494 |
def get_plugin_modules(folders, package='plugins',
parentpackage='linkcheck.dummy'):
"""Get plugin modules for given folders."""
for folder in folders:
for module in loader.get_folder_modules(folder, parentpackage):
yield module
for module in loader.get_package_modules(package):
yield module | 0.002817 |
def show_instance(name, call=None):
'''
List a single node and return its dict of grains.
'''
local = salt.client.LocalClient()
ret = local.cmd(name, 'grains.items')
ret.update(_build_required_items(ret))
return ret | 0.004167 |
def limit_disk_io(self, uuid, media, totalbytessecset=False, totalbytessec=0, readbytessecset=False, readbytessec=0, writebytessecset=False,
writebytessec=0, totaliopssecset=False, totaliopssec=0, readiopssecset=False, readiopssec=0, writeiopssecset=False, writeiopssec=0,
totalbytessecmaxset=False, totalbytessecmax=0, readbytessecmaxset=False, readbytessecmax=0, writebytessecmaxset=False, writebytessecmax=0,
totaliopssecmaxset=False, totaliopssecmax=0, readiopssecmaxset=False, readiopssecmax=0, writeiopssecmaxset=False, writeiopssecmax=0,
totalbytessecmaxlengthset=False, totalbytessecmaxlength=0, readbytessecmaxlengthset=False, readbytessecmaxlength=0,
writebytessecmaxlengthset=False, writebytessecmaxlength=0, totaliopssecmaxlengthset=False, totaliopssecmaxlength=0,
readiopssecmaxlengthset=False, readiopssecmaxlength=0, writeiopssecmaxlengthset=False, writeiopssecmaxlength=0, sizeiopssecset=False,
sizeiopssec=0, groupnameset=False, groupname=''):
"""
Limit disk I/O for a kvm machine
:param uuid: uuid of the kvm container (same as the one used in create)
:param media: the media on which to limit disk I/O
:return:
"""
args = {
'uuid': uuid,
'media': media,
'totalbytessecset': totalbytessecset,
'totalbytessec': totalbytessec,
'readbytessecset': readbytessecset,
'readbytessec': readbytessec,
'writebytessecset': writebytessecset,
'writebytessec': writebytessec,
'totaliopssecset': totaliopssecset,
'totaliopssec': totaliopssec,
'readiopssecset': readiopssecset,
'readiopssec': readiopssec,
'writeiopssecset': writeiopssecset,
'writeiopssec': writeiopssec,
'totalbytessecmaxset': totalbytessecmaxset,
'totalbytessecmax': totalbytessecmax,
'readbytessecmaxset': readbytessecmaxset,
'readbytessecmax': readbytessecmax,
'writebytessecmaxset': writebytessecmaxset,
'writebytessecmax': writebytessecmax,
'totaliopssecmaxset': totaliopssecmaxset,
'totaliopssecmax': totaliopssecmax,
'readiopssecmaxset': readiopssecmaxset,
'readiopssecmax': readiopssecmax,
'writeiopssecmaxset': writeiopssecmaxset,
'writeiopssecmax': writeiopssecmax,
'totalbytessecmaxlengthset': totalbytessecmaxlengthset,
'totalbytessecmaxlength': totalbytessecmaxlength,
'readbytessecmaxlengthset': readbytessecmaxlengthset,
'readbytessecmaxlength': readbytessecmaxlength,
'writebytessecmaxlengthset': writebytessecmaxlengthset,
'writebytessecmaxlength': writebytessecmaxlength,
'totaliopssecmaxlengthset': totaliopssecmaxlengthset,
'totaliopssecmaxlength': totaliopssecmaxlength,
'readiopssecmaxlengthset': readiopssecmaxlengthset,
'readiopssecmaxlength': readiopssecmaxlength,
'writeiopssecmaxlengthset': writeiopssecmaxlengthset,
'writeiopssecmaxlength': writeiopssecmaxlength,
'sizeiopssecset': sizeiopssecset,
'sizeiopssec': sizeiopssec,
'groupnameset': groupnameset,
'groupname': groupname,
}
self._limit_disk_io_action_chk.check(args)
self._client.sync('kvm.limit_disk_io', args) | 0.004458 |
def get_error(response):
"""Gets Error by HTTP Status Code"""
errors = {
400: BadRequestError,
401: UnauthorizedError,
403: AccessDeniedError,
404: NotFoundError,
429: RateLimitExceededError,
500: ServerError,
502: BadGatewayError,
503: ServiceUnavailableError
}
error_class = HTTPError
if response.status_code in errors:
error_class = errors[response.status_code]
return error_class(response) | 0.002033 |
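The lookup above is equivalent to `dict.get` with a default. A small self-contained illustration follows, with placeholder exception classes standing in for the library-specific ones.

```python
# Status-code to exception-class dispatch with dict.get; the classes here are
# placeholders, not the library's real error types.
class HTTPError(Exception): pass
class NotFoundError(HTTPError): pass
class ServerError(HTTPError): pass

errors = {404: NotFoundError, 500: ServerError}
status_code = 404                       # e.g. taken from a response object
error_class = errors.get(status_code, HTTPError)
assert error_class is NotFoundError
```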
def get_all_permissionschemes(self, expand=None):
"""
Returns a list of all permission schemes.
By default only shortened beans are returned.
If you want to include permissions of all the schemes,
then specify the permissions expand parameter.
Permissions will be included also if you specify any other expand parameter.
:param expand : permissions,user,group,projectRole,field,all
:return:
"""
url = 'rest/api/2/permissionscheme'
params = {}
if expand:
params['expand'] = expand
return (self.get(url, params=params) or {}).get('permissionSchemes') | 0.004518 |
def update(old_template=None, old_version=None, new_template=None, new_version=None,
enter_parameters=False):
"""Updates the temple project to the latest template
Proceeds in the following steps:
1. Ensure we are inside the project repository
2. Obtain the latest version of the package template
3. If the package is up to date with the latest template, return
4. If not, create an empty template branch with a new copy of the old template
5. Create an update branch from HEAD and merge in the new template copy
6. Create a new copy of the new template and merge into the empty template branch
7. Merge the updated empty template branch into the update branch
8. Ensure temple.yaml reflects what is in the template branch
9. Remove the empty template branch
Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'update' for the
duration of this function.
Two branches will be created during the update process, one named
``_temple_update`` and one named ``_temple_update_temp``. At the end of
the process, ``_temple_update_temp`` will be removed automatically. The
work will be left in ``_temple_update`` in an uncommitted state for
review. The update will fail early if either of these branches exist
before the process starts.
Args:
old_template (str, default=None): The old template from which to update. Defaults
to the template in temple.yaml
old_version (str, default=None): The old version of the template. Defaults to
the version in temple.yaml
new_template (str, default=None): The new template for updating. Defaults to the
template in temple.yaml
new_version (str, default=None): The new version of the new template to update.
Defaults to the latest version of the new template
enter_parameters (bool, default=False): Force entering template parameters for the project
Raises:
`NotInGitRepoError`: When not inside of a git repository
`InvalidTempleProjectError`: When not inside a valid temple repository
`InDirtyRepoError`: When an update is triggered while the repo is in a dirty state
`ExistingBranchError`: When an update is triggered and there is an existing
update branch
Returns:
boolean: True if update was performed or False if template was already up to date
"""
update_branch = temple.constants.UPDATE_BRANCH_NAME
temp_update_branch = temple.constants.TEMP_UPDATE_BRANCH_NAME
temple.check.in_git_repo()
temple.check.in_clean_repo()
temple.check.is_temple_project()
temple.check.not_has_branch(update_branch)
temple.check.not_has_branch(temp_update_branch)
temple.check.has_env_vars(temple.constants.GITHUB_API_TOKEN_ENV_VAR)
temple_config = temple.utils.read_temple_config()
old_template = old_template or temple_config['_template']
new_template = new_template or temple_config['_template']
old_version = old_version or temple_config['_version']
new_version = new_version or _get_latest_template_version(new_template)
if new_template == old_template and new_version == old_version and not enter_parameters:
print('No updates have happened to the template, so no files were updated')
return False
print('Creating branch {} for processing the update'.format(update_branch))
temple.utils.shell('git checkout -b {}'.format(update_branch),
stderr=subprocess.DEVNULL)
print('Creating temporary working branch {}'.format(temp_update_branch))
temple.utils.shell('git checkout --orphan {}'.format(temp_update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell('git rm -rf .',
stdout=subprocess.DEVNULL)
_apply_template(old_template,
'.',
checkout=old_version,
extra_context=temple_config)
temple.utils.shell('git add .')
temple.utils.shell(
'git commit --no-verify -m "Initialize template from version {}"'.format(old_version),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
print('Merge old template history into update branch.')
temple.utils.shell('git checkout {}'.format(update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell(
'git merge -s ours --no-edit --allow-unrelated-histories {}'.format(temp_update_branch),
stderr=subprocess.DEVNULL)
print('Update template in temporary branch.')
temple.utils.shell('git checkout {}'.format(temp_update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell('git rm -rf .',
stdout=subprocess.DEVNULL)
# If the cookiecutter.json files have changed or the templates have changed,
# the user will need to re-enter the cookiecutter config
needs_new_cc_config = _needs_new_cc_config_for_update(old_template, old_version,
new_template, new_version)
if needs_new_cc_config:
if old_template != new_template:
cc_config_input_msg = (
'You will be prompted for the parameters of the new template.'
' Please read the docs at https://github.com/{} before entering parameters.'
' Press enter to continue'
).format(temple.utils.get_repo_path(new_template))
else:
cc_config_input_msg = (
'A new template variable has been defined in the updated template.'
' You will be prompted to enter all of the variables again. Variables'
' already configured in your project will have their values set as'
' defaults. Press enter to continue'
)
input(cc_config_input_msg)
# Even if there is no detected need to re-enter the cookiecutter config, the user
# can still re-enter config parameters with the "enter_parameters" flag
if needs_new_cc_config or enter_parameters:
_, temple_config = (
temple.utils.get_cookiecutter_config(new_template,
default_config=temple_config,
version=new_version))
_apply_template(new_template,
'.',
checkout=new_version,
extra_context=temple_config)
temple.utils.write_temple_config(temple_config, new_template, new_version)
temple.utils.shell('git add .')
temple.utils.shell(
'git commit --no-verify -m "Update template to version {}"'.format(new_version),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
print('Merge updated template into update branch.')
temple.utils.shell('git checkout {}'.format(update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell('git merge --no-commit {}'.format(temp_update_branch),
check=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
# The temple.yaml file should always reflect what is in the new template
temple.utils.shell('git checkout --theirs {}'.format(temple.constants.TEMPLE_CONFIG_FILE),
check=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
print('Remove temporary template branch {}'.format(temp_update_branch))
temple.utils.shell('git branch -D {}'.format(temp_update_branch),
stdout=subprocess.DEVNULL)
print(textwrap.dedent("""\
Updating complete!
Please review the changes with "git status" for any errors or
conflicts. Once you are satisfied with the changes, add, commit,
push, and open a PR with the branch {}
""").format(update_branch))
return True | 0.00325 |
def convert(self, *args, **kwargs):
""" attempt to coerce any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
if args:
raise NotImplementedError
by_item = kwargs.get('by_item', True)
new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta']
new_style = False
for kw in new_inputs:
new_style |= kw in kwargs
if new_style:
fn = soft_convert_objects
fn_inputs = new_inputs
else:
fn = maybe_convert_objects
fn_inputs = ['convert_dates', 'convert_numeric',
'convert_timedeltas']
fn_inputs += ['copy']
fn_kwargs = {key: kwargs[key] for key in fn_inputs if key in kwargs}
# operate column-by-column
def f(m, v, i):
shape = v.shape
values = fn(v.ravel(), **fn_kwargs)
try:
values = values.reshape(shape)
values = _block_shape(values, ndim=self.ndim)
except (AttributeError, NotImplementedError):
pass
return values
if by_item and not self._is_single_block:
blocks = self.split_and_operate(None, f, False)
else:
values = f(None, self.values.ravel(), None)
blocks = [make_block(values, ndim=self.ndim,
placement=self.mgr_locs)]
return blocks | 0.001284 |
def after_epoch(self, **_) -> None:
"""
Reset progress counters. Save ``total_batch_count`` after the 1st epoch.
"""
if not self._total_batch_count_saved:
self._total_batch_count = self._current_batch_count.copy()
self._total_batch_count_saved = True
self._current_batch_count.clear()
self._current_stream_start = None
self._current_stream_name = None
erase_line() | 0.006637 |
def _catch_exceptions(self, exctype, value, tb):
"""Catches all exceptions and logs them."""
# Now we log it.
self.error('Uncaught exception', exc_info=(exctype, value, tb))
# First, we print to stdout with some colouring.
print_exception_formatted(exctype, value, tb) | 0.006452 |
def assert_not_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is in iter."""
# for very long strings, provide a truncated error
if isinstance(seq, six.string_types) and obj in seq and len(seq) > 200:
index = seq.find(obj)
start_index = index - 50
if start_index > 0:
truncated = "(truncated) ..."
else:
truncated = ""
start_index = 0
end_index = index + len(obj) + 50
truncated += seq[start_index:end_index]
if end_index < len(seq):
truncated += "... (truncated)"
assert False, _assert_fail_message(message, obj, truncated, "is in", extra)
assert obj not in seq, _assert_fail_message(message, obj, seq, "is in", extra) | 0.003876 |
def d(self, xi):
"""Convenience function to compute first derivative of basis functions. 'Memoized' for speed."""
return self.__basis(xi, self.p, compute_derivatives=True) | 0.016043 |
def set_property(self, name, value):
"""
Helper to set a property value by name, translating to correct
dbus type
See also :py:meth:`get_property`
:param str name: The property name in the object's dictionary
whose value shall be set.
:param value: Properties new value to be assigned.
:return:
:raises KeyError: if the property key is not found in the
object's dictionary
:raises dbus.Exception: org.bluez.Error.DoesNotExist
:raises dbus.Exception: org.bluez.Error.InvalidArguments
"""
typeof = type(self.get_property(name))
self._interface.SetProperty(name,
translate_to_dbus_type(typeof, value)) | 0.002618 |
def sample(self, sample_size=20):
"""
This method retrieves an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
kwargs: sample_size (Integer) between 0 and 100.
return: iterable object of Works metadata
Example:
>>> from crossref.restful import Works
>>> works = Works()
>>> works.sample(2).url
'https://api.crossref.org/works?sample=2'
>>> [i['title'] for i in works.sample(2)]
[['A study on the hemolytic properties ofPrevotella nigrescens'],
['The geometry and the radial breathing mode of carbon nanotubes: beyond the ideal behaviour']]
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT, context)
request_params = dict(self.request_params)
try:
if sample_size > 100:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
except TypeError:
raise UrlSyntaxError(
'Integer specified as %s but must be a positive integer less than or equal to 100.' % str(sample_size)
)
request_params['sample'] = sample_size
return self.__class__(request_url, request_params, context, self.etiquette) | 0.004079 |
def watch_thread(self):
'''watch for menu events from child'''
from mp_settings import MPSetting
try:
while True:
msg = self.parent_pipe_recv.recv()
if self.menu_callback is not None:
self.menu_callback(msg)
time.sleep(0.1)
except EOFError:
pass | 0.00542 |
def outlay(self, q):
"""
Determines the complete cash outlay (including commission) necessary
given a quantity q.
The second return value is the raw outlay and the third is the commission itself.
Args:
* q (float): quantity
"""
fee = self.commission(q, self._price * self.multiplier)
outlay = q * self._price * self.multiplier
return outlay + fee, outlay, fee | 0.004831 |
def create(cls, name, certificate):
"""
Create a TLS CA. The certificate must be compatible with OpenSSL
and be in PEM format. The certificate can be either a file with
the Root CA, or a raw string starting with BEGIN CERTIFICATE, etc.
When creating a TLS CA, you must also import the CA certificate. Once
the CA is created, it is possible to import a different certificate to
map to the CA if necessary.
:param str name: name of root CA
:param str,file certificate: The root CA contents
:raises CreateElementFailed: failed to create the root CA
:raises ValueError: if loading from file and no certificates present
:raises IOError: cannot find specified file for certificate
:rtype: TLSCertificateAuthority
"""
json = {'name': name,
'certificate': certificate if pem_as_string(certificate) else \
load_cert_chain(certificate)[0][1].decode('utf-8')}
return ElementCreator(cls, json) | 0.005644 |
def get_targets(self, config):
""" Given an Entry object, return all of the outgoing links. """
return {urllib.parse.urljoin(self.url, attrs['href'])
for attrs in self._targets
if self._check_rel(attrs, config.rel_whitelist, config.rel_blacklist)
and self._domain_differs(attrs['href'])} | 0.008523 |
def trace2array(self, sl):
"""Return an array with the trace of all stochastics, sliced by sl."""
chain = []
for stochastic in self.stochastics:
tr = stochastic.trace.gettrace(slicing=sl)
if tr is None:
raise AttributeError
chain.append(tr)
return np.hstack(chain) | 0.005747 |
def hailstone(n):
"""Return the 'hailstone sequence' from n to 1
n: The starting point of the hailstone sequence
"""
sequence = [n]
while n > 1:
if n%2 != 0:
n = 3*n + 1
else:
n = int(n/2)
sequence.append(n)
return sequence | 0.034091 |
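A quick inline check of the sequence for n = 6, written so it runs without importing the snippet above.

```python
# Collatz / hailstone iteration for n = 6, computed inline.
n, seq = 6, [6]
while n > 1:
    n = 3 * n + 1 if n % 2 else n // 2
    seq.append(n)
assert seq == [6, 3, 10, 5, 16, 8, 4, 2, 1]
```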
def sigmask(self, sigsetsize=None):
"""
Gets the current sigmask. If it's blank, a new one is created (of sigsetsize).
:param sigsetsize: the size (in *bytes*) of the sigmask set
:return: the sigmask
"""
if self._sigmask is None:
if sigsetsize is not None:
sc = self.state.solver.eval(sigsetsize)
self.state.add_constraints(sc == sigsetsize)
self._sigmask = self.state.solver.BVS('initial_sigmask', sc*self.state.arch.byte_width, key=('initial_sigmask',), eternal=True)
else:
self._sigmask = self.state.solver.BVS('initial_sigmask', self.sigmask_bits, key=('initial_sigmask',), eternal=True)
return self._sigmask | 0.006605 |
def find_type(self, txt):
"""
top level function used to simply return the
ONE ACTUAL string used for data types
"""
searchString = txt.upper()
match = 'Unknown'
for i in self.lst_type:
if searchString in i:
match = i
return match | 0.009288 |
def get_requested_aosp_permissions(self):
"""
Returns requested permissions declared within AOSP project.
This includes several other permissions as well, which are in the platform apps.
:rtype: list of str
"""
aosp_permissions = []
all_permissions = self.get_permissions()
for perm in all_permissions:
if perm in list(self.permission_module.keys()):
aosp_permissions.append(perm)
return aosp_permissions | 0.005929 |
def isPrefixOf(self, other):
"""Indicate if this |ASN.1| object is a prefix of other |ASN.1| object.
Parameters
----------
other: |ASN.1| object
|ASN.1| object
Returns
-------
: :class:`bool`
:class:`True` if this |ASN.1| object is a parent (e.g. prefix) of the other |ASN.1| object
or :class:`False` otherwise.
"""
l = len(self)
if l <= len(other):
if self._value[:l] == other[:l]:
return True
return False | 0.008913 |
def overlay_gateway_ip_interface_loopback_loopback_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
ip = ET.SubElement(overlay_gateway, "ip")
interface = ET.SubElement(ip, "interface")
loopback = ET.SubElement(interface, "loopback")
loopback_id = ET.SubElement(loopback, "loopback-id")
loopback_id.text = kwargs.pop('loopback_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004127 |
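The function is essentially a chain of nested `ET.SubElement` calls. A generic illustration of that pattern with made-up tag names, serialized with `ET.tostring` to show the resulting document:

```python
# Building nested XML with the standard library; tag names are illustrative.
import xml.etree.ElementTree as ET

config = ET.Element("config")
gateway = ET.SubElement(config, "overlay-gateway")
name = ET.SubElement(gateway, "name")
name.text = "gw1"
print(ET.tostring(config, encoding="unicode"))
# <config><overlay-gateway><name>gw1</name></overlay-gateway></config>
```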
def process_ssh(self, data, name):
"""
Processes SSH keys
:param data:
:param name:
:return:
"""
if data is None or len(data) == 0:
return
ret = []
try:
lines = [x.strip() for x in data.split(b'\n')]
for idx, line in enumerate(lines):
ret.append(self.process_ssh_line(line, name, idx))
except Exception as e:
logger.debug('Exception in processing SSH public key %s : %s' % (name, e))
self.trace_logger.log(e)
return ret | 0.005128 |
def score_small_straight_yahztee(dice: List[int]) -> int:
"""
Small straight scoring according to regular yahtzee rules
"""
global CONSTANT_SCORES_YAHTZEE
dice_set = set(dice)
if _are_two_sets_equal({1, 2, 3, 4}, dice_set) or \
_are_two_sets_equal({2, 3, 4, 5}, dice_set) or \
_are_two_sets_equal({3, 4, 5, 6}, dice_set):
return CONSTANT_SCORES_YAHTZEE[Category.SMALL_STRAIGHT]
return 0 | 0.002242 |
def _get_remote_settle_modes(pn_link):
"""Return a map containing the settle modes as provided by the remote.
Skip any default value.
"""
modes = {}
snd = pn_link.remote_snd_settle_mode
if snd == proton.Link.SND_UNSETTLED:
modes['snd-settle-mode'] = 'unsettled'
elif snd == proton.Link.SND_SETTLED:
modes['snd-settle-mode'] = 'settled'
if pn_link.remote_rcv_settle_mode == proton.Link.RCV_SECOND:
modes['rcv-settle-mode'] = 'second'
return modes | 0.00198 |
def split_id_otp(from_key):
"""
Separate public id from OTP given a YubiKey OTP as input.
@param from_key: The OTP from a YubiKey (in modhex)
@type from_key: string
@returns: public_id and OTP
@rtype: tuple of string
"""
if len(from_key) > 32:
public_id, otp = from_key[:-32], from_key[-32:]
elif len(from_key) == 32:
public_id = ''
otp = from_key
else:
raise pyhsm.exception.YHSM_Error("Bad from_key length %i < 32 : %s" \
% (len(from_key), from_key))
return public_id, otp | 0.005076 |
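An illustration of the slicing rule with a dummy modhex-style string; this is not a real OTP, just 44 placeholder characters.

```python
# The last 32 characters are the OTP; anything before them is the public id.
from_key = 'c' * 12 + 'b' * 32          # dummy 44-character YubiKey output
public_id, otp = from_key[:-32], from_key[-32:]
assert public_id == 'c' * 12
assert len(otp) == 32
```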
def padded_cross_entropy_factored(factored_logits,
labels,
label_smoothing,
weights_fn=weights_nonzero,
reduce_sum=True):
"""Memory-efficient computation of smoothing cross-entropy.
Avoids realizing the entire logits matrix at once.
Args:
factored_logits: a `FactoredTensor` representing a Tensor
with shape `[batch, timesteps, vocab_size]`.
labels: an integer `Tensor` with shape `[batch, timesteps]`.
label_smoothing: a floating point `Scalar`.
weights_fn: A function from labels to weights.
reduce_sum: a Boolean, whether to sum at the end or not.
Returns:
loss_numerator: a `Scalar`. Sum of losses.
loss_denominator: a `Scalar. The number of non-padding target tokens.
"""
a = factored_logits.a
b = factored_logits.b
confidence = 1.0 - label_smoothing
with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]):
labels_flat = tf.reshape(labels, [-1])
a_flat = tf.reshape(a, [-1, shape_list(b)[1]])
xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat,
tf.convert_to_tensor(confidence))
xent = tf.reshape(xent, shape_list(labels))
weights = weights_fn(labels)
if not reduce_sum:
return xent * weights, weights
return tf.reduce_sum(xent * weights), tf.reduce_sum(weights) | 0.004759 |
def __flush_buffer(self):
"""Flush the buffer contents out to a chunk.
"""
def ok(_):
self._buffer.close()
self._buffer = StringIO()
return self.__flush_data(self._buffer.getvalue()).addCallback(ok) | 0.007874 |
def _process_skip_checks(cls, skip_checks):
"""
Processes an iterable of skip_checks with strings and returns a dict
with <check_name>: <max_skip_level> pairs
"""
check_dict = defaultdict(lambda: None)
# A is for "all", "M" is for medium, "L" is for low
check_lookup = {'A': BaseCheck.HIGH,
'M': BaseCheck.MEDIUM,
'L': BaseCheck.LOW}
for skip_check_spec in skip_checks:
split_check_spec = skip_check_spec.split(':')
check_name = split_check_spec[0]
if len(split_check_spec) < 2:
check_max_level = BaseCheck.HIGH
else:
try:
check_max_level = check_lookup[split_check_spec[1]]
except KeyError:
warnings.warn("Skip specifier '{}' on check '{}' not found,"
" defaulting to skip entire check".format(split_check_spec[1], check_name))
check_max_level = BaseCheck.HIGH
check_dict[check_name] = check_max_level
return check_dict | 0.004367 |
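# Usage sketch with hypothetical check names, showing the accepted spec formats:
#   "check_name"      -> skip the whole check (HIGH)
#   "check_name:M"    -> skip only results up to MEDIUM severity
#   "check_name:X"    -> unknown level: warns, then skips the whole check
specs = ["check_dataset_size:M", "check_global_attributes"]
# resulting mapping:
#   {"check_dataset_size": BaseCheck.MEDIUM, "check_global_attributes": BaseCheck.HIGH}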
def _reset_internal(self):
"""Resets the pose of the arm and grippers."""
super()._reset_internal()
self.sim.data.qpos[self._ref_joint_pos_indexes] = self.mujoco_robot.init_qpos
if self.has_gripper_right:
self.sim.data.qpos[
self._ref_joint_gripper_right_actuator_indexes
] = self.gripper_right.init_qpos
if self.has_gripper_left:
self.sim.data.qpos[
self._ref_joint_gripper_left_actuator_indexes
] = self.gripper_left.init_qpos | 0.005455 |
def search_text(self, text_cursor, search_txt, search_flags):
"""
Searches a text in a text document.
:param text_cursor: Current text cursor
:param search_txt: Text to search
:param search_flags: QTextDocument.FindFlags
:returns: the list of occurrences, the current occurrence index
:rtype: tuple([], int)
"""
def compare_cursors(cursor_a, cursor_b):
"""
Compares two QTextCursor
:param cursor_a: cursor a
:param cursor_b: cursor b
            :returns: True if cursor_b's selection is contained within
                cursor_a's selection
"""
return (cursor_b.selectionStart() >= cursor_a.selectionStart() and
cursor_b.selectionEnd() <= cursor_a.selectionEnd())
text_document = self._editor.document()
occurrences = []
index = -1
cursor = text_document.find(search_txt, 0, search_flags)
original_cursor = text_cursor
while not cursor.isNull():
if compare_cursors(cursor, original_cursor):
index = len(occurrences)
occurrences.append((cursor.selectionStart(),
cursor.selectionEnd()))
cursor.setPosition(cursor.position() + 1)
cursor = text_document.find(search_txt, cursor, search_flags)
return occurrences, index | 0.001397 |
def _NotifyLegacy(username, notification_type, message, object_reference):
"""Schedules a legacy AFF4 user notification."""
try:
with aff4.FACTORY.Open(
aff4.ROOT_URN.Add("users").Add(username),
aff4_type=aff4_users.GRRUser,
mode="rw") as fd:
args = _MapLegacyArgs(notification_type, message, object_reference)
args[0] += ":%s" % notification_type
fd.Notify(*args)
except aff4.InstantiationError:
logging.error("Trying to notify non-existent user: %s", username) | 0.015355 |
def debug(method):
"""Decorator to debug the given method"""
def new_method(*args, **kwargs):
import pdb
try:
import pudb
except ImportError:
pudb = pdb
try:
pudb.runcall(method, *args, **kwargs)
except pdb.bdb.BdbQuit:
sys.exit('Normal quit from debugger')
new_method.__doc__ = method.__doc__
new_method.__name__ = 'debug(%s)' % method.__name__
return new_method | 0.002123 |
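# Usage sketch: decorating a function so that calling it runs under pudb (or pdb as
# a fallback). The call itself is left commented out because it opens an
# interactive debugger session.
@debug
def divide(a, b):
    return a / b

# divide(10, 2)   # steps through divide() inside the debugger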
def mdot_t(self,ifig=None,lims=[7.4,2.6,-8.5,-4.5],label=None,colour=None,s2ms=False,
dashes=None):
"""
Plot mass loss history as a function of log-time-left
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"skip to main sequence"
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None.
"""
fsize=18
params = {'axes.labelsize': fsize,
# 'font.family': 'serif',
'font.family': 'Times New Roman',
'figure.facecolor': 'white',
'text.fontsize': fsize,
'legend.fontsize': fsize,
'xtick.labelsize': fsize*0.8,
'ytick.labelsize': fsize*0.8,
'text.usetex': False}
try:
pl.rcParams.update(params)
        except Exception:
pass
if ifig is not None:
pl.figure(ifig)
if s2ms:
h1=self.get('center_h1')
idx=np.where(h1[0]-h1>=3.e-3)[0][0]
skip=idx
else:
skip=0
gage= self.get('star_age')
lage=np.zeros(len(gage))
agemin = max(old_div(abs(gage[-1]-gage[-2]),5.),1.e-10)
for i in np.arange(len(gage)):
if gage[-1]-gage[i]>agemin:
lage[i]=np.log10(gage[-1]-gage[i]+agemin)
else :
lage[i]=np.log10(agemin)
x = lage[skip:]
y = self.get('log_abs_mdot')[skip:]
if ifig is not None:
pl.figure(ifig)
if label is not None:
if colour is not None:
line,=pl.plot(x,y,label=label,color=colour)
else:
line,=pl.plot(x,y,label=label)
else:
if colour is not None:
line,=pl.plot(x,y,color=colour)
else:
line,=pl.plot(x,y)
if dashes is not None:
line.set_dashes(dashes)
if label is not None:
pl.legend(loc='best').draw_frame(False)
pl.xlim(lims[:2])
pl.ylim(lims[2:])
        pl.ylabel(r'$\mathrm{log}_{10}(\|\dot{M}\|/M_\odot\,\mathrm{yr}^{-1})$')
        pl.xlabel(r'$\mathrm{log}_{10}(t^*/\mathrm{yr})$')
def get_settings(all,key):
"""View Hitman internal settings. Use 'all' for all keys"""
with Database("settings") as s:
if all:
for k, v in zip(list(s.keys()), list(s.values())):
print("{} = {}".format(k, v))
elif key:
print("{} = {}".format(key, s[key]))
else:
print("Don't know what you want? Try --all") | 0.005141 |
def stack_decoders(self, *layers):
"""
Stack decoding layers.
"""
self.stack(*layers)
self.decoding_layers.extend(layers) | 0.012422 |
def remove_run_script(self, script, target_name=None):
"""
Removes the given script string from the given target
:param script: The script string to be removed from the target
:param target_name: Target name or list of target names to remove the run script from or None for every target
:return:
"""
for target in self.objects.get_targets(target_name):
for build_phase_id in target.buildPhases:
build_phase = self.objects[build_phase_id]
if not isinstance(build_phase, PBXShellScriptBuildPhase):
continue
if build_phase.shellScript == script:
del self.objects[build_phase_id]
target.remove_build_phase(build_phase) | 0.003793 |
def filter(coro, iterable, assert_fn=None, limit=0, loop=None):
"""
    Returns a list of all the values in iterable which pass an asynchronous
    truth test coroutine.
Operations are executed concurrently by default, but results
will be in order.
You can configure the concurrency via `limit` param.
    This function is the asynchronous equivalent of the Python built-in
    `filter()` function.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
coro (coroutine function): coroutine filter function to call accepting
iterable values.
iterable (iterable|asynchronousiterable): an iterable collection
yielding coroutines functions.
assert_fn (coroutinefunction): optional assertion function.
limit (int): max filtering concurrency limit. Use ``0`` for no limit.
loop (asyncio.BaseEventLoop): optional event loop to use.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
list: ordered list containing values that passed
the filter.
Usage::
async def iseven(num):
return num % 2 == 0
async def assert_false(el):
return not el
await paco.filter(iseven, [1, 2, 3, 4, 5])
# => [2, 4]
await paco.filter(iseven, [1, 2, 3, 4, 5], assert_fn=assert_false)
# => [1, 3, 5]
"""
assert_corofunction(coro=coro)
assert_iter(iterable=iterable)
# Check valid or empty iterable
if len(iterable) == 0:
return iterable
# Reduced accumulator value
results = [None] * len(iterable)
# Use a custom or default filter assertion function
assert_fn = assert_fn or assert_true
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop)
# Reducer partial function for deferred coroutine execution
def filterer(index, element):
@asyncio.coroutine
def wrapper():
result = yield from coro(element)
if (yield from assert_fn(result)):
results[index] = element
return wrapper
# Iterate and attach coroutine for defer scheduling
for index, element in enumerate(iterable):
pool.add(filterer(index, element))
# Wait until all coroutines finish
yield from pool.run(ignore_empty=True)
# Returns filtered elements
return [x for x in results if x is not None] | 0.0004 |
def MI_enumInstances(self,
env,
ns,
propertyList,
requestedCimClass,
cimClass):
# pylint: disable=invalid-name
"""Return instances of a given CIM class
Implements the WBEM operation EnumerateInstances in terms
of the enum_instances method. A derived class will not normally
override this method.
"""
logger = env.get_logger()
logger.log_debug('CIMProvider MI_enumInstances called...')
keyNames = get_keys_from_class(cimClass)
plist = None
if propertyList is not None:
lkns = [kn.lower() for kn in keyNames]
props = pywbem.NocaseDict()
plist = [s.lower() for s in propertyList]
pklist = plist + lkns
            # copy only the requested properties (plus the keys) into props
            for prop in cimClass.properties.values():
                if prop.name.lower() in pklist:
                    props[prop.name] = prop
else:
props = cimClass.properties
_strip_quals(props)
path = pywbem.CIMInstanceName(classname=cimClass.classname,
namespace=ns)
model = pywbem.CIMInstance(classname=cimClass.classname,
properties=props, path=path)
gen = self.enum_instances(env=env,
model=model,
cim_class=cimClass,
keys_only=False)
try:
iter(gen)
except TypeError:
logger.log_debug('CIMProvider MI_enumInstances returning')
return
for inst in gen:
inst.path = build_instance_name(inst, keyNames)
if self.filter_results and plist is not None:
inst = inst.copy()
filter_instance(inst, plist)
yield inst
logger.log_debug('CIMProvider MI_enumInstances returning') | 0.004053 |
def remove_file_from_s3(awsclient, bucket, key):
"""Remove a file from an AWS S3 bucket.
    :param awsclient: AWS client wrapper used to create the boto3 S3 client
    :param bucket: name of the S3 bucket
    :param key: key of the object to delete
    :return: the delete_object response
    """
    client_s3 = awsclient.get_client('s3')
    return client_s3.delete_object(Bucket=bucket, Key=key) | 0.00361 |
def get_last_activities(self, n):
"""Get all activity data for the last activity
Keyword arguments:
"""
filenames = self.get_activity_list().iloc[-n:].filename.tolist()
last_activities = [self.get_activity(f) for f in filenames]
return last_activities | 0.006667 |
def create_virtualenv(self):
"""
Populate venv from preloaded image
"""
return task.create_virtualenv(self.target, self.datadir,
self._preload_image(), self._get_container_name) | 0.013575 |
def get_ssl(self, host_and_port=None):
"""
Get SSL params for the given host.
:param (str,int) host_and_port: the host/port pair we want SSL params for, default current_host_and_port
"""
if not host_and_port:
host_and_port = self.current_host_and_port
return self.__ssl_params.get(host_and_port) | 0.008403 |
def flush(self, save_index=False, save_model=False, clear_buffer=False):
"""Commit all changes, clear all caches."""
if save_index:
if self.fresh_index is not None:
self.fresh_index.save(self.location('index_fresh'))
if self.opt_index is not None:
self.opt_index.save(self.location('index_opt'))
if save_model:
if self.model is not None:
self.model.save(self.location('model'))
self.payload.commit()
if clear_buffer:
if hasattr(self, 'fresh_docs'):
try:
self.fresh_docs.terminate() # erase all buffered documents + file on disk
            except Exception:
pass
self.fresh_docs = SqliteDict(journal_mode=JOURNAL_MODE) # buffer defaults to a random location in temp
self.fresh_docs.sync() | 0.00783 |
def post_unvote(self, post_id):
"""Action lets you unvote for a post (Requires login).
Parameters:
post_id (int):
"""
return self._get('posts/{0}/unvote.json'.format(post_id),
method='PUT', auth=True) | 0.007407 |
def get_positions(elinfo, center=True):
    '''Computes the positions of the electrodes based on the elinfo
Parameters
----------
elinfo: dict
Contains electrode information from yaml file (dim, pitch, sortlist, plane, pos)
Returns
-------
positions: np.array
3d points with the centers of the electrodes
'''
electrode_pos = False
# method 1: positions in elinfo
if 'pos' in elinfo.keys():
pos = np.array(elinfo['pos'])
nelec = pos.shape[0]
if len(pos.shape) == 1:
if len(pos) == 2:
pos2d = np.array([pos])
if 'plane' not in elinfo.keys():
# print("'plane' field with 2D dimensions assumed to be 'yz")
plane = 'yz'
else:
plane = elinfo['plane']
if 'offset' not in elinfo.keys():
offset = 0
else:
offset = elinfo['offset']
pos = add_3dim(pos2d, plane, offset)
elif len(pos) == 3:
pos = np.array([pos])
elif len(pos) != 3:
raise AttributeError('pos attribute should be one or a list of 2D or 3D points')
elif len(pos.shape) == 2:
if pos.shape[1] == 2:
pos2d = pos
if 'plane' not in elinfo.keys():
# print("'plane' field with 2D dimensions assumed to be 'yz")
plane = 'yz'
else:
plane = elinfo['plane']
if 'offset' not in elinfo.keys():
offset = 0
else:
offset = elinfo['offset']
pos = add_3dim(pos2d, plane, offset)
elif pos.shape[1] != 3:
raise AttributeError('pos attribute should be a list of 2D or 3D points')
electrode_pos = True
    # method 2: dim, pitch, stagger
if 'dim' in elinfo.keys():
dim = elinfo['dim']
if dim == 1:
if 'plane' not in elinfo.keys():
# print("'plane' field with 2D dimensions assumed to be 'yz")
plane = 'yz'
else:
plane = elinfo['plane']
if 'offset' not in elinfo.keys():
offset = 0
else:
offset = elinfo['offset']
pos2d = np.array([[0, 0]])
pos = add_3dim(pos2d, plane, offset)
else:
if 'pitch' not in elinfo.keys():
raise AttributeError("When 'dim' is used, also 'pitch' should be specified.")
else:
pitch = elinfo['pitch']
if isinstance(dim, (int, np.integer)):
dim = [dim, dim]
if isinstance(pitch, (int, np.integer)) or isinstance(pitch, (float, np.float)):
pitch = [pitch, pitch]
if len(dim) == 2:
d1 = np.array([])
d2 = np.array([])
if 'stagger' in elinfo.keys():
stagger = elinfo['stagger']
else:
stagger = None
for d_i in np.arange(dim[1]):
if stagger is not None:
if isinstance(stagger, (int, np.integer)) or isinstance(stagger, (float, np.float)):
if np.mod(d_i, 2):
d1new = np.arange(dim[0]) * pitch[0] + stagger
else:
d1new = np.arange(dim[0]) * pitch[0]
elif len(stagger) == len(dim):
d1new = np.arange(dim[0]) * pitch[0] + stagger[d_i]
else:
d1new = np.arange(dim[0]) * pitch[0]
else:
d1new = np.arange(dim[0]) * pitch[0]
d1 = np.concatenate((d1, d1new))
d2 = np.concatenate((d2, dim[0] * [pitch[1] * d_i]))
pos2d = np.vstack((d2, d1)).T
if 'plane' not in elinfo.keys():
# print("'plane' field with 2D dimensions assumed to be 'yz")
plane = 'yz'
else:
plane = elinfo['plane']
if 'offset' not in elinfo.keys():
offset = 0
else:
offset = elinfo['offset']
pos2d = np.concatenate((np.reshape(d2.T, (d1.size, 1)),
np.reshape(d1.T, (d2.size, 1))), axis=1)
pos = add_3dim(pos2d, plane, offset)
elif len(dim) >= 3:
d1 = np.array([])
d2 = np.array([])
if 'stagger' in elinfo.keys():
stagger = elinfo['stagger']
else:
stagger = None
for d_i, d in enumerate(dim):
if stagger is not None:
if isinstance(stagger, (int, np.integer)) or isinstance(stagger, (float, np.float)):
if np.mod(d_i, 2):
d1new = np.arange(d) * pitch[0] + stagger
else:
d1new = np.arange(d) * pitch[0]
elif len(stagger) == len(dim):
d1new = np.arange(d) * pitch[0] + stagger[d_i]
else:
d1new = np.arange(d) * pitch[0]
else:
d1new = np.arange(d) * pitch[0]
d1 = np.concatenate((d1, d1new))
d2 = np.concatenate((d2, d * [pitch[1] * d_i]))
pos2d = np.vstack((d2, d1)).T
if 'plane' not in elinfo.keys():
# print("'plane' field with 2D dimensions assumed to be 'yz")
plane = 'yz'
else:
plane = elinfo['plane']
if 'offset' not in elinfo.keys():
offset = 0
else:
offset = elinfo['offset']
pos = add_3dim(pos2d, plane, offset)
electrode_pos = True
if electrode_pos and center:
centered_pos = center_mea(pos)
# resort electrodes in case
centered_pos_sorted = copy.deepcopy(centered_pos)
if 'sortlist' in elinfo.keys() and elinfo['sortlist'] is not None:
sortlist = elinfo['sortlist']
for i, si in enumerate(sortlist):
centered_pos_sorted[si] = centered_pos[i]
else:
centered_pos_sorted = centered_pos
return centered_pos_sorted
elif electrode_pos and not center:
# resort electrodes in case
pos_sorted = copy.deepcopy(pos)
if 'sortlist' in elinfo.keys() and elinfo['sortlist'] is not None:
sortlist = elinfo['sortlist']
for i, si in enumerate(sortlist):
pos_sorted[si] = pos[i]
else:
pos_sorted = pos
return pos_sorted
else:
print("Define either a list of positions 'pos' or 'dim' and 'pitch'")
return None | 0.001796 |
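# Two illustrative elinfo specifications accepted by get_positions (values made up):
# either explicit 2D positions on the 'yz' plane, or a regular grid via dim/pitch.
elinfo_pos = {"pos": [[0.0, 0.0], [0.0, 18.0], [18.0, 0.0], [18.0, 18.0]],
              "plane": "yz", "offset": 0}
elinfo_grid = {"dim": [2, 2], "pitch": 18, "plane": "yz"}
# centers = get_positions(elinfo_grid)   # -> (4, 3) array of centered 3D positions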
def findModuleOfName(self, dotted_name, level, filename, extrapath=None):
"""Given a fully qualified name, find what module contains it."""
if dotted_name.endswith('.*'):
return dotted_name[:-2]
name = dotted_name
# extrapath is None only in a couple of test cases; in real life it's
# always present
if level and level > 1 and extrapath:
# strip trailing path bits for each extra level to account for
# relative imports
# from . import X has level == 1 and nothing is stripped (the level > 1 check accounts for this case)
# from .. import X has level == 2 and one trailing path component must go
# from ... import X has level == 3 and two trailing path components must go
extrapath = extrapath.split(os.path.sep)
level -= 1
extrapath = extrapath[0:-level]
extrapath = os.path.sep.join(extrapath)
while name:
candidate = self.isModule(name, extrapath)
if candidate:
return candidate
candidate = self.isPackage(name, extrapath)
if candidate:
return candidate
name = name[:name.rfind('.')]
self.warn(dotted_name, '%s: could not find %s', filename, dotted_name)
return dotted_name | 0.003668 |
def _set_lsp_cspf_computation_mode(self, v, load=False):
"""
Setter method for lsp_cspf_computation_mode, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_cspf_computation_mode (cspf-computation-mode)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_cspf_computation_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_cspf_computation_mode() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'use-igp-metric': {'value': 1}, u'use-te-metric': {'value': 2}},), is_leaf=True, yang_name="lsp-cspf-computation-mode", rest_name="cspf-computation-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Specify cspf-computation-mode', u'cli-full-no': None, u'alt-name': u'cspf-computation-mode'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='cspf-computation-mode', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_cspf_computation_mode must be of a type compatible with cspf-computation-mode""",
'defined-type': "brocade-mpls:cspf-computation-mode",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'use-igp-metric': {'value': 1}, u'use-te-metric': {'value': 2}},), is_leaf=True, yang_name="lsp-cspf-computation-mode", rest_name="cspf-computation-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Specify cspf-computation-mode', u'cli-full-no': None, u'alt-name': u'cspf-computation-mode'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='cspf-computation-mode', is_config=True)""",
})
self.__lsp_cspf_computation_mode = t
if hasattr(self, '_set'):
self._set() | 0.004547 |
def save_data(self,session, exp_id, content):
'''save data will obtain the current subid from the session, and save it
depending on the database type. Currently we just support flat files'''
from expfactory.database.models import (
Participant,
Result
)
subid = session.get('subid')
token = session.get('token')
self.logger.info('Saving data for subid %s' % subid)
# We only attempt save if there is a subject id, set at start
if subid is not None:
p = Participant.query.filter(Participant.id == subid).first() # better query here
            # Does the supplied token match this participant's token, and is it still valid?
if self.headless and p.token != token:
self.logger.warning('%s attempting to use mismatched token [%s] skipping save' %(p.id, token))
elif self.headless and p.token.endswith(('finished','revoked')):
self.logger.warning('%s attempting to use expired token [%s] skipping save' %(p.id, token))
else:
# Preference is to save data under 'data', otherwise do all of it
if "data" in content:
content = content['data']
result = Result(data=content,
exp_id=exp_id,
participant_id=p.id) # check if changes from str/int
# Create and save the result
self.session.add(result)
p.results.append(result)
self.session.commit()
self.logger.info("Save [participant] %s [result] %s" %(p, result)) | 0.009908 |
def set_models_keyspace(self, keyspace):
"""Set keyspace for all connection models"""
for models in self.connection.introspection.cql_models.values():
for model in models:
model.__keyspace__ = keyspace | 0.00813 |
def merge_arrays(merge_list, names=None, flatten=True, outtype=None):
"""Merges the given arrays into a single array. The arrays must all have
the same shape. If one or more of the given arrays has multiple fields,
all of the fields will be included as separate fields in the new array.
Parameters
----------
merge_list : list of arrays
The list of arrays to merge.
names : {None | sequence of strings}
Optional, the names of the fields in the output array. If flatten is
True, must be the same length as the total number of fields in
        merge_list. Otherwise, must be the same length as the number of
        arrays in merge_list. If None is provided, and flatten is True, names
used will be the same as the name of the fields in the given arrays.
If the datatype has no name, or flatten is False, the new field will
be `fi` where i is the index of the array in arrays.
flatten : bool
Make all of the fields in the given arrays separate fields in the
new array. Otherwise, each array will be added as a field. If an
array has fields, they will be subfields in the output array. Default
is True.
outtype : {None | class}
Cast the new array to the given type. Default is to return a
numpy structured array.
Returns
-------
new array : {numpy.ndarray | outtype}
A new array with all of the fields in all of the arrays merged into
a single array.
"""
# make sure everything in merge_list is an array
merge_list = _ensure_array_list(merge_list)
if not all(merge_list[0].shape == arr.shape for arr in merge_list):
raise ValueError("all of the arrays in merge_list must have the " +
"same shape")
if flatten:
new_dt = combine_fields([arr.dtype for arr in merge_list])
else:
new_dt = numpy.dtype([('f%i' %ii, arr.dtype.descr) \
for ii,arr in enumerate(merge_list)])
new_arr = merge_list[0].__class__(merge_list[0].shape, dtype=new_dt)
# ii is a counter to keep track of which fields from the new array
# go with which arrays in merge list
ii = 0
for arr in merge_list:
if arr.dtype.names is None:
new_arr[new_dt.names[ii]] = arr
ii += 1
else:
for field in arr.dtype.names:
new_arr[field] = arr[field]
ii += 1
# set the names if desired
if names is not None:
new_arr.dtype.names = names
# ditto the outtype
if outtype is not None:
new_arr = new_arr.view(type=outtype)
return new_arr | 0.002263 |
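# For comparison, NumPy ships a similar helper in numpy.lib.recfunctions; this
# sketch merges two plain arrays into one structured array, with default field
# names f0/f1 mirroring the naming convention used above.
import numpy
from numpy.lib import recfunctions as rfn

x = numpy.array([1, 2, 3])
y = numpy.array([4.0, 5.0, 6.0])
merged = rfn.merge_arrays((x, y), flatten=True)
# merged.dtype.names == ('f0', 'f1')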
def raw(func, **func_args):
"""Decorator for eager functions checking input array
and stripping away the weld_type.
Stripping the weld_type is required to keep the same code in Series.apply and because
Numpy functions don't (all) have kwargs. Passing weld_type to NumPy functions is unexpected
and raises ValueError.
Parameters
----------
func : function
Function to execute eagerly over raw data.
func_args : kwargs
Arguments to pass to func, if any.
Returns
-------
function
"""
if len(func_args) == 0:
@wraps(func)
def wrapper(array, **kwargs):
if isinstance(array, WeldObject):
raise TypeError('Can only perform operation on raw data')
# need to not pass weld_type to whatever function
if 'weld_type' in kwargs:
del kwargs['weld_type']
return func(array, **kwargs)
return wrapper
else:
# here kwargs is only kept s.t. Series can still pass the weld_type
@wraps(func)
def wrapper(array, **kwargs):
if isinstance(array, WeldObject):
raise TypeError('Can only perform operation on raw data')
return func(array, **func_args)
return wrapper | 0.002317 |
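# Usage sketch, assuming the module's WeldObject import is in scope: the decorator
# guards a NumPy reduction so it only accepts raw ndarrays, and any weld_type
# keyword forwarded by a caller is dropped before NumPy sees it.
import numpy as np

eager_sum = raw(np.sum)
eager_sum(np.array([1, 2, 3]), weld_type=None)   # -> 6; weld_type is stripped
# eager_sum(some_weld_object)                    # would raise TypeError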
def _get_replaced_code(self, names):
""" Return code, with new name, expressions, and replacements applied.
"""
code = self._code
# Modify name
fname = names[self]
code = code.replace(" " + self.name + "(", " " + fname + "(")
# Apply string replacements first -- these may contain $placeholders
for key, val in self._replacements.items():
code = code.replace(key, val)
# Apply assignments to the end of the function
# Collect post lines
post_lines = []
for key, val in self._assignments.items():
if isinstance(key, Variable):
key = names[key]
if isinstance(val, ShaderObject):
val = val.expression(names)
line = ' %s = %s;' % (key, val)
post_lines.append(line)
# Add a default $post placeholder if needed
if 'post' in self._expressions:
post_lines.append(' $post')
# Apply placeholders for hooks
post_text = '\n'.join(post_lines)
if post_text:
post_text = '\n' + post_text + '\n'
code = code.rpartition('}')
code = code[0] + post_text + code[1] + code[2]
# Add a default $pre placeholder if needed
if 'pre' in self._expressions:
m = re.search(fname + r'\s*\([^{]*\)\s*{', code)
if m is None:
raise RuntimeError("Cound not find beginning of function '%s'"
% fname)
ind = m.span()[1]
code = code[:ind] + "\n $pre\n" + code[ind:]
# Apply template variables
for key, val in self._expressions.items():
val = val.expression(names)
search = r'\$' + key + r'($|[^a-zA-Z0-9_])'
code = re.sub(search, val+r'\1', code)
# Done
if '$' in code:
v = parsing.find_template_variables(code)
logger.warning('Unsubstituted placeholders in code: %s\n'
' replacements made: %s',
v, list(self._expressions.keys()))
return code + '\n' | 0.005372 |
def _extract(self):
"""Extract email addresses from results.
        Text content from all crawled pages is run through a simple email
extractor. Data is cleaned prior to running pattern expressions.
"""
self.log.debug("Extracting emails from text content")
for item in self.data:
emails = extract_emails(item, self.domain, self.fuzzy)
self.results.extend(emails)
self.log.debug("Email extraction completed")
return list(set(self.results)) | 0.003839 |
def _wres(parallel, progs, fresources=None, ensure_mem=None):
"""Add resource information to the parallel environment on required programs and files.
Enables spinning up required machines and operating in non-shared filesystem
environments.
progs -- Third party tools used in processing
fresources -- Required file-based resources needed. These will be transferred on non-shared
filesystems.
ensure_mem -- Dictionary of required minimum memory for programs used. Ensures
enough memory gets allocated on low-core machines.
"""
parallel = copy.deepcopy(parallel)
parallel["progs"] = progs
if fresources:
parallel["fresources"] = fresources
if ensure_mem:
parallel["ensure_mem"] = ensure_mem
return parallel | 0.006203 |
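# Illustrative call with made-up values: the returned copy carries program and
# memory requirements alongside the original scheduler settings; the input dict
# itself is left unchanged because _wres deep-copies it.
parallel = {"type": "ipython", "cores": 16}
annotated = _wres(parallel, ["bwa", "gatk"], ensure_mem={"gatk": 8})
# annotated == {"type": "ipython", "cores": 16, "progs": ["bwa", "gatk"],
#               "ensure_mem": {"gatk": 8}}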
def run(self):
"""Called by Sphinx.
:returns: ImgurEmbedNode and ImgurJavaScriptNode instances with config values passed as arguments.
:rtype: list
"""
# Get Imgur ID.
imgur_id = self.arguments[0]
if not RE_IMGUR_ID.match(imgur_id):
raise ImgurError('Invalid Imgur ID specified. Must be 5-10 letters and numbers. Albums prefixed with "a/".')
# Read from conf.py.
config = self.state.document.settings.env.config
hide_post_details = self.options.get('hide_post_details', config.imgur_hide_post_details)
return [ImgurEmbedNode(imgur_id, hide_post_details), ImgurJavaScriptNode()] | 0.008824 |
def filter_by(self, **kwargs):
"""
Apply the given filtering criterion to a copy of this Query, using
keyword expressions.
"""
query = self._copy()
for field, value in kwargs.items():
query.domain.append(
(field, '=', value)
)
return query | 0.00597 |
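# Usage sketch with hypothetical field names, assuming `query` is an existing Query
# instance: each keyword becomes a ('<field>', '=', <value>) triple appended to the
# copied query's domain, leaving the original query untouched.
filtered = query.filter_by(state="done", active=True)
# filtered.domain[-2:] == [('state', '=', 'done'), ('active', '=', True)]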