def prepend_http(url):
""" Ensure there's a scheme specified at the beginning of a url, defaulting to http://
>>> prepend_http('duckduckgo.com')
'http://duckduckgo.com'
"""
url = url.lstrip()
if not urlparse(url).scheme:
return 'http://' + url
return url
def loadModule(self, modName, **userCtx):
"""Load and execute MIB modules as Python code"""
for mibSource in self._mibSources:
debug.logger & debug.FLAG_BLD and debug.logger(
'loadModule: trying %s at %s' % (modName, mibSource))
try:
codeObj, sfx = mibSource.read(modName)
except IOError as exc:
debug.logger & debug.FLAG_BLD and debug.logger(
'loadModule: read %s from %s failed: '
'%s' % (modName, mibSource, exc))
continue
modPath = mibSource.fullPath(modName, sfx)
if modPath in self._modPathsSeen:
debug.logger & debug.FLAG_BLD and debug.logger(
'loadModule: seen %s' % modPath)
break
else:
self._modPathsSeen.add(modPath)
debug.logger & debug.FLAG_BLD and debug.logger(
'loadModule: evaluating %s' % modPath)
g = {'mibBuilder': self,
'userCtx': userCtx}
try:
exec(codeObj, g)
except Exception:
self._modPathsSeen.remove(modPath)
raise error.MibLoadError(
'MIB module "%s" load error: '
'%s' % (modPath, traceback.format_exception(*sys.exc_info())))
self._modSeen[modName] = modPath
debug.logger & debug.FLAG_BLD and debug.logger(
'loadModule: loaded %s' % modPath)
break
if modName not in self._modSeen:
raise error.MibNotFoundError(
'MIB file "%s" not found in search path '
'(%s)' % (modName and modName + ".py[co]", ', '.join(
[str(x) for x in self._mibSources])))
return self
def j1(x, context=None):
"""
Return the value of the Bessel function of the first kind of order 1 at x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_j1,
(BigFloat._implicit_convert(x),),
context,
)
def parse_tweet(raw_tweet, source, now=None):
"""
Parses a single raw tweet line from a twtxt file
and returns a :class:`Tweet` object.
:param str raw_tweet: a single raw tweet line
:param Source source: the source of the given tweet
:param Datetime now: the current datetime
:returns: the parsed tweet
:rtype: Tweet
"""
if now is None:
now = datetime.now(timezone.utc)
raw_created_at, text = raw_tweet.split("\t", 1)
created_at = parse_iso8601(raw_created_at)
if created_at > now:
raise ValueError("Tweet is from the future")
return Tweet(click.unstyle(text.strip()), created_at, source)
def main(args, stop=False):
"""
Arguments parsing, etc..
"""
daemon = AMQPDaemon(
con_param=getConParams(
settings.RABBITMQ_STORAGE_VIRTUALHOST
),
queue=settings.RABBITMQ_STORAGE_INPUT_QUEUE,
out_exch=settings.RABBITMQ_STORAGE_EXCHANGE,
out_key=settings.RABBITMQ_STORAGE_OUTPUT_KEY,
react_fn=reactToAMQPMessage,
glob=globals() # used in deserializer
)
if not stop and args and args.foreground: # run at foreground
daemon.run()
else:
daemon.run_daemon()
def present(name, login=None, domain=None, database=None, roles=None, options=None, **kwargs):
'''
Checks existence of the named user.
If not present, creates the user with the specified roles and options.
name
The name of the user to manage
login
If not specified, will be created WITHOUT LOGIN
domain
Creates a Windows authentication user.
Needs to be NetBIOS domain or hostname
database
The database of the user (not the login)
roles
Add this user to all the roles in the list
options
Can be a list of strings, a dictionary, or a list of dictionaries
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if domain and not login:
ret['result'] = False
ret['comment'] = 'domain cannot be set without login'
return ret
if __salt__['mssql.user_exists'](name, domain=domain, database=database, **kwargs):
ret['comment'] = 'User {0} is already present (Not going to try to set its roles or options)'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User {0} is set to be added'.format(name)
return ret
user_created = __salt__['mssql.user_create'](name, login=login,
domain=domain,
database=database,
roles=roles,
options=_normalize_options(options),
**kwargs)
if user_created is not True: # Non-empty strings are also evaluated to True, so we cannot use if not user_created:
ret['result'] = False
ret['comment'] += 'User {0} failed to be added: {1}'.format(name, user_created)
return ret
ret['comment'] += 'User {0} has been added'.format(name)
ret['changes'][name] = 'Present'
return ret
def envelope(**kwargs):
"""Create OAI-PMH envelope for response."""
e_oaipmh = Element(etree.QName(NS_OAIPMH, 'OAI-PMH'), nsmap=NSMAP)
e_oaipmh.set(etree.QName(NS_XSI, 'schemaLocation'),
'{0} {1}'.format(NS_OAIPMH, NS_OAIPMH_XSD))
e_tree = ElementTree(element=e_oaipmh)
if current_app.config['OAISERVER_XSL_URL']:
e_oaipmh.addprevious(etree.ProcessingInstruction(
'xml-stylesheet', 'type="text/xsl" href="{0}"'
.format(current_app.config['OAISERVER_XSL_URL'])))
e_responseDate = SubElement(
e_oaipmh, etree.QName(
NS_OAIPMH, 'responseDate'))
# date should be first possible moment
e_responseDate.text = datetime_to_datestamp(datetime.utcnow())
e_request = SubElement(e_oaipmh, etree.QName(NS_OAIPMH, 'request'))
for key, value in kwargs.items():
if key == 'from_' or key == 'until':
value = datetime_to_datestamp(value)
elif key == 'resumptionToken':
value = value['token']
e_request.set(key, value)
e_request.text = url_for('invenio_oaiserver.response', _external=True)
return e_tree, e_oaipmh
def upload_file_sections(self, user_id, section_id, assignment_id):
"""
Upload a file.
Upload a file to a submission.
This API endpoint is the first step in uploading a file to a submission as a student.
See the {file:file_uploads.html File Upload Documentation} for details on the file upload workflow.
The final step of the file upload workflow will return the attachment data,
including the new file id. The caller can then POST to submit the
+online_upload+ assignment with these file ids.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - section_id
"""ID"""
path["section_id"] = section_id
# REQUIRED - PATH - assignment_id
"""ID"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
self.logger.debug("POST /api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}/files with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}/files".format(**path), data=data, params=params, no_data=True)
def set(self, instance, value, **kw):
"""
Set Analyses to an AR
:param instance: Analysis Request
:param value: Single AS UID or a list of dictionaries containing AS UIDs
:param kw: Additional keyword parameters passed to the field
"""
if not isinstance(value, (list, tuple)):
value = [value]
uids = []
for item in value:
uid = None
if isinstance(item, dict):
uid = item.get("uid")
if api.is_uid(item):
uid = item
if uid is None:
logger.warn("Could not extract UID of value")
continue
uids.append(uid)
analyses = map(api.get_object_by_uid, uids)
self._set(instance, analyses, **kw)
def ndto2d(x, axis=-1):
"""Convert a multi-dimensional array into a 2d array, with the axes
specified by the `axis` parameter flattened into an index along
rows, and the remaining axes flattened into an index along the
columns. This operation can not be properly achieved by a simple
reshape operation since a reshape would shuffle element order if
the axes to be grouped together were not consecutive: this is
avoided by first permuting the axes so that the grouped axes are
consecutive.
Parameters
----------
x : array_like
Multi-dimensional input array
axis : int or tuple of ints, optional (default -1)
Axes of `x` to be grouped together to form the rows of the output
2d array.
Returns
-------
xtr : ndarray
2D output array
rsi : tuple
A tuple containing the details of transformation applied in the
conversion to 2D
"""
# Convert int axis into a tuple
if isinstance(axis, int):
axis = (axis,)
# Handle negative axis indices
axis = tuple([k if k >= 0 else x.ndim + k for k in axis])
# Complement of axis set on full set of axes of input v
caxis = tuple(set(range(x.ndim)) - set(axis))
# Permute axes of x (generalised transpose) so that axes over
# which operation is to be applied are all at the end
prm = caxis + axis
xt = np.transpose(x, axes=prm)
xts = xt.shape
# Reshape into a 2D array with the axes specified by the axis
# parameter flattened into an index along rows, and the remaining
# axes flattened into an index along the columns
xtr = xt.reshape((np.product(xts[0:len(caxis)]), -1))
# Return reshaped array and a tuple containing the information
# necessary to undo the entire operation
return xtr, (xts, prm)
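# A minimal usage sketch for ndto2d above (illustrative values, not part of the
# original source); it assumes NumPy is available and shows the shape change and
# how the returned (xts, prm) tuple undoes the conversion.
import numpy as np
x_demo = np.random.randn(3, 4, 5)
xtr_demo, (xts_demo, prm_demo) = ndto2d(x_demo, axis=0)
# with axis=0 on a (3, 4, 5) array the reshape above yields shape (4*5, 3)
assert xtr_demo.shape == (20, 3)
# undo: reshape back to the permuted shape, then invert the axis permutation
x_undone = xtr_demo.reshape(xts_demo).transpose(np.argsort(prm_demo))
assert np.allclose(x_demo, x_undone)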
def get_channel_type(channel, framefile):
"""Find the channel type in a given GWF file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
name of data channel to find
framefile : `str`
path of GWF file in which to search
Returns
-------
ctype : `str`
the type of the channel ('adc', 'sim', or 'proc')
Raises
------
ValueError
if the channel is not found in the table-of-contents
"""
channel = str(channel)
for name, type_ in _iter_channels(framefile):
if channel == name:
return type_
raise ValueError("%s not found in table-of-contents for %s"
% (channel, framefile))
def _GetChromeWebStorePage(self, extension_identifier):
"""Retrieves the page for the extension from the Chrome store website.
Args:
extension_identifier (str): Chrome extension identifier.
Returns:
str: page content or None.
"""
web_store_url = self._WEB_STORE_URL.format(xid=extension_identifier)
try:
response = requests.get(web_store_url)
except (requests.ConnectionError, requests.HTTPError) as exception:
logger.warning((
'[{0:s}] unable to retrieve URL: {1:s} with error: {2!s}').format(
self.NAME, web_store_url, exception))
return None
return response.text
def Matches(self, file_entry):
"""Compares the file entry against the filter.
Args:
file_entry (dfvfs.FileEntry): file entry to compare.
Returns:
bool: True if the file entry matches the filter, False if not or
None if the filter does not apply.
"""
location = getattr(file_entry.path_spec, 'location', None)
if not location:
return None
if '.' not in location:
return False
_, _, extension = location.rpartition('.')
return extension.lower() in self._extensions
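# A small illustrative sketch (not from the original source) of the extension
# check used in Matches above, with a hypothetical location and extension set.
demo_location = '/Users/alice/Documents/report.PDF'
demo_extensions = frozenset(['pdf', 'docx'])
_, _, demo_extension = demo_location.rpartition('.')
assert demo_extension.lower() in demo_extensions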
def import_locations(self, data, index='WMO'):
"""Parse NOAA weather station data files.
``import_locations()`` returns a dictionary with keys containing either
the WMO or ICAO identifier, and values that are ``Station`` objects
that describe the large variety of data exported by NOAA_.
It expects data files in one of the following formats::
00;000;PABL;Buckland, Buckland Airport;AK;United States;4;65-58-56N;161-09-07W;;;7;;
01;001;ENJA;Jan Mayen;;Norway;6;70-56N;008-40W;70-56N;008-40W;10;9;P
01;002;----;Grahuken;;Norway;6;79-47N;014-28E;;;;15;
or::
AYMD;94;014;Madang;;Papua New Guinea;5;05-13S;145-47E;05-13S;145-47E;3;5;P
AYMO;--;---;Manus Island/Momote;;Papua New Guinea;5;02-03-43S;147-25-27E;;;4;;
AYPY;94;035;Moresby;;Papua New Guinea;5;09-26S;147-13E;09-26S;147-13E;38;49;P
Files containing the data in this format can be downloaded from the
:abbr:`NOAA (National Oceanographic and Atmospheric Administration)`'s
site in their `station location page`_.
WMO indexed files downloaded from the :abbr:`NOAA (National
Oceanographic and Atmospheric Administration)` site when processed by
``import_locations()`` will return ``dict`` object of the following
style::
{'00000': Station('PABL', 'Buckland, Buckland Airport', 'AK',
'United States', 4, 65.982222, -160.848055, None,
None, 7, False),
'01001': Station('ENJA', 'Jan Mayen', None, 'Norway', 6, 70.933333,
-7.333333, 70.933333, -7.333333, 10, 9, True),
'01002': Station(None, 'Grahuken', None, 'Norway', 6, 79.783333,
13.533333, None, None, 15, False)}
And ``dict`` objects such as the following will be created when ICAO
indexed data files are processed::
{'AYMD': Station("94", "014", "Madang", None, "Papua New Guinea",
5, -5.216666, 145.783333, -5.216666,
                     145.78333333333333, 3, 5, True),
'AYMO': Station(None, None, "Manus Island/Momote", None,
"Papua New Guinea", 5, -2.061944, 147.424166,
None, None, 4, False),
'AYPY': Station("94", "035", "Moresby", None, "Papua New Guinea",
5, -9.433333, 147.216667, -9.433333, 147.216667,
38, 49, True)}
Args:
data (iter): NOAA station data to read
index (str): The identifier type used in the file
Returns:
dict: WMO locations with `Station` objects
Raises:
FileFormatError: Unknown file format
.. _NOAA: http://weather.noaa.gov/
.. _station location page: http://weather.noaa.gov/tg/site.shtml
"""
self._data = data
data = utils.prepare_read(data)
for line in data:
line = line.strip()
chunk = line.split(';')
if not len(chunk) == 14:
if index == 'ICAO':
# Some entries only have 12 or 13 elements, so we assume 13
# and 14 are None. Of the entries I've hand checked this
# assumption would be correct.
logging.debug('Extending ICAO %r entry, because it is '
'too short to process' % line)
chunk.extend(['', ''])
elif index == 'WMO' and len(chunk) == 13:
# A few of the WMO indexed entries are missing their RBSN
# fields, hand checking the entries for 71046 and 71899
# shows that they are correct if we just assume RBSN is
# false.
logging.debug('Extending WMO %r entry, because it is '
'too short to process' % line)
chunk.append('')
else:
raise utils.FileFormatError('NOAA')
if index == 'WMO':
identifier = ''.join(chunk[:2])
alt_id = chunk[2]
elif index == 'ICAO':
identifier = chunk[0]
alt_id = ''.join(chunk[1:3])
else:
raise ValueError('Unknown format %r' % index)
if alt_id in ('----', '-----'):
alt_id = None
name = chunk[3]
state = chunk[4] if chunk[4] else None
country = chunk[5]
wmo = int(chunk[6]) if chunk[6] else None
point_data = []
for i in chunk[7:11]:
if not i:
point_data.append(None)
continue
# Some entries in nsd_cccc.txt are of the format "DD-MM-
# N", so we just take the spaces to mean 0 seconds.
if ' ' in i:
logging.debug('Fixing unpadded location data in %r entry'
% line)
i = i.replace(' ', '0')
values = map(int, i[:-1].split('-'))
if i[-1] in ('S', 'W'):
values = [-x for x in values]
point_data.append(point.utils.to_dd(*values))
latitude, longitude, ua_latitude, ua_longitude = point_data
altitude = int(chunk[11]) if chunk[11] else None
ua_altitude = int(chunk[12]) if chunk[12] else None
rbsn = False if not chunk[13] else True
self[identifier] = Station(alt_id, name, state, country, wmo,
latitude, longitude, ua_latitude,
ua_longitude, altitude, ua_altitude,
rbsn)
def config_string(self):
"""
Build the storable string corresponding
to this configuration object.
:rtype: string
"""
return (gc.CONFIG_STRING_SEPARATOR_SYMBOL).join(
[u"%s%s%s" % (fn, gc.CONFIG_STRING_ASSIGNMENT_SYMBOL, self.data[fn]) for fn in sorted(self.data.keys()) if self.data[fn] is not None]
)
def unescape(s):
"""Unescape ASSUAN message (0xAB <-> '%AB')."""
s = bytearray(s)
i = 0
while i < len(s):
if s[i] == ord('%'):
hex_bytes = bytes(s[i+1:i+3])
value = int(hex_bytes.decode('ascii'), 16)
s[i:i+3] = [value]
i += 1
return bytes(s)
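# A brief usage sketch for unescape above (illustrative inputs, not from the
# original source): '%20' decodes to a space and '%25' to a literal percent sign.
assert unescape(b'foo%20bar') == b'foo bar'
assert unescape(b'100%25') == b'100%'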
def packageipa(env, console):
"""
Package the built app as an ipa for distribution in iOS App Store
"""
ipa_path, app_path = _get_ipa(env)
output_dir = path.dirname(ipa_path)
if path.exists(ipa_path):
console.quiet('Removing %s' % ipa_path)
os.remove(ipa_path)
zf = zipfile.ZipFile(ipa_path, mode='w')
payload_dir = 'Payload'
for (dirpath, dirnames, filenames) in os.walk(app_path):
for filename in filenames:
filepath = path.join(dirpath, filename)
prefix = path.commonprefix([filepath, path.dirname(app_path)])
write_path = path.join(payload_dir, filepath[len(prefix) + 1:])
console.quiet('Write %s' % write_path)
zf.write(filepath, write_path)
zf.close()
console.quiet('Packaged %s' % ipa_path)
def update_target(self, name, current, total):
"""Updates progress bar for a specified target."""
self.refresh(self._bar(name, current, total))
def fetch_and_create_image(self, url, image_title):
'''
fetches, creates image object
returns tuple with Image object and context dictionary containing
request URL
'''
context = {
"file_url": url,
"foreign_title": image_title,
}
try:
image_file = requests.get(url)
local_image = Image(
title=image_title,
file=ImageFile(
BytesIO(image_file.content),
name=image_title
)
)
local_image.save()
return (local_image, context)
except Exception as e:
context.update({
"exception": e,
})
raise ImageCreationFailed(context, None)
def fromSearch(text):
"""
Generates a regular expression from 'simple' search terms.
:param text | <str>
:usage |>>> import projex.regex
|>>> projex.regex.fromSearch('*cool*')
|'^.*cool.*$'
|>>> projex.projex.fromSearch('*cool*,*test*')
|'^.*cool.*$|^.*test.*$'
:return <str>
"""
terms = []
for term in nstr(text).split(','):
# assume if no *'s then the user wants to search anywhere as keyword
if '*' not in term:
term = '*%s*' % term
term = term.replace('*', '.*')
terms.append('^%s$' % term)
return '|'.join(terms)
def write (self, s):
"""Process text, writing it to the virtual screen while handling
ANSI escape codes.
"""
if isinstance(s, bytes):
s = self._decode(s)
for c in s:
self.process(c)
def replicate_global_dbs(cloud_url=None, local_url=None):
"""
Set up replication of the global databases from the cloud server to the
local server.
:param str cloud_url: Used to override the cloud url from the global
configuration in case the calling function is in the process of
initializing the cloud server
:param str local_url: Used to override the local url from the global
configuration in case the calling function is in the process of
initializing the local server
"""
local_url = local_url or config["local_server"]["url"]
cloud_url = cloud_url or config["cloud_server"]["url"]
server = Server(local_url)
for db_name in global_dbs:
server.replicate(
db_name, urljoin(cloud_url, db_name), db_name, continuous=True,
)
def _printCtrlConf(self):
""" get PV value and print out
"""
if self.ctrlinfo:
print("Control configs:")
for k, v in sorted(self.ctrlinfo.items(), reverse=True):
pv = v['pv']
rval = epics.caget(pv)
if rval is None:
val = ''
else:
val = self.unitTrans(rval, direction='+')
print(" {k:6s} = {pv:6s}, raw: {rval:>6s}, real: {val:>6s}".format(k=str(k),
pv=str(pv),
rval=str(rval),
                     val=str(val)))
def zremrangebylex(self, name, min, max):
"""
Remove all elements in the sorted set between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
:param name: str the name of the redis key
:param min: int or -inf
:param max: int or +inf
:return: Future()
"""
with self.pipe as pipe:
return pipe.zremrangebylex(self.redis_key(name), min, max)
def collections(tag, collectionName, token='', version=''):
'''Returns an array of quote objects for a given collection type. Currently supported collection types are sector, tag, and list
https://iexcloud.io/docs/api/#collections
Args:
tag (string); Sector, Tag, or List
collectionName (string); Associated name for tag
token (string); Access token
version (string); API version
Returns:
dict: result
'''
if tag not in _COLLECTION_TAGS:
raise PyEXception('Tag must be in %s' % str(_COLLECTION_TAGS))
return _getJson('stock/market/collection/' + tag + '?collectionName=' + collectionName, token, version)
def _handle_response(response):
"""Internal helper for handling API responses from the Quoine server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
"""
if not str(response.status_code).startswith('2'):
raise KucoinAPIException(response)
try:
res = response.json()
if 'code' in res and res['code'] != "200000":
raise KucoinAPIException(response)
if 'success' in res and not res['success']:
raise KucoinAPIException(response)
# by default return full response
# if it's a normal response we have a data attribute, return that
if 'data' in res:
res = res['data']
return res
except ValueError:
raise KucoinRequestException('Invalid Response: %s' % response.text)
def insert(
self,
obj=None,
overwrite=False,
partition=None,
values=None,
validate=True,
):
"""
Insert into Impala table. Wraps ImpalaClient.insert
Parameters
----------
obj : TableExpr or pandas DataFrame
overwrite : boolean, default False
If True, will replace existing contents of table
partition : list or dict, optional
For partitioned tables, indicate the partition that's being inserted
into, either with an ordered list of partition keys or a dict of
partition field name to value. For example for the partition
(year=2007, month=7), this can be either (2007, 7) or {'year': 2007,
'month': 7}.
validate : boolean, default True
If True, do more rigorous validation that schema of table being
inserted is compatible with the existing table
Examples
--------
>>> t.insert(table_expr) # doctest: +SKIP
# Completely overwrite contents
>>> t.insert(table_expr, overwrite=True) # doctest: +SKIP
"""
if isinstance(obj, pd.DataFrame):
from ibis.impala.pandas_interop import write_temp_dataframe
writer, expr = write_temp_dataframe(self._client, obj)
else:
expr = obj
if values is not None:
raise NotImplementedError
if validate:
existing_schema = self.schema()
insert_schema = expr.schema()
if not insert_schema.equals(existing_schema):
_validate_compatible(insert_schema, existing_schema)
if partition is not None:
partition_schema = self.partition_schema()
partition_schema_names = frozenset(partition_schema.names)
expr = expr.projection(
[
column
for column in expr.columns
if column not in partition_schema_names
]
)
else:
partition_schema = None
ast = build_ast(expr, ImpalaDialect.make_context())
select = ast.queries[0]
statement = ddl.InsertSelect(
self._qualified_name,
select,
partition=partition,
partition_schema=partition_schema,
overwrite=overwrite,
)
return self._execute(statement)
def set_stream(self):
""" Sets the download streams """
if not self.auth:
raise AccessError("Please use the remote() method to set rsync authorization or use remote(public=True) for public data")
elif not self.initial_stream.task:
raise AccessError("No files to download.")
else:
self.stream = self.get_stream()
self.stream.source = join(self.remote_base, 'sas') if self.remote_base and not self.public else join(self.remote_base, self.release) if self.release else self.remote_base
self.stream.destination = join(self.base_dir, self.release) if self.public and self.release else self.base_dir
self.stream.cli.env = {'RSYNC_PASSWORD': self.auth.password} if self.auth.ready() else None
if self.stream.source and self.stream.destination:
for task in self.initial_stream.task:
self.set_stream_task(task)
ntask = len(self.stream.task)
if self.stream.stream_count > ntask:
if self.verbose:
print("SDSS_ACCESS> Reducing the number of streams from %r to %r, the number of download tasks." % (self.stream.stream_count, ntask))
self.stream.stream_count = ntask
self.stream.streamlet = self.stream.streamlet[:ntask]
def remove_nio(self, port_number):
"""
Removes the specified NIO as member of cloud.
:param port_number: allocated port number
:returns: the NIO that was bound to the allocated port
"""
if port_number not in self._nios:
raise NodeError("Port {} is not allocated".format(port_number))
nio = self._nios[port_number]
if isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
log.info('Cloud "{name}" [{id}]: NIO {nio} removed from port {port}'.format(name=self._name,
id=self._id,
nio=nio,
port=port_number))
del self._nios[port_number]
if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
yield from self._delete_ubridge_connection(port_number)
yield from self.start()
return nio
def paintEvent(self, event):
"""When an area of the window is exposed, we just copy out of the
server-side, off-screen pixmap to that area.
"""
if not self.pixmap:
return
rect = event.rect()
x1, y1, x2, y2 = rect.getCoords()
width = x2 - x1 + 1
height = y2 - y1 + 1
# redraw the screen from backing pixmap
painter = QPainter(self)
rect = QtCore.QRect(x1, y1, width, height)
painter.drawPixmap(rect, self.pixmap, rect)
def set(self, oid, value):
"""Use PySNMP to perform an SNMP SET operation on a single object.
:param oid: The OID of the object to set.
:param value: The value of the object to set.
:raises: SNMPFailure if an SNMP request fails.
"""
try:
results = self.cmd_gen.setCmd(self._get_auth(),
self._get_transport(),
(oid, value))
except snmp_error.PySnmpError as e:
raise SNMPFailure(SNMP_FAILURE_MSG % ("SET", e))
error_indication, error_status, error_index, var_binds = results
if error_indication:
# SNMP engine-level error.
raise SNMPFailure(SNMP_FAILURE_MSG % ("SET", error_indication))
if error_status:
# SNMP PDU error.
raise SNMPFailure(
"SNMP operation '%(operation)s' failed: %(error)s at"
" %(index)s" %
{'operation': "SET", 'error': error_status.prettyPrint(),
'index':
error_index and var_binds[int(error_index) - 1]
or '?'})
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'used_bytes') and self.used_bytes is not None:
_dict['used_bytes'] = self.used_bytes
if hasattr(self, 'maximum_allowed_bytes'
) and self.maximum_allowed_bytes is not None:
_dict['maximum_allowed_bytes'] = self.maximum_allowed_bytes
return _dict
def replace_template(self, template_content, team_context, template_id):
"""ReplaceTemplate.
[Preview API] Replace template contents
:param :class:`<WorkItemTemplate> <azure.devops.v5_1.work_item_tracking.models.WorkItemTemplate>` template_content: Template contents to replace with
:param :class:`<TeamContext> <azure.devops.v5_1.work_item_tracking.models.TeamContext>` team_context: The team context for the operation
:param str template_id: Template id
:rtype: :class:`<WorkItemTemplate> <azure.devops.v5_1.work-item-tracking.models.WorkItemTemplate>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
content = self._serialize.body(template_content, 'WorkItemTemplate')
response = self._send(http_method='PUT',
location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemTemplate', response)
def base_name_from_image(image):
"""Extract the base name of the image to use as the 'algorithm name' for the job.
Args:
image (str): Image name.
Returns:
str: Algorithm name, as extracted from the image name.
"""
m = re.match("^(.+/)?([^:/]+)(:[^:]+)?$", image)
algo_name = m.group(2) if m else image
return algo_name
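# A short usage sketch for base_name_from_image above, using hypothetical image
# names (not from the original source); the function itself relies on the re module.
import re
assert base_name_from_image('123.dkr.ecr.us-west-2.amazonaws.com/my-algo:1.0') == 'my-algo'
assert base_name_from_image('my-algo') == 'my-algo'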
def serialize(self, sw):
'''Serialize the object.'''
detail = None
if self.detail is not None:
detail = Detail()
detail.any = self.detail
pyobj = FaultType(self.code, self.string, self.actor, detail)
sw.serialize(pyobj, typed=False)
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
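# A minimal usage sketch for _not_in above, assuming pandas is installed
# (values are illustrative, not from the original source).
import pandas as pd
demo_series = pd.Series([1, 2, 3])
print(_not_in(demo_series, [2, 3]).tolist())  # [True, False, False]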
def message(self, msg='', level=1, tab=0):
'''Print a message to the console'''
if self.verbosity >= level:
self.stdout.write('{}{}'.format(' ' * tab, msg))
def _choose_split_points(cls, sorted_keys, shard_count):
"""Returns the best split points given a random set of datastore.Keys."""
assert len(sorted_keys) >= shard_count
index_stride = len(sorted_keys) / float(shard_count)
return [sorted_keys[int(round(index_stride * i))]
for i in range(1, shard_count)]
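# An illustrative call of _choose_split_points above, with plain integers standing
# in for datastore keys; cls is unused by the body, so None is passed here
# (not from the original source).
print(_choose_split_points(None, list(range(12)), 4))  # [3, 6, 9]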
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
if self.ord != np.inf:
raise NotImplementedError(self.ord)
eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps,
dtype=self.tf_dtype)
adv_x = x + eta
if self.clip_min is not None or self.clip_max is not None:
assert self.clip_min is not None and self.clip_max is not None
adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
return adv_x
def store_meta_data(self, copy_path=None):
"""Save meta data of the state machine model to the file system
This method generates a dictionary of the meta data of the state machine and stores it on the filesystem.
:param str copy_path: Optional, if the path is specified, it will be used instead of the file system path
"""
if copy_path:
meta_file_json = os.path.join(copy_path, storage.FILE_NAME_META_DATA)
else:
meta_file_json = os.path.join(self.state_machine.file_system_path, storage.FILE_NAME_META_DATA)
storage_utils.write_dict_to_json(self.meta, meta_file_json)
self.root_state.store_meta_data(copy_path)
def install_importer():
"""
If in a virtualenv then load spec files to decide which
modules can be imported from system site-packages and
install path hook.
"""
logging.debug('install_importer')
if not in_venv():
logging.debug('No virtualenv active py:[%s]', sys.executable)
return False
if disable_vext:
logging.debug('Vext disabled by environment variable')
return False
if GatekeeperFinder.PATH_TRIGGER not in sys.path:
try:
load_specs()
sys.path.append(GatekeeperFinder.PATH_TRIGGER)
sys.path_hooks.append(GatekeeperFinder)
except Exception as e:
"""
Dont kill other programmes because of a vext error
"""
logger.info(str(e))
if logger.getEffectiveLevel() == logging.DEBUG:
raise
logging.debug("importer installed")
return True
def _get_model_reference(self, model_id):
"""Constructs a ModelReference.
Args:
model_id (str): the ID of the model.
Returns:
google.cloud.bigquery.model.ModelReference:
A ModelReference for a model in this dataset.
"""
return ModelReference.from_api_repr(
{"projectId": self.project, "datasetId": self.dataset_id, "modelId": model_id}
)
def _kip(self, cycle_end, mix_thresh, xaxis, sparse):
"""
*** Should be used with care, therefore has been flagged as
a private routine ***
This function uses a threshold diffusion coefficient, above
which the shell is considered to be convective, to plot a
Kippenhahn diagram.
Parameters
----------
cycle_end : integer
The final cycle number.
mix_thresh : float
The threshold diffusion coefficient.
xaxis : string
Choose one of 'age', 'cycle', 'log_age' or 'log_time_left'.
sparse : integer
Sparsity factor when plotting from cyclelist.
Examples
--------
>>> pt=mp.se('/ngpod1/swj/see/mppnp_out/scratch_data/M25.0Z1e-02','.h5')
>>> pt.kip(10000,'log_time_left',100)
"""
original_cyclelist = self.se.cycles
cyclelist = original_cyclelist[0:cycle_end:sparse]
xx = self.se.ages[:cycle_end:sparse]
totalmass = []
m_ini = float(self.se.get('mini'))
fig = pl.figure(1)
ax = pl.subplot(1,1,1)
fsize = 12
def getlims(d_coeff, massco):
"""
This function returns the convective boundaries for a cycle,
given the cycle's dcoeff and massco columns, taking into
account whether surface or centre are at the top.
"""
plotlims = []
if massco[0] > massco[-1]:
for j in range(-1,-len(d_coeff)-1,-1):
if j == -1:
if d_coeff[j] >= mix_thresh:
plotlims.append(massco[j])
else:
pass
elif (d_coeff[j]-mix_thresh)*(d_coeff[j+1]-mix_thresh) < 0:
plotlims.append(massco[j])
if j == -len(d_coeff):
if d_coeff[j] >= mix_thresh:
plotlims.append(massco[j])
return plotlims
else:
for j in range(len(d_coeff)):
if j == 0:
if d_coeff[j] >= mix_thresh:
plotlims.append(massco[j])
else:
pass
elif (d_coeff[j]-mix_thresh)*(d_coeff[j-1]-mix_thresh) < 0:
plotlims.append(massco[j])
if j == len(d_coeff)-1:
if d_coeff[j] >= mix_thresh:
plotlims.append(massco[j])
return plotlims
if xaxis == 'age':
ax.set_xlabel('Age [yr]',fontsize=fsize)
elif xaxis == 'cycle':
xx = cyclelist
ax.set_xlabel('Cycle',fontsize=fsize)
elif xaxis == 'log_age':
for i in range(len(xx)):
xx[i] = np.log10(xx[i])
ax.set_xlabel('log$_{10}$(age) [yr]',fontsize=fsize)
elif xaxis == 'log_time_left':
for i in range(len(xx)):
xx[i] = np.log10(max(xx)-xx[i])
xx[-2] = xx[-3]-abs(xx[-4]-xx[-3])
xx[-1] = xx[-2]-abs(xx[-3]-xx[-2])
ax.set_xlabel('log$_{10}$(time until collapse) [yr]',fontsize=fsize)
#centre-surface flag:
flag = False
if self.se.get(cyclelist[1],'mass')[0] > self.se.get(cyclelist[1],'mass')[-1]:
flag = True
for i in range(len(cyclelist)):
if flag == True:
totalmass.append(self.se.get(cyclelist[i],'mass')[0])
else:
totalmass.append(self.se.get(cyclelist[i],'mass')[-1])
percent = int(i*100/len(cyclelist))
sys.stdout.flush()
sys.stdout.write("\rcreating color map " + "...%d%%" % percent)
d_coeff = self.se.get(cyclelist[i],'dcoeff')
massco = self.se.get(cyclelist[i],'mass')
plotlims = getlims(d_coeff,massco)
for k in range(0,len(plotlims),2):
ax.axvline(xx[i],ymin=old_div(plotlims[k],m_ini),ymax=old_div(plotlims[k+1],m_ini),color='b',linewidth=0.5)
ax.plot(xx, totalmass, color='black', linewidth=1)
if xaxis == 'log_time_left':
ax.axis([xx[0],xx[-1],0.,m_ini])
else:
ax.axis([min(xx),max(xx),0.,m_ini])
ax.set_ylabel('Mass [$M_{\odot}$]',fontsize=fsize)
pl.show()
def get(cls, id):
''' Retrieves an object by id. Returns None in case of failure '''
if not id:
return None
redis = cls.get_redis()
key = '{}:{}:obj'.format(cls.cls_key(), id)
if not redis.exists(key):
return None
obj = cls(id=id)
obj._persisted = True
data = debyte_hash(redis.hgetall(key))
for fieldname, field in obj.proxy:
value = field.recover(data, redis)
setattr(
obj,
fieldname,
value
)
return obj
def _extract(self, parselet_node, document, level=0):
"""
Extract values at this document node level
using the parselet_node instructions:
- go deeper in tree
- or call selector handler in case of a terminal selector leaf
"""
if self.DEBUG:
debug_offset = "".join([" " for x in range(level)])
# we must go deeper in the Parsley tree
if isinstance(parselet_node, ParsleyNode):
# default output
output = {}
# process all children
for ctx, v in list(parselet_node.items()):
if self.DEBUG:
print(debug_offset, "context:", ctx, v)
extracted=None
try:
# scoped-extraction:
# extraction should be done deeper in the document tree
if ctx.scope:
extracted = []
selected = self.selector_handler.select(document, ctx.scope)
if selected:
for i, elem in enumerate(selected, start=1):
parse_result = self._extract(v, elem, level=level+1)
if isinstance(parse_result, (list, tuple)):
extracted.extend(parse_result)
else:
extracted.append(parse_result)
# if we're not in an array,
# we only care about the first iteration
if not ctx.iterate:
break
if self.DEBUG:
print(debug_offset,
"parsed %d elements in scope (%s)" % (i, ctx.scope))
# local extraction
else:
extracted = self._extract(v, document, level=level+1)
except NonMatchingNonOptionalKey as e:
if self.DEBUG:
print(debug_offset, str(e))
if not ctx.required or not self.STRICT_MODE:
output[ctx.key] = {}
else:
raise
except Exception as e:
if self.DEBUG:
print(str(e))
raise
# replace empty-list result when not looping by empty dict
if ( isinstance(extracted, list)
and not extracted
and not ctx.iterate):
extracted = {}
# keep only the first element if we're not in an array
if self.KEEP_ONLY_FIRST_ELEMENT_IF_LIST:
try:
if ( isinstance(extracted, list)
and extracted
and not ctx.iterate):
if self.DEBUG:
print(debug_offset, "keep only 1st element")
extracted = extracted[0]
except Exception as e:
if self.DEBUG:
print(str(e))
print(debug_offset, "error getting first element")
# extraction for a required key gave nothing
if ( self.STRICT_MODE
and ctx.required
and extracted is None):
raise NonMatchingNonOptionalKey(
'key "%s" is required but yield nothing\nCurrent path: %s/(%s)\n' % (
ctx.key,
document.getroottree().getpath(document),v
)
)
# special key to extract a selector-defined level deeper
# but still output at same level
# this can be useful for breaking up long selectors
# or when you need to mix XPath and CSS selectors
# e.g.
# {
# "something(#content div.main)": {
# "--(.//div[re:test(@class, 'style\d{3,6}')])": {
# "title": "h1",
# "subtitle": "h2"
# }
# }
# }
#
if ctx.key == self.SPECIAL_LEVEL_KEY:
if isinstance(extracted, dict):
output.update(extracted)
elif isinstance(extracted, list):
if extracted:
raise RuntimeError(
"could not merge non-empty list at higher level")
else:
#empty list, dont bother?
pass
else:
# required keys are handled above
if extracted is not None:
output[ctx.key] = extracted
else:
# do not add this optional key/value pair in the output
pass
return output
# a leaf/Selector node
elif isinstance(parselet_node, Selector):
return self.selector_handler.extract(document, parselet_node)
else:
# FIXME: can this happen?
# if selector handler returned None at compile time,
# probably yes
pass
def create_new_version(
self,
name,
subject,
text='',
template_id=None,
html=None,
locale=None,
timeout=None
):
""" API call to create a new version of a template """
if(html):
payload = {
'name': name,
'subject': subject,
'html': html,
'text': text
}
else:
payload = {
'name': name,
'subject': subject,
'text': text
}
if locale:
url = self.TEMPLATES_SPECIFIC_LOCALE_VERSIONS_ENDPOINT % (
template_id,
locale
)
else:
url = self.TEMPLATES_NEW_VERSION_ENDPOINT % template_id
return self._api_request(
url,
self.HTTP_POST,
payload=payload,
timeout=timeout
)
def parse(data):
"""
Parse the given ChangeLog data into a list of Hashes.
@param [String] data File data from the ChangeLog.md
@return [Array<Hash>] Parsed data, e.g. [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...]
"""
sections = re.compile("^## .+$", re.MULTILINE).split(data)
headings = re.findall("^## .+?$", data, re.MULTILINE)
sections.pop(0)
parsed = []
def func(h, s):
p = parse_heading(h)
p["content"] = s
parsed.append(p)
list(map(func, headings, sections))
return parsed
def cell_px(self, line='', cell=None):
"""Executes the cell in parallel.
Examples
--------
::
In [24]: %%px --noblock
....: a = os.getpid()
Async parallel execution on engine(s): all
In [25]: %%px
....: print a
[stdout:0] 1234
[stdout:1] 1235
[stdout:2] 1236
[stdout:3] 1237
"""
args = magic_arguments.parse_argstring(self.cell_px, line)
if args.targets:
save_targets = self.view.targets
self.view.targets = self._eval_target_str(args.targets)
try:
return self.parallel_execute(cell, block=args.block,
groupby=args.groupby,
save_name=args.save_name,
)
finally:
if args.targets:
self.view.targets = save_targets
def decode_speech(path, keypath=None, save=False, speech_context=None,
sample_rate=44100, max_alternatives=1, language_code='en-US',
enable_word_time_offsets=True, return_raw=False):
"""
Decode speech for a file or folder and return results
This function wraps the Google Speech API and ffmpeg to decode speech for
free recall experiments. Note: in order for this to work, you must have a
Google Speech account, a google speech credentials file referenced in your
_bash_profile, and ffmpeg installed on your computer. See our readthedocs
for more information on how to set this up:
http://cdl-quail.readthedocs.io/en/latest/.
Parameters
----------
path : str
Path to a wav file, or a folder of wav files.
keypath : str
Google Cloud Speech API key filepath. This is a JSON file containing
credentials that was generated when creating a service account key.
If None, assumes you have a local key that is set with an environmental
variable. See the speech decoding tutorial for details.
save : boolean
False by default, but if set to true, will save a pickle with the results
object from google speech, and a text file with the decoded words.
speech_context : list of str
This allows you to give some context to the speech decoding algorithm.
For example, this could be the words studied on a given list, or all
words in an experiment.
sample_rate : float
The sample rate of your audio files (default is 44100).
max_alternatives : int
You can specify the speech decoding to return multiple guesses to the
decoding. This will be saved in the results object (default is 1).
language_code : str
Decoding language code. Default is en-US. See here for more details:
https://cloud.google.com/speech/docs/languages
enable_word_time_offsets : bool
Returns timing information (onsets/offsets) for each word (default is
True).
return_raw : boolean
Instead of returning the parsed results objects (i.e. the words), you can
return the raw response object. This has more details about the decoding,
such as confidence.
Returns
----------
words : list of str, or list of lists of str
The results of the speech decoding. This will be a list if only one file
is input, or a list of lists if more than one file is decoded.
raw : google speech object, or list of objects
You can optionally return the google speech object instead of the parsed
results by using the return_raw flag.
"""
# SUBFUNCTIONS
def decode_file(file_path, client, speech_context, sample_rate,
max_alternatives, enable_word_time_offsets):
def recognize(chunk, file_path):
"""
Subfunction that loops over audio segments to recognize speech
"""
# export as flac
chunk.export(file_path + ".flac", format = "flac", bitrate="44.1k")
# open flac file
with open(file_path + ".flac", 'rb') as sc:
speech_content = sc.read()
# initialize speech sample
sample = types.RecognitionAudio(content=speech_content)
# run speech decoding
try:
result = client.recognize(opts, sample)
except ValueError as e:
print(e)
result = None
return result
opts = {}
opts['encoding']=enums.RecognitionConfig.AudioEncoding.FLAC
opts['language_code'] = language_code
opts['sample_rate_hertz'] = sample_rate
opts['max_alternatives'] = max_alternatives
opts['enable_word_time_offsets'] = enable_word_time_offsets
if speech_context:
opts['speech_contexts']=[types.SpeechContext(phrases=speech_context)]
# read in wav
audio = AudioSegment.from_wav(file_path)
# segment into 1 minute chunks
if len(audio)>60000:
segments = list(range(0,len(audio),60000))
if segments[-1]<len(audio):
segments.append(len(audio)-1)
print('Audio clip is longer than 1 minute. Splitting into %d one minute segments...' % (len(segments)-1))
audio_chunks = []
for i in range(len(segments)-1):
audio_chunks.append(audio[segments[i]:segments[i+1]])
else:
audio_chunks = [audio]
# loop over audio segments
results = []
for idx, chunk in enumerate(audio_chunks):
results.append(recognize(chunk, file_path+str(idx)))
# return list of results
return results
def parse_response(results):
"""Parses response from google speech"""
words = []
for chunk in results:
for result in chunk.results:
alternative = result.alternatives[0]
print('Transcript: {}'.format(alternative.transcript))
print('Confidence: {}'.format(alternative.confidence))
for word_info in alternative.words:
word = word_info.word
start_time = word_info.start_time
end_time = word_info.end_time
print('Word: {}, start_time: {}, end_time: {}'.format(
word,
start_time.seconds + start_time.nanos * 1e-9,
end_time.seconds + end_time.nanos * 1e-9))
words.append((str(word).upper(), start_time.seconds + start_time.nanos * 1e-9,
end_time.seconds + end_time.nanos * 1e-9))
return words
# def parse_response(results):
# """Parses response from google speech"""
# words = []
# for idx, result in enumerate(results):
# if result is None:
# warnings.warn('No speech was decoded for segment %d' % (idx+1))
# words.append(None)
# else:
# try:
# for segment in result:
# for chunk in segment.transcript.split(' '):
# if chunk != '':
# words.append(str(chunk).upper())
# except:
# warnings.warn('Error parsing response for segment %d' % (idx+1))
#
# return words
# MAIN #####################################################################
# initialize speech client
if keypath:
credentials = service_account.Credentials.from_service_account_file(keypath)
scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/cloud-platform'])
client = speech.SpeechClient(credentials=scoped_credentials)
else:
client = speech.SpeechClient()
# make a list of files
files = []
if path.endswith(".wav"):
files = [path]
else:
listdirectory = os.listdir(path)
for filename in listdirectory:
if filename.endswith(".wav"):
files.append(path + filename)
# initialize list of words
words = []
raw = []
# loop over files
for i, f in enumerate(files):
# print progress
print('Decoding file ' + str(i+1) + ' of ' + str(len(files)))
try:
# start timer
start = time.time()
# decode file
results = decode_file(f, client, speech_context, sample_rate,
max_alternatives, enable_word_time_offsets)
# parsing response
parsed_results = parse_response(results)
# save the processed file
words.append(parsed_results)
# save the processed file
raw.append(results)
if save:
# save the raw response in a pickle
pickle.dump(results, open(f + ".p", "wb" ) )
# save a text file with just the words
pd.DataFrame(parsed_results).to_csv(f + '.txt', header=False,
index=False)
# print when finished
print('Finished file ' + str(i+1) + ' of ' + str(len(files)) + ' in ' +
str(round(time.time()-start,2)) + ' seconds.')
# handle when something goes wrong
except ValueError as e:
words.append("Error")
print(e)
print('Decoding of file ' + str(i) + 'failed. Moving on to next file.')
if return_raw:
if len(words)>1:
return raw
else:
return raw[0]
else:
if len(words)>1:
return words
else:
return words[0]
def ChangePassword(self, password_old, password_new):
"""
Change the password used to protect the private key.
Args:
password_old (str): the current password used to encrypt the private key.
password_new (str): the new to be used password to encrypt the private key.
Returns:
bool: whether the password has been changed
"""
if not self.ValidatePassword(password_old):
return False
if isinstance(password_new, str):
password_new = password_new.encode('utf-8')
password_key = hashlib.sha256(password_new)
self.SaveStoredData("PasswordHash", password_key)
self.SaveStoredData("MasterKey", AES.new(self._master_key, AES.MODE_CBC, self._iv))
return True
def change_subitem(self, offset=None, max_item_count=None, nom=True):
"""
Args:
* offset - Integer - The positive / negative change to apply
to the item.
* max_item_count - The maximum number of items available.
* nom - None on Max or Min - If max or min number of items is reached,
return none instead of forcing back into range.
Returns:
Boolean - True if successful, False if nom is True and the
value was forced back within the boundary.
change_item's use case is +1 / -1 incrementing through a gallery.
The logic works fine for +1 boundary between pages
"""
if offset == None:
return
if self.slots['subitem'] == None:
self.slots['subitem'] = 1
new_item = self.slots['subitem']+offset
if new_item > max_item_count:
new_item -= max_item_count
self.change_subpage(offset=+1, nom=False)
elif new_item < 1 and self.current_subpage() > 1:
self.change_subpage(offset=-1, nom=False)
if max_item_count != None:
new_item += max_item_count
else:
new_item += self.page_items
self.slots['subitem'] = new_item
return True
def load_model(
self, the_metamodel, filename, is_main_model, encoding='utf-8',
add_to_local_models=True):
"""
load a single model
Args:
the_metamodel: the metamodel used to load the model
filename: the model to be loaded (if not cached)
Returns:
the loaded/cached model
"""
if not self.local_models.has_model(filename):
if self.all_models.has_model(filename):
new_model = self.all_models.filename_to_model[filename]
else:
# print("LOADING {}".format(filename))
# all models loaded here get their references resolved from the
# root model
new_model = the_metamodel.internal_model_from_file(
filename, pre_ref_resolution_callback=lambda
other_model: self.pre_ref_resolution_callback(other_model),
is_main_model=is_main_model, encoding=encoding)
self.all_models.filename_to_model[filename] = new_model
# print("ADDING {}".format(filename))
if add_to_local_models:
self.local_models.filename_to_model[filename] = new_model
assert self.all_models.has_model(filename) # to be sure...
return self.all_models.filename_to_model[filename]
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jgd.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx)
def master_then_spare(data):
"""Return the provided satellites list sorted as:
- alive first,
- then spare
- then dead
satellites.
:param data: the SatelliteLink list
:type data: list
:return: sorted list
:rtype: list
"""
master = []
spare = []
for sdata in data:
if sdata.spare:
spare.append(sdata)
else:
master.append(sdata)
rdata = []
rdata.extend(master)
rdata.extend(spare)
return rdata
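# A tiny usage sketch for master_then_spare above; SatLink is a hypothetical
# stand-in for SatelliteLink, since only the .spare attribute is read
# (not from the original source).
from collections import namedtuple
SatLink = namedtuple('SatLink', 'name spare')
demo_links = [SatLink('spare-1', True), SatLink('master-1', False), SatLink('master-2', False)]
print([link.name for link in master_then_spare(demo_links)])  # ['master-1', 'master-2', 'spare-1']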
def distance(tree1, tree2):
'''
Calculates the distance between the two trees.
@type tree1: list of Views
@param tree1: Tree of Views
@type tree2: list of Views
@param tree2: Tree of Views
@return: the distance
'''
################################################################
#FIXME: this should copy the entire tree and then transform it #
################################################################
pickleableTree1 = ViewClient.__pickleable(tree1)
pickleableTree2 = ViewClient.__pickleable(tree2)
s1 = pickle.dumps(pickleableTree1)
s2 = pickle.dumps(pickleableTree2)
if DEBUG_DISTANCE:
print >>sys.stderr, "distance: calculating distance between", s1[:20], "and", s2[:20]
l1 = len(s1)
l2 = len(s2)
t = float(max(l1, l2))
if l1 == l2:
if DEBUG_DISTANCE:
print >>sys.stderr, "distance: trees have same length, using Hamming distance"
return ViewClient.__hammingDistance(s1, s2)/t
else:
if DEBUG_DISTANCE:
print >>sys.stderr, "distance: trees have different length, using Levenshtein distance"
return ViewClient.__levenshteinDistance(s1, s2)/t
async def processClientInBox(self):
"""
Process the messages in the node's clientInBox asynchronously.
All messages in the inBox have already been validated, including
signature check.
"""
while self.clientInBox:
m = self.clientInBox.popleft()
req, frm = m
logger.debug("{} processing {} request {}".
format(self.clientstack.name, frm, req),
extra={"cli": True,
"tags": ["node-msg-processing"]})
try:
await self.clientMsgRouter.handle(m)
except InvalidClientMessageException as ex:
self.handleInvalidClientMsg(ex, m)
async def _receive(self, stream_id, pp_id, data):
"""
Receive data stream -> ULP.
"""
await self._data_channel_receive(stream_id, pp_id, data)
def newidfobject(self, key, aname='', defaultvalues=True, **kwargs):
"""
Add a new idfobject to the model. If you don't specify a value for a
field, the default value will be set.
For example ::
newidfobject("CONSTRUCTION")
newidfobject("CONSTRUCTION",
Name='Interior Ceiling_class',
Outside_Layer='LW Concrete',
Layer_2='soundmat')
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
aname : str, deprecated
This parameter is not used. It is left there for backward
compatibility.
defaultvalues: boolean
default is True. If True default values WILL be set.
If False, default values WILL NOT be set
**kwargs
Keyword arguments in the format `field=value` used to set the value
of fields in the IDF object when it is created.
Returns
-------
EpBunch object
"""
obj = newrawobject(self.model, self.idd_info,
key, block=self.block, defaultvalues=defaultvalues)
abunch = obj2bunch(self.model, self.idd_info, obj)
if aname:
warnings.warn("The aname parameter should no longer be used.", UserWarning)
namebunch(abunch, aname)
self.idfobjects[key].append(abunch)
for k, v in list(kwargs.items()):
abunch[k] = v
return abunch
def get_synset_xml(self,syn_id):
"""
call cdb_syn with synset identifier -> returns the synset xml;
"""
http, resp, content = self.connect()
params = ""
fragment = ""
path = "cdb_syn"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: db_opt: %s" % path )
# output_opt: plain, html, xml
# 'xml' is actually xhtml (with markup), but it is not valid xml!
# 'plain' is actually valid xml (without markup)
output_opt = "plain"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: output_opt: %s" % output_opt )
action = "runQuery"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: action: %s" % action )
printf( "cornettodb/views/query_remote_syn_id: query: %s" % syn_id )
qdict = {}
qdict[ "action" ] = action
qdict[ "query" ] = syn_id
qdict[ "outtype" ] = output_opt
query = urllib.urlencode( qdict )
db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
db_url = urlparse.urlunparse( db_url_tuple )
if self.debug:
printf( "db_url: %s" % db_url )
resp, content = http.request( db_url, "GET" )
if self.debug:
printf( "resp:\n%s" % resp )
# printf( "content:\n%s" % content )
# printf( "content is of type: %s" % type( content ) ) #<type 'str'>
xml_data = eval( content )
return etree.fromstring( xml_data )
def _apply_secure(self, func, permission=None):
'''Enforce authentication on a given method/verb'''
self._handle_api_doc(func, {'security': 'apikey'})
@wraps(func)
def wrapper(*args, **kwargs):
if not current_user.is_authenticated:
self.abort(401)
if permission is not None:
with permission.require():
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return wrapper
def read(self, fname, psw=None):
"""Return uncompressed data for archive entry.
For longer files using :meth:`RarFile.open` may be better idea.
Parameters:
fname
filename or RarInfo instance
psw
password to use for extracting.
"""
with self.open(fname, 'r', psw) as f:
return f.read()
def _get_dvs_link_discovery_protocol(dvs_name, dvs_link_disc_protocol):
'''
Returns the dict representation of the DVS link discovery protocol
dvs_name
The name of the DVS
dvs_link_disc_protocl
The DVS link discovery protocol
'''
log.trace('Building the dict of the DVS \'%s\' link discovery protocol', dvs_name)
return {'operation': dvs_link_disc_protocol.operation,
'protocol': dvs_link_disc_protocol.protocol}
def get_dataset(self, X, y=None):
"""Get a dataset that contains the input data and is passed to
the iterator.
Override this if you want to initialize your dataset
differently.
Parameters
----------
X : input data, compatible with skorch.dataset.Dataset
By default, you should be able to pass:
* numpy arrays
* torch tensors
* pandas DataFrame or Series
* scipy sparse CSR matrices
* a dictionary of the former three
* a list/tuple of the former three
* a Dataset
If this doesn't work with your data, you have to pass a
``Dataset`` that can deal with the data.
y : target data, compatible with skorch.dataset.Dataset
The same data types as for ``X`` are supported. If your X is
a Dataset that contains the target, ``y`` may be set to
None.
Returns
-------
dataset
The initialized dataset.
"""
if is_dataset(X):
return X
dataset = self.dataset
is_initialized = not callable(dataset)
kwargs = self._get_params_for('dataset')
if kwargs and is_initialized:
raise TypeError("Trying to pass an initialized Dataset while "
"passing Dataset arguments ({}) is not "
"allowed.".format(kwargs))
if is_initialized:
return dataset
return dataset(X, y, **kwargs) | 0.001285 |
def to_rgba(color, alpha):
"""
Converts from hex|rgb to rgba
Parameters:
-----------
color : string
Color representation on hex or rgb
alpha : float
Value from 0 to 1.0 that represents the
alpha value.
Example:
to_rgba('#E1E5ED',0.6)
to_rgba('#f03',0.7)
to_rgba('rgb(23,23,23)',.5)
"""
if type(color) == tuple:
color, alpha = color
color = color.lower()
if 'rgba' in color:
cl = list(eval(color.replace('rgba', '')))
if alpha:
cl[3] = alpha
return 'rgba' + str(tuple(cl))
elif 'rgb' in color:
r, g, b = eval(color.replace('rgb', ''))
return 'rgba' + str((r, g, b, alpha))
else:
return to_rgba(hex_to_rgb(color), alpha) | 0.002345 |
def popall(self, key, default=_marker):
"""Remove all occurrences of key and return the list of corresponding
values.
If key is not found, default is returned if given, otherwise
KeyError is raised.
"""
found = False
identity = self._title(key)
ret = []
for i in range(len(self._impl._items)-1, -1, -1):
item = self._impl._items[i]
if item[0] == identity:
ret.append(item[2])
del self._impl._items[i]
self._impl.incr_version()
found = True
if not found:
if default is _marker:
raise KeyError(key)
else:
return default
else:
ret.reverse()
return ret | 0.002481 |
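The implementation above matches the behaviour of `popall` on the multidict package's case-insensitive dictionary; a short hedged usage sketch (the header names are invented):

```python
from multidict import CIMultiDict

headers = CIMultiDict([("Set-Cookie", "a=1"), ("set-cookie", "b=2")])
# Returns every value for the case-insensitively matched key, in insertion order.
print(headers.popall("SET-COOKIE"))      # ['a=1', 'b=2']
# Passing a default avoids the KeyError when the key is absent.
print(headers.popall("X-Missing", []))   # []
```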
def guid(self, guid):
"""Determines JSONAPI type for provided GUID"""
return self._json(self._get(self._build_url('guids', guid)), 200)['data']['type'] | 0.017964 |
def read_busiest_week(path: str) -> Dict[datetime.date, FrozenSet[str]]:
"""Find the earliest week with the most trips"""
feed = load_raw_feed(path)
return _busiest_week(feed) | 0.005348 |
def parents(self, node):
"""Determine all parents of node in our tree"""
return [
parent for parent in
getattr( node, 'parents', [] )
if getattr(parent, 'tree', self.TREE) == self.TREE
] | 0.01626 |
def getQueryEngineDescription(self, queryEngine, **kwargs):
"""See Also: getQueryEngineDescriptionResponse()
Args:
queryEngine:
**kwargs:
Returns:
"""
response = self.getQueryEngineDescriptionResponse(queryEngine, **kwargs)
return self._read_dataone_type_response(response, 'QueryEngineDescription') | 0.010811 |
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
        Return a copy of the DStream in which each RDD is partitioned
using the specified partitioner.
"""
return self.transform(lambda rdd: rdd.partitionBy(numPartitions, partitionFunc)) | 0.010309 |
def from_dict(d):
"""
Recreate a KrausModel from the dictionary representation.
:param dict d: The dictionary representing the KrausModel. See `to_dict` for an
example.
:return: The deserialized KrausModel.
:rtype: KrausModel
"""
kraus_ops = [KrausModel.unpack_kraus_matrix(k) for k in d['kraus_ops']]
return KrausModel(d['gate'], d['params'], d['targets'], kraus_ops, d['fidelity']) | 0.008696 |
def draw_circle(self, pos, radius, color, fillcolor=None):
"""
Draw a circle with the given color on the screen and optionally fill it with fillcolor.
:param pos: Center of the circle
:param radius: Radius
:param color: Color for border
:param fillcolor: Color for infill
:type pos: tuple
:type radius: int
:type color: tuple
:type fillcolor: tuple
"""
        # TODO: This still produces rough output, but it's on the right track.
def dist(d, p, r):
return abs(math.sqrt((p[0] - d[0])**2 + (p[1] - d[1])**2) - r)
points = []
for x in range(pos[0] - radius, pos[0] + radius):
for y in range(pos[1] - radius, pos[1] + radius):
if 0 < x < self.width and 0 < y < self.height:
if dist((x, y), pos, radius) < 1.3:
points.append((x, y))
        # draw fill color
        if fillcolor:
            for x in range(pos[0] - radius, pos[0] + radius):
                for y in range(pos[1] - radius, pos[1] + radius):
                    if 0 < x < self.width and 0 < y < self.height:
                        if math.sqrt((x - pos[0])**2 + (y - pos[1])**2) < radius:
                            self.draw_dot((x, y), fillcolor)
# draw outline
for point in points:
self.draw_dot(point, color) | 0.003568 |
def dump(ra, from_date, with_json=True, latest_only=False, **kwargs):
"""Dump the remote accounts as a list of dictionaries.
:param ra: Remote account to be dumped.
:type ra: `invenio_oauthclient.models.RemoteAccount [Invenio2.x]`
:returns: Remote accounts serialized to dictionary.
:rtype: dict
"""
return dict(id=ra.id, user_id=ra.user_id, client_id=ra.client_id,
extra_data=ra.extra_data) | 0.002294 |
def on_config_value_changed(self, config_m, prop_name, info):
"""Callback when a config value has been changed
:param ConfigModel config_m: The config model that has been changed
:param str prop_name: Should always be 'config'
:param dict info: Information e.g. about the changed config key
"""
config_key = info['args'][1]
# config_value = info['args'][2]
if config_key in ["ENABLE_CACHING", "SHOW_ABORTED_PREEMPTED", "SHOW_DATA_FLOWS",
"SHOW_DATA_FLOW_VALUE_LABELS", "SHOW_NAMES_ON_DATA_FLOWS", "ROTATE_NAMES_ON_CONNECTION"]:
self.update_view() | 0.006154 |
def get_mimetype(self, path):
'''
Get mimetype of given path calling all registered mime functions (and
default ones).
:param path: filesystem path of file
:type path: str
:returns: mimetype
:rtype: str
'''
for fnc in self._mimetype_functions:
mime = fnc(path)
if mime:
return mime
return mimetype.by_default(path) | 0.004598 |
def setup_opt_parser():
"""
Setup the optparser
@returns: opt_parser.OptionParser
"""
#pylint: disable-msg=C0301
#line too long
usage = "usage: %prog [options]"
opt_parser = optparse.OptionParser(usage=usage)
opt_parser.add_option("--version", action='store_true', dest=
"yolk_version", default=False, help=
"Show yolk version and exit.")
opt_parser.add_option("--debug", action='store_true', dest=
"debug", default=False, help=
"Show debugging information.")
opt_parser.add_option("-q", "--quiet", action='store_true', dest=
"quiet", default=False, help=
"Show less output.")
group_local = optparse.OptionGroup(opt_parser,
"Query installed Python packages",
"The following options show information about installed Python packages. Activated packages are normal packages on sys.path that can be imported. Non-activated packages need 'pkg_resources.require()' before they can be imported, such as packages installed with 'easy_install --multi-version'. PKG_SPEC can be either a package name or package name and version e.g. Paste==0.9")
group_local.add_option("-l", "--list", action='store_true', dest=
"show_all", default=False, help=
"List all Python packages installed by distutils or setuptools. Use PKG_SPEC to narrow results.")
group_local.add_option("-a", "--activated", action='store_true',
dest="show_active", default=False, help=
'List activated packages installed by distutils or ' +
'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option("-n", "--non-activated", action='store_true',
dest="show_non_active", default=False, help=
'List non-activated packages installed by distutils or ' +
'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option("-m", "--metadata", action='store_true', dest=
"metadata", default=False, help=
'Show all metadata for packages installed by ' +
'setuptools (use with -l -a or -n)')
group_local.add_option("-f", "--fields", action="store", dest=
"fields", default=False, help=
'Show specific metadata fields. ' +
'(use with -m or -M)')
group_local.add_option("-d", "--depends", action='store', dest=
"show_deps", metavar='PKG_SPEC',
help= "Show dependencies for a package installed by " +
"setuptools if they are available.")
group_local.add_option("--entry-points", action='store',
dest="show_entry_points", default=False, help=
'List entry points for a module. e.g. --entry-points nose.plugins',
metavar="MODULE")
group_local.add_option("--entry-map", action='store',
dest="show_entry_map", default=False, help=
'List entry map for a package. e.g. --entry-map yolk',
metavar="PACKAGE_NAME")
group_pypi = optparse.OptionGroup(opt_parser,
"PyPI (Cheese Shop) options",
"The following options query the Python Package Index:")
group_pypi.add_option("-C", "--changelog", action='store',
dest="show_pypi_changelog", metavar='HOURS',
default=False, help=
"Show detailed ChangeLog for PyPI for last n hours. ")
group_pypi.add_option("-D", "--download-links", action='store',
metavar="PKG_SPEC", dest="show_download_links",
default=False, help=
"Show download URL's for package listed on PyPI. Use with -T to specify egg, source etc.")
group_pypi.add_option("-F", "--fetch-package", action='store',
metavar="PKG_SPEC", dest="fetch",
default=False, help=
"Download package source or egg. You can specify a file type with -T")
group_pypi.add_option("-H", "--browse-homepage", action='store',
metavar="PKG_SPEC", dest="browse_website",
default=False, help=
"Launch web browser at home page for package.")
group_pypi.add_option("-I", "--pypi-index", action='store',
dest="pypi_index",
default=False, help=
"Specify PyPI mirror for package index.")
group_pypi.add_option("-L", "--latest-releases", action='store',
dest="show_pypi_releases", metavar="HOURS",
default=False, help=
"Show PyPI releases for last n hours. ")
group_pypi.add_option("-M", "--query-metadata", action='store',
dest="query_metadata_pypi", default=False,
metavar="PKG_SPEC", help=
"Show metadata for a package listed on PyPI. Use -f to show particular fields.")
group_pypi.add_option("-S", "", action="store", dest="pypi_search",
default=False, help=
"Search PyPI by spec and optional AND/OR operator.",
metavar='SEARCH_SPEC <AND/OR SEARCH_SPEC>')
group_pypi.add_option("-T", "--file-type", action="store", dest=
"file_type", default="all", help=
"You may specify 'source', 'egg', 'svn' or 'all' when using -D.")
group_pypi.add_option("-U", "--show-updates", action='store_true',
dest="show_updates", metavar='<PKG_NAME>',
default=False, help=
"Check PyPI for updates on package(s).")
group_pypi.add_option("-V", "--versions-available", action=
'store', dest="versions_available",
default=False, metavar='PKG_SPEC',
help="Show available versions for given package " +
"listed on PyPI.")
opt_parser.add_option_group(group_local)
opt_parser.add_option_group(group_pypi)
# add opts from plugins
all_plugins = []
for plugcls in load_plugins(others=True):
plug = plugcls()
try:
plug.add_options(opt_parser)
except AttributeError:
pass
return opt_parser | 0.007262 |
def colorMap(value, name="jet", vmin=None, vmax=None):
"""Map a real value in range [vmin, vmax] to a (r,g,b) color scale.
:param value: scalar value to transform into a color
:type value: float, list
:param name: color map name
:type name: str, matplotlib.colors.LinearSegmentedColormap
:return: (r,g,b) color, or a list of (r,g,b) colors.
.. note:: Available color maps:
|colormaps|
.. tip:: Can also use directly a matplotlib color map:
:Example:
.. code-block:: python
from vtkplotter import colorMap
import matplotlib.cm as cm
print( colorMap(0.2, cm.flag, 0, 1) )
(1.0, 0.809016994374948, 0.6173258487801733)
"""
if not _mapscales:
print("-------------------------------------------------------------------")
print("WARNING : cannot import matplotlib.cm (colormaps will show up gray).")
print("Try e.g.: sudo apt-get install python3-matplotlib")
print(" or : pip install matplotlib")
print(" or : build your own map (see example in basic/mesh_custom.py).")
return (0.5, 0.5, 0.5)
if isinstance(name, matplotlib.colors.LinearSegmentedColormap):
mp = name
else:
if name in _mapscales.keys():
mp = _mapscales[name]
else:
print("Error in colorMap():", name, "\navaliable maps =", sorted(_mapscales.keys()))
exit(0)
if _isSequence(value):
values = np.array(value)
if vmin is None:
vmin = np.min(values)
if vmax is None:
vmax = np.max(values)
values = np.clip(values, vmin, vmax)
values -= vmin
values /= vmax - vmin
cols = []
for v in values:
cols.append(mp(v)[0:3])
return np.array(cols)
else:
value -= vmin
value /= vmax - vmin
if value > 0.999:
value = 0.999
elif value < 0:
value = 0
return mp(value)[0:3] | 0.003311 |
def get_context(self):
"""
Return the context used to render the templates for the email
subject and body.
By default, this context includes:
* All of the validated values in the form, as variables of the
same names as their fields.
* The current ``Site`` object, as the variable ``site``.
* Any additional variables added by context processors (this
will be a ``RequestContext``).
"""
if not self.is_valid():
raise ValueError(
"Cannot generate Context from invalid contact form"
)
return dict(self.cleaned_data, site=get_current_site(self.request)) | 0.002882 |
def _execute_model(self, feed_dict, out_dict):
"""
Executes model.
:param feed_dict: Input dictionary mapping nodes to input data.
:param out_dict: Output dictionary mapping names to nodes.
:return: Dictionary mapping names to input data.
"""
network_out = self.sess.run(list(out_dict.values()), feed_dict=feed_dict)
run_out = dict(zip(list(out_dict.keys()), network_out))
return run_out | 0.006536 |
def _D_constraint(self, neg_pairs, w):
"""Compute the value, 1st derivative, second derivative (Hessian) of
a dissimilarity constraint function gF(sum_ij distance(d_ij A d_ij))
where A is a diagonal matrix (in the form of a column vector 'w').
"""
diff = neg_pairs[:, 0, :] - neg_pairs[:, 1, :]
diff_sq = diff * diff
dist = np.sqrt(diff_sq.dot(w))
sum_deri1 = np.einsum('ij,i', diff_sq, 0.5 / np.maximum(dist, 1e-6))
sum_deri2 = np.einsum(
'ij,ik->jk',
diff_sq,
diff_sq / (-4 * np.maximum(1e-6, dist**3))[:,None]
)
sum_dist = dist.sum()
return (
np.log(sum_dist),
sum_deri1 / sum_dist,
sum_deri2 / sum_dist - np.outer(sum_deri1, sum_deri1) / (sum_dist * sum_dist)
) | 0.003953 |
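Spelled out, with \(c_{ij}\) denoting the squared coordinate differences of the \(i\)-th negative pair and \(d_i(w) = \sqrt{\sum_j c_{ij} w_j}\), the three returned quantities are

\[ g(w) = \log \sum_i d_i, \qquad \frac{\partial g}{\partial w_j} = \frac{1}{\sum_i d_i}\sum_i \frac{c_{ij}}{2\,d_i}, \qquad \frac{\partial^2 g}{\partial w_j\,\partial w_k} = -\frac{1}{\sum_i d_i}\sum_i \frac{c_{ij}\,c_{ik}}{4\,d_i^{3}} - \frac{\partial g}{\partial w_j}\,\frac{\partial g}{\partial w_k}, \]

i.e. the chain rule for the logarithm applied to the summed pairwise distances under the diagonal metric \(\mathrm{diag}(w)\); the `np.maximum(..., 1e-6)` terms only guard against division by zero.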
def stop_recording():
"""
Stops the global recording of events and returns a list of the events
captured.
"""
global _recording
if not _recording:
raise ValueError('Must call "start_recording" before.')
recorded_events_queue, hooked = _recording
unhook(hooked)
return list(recorded_events_queue.queue) | 0.002899 |
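A usage sketch, assuming these are the module-level `start_recording`/`stop_recording` helpers of the `keyboard` package (the stop key is arbitrary):

```python
import keyboard

keyboard.start_recording()           # begin capturing key events globally
keyboard.wait("esc")                 # interact until Esc is pressed
events = keyboard.stop_recording()   # list of KeyboardEvent objects, oldest first
print(len(events), "events captured")
```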
def is_class(data):
"""
checks that the data (which is a string, buffer, or a stream
supporting the read method) has the magic numbers indicating it is
a Java class file. Returns False if the magic numbers do not
match, or for any errors.
"""
try:
with unpack(data) as up:
magic = up.unpack_struct(_BBBB)
return magic == JAVA_CLASS_MAGIC
except UnpackException:
return False | 0.002247 |
def unicode_symbol(self, *, invert_color: bool = False) -> str:
"""
Gets the Unicode character for the piece.
"""
symbol = self.symbol().swapcase() if invert_color else self.symbol()
return UNICODE_PIECE_SYMBOLS[symbol] | 0.007722 |
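A short usage sketch, assuming this method lives on python-chess's `Piece` class:

```python
import chess

piece = chess.Piece.from_symbol("K")             # white king
print(piece.unicode_symbol())                    # '♔'
print(piece.unicode_symbol(invert_color=True))   # '♚'
```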
def project_geometry(geometry, crs, to_latlong=False):
"""
Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa
Parameters
----------
geometry : shapely Polygon or MultiPolygon
the geometry to project
crs : int
the starting coordinate reference system of the passed-in geometry
to_latlong : bool
if True, project from crs to WGS84, if False, project
from crs to local UTM zone
Returns
-------
geometry_proj, crs : tuple (projected shapely geometry, crs of the
projected geometry)
"""
gdf = gpd.GeoDataFrame()
gdf.crs = crs
gdf.name = 'geometry to project'
gdf['geometry'] = None
gdf.loc[0, 'geometry'] = geometry
gdf_proj = project_gdf(gdf, to_latlong=to_latlong)
geometry_proj = gdf_proj['geometry'].iloc[0]
return geometry_proj, gdf_proj.crs | 0.001138 |
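A hedged sketch that takes the docstring at its word and passes `crs` as an EPSG integer (older releases of this API accepted proj4 dicts instead, so adapt to your version); the coordinates are arbitrary:

```python
from shapely.geometry import Polygon

polygon = Polygon([(-122.35, 47.60), (-122.30, 47.60), (-122.30, 47.65)])
projected, utm_crs = project_geometry(polygon, crs=4326)                  # lat-long -> local UTM
restored, _ = project_geometry(projected, crs=utm_crs, to_latlong=True)   # and back again
```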
def register(coordinator):
"""Registers this module as a worker with the given coordinator."""
if FLAGS.phantomjs_script:
utils.verify_binary('phantomjs_binary', ['--version'])
assert os.path.exists(FLAGS.phantomjs_script)
else:
utils.verify_binary('capture_binary', ['--version'])
assert FLAGS.capture_script
assert os.path.exists(FLAGS.capture_script)
assert FLAGS.capture_threads > 0
assert FLAGS.queue_server_prefix
item = queue_worker.RemoteQueueWorkflow(
constants.CAPTURE_QUEUE_NAME,
DoCaptureQueueWorkflow,
max_tasks=FLAGS.capture_threads,
wait_seconds=FLAGS.capture_wait_seconds)
item.root = True
coordinator.input_queue.put(item) | 0.00134 |
def match(self, passageId):
""" Given a passageId matches a citation level
:param passageId: A passage to match
:return:
"""
if not isinstance(passageId, CtsReference):
passageId = CtsReference(passageId)
if self.is_root():
return self[passageId.depth-1]
return self.root.match(passageId) | 0.005405 |
def deactivate_(self):
"""Init shmem variables to None
"""
self.preDeactivate_()
self.active = False
self.image_dimensions = None
self.client = None | 0.010204 |
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
chxmlstr = ''
for run in self.runs:
chxmlstr += run.toxml()
for record in self.records:
chxmlstr += record.toxml()
for event_record in self.event_records:
chxmlstr += event_record.toxml()
for data_display in self.data_displays:
chxmlstr += data_display.toxml()
for data_writer in self.data_writers:
chxmlstr += data_writer.toxml()
for event_writer in self.event_writers:
chxmlstr += event_writer.toxml()
if chxmlstr:
xmlstr = '<Simulation>' + chxmlstr + '</Simulation>'
else:
xmlstr = ''
return xmlstr | 0.002571 |
def bootstrap_params(rv_cont, data, n_iter=5, **kwargs):
"""Bootstrap the fit params of a distribution.
Parameters
==========
rv_cont: scipy.stats.rv_continuous instance
The distribution which to fit.
data: array-like, 1d
The data on which to fit.
    n_iter: int [default=5]
Number of bootstrap iterations.
"""
fit_res = []
for _ in range(n_iter):
params = rv_cont.fit(resample_1d(data, **kwargs))
fit_res.append(params)
fit_res = np.array(fit_res)
return fit_res | 0.001832 |
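A small usage sketch with SciPy's normal distribution (the sample itself is synthetic); it assumes the module's `resample_1d` helper is importable alongside this function:

```python
import numpy as np
from scipy import stats

data = stats.norm.rvs(loc=2.0, scale=0.5, size=1000, random_state=42)
fits = bootstrap_params(stats.norm, data, n_iter=5)   # shape (5, 2): one (loc, scale) per iteration
print(fits.mean(axis=0), fits.std(axis=0))            # bootstrap mean and spread of the fit params
```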
def start(self):
""" Starts the watchdog timer. """
self._timer = Timer(self.time, self.handler)
self._timer.daemon = True
self._timer.start()
return | 0.010582 |
def set_min_price(self, min_price):
"""
The minimum price.
:param min_price:
:return:
"""
if not isinstance(min_price, int):
raise DaftException("Min price should be an integer.")
self._min_price = str(min_price)
self._price += str(QueryParam.MIN_PRICE) + self._min_price | 0.005731 |
async def send_message(self, chat_id: typing.Union[base.Integer, base.String], text: base.String,
parse_mode: typing.Union[base.String, None] = None,
disable_web_page_preview: typing.Union[base.Boolean, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_to_message_id: typing.Union[base.Integer, None] = None,
reply_markup: typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup,
types.ReplyKeyboardRemove,
types.ForceReply, None] = None) -> types.Message:
"""
Use this method to send text messages.
Source: https://core.telegram.org/bots/api#sendmessage
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param text: Text of the message to be sent
:type text: :obj:`base.String`
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
fixed-width text or inline URLs in your bot's message.
:type parse_mode: :obj:`typing.Union[base.String, None]`
:param disable_web_page_preview: Disables link previews for links in this message
:type disable_web_page_preview: :obj:`typing.Union[base.Boolean, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_to_message_id: If the message is a reply, ID of the original message
:type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
:param reply_markup: Additional interface options
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message`
"""
reply_markup = prepare_arg(reply_markup)
payload = generate_payload(**locals())
if self.parse_mode:
payload.setdefault('parse_mode', self.parse_mode)
result = await self.request(api.Methods.SEND_MESSAGE, payload)
return types.Message(**result) | 0.008203 |
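A hedged sketch of calling this from an async context, in the aiogram style the signature suggests (token and chat id are placeholders):

```python
import asyncio
from aiogram import Bot

async def main():
    bot = Bot(token="123456:TOKEN")
    message = await bot.send_message(
        chat_id=123456789,
        text="<b>Hello</b> from the sketch",
        parse_mode="HTML",
        disable_notification=True,
    )
    print(message.message_id)

asyncio.run(main())
```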
def encode(input, output, encoding):
"""Encode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64':
import base64
return base64.encode(input, output)
if encoding == 'quoted-printable':
import quopri
return quopri.encode(input, output, 0)
if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
import uu
return uu.encode(input, output)
if encoding in ('7bit', '8bit'):
return output.write(input.read())
if encoding in encodetab:
pipethrough(input, encodetab[encoding], output)
else:
raise ValueError, \
'unknown Content-Transfer-Encoding: %s' % encoding | 0.002829 |
def _execute(params, cwd):
"""
Executes a subprocess
:param params:
A list of the executable and arguments to pass to it
:param cwd:
The working directory to execute the command in
:return:
A 2-element tuple of (stdout, stderr)
"""
proc = subprocess.Popen(
params,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd
)
stdout, stderr = proc.communicate()
code = proc.wait()
if code != 0:
e = OSError('subprocess exit code for %r was %d: %s' % (params, code, stderr))
e.stdout = stdout
e.stderr = stderr
raise e
return (stdout, stderr) | 0.00295 |
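For illustration, a call that shells out to git (the repository path is a placeholder):

```python
stdout, stderr = _execute(["git", "rev-parse", "HEAD"], cwd="/path/to/repo")
print(stdout.decode("utf-8").strip())   # current commit hash
```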
def setpts(stream, expr):
"""Change the PTS (presentation timestamp) of the input frames.
Args:
expr: The expression which is evaluated for each frame to construct its timestamp.
Official documentation: `setpts, asetpts <https://ffmpeg.org/ffmpeg-filters.html#setpts_002c-asetpts>`__
"""
return FilterNode(stream, setpts.__name__, args=[expr]).stream() | 0.007853 |
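A usage sketch in the ffmpeg-python style this filter comes from (file names are placeholders):

```python
import ffmpeg

# Halve every presentation timestamp, i.e. double the playback speed.
(
    ffmpeg
    .input("input.mp4")
    .setpts("0.5*PTS")
    .output("output_fast.mp4")
    .run()
)
```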
def notifyReady(self):
"""
Returns a deferred that will fire when the factory has created a
protocol that can be used to communicate with a Mongo server.
Note that this will not fire until we have connected to a Mongo
master, unless slaveOk was specified in the Mongo URI connection
options.
"""
if self.instance:
return defer.succeed(self.instance)
def on_cancel(d):
self.__notify_ready.remove(d)
df = defer.Deferred(on_cancel)
self.__notify_ready.append(df)
return df | 0.003247 |
def indent(text, branch_method, leaf_method, pass_syntax, flush_left_syntax, flush_left_empty_line, indentation_method,
get_block=get_indented_block):
"""Returns HTML as a basestring.
Parameters
----------
text : basestring
Source code, typically SHPAML, but could be a different (but
related) language. The remaining parameters specify details
about the language used in the source code. To parse SHPAML,
pass the same values as convert_shpaml_tree.
branch_method : function
convert_shpaml_tree passes html_block_tag here.
leaf_method : function
convert_shpaml_tree passes convert_line here.
pass_syntax : basestring
convert_shpaml_tree passes PASS_SYNTAX here.
flush_left_syntax : basestring
convert_shpaml_tree passes FLUSH_LEFT_SYNTAX here.
flush_left_empty_line : basestring
convert_shpaml_tree passes FLUSH_LEFT_EMPTY_LINE here.
indentation_method : function
convert_shpaml_tree passes _indent here.
get_block : function
Defaults to get_indented_block.
"""
text = text.rstrip()
lines = text.split('\n')
if lines and lines[0].startswith('!! '):
lines[0] = lines[0].replace('!! ', '<!DOCTYPE ') + '>'
output = []
indent_lines(lines, output, branch_method, leaf_method, pass_syntax, flush_left_syntax, flush_left_empty_line,
indentation_method, get_block=get_indented_block)
return '\n'.join(output) + '\n' | 0.001965 |
def run_election_new_upsert_validator(args, bigchain):
"""Initiates an election to add/update/remove a validator to an existing BigchainDB network
:param args: dict
args = {
'public_key': the public key of the proposed peer, (str)
'power': the proposed validator power for the new peer, (str)
'node_id': the node_id of the new peer (str)
'sk': the path to the private key of the node calling the election (str)
}
:param bigchain: an instance of BigchainDB
:return: election_id or `False` in case of failure
"""
new_validator = {
'public_key': {'value': public_key_from_base64(args.public_key),
'type': 'ed25519-base16'},
'power': args.power,
'node_id': args.node_id
}
return create_new_election(args.sk, bigchain, ValidatorElection, new_validator) | 0.004561 |
def invoked_with(self):
"""Similar to :attr:`Context.invoked_with` except properly handles
the case where :meth:`Context.send_help` is used.
If the help command was used regularly then this returns
the :attr:`Context.invoked_with` attribute. Otherwise, if
        the help command was called using :meth:`Context.send_help`,
then it returns the internal command name of the help command.
Returns
---------
:class:`str`
The command name that triggered this invocation.
"""
command_name = self._command_impl.name
ctx = self.context
if ctx is None or ctx.command is None or ctx.command.qualified_name != command_name:
return command_name
return ctx.invoked_with | 0.003797 |
def on_load(target: "EncryptableMixin", context):
"""
Intercept SQLAlchemy's instance load event.
"""
decrypt, plaintext = decrypt_instance(target)
if decrypt:
target.plaintext = plaintext | 0.004608 |