text (string, lengths 78 – 104k) | score (float64, 0 – 0.18)
---|---
def update(self, collection_id, title=None, description=None, private=False):
"""
Update an existing collection belonging to the logged-in user.
This requires the 'write_collections' scope.
:param collection_id [string]: The collection's ID. (Required.)
:param title [string]: The title of the collection. (Optional.)
:param description [string]: The collection's description. (Optional.)
:param private [boolean]: Whether to make this collection private. (Optional; default false.)
:return: [Collection]: The Unsplash Collection.
"""
url = "/collections/%s" % collection_id
data = {
"title": title,
"description": description,
"private": private
}
result = self._put(url, data=data)
return CollectionModel.parse(result) | 0.00346 |
def add_config():
"""
Prompts the user for API keys and adds them to an .ini file stored in the same
location as the script
"""
genius_key = input('Enter Genius key : ')
bing_key = input('Enter Bing key : ')
CONFIG['keys']['bing_key'] = bing_key
CONFIG['keys']['genius_key'] = genius_key
with open(config_path, 'w') as configfile:
CONFIG.write(configfile) | 0.002481 |
def feed(self, data):
"""
Main method for purifying HTML (overridden)
"""
self.reset_purified()
HTMLParser.feed(self, data)
return self.html() | 0.010582 |
def default_value_setter(field):
"""
When setting to the name of the field itself, the value
in the current language will be set.
"""
def default_value_func_setter(self, value):
localized_field = utils.build_localized_field_name(
field, self._linguist.active_language
)
setattr(self, localized_field, value)
return default_value_func_setter | 0.002481 |
def get_copy_token(
self,
bucket: str,
key: str,
cloud_checksum: str,
) -> typing.Any:
"""
Given a bucket, key, and the expected cloud-provided checksum, retrieve a token that can be passed into
:func:`~cloud_blobstore.BlobStore.copy` that guarantees the copy refers to the same version of the blob
identified by the checksum.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which checksum is being retrieved.
:param cloud_checksum: the expected cloud-provided checksum.
:return: an opaque copy token
"""
raise NotImplementedError() | 0.008535 |
def get_all_terms(self):
"""
Return all of the terms in the account.
https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms_api.index
"""
if not self._canvas_account_id:
raise MissingAccountID()
params = {"workflow_state": 'all', 'per_page': 500}
url = ACCOUNTS_API.format(self._canvas_account_id) + "/terms"
data_key = 'enrollment_terms'
terms = []
response = self._get_paged_resource(url, params, data_key)
for data in response[data_key]:
terms.append(CanvasTerm(data=data))
return terms | 0.003175 |
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
for subkey in registry_key.GetSubkeys():
values_dict = {}
values_dict['subkey_name'] = subkey.name
name_values = subkey.name.split('&')
number_of_name_values = len(name_values)
# Normally we expect 4 fields here however that is not always the case.
if number_of_name_values != 4:
logger.warning(
'Expected 4 &-separated values in: {0:s}'.format(subkey.name))
if number_of_name_values >= 1:
values_dict['device_type'] = name_values[0]
if number_of_name_values >= 2:
values_dict['vendor'] = name_values[1]
if number_of_name_values >= 3:
values_dict['product'] = name_values[2]
if number_of_name_values >= 4:
values_dict['revision'] = name_values[3]
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event_data.source_append = self._SOURCE_APPEND
if subkey.number_of_subkeys == 0:
# Time last USB device of this class was first inserted.
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
continue
for device_key in subkey.GetSubkeys():
values_dict['serial'] = device_key.name
friendly_name_value = device_key.GetValueByName('FriendlyName')
if friendly_name_value:
values_dict['friendly_name'] = friendly_name_value.GetDataAsObject()
else:
values_dict.pop('friendly_name', None)
# ParentIdPrefix applies to Windows XP Only.
parent_id_prefix_value = device_key.GetValueByName('ParentIdPrefix')
if parent_id_prefix_value:
values_dict['parent_id_prefix'] = (
parent_id_prefix_value.GetDataAsObject())
else:
values_dict.pop('parent_id_prefix', None)
# Time last USB device of this class was first inserted.
event = time_events.DateTimeValuesEvent(
subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
# Win7 - Last Connection.
# Vista/XP - Time of an insert.
event = time_events.DateTimeValuesEvent(
device_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
device_parameter_key = device_key.GetSubkeyByName('Device Parameters')
if device_parameter_key:
event = time_events.DateTimeValuesEvent(
device_parameter_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
log_configuration_key = device_key.GetSubkeyByName('LogConf')
if log_configuration_key:
event = time_events.DateTimeValuesEvent(
log_configuration_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
properties_key = device_key.GetSubkeyByName('Properties')
if properties_key:
event = time_events.DateTimeValuesEvent(
properties_key.last_written_time,
definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | 0.007235 |
def extract_attribute(module_name, attribute_name):
"""Extract metatdata property from a module"""
with open('%s/__init__.py' % module_name) as input_file:
for line in input_file:
if line.startswith(attribute_name):
return ast.literal_eval(line.split('=')[1].strip()) | 0.003215 |
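# A hypothetical usage sketch for extract_attribute (the 'mypkg' package name
# is illustrative): with mypkg/__init__.py containing the line
# __version__ = '1.2.3', the helper returns the evaluated literal.
version = extract_attribute('mypkg', '__version__')
# version == '1.2.3'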
def normalize(text, mode='NFKC', ignore=''):
"""Convert Half-width (Hankaku) Katakana to Full-width (Zenkaku) Katakana,
Full-width (Zenkaku) ASCII and DIGIT to Half-width (Hankaku) ASCII
and DIGIT.
Additionally, Full-width wave dash (〜) etc. are normalized
Parameters
----------
text : str
Source string.
mode : str
Unicode normalization mode.
ignore : str
Characters to be ignored in converting.
Return
------
str
Normalized string.
Examples
--------
>>> print(jaconv.normalize('ティロ・フィナ〜レ', 'NFKC'))
ティロ・フィナーレ
"""
text = text.replace('〜', 'ー').replace('~', 'ー')
text = text.replace("’", "'").replace('”', '"').replace('“', '``')
text = text.replace('―', '-').replace('‐', '-').replace('˗', '-').replace('֊', '-')
text = text.replace('‐', '-').replace('‑', '-').replace('‒', '-').replace('–', '-')
text = text.replace('⁃', '-').replace('⁻', '-').replace('₋', '-').replace('−', '-')
text = text.replace('﹣', 'ー').replace('-', 'ー').replace('—', 'ー').replace('―', 'ー')
text = text.replace('━', 'ー').replace('─', 'ー')
return unicodedata.normalize(mode, text) | 0.004205 |
def get_proxy(self, input_):
"""Gets a proxy.
:param input: a proxy condition
:type input: ``osid.proxy.ProxyCondition``
:return: a proxy
:rtype: ``osid.proxy.Proxy``
:raise: ``NullArgument`` -- ``input`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``input`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
if input_._http_request is not None:
authentication = DjangoAuthentication()
authentication.set_django_user(input_._http_request.user, input_._use_user_id)
elif input_._xblock_user is not None:
authentication = XBlockAuthentication()
authentication.set_xblock_user(input_._xblock_user)
else:
authentication = None
if authentication is not None:
effective_agent_id = authentication.get_agent_id()
else:
effective_agent_id = input_._effective_agent_id
if input_._locale is not None:
locale = input_._locale
else:
locale = None
return rules.Proxy(authentication=authentication,
effective_agent_id=effective_agent_id,
locale=locale) | 0.002138 |
def values_export(self, **params):
""" Method for `Export Values from all Data Streams of a Device <https://m2x.att.com/developer/documentation/v2/device#Export-Values-from-all-Data-Streams-of-a-Device>`_ endpoint.
:param params: Query parameters passed as keyword arguments. View M2X API Docs for listing of available parameters.
:return: The API response, see M2X API docs for details
:rtype: dict
:raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
"""
self.api.get(self.subpath('/values/export.csv'), params=params)
return self.api.last_response | 0.007553 |
def GET_save_conditionvalues(self) -> None:
"""Save the |StateSequence| and |LogSequence| object values of the
current |HydPy| instance for the current simulation endpoint."""
state.conditions[self._id] = state.conditions.get(self._id, {})
state.conditions[self._id][state.idx2] = state.hp.conditions | 0.006024 |
def search_image(name=None, path=['.']):
"""
Look for the image's real path; if name is None, return all images under path.
@return system encoded path string
FIXME(ssx): this code just looks weird.
"""
name = strutils.decode(name)
for image_dir in path:
if not os.path.isdir(image_dir):
continue
image_dir = strutils.decode(image_dir)
image_path = os.path.join(image_dir, name)
if os.path.isfile(image_path):
return strutils.encode(image_path)
for image_path in list_all_image(image_dir):
if not image_name_match(name, image_path):
continue
return strutils.encode(image_path)
return None | 0.002725 |
def get_MAD(tau):
"""
input: eigenvalues of PCA matrix
output: Maximum Angular Deviation
"""
# tau is ordered so that tau[0] > tau[1] > tau[2]
for t in tau:
if isinstance(t, complex):
return -999
MAD = math.degrees(numpy.arctan(numpy.sqrt(old_div((tau[1] + tau[2]), tau[0]))) )
return MAD | 0.008824 |
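# A worked sketch for get_MAD, assuming numpy, math and past.utils.old_div are
# imported as in the snippet above: for ordered eigenvalues tau = [0.9, 0.06, 0.04],
# MAD = degrees(arctan(sqrt((0.06 + 0.04) / 0.9))), roughly 18.4 degrees.
mad = get_MAD([0.9, 0.06, 0.04])
# round(mad, 1) == 18.4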
def disconnect(self, message=""):
"""Hang up the connection.
Arguments:
message -- Quit message.
"""
try:
del self.connected
except AttributeError:
return
self.quit(message)
self.transport.close()
self._handle_event(Event("disconnect", self.server, "", [message])) | 0.005435 |
def replaceAll(self):
"""
Replace all matches after the current cursor position.
This method calls ``replaceSelectedText`` until it returns
**False**, and then closes the mini buffer.
"""
while self.replaceSelected():
pass
self.qteWidget.SCISetStylingEx(0, 0, self.styleOrig)
self.qteMain.qteKillMiniApplet() | 0.005181 |
def retention_policy_get(database,
name,
user=None,
password=None,
host=None,
port=None):
'''
Get an existing retention policy.
database
The database to operate on.
name
Name of the policy to modify.
CLI Example:
.. code-block:: bash
salt '*' influxdb08.retention_policy_get metrics default
'''
client = _client(user=user, password=password, host=host, port=port)
for policy in client.get_list_retention_policies(database):
if policy['name'] == name:
return policy
return None | 0.001445 |
def basic_ack(self, delivery_tag, multiple=False):
"""Acknowledge one or more messages
This method acknowledges one or more messages delivered via
the Deliver or Get-Ok methods. The client can ask to confirm
a single message or a set of messages up to and including a
specific message.
PARAMETERS:
delivery_tag: longlong
server-assigned delivery tag
The server-assigned and channel-specific delivery tag
RULE:
The delivery tag is valid only within the channel
from which the message was received. I.e. a client
MUST NOT receive a message on one channel and then
acknowledge it on another.
RULE:
The server MUST NOT use a zero value for delivery
tags. Zero is reserved for client use, meaning "all
messages so far received".
multiple: boolean
acknowledge multiple messages
If set to True, the delivery tag is treated as "up to
and including", so that the client can acknowledge
multiple messages with a single method. If set to
False, the delivery tag refers to a single message.
If the multiple field is True, and the delivery tag
is zero, tells the server to acknowledge all
outstanding messages.
RULE:
The server MUST validate that a non-zero delivery-
tag refers to a delivered message, and raise a
channel exception if this is not the case.
"""
args = AMQPWriter()
args.write_longlong(delivery_tag)
args.write_bit(multiple)
self._send_method((60, 80), args) | 0.001053 |
def get_plot_data(self):
""" Generates the JSON report to plot the gene boxes
Following the convention of the reports platform, this method returns
a list of JSON/dict objects with the information about each entry in
the abricate file. The information contained in this JSON is::
{contig_id: <str>,
seqRange: [<int>, <int>],
gene: <str>,
accession: <str>,
coverage: <float>,
identity: <float>
}
Note that the `seqRange` entry contains the position in the
corresponding contig, not the absolute position in the whole assembly.
Returns
-------
json_dic : list
List of JSON/dict objects with the report data.
"""
json_dic = {"plotData": []}
sample_dic = {}
sample_assembly_map = {}
for entry in self.storage.values():
sample_id = re.match("(.*)_abr", entry["log_file"]).groups()[0]
if sample_id not in sample_dic:
sample_dic[sample_id] = {}
# Get contig ID using the same regex as in `assembly_report.py`
# template
contig_id = self._get_contig_id(entry["reference"])
# Get database
database = entry["database"]
if database not in sample_dic[sample_id]:
sample_dic[sample_id][database] = []
# Update the sample-assembly correspondence dict
if sample_id not in sample_assembly_map:
sample_assembly_map[sample_id] = entry["infile"]
sample_dic[sample_id][database].append(
{"contig": contig_id,
"seqRange": entry["seq_range"],
"gene": entry["gene"].replace("'", ""),
"accession": entry["accession"],
"coverage": entry["coverage"],
"identity": entry["identity"],
},
)
for sample, data in sample_dic.items():
json_dic["plotData"].append(
{
"sample": sample,
"data": {"abricateXrange": data},
"assemblyFile": sample_assembly_map[sample]
}
)
return json_dic | 0.000864 |
def _mp_run_check(tasks, results, options):
"""
a helper function for multiprocessing with DistReport.
"""
try:
for index, change in iter(tasks.get, None):
# this is the part that takes up all of our time and
# produces side-effects like writing out files for all of
# the report formats.
change.check()
# rather than serializing the completed change (which
# could be rather large now that it's been realized), we
# send back only what we want, which is the squashed
# overview, and throw away the used bits.
squashed = squash(change, options=options)
change.clear()
results.put((index, squashed))
except KeyboardInterrupt:
# prevent a billion lines of backtrace from hitting the user
# in the face
return | 0.001119 |
def add_layer(self, depth, soil):
"""
Adds a soil to the SoilProfile at a set depth.
Note, the soils are automatically reordered based on depth from surface.
:param depth: depth from surface to top of soil layer
:param soil: Soil object
"""
self._layers[depth] = soil
self._sort_layers()
if self.hydrostatic:
if depth >= self.gwl:
soil.saturation = 1.0
else:
li = self.get_layer_index_by_depth(depth)
layer_height = self.layer_height(li)
if layer_height is None:
soil.saturation = 0.0
elif depth + layer_height <= self.gwl:
soil.saturation = 0.0
else:
sat_height = depth + self.layer_height(li) - self.gwl
soil.saturation = sat_height / self.layer_height(li) | 0.003215 |
def combine_dfs(dfs, names, method):
"""Combine dataframes.
Combination is either done simply by concatenating the DataFrames, or with
tracking by adding the name of the source dataset as a column."""
if method == "track":
res = list()
for df, identifier in zip(dfs, names):
df["dataset"] = identifier
res.append(df)
return pd.concat(res, ignore_index=True)
elif method == "simple":
return pd.concat(dfs, ignore_index=True) | 0.001996 |
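# A minimal usage sketch for combine_dfs; the "track" method tags each row
# with the name of its source frame.
import pandas as pd
df_a = pd.DataFrame({"value": [1, 2]})
df_b = pd.DataFrame({"value": [3]})
combined = combine_dfs([df_a, df_b], names=["a", "b"], method="track")
# combined has columns ['value', 'dataset'] with dataset == ['a', 'a', 'b']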
def mod_hostname(hostname):
'''
Modify hostname
.. versionchanged:: 2015.8.0
Added support for SunOS (Solaris 10, Illumos, SmartOS)
CLI Example:
.. code-block:: bash
salt '*' network.mod_hostname master.saltstack.com
'''
#
# SunOS tested on SmartOS and OmniOS (Solaris 10 compatible)
# Oracle Solaris 11 uses smf, currently not supported
#
# /etc/nodename is the hostname only, not fqdn
# /etc/defaultdomain is the domain
# /etc/hosts should have both fqdn and hostname entries
#
if hostname is None:
return False
hostname_cmd = salt.utils.path.which('hostnamectl') or salt.utils.path.which('hostname')
if salt.utils.platform.is_sunos():
uname_cmd = '/usr/bin/uname' if salt.utils.platform.is_smartos() else salt.utils.path.which('uname')
check_hostname_cmd = salt.utils.path.which('check-hostname')
# Grab the old hostname so we know which hostname to change and then
# change the hostname using the hostname command
if hostname_cmd.endswith('hostnamectl'):
result = __salt__['cmd.run_all']('{0} status'.format(hostname_cmd))
if 0 == result['retcode']:
out = result['stdout']
for line in out.splitlines():
line = line.split(':')
if 'Static hostname' in line[0]:
o_hostname = line[1].strip()
else:
log.debug('%s was unable to get hostname', hostname_cmd)
o_hostname = __salt__['network.get_hostname']()
elif not salt.utils.platform.is_sunos():
# don't run hostname -f because -f is not supported on all platforms
o_hostname = socket.getfqdn()
else:
# output: Hostname core OK: fully qualified as core.acheron.be
o_hostname = __salt__['cmd.run'](check_hostname_cmd).split(' ')[-1]
if hostname_cmd.endswith('hostnamectl'):
result = __salt__['cmd.run_all']('{0} set-hostname {1}'.format(
hostname_cmd,
hostname,
))
if result['retcode'] != 0:
log.debug('%s was unable to set hostname. Error: %s',
hostname_cmd, result['stderr'])
return False
elif not salt.utils.platform.is_sunos():
__salt__['cmd.run']('{0} {1}'.format(hostname_cmd, hostname))
else:
__salt__['cmd.run']('{0} -S {1}'.format(uname_cmd, hostname.split('.')[0]))
# Modify the /etc/hosts file to replace the old hostname with the
# new hostname
with salt.utils.files.fopen('/etc/hosts', 'r') as fp_:
host_c = [salt.utils.stringutils.to_unicode(_l)
for _l in fp_.readlines()]
with salt.utils.files.fopen('/etc/hosts', 'w') as fh_:
for host in host_c:
host = host.split()
try:
host[host.index(o_hostname)] = hostname
if salt.utils.platform.is_sunos():
# also set a copy of the hostname
host[host.index(o_hostname.split('.')[0])] = hostname.split('.')[0]
except ValueError:
pass
fh_.write(salt.utils.stringutils.to_str('\t'.join(host) + '\n'))
# Modify the /etc/sysconfig/network configuration file to set the
# new hostname
if __grains__['os_family'] == 'RedHat':
with salt.utils.files.fopen('/etc/sysconfig/network', 'r') as fp_:
network_c = [salt.utils.stringutils.to_unicode(_l)
for _l in fp_.readlines()]
with salt.utils.files.fopen('/etc/sysconfig/network', 'w') as fh_:
for net in network_c:
if net.startswith('HOSTNAME'):
old_hostname = net.split('=', 1)[1].rstrip()
quote_type = salt.utils.stringutils.is_quoted(old_hostname)
fh_.write(salt.utils.stringutils.to_str(
'HOSTNAME={1}{0}{1}\n'.format(
salt.utils.stringutils.dequote(hostname),
quote_type)))
else:
fh_.write(salt.utils.stringutils.to_str(net))
elif __grains__['os_family'] in ('Debian', 'NILinuxRT'):
with salt.utils.files.fopen('/etc/hostname', 'w') as fh_:
fh_.write(salt.utils.stringutils.to_str(hostname + '\n'))
if __grains__['lsb_distrib_id'] == 'nilrt':
str_hostname = salt.utils.stringutils.to_str(hostname)
nirtcfg_cmd = '/usr/local/natinst/bin/nirtcfg'
nirtcfg_cmd += ' --set section=SystemSettings,token=\'Host_Name\',value=\'{0}\''.format(str_hostname)
if __salt__['cmd.run_all'](nirtcfg_cmd)['retcode'] != 0:
raise CommandExecutionError('Couldn\'t set hostname to: {0}\n'.format(str_hostname))
elif __grains__['os_family'] == 'OpenBSD':
with salt.utils.files.fopen('/etc/myname', 'w') as fh_:
fh_.write(salt.utils.stringutils.to_str(hostname + '\n'))
# Update /etc/nodename and /etc/defaultdomain on SunOS
if salt.utils.platform.is_sunos():
with salt.utils.files.fopen('/etc/nodename', 'w') as fh_:
fh_.write(salt.utils.stringutils.to_str(
hostname.split('.')[0] + '\n')
)
with salt.utils.files.fopen('/etc/defaultdomain', 'w') as fh_:
fh_.write(salt.utils.stringutils.to_str(
".".join(hostname.split('.')[1:]) + '\n')
)
return True | 0.001277 |
def to_flat_graph(self):
"""Convert the parsed manifest to the 'flat graph' that the compiler
expects.
Kind of hacky note: everything in the code is happy to deal with
macros as ParsedMacro objects (in fact, it's been changed to require
that), so those can just be returned without any work. Nodes sadly
require a lot of work on the compiler side.
Ideally in the future we won't need to have this method.
"""
return {
'nodes': {k: v.to_shallow_dict() for k, v in self.nodes.items()},
'macros': self.macros,
} | 0.003257 |
def get_device_model():
"""
Returns the Device model that is active in this project.
"""
try:
return apps.get_model(settings.GCM_DEVICE_MODEL)
except ValueError:
raise ImproperlyConfigured("GCM_DEVICE_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"GCM_DEVICE_MODEL refers to model '%s' that has not been installed" % settings.GCM_DEVICE_MODEL
) | 0.006452 |
def flatten(self, max_value: int) -> FrozenSet[int]:
"""Return a set of all values contained in the sequence set.
Args:
max_value: The maximum value, in place of any ``*``.
"""
return frozenset(self.iter(max_value)) | 0.007663 |
async def create_task(app: web.Application,
coro: Coroutine,
*args, **kwargs
) -> asyncio.Task:
"""
Convenience function for calling `TaskScheduler.create(coro)`
This will use the default `TaskScheduler` to create a new background task.
Example:
import asyncio
from datetime import datetime
from brewblox_service import scheduler, service
async def current_time(interval):
while True:
await asyncio.sleep(interval)
print(datetime.now())
async def start(app):
await scheduler.create_task(app, current_time(interval=2))
app = service.create_app(default_name='example')
scheduler.setup(app)
app.on_startup.append(start)
service.furnish(app)
service.run(app)
"""
return await get_scheduler(app).create(coro, *args, **kwargs) | 0.001045 |
def load_operator(self, operator):
"""|coro|
Loads the players stats for the operator
Parameters
----------
operator : str
the name of the operator
Returns
-------
:class:`Operator`
the operator object found"""
location = yield from self.auth.get_operator_index(operator)
if location is None:
raise ValueError("invalid operator %s" % operator)
operator_key = yield from self.auth.get_operator_statistic(operator)
if operator_key is not None:
operator_key = "," + operator_key
else:
operator_key = ""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/playerstats2/statistics?populations=%s&statistics=operatorpvp_kills,operatorpvp_death,operatorpvp_roundwon,operatorpvp_roundlost,operatorpvp_meleekills,operatorpvp_totalxp,operatorpvp_headshot,operatorpvp_timeplayed,operatorpvp_dbno%s" % (self.spaceid, self.platform_url, self.id, operator_key))
if not "results" in data or not self.id in data["results"]:
raise InvalidRequest("Missing results key in returned JSON object %s" % str(data))
data = data["results"][self.id]
data = {x.split(":")[0].split("_")[1]: data[x] for x in data if x is not None and location in x}
if operator_key:
data["__statistic_name"] = operator_key.split("_")[1]
#if len(data) < 5:
# raise InvalidRequest("invalid number of results for operator in JSON object %s" % data)
oper = Operator(operator, data)
self.operators[operator] = oper
return oper | 0.004709 |
async def send_upstream(self, message, stream_name=None):
"""
Send a message upstream to a de-multiplexed application.
If stream_name is included, this will send just to that upstream stream; if not
included, it will send to all upstream streams.
"""
if stream_name is None:
for steam_queue in self.application_streams.values():
await steam_queue.put(message)
return
steam_queue = self.application_streams.get(stream_name)
if steam_queue is None:
raise ValueError("Invalid multiplexed frame received (stream not mapped)")
await steam_queue.put(message) | 0.006015 |
def as_alias_handler(alias_list):
"""Returns a list of all the names that will be called."""
list_ = list()
for alias in alias_list:
if alias.asname:
list_.append(alias.asname)
else:
list_.append(alias.name)
return list_ | 0.003623 |
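# A small usage sketch for as_alias_handler with the standard-library ast
# module: the handler prefers the 'as' name when one is given.
import ast
imports = ast.parse("import numpy as np, os").body[0]
as_alias_handler(imports.names)
# -> ['np', 'os']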
def run(self):
'''Run listener
'''
self.running = True
for msg in self.recv(1):
if msg is None:
if self.running:
continue
else:
break
self.logger.debug("New message received: %s", str(msg))
self.add_to_queue(msg) | 0.005634 |
def storage_class(self, value):
"""Set the storage class for the bucket.
See https://cloud.google.com/storage/docs/storage-classes
:type value: str
:param value: one of "MULTI_REGIONAL", "REGIONAL", "NEARLINE",
"COLDLINE", "STANDARD", or "DURABLE_REDUCED_AVAILABILITY"
"""
if value not in self._STORAGE_CLASSES:
raise ValueError("Invalid storage class: %s" % (value,))
self._patch_property("storageClass", value) | 0.003968 |
def _create_slack_with_env_var(env_var: EnvVar) -> SlackClient:
""" Create a :obj:`SlackClient` with a token from an env var. """
token = os.getenv(env_var)
if token:
return SlackClient(token=token)
raise MissingToken(f"Could not acquire token from {env_var}") | 0.003521 |
def get_field_value(obj, field):
"""
Gets the value of a given model instance field.
:param obj: The model instance.
:type obj: Model
:param field: The field you want to find the value of.
:type field: Any
:return: The value of the field as a string.
:rtype: str
"""
if isinstance(field, DateTimeField):
# DateTimeFields are timezone-aware, so we need to convert the field
to its naive form before we can accurately compare them for changes.
try:
value = field.to_python(getattr(obj, field.name, None))
if value is not None and settings.USE_TZ and not timezone.is_naive(value):
value = timezone.make_naive(value, timezone=timezone.utc)
except ObjectDoesNotExist:
value = field.default if field.default is not NOT_PROVIDED else None
else:
try:
value = smart_text(getattr(obj, field.name, None))
except ObjectDoesNotExist:
value = field.default if field.default is not NOT_PROVIDED else None
return value | 0.003717 |
def returner(ret):
'''
Return data to a mysql server
'''
# if a minion is returning a standalone job, get a jobid
if ret['jid'] == 'req':
ret['jid'] = prep_jid(nocache=ret.get('nocache', False))
save_load(ret['jid'], ret)
try:
with _get_serv(ret, commit=True) as cur:
sql = '''INSERT INTO `salt_returns`
(`fun`, `jid`, `return`, `id`, `success`, `full_ret`)
VALUES (%s, %s, %s, %s, %s, %s)'''
cur.execute(sql, (ret['fun'], ret['jid'],
salt.utils.json.dumps(ret['return']),
ret['id'],
ret.get('success', False),
salt.utils.json.dumps(ret)))
except salt.exceptions.SaltMasterError as exc:
log.critical(exc)
log.critical('Could not store return with MySQL returner. MySQL server unavailable.') | 0.002114 |
def _read_certificates(self):
"""
Reads end-entity and intermediate certificate information from the
TLS session
"""
trust_ref = None
cf_data_ref = None
result = None
try:
trust_ref_pointer = new(Security, 'SecTrustRef *')
result = Security.SSLCopyPeerTrust(
self._session_context,
trust_ref_pointer
)
handle_sec_error(result)
trust_ref = unwrap(trust_ref_pointer)
number_certs = Security.SecTrustGetCertificateCount(trust_ref)
self._intermediates = []
for index in range(0, number_certs):
sec_certificate_ref = Security.SecTrustGetCertificateAtIndex(
trust_ref,
index
)
cf_data_ref = Security.SecCertificateCopyData(sec_certificate_ref)
cert_data = CFHelpers.cf_data_to_bytes(cf_data_ref)
result = CoreFoundation.CFRelease(cf_data_ref)
handle_cf_error(result)
cf_data_ref = None
cert = x509.Certificate.load(cert_data)
if index == 0:
self._certificate = cert
else:
self._intermediates.append(cert)
finally:
if trust_ref:
result = CoreFoundation.CFRelease(trust_ref)
handle_cf_error(result)
if cf_data_ref:
result = CoreFoundation.CFRelease(cf_data_ref)
handle_cf_error(result) | 0.001856 |
def reflect(x, y, x0, y0, d=1.0, a=180):
""" Returns the reflection of a point through origin (x0,y0).
"""
return coordinates(x0, y0, d * distance(x0, y0, x, y),
a + angle(x0, y0, x, y)) | 0.004444 |
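# A standalone sketch of the default case (d=1.0, a=180), assuming the
# coordinates/distance/angle helpers used above place a point at a given
# distance and angle from (x0, y0): reflecting through the point is then the
# same as mirroring both coordinates.
def reflect_through_point(x, y, x0, y0):
    return 2 * x0 - x, 2 * y0 - y
# reflect_through_point(3, 1, 0, 0) == (-3, -1)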
def send_message(self, body, to, quiet=False, html_body=None):
"""Send a message to a single member"""
if to.get('MUTED'):
to['QUEUED_MESSAGES'].append(body)
else:
if not quiet:
logger.info('message on %s to %s: %s' % (self.name, to['JID'], body))
message = xmpp.protocol.Message(to=to['JID'], body=body, typ='chat')
if html_body:
html = xmpp.Node('html', {'xmlns': 'http://jabber.org/protocol/xhtml-im'})
html.addChild(node=xmpp.simplexml.XML2Node("<body xmlns='http://www.w3.org/1999/xhtml'>" + html_body.encode('utf-8') + "</body>"))
message.addChild(node=html)
self.client.send(message) | 0.008108 |
def download_preview(self, image, url_field='url'):
"""Downlaod the binary data of an image attachment at preview size.
:param str url_field: the field of the image with the right URL
:return: binary image data
:rtype: bytes
"""
return self.download(image, url_field=url_field, suffix='preview') | 0.005797 |
def destroy(name, call=None):
"""
This function irreversibly destroys a virtual machine on the cloud provider.
Before doing so, it should fire an event on the Salt event bus.
The tag for this event is `salt/cloud/<vm name>/destroying`.
Once the virtual machine has been destroyed, another event is fired.
The tag for that event is `salt/cloud/<vm name>/destroyed`.
Dependencies:
list_nodes
@param name:
@type name: str
@param call:
@type call:
@return: True if all went well, otherwise an error message
@rtype: bool|str
"""
log.info("Attempting to delete instance %s", name)
if not vb_machine_exists(name):
return "{0} doesn't exist and can't be deleted".format(name)
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
vb_destroy_machine(name)
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
) | 0.00157 |
def macro_create(self, args: argparse.Namespace) -> None:
"""Create or overwrite a macro"""
# Validate the macro name
valid, errmsg = self.statement_parser.is_valid_command(args.name)
if not valid:
self.perror("Invalid macro name: {}".format(errmsg), traceback_war=False)
return
if args.name in self.get_all_commands():
self.perror("Macro cannot have the same name as a command", traceback_war=False)
return
if args.name in self.aliases:
self.perror("Macro cannot have the same name as an alias", traceback_war=False)
return
utils.unquote_redirection_tokens(args.command_args)
# Build the macro value string
value = args.command
if args.command_args:
value += ' ' + ' '.join(args.command_args)
# Find all normal arguments
arg_list = []
normal_matches = re.finditer(MacroArg.macro_normal_arg_pattern, value)
max_arg_num = 0
arg_nums = set()
while True:
try:
cur_match = normal_matches.__next__()
# Get the number string between the braces
cur_num_str = (re.findall(MacroArg.digit_pattern, cur_match.group())[0])
cur_num = int(cur_num_str)
if cur_num < 1:
self.perror("Argument numbers must be greater than 0", traceback_war=False)
return
arg_nums.add(cur_num)
if cur_num > max_arg_num:
max_arg_num = cur_num
arg_list.append(MacroArg(start_index=cur_match.start(), number_str=cur_num_str, is_escaped=False))
except StopIteration:
break
# Make sure the argument numbers are continuous
if len(arg_nums) != max_arg_num:
self.perror("Not all numbers between 1 and {} are present "
"in the argument placeholders".format(max_arg_num), traceback_war=False)
return
# Find all escaped arguments
escaped_matches = re.finditer(MacroArg.macro_escaped_arg_pattern, value)
while True:
try:
cur_match = escaped_matches.__next__()
# Get the number string between the braces
cur_num_str = re.findall(MacroArg.digit_pattern, cur_match.group())[0]
arg_list.append(MacroArg(start_index=cur_match.start(), number_str=cur_num_str, is_escaped=True))
except StopIteration:
break
# Set the macro
result = "overwritten" if args.name in self.macros else "created"
self.macros[args.name] = Macro(name=args.name, value=value, minimum_arg_count=max_arg_num, arg_list=arg_list)
self.poutput("Macro '{}' {}".format(args.name, result)) | 0.004512 |
def health_check(self):
"""Gets a single item to determine if Dynamo is functioning."""
logger.debug('Health Check on Table: {namespace}'.format(
namespace=self.namespace
))
try:
self.get_all()
return True
except ClientError as e:
logger.exception(e)
logger.error('Error encountered with Database. Assume unhealthy')
return False | 0.004515 |
def path_to_pattern(path, metadata=None):
"""
Remove source information from path when using caching
Returns None if path is not str
Parameters
----------
path : str
Path to data optionally containing format_strings
metadata : dict, optional
Extra arguments to the class, contains any cache information
Returns
-------
pattern : str
Pattern style path stripped of everything to the left of cache regex.
"""
if not isinstance(path, str):
return
pattern = path
if metadata:
cache = metadata.get('cache')
if cache:
regex = next(c.get('regex') for c in cache if c.get('argkey') == 'urlpath')
pattern = pattern.split(regex)[-1]
return pattern | 0.002584 |
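# A minimal usage sketch for path_to_pattern with a hypothetical caching
# metadata block: the regex marks where the cached prefix ends, and everything
# to its left is stripped.
metadata = {"cache": [{"argkey": "urlpath", "regex": "mybucket/"}]}
path_to_pattern("s3://mybucket/data_{year}.csv", metadata)
# -> 'data_{year}.csv'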
def refresh_entitlement(owner, repo, identifier, show_tokens):
"""Refresh an entitlement in a repository."""
client = get_entitlements_api()
with catch_raise_api_exception():
data, _, headers = client.entitlements_refresh_with_http_info(
owner=owner, repo=repo, identifier=identifier, show_tokens=show_tokens
)
ratelimits.maybe_rate_limit(client, headers)
return data.to_dict() | 0.004684 |
def files_comments_add(self, *, comment: str, file: str, **kwargs) -> SlackResponse:
"""Add a comment to an existing file.
Args:
comment (str): The body of the comment.
e.g. 'Everyone should take a moment to read this file.'
file (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"comment": comment, "file": file})
return self.api_call("files.comments.add", json=kwargs) | 0.006536 |
def gif_summary(name, tensor, max_outputs=3, fps=10, collections=None,
family=None):
"""Outputs a `Summary` protocol buffer with gif animations.
Args:
name: Name of the summary.
tensor: A 5-D `uint8` `Tensor` of shape `[batch_size, time, height, width,
channels]` where `channels` is 1 or 3.
max_outputs: Max number of batch elements to generate gifs for.
fps: frames per second of the animation
collections: Optional list of tf.GraphKeys. The collections to add the
summary to. Defaults to [tf.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
Raises:
ValueError: if the given tensor has the wrong shape.
"""
tensor = tf.convert_to_tensor(tensor)
if len(tensor.get_shape()) != 5:
raise ValueError("Assuming videos given as tensors in the format "
"[batch, time, height, width, channels] but got one "
"of shape: %s" % str(tensor.get_shape()))
tensor = tf.cast(tensor, tf.uint8)
if distribute_summary_op_util.skip_summary():
return tf.constant("")
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
val = tf.py_func(
py_gif_summary,
[tag, tensor, max_outputs, fps],
tf.string,
stateful=False,
name=scope)
summary_op_util.collect(val, collections, [tf.GraphKeys.SUMMARIES])
return val | 0.005587 |
def price_projection(price_data=price_data(), ex_best_offers_overrides=ex_best_offers_overrides(), virtualise=True,
rollover_stakes=False):
"""
Selection criteria for the returned price data.
:param list price_data: PriceData filter to specify what market data we wish to receive.
:param dict ex_best_offers_overrides: define order book depth, rollup method.
:param bool virtualise: whether to receive virtualised prices also.
:param bool rollover_stakes: whether to accumulate volume at each price as sum of volume at that price and all better
prices.
:returns: price data criteria for market data.
:rtype: dict
"""
args = locals()
return {
to_camel_case(k): v for k, v in args.items() if v is not None
} | 0.006353 |
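# A minimal usage sketch for price_projection, assuming the price_data,
# ex_best_offers_overrides and to_camel_case helpers referenced above are
# available: keys come back in the camelCase form the API expects.
price_projection(price_data=["EX_BEST_OFFERS"], rollover_stakes=False)
# -> {'priceData': ['EX_BEST_OFFERS'], 'exBestOffersOverrides': {...},
#     'virtualise': True, 'rolloverStakes': False}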
def get_instance(self, payload):
"""
Build an instance of AllTimeInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
:rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeInstance
"""
return AllTimeInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | 0.011574 |
def clean_gff(gff, cleaned, add_chr=False, chroms_to_ignore=None,
featuretypes_to_ignore=None):
"""
Cleans a GFF file by removing features on unwanted chromosomes and of
unwanted featuretypes. Optionally adds "chr" to chrom names.
"""
logger.info("Cleaning GFF")
chroms_to_ignore = chroms_to_ignore or []
featuretypes_to_ignore = featuretypes_to_ignore or []
with open(cleaned, 'w') as fout:
for i in gffutils.iterators.DataIterator(gff):
if add_chr:
i.chrom = "chr" + i.chrom
if i.chrom in chroms_to_ignore:
continue
if i.featuretype in featuretypes_to_ignore:
continue
fout.write(str(i) + '\n')
return cleaned | 0.001302 |
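# A hypothetical invocation of clean_gff (file names are illustrative): prefix
# "chr" onto chromosome names, then drop mitochondrial features and exon
# records from the output.
clean_gff("annotation.gff", "annotation.cleaned.gff", add_chr=True,
          chroms_to_ignore=["chrM"], featuretypes_to_ignore=["exon"])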
def pluralize(singular):
"""Return plural form of given lowercase singular word (English only). Based on
ActiveState recipe http://code.activestate.com/recipes/413172/
>>> pluralize('')
''
>>> pluralize('goose')
'geese'
>>> pluralize('dolly')
'dollies'
>>> pluralize('genius')
'genii'
>>> pluralize('jones')
'joneses'
>>> pluralize('pass')
'passes'
>>> pluralize('zero')
'zeros'
>>> pluralize('casino')
'casinos'
>>> pluralize('hero')
'heroes'
>>> pluralize('church')
'churches'
>>> pluralize('x')
'xs'
>>> pluralize('car')
'cars'
"""
if not singular:
return ''
plural = ABERRANT_PLURAL_MAP.get(singular)
if plural:
return plural
root = singular
try:
if singular[-1] == 'y' and singular[-2] not in VOWELS:
root = singular[:-1]
suffix = 'ies'
elif singular[-1] == 's':
if singular[-2] in VOWELS:
if singular[-3:] == 'ius':
root = singular[:-2]
suffix = 'i'
else:
root = singular[:-1]
suffix = 'ses'
else:
suffix = 'es'
elif singular[-2:] in ('ch', 'sh'):
suffix = 'es'
else:
suffix = 's'
except IndexError:
suffix = 's'
plural = root + suffix
return plural | 0.001376 |
def main():
"""Start the poor_consumer."""
try:
opts, args = getopt.getopt(sys.argv[1:], "h:v", ["help", "nack=",
"servers=", "queues="])
except getopt.GetoptError as err:
print str(err)
usage()
sys.exit()
# defaults
nack = 0.0
verbose = False
servers = "localhost:7712,localhost:7711"
queues = "test"
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("--nack"):
nack = float(a)
elif o in ("--servers"):
servers = a
elif o in ("--queues"):
queues = a
else:
assert False, "unhandled option"
# prepare servers and queus for pydisque
servers = servers.split(",")
queues = queues.split(",")
c = Client(servers)
c.connect()
while True:
jobs = c.get_job(queues)
for queue_name, job_id, job in jobs:
rnd = random.random()
# as this is a test processor, we don't do any validation on
# the actual job body, so lets just pay attention to id's
if rnd >= nack:
print ">>> received job:", job_id
c.ack_job(job_id)
else:
print ">>> bouncing job:", job_id
c.nack_job(job_id) | 0.000704 |
def add(self, key, val):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
key_lower = key.lower()
new_vals = [key, val]
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
vals.append(val) | 0.003824 |
def select(sockets, remain=None):
"""This function is called during sendrecv() routine to select
the available sockets.
"""
if remain is not None:
max_timeout = remain / len(sockets)
for s in sockets:
if s.timeout > max_timeout:
s.timeout = max_timeout
# python-can sockets aren't selectable, so we return all of them
# sockets, None (means use the socket's recv() )
return sockets, None | 0.003968 |
def vm_disk_save(name, kwargs=None, call=None):
'''
Sets the disk to be saved in the given image.
.. versionadded:: 2016.3.0
name
The name of the VM containing the disk to save.
disk_id
The ID of the disk to save.
image_name
The name of the new image where the disk will be saved.
image_type
The type for the new image. If not set, then the default ``ONED`` Configuration
will be used. Other valid types include: OS, CDROM, DATABLOCK, KERNEL, RAMDISK,
and CONTEXT.
snapshot_id
The ID of the snapshot to export. If not set, the current image state will be
used.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_disk_save my-vm disk_id=1 image_name=my-new-image
salt-cloud -a vm_disk_save my-vm disk_id=1 image_name=my-new-image image_type=CONTEXT snapshot_id=10
'''
if call != 'action':
raise SaltCloudSystemExit(
'The vm_disk_save action must be called with -a or --action.'
)
if kwargs is None:
kwargs = {}
disk_id = kwargs.get('disk_id', None)
image_name = kwargs.get('image_name', None)
image_type = kwargs.get('image_type', '')
snapshot_id = int(kwargs.get('snapshot_id', '-1'))
if disk_id is None or image_name is None:
raise SaltCloudSystemExit(
'The vm_disk_save function requires a \'disk_id\' and an \'image_name\' '
'to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': name}))
response = server.one.vm.disksave(auth,
vm_id,
int(disk_id),
image_name,
image_type,
snapshot_id)
data = {
'action': 'vm.disksave',
'saved': response[0],
'image_id': response[1],
'error_code': response[2],
}
return data | 0.002885 |
def to_bytes(self):
'''
Returns serialized bytes object representing all headers/
payloads in this packet'''
rawlist = []
i = len(self._headers)-1
while i >= 0:
self._headers[i].pre_serialize(b''.join(rawlist), self, i)
rawlist.insert(0, self._headers[i].to_bytes())
i -= 1
self._raw = b''.join(rawlist)
return self._raw | 0.004762 |
def index(self, column): # pylint: disable=C6409
"""Fetches the column number (0 indexed).
Args:
column: A string, column to fetch the index of.
Returns:
An int, the row index number.
Raises:
ValueError: The specified column was not found.
"""
for i, key in enumerate(self._keys):
if key == column:
return i
raise ValueError('Column "%s" not found.' % column) | 0.004454 |
def load_from_dict(dct=None, **kwargs):
"""
Load configuration from a dictionary.
"""
dct = dct or dict()
dct.update(kwargs)
def _load_from_dict(metadata):
return dict(dct)
return _load_from_dict | 0.004292 |
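# A minimal usage sketch for load_from_dict: the returned loader ignores its
# metadata argument and simply returns a copy of the merged dict.
loader = load_from_dict({"host": "localhost"}, port=5432)
loader(metadata=None)
# -> {'host': 'localhost', 'port': 5432}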
def find_comments_by_video(self, video_id, page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=35
"""
url = 'https://openapi.youku.com/v2/comments/by_video.json'
params = {
'client_id': self.client_id,
'video_id': video_id,
'page': page,
'count': count
}
r = requests.get(url, params=params)
check_error(r)
return r.json() | 0.004515 |
def kill_zombies(self, zombies, session=None):
"""
Fail given zombie tasks, which are tasks that haven't
had a heartbeat for too long, in the current DagBag.
:param zombies: zombie task instances to kill.
:type zombies: airflow.utils.dag_processing.SimpleTaskInstance
:param session: DB session.
:type session: sqlalchemy.orm.session.Session
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
for zombie in zombies:
if zombie.dag_id in self.dags:
dag = self.dags[zombie.dag_id]
if zombie.task_id in dag.task_ids:
task = dag.get_task(zombie.task_id)
ti = TaskInstance(task, zombie.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = zombie.start_date
ti.end_date = zombie.end_date
ti.try_number = zombie.try_number
ti.state = zombie.state
ti.test_mode = configuration.getboolean('core', 'unit_test_mode')
ti.handle_failure("{} detected as zombie".format(ti),
ti.test_mode, ti.get_template_context())
self.log.info(
'Marked zombie job %s as %s', ti, ti.state)
Stats.incr('zombies_killed')
session.commit() | 0.003333 |
def delete_operation(self, name):
"""
Deletes the long-running operation.
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:type name: str
:return: none if successful.
:rtype: dict
"""
conn = self.get_conn()
resp = (conn
.projects()
.operations()
.delete(name=name)
.execute(num_retries=self.num_retries))
return resp | 0.003401 |
def calculate_amr(cls, is_extended, from_id, to_id, rtr_only=False, rtr_too=True):
"""
Calculates AMR using CAN-ID range as parameter.
:param bool is_extended: If True parameters from_id and to_id contains 29-bit CAN-ID.
:param int from_id: First CAN-ID which should be received.
:param int to_id: Last CAN-ID which should be received.
:param bool rtr_only: If True only RTR-Messages should be received, and rtr_too will be ignored.
:param bool rtr_too: If True CAN data frames and RTR-Messages should be received.
:return: Value for AMR.
:rtype: int
"""
return (((from_id ^ to_id) << 3) | (0x7 if rtr_too and not rtr_only else 0x3)) if is_extended else \
(((from_id ^ to_id) << 21) | (0x1FFFFF if rtr_too and not rtr_only else 0xFFFFF)) | 0.009547 |
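# A worked sketch for the standard 11-bit branch of calculate_amr: IDs
# 0x100..0x10F differ only in the low four bits, so with data frames and RTR
# frames both accepted the resulting mask is (0xF << 21) | 0x1FFFFF.
assert ((0x100 ^ 0x10F) << 21) | 0x1FFFFF == 0x01FFFFFF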
def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
#shuffle data in each bucket
random.shuffle(self.idx)
for i, buck in enumerate(self.sentences):
self.indices[i], self.sentences[i], self.characters[i], self.label[i] = shuffle(self.indices[i],
self.sentences[i],
self.characters[i],
self.label[i])
self.ndindex = []
self.ndsent = []
self.ndchar = []
self.ndlabel = []
#for each bucket of data
for i, buck in enumerate(self.sentences):
#append the lists with an array
self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))
self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype))
self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype))
self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype)) | 0.009796 |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._status is not None:
return False
if self._balance_preferred is not None:
return False
if self._balance_threshold_low is not None:
return False
if self._method_fill is not None:
return False
if self._issuer is not None:
return False
return True | 0.004545 |
def get_aside_of_type(self, block, aside_type):
"""
Return the aside of the given aside_type which might be decorating this `block`.
Arguments:
block (:class:`.XBlock`): The block to retrieve asides for.
aside_type (`str`): the type of the aside
"""
# TODO: This function will need to be extended if we want to allow:
# a) XBlockAsides to statically indicated which types of blocks they can comment on
# b) XBlockRuntimes to limit the selection of asides to a subset of the installed asides
# c) Optimize by only loading asides that actually decorate a particular view
if self.id_generator is None:
raise Exception("Runtimes must be supplied with an IdGenerator to load XBlockAsides.")
usage_id = block.scope_ids.usage_id
aside_cls = self.load_aside_type(aside_type)
definition_id = self.id_reader.get_definition_id(usage_id)
aside_def_id, aside_usage_id = self.id_generator.create_aside(definition_id, usage_id, aside_type)
scope_ids = ScopeIds(self.user_id, aside_type, aside_def_id, aside_usage_id)
return aside_cls(runtime=self, scope_ids=scope_ids) | 0.007383 |
def resolve(self):
"""
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes under
Windows).
"""
s = self._flavour.resolve(self)
if s is None:
# No symlink resolution => for consistency, raise an error if
# the path doesn't exist or is forbidden
self.stat()
s = str(self.absolute())
# Now we have no symlinks in the path, it's safe to normalize it.
normed = self._flavour.pathmod.normpath(s)
obj = self._from_parts((normed,), init=False)
obj._init(template=self)
return obj | 0.002894 |
def exit(message, code=0):
""" output a message to stdout and terminates the process.
:param message:
Message to be outputed.
:type message:
String
:param code:
The termination code. Default is 0
:type code:
int
:returns:
void
"""
v = VerbosityMixin()
if code == 0:
v.output(message, normal=True, arrow=True)
v.output('Done!', normal=True, arrow=True)
else:
v.output(message, normal=True, error=True)
sys.exit(code) | 0.001901 |
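# A minimal usage sketch for exit(): report success and stop, or fail with a
# nonzero termination code.
exit('Processing complete')            # prints the message plus "Done!", exits 0
exit('Could not reach the server', 1)  # prints as an error and exits with code 1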
def secure_authorized_channel(
credentials, request, target, ssl_credentials=None, **kwargs):
"""Creates a secure authorized gRPC channel.
This creates a channel with SSL and :class:`AuthMetadataPlugin`. This
channel can be used to create a stub that can make authorized requests.
Example::
import google.auth
import google.auth.transport.grpc
import google.auth.transport.requests
from google.cloud.speech.v1 import cloud_speech_pb2
# Get credentials.
credentials, _ = google.auth.default()
# Get an HTTP request function to refresh credentials.
request = google.auth.transport.requests.Request()
# Create a channel.
channel = google.auth.transport.grpc.secure_authorized_channel(
credentials, 'speech.googleapis.com:443', request)
# Use the channel to create a stub.
cloud_speech.create_Speech_stub(channel)
Args:
credentials (google.auth.credentials.Credentials): The credentials to
add to requests.
request (google.auth.transport.Request): A HTTP transport request
object used to refresh credentials as needed. Even though gRPC
is a separate transport, there's no way to refresh the credentials
without using a standard http transport.
target (str): The host and port of the service.
ssl_credentials (grpc.ChannelCredentials): Optional SSL channel
credentials. This can be used to specify different certificates.
kwargs: Additional arguments to pass to :func:`grpc.secure_channel`.
Returns:
grpc.Channel: The created gRPC channel.
"""
# Create the metadata plugin for inserting the authorization header.
metadata_plugin = AuthMetadataPlugin(credentials, request)
# Create a set of grpc.CallCredentials using the metadata plugin.
google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin)
if ssl_credentials is None:
ssl_credentials = grpc.ssl_channel_credentials()
# Combine the ssl credentials and the authorization credentials.
composite_credentials = grpc.composite_channel_credentials(
ssl_credentials, google_auth_credentials)
return grpc.secure_channel(target, composite_credentials, **kwargs) | 0.000429 |
def add_expansion(self, expansion_node):
"""
Add a child expansion node to the type node's expansions.
If an expansion node with the same name is already present in type node's expansions, the new and existing
expansion node's children are merged.
:param expansion_node: The expansion node to add
:type expansion_node: ExpansionNode
"""
# Check for existing expansion node with the same name
existing_expansion_node = self.get_expansion(expansion_node.name)
if existing_expansion_node:
# Expansion node exists with the same name, merge child expansions.
for child_expansion in expansion_node.expansions:
existing_expansion_node.add_expansion(child_expansion)
else:
# Add the expansion node.
self._expansions[expansion_node.name] = expansion_node | 0.003337 |
def roundplus(number):
"""
given a number, this function rounds the number as in the following examples:
87 -> 87, 100 -> 100+, 188 -> 100+, 999 -> 900+, 1001 -> 1K+, ...etc
"""
num = str(number)
if not num.isdigit():
return num
num = str(number)
digits = len(num)
rounded = '100+'
if digits < 3:
rounded = num
elif digits == 3:
rounded = num[0] + '00+'
elif digits == 4:
rounded = num[0] + 'K+'
elif digits == 5:
rounded = num[:1] + 'K+'
else:
rounded = '100K+'
return rounded | 0.001704 |
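# A few worked values for roundplus, following the digit-count branches above:
# roundplus(87) -> '87', roundplus(188) -> '100+', roundplus(999) -> '900+',
# roundplus(1234) -> '1K+', roundplus(123456) -> '100K+'.
assert roundplus(188) == '100+' and roundplus(1234) == '1K+'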
def __getOptimizedMetricLabel(self):
""" Get the label for the metric being optimized. This function also caches
the label in the instance variable self._optimizedMetricLabel
Parameters:
-----------------------------------------------------------------------
metricLabels: A sequence of all the labels being computed for this model
Returns: The label for the metric being optimized over
"""
matchingKeys = matchPatterns([self._optimizeKeyPattern],
self._getMetricLabels())
if len(matchingKeys) == 0:
raise Exception("None of the generated metrics match the specified "
"optimization pattern: %s. Available metrics are %s" % \
(self._optimizeKeyPattern, self._getMetricLabels()))
elif len(matchingKeys) > 1:
raise Exception("The specified optimization pattern '%s' matches more "
"than one metric: %s" % (self._optimizeKeyPattern, matchingKeys))
return matchingKeys[0] | 0.006809 |
def decode(self, targets, encoder_outputs, attention_bias):
"""Generate logits for each value in the target sequence.
Args:
targets: target values for the output sequence.
int tensor with shape [batch_size, target_length]
encoder_outputs: continuous representation of input sequence.
float tensor with shape [batch_size, input_length, hidden_size]
attention_bias: float tensor with shape [batch_size, 1, 1, input_length]
Returns:
float32 tensor with shape [batch_size, target_length, vocab_size]
"""
with tf.name_scope("decode"):
# Prepare inputs to decoder layers by shifting targets, adding positional
# encoding and applying dropout.
decoder_inputs = self.embedding_softmax_layer(targets)
with tf.name_scope("shift_targets"):
# Shift targets to the right, and remove the last element
decoder_inputs = tf.pad(
decoder_inputs, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
with tf.name_scope("add_pos_encoding"):
length = tf.shape(decoder_inputs)[1]
decoder_inputs += model_utils.get_position_encoding(
length, self.params.hidden_size)
if self.train:
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=self.params.layer_postprocess_dropout)
decoder_inputs = tf.nn.dropout(
decoder_inputs, 1 - self.params.layer_postprocess_dropout)
# Run values
decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(
length)
outputs = self.decoder_stack(
decoder_inputs, encoder_outputs, decoder_self_attention_bias,
attention_bias)
logits = self.embedding_softmax_layer.linear(outputs)
return logits | 0.00723 |
def get_empirical_ar_params(train_datas, params):
"""
Estimate the parameters of an AR observation model
by fitting a single AR model to the entire dataset.
"""
assert isinstance(train_datas, list) and len(train_datas) > 0
datadimension = train_datas[0].shape[1]
assert params["nu_0"] > datadimension + 1
# Initialize the observation parameters
obs_params = dict(nu_0=params["nu_0"],
S_0=params['S_0'],
M_0=params['M_0'],
K_0=params['K_0'],
affine=params['affine'])
# Fit an AR model to the entire dataset
obs_distn = AutoRegression(**obs_params)
obs_distn.max_likelihood(train_datas)
# Use the inferred noise covariance as the prior mean
# E_{IW}[S] = S_0 / (nu_0 - datadimension - 1)
obs_params["S_0"] = obs_distn.sigma * (params["nu_0"] - datadimension - 1)
obs_params["M_0"] = obs_distn.A.copy()
return obs_params | 0.002045 |
def can_delete_repositories(self):
"""Tests if this user can delete ``Repositories``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting a
``Repository`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may not wish to offer
delete operations to unauthorized users.
:return: ``false`` if ``Repository`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
url_path = construct_url('authorization',
bank_id=self._catalog_idstr)
return self._get_request(url_path)['objectiveBankHints']['canDelete'] | 0.00369 |
def mac_move_detect_enable(self, **kwargs):
"""Enable mac move detect enable on vdx switches
Args:
get (bool): Get config instead of editing config. (True, False)
delete (bool): True, delete the mac-move-detect-enable.
(True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
None
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.mac_move_detect_enable()
... output = dev.interface.mac_move_detect_enable(get=True)
... output = dev.interface.mac_move_detect_enable(delete=True)
"""
callback = kwargs.pop('callback', self._callback)
mac_move = getattr(self._mac_address_table,
'mac_address_table_mac_move_mac_move_'
'detect_enable')
config = mac_move()
if kwargs.pop('get', False):
output = callback(config, handler='get_config')
item = output.data.find('.//{*}mac-move-detect-enable')
if item is not None:
return True
else:
return None
if kwargs.pop('delete', False):
config.find('.//*mac-move-detect-enable').set('operation',
'delete')
return callback(config) | 0.001131 |
def set_up(self, u, engine):
"""Special setup for mysql engines"""
# add the reconnecting PoolListener that will detect a
# disconnected connection and automatically start a new
# one. This provides a measure of additional safety over
# the pool_recycle parameter, and is useful when e.g., the
# mysql server goes away
def checkout_listener(dbapi_con, con_record, con_proxy):
try:
cursor = dbapi_con.cursor()
cursor.execute("SELECT 1")
except dbapi_con.OperationalError as ex:
if self.is_disconnect(ex.args):
# sqlalchemy will re-create the connection
log.msg('connection will be removed')
raise sa.exc.DisconnectionError()
log.msg('exception happened {}'.format(ex))
raise
# older versions of sqlalchemy require the listener to be specified
# in the kwargs, in a class instance
if sautils.sa_version() < (0, 7, 0):
class ReconnectingListener:
pass
rcl = ReconnectingListener()
rcl.checkout = checkout_listener
engine.pool.add_listener(rcl)
else:
sa.event.listen(engine.pool, 'checkout', checkout_listener) | 0.001497 |
def make_diagonal_povm(pi_basis, confusion_rate_matrix):
"""
Create a DiagonalPOVM from a ``pi_basis`` and the ``confusion_rate_matrix`` associated with a
readout.
See also the grove documentation.
:param OperatorBasis pi_basis: An operator basis of rank-1 projection operators.
:param numpy.ndarray confusion_rate_matrix: The matrix of detection probabilities conditional
on a prepared qubit state.
:return: The POVM corresponding to confusion_rate_matrix.
:rtype: DiagonalPOVM
"""
confusion_rate_matrix = np.asarray(confusion_rate_matrix)
if not np.allclose(confusion_rate_matrix.sum(axis=0), np.ones(confusion_rate_matrix.shape[1])):
raise CRMUnnormalizedError("Unnormalized confusion matrix:\n{}".format(
confusion_rate_matrix))
if not (confusion_rate_matrix >= 0).all() or not (confusion_rate_matrix <= 1).all():
raise CRMValueError("Confusion matrix must have values in [0, 1]:"
"\n{}".format(confusion_rate_matrix))
ops = [sum((pi_j * pjk for (pi_j, pjk) in izip(pi_basis.ops, pjs)), 0)
for pjs in confusion_rate_matrix]
return DiagonalPOVM(pi_basis=pi_basis, confusion_rate_matrix=confusion_rate_matrix, ops=ops) | 0.005596 |
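
A quick standalone check (illustrative, not taken from the grove source) of the normalisation that make_diagonal_povm enforces: each column of the confusion matrix is a conditional distribution over measured outcomes, so it must sum to 1 and lie in [0, 1].

import numpy as np

# hypothetical single-qubit readout: columns = prepared |0>, |1>
confusion_rate_matrix = np.array([[0.95, 0.08],
                                  [0.05, 0.92]])
assert np.allclose(confusion_rate_matrix.sum(axis=0), 1.0)
assert ((confusion_rate_matrix >= 0) & (confusion_rate_matrix <= 1)).all()
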
def describe_configs(self, config_resources, include_synonyms=False):
"""Fetch configuration parameters for one or more Kafka resources.
        :param config_resources: A list of ConfigResource objects.
Any keys in ConfigResource.configs dict will be used to filter the
result. Setting the configs dict to None will get all values. An
empty dict will get zero values (as per Kafka protocol).
:param include_synonyms: If True, return synonyms in response. Not
supported by all versions. Default: False.
:return: Appropriate version of DescribeConfigsResponse class.
"""
version = self._matching_api_version(DescribeConfigsRequest)
if version == 0:
if include_synonyms:
raise IncompatibleBrokerVersion(
"include_synonyms requires DescribeConfigsRequest >= v1, which is not supported by Kafka {}."
.format(self.config['api_version']))
request = DescribeConfigsRequest[version](
resources=[self._convert_describe_config_resource_request(config_resource) for config_resource in config_resources]
)
elif version == 1:
request = DescribeConfigsRequest[version](
resources=[self._convert_describe_config_resource_request(config_resource) for config_resource in config_resources],
include_synonyms=include_synonyms
)
else:
raise NotImplementedError(
"Support for DescribeConfigs v{} has not yet been added to KafkaAdminClient."
.format(version))
return self._send_request_to_node(self._client.least_loaded_node(), request) | 0.004014 |
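
A hedged usage sketch assuming the kafka-python package; the broker address and topic name are placeholders.

from kafka.admin import KafkaAdminClient, ConfigResource, ConfigResourceType

admin = KafkaAdminClient(bootstrap_servers="localhost:9092")      # hypothetical broker
resource = ConfigResource(ConfigResourceType.TOPIC, "my-topic")   # configs=None -> all values
response = admin.describe_configs([resource])
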
async def get_shade(self, shade_id, from_cache=True) -> BaseShade:
"""Get a shade instance based on shade id."""
if not from_cache:
await self.get_shades()
for _shade in self.shades:
if _shade.id == shade_id:
return _shade
raise ResourceNotFoundException("Shade not found. Id: {}".format(shade_id)) | 0.008108 |
def cidfrm(cent, lenout=_default_len_out):
"""
Retrieve frame ID code and name to associate with a frame center.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cidfrm_c.html
:param cent: An object to associate a frame with.
:type cent: int
:param lenout: Available space in output string frname.
:type lenout: int
:return:
frame ID code,
name to associate with a frame center.
:rtype: tuple
"""
cent = ctypes.c_int(cent)
lenout = ctypes.c_int(lenout)
frcode = ctypes.c_int()
frname = stypes.stringToCharP(lenout)
found = ctypes.c_int()
libspice.cidfrm_c(cent, lenout, ctypes.byref(frcode), frname,
ctypes.byref(found))
return frcode.value, stypes.toPythonString(frname), bool(found.value) | 0.00123 |
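
A hedged usage sketch calling the wrapper defined above; the NAIF ID and the expected frame are taken from the standard built-in frame set, so treat the exact return values as an assumption (newer SPICE wrappers may raise instead of returning a found flag).

frcode, frname, found = cidfrm(399)   # 399 = Earth (NAIF body ID)
if found:
    print(frcode, frname)             # expected roughly: 10013 IAU_EARTH
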
def find_embedding(elt, embedding=None):
"""Try to get elt embedding elements.
:param embedding: embedding element. Must have a module.
:return: a list of [module [,class]*] embedding elements which define elt.
:rtype: list
"""
result = [] # result is empty in the worst case
# start to get module
module = getmodule(elt)
if module is not None: # if module exists
visited = set() # cache to avoid to visit twice same element
if embedding is None:
embedding = module
# list of compounds elements which construct the path to elt
compounds = [embedding]
while compounds: # while compounds elements exist
# get last compound
last_embedding = compounds[-1]
            # stop iterating over compounds when the last embedding is elt
if last_embedding == elt:
result = compounds # result is compounds
break
else:
# search among embedded elements
for name in dir(last_embedding):
# get embedded element
embedded = getattr(last_embedding, name)
try: # check if embedded has already been visited
if embedded not in visited:
visited.add(embedded) # set it as visited
else:
continue
except TypeError:
pass
else:
# get embedded module
embedded_module = getmodule(embedded)
# and compare it with elt module
if embedded_module is module:
# add embedded to compounds
compounds.append(embedded)
# end the second loop
break
else:
                # remove the last element if no compound element is found
compounds.pop(-1)
return result | 0.000473 |
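
A hedged usage sketch of the function above; json.dumps is just an illustrative target, and the exact list contents depend on how the module is laid out.

import json

path = find_embedding(json.dumps)
# expected roughly: [<module 'json'>, <function dumps>]
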
def get_complete_version(version=None):
"""Returns a tuple of the graphene version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from graphene import VERSION as version
else:
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
return version | 0.005249 |
def setup_geoserver(options):
"""Prepare a testing instance of GeoServer."""
fast = options.get('fast', False)
download_dir = path('downloaded')
if not download_dir.exists():
download_dir.makedirs()
geoserver_dir = path('geoserver')
geoserver_bin = path('geoserver_ext/target/geoserver.war')
if not geoserver_bin.exists():
geoserver_bin = download_dir / os.path.basename(ROGUE_GEOSERVER_URL)
grab(ROGUE_GEOSERVER_URL, geoserver_bin, "geoserver binary")
jetty_runner = download_dir / os.path.basename(JETTY_RUNNER_URL)
grab(JETTY_RUNNER_URL, jetty_runner, "jetty runner")
data_dir = download_dir / os.path.basename(DATA_DIR_URL)
grab(DATA_DIR_URL, data_dir, "data dir")
if not geoserver_dir.exists():
geoserver_dir.makedirs()
webapp_dir = geoserver_dir
if not webapp_dir:
webapp_dir.makedirs()
        print('extracting geoserver')
z = zipfile.ZipFile(geoserver_bin, "r")
z.extractall(webapp_dir)
# Set the geonode auth config to dev port 8000
sh("perl -pi.back -e 's/localhost/localhost:8000/g;' geoserver/data/security/auth/geonodeAuthProvider/config.xml") | 0.001661 |
def convert_multinomial(node, **kwargs):
"""Map MXNet's multinomial operator attributes to onnx's
Multinomial operator and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get("dtype", 'int32'))]
sample_size = convert_string_to_list(attrs.get("shape", '1'))
if len(sample_size) < 2:
sample_size = sample_size[-1]
else:
raise AttributeError("ONNX currently supports integer sample_size only")
node = onnx.helper.make_node(
"Multinomial",
input_nodes,
[name],
dtype=dtype,
sample_size=sample_size,
name=name,
)
return [node] | 0.004161 |
def grids(fig=None, value='solid'):
"""Sets the value of the grid_lines for the axis to the passed value.
The default value is `solid`.
Parameters
----------
fig: Figure or None(default: None)
The figure for which the axes should be edited. If the value is None,
the current figure is used.
value: {'none', 'solid', 'dashed'}
The display of the grid_lines
"""
if fig is None:
fig = current_figure()
for a in fig.axes:
a.grid_lines = value | 0.001938 |
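
A hedged usage sketch assuming the bqplot pyplot API; the data values are placeholders.

import bqplot.pyplot as plt

fig = plt.figure()
plt.plot([0, 1, 2], [3, 1, 2])   # hypothetical data
grids(fig, value='dashed')       # dashed grid lines on every axis of fig
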
def _get_environment_updates(self, display_all_distributions=False):
"""
        Check all packages installed in the environment to see if there are
        any updates available.
Args:
display_all_distributions (bool): Return distribution even if it is
up-to-date. Defaults to ``False``.
Returns:
list: A list of Update objects ordered based on ``instance.name``.
"""
updates = []
for distribution in self.pip.get_installed_distributions():
versions = self.get_available_versions(distribution.project_name)
max_version = max(versions.keys()) if versions else UNKNOW_NUM
update = None
distribution_version = self._parse_version(distribution.version)
if versions and max_version > distribution_version:
update = Update(
distribution.project_name,
distribution.version,
versions[max_version],
prelease=max_version[-1]
)
elif (
display_all_distributions and
max_version == distribution_version
):
update = Update(
distribution.project_name,
distribution.version,
versions[max_version],
)
elif display_all_distributions:
update = Update(
distribution.project_name,
distribution.version,
UNKNOWN
)
if update:
updates.append(update)
return sorted(updates, key=lambda x: x.name) | 0.001157 |
def select_authors_by_geo(query):
"""Pass exact name (case insensitive) of geography name, return ordered set
of author ids.
"""
for geo, ids in AUTHOR_GEO.items():
if geo.casefold() == query.casefold():
return set(ids) | 0.003922 |
def select(self, criterion, keepboth=False):
"""Filter current file collections, create another file collections
contains all winfile with criterion=True.
How to construct your own criterion function, see
:meth:`FileCollection.from_path_by_criterion`.
:param criterion: customize filter function
:type criterion: function
:param keepboth: if True, returns two file collections, one is files
with criterion=True, another is False.
:type keepboth: boolean
        From the current file collection, select the files that satisfy the
        rules in ``criterion`` and build a new FileCollection from them. When
        the ``keepboth`` parameter is True, two FileCollections are returned:
        one with the files that match the criterion and one with those that
        do not.
"""
if keepboth:
fcs_yes, fcs_no = FileCollection(), FileCollection()
for winfile in self.files.values():
if criterion(winfile):
fcs_yes.files[winfile.abspath] = winfile
else:
fcs_no.files[winfile.abspath] = winfile
return fcs_yes, fcs_no
else:
fcs = FileCollection()
for winfile in self.files.values():
if criterion(winfile):
fcs.files[winfile.abspath] = winfile
return fcs | 0.006116 |
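
A hedged usage sketch; fcs is an existing FileCollection, and the size_on_disk attribute used in the criterion is an assumption about the winfile objects, not taken from the snippet above.

def bigger_than_1mb(winfile):
    # hypothetical attribute name on the winfile objects
    return winfile.size_on_disk >= 1024 * 1024

big_only = fcs.select(bigger_than_1mb)
big, small = fcs.select(bigger_than_1mb, keepboth=True)
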
def delete_scheduling_block(block_id):
"""Delete Scheduling Block with the specified ID"""
DB.delete('scheduling_block/{}'.format(block_id))
    # Add an event to the scheduling block event list to notify
    # that a scheduling block was deleted from the db
DB.rpush('scheduling_block_events',
json.dumps(dict(type="deleted", id=block_id))) | 0.002755 |
def _backward_kill_word(text, pos):
""""
Kill the word behind pos. Word boundaries are the same as those
used by _backward_word.
"""
text, new_pos = _backward_word(text, pos)
return text[:new_pos] + text[pos:], new_pos | 0.004132 |
def print(self, rows):
""" Write the data to our output stream (stdout). If the table is not
rendered yet, we will make a renderer instance which will freeze
state. """
if not self.default_renderer:
self.default_renderer = self.make_renderer()
self.default_renderer.print(rows) | 0.006061 |
def is_sqlatype_text_over_one_char(
coltype: Union[TypeEngine, VisitableType]) -> bool:
"""
Is the SQLAlchemy column type a string type that's more than one character
long?
"""
coltype = _coltype_to_typeengine(coltype)
return is_sqlatype_text_of_length_at_least(coltype, 2) | 0.003279 |
def merge(*dicts, **kwargs):
"""Merges two or more dictionaries into a single one.
Optional keyword arguments allow to control the exact way
in which the dictionaries will be merged.
:param overwrite:
Whether repeated keys should have their values overwritten,
retaining the last value, as per given order of dictionaries.
This is the default behavior (equivalent to ``overwrite=True``).
If ``overwrite=False``, repeated keys are simply ignored.
Example::
>> merge({'a': 1}, {'a': 10, 'b': 2}, overwrite=True)
{'a': 10, 'b': 2}
>> merge({'a': 1}, {'a': 10, 'b': 2}, overwrite=False)
{'a': 1, 'b': 2}
:param deep:
Whether merging should proceed recursively, and cause
corresponding subdictionaries to be merged into each other.
By default, this does not happen (equivalent to ``deep=False``).
Example::
>> merge({'a': {'b': 1}}, {'a': {'c': 2}}, deep=False)
{'a': {'c': 2}}
>> merge({'a': {'b': 1}}, {'a': {'c': 2}}, deep=True)
{'a': {'b': 1, 'c': 2}}
:return: Merged dictionary
.. note:: For ``dict``\ s ``a`` and ``b``, ``merge(a, b)`` is equivalent
to ``extend({}, a, b)``.
.. versionadded:: 0.0.2
The ``overwrite`` keyword argument.
"""
ensure_argcount(dicts, min_=1)
dicts = list(imap(ensure_mapping, dicts))
ensure_keyword_args(kwargs, optional=('deep', 'overwrite'))
return _nary_dict_update(dicts, copy=True,
deep=kwargs.get('deep', False),
overwrite=kwargs.get('overwrite', True)) | 0.001176 |
def addition_circuit(
addend0: Qubits,
addend1: Qubits,
carry: Qubits) -> Circuit:
"""Returns a quantum circuit for ripple-carry addition. [Cuccaro2004]_
    Requires two carry qubits (input and output). The result is returned in
addend1.
.. [Cuccaro2004]
A new quantum ripple-carry addition circuit, Steven A. Cuccaro,
Thomas G. Draper, Samuel A. Kutin, David Petrie Moulton
arXiv:quant-ph/0410184 (2004)
"""
if len(addend0) != len(addend1):
raise ValueError('Number of addend qubits must be equal')
if len(carry) != 2:
raise ValueError('Expected 2 carry qubits')
def _maj(qubits: Qubits) -> Circuit:
q0, q1, q2 = qubits
circ = Circuit()
circ += CNOT(q2, q1)
circ += CNOT(q2, q0)
circ += CCNOT(q0, q1, q2)
return circ
def _uma(qubits: Qubits) -> Circuit:
q0, q1, q2 = qubits
circ = Circuit()
circ += CCNOT(q0, q1, q2)
circ += CNOT(q2, q0)
circ += CNOT(q0, q1)
return circ
qubits = [carry[0]] + list(chain.from_iterable(
zip(reversed(addend1), reversed(addend0)))) + [carry[1]]
circ = Circuit()
for n in range(0, len(qubits)-3, 2):
circ += _maj(qubits[n:n+3])
circ += CNOT(qubits[-2], qubits[-1])
for n in reversed(range(0, len(qubits)-3, 2)):
circ += _uma(qubits[n:n+3])
return circ | 0.000697 |
def load_pkl(filenames):
"""
Unpickle file contents.
Args:
filenames (str): Can be one or a list or tuple of filenames to retrieve.
Returns:
Times: A single object, or from a collection of filenames, a list of Times objects.
Raises:
TypeError: If any loaded object is not a Times object.
"""
if not isinstance(filenames, (list, tuple)):
filenames = [filenames]
times = []
for name in filenames:
name = str(name)
with open(name, 'rb') as file:
loaded_obj = pickle.load(file)
if not isinstance(loaded_obj, Times):
raise TypeError("At least one loaded object is not a Times data object.")
times.append(loaded_obj)
return times if len(times) > 1 else times[0] | 0.005013 |
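
A hedged usage sketch with hypothetical file names.

single = load_pkl('run1.pkl')                   # one Times object
several = load_pkl(['run1.pkl', 'run2.pkl'])    # list of Times objects
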
def viewbox(self):
"""
Return bounding box of the viewport.
:return: (left, top, right, bottom) `tuple`.
"""
return self.left, self.top, self.right, self.bottom | 0.00995 |
def remove_backslash_r(filename, encoding):
"""
A helpful utility to remove Carriage Return from any file.
This will read a file into memory,
and overwrite the contents of the original file.
TODO: This function may be a liability
:param filename:
:return:
"""
    with open(filename, 'r', encoding=encoding, newline='') as filereader:
contents = filereader.read()
contents = re.sub(r'\r', '', contents)
with open(filename, "w") as filewriter:
filewriter.truncate()
filewriter.write(contents) | 0.004847 |
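
A hedged usage sketch with a hypothetical path; note that the file is rewritten in place.

remove_backslash_r('exports/windows_dump.csv', encoding='utf-8')
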
def run_command(self, command, shell=True, env=None, execute='/bin/bash',
return_code=None):
"""Run a shell command.
The options available:
        * ``shell`` to be enabled or disabled, which provides the ability
          to execute arbitrary strings or not. If disabled, commands must be
          provided as a ``list``
* ``env`` is an environment override and or manipulation setting
which sets environment variables within the locally executed
shell.
* ``execute`` changes the interpreter which is executing the
command(s).
* ``return_code`` defines the return code that the command must
have in order to ensure success. This can be a list of return
codes if multiple return codes are acceptable.
:param command: ``str``
        :param shell: ``bool``
:param env: ``dict``
:param execute: ``str``
:param return_code: ``int``
"""
self.log.info('Command: [ %s ]', command)
if env is None:
env = os.environ
if self.debug is False:
stdout = open(os.devnull, 'wb')
else:
stdout = subprocess.PIPE
if return_code is None:
return_code = [0]
stderr = subprocess.PIPE
process = subprocess.Popen(
command,
stdout=stdout,
stderr=stderr,
executable=execute,
env=env,
shell=shell
)
output, error = process.communicate()
if process.returncode not in return_code:
self.log.debug('Command Output: %s, Error Msg: %s', output, error)
return error, False
else:
self.log.debug('Command Output: %s', output)
return output, True | 0.0016 |
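
A hedged usage sketch; the method belongs to a class that provides self.log and self.debug, and the command and accepted return codes are placeholders.

output, success = self.run_command('systemctl restart nginx',
                                   return_code=[0, 3])
if not success:
    self.log.error('restart failed: %s', output)
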
def from_ordered_sequence(cls, iseq):
"""
Return the root of a balanced binary search tree populated with the
values in iterable *iseq*.
"""
seq = list(iseq)
# optimize for usually all fits by making longest first
bst = cls(seq.pop())
bst._insert_from_ordered_sequence(seq)
return bst | 0.005618 |
def cut(img, left, above, right, down):
"""
    Copy a rectangular region from an image.
    box = (100, 100, 400, 400)
    region = im.crop(box)
    The rectangular region is defined by a 4-tuple of the left, upper, right
    and lower coordinates. This library puts the coordinate origin at the
    top-left corner, with units in pixels, so the code above copies a
    300x300-pixel rectangular region.
    :param img: the image loaded into memory
    :param left: left coordinate
    :param above: upper coordinate
    :param right: right coordinate
    :param down: lower coordinate
    :return: the cropped region
"""
box = (left, above, right, down)
region = img.crop(box)
return region | 0.002381 |
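
A hedged usage sketch assuming a Pillow image and a hypothetical file name; it copies the same 300x300-pixel region described in the docstring.

from PIL import Image

img = Image.open('photo.jpg')           # hypothetical file
region = cut(img, 100, 100, 400, 400)   # left, above, right, down
region.save('region.jpg')
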
def _missing_imageinfo(self):
"""
returns list of image filenames that are missing info
"""
if 'image' not in self.data:
return
missing = []
for img in self.data['image']:
if 'url' not in img:
missing.append(img['file'])
return list(set(missing)) | 0.005848 |