text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def new_type(cls, **kwargs) -> typing.Type:
"""Create a user defined type.
The new type will contain all attributes of the `cls` type passed in.
Any attribute's value can be overwritten using kwargs.
:param kwargs: Can include any attribute defined in
the provided user defined type.
"""
props = dict(cls.__dict__)
props.update(kwargs)
return type(cls.__name__, (cls,), props) | 0.002392 |
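A minimal usage sketch of `new_type`; the `Color` class and its attribute values are hypothetical, added only for illustration:

```python
import typing


def new_type(cls, **kwargs) -> typing.Type:
    props = dict(cls.__dict__)
    props.update(kwargs)
    return type(cls.__name__, (cls,), props)


class Color:  # hypothetical user-defined type
    name = "red"
    code = "#ff0000"


Blue = new_type(Color, name="blue", code="#0000ff")
assert issubclass(Blue, Color)
assert Blue.name == "blue" and Blue.code == "#0000ff"
assert Color.name == "red"  # the original class is untouched
```

Because `type()` receives `(cls,)` as the bases, the returned class is a genuine subclass, so `isinstance`/`issubclass` checks against the original type keep working.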
def update_status(self, id_number, new_value):
"""
Update a status name
:type id_number: int
:param id_number: status ID number
:type new_value: str
:param new_value: The new status name
:rtype: dict
:return: an empty dictionary
"""
data = {
'id': id_number,
'new_value': new_value
}
return self.post('updateStatus', data) | 0.004484 |
def _get_owner_cover_photo_upload_server(session, group_id, crop_x=0, crop_y=0, crop_x2=795, crop_y2=200):
"""
https://vk.com/dev/photos.getOwnerCoverPhotoUploadServer
"""
group_id = abs(group_id)
response = session.fetch("photos.getOwnerCoverPhotoUploadServer", group_id=group_id, crop_x=crop_x, crop_y=crop_y, crop_x2=crop_x2, crop_y2=crop_y2)
return response['upload_url'] | 0.009456 |
def _body(self, full_path, environ, file_like):
"""Return an iterator over the body of the response."""
magic = self._match_magic(full_path)
if magic is not None:
return [_encode(s, self.encoding) for s in magic.body(environ,
file_like)]
else:
way_to_send = environ.get('wsgi.file_wrapper', iter_and_close)
return way_to_send(file_like, self.block_size) | 0.004107 |
def url_to_attrs_dict(url, url_attr):
"""
Sanitize url dict as used in django-bootstrap3 settings.
"""
result = dict()
# If url is not a string, it should be a dict
if isinstance(url, six.string_types):
url_value = url
else:
try:
url_value = url["url"]
except TypeError:
raise BootstrapError(
'Function "url_to_attrs_dict" expects a string or a dict with key "url".'
)
crossorigin = url.get("crossorigin", None)
integrity = url.get("integrity", None)
if crossorigin:
result["crossorigin"] = crossorigin
if integrity:
result["integrity"] = integrity
result[url_attr] = url_value
return result | 0.002635 |
def interpret(self):
"""
Main interpreter loop.
"""
self.print_menu()
while True:
try:
event = input()
if event == 'q':
return
event = int(event)
event = self.model.events[event-1]
except Exception:
print('Invalid input')
self.event(event)
self.print_menu() | 0.004484 |
def __find_variant(self, value):
"""Find the messages.Variant type that describes this value.
Args:
value: The value whose variant type is being determined.
Returns:
The messages.Variant value that best describes value's type,
or None if it's a type we don't know how to handle.
"""
if isinstance(value, bool):
return messages.Variant.BOOL
elif isinstance(value, six.integer_types):
return messages.Variant.INT64
elif isinstance(value, float):
return messages.Variant.DOUBLE
elif isinstance(value, six.string_types):
return messages.Variant.STRING
elif isinstance(value, (list, tuple)):
# Find the most specific variant that covers all elements.
variant_priority = [None,
messages.Variant.INT64,
messages.Variant.DOUBLE,
messages.Variant.STRING]
chosen_priority = 0
for v in value:
variant = self.__find_variant(v)
try:
priority = variant_priority.index(variant)
except ValueError:  # list.index raises ValueError for variants not in the list
priority = -1
if priority > chosen_priority:
chosen_priority = priority
return variant_priority[chosen_priority]
# Unrecognized type.
return None | 0.001355 |
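A standalone sketch of the list-priority rule used above, with the protorpc `messages.Variant` enum replaced by a local stub (an assumption for illustration); it also shows why the failed lookup is a `ValueError` from `list.index`:

```python
import enum


class Variant(enum.Enum):  # local stub standing in for messages.Variant
    BOOL = 0
    INT64 = 1
    DOUBLE = 2
    STRING = 3


def find_scalar_variant(value):
    # bool must be tested before int, because isinstance(True, int) is True
    if isinstance(value, bool):
        return Variant.BOOL
    if isinstance(value, int):
        return Variant.INT64
    if isinstance(value, float):
        return Variant.DOUBLE
    if isinstance(value, str):
        return Variant.STRING
    return None


def find_list_variant(values):
    # Most specific variant that still covers every element of the list.
    priority_order = [None, Variant.INT64, Variant.DOUBLE, Variant.STRING]
    chosen = 0
    for v in values:
        try:
            priority = priority_order.index(find_scalar_variant(v))
        except ValueError:  # list.index raises ValueError, not IndexError
            priority = -1
        if priority > chosen:
            chosen = priority
    return priority_order[chosen]


print(find_list_variant([1, 2, 3]))   # Variant.INT64
print(find_list_variant([1, 2.5]))    # Variant.DOUBLE
print(find_list_variant([1, "x"]))    # Variant.STRING
print(find_list_variant([True, 2]))   # Variant.INT64 (BOOL is skipped)
```

Picking the highest index means a single float or string in the list widens the chosen variant for every element.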
def get_trips(self, authentication_info, start, end):
"""Get trips for this device between start and end."""
import requests
if (authentication_info is None or
not authentication_info.is_valid()):
return []
data_url = "https://api.ritassist.nl/api/trips/GetTrips"
query = f"?equipmentId={self.identifier}&from={start}&to={end}&extendedInfo=True"
header = authentication_info.create_header()
response = requests.get(data_url + query, headers=header)
trips = response.json()
result = []
for trip_json in trips:
trip = Trip(trip_json)
result.append(trip)
return result | 0.005682 |
def set_flow_node_ref_list(self, value):
"""
Setter for 'flow_node_ref' field.
:param value: a new value of the 'flow_node_ref' field. Must be a list of String objects (IDs of referenced nodes).
"""
if value is None or not isinstance(value, list):
raise TypeError("FlowNodeRefList new value must be a list")
else:
for element in value:
if not isinstance(element, str):
raise TypeError("FlowNodeRefList elements in variable must be of String class")
self.__flow_node_ref_list = value | 0.006689 |
def InitPrivateKey(self):
"""Makes sure this client has a private key set.
It first tries to load an RSA key from the certificate.
If no certificate is found, or it is invalid, we make a new random RSA key,
and store it as our certificate.
Returns:
An RSA key - either from the certificate or a new random key.
"""
if self.private_key:
try:
self.common_name = rdf_client.ClientURN.FromPrivateKey(self.private_key)
logging.info("Starting client %s", self.common_name)
return self.private_key
except type_info.TypeValueError:
pass
# We either have an invalid key or no key. We just generate a new one.
key = rdf_crypto.RSAPrivateKey.GenerateKey(
bits=config.CONFIG["Client.rsa_key_length"])
self.common_name = rdf_client.ClientURN.FromPrivateKey(key)
logging.info("Client pending enrolment %s", self.common_name)
# Save the keys
self.SavePrivateKey(key)
return key | 0.004057 |
def process_credentials_elements(cred_tree):
""" Receive an XML object with the credentials to run
a scan against a given target.
@param:
<credentials>
<credential type="up" service="ssh" port="22">
<username>scanuser</username>
<password>mypass</password>
</credential>
<credential type="up" service="smb">
<username>smbuser</username>
<password>mypass</password>
</credential>
</credentials>
@return: Dictionary containing the credentials for a given target.
Example form:
{'ssh': {'type': type,
'port': port,
'username': username,
'password': pass,
},
'smb': {'type': type,
'username': username,
'password': pass,
},
}
"""
credentials = {}
for credential in cred_tree:
service = credential.attrib.get('service')
credentials[service] = {}
credentials[service]['type'] = credential.attrib.get('type')
if service == 'ssh':
credentials[service]['port'] = credential.attrib.get('port')
for param in credential:
credentials[service][param.tag] = param.text
return credentials | 0.001347 |
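The XML layout in the docstring can be fed straight through the function; a self-contained sketch using the standard library's ElementTree (the original presumably receives an already-parsed element, e.g. from lxml):

```python
import xml.etree.ElementTree as ET


def process_credentials_elements(cred_tree):
    credentials = {}
    for credential in cred_tree:
        service = credential.attrib.get('service')
        credentials[service] = {}
        credentials[service]['type'] = credential.attrib.get('type')
        if service == 'ssh':
            credentials[service]['port'] = credential.attrib.get('port')
        for param in credential:
            credentials[service][param.tag] = param.text
    return credentials


xml = """<credentials>
  <credential type="up" service="ssh" port="22">
    <username>scanuser</username><password>mypass</password>
  </credential>
  <credential type="up" service="smb">
    <username>smbuser</username><password>mypass</password>
  </credential>
</credentials>"""

print(process_credentials_elements(ET.fromstring(xml)))
# {'ssh': {'type': 'up', 'port': '22', 'username': 'scanuser', 'password': 'mypass'},
#  'smb': {'type': 'up', 'username': 'smbuser', 'password': 'mypass'}}
```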
def subtree_leaf_positions(subtree):
"""Return tree positions of all leaves of a subtree."""
relative_leaf_positions = subtree.treepositions('leaves')
subtree_root_pos = subtree.treeposition()
absolute_leaf_positions = []
for rel_leaf_pos in relative_leaf_positions:
absolute_leaf_positions.append( subtree_root_pos + rel_leaf_pos)
return absolute_leaf_positions | 0.005076 |
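A short usage sketch, assuming NLTK's `ParentedTree` (plain `Tree` objects do not provide `treeposition()`):

```python
from nltk.tree import ParentedTree


def subtree_leaf_positions(subtree):
    """Return tree positions of all leaves of a subtree."""
    relative_leaf_positions = subtree.treepositions('leaves')
    subtree_root_pos = subtree.treeposition()
    absolute_leaf_positions = []
    for rel_leaf_pos in relative_leaf_positions:
        absolute_leaf_positions.append(subtree_root_pos + rel_leaf_pos)
    return absolute_leaf_positions


t = ParentedTree.fromstring("(S (NP (DT the) (NN dog)) (VP (VBZ barks)))")
np_subtree = t[0]                            # the NP subtree
print(np_subtree.treeposition())             # (0,)
print(subtree_leaf_positions(np_subtree))    # [(0, 0, 0), (0, 1, 0)]
print(t[0, 0, 0], t[0, 1, 0])                # the dog
```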
def urban_lookup(word):
'''
Return an Urban Dictionary definition for a word or None if no result was
found.
'''
url = "http://api.urbandictionary.com/v0/define"
params = dict(term=word)
resp = requests.get(url, params=params)
resp.raise_for_status()
res = resp.json()
if not res['list']:
return
return res['list'][0]['definition'] | 0.037901 |
def get_segment_summary_times(scienceFile, segmentName):
"""
This function will find the times for which the segment_summary is set
for the flag given by segmentName.
Parameters
-----------
scienceFile : SegFile
The segment file that we want to use to determine this.
segmentName : string
The DQ flag to search for times in the segment_summary table.
Returns
---------
summSegList : ligo.segments.segmentlist
The times that are covered in the segment summary table.
"""
# Parse the segmentName
segmentName = segmentName.split(':')
if not len(segmentName) in [2,3]:
raise ValueError("Invalid channel name %s." %(segmentName))
ifo = segmentName[0]
channel = segmentName[1]
version = ''
if len(segmentName) == 3:
version = int(segmentName[2])
# Load the filename
xmldoc = utils.load_filename(scienceFile.cache_entry.path,
gz=scienceFile.cache_entry.path.endswith("gz"),
contenthandler=ContentHandler)
# Get the segment_def_id for the segmentName
segmentDefTable = table.get_table(xmldoc, "segment_definer")
for entry in segmentDefTable:
if (entry.ifos == ifo) and (entry.name == channel):
if len(segmentName) == 2 or (entry.version==version):
segDefID = entry.segment_def_id
break
else:
raise ValueError("Cannot find channel %s in segment_definer table."\
%(segmentName))
# Get the segmentlist corresponding to this segmentName in segment_summary
segmentSummTable = table.get_table(xmldoc, "segment_summary")
summSegList = segments.segmentlist([])
for entry in segmentSummTable:
if entry.segment_def_id == segDefID:
segment = segments.segment(entry.start_time, entry.end_time)
summSegList.append(segment)
summSegList.coalesce()
return summSegList | 0.004032 |
def delete_maintenance_window(self, id, **kwargs): # noqa: E501
"""Delete a specific maintenance window # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_maintenance_window(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerMaintenanceWindow
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_maintenance_window_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_maintenance_window_with_http_info(id, **kwargs) # noqa: E501
return data | 0.002141 |
def present(name,
host='localhost',
password=None,
password_hash=None,
allow_passwordless=False,
unix_socket=False,
password_column=None,
**connection_args):
'''
Ensure that the named user is present with the specified properties. A
passwordless user can be configured by omitting ``password`` and
``password_hash``, and setting ``allow_passwordless`` to ``True``.
name
The name of the user to manage
host
Host for which this user/password combo applies
password
The password to use for this user. Will take precedence over the
``password_hash`` option if both are specified.
password_hash
The password in hashed form. Be sure to quote the password because YAML
doesn't like the ``*``. A password hash can be obtained from the mysql
command-line client like so::
mysql> SELECT PASSWORD('mypass');
+-------------------------------------------+
| PASSWORD('mypass') |
+-------------------------------------------+
| *6C8989366EAF75BB670AD8EA7A7FC1176A95CEF4 |
+-------------------------------------------+
1 row in set (0.00 sec)
allow_passwordless
If ``True``, then ``password`` and ``password_hash`` can be omitted to
permit a passwordless login.
.. versionadded:: 0.16.2
unix_socket
If ``True`` and allow_passwordless is ``True``, the unix_socket auth
plugin will be used.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'User {0}@{1} is already present'.format(name, host)}
passwordless = not any((password, password_hash))
# check if user exists with the same password (or passwordless login)
if passwordless:
if not salt.utils.data.is_true(allow_passwordless):
ret['comment'] = 'Either password or password_hash must be ' \
'specified, unless allow_passwordless is True'
ret['result'] = False
return ret
else:
if __salt__['mysql.user_exists'](name, host, passwordless=True, unix_socket=unix_socket, password_column=password_column,
**connection_args):
ret['comment'] += ' with passwordless login'
return ret
else:
err = _get_mysql_error()
if err is not None:
ret['comment'] = err
ret['result'] = False
return ret
else:
if __salt__['mysql.user_exists'](name, host, password, password_hash, unix_socket=unix_socket, password_column=password_column,
**connection_args):
ret['comment'] += ' with the desired password'
if password_hash and not password:
ret['comment'] += ' hash'
return ret
else:
err = _get_mysql_error()
if err is not None:
ret['comment'] = err
ret['result'] = False
return ret
# check if user exists with a different password
if __salt__['mysql.user_exists'](name, host, unix_socket=unix_socket, **connection_args):
# The user is present, change the password
if __opts__['test']:
ret['comment'] = \
'Password for user {0}@{1} is set to be '.format(name, host)
ret['result'] = None
if passwordless:
ret['comment'] += 'cleared'
if not salt.utils.data.is_true(allow_passwordless):
ret['comment'] += ', but allow_passwordless != True'
ret['result'] = False
else:
ret['comment'] += 'changed'
return ret
if __salt__['mysql.user_chpass'](name, host,
password, password_hash,
allow_passwordless, unix_socket,
**connection_args):
ret['comment'] = \
'Password for user {0}@{1} has been ' \
'{2}'.format(name, host,
'cleared' if passwordless else 'changed')
ret['changes'][name] = 'Updated'
else:
ret['comment'] = \
'Failed to {0} password for user ' \
'{1}@{2}'.format('clear' if passwordless else 'change',
name, host)
err = _get_mysql_error()
if err is not None:
ret['comment'] += ' ({0})'.format(err)
if passwordless and not salt.utils.data.is_true(allow_passwordless):
ret['comment'] += '. Note: allow_passwordless must be True ' \
'to permit passwordless login.'
ret['result'] = False
else:
err = _get_mysql_error()
if err is not None:
ret['comment'] = err
ret['result'] = False
return ret
# The user is not present, make it!
if __opts__['test']:
ret['comment'] = \
'User {0}@{1} is set to be added'.format(name, host)
ret['result'] = None
if passwordless:
ret['comment'] += ' with passwordless login'
if not salt.utils.data.is_true(allow_passwordless):
ret['comment'] += ', but allow_passwordless != True'
ret['result'] = False
return ret
if __salt__['mysql.user_create'](name, host,
password, password_hash,
allow_passwordless, unix_socket=unix_socket, password_column=password_column,
**connection_args):
ret['comment'] = \
'The user {0}@{1} has been added'.format(name, host)
if passwordless:
ret['comment'] += ' with passwordless login'
ret['changes'][name] = 'Present'
else:
ret['comment'] = 'Failed to create user {0}@{1}'.format(name, host)
err = _get_mysql_error()
if err is not None:
ret['comment'] += ' ({0})'.format(err)
ret['result'] = False
return ret | 0.000918 |
def version():
""" Print the current version and exit. """
from topydo.lib.Version import VERSION, LICENSE
print("topydo {}\n".format(VERSION))
print(LICENSE)
sys.exit(0) | 0.005263 |
def _scan_pages_for_same(self, progress_cb=_stub_progress):
"""! @brief Read the full page data to determine if it is unchanged.
When this function exits, the same flag will be set to either True or False for
every page. In addition, sectors that need at least one page programmed will have
the same flag set to False for all pages within that sector.
"""
progress = 0
# Read page data if unknown - after this page.same will be True or False
unknown_pages = [page for page in self.page_list if page.same is None]
if unknown_pages:
self._enable_read_access()
for page in unknown_pages:
if page.cached_estimate_data is not None:
data = page.cached_estimate_data
offset = len(data)
else:
data = []
offset = 0
assert len(page.data) == page.size
data.extend(self.flash.target.read_memory_block8(page.addr + offset,
page.size - offset))
page.same = same(page.data, data)
page.cached_estimate_data = None # This data isn't needed anymore.
progress += page.get_verify_weight()
# Update progress
if self.sector_erase_weight > 0:
progress_cb(float(progress) / float(self.sector_erase_weight))
# If we have to program any pages of a sector, then mark all pages of that sector
# as needing to be programmed, since the sector will be erased.
for sector in self.sector_list:
if sector.are_any_pages_not_same():
sector.mark_all_pages_not_same()
return progress | 0.009125 |
def _convert_filetime_to_timestamp(filetime):
"""
Windows returns times as 64-bit unsigned longs that are the number
of hundreds of nanoseconds since Jan 1 1601. This converts it to
a Unix timestamp.
:param filetime:
A FILETIME struct object
:return:
An integer unix timestamp
"""
hundreds_nano_seconds = struct.unpack(
b'>Q',
struct.pack(
b'>LL',
filetime.dwHighDateTime,
filetime.dwLowDateTime
)
)[0]
seconds_since_1601 = hundreds_nano_seconds / 10000000
return seconds_since_1601 - 11644473600 | 0.001613 |
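A self-contained sketch of the conversion; the `FILETIME` namedtuple below stands in for the real ctypes struct (an assumption for illustration):

```python
import struct
from collections import namedtuple

FILETIME = namedtuple("FILETIME", "dwHighDateTime dwLowDateTime")


def _convert_filetime_to_timestamp(filetime):
    hundreds_nano_seconds = struct.unpack(
        b'>Q',
        struct.pack(b'>LL', filetime.dwHighDateTime, filetime.dwLowDateTime)
    )[0]
    seconds_since_1601 = hundreds_nano_seconds / 10000000
    return seconds_since_1601 - 11644473600


# 2009-02-13 23:31:30 UTC is Unix time 1234567890; the matching FILETIME value
# is (1234567890 + 11644473600) * 10**7 hundred-nanosecond intervals.
ticks = (1234567890 + 11644473600) * 10**7
ft = FILETIME(dwHighDateTime=ticks >> 32, dwLowDateTime=ticks & 0xFFFFFFFF)
print(_convert_filetime_to_timestamp(ft))  # 1234567890.0
```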
def add_mount(self,
volume_mount):
"""
Args:
volume_mount (VolumeMount):
"""
self._add_mount(
name=volume_mount.name,
mount_path=volume_mount.mount_path,
sub_path=volume_mount.sub_path,
read_only=volume_mount.read_only
) | 0.008876 |
def _display(self, layout):
"""launch layouts display"""
print(file=self.out)
TextWriter().format(layout, self.out) | 0.014388 |
def fit(self,
X,
vocab=None,
initial_embedding_dict=None,
fixed_initialization=None):
"""Run GloVe and return the new matrix.
Parameters
----------
X : array-like of shape = [n_words, n_words]
The square count matrix.
vocab : iterable or None (default: None)
Rownames for `X`.
initial_embedding_dict : dict or None (default: None)
Map into representations that we want to use for a "warm
start" -- e.g., GloVe vectors trained on a massive corpus.
Learned representations of words in `vocab` are initialized
from `initial_embedding_dict` wherever possible; words not
found in `initial_embedding_dict` are randomly initialized.
fixed_initialization : dict or None (default: None)
If a dict, this will replace the random initializations
of `W`, `C`, `bw` and `bc`. Dict keys must be
['W', 'C', 'bw', 'bc'], and values should be np.arrays
of appropriate size ((n_words, n) for `W` and `C`, and
(n_words, ) for `bw` and `bc`).
Returns
-------
np.array
Of shape = [n_words, embedding_dim]. I.e. each row is the
embedding of the corresponding element in `vocab`.
"""
if fixed_initialization is not None:
assert self.test_mode, \
"Fixed initialization parameters can only be provided" \
" in test mode. Initialize {} with `test_mode=True`.". \
format(self.__class__.__name__)
self._check_dimensions(
X, vocab, initial_embedding_dict
)
weights, log_coincidence = self._initialize(X)
return self._fit(X, weights, log_coincidence,
vocab=vocab,
initial_embedding_dict=initial_embedding_dict,
fixed_initialization=fixed_initialization) | 0.003447 |
def _call(self, x, out=None):
"""Extend ``x`` from the subspace."""
if out is None:
out = self.range.zero()
else:
out.set_zero()
out[self.index] = x
return out | 0.008929 |
def delete(self, instance_id):
'''
Delete a server
'''
nt_ks = self.compute_conn
response = nt_ks.servers.delete(instance_id)
return True | 0.010811 |
def do_bugout(self, args):
"""bugout [ <logger> ] - remove a console logging handler from a logger"""
args = args.split()
if _debug: ConsoleCmd._debug("do_bugout %r", args)
# get the logger name and logger
if args:
loggerName = args[0]
if loggerName in logging.Logger.manager.loggerDict:
logger = logging.getLogger(loggerName)
else:
logger = None
else:
loggerName = '__root__'
logger = logging.getLogger()
# remove the logging handler
if not logger:
self.stdout.write("not a valid logger name\n")
elif not loggerName in self.handlers:
self.stdout.write("no handler for %s\n" % loggerName)
else:
handler = self.handlers[loggerName]
del self.handlers[loggerName]
# see if this (or its parent) is a module level logger
if hasattr(logger, 'globs'):
logger.globs['_debug'] -= 1
elif hasattr(logger.parent, 'globs'):
logger.parent.globs['_debug'] -= 1
# remove it from the logger
logger.removeHandler(handler)
self.stdout.write("handler to %s removed\n" % loggerName)
self.stdout.write("\n") | 0.003782 |
def utterances_from_tier(eafob: Eaf, tier_name: str) -> List[Utterance]:
""" Returns utterances found in the given Eaf object in the given tier."""
try:
speaker = eafob.tiers[tier_name][2]["PARTICIPANT"]
except KeyError:
speaker = None # We don't know the name of the speaker.
tier_utterances = []
annotations = sort_annotations(
list(eafob.get_annotation_data_for_tier(tier_name)))
for i, annotation in enumerate(annotations):
eaf_stem = eafob.eaf_path.stem
utter_id = "{}.{}.{}".format(eaf_stem, tier_name, i)
start_time = eafob.time_origin + annotation[0]
end_time = eafob.time_origin + annotation[1]
text = annotation[2]
utterance = Utterance(eafob.media_path, eafob.eaf_path, utter_id,
start_time, end_time, text, speaker)
tier_utterances.append(utterance)
return tier_utterances | 0.002157 |
def run_numerical_categorical_analysis(args, schema_list):
"""Makes the numerical and categorical analysis files.
Args:
args: the command line args
schema_list: python object of the schema json file.
Raises:
ValueError: if schema contains unknown column types.
"""
header = [column['name'] for column in schema_list]
input_files = file_io.get_matching_files(args.input_file_pattern)
# Check the schema is valid
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type != 'string' and col_type != 'integer' and col_type != 'float':
raise ValueError('Schema contains an unsupported type %s.' % col_type)
# initialize the results
def _init_numerical_results():
return {'min': float('inf'),
'max': float('-inf'),
'count': 0,
'sum': 0.0}
numerical_results = collections.defaultdict(_init_numerical_results)
categorical_results = collections.defaultdict(set)
# for each file, update the numerical stats from that file, and update the set
# of unique labels.
for input_file in input_files:
with file_io.FileIO(input_file, 'r') as f:
for line in f:
parsed_line = dict(zip(header, line.strip().split(',')))
for col_schema in schema_list:
col_name = col_schema['name']
col_type = col_schema['type']
if col_type.lower() == 'string':
categorical_results[col_name].update([parsed_line[col_name]])
else:
# numerical column.
# if empty, skip
if not parsed_line[col_name].strip():
continue
numerical_results[col_name]['min'] = (
min(numerical_results[col_name]['min'],
float(parsed_line[col_name])))
numerical_results[col_name]['max'] = (
max(numerical_results[col_name]['max'],
float(parsed_line[col_name])))
numerical_results[col_name]['count'] += 1
numerical_results[col_name]['sum'] += float(parsed_line[col_name])
# Update numerical_results to just have min/min/mean
for col_schema in schema_list:
if col_schema['type'].lower() != 'string':
col_name = col_schema['name']
mean = numerical_results[col_name]['sum'] / numerical_results[col_name]['count']
del numerical_results[col_name]['sum']
del numerical_results[col_name]['count']
numerical_results[col_name]['mean'] = mean
# Write the numerical_results to a json file.
file_io.write_string_to_file(
os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
json.dumps(numerical_results, indent=2, separators=(',', ': ')))
# Write the vocab files. Each label is on its own line.
for name, unique_labels in six.iteritems(categorical_results):
labels = '\n'.join(list(unique_labels))
file_io.write_string_to_file(
os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name),
labels) | 0.011092 |
async def _notify_event_internal(self, conn_string, name, event):
"""Notify that an event has occured.
This method will send a notification and ensure that all callbacks
registered for it have completed by the time it returns. In
particular, if the callbacks are awaitable, this method will await
them before returning. The order in which the callbacks are called
is undefined.
This is a low level method that is not intended to be called directly.
You should use the high level public notify_* methods for each of the
types of events to ensure consistency in how the event objects are
created.
Args:
conn_string (str): The connection string for the device that the
event is associated with.
name (str): The name of the event. Must be in SUPPORTED_EVENTS.
event (object): The event object. The type of this object will
depend on what is being notified.
"""
try:
self._currently_notifying = True
conn_id = self._get_conn_id(conn_string)
event_maps = self._monitors.get(conn_string, {})
wildcard_maps = self._monitors.get(None, {})
wildcard_handlers = wildcard_maps.get(name, {})
event_handlers = event_maps.get(name, {})
for handler, func in itertools.chain(event_handlers.items(), wildcard_handlers.items()):
try:
result = func(conn_string, conn_id, name, event)
if inspect.isawaitable(result):
await result
except: #pylint:disable=bare-except;This is a background function and we are logging exceptions
self._logger.warning("Error calling notification callback id=%s, func=%s", handler, func, exc_info=True)
finally:
for action in self._deferred_adjustments:
self._adjust_monitor_internal(*action)
self._deferred_adjustments = []
self._currently_notifying = False | 0.003321 |
def restore_position(self):
"""
restore cursor and scroll position
"""
# restore text cursor position:
self.mark_set(tkinter.INSERT, self.old_text_pos)
# restore scroll position:
self.yview_moveto(self.old_first) | 0.007463 |
def OnInit(self):
"""Initialize by creating the split window with the tree"""
project = compass.CompassProjectParser(sys.argv[1]).parse()
frame = MyFrame(None, -1, 'wxCompass', project)
frame.Show(True)
self.SetTopWindow(frame)
return True | 0.006969 |
def load_env_file():
"""Adds environment variables defined in :any:`ENV_FILE` to os.environ.
Supports bash style comments and variable interpolation.
"""
if not os.path.exists(ENV_FILE):
return
for line in open(ENV_FILE, 'r'):
line = line.strip()
if not line:
continue
name, value = line.split('=', 1)
if not name or not value or name.startswith('#') or len(name) == 0 or name.isspace():
continue
if re.match(r'^(["\']).*\1$', value):
if value.startswith('"'):
value = os.path.expandvars(value)
value = value[1:-1]
os.environ[name] = value | 0.002928 |
def migrate(self, host, port, key, dest_db, timeout, *,
copy=False, replace=False):
"""Atomically transfer a key from a Redis instance to another one."""
if not isinstance(host, str):
raise TypeError("host argument must be str")
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if not isinstance(dest_db, int):
raise TypeError("dest_db argument must be int")
if not host:
raise ValueError("Got empty host")
if dest_db < 0:
raise ValueError("dest_db must be greater equal 0")
if timeout < 0:
raise ValueError("timeout must be greater equal 0")
flags = []
if copy:
flags.append(b'COPY')
if replace:
flags.append(b'REPLACE')
fut = self.execute(b'MIGRATE', host, port,
key, dest_db, timeout, *flags)
return wait_ok(fut) | 0.003052 |
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume') | 0.015385 |
def generate_credential(s):
'''generate_credential returns a base64-encoded version of the input string
:param s: the string to encode (e.g. "username:password")
'''
if sys.version_info[0] >= 3:
s = bytes(s, 'utf-8')
credentials = base64.b64encode(s).decode('utf-8')
else:
credentials = base64.b64encode(s)
return credentials | 0.002976 |
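A Python 3-only sketch of the same encoding step (the Python 2 branch is dropped):

```python
import base64


def generate_credential(s):
    """Return the base64 encoding of ``s`` (e.g. "username:password")."""
    return base64.b64encode(s.encode('utf-8')).decode('utf-8')


print(generate_credential('vanessa:pass1234'))  # dmFuZXNzYTpwYXNzMTIzNA==
```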
def get_object(self, cont, obj, local_file=None, return_bin=False):
'''
Retrieve a file from Swift
'''
try:
if local_file is None and return_bin is False:
return False
headers, body = self.conn.get_object(cont, obj, resp_chunk_size=65536)
if return_bin is True:
fp = sys.stdout
else:
dirpath = dirname(local_file)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
fp = salt.utils.files.fopen(local_file, 'wb') # pylint: disable=resource-leakage
read_length = 0
for chunk in body:
read_length += len(chunk)
fp.write(chunk)
fp.close()
return True
# ClientException
# file/dir exceptions
except Exception as exc:
log.error('There was an error:')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: %s: %s', exc.code, exc.msg)
log.error(' Content: \n%s', getattr(exc, 'read', lambda: six.text_type(exc))())
return False | 0.004198 |
def iter(context, sequence, limit=10):
"""Iter to list all the jobs events."""
params = {'limit': limit,
'offset': 0}
uri = '%s/%s/%s' % (context.dci_cs_api, RESOURCE, sequence)
while True:
j = context.session.get(uri, params=params).json()
if len(j['jobs_events']):
for i in j['jobs_events']:
yield i
else:
break
params['offset'] += params['limit'] | 0.002212 |
def cmp_store_tlv_params(self, intf, tlv_data):
"""Compare and store the received TLV.
Compares the received TLV with stored TLV. Store the new TLV if it is
different.
"""
flag = False
attr_obj = self.get_attr_obj(intf)
remote_evb_mode = self.pub_lldp.get_remote_evb_mode(tlv_data)
if attr_obj.remote_evb_mode_uneq_store(remote_evb_mode):
flag = True
remote_evb_cfgd = self.pub_lldp.get_remote_evb_cfgd(tlv_data)
if attr_obj.remote_evb_cfgd_uneq_store(remote_evb_cfgd):
flag = True
remote_mgmt_addr = self.pub_lldp.get_remote_mgmt_addr(tlv_data)
if attr_obj.remote_mgmt_addr_uneq_store(remote_mgmt_addr):
flag = True
remote_sys_desc = self.pub_lldp.get_remote_sys_desc(tlv_data)
if attr_obj.remote_sys_desc_uneq_store(remote_sys_desc):
flag = True
remote_sys_name = self.pub_lldp.get_remote_sys_name(tlv_data)
if attr_obj.remote_sys_name_uneq_store(remote_sys_name):
flag = True
remote_port = self.pub_lldp.get_remote_port(tlv_data)
if attr_obj.remote_port_uneq_store(remote_port):
flag = True
remote_chassis_id_mac = self.pub_lldp.\
get_remote_chassis_id_mac(tlv_data)
if attr_obj.remote_chassis_id_mac_uneq_store(remote_chassis_id_mac):
flag = True
remote_port_id_mac = self.pub_lldp.get_remote_port_id_mac(tlv_data)
if attr_obj.remote_port_id_mac_uneq_store(remote_port_id_mac):
flag = True
return flag | 0.001252 |
def paintEvent(self, event):
""" Reimplemented to paint the background panel.
"""
painter = QtGui.QStylePainter(self)
option = QtGui.QStyleOptionFrame()
option.initFrom(self)
painter.drawPrimitive(QtGui.QStyle.PE_PanelTipLabel, option)
painter.end()
super(CallTipWidget, self).paintEvent(event) | 0.005571 |
def cli(ctx, obj):
"""Show Alerta server and client versions."""
client = obj['client']
click.echo('alerta {}'.format(client.mgmt_status()['version']))
click.echo('alerta client {}'.format(client_version))
click.echo('requests {}'.format(requests_version))
click.echo('click {}'.format(click.__version__))
ctx.exit() | 0.002907 |
def open_url(absolute_or_relative_url):
"""
Loads a web page in the current browser session.
:param absolute_or_relative_url:
an absolute url to a web page when config.base_url is not specified,
otherwise a relative url
:Usage:
open_url('http://mydomain.com/subpage1')
open_url('http://mydomain.com/subpage2')
# OR
config.base_url = 'http://mydomain.com'
open_url('/subpage1')
open_url('/subpage2')
"""
# todo: refactor next line when app_host is removed
base_url = selene.config.app_host if selene.config.app_host else selene.config.base_url
driver().get(base_url + absolute_or_relative_url) | 0.004184 |
def enable_audio_video_cmd(param, enable):
"""Return command to enable/disable all audio/video streams."""
cmd = 'configManager.cgi?action=setConfig'
formats = [('Extra', 3), ('Main', 4)]
if param == 'Video':
formats.append(('Snap', 3))
for fmt, num in formats:
for i in range(num):
cmd += '&Encode[0].{}Format[{}].{}Enable={}'.format(
fmt, i, param, str(enable).lower())
return cmd | 0.002222 |
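The helper is self-contained, so a quick check of the generated query string (the `'Audio'`/`True` arguments are chosen arbitrarily):

```python
def enable_audio_video_cmd(param, enable):
    """Return command to enable/disable all audio/video streams."""
    cmd = 'configManager.cgi?action=setConfig'
    formats = [('Extra', 3), ('Main', 4)]
    if param == 'Video':
        formats.append(('Snap', 3))
    for fmt, num in formats:
        for i in range(num):
            cmd += '&Encode[0].{}Format[{}].{}Enable={}'.format(
                fmt, i, param, str(enable).lower())
    return cmd


cmd = enable_audio_video_cmd('Audio', True)
print(cmd.split('&')[1])        # Encode[0].ExtraFormat[0].AudioEnable=true
print(len(cmd.split('&')) - 1)  # 7 toggles: 3 'Extra' streams + 4 'Main' streams
```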
def convert_examples_to_features(examples, seq_length, tokenizer):
"""Loads a data file into a list of `InputFeature`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (example.unique_id))
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features | 0.003272 |
def clear(self):
''' Remove all content from the document but do not reset title.
Returns:
None
'''
self._push_all_models_freeze()
try:
while len(self._roots) > 0:
r = next(iter(self._roots))
self.remove_root(r)
finally:
self._pop_all_models_freeze() | 0.005435 |
def execute(helper, config, args):
"""
Swaps old and new URLs.
If old_environment was active, new_environment will become the active environment
"""
old_env_name = args.old_environment
new_env_name = args.new_environment
# swap C-Names
out("Assuming that {} is the currently active environment...".format(old_env_name))
out("Swapping environment cnames: {} will become active, {} will become inactive.".format(new_env_name,
old_env_name))
helper.swap_environment_cnames(old_env_name, new_env_name)
helper.wait_for_environments([old_env_name, new_env_name], status='Ready', include_deleted=False) | 0.008174 |
def _postrun(cls, span, obj, **kwargs):
""" Trigger to execute just before closing the span
:param opentracing.span.Span span: the SpanContext instance
:param Any obj: Object to use as context
:param dict kwargs: additional data
"""
span.set_tag("response.status_code", obj.status_code)
span.set_tag(
"response.content_lenght", len(getattr(obj, 'content', ""))
) | 0.004545 |
def PowercycleNode(r, node, force=False):
"""
Powercycles a node.
@type node: string
@param node: Node name
@type force: bool
@param force: Whether to force the operation
@rtype: string
@return: job id
"""
query = {
"force": force,
}
return r.request("post", "/2/nodes/%s/powercycle" % node, query=query) | 0.002755 |
def discover_filename(label, scopes=None):
'''
Check the filesystem for the existence of a .plist file matching the job label.
Optionally specify one or more scopes to search (default all).
:param label: string
:param scopes: tuple or list or oneOf(USER, USER_ADMIN, DAEMON_ADMIN, USER_OS, DAEMON_OS)
'''
if scopes is None:
scopes = [k for k in PLIST_LOCATIONS]
elif not isinstance(scopes, (list, tuple)):
scopes = (scopes, )
for thisscope in scopes:
plistfilename = compute_filename(label, thisscope)
if os.path.isfile(plistfilename):
return plistfilename
return None | 0.004601 |
def search_globs(path, patterns):
# type: (str, List[str]) -> bool
""" Test whether the given *path* contains any patterns in *patterns*
Args:
path (str):
A file path to test for matches.
patterns (list[str]):
A list of glob string patterns to test against. If *path* matches
any of those patters, it will return True.
Returns:
bool: **True** if the ``path`` matches any pattern in *patterns*.
"""
for pattern in (p for p in patterns if p):
if pattern.startswith('/'):
# If pattern starts with root it means it match from root only
regex = fnmatch.translate(pattern[1:])
regex = regex.replace('\\Z', '')
temp_path = path[1:] if path.startswith('/') else path
m = re.search(regex, temp_path)
if m and m.start() == 0:
return True
else:
regex = fnmatch.translate(pattern)
regex = regex.replace('\\Z', '')
if re.search(regex, path):
return True
return False | 0.000906 |
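A self-contained usage sketch; the example paths and patterns are made up:

```python
import fnmatch
import re


def search_globs(path, patterns):
    for pattern in (p for p in patterns if p):
        if pattern.startswith('/'):
            # Pattern anchored at the root: must match from position 0.
            regex = fnmatch.translate(pattern[1:]).replace('\\Z', '')
            temp_path = path[1:] if path.startswith('/') else path
            m = re.search(regex, temp_path)
            if m and m.start() == 0:
                return True
        else:
            regex = fnmatch.translate(pattern).replace('\\Z', '')
            if re.search(regex, path):
                return True
    return False


print(search_globs('src/pkg/module.pyc', ['*.pyc']))  # True
print(search_globs('/docs/readme.md', ['/build/*']))  # False
print(search_globs('/build/out.o', ['/build/*']))     # True
```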
def recipients(messenger, addresses):
"""Structures recipients data.
:param str|unicode, MessageBase messenger: MessengerBase heir
:param list[str|unicode]|str|unicode addresses: recipients addresses or Django User
model heir instances (NOTE: if supported by a messenger)
:return: list of Recipient
:rtype: list[Recipient]
"""
if isinstance(messenger, six.string_types):
messenger = get_registered_messenger_object(messenger)
return messenger._structure_recipients_data(addresses) | 0.003766 |
def create_parser() -> FileAwareParser:
"""
Create a command line parser
:return: parser
"""
parser = FileAwareParser(description="Clear data from FHIR observation fact table", prog="removefacts",
use_defaults=False)
parser.add_argument("-ss", "--sourcesystem", metavar="SOURCE SYSTEM CODE", help="Sourcesystem code")
parser.add_argument("-u", "--uploadid", metavar="UPLOAD IDENTIFIER",
help="Upload identifer -- uniquely identifies this batch", type=int,
nargs='*')
add_connection_args(parser, strong_config_file=False)
parser.add_argument("-p", "--testprefix", metavar="SS PREFIX",
help=f"Sourcesystem_cd prefix for test suite functions (Default: {default_test_prefix}")
parser.add_argument("--testlist", help="List leftover test suite entries", action="store_true")
parser.add_argument("--removetestlist", help="Remove leftover test suite entries", action="store_true")
return parser | 0.006763 |
def send(self, from_, to, subject, text='', html='', cc=[], bcc=[],
headers={}, attachments=[]):
"""
Send an email.
"""
if isinstance(to, string_types):
raise TypeError('"to" parameter must be enumerable')
if text == '' and html == '':
raise ValueError('"text" and "html" must not both be empty')
return self._session.post('{}/send'.format(self._url), json={
'from': from_,
'to': to,
'cc': cc,
'bcc': bcc,
'subject': subject,
'headers': headers,
'text': text,
'html': html,
'attachments': list(self._process_attachments(attachments)),
}).json() | 0.004027 |
def build_board_2048():
""" builds a 2048 starting board
Printing Grid
0 0 0 2
0 0 4 0
0 0 0 0
0 0 0 0
"""
grd = Grid(4,4, [2,4])
grd.new_tile()
grd.new_tile()
print(grd)
return grd | 0.014134 |
def F_(self, X):
"""
computes h()
:param X:
:return:
"""
if self._interpol:
if not hasattr(self, '_F_interp'):
if self._lookup:
x = self._x_lookup
F_x = self._f_lookup
else:
x = np.linspace(0, self._max_interp_X, self._num_interp_X)
F_x = self._F(x)
self._F_interp = interp.interp1d(x, F_x, kind='linear', axis=-1, copy=False, bounds_error=False,
fill_value=0, assume_sorted=True)
return self._F_interp(X)
else:
return self._F(X) | 0.005714 |
def dump(data, out, ac_parser=None, **options):
"""
Save 'data' to 'out'.
:param data: A mapping object may have configurations data to dump
:param out:
An output file path, a file, a file-like object, :class:`pathlib.Path`
object represents the file or a namedtuple 'anyconfig.globals.IOInfo'
object represents output to dump some data to.
:param ac_parser: Forced parser type or parser object
:param options:
Backend specific optional arguments, e.g. {"indent": 2} for JSON
loader/dumper backend
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
"""
ioi = anyconfig.ioinfo.make(out)
psr = find(ioi, forced_type=ac_parser)
LOGGER.info("Dumping: %s", ioi.path)
psr.dump(data, ioi, **options) | 0.00125 |
def add_cert(self, cert):
"""
Adds a trusted certificate to this store.
Adding a certificate with this method adds this certificate as a
*trusted* certificate.
:param X509 cert: The certificate to add to this store.
:raises TypeError: If the certificate is not an :class:`X509`.
:raises OpenSSL.crypto.Error: If OpenSSL was unhappy with your
certificate.
:return: ``None`` if the certificate was added successfully.
"""
if not isinstance(cert, X509):
raise TypeError()
# As of OpenSSL 1.1.0i adding the same cert to the store more than
# once doesn't cause an error. Accordingly, this code now silences
# the error for OpenSSL < 1.1.0i as well.
if _lib.X509_STORE_add_cert(self._store, cert._x509) == 0:
code = _lib.ERR_peek_error()
err_reason = _lib.ERR_GET_REASON(code)
_openssl_assert(
err_reason == _lib.X509_R_CERT_ALREADY_IN_HASH_TABLE
)
_lib.ERR_clear_error() | 0.001845 |
def run(self):
"""Execute the build command."""
module = self.distribution.ext_modules[0]
base_dir = os.path.dirname(__file__)
if base_dir:
os.chdir(base_dir)
exclusions = []
for define in self.define or []:
module.define_macros.append(define)
for library in self.libraries or []:
module.libraries.append(library)
building_for_windows = self.plat_name in ('win32','win-amd64')
building_for_osx = 'macosx' in self.plat_name
building_for_linux = 'linux' in self.plat_name
building_for_freebsd = 'freebsd' in self.plat_name
building_for_openbsd = 'openbsd' in self.plat_name # need testing
if building_for_linux:
module.define_macros.append(('USE_LINUX_PROC', '1'))
elif building_for_windows:
module.define_macros.append(('USE_WINDOWS_PROC', '1'))
module.define_macros.append(('_CRT_SECURE_NO_WARNINGS', '1'))
module.libraries.append('kernel32')
module.libraries.append('advapi32')
module.libraries.append('user32')
module.libraries.append('crypt32')
module.libraries.append('ws2_32')
elif building_for_osx:
module.define_macros.append(('USE_MACH_PROC', '1'))
module.include_dirs.append('/usr/local/opt/openssl/include')
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
elif building_for_freebsd:
module.define_macros.append(('USE_FREEBSD_PROC', '1'))
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
elif building_for_openbsd:
module.define_macros.append(('USE_OPENBSD_PROC', '1'))
module.include_dirs.append('/opt/local/include')
module.library_dirs.append('/opt/local/lib')
module.include_dirs.append('/usr/local/include')
module.library_dirs.append('/usr/local/lib')
else:
module.define_macros.append(('USE_NO_PROC', '1'))
if has_function('memmem'):
module.define_macros.append(('HAVE_MEMMEM', '1'))
if has_function('strlcpy'):
module.define_macros.append(('HAVE_STRLCPY', '1'))
if has_function('strlcat'):
module.define_macros.append(('HAVE_STRLCAT', '1'))
if self.enable_profiling:
module.define_macros.append(('PROFILING_ENABLED', '1'))
if self.dynamic_linking:
module.libraries.append('yara')
else:
if not self.define or not ('HASH_MODULE', '1') in self.define:
if (has_function('MD5_Init', libraries=['crypto']) and
has_function('SHA256_Init', libraries=['crypto'])):
module.define_macros.append(('HASH_MODULE', '1'))
module.define_macros.append(('HAVE_LIBCRYPTO', '1'))
module.libraries.append('crypto')
else:
exclusions.append('yara/libyara/modules/hash.c')
if self.enable_magic:
module.define_macros.append(('MAGIC_MODULE', '1'))
module.libraries.append('magic')
else:
exclusions.append('yara/libyara/modules/magic.c')
if self.enable_cuckoo:
module.define_macros.append(('CUCKOO_MODULE', '1'))
module.libraries.append('jansson')
else:
exclusions.append('yara/libyara/modules/cuckoo.c')
if self.enable_dotnet:
module.define_macros.append(('DOTNET_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/dotnet.c')
if self.enable_dex:
module.define_macros.append(('DEX_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/dex.c')
if self.enable_macho:
module.define_macros.append(('MACHO_MODULE', '1'))
else:
exclusions.append('yara/libyara/modules/macho.c')
exclusions = [os.path.normpath(x) for x in exclusions]
for directory, _, files in os.walk('yara/libyara/'):
for x in files:
x = os.path.normpath(os.path.join(directory, x))
if x.endswith('.c') and x not in exclusions:
module.sources.append(x)
build_ext.run(self) | 0.01328 |
def _set_host_table(self, v, load=False):
"""
Setter method for host_table, mapped from YANG variable /isis_state/host_table (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_host_table is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_host_table() directly.
YANG Description: The set of IS-IS Host names and Router ID
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=host_table.host_table, is_container='container', presence=False, yang_name="host-table", rest_name="host-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-router-table', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """host_table must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=host_table.host_table, is_container='container', presence=False, yang_name="host-table", rest_name="host-table", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-router-table', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__host_table = t
if hasattr(self, '_set'):
self._set() | 0.005609 |
def _collapse_subtree(self, name, recursive=True):
"""Collapse a sub-tree."""
oname = name
children = self._db[name]["children"]
data = self._db[name]["data"]
del_list = []
while (len(children) == 1) and (not data):
del_list.append(name)
name = children[0]
children = self._db[name]["children"]
data = self._db[name]["data"]
parent = self._db[oname]["parent"]
self._db[name]["parent"] = parent
if parent:
self._db[parent]["children"].remove(oname)
self._db[parent]["children"] = sorted(self._db[parent]["children"] + [name])
else:
self._root = name
self._root_hierarchy_length = len(
self.root_name.split(self._node_separator)
)
for node in del_list:
self._del_node(node)
if recursive:
for child in copy.copy(children):
self._collapse_subtree(child) | 0.002973 |
def fetch_replace_restriction(self, ):
"""Fetch whether unloading is restricted
:returns: True, if unloading is restricted
:rtype: :class:`bool`
:raises: None
"""
inter = self.get_refobjinter()
restricted = self.status() is None
return restricted or inter.fetch_action_restriction(self, 'replace') | 0.005525 |
def variability_prob(self, whiteness):
"""Use the probability of the spectral variability
to identify clouds over land.
Equation 15 (Zhu and Woodcock, 2012)
Parameters
----------
ndvi: ndarray
ndsi: ndarray
whiteness: ndarray
Output
------
ndarray :
probability of cloud over land based on variability
"""
if self.sat in ['LT5', 'LE7']:
# check for green and red saturation
# if red is saturated and less than nir, ndvi = 0
mod_ndvi = np.where(self.red_saturated & (self.nir > self.red), 0, self.ndvi)
# if green is saturated and less than swir1, ndsi = 0
mod_ndsi = np.where(self.green_saturated & (self.swir1 > self.green), 0, self.ndsi)
ndi_max = np.fmax(np.absolute(mod_ndvi), np.absolute(mod_ndsi))
else:
ndi_max = np.fmax(np.absolute(self.ndvi), np.absolute(self.ndsi))
f_max = 1.0 - np.fmax(ndi_max, whiteness)
return f_max | 0.005164 |
def _batch_entry_run(self):
"""The inside of ``_batch_entry``'s infinite loop.
Separated out so it can be properly unit tested.
"""
time.sleep(self.secs_between_batches)
with self._batch_lock:
self.process_batches() | 0.007463 |
def _send_scp(self, x, y, p, *args, **kwargs):
"""Determine the best connection to use to send an SCP packet and use
it to transmit.
This internal version of the method is identical to send_scp except it
has positional arguments for x, y and p.
See the arguments for
:py:meth:`~rig.machine_control.scp_connection.SCPConnection` for
details.
"""
# Determine the size of packet we expect in return, this is usually the
# size that we are informed we should expect by SCAMP/SARK or else is
# the default.
if self._scp_data_length is None:
length = consts.SCP_SVER_RECEIVE_LENGTH_MAX
else:
length = self._scp_data_length
connection = self._get_connection(x, y)
return connection.send_scp(length, x, y, p, *args, **kwargs) | 0.002315 |
async def on_command_error(self, context, exception):
"""|coro|
The default command error handler provided by the bot.
By default this prints to ``sys.stderr`` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get('on_command_error', None):
return
if hasattr(context.command, 'on_error'):
return
cog = context.cog
if cog:
if Cog._get_overridden_method(cog.cog_command_error) is not None:
return
print('Ignoring exception in command {}:'.format(context.command), file=sys.stderr)
traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr) | 0.004756 |
def _generate_examples(self, num_examples, data_path, label_path):
"""Generate MNIST examples as dicts.
Args:
num_examples (int): The number of example.
data_path (str): Path to the data files
label_path (str): Path to the labels
Yields:
Generator yielding the next examples
"""
images = _extract_mnist_images(data_path, num_examples)
labels = _extract_mnist_labels(label_path, num_examples)
data = list(zip(images, labels))
# Data is shuffled automatically to distribute classes uniformly.
for image, label in data:
yield {
"image": image,
"label": label,
} | 0.003063 |
def stop_server( working_dir, clean=False, kill=False ):
"""
Stop the blockstackd server.
"""
timeout = 1.0
dead = False
for i in xrange(0, 5):
# try to kill the main supervisor
pid_file = get_pidfile_path(working_dir)
if not os.path.exists(pid_file):
dead = True
break
pid = read_pid_file(pid_file)
if pid is not None:
try:
os.kill(pid, signal.SIGTERM)
except OSError, oe:
if oe.errno == errno.ESRCH:
# already dead
log.info("Process %s is not running" % pid)
try:
os.unlink(pid_file)
except:
pass
return
except Exception, e:
log.exception(e)
os.abort()
else:
log.info("Corrupt PID file. Please make sure all instances of this program have stopped and remove {}".format(pid_file))
os.abort()
# is it actually dead?
blockstack_opts = get_blockstack_opts()
srv = BlockstackRPCClient('localhost', blockstack_opts['rpc_port'], timeout=5, protocol='http')
try:
res = blockstack_ping(proxy=srv)
except socket.error as se:
# dead?
if se.errno == errno.ECONNREFUSED:
# couldn't connect, so infer dead
try:
os.kill(pid, 0)
log.info("Server %s is not dead yet..." % pid)
except OSError, oe:
log.info("Server %s is dead to us" % pid)
dead = True
break
else:
continue
log.info("Server %s is still running; trying again in %s seconds" % (pid, timeout))
time.sleep(timeout)
timeout *= 2
if not dead and kill:
# be sure to clean up the pidfile
log.info("Killing server %s" % pid)
clean = True
try:
os.kill(pid, signal.SIGKILL)
except Exception, e:
pass
if clean:
# blow away the pid file
try:
os.unlink(pid_file)
except:
pass
log.debug("Blockstack server stopped") | 0.007772 |
def get_conn(filename):
"""Returns new sqlite3.Connection object with _dict_factory() as row factory"""
conn = sqlite3.connect(filename)
conn.row_factory = _dict_factory
return conn | 0.00995 |
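`_dict_factory` is not shown above; a typical sqlite3 dict row factory looks like the sketch below (an assumption, not necessarily the project's exact helper):

```python
import sqlite3


def _dict_factory(cursor, row):
    # Map column names (cursor.description[i][0]) onto the row values.
    return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}


def get_conn(filename):
    """Returns new sqlite3.Connection object with _dict_factory() as row factory"""
    conn = sqlite3.connect(filename)
    conn.row_factory = _dict_factory
    return conn


conn = get_conn(':memory:')
conn.execute('CREATE TABLE t (id INTEGER, name TEXT)')
conn.execute("INSERT INTO t VALUES (1, 'alpha')")
print(conn.execute('SELECT * FROM t').fetchone())  # {'id': 1, 'name': 'alpha'}
```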
def _significand(self):
"""Return the significand of self, as a BigFloat.
If self is a nonzero finite number, return a BigFloat m
with the same precision as self, such that
0.5 <= m < 1. and
self = +/-m * 2**e
for some exponent e.
If self is zero, infinity or nan, return a copy of self with
the sign set to 0.
"""
m = self.copy()
if self and is_finite(self):
mpfr.mpfr_set_exp(m, 0)
mpfr.mpfr_setsign(m, m, False, ROUND_TIES_TO_EVEN)
return m | 0.003521 |
def is_zh(ch):
"""return True if ch is Chinese character.
full-width puncts/latins are not counted in.
"""
x = ord(ch)
# CJK Radicals Supplement and Kangxi radicals
if 0x2e80 <= x <= 0x2fef:
return True
# CJK Unified Ideographs Extension A
elif 0x3400 <= x <= 0x4dbf:
return True
# CJK Unified Ideographs
elif 0x4e00 <= x <= 0x9fbb:
return True
# CJK Compatibility Ideographs
elif 0xf900 <= x <= 0xfad9:
return True
# CJK Unified Ideographs Extension B
elif 0x20000 <= x <= 0x2a6df:
return True
else:
return False | 0.00161 |
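A quick usage sketch on a few code points:

```python
def is_zh(ch):
    x = ord(ch)
    if 0x2e80 <= x <= 0x2fef:        # CJK Radicals Supplement / Kangxi radicals
        return True
    elif 0x3400 <= x <= 0x4dbf:      # CJK Unified Ideographs Extension A
        return True
    elif 0x4e00 <= x <= 0x9fbb:      # CJK Unified Ideographs
        return True
    elif 0xf900 <= x <= 0xfad9:      # CJK Compatibility Ideographs
        return True
    elif 0x20000 <= x <= 0x2a6df:    # CJK Unified Ideographs Extension B
        return True
    else:
        return False


print(is_zh('中'))   # True  (U+4E2D, CJK Unified Ideographs)
print(is_zh('A'))   # False (full-width Latin is deliberately excluded)
print(is_zh('。'))   # False (ideographic full stop is punctuation)
```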
def _port_postfix(self):
"""
Returns empty string for the default port and ':port' otherwise
"""
port = self.real_connection.port
default_port = {'https': 443, 'http': 80}[self._protocol]
return ':{}'.format(port) if port != default_port else '' | 0.006826 |
def to_float(s, default=0.0, allow_nan=False):
"""
Return input converted into a float. If failed, then return ``default``.
Note that, by default, ``allow_nan=False``, so ``to_float`` will not return
``nan``, ``inf``, or ``-inf``.
Examples::
>>> to_float('1.5')
1.5
>>> to_float(1)
1.0
>>> to_float('')
0.0
>>> to_float('nan')
0.0
>>> to_float('inf')
0.0
>>> to_float('-inf', allow_nan=True)
-inf
>>> to_float(None)
0.0
>>> to_float(0, default='Empty')
0.0
>>> to_float(None, default='Empty')
'Empty'
"""
try:
f = float(s)
except (TypeError, ValueError):
return default
if not allow_nan:
if f != f or f in _infs:
return default
return f | 0.001164 |
def unicode(self, b, encoding=None):
"""
Convert a byte string to unicode, using string_encoding and decode_errors.
Arguments:
b: a byte string.
encoding: the name of an encoding. Defaults to the string_encoding
attribute for this instance.
Raises:
TypeError: Because this method calls Python's built-in unicode()
function, this method raises the following exception if the
given string is already unicode:
TypeError: decoding Unicode is not supported
"""
if encoding is None:
encoding = self.string_encoding
# TODO: Wrap UnicodeDecodeErrors with a message about setting
# the string_encoding and decode_errors attributes.
return unicode(b, encoding, self.decode_errors) | 0.003559 |
def avail_sizes(call=None):
'''
Return a list of sizes available from the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
compconn = get_conn(client_type='compute')
ret = {}
location = get_location()
try:
sizes = compconn.virtual_machine_sizes.list(
location=location
)
for size_obj in sizes:
size = size_obj.as_dict()
ret[size['name']] = size
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
ret = {'Error': exc.message}
return ret | 0.00134 |
def plugins(cls, enabled=True):
"""
Returns the plugins for the given class.
:param enabled | <bool> || None
:return [<Plugin>, ..]
"""
cls.loadPlugins()
plugs = getattr(cls, '_%s__plugins' % cls.__name__, {}).values()
if enabled is None:
return plugs
return filter(lambda x: x.isEnabled() == enabled, plugs) | 0.009524 |
def plot_bargraph(
self,
rank="auto",
normalize="auto",
top_n="auto",
threshold="auto",
title=None,
xlabel=None,
ylabel=None,
tooltip=None,
return_chart=False,
haxis=None,
legend="auto",
label=None,
):
"""Plot a bargraph of relative abundance of taxa for multiple samples.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
normalize : 'auto' or `bool`, optional
Convert read counts to relative abundances such that each sample sums to 1.0. Setting
'auto' will choose automatically based on the data.
return_chart : `bool`, optional
When True, return an `altair.Chart` object instead of displaying the resulting plot in
the current notebook.
top_n : `int`, optional
Display the top N most abundant taxa in the entire cohort of samples.
threshold : `float`
Display only taxa that are more abundant that this threshold in one or more samples.
title : `string`, optional
Text label at the top of the plot.
xlabel : `string`, optional
Text label along the horizontal axis.
ylabel : `string`, optional
Text label along the vertical axis.
tooltip : `string` or `list`, optional
A string or list containing strings representing metadata fields. When a point in the
plot is hovered over, the value of the metadata associated with that sample will be
displayed in a modal.
haxis : `string`, optional
The metadata field (or tuple containing multiple categorical fields) used to group
samples together.
legend: `string`, optional
Title for color scale. Defaults to the field used to generate the plot, e.g.
readcount_w_children or abundance.
label : `string` or `callable`, optional
A metadata field (or function) used to label each analysis. If passing a function, a
dict containing the metadata for each analysis is passed as the first and only
positional argument. The callable function must return a string.
Examples
--------
Plot a bargraph of the top 10 most abundant genera
>>> plot_bargraph(rank='genus', top_n=10)
"""
if rank is None:
raise OneCodexException("Please specify a rank or 'auto' to choose automatically")
if not (threshold or top_n):
raise OneCodexException("Please specify at least one of: threshold, top_n")
if top_n == "auto" and threshold == "auto":
top_n = 10
threshold = None
elif top_n == "auto" and threshold != "auto":
top_n = None
elif top_n != "auto" and threshold == "auto":
threshold = None
if legend == "auto":
legend = self._field
df = self.to_df(
rank=rank, normalize=normalize, top_n=top_n, threshold=threshold, table_format="long"
)
if tooltip:
if not isinstance(tooltip, list):
tooltip = [tooltip]
else:
tooltip = []
if haxis:
tooltip.append(haxis)
tooltip.insert(0, "Label")
# takes metadata columns and returns a dataframe with just those columns
# renames columns in the case where columns are taxids
magic_metadata, magic_fields = self._metadata_fetch(tooltip, label=label)
# add sort order to long-format df
if haxis:
sort_order = magic_metadata.sort_values(magic_fields[haxis]).index.tolist()
for sort_num, sort_class_id in enumerate(sort_order):
magic_metadata.loc[sort_class_id, "sort_order"] = sort_num
df["sort_order"] = magic_metadata["sort_order"][df["classification_id"]].tolist()
sort_order = alt.EncodingSortField(field="sort_order", op="mean")
else:
sort_order = None
# transfer metadata from wide-format df (magic_metadata) to long-format df
for f in tooltip:
df[magic_fields[f]] = magic_metadata[magic_fields[f]][df["classification_id"]].tolist()
# add taxa names
df["tax_name"] = [
"{} ({})".format(self.taxonomy["name"][t], t) if t in self.taxonomy["name"] else t
for t in df["tax_id"]
]
#
# TODO: how to sort bars in bargraph
# - abundance (mean across all samples)
# - parent taxon (this will require that we make a few assumptions
# about taxonomic ranks but as all taxonomic data will be coming from
# OCX this should be okay)
#
ylabel = self._field if ylabel is None else ylabel
xlabel = "" if xlabel is None else xlabel
# should ultimately be Label, tax_name, readcount_w_children, then custom fields
tooltip_for_altair = [magic_fields[f] for f in tooltip]
tooltip_for_altair.insert(1, "tax_name")
tooltip_for_altair.insert(2, "{}:Q".format(self._field))
# generate dataframes to plot, one per facet
dfs_to_plot = []
if haxis:
# if using facets, first facet is just the vertical axis
blank_df = df.iloc[:1].copy()
blank_df[self._field] = 0
dfs_to_plot.append(blank_df)
for md_val in magic_metadata[magic_fields[haxis]].unique():
plot_df = df.where(df[magic_fields[haxis]] == md_val).dropna()
# preserve booleans
if magic_metadata[magic_fields[haxis]].dtype == "bool":
plot_df[magic_fields[haxis]] = plot_df[magic_fields[haxis]].astype(bool)
dfs_to_plot.append(plot_df)
else:
dfs_to_plot.append(df)
charts = []
for plot_num, plot_df in enumerate(dfs_to_plot):
chart = (
alt.Chart(plot_df)
.mark_bar()
.encode(
x=alt.X("Label", axis=alt.Axis(title=xlabel), sort=sort_order),
y=alt.Y(
self._field,
axis=alt.Axis(title=ylabel),
scale=alt.Scale(domain=[0, 1], zero=True, nice=False),
),
color=alt.Color("tax_name", legend=alt.Legend(title=legend)),
tooltip=tooltip_for_altair,
href="url:N",
)
)
if haxis:
if plot_num == 0:
# first plot (blank_df) has vert axis but no horiz axis
chart.encoding.x.axis = None
elif plot_num > 0:
# strip vertical axis from subsequent facets
chart.encoding.y.axis = None
# facet's title set to value of metadata in this group
chart.title = str(plot_df[magic_fields[haxis]].tolist()[0])
charts.append(chart)
# add all the facets together
final_chart = charts[0]
if len(charts) > 1:
for chart in charts[1:]:
final_chart |= chart
# add title to chart
# (cannot specify None or False for no title)
final_chart = final_chart.properties(title=title) if title else final_chart
return final_chart if return_chart else final_chart.display() | 0.003903 |
def _sign_block(self, block):
""" The block should be complete and the final
signature from the publishing validator (this validator) needs to
be added.
"""
block_header = block.block_header
header_bytes = block_header.SerializeToString()
signature = self._identity_signer.sign(header_bytes)
block.set_signature(signature)
return block | 0.004914 |
def process_msg(self, msg):
"""Process messages from the event stream."""
jmsg = json.loads(msg)
msgtype = jmsg['MessageType']
msgdata = jmsg['Data']
        _LOGGER.debug('New websocket message received of type: %s', msgtype)
if msgtype == 'Sessions':
self._sessions = msgdata
# Check for new devices and update as needed.
self.update_device_list(self._sessions)
"""
May process other message types in the future.
Other known types are:
- PlaybackStarted
- PlaybackStopped
- SessionEnded
""" | 0.0048 |
def cancel(self, campaign_id):
"""
Cancel a Regular or Plain-Text Campaign after you send, before all of
your recipients receive it. This feature is included with MailChimp
Pro.
:param campaign_id: The unique id for the campaign.
:type campaign_id: :py:class:`str`
"""
self.campaign_id = campaign_id
return self._mc_client._post(url=self._build_path(campaign_id, 'actions/cancel-send')) | 0.006536 |
def get_network_by_id(self, net_id):
"""Return a network with that id or None."""
for elem in self.networks:
if elem.id == net_id:
return elem
return None | 0.009662 |
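The same linear lookup can be expressed with next() over a generator expression; a self-contained sketch using a namedtuple stand-in for the project's network objects:

from collections import namedtuple

Network = namedtuple('Network', ['id', 'name'])
networks = [Network(1, 'lan'), Network(2, 'wan')]

# next() with a default of None mirrors get_network_by_id's behaviour.
assert next((n for n in networks if n.id == 2), None).name == 'wan'
assert next((n for n in networks if n.id == 99), None) is None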
def is_subdomain(domain): # pragma: no cover
"""
Check if the given domain is a subdomain.
:param domain: The domain we are checking.
:type domain: str
:return: The subdomain state.
:rtype: bool
.. warning::
If an empty or a non-string :code:`domain` is given, we return :code:`None`.
"""
if domain and isinstance(domain, str):
# * The given domain is not empty nor None.
# and
# * The given domain is a string.
# We silently load the configuration.
load_config(True)
return Check(domain).is_subdomain()
# We return None, there is nothing to check.
return None | 0.002994 |
def mk_path_str(stmt,
with_prefixes=False,
prefix_onchange=False,
prefix_to_module=False,
resolve_top_prefix_to_module=False):
"""Returns the XPath path of the node.
with_prefixes indicates whether or not to prefix every node.
prefix_onchange modifies the behavior of with_prefixes and
only adds prefixes when the prefix changes mid-XPath.
prefix_to_module replaces prefixes with the module name of the prefix.
resolve_top_prefix_to_module resolves the module-level prefix
to the module name.
Prefixes may be included in the path if the prefix changes mid-path.
"""
resolved_names = mk_path_list(stmt)
xpath_elements = []
last_prefix = None
for index, resolved_name in enumerate(resolved_names):
module_name, prefix, node_name = resolved_name
xpath_element = node_name
if with_prefixes or (prefix_onchange and prefix != last_prefix):
new_prefix = prefix
if (prefix_to_module or
(index == 0 and resolve_top_prefix_to_module)):
new_prefix = module_name
xpath_element = '%s:%s' % (new_prefix, node_name)
xpath_elements.append(xpath_element)
last_prefix = prefix
return '/%s' % '/'.join(xpath_elements) | 0.001499 |
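A self-contained sketch of the prefix-on-change joining that mk_path_str performs, driven by a hardcoded list of (module, prefix, node) tuples rather than a real pyang statement tree; the module and prefix names are made up for the example:

def join_path(resolved_names, prefix_onchange=True):
    # Emit a 'prefix:node' element only when the prefix changes mid-path.
    elements, last_prefix = [], None
    for module_name, prefix, node_name in resolved_names:
        element = node_name
        if prefix_onchange and prefix != last_prefix:
            element = '%s:%s' % (prefix, node_name)
        elements.append(element)
        last_prefix = prefix
    return '/' + '/'.join(elements)

names = [('ietf-interfaces', 'if', 'interfaces'),
         ('ietf-interfaces', 'if', 'interface'),
         ('ietf-ip', 'ip', 'ipv4')]
assert join_path(names) == '/if:interfaces/interface/ip:ipv4'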
def run_preassembly(stmts_in, **kwargs):
"""Run preassembly on a list of statements.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to preassemble.
return_toplevel : Optional[bool]
If True, only the top-level statements are returned. If False,
all statements are returned irrespective of level of specificity.
Default: True
poolsize : Optional[int]
The number of worker processes to use to parallelize the
comparisons performed by the function. If None (default), no
parallelization is performed. NOTE: Parallelization is only
available on Python 3.4 and above.
size_cutoff : Optional[int]
Groups with size_cutoff or more statements are sent to worker
processes, while smaller groups are compared in the parent process.
Default value is 100. Not relevant when parallelization is not
used.
belief_scorer : Optional[indra.belief.BeliefScorer]
Instance of BeliefScorer class to use in calculating Statement
probabilities. If None is provided (default), then the default
scorer is used.
hierarchies : Optional[dict]
Dict of hierarchy managers to use for preassembly
flatten_evidence : Optional[bool]
If True, evidences are collected and flattened via supports/supported_by
links. Default: False
flatten_evidence_collect_from : Optional[str]
String indicating whether to collect and flatten evidence from the
`supports` attribute of each statement or the `supported_by` attribute.
If not set, defaults to 'supported_by'.
Only relevant when flatten_evidence is True.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
save_unique : Optional[str]
The name of a pickle file to save the unique statements into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of preassembled top-level statements.
"""
dump_pkl_unique = kwargs.get('save_unique')
belief_scorer = kwargs.get('belief_scorer')
use_hierarchies = kwargs['hierarchies'] if 'hierarchies' in kwargs else \
hierarchies
be = BeliefEngine(scorer=belief_scorer)
    pa = Preassembler(use_hierarchies, stmts_in)
run_preassembly_duplicate(pa, be, save=dump_pkl_unique)
dump_pkl = kwargs.get('save')
return_toplevel = kwargs.get('return_toplevel', True)
poolsize = kwargs.get('poolsize', None)
size_cutoff = kwargs.get('size_cutoff', 100)
options = {'save': dump_pkl, 'return_toplevel': return_toplevel,
'poolsize': poolsize, 'size_cutoff': size_cutoff,
'flatten_evidence': kwargs.get('flatten_evidence', False),
'flatten_evidence_collect_from':
kwargs.get('flatten_evidence_collect_from', 'supported_by')
}
stmts_out = run_preassembly_related(pa, be, **options)
return stmts_out | 0.000663 |
def _plt_gogrouped(self, goids, go2color_usr, **kws):
"""Plot grouped GO IDs."""
fout_img = self.get_outfile(kws['outfile'], goids)
sections = read_sections(kws['sections'], exclude_ungrouped=True)
# print ("KWWSSSSSSSS", kws)
# kws_plt = {k:v for k, v in kws.items if k in self.kws_plt}
grprobj_cur = self._get_grprobj(goids, sections)
# GO: purple=hdr-only, green=hdr&usr, yellow=usr-only
# BORDER: Black=hdr Blu=hdr&usr
grpcolor = GrouperColors(grprobj_cur) # get_bordercolor get_go2color_users
grp_go2color = grpcolor.get_go2color_users()
grp_go2bordercolor = grpcolor.get_bordercolor()
for goid, color in go2color_usr.items():
grp_go2color[goid] = color
objcolor = Go2Color(self.gosubdag, objgoea=None,
go2color=grp_go2color, go2bordercolor=grp_go2bordercolor)
go2txt = GrouperPlot.get_go2txt(grprobj_cur, grp_go2color, grp_go2bordercolor)
objplt = GoSubDagPlot(self.gosubdag, Go2Color=objcolor, go2txt=go2txt, **kws)
objplt.prt_goids(sys.stdout)
objplt.plt_dag(fout_img)
sys.stdout.write("{N:>6} sections read\n".format(
N="NO" if sections is None else len(sections)))
return fout_img | 0.004633 |
def get_lib_ffi_resource(module_name, libpath, c_hdr):
'''
module_name-->str: module name to retrieve resource
libpath-->str: shared library filename with optional path
c_hdr-->str: C-style header definitions for functions to wrap
Returns-->(ffi, lib)
Use this method when you are loading a package-specific shared library
If you want to load a system-wide shared library, use get_lib_ffi_shared
instead
'''
lib = SharedLibWrapper(libpath, c_hdr, module_name=module_name)
ffi = lib.ffi
return (ffi, lib) | 0.001812 |
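The wrapper above builds on cffi's cdef/dlopen pattern; a rough sketch of that underlying pattern, assuming the cffi package is installed and a system libm is available (the library name is platform-specific and illustrative only):

from cffi import FFI

ffi = FFI()
ffi.cdef("double cos(double x);")   # C-style header for the function to wrap
lib = ffi.dlopen("libm.so.6")       # e.g. glibc's math library on Linux
print(lib.cos(0.0))                 # 1.0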
def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
) | 0.008333 |
def __convertChannelMask(self, channelsArray):
"""convert channelsArray to bitmask format
Args:
channelsArray: channel array (i.e. [21, 22])
Returns:
bitmask format corresponding to a given channel array
"""
maskSet = 0
for eachChannel in channelsArray:
mask = 1 << eachChannel
maskSet = (maskSet | mask)
return maskSet | 0.004684 |
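The mask construction is simply OR-ing 1 << channel for every entry; a standalone sketch:

def channel_mask(channels):
    # Set bit N for every channel N in the input array.
    mask = 0
    for ch in channels:
        mask |= 1 << ch
    return mask

assert channel_mask([21, 22]) == (1 << 21) | (1 << 22)
assert bin(channel_mask([0, 1, 3])) == '0b1011'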
def _generate_trials(self, unresolved_spec, output_path=""):
"""Generates Trial objects with the variant generation process.
Uses a fixed point iteration to resolve variants. All trials
should be able to be generated at once.
See also: `ray.tune.suggest.variant_generator`.
Yields:
Trial object
"""
if "run" not in unresolved_spec:
raise TuneError("Must specify `run` in {}".format(unresolved_spec))
for _ in range(unresolved_spec.get("num_samples", 1)):
for resolved_vars, spec in generate_variants(unresolved_spec):
experiment_tag = str(self._counter)
if resolved_vars:
experiment_tag += "_{}".format(resolved_vars)
self._counter += 1
yield create_trial_from_spec(
spec,
output_path,
self._parser,
experiment_tag=experiment_tag) | 0.002004 |
def parse_operations(self):
"""
Flatten routes into a path -> method -> route structure
"""
resource_defs = {
getmeta(resources.Error).resource_name: resource_definition(resources.Error),
getmeta(resources.Listing).resource_name: resource_definition(resources.Listing),
}
paths = collections.OrderedDict()
for path, operation in self.parent.op_paths():
            # Cut off the first item (it will be the parent's path)
path = '/' + path[1:] # type: UrlPath
# Filter out swagger endpoints
if self.SWAGGER_TAG in operation.tags:
continue
# Add to resource definitions
if operation.resource:
resource_defs[getmeta(operation.resource).resource_name] = resource_definition(operation.resource)
# Add any resource definitions from responses
if operation.responses:
for response in operation.responses:
resource = response.resource
# Ensure we have a resource
if resource and resource is not DefaultResource:
resource_name = getmeta(resource).resource_name
# Don't generate a resource definition if one has already been created.
if resource_name not in resource_defs:
resource_defs[resource_name] = resource_definition(resource)
# Add path parameters
path_spec = paths.setdefault(path.format(self.swagger_node_formatter), {})
# Add parameters
parameters = self.generate_parameters(path)
if parameters:
path_spec['parameters'] = parameters
# Add methods
for method in operation.methods:
path_spec[method.value.lower()] = operation.to_swagger()
return paths, resource_defs | 0.004082 |
def do_connected(self, args):
'''Find a connected component from positive labels on an item.'''
connected = self.label_store.connected_component(args.content_id)
for label in connected:
self.stdout.write('{0}\n'.format(label)) | 0.007634 |
def _merge_skyline(self, skylineq, segment):
"""
Arguments:
skylineq (collections.deque):
segment (HSegment):
"""
if len(skylineq) == 0:
skylineq.append(segment)
return
if skylineq[-1].top == segment.top:
s = skylineq[-1]
skylineq[-1] = HSegment(s.start, s.length+segment.length)
else:
skylineq.append(segment) | 0.004515 |
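HSegment and the deque come from the surrounding skyline-packing code; a self-contained sketch of the merge rule (extend the last segment when the tops match, otherwise append), using a namedtuple stand-in where start is an (x, y) pair and y plays the role of .top:

import collections

HSegment = collections.namedtuple('HSegment', ['start', 'length'])

def merge_skyline(skylineq, segment):
    # Same-height neighbours are fused into one longer segment.
    if skylineq and skylineq[-1].start[1] == segment.start[1]:
        s = skylineq[-1]
        skylineq[-1] = HSegment(s.start, s.length + segment.length)
    else:
        skylineq.append(segment)

q = collections.deque([HSegment((0, 10), 5)])
merge_skyline(q, HSegment((5, 10), 3))   # same top -> merged, length 8
merge_skyline(q, HSegment((8, 12), 2))   # different top -> appended
assert q[0].length == 8 and len(q) == 2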
def union(seq1=(), *seqs):
r"""Return the set union of `seq1` and `seqs`, duplicates removed, order random.
Examples:
>>> union()
[]
>>> union([1,2,3])
[1, 2, 3]
>>> union([1,2,3], {1:2, 5:1})
[1, 2, 3, 5]
>>> union((1,2,3), ['a'], "bcd")
['a', 1, 2, 3, 'd', 'b', 'c']
>>> union([1,2,3], iter([0,1,1,1]))
[0, 1, 2, 3]
"""
if not seqs: return list(seq1)
res = set(seq1)
for seq in seqs:
res.update(set(seq))
return list(res) | 0.005988 |
def intersect_keys(keys, reffile, cache=False, clean_accs=False):
"""Extract SeqRecords from the index by matching keys.
keys - an iterable of sequence identifiers/accessions to select
reffile - name of a FASTA file to extract the specified sequences from
cache - save an index of the reference FASTA sequence offsets to disk?
clean_accs - strip HMMer extensions from sequence accessions?
"""
# Build/load the index of reference sequences
index = None
if cache:
refcache = reffile + '.sqlite'
if os.path.exists(refcache):
if os.stat(refcache).st_mtime < os.stat(reffile).st_mtime:
logging.warn("Outdated cache; rebuilding index")
else:
try:
index = (SeqIO.index_db(refcache,
key_function=clean_accession)
if clean_accs
else SeqIO.index_db(refcache))
except Exception:
logging.warn("Skipping corrupted cache; rebuilding index")
index = None
else:
refcache = ':memory:'
if index is None:
# Rebuild the index, for whatever reason
index = (SeqIO.index_db(refcache, [reffile], 'fasta',
key_function=clean_accession)
if clean_accs
else SeqIO.index_db(refcache, [reffile], 'fasta'))
# Extract records by key
if clean_accs:
keys = (clean_accession(k) for k in keys)
for key in keys:
try:
record = index[key]
except LookupError:
# Missing keys are rare, so it's faster not to check every time
logging.info("No match: %s", repr(key))
continue
yield record | 0.000548 |
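A rough usage sketch of the underlying Biopython indexing, assuming Biopython is installed; it uses an in-memory SeqIO.index over a temporary FASTA instead of the sqlite-backed index_db, and the clean_accession stand-in (strip a HMMer-style '/start-end' suffix) is an assumption based on the docstring:

import os, tempfile
from Bio import SeqIO

def clean_accession(key):
    # Hypothetical stand-in: drop anything after the first '/'.
    return key.split('/')[0]

with tempfile.NamedTemporaryFile('w', suffix='.fasta', delete=False) as fh:
    fh.write(">seq1/1-50\nACGTACGT\n>seq2\nTTTTCCCC\n")
    path = fh.name

index = SeqIO.index(path, 'fasta', key_function=clean_accession)
assert str(index['seq1'].seq) == 'ACGTACGT'
index.close()
os.unlink(path)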
def response(self, model=None, code=HTTPStatus.OK, description=None, **kwargs):
"""
Endpoint response OpenAPI documentation decorator.
It automatically documents HTTPError%(code)d responses with relevant
schemas.
Arguments:
model (flask_marshmallow.Schema) - it can be a class or an instance
of the class, which will be used for OpenAPI documentation
purposes. It can be omitted if ``code`` argument is set to an
error HTTP status code.
code (int) - HTTP status code which is documented.
description (str)
Example:
>>> @namespace.response(BaseTeamSchema(many=True))
... @namespace.response(code=HTTPStatus.FORBIDDEN)
... def get_teams():
... if not user.is_admin:
... abort(HTTPStatus.FORBIDDEN)
... return Team.query.all()
"""
code = HTTPStatus(code)
if code is HTTPStatus.NO_CONTENT:
assert model is None
if model is None and code not in {HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT}:
if code.value not in http_exceptions.default_exceptions:
raise ValueError("`model` parameter is required for code %d" % code)
model = self.model(
name='HTTPError%d' % code,
model=DefaultHTTPErrorSchema(http_code=code)
)
if description is None:
description = code.description
def response_serializer_decorator(func):
"""
This decorator handles responses to serialize the returned value
with a given model.
"""
def dump_wrapper(*args, **kwargs):
# pylint: disable=missing-docstring
response = func(*args, **kwargs)
extra_headers = None
if response is None:
if model is not None:
raise ValueError("Response cannot not be None with HTTP status %d" % code)
return flask.Response(status=code)
elif isinstance(response, flask.Response) or model is None:
return response
elif isinstance(response, tuple):
response, _code, extra_headers = unpack(response)
else:
_code = code
if HTTPStatus(_code) is code:
response = model.dump(response).data
return response, _code, extra_headers
return dump_wrapper
def decorator(func_or_class):
if code.value in http_exceptions.default_exceptions:
# If the code is handled by raising an exception, it will
# produce a response later, so we don't need to apply a useless
# wrapper.
decorated_func_or_class = func_or_class
elif isinstance(func_or_class, type):
# Handle Resource classes decoration
# pylint: disable=protected-access
func_or_class._apply_decorator_to_methods(response_serializer_decorator)
decorated_func_or_class = func_or_class
else:
decorated_func_or_class = wraps(func_or_class)(
response_serializer_decorator(func_or_class)
)
if model is None:
api_model = None
else:
if isinstance(model, Model):
api_model = model
else:
api_model = self.model(model=model)
if getattr(model, 'many', False):
api_model = [api_model]
doc_decorator = self.doc(
responses={
code.value: (description, api_model)
}
)
return doc_decorator(decorated_func_or_class)
return decorator | 0.0015 |
def update_model(self, url, text):
""" Update prediction model with a page by given url and text content.
Return a list of item duplicates (for testing purposes).
"""
min_hash = get_min_hash(text, self.too_common_shingles, self.num_perm)
item_url = canonicalize_url(url)
item_path, item_query = _parse_url(item_url)
all_duplicates = [
(url, self.seen_urls[url]) for url in self.lsh.query(min_hash)]
duplicates = [(url, m.query) for url, m in all_duplicates
if m.path == item_path]
# Hypothesis (1) - just paths
n_path_nodup = self._nodup_filter(min_hash, (
self.urls_by_path.get(item_path, set())
.difference(url for url, _ in duplicates)))
self.path_dupstats[item_path].update(len(duplicates), n_path_nodup)
# Other hypotheses, if param is in the query
for param, value in item_query.items():
self._update_with_param(
duplicates, min_hash, item_path, item_query, param, [value])
# Other hypotheses, if param is not in the query
for param in (
self.params_by_path.get(item_path, set()) - set(item_query)):
self._update_with_param(
duplicates, min_hash, item_path, item_query, param,
self.param_values.get((item_path, param), set()))
# Update indexes
for param, value in item_query.items():
self.urls_by_path_q[item_path, _q_key(item_query)].add(item_url)
item_qwp_key = _q_key(_without_key(item_query, param))
self.urls_by_path_qwp[item_path, param, item_qwp_key].add(item_url)
self.params_by_path[item_path].add(param)
self.param_values[item_path, param].add(value)
if not item_query:
self.urls_by_path_q[item_path, ()].add(item_url)
self.urls_by_path[item_path].add(item_url)
if item_url in self.lsh:
self.lsh.remove(item_url)
self.lsh.insert(item_url, min_hash)
self.seen_urls[item_url] = URLMeta(item_path, item_query, min_hash)
if len(self.seen_urls) % 100 == 0:
self.log_dupstats()
return all_duplicates | 0.000892 |
async def async_send(self, request, **kwargs):
"""Prepare and send request object according to configuration.
:param ClientRequest request: The request object to be sent.
:param dict headers: Any headers to add to the request.
:param content: Any body data to add to the request.
:param config: Any specific config overrides
"""
kwargs.setdefault('stream', True)
# In the current backward compatible implementation, return the HTTP response
# and plug context inside. Could be remove if we modify Autorest,
# but we still need it to be backward compatible
pipeline_response = await self.config.pipeline.run(request, **kwargs)
response = pipeline_response.http_response
response.context = pipeline_response.context
return response | 0.003563 |
def set_mounted(unitname: str, mounted: bool, swallow_exc: bool = False):
""" Mount or unmount a unit.
    Worker for the mount/unmount context-manager helpers.
:param unitname: The systemd unit for the mount to affect. This probably
should be one of :py:attr:`_SYSROOT_INACTIVE_UNIT` or
:py:attr:`_BOOT_UNIT` but it could be anything
:param mounted: ``True`` to start the mount unit, ``False`` to stop it
:param swallow_exc: ``True`` to capture all exceptions, ``False`` to pass
them upwards. This is useful for when you don't super
care about the success of the mount, like when trying
to restore the system after you write the boot part
"""
try:
if mounted:
LOG.info(f"Starting {unitname}")
interface().StartUnit(unitname, 'replace')
LOG.info(f"Started {unitname}")
else:
LOG.info(f"Stopping {unitname}")
interface().StopUnit(unitname, 'replace')
LOG.info(f"Stopped {unitname}")
except Exception:
LOG.info(
f"Exception {'starting' if mounted else 'stopping'} {unitname}")
if not swallow_exc:
raise | 0.000803 |
def load_ndarray_file(nd_bytes):
"""Load ndarray file and return as list of numpy array.
Parameters
----------
nd_bytes : str or bytes
The internal ndarray bytes
Returns
-------
out : dict of str to numpy array or list of numpy array
The output list or dict, depending on whether the saved type is list or dict.
"""
handle = NDListHandle()
olen = mx_uint()
nd_bytes = bytearray(nd_bytes)
ptr = (ctypes.c_char * len(nd_bytes)).from_buffer(nd_bytes)
_check_call(_LIB.MXNDListCreate(
ptr, len(nd_bytes),
ctypes.byref(handle), ctypes.byref(olen)))
keys = []
arrs = []
for i in range(olen.value):
key = ctypes.c_char_p()
cptr = mx_float_p()
pdata = ctypes.POINTER(mx_uint)()
ndim = mx_uint()
_check_call(_LIB.MXNDListGet(
handle, mx_uint(i), ctypes.byref(key),
ctypes.byref(cptr), ctypes.byref(pdata), ctypes.byref(ndim)))
shape = tuple(pdata[:ndim.value])
dbuffer = (mx_float * np.prod(shape)).from_address(ctypes.addressof(cptr.contents))
ret = np.frombuffer(dbuffer, dtype=np.float32).reshape(shape)
ret = np.array(ret, dtype=np.float32)
keys.append(py_str(key.value))
arrs.append(ret)
_check_call(_LIB.MXNDListFree(handle))
if len(keys) == 0 or len(keys[0]) == 0:
return arrs
else:
return {keys[i] : arrs[i] for i in range(len(keys))} | 0.002716 |
def exit_with_error(self, error, **kwargs):
"""Report an error and exit.
This raises a SystemExit exception to ask the interpreter to quit.
Parameters
----------
error: string
The error to report before quitting.
"""
self.error(error, **kwargs)
raise SystemExit(error) | 0.005764 |
def ToLatLng(self):
"""
    Returns the latitude and longitude that this point represents
under a spherical Earth model.
"""
rad_lat = math.atan2(self.z, math.sqrt(self.x * self.x + self.y * self.y))
rad_lng = math.atan2(self.y, self.x)
return (rad_lat * 180.0 / math.pi, rad_lng * 180.0 / math.pi) | 0.003096 |
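The conversion is two atan2 calls on the Cartesian components; a standalone sketch for a unit-sphere point:

import math

def to_lat_lng(x, y, z):
    # Unit-sphere (x, y, z) -> (latitude, longitude) in degrees.
    rad_lat = math.atan2(z, math.sqrt(x * x + y * y))
    rad_lng = math.atan2(y, x)
    return math.degrees(rad_lat), math.degrees(rad_lng)

def close(a, b, tol=1e-9):
    return abs(a - b) < tol

lat, lng = to_lat_lng(0.0, 0.0, 1.0)       # north pole
assert close(lat, 90.0) and close(lng, 0.0)
lat, lng = to_lat_lng(0.0, 1.0, 0.0)       # equator, 90 degrees east
assert close(lat, 0.0) and close(lng, 90.0)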
def fromsegwizard(file, coltype = int, strict = True):
"""
Read a segmentlist from the file object file containing a segwizard
compatible segment list. Parsing stops on the first line that
cannot be parsed (which is consumed). The segmentlist will be
created with segment whose boundaries are of type coltype, which
should raise ValueError if it cannot convert its string argument.
Two-column, three-column, and four-column segwizard files are
recognized, but the entire file must be in the same format, which
is decided by the first parsed line. If strict is True and the
file is in three- or four-column format, then each segment's
duration is checked against that column in the input file.
NOTE: the output is a segmentlist as described by the file; if
the segments in the input file are not coalesced or out of order,
then thusly shall be the output of this function. It is
recommended that this function's output be coalesced before use.
"""
commentpat = re.compile(r"\s*([#;].*)?\Z", re.DOTALL)
twocolsegpat = re.compile(r"\A\s*([\d.+-eE]+)\s+([\d.+-eE]+)\s*\Z")
threecolsegpat = re.compile(r"\A\s*([\d.+-eE]+)\s+([\d.+-eE]+)\s+([\d.+-eE]+)\s*\Z")
fourcolsegpat = re.compile(r"\A\s*([\d]+)\s+([\d.+-eE]+)\s+([\d.+-eE]+)\s+([\d.+-eE]+)\s*\Z")
format = None
l = segments.segmentlist()
for line in file:
line = commentpat.split(line)[0]
if not line:
continue
try:
[tokens] = fourcolsegpat.findall(line)
num = int(tokens[0])
seg = segments.segment(map(coltype, tokens[1:3]))
duration = coltype(tokens[3])
this_line_format = 4
except ValueError:
try:
[tokens] = threecolsegpat.findall(line)
seg = segments.segment(map(coltype, tokens[0:2]))
duration = coltype(tokens[2])
this_line_format = 3
except ValueError:
try:
[tokens] = twocolsegpat.findall(line)
seg = segments.segment(map(coltype, tokens[0:2]))
duration = abs(seg)
this_line_format = 2
except ValueError:
break
if strict:
if abs(seg) != duration:
raise ValueError("segment '%s' has incorrect duration" % line)
if format is None:
format = this_line_format
elif format != this_line_format:
raise ValueError("segment '%s' format mismatch" % line)
l.append(seg)
return l | 0.027839 |
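The segments types above come from the LAL/ligo ecosystem; a simplified standalone sketch parses just the two-column form into plain (start, end) tuples, reusing the same comment-stripping regex:

import re

comment = re.compile(r"\s*([#;].*)?\Z", re.DOTALL)
two_col = re.compile(r"\A\s*([\d.+-eE]+)\s+([\d.+-eE]+)\s*\Z")

def parse_two_column(lines, coltype=int):
    out = []
    for line in lines:
        line = comment.split(line)[0]   # drop '#' / ';' comments
        if not line:
            continue
        [tokens] = two_col.findall(line)
        out.append((coltype(tokens[0]), coltype(tokens[1])))
    return out

text = ["# start stop", "100 200", "250 300  ; trailing comment"]
assert parse_two_column(text) == [(100, 200), (250, 300)]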
def nodes_on_wire(self, wire, only_ops=False):
"""
Iterator for nodes that affect a given wire
Args:
wire (tuple(Register, index)): the wire to be looked at.
only_ops (bool): True if only the ops nodes are wanted
otherwise all nodes are returned.
Yield:
DAGNode: the successive ops on the given wire
Raises:
DAGCircuitError: if the given wire doesn't exist in the DAG
"""
current_node = self.input_map.get(wire, None)
if not current_node:
raise DAGCircuitError('The given wire %s is not present in the circuit'
% str(wire))
more_nodes = True
while more_nodes:
more_nodes = False
# allow user to just get ops on the wire - not the input/output nodes
if current_node.type == 'op' or not only_ops:
yield current_node
# find the adjacent node that takes the wire being looked at as input
for node, edges in self._multi_graph.adj[current_node].items():
if any(wire == edge['wire'] for edge in edges.values()):
current_node = node
more_nodes = True
break | 0.003834 |