text (string, lengths 78-104k) | score (float64, 0-0.18)
---|---|
def model_segments(copy_file, work_dir, paired):
"""Perform segmentation on input copy number log2 ratio file.
"""
out_file = os.path.join(work_dir, "%s.cr.seg" % dd.get_sample_name(paired.tumor_data))
tumor_counts, normal_counts = heterogzygote_counts(paired)
if not utils.file_exists(out_file):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
params = ["-T", "ModelSegments",
"--denoised-copy-ratios", copy_file,
"--allelic-counts", tumor_counts,
"--output-prefix", dd.get_sample_name(paired.tumor_data),
"-O", os.path.dirname(tx_out_file)]
if normal_counts:
params += ["--normal-allelic-counts", normal_counts]
_run_with_memory_scaling(params, tx_out_file, paired.tumor_data)
for tx_fname in glob.glob(os.path.join(os.path.dirname(tx_out_file),
"%s*" % dd.get_sample_name(paired.tumor_data))):
shutil.copy(tx_fname, os.path.join(work_dir, os.path.basename(tx_fname)))
return {"seg": out_file, "tumor_hets": out_file.replace(".cr.seg", ".hets.tsv"),
"final_seg": out_file.replace(".cr.seg", ".modelFinal.seg")} | 0.004637 |
def is_merged(sheet, row, column):
"""
    Check whether the cell at (row, column) lies inside a merged cell range
"""
for cell_range in sheet.merged_cells:
row_low, row_high, column_low, column_high = cell_range
if (row in range(row_low, row_high)) and \
(column in range(column_low, column_high)):
            # TODO: is this check necessary?
if ((column_high - column_low) < sheet.ncols - 1) and \
((row_high - row_low) < sheet.nrows - 1):
return (True, cell_range)
return False | 0.001825 |
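A minimal usage sketch for is_merged, assuming an xlrd-style sheet whose merged_cells entries are (row_low, row_high, column_low, column_high) half-open ranges; the FakeSheet stand-in and its sample values are hypothetical.

from collections import namedtuple

# Hypothetical stand-in for an xlrd sheet; only the attributes
# is_merged() touches are provided.
FakeSheet = namedtuple("FakeSheet", ["merged_cells", "nrows", "ncols"])

sheet = FakeSheet(merged_cells=[(0, 2, 0, 3)], nrows=10, ncols=8)
print(is_merged(sheet, 1, 2))  # (True, (0, 2, 0, 3)) -- inside the merged block
print(is_merged(sheet, 5, 5))  # False -- outside any merged range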
def azureTables(self, *args, **kwargs):
"""
List Tables in an Account Managed by Auth
Retrieve a list of all tables in an account.
This method gives output: ``v1/azure-table-list-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs) | 0.005602 |
def draw_transition(self, from_pos, to_pos, width, waypoints=None, selected=False, depth=0):
"""Draw a state with the given properties
This method is called by the controller to draw the specified transition.
:param tuple from_pos: Starting position
:param tuple to_pos: Ending position
:param float width: A measure for the width of a transition line
:param list waypoints: A list of optional waypoints to connect in between
    :param bool selected: Whether the transition shall be shown as active/selected
:param float depth: The Z layer
:return: The OpenGL id of the transition
:rtype: int
"""
if not waypoints:
waypoints = []
# "Generate" unique ID for each object
id = self.name_counter
self.name_counter += 1
glPushName(id)
self._set_closest_stroke_width(width)
color = self.transition_color if not selected else self.transition_selected_color
color.set()
points = [from_pos]
points.extend(waypoints)
last_p = to_pos # Transition endpoint
sec_last_p = points[len(points) - 1] # Point before endpoint
# Calculate max possible arrow length
length = min(width, dist(sec_last_p, last_p) / 2.)
mid, p2, p3 = self._calculate_arrow_points(last_p, sec_last_p, length)
self._draw_triangle(last_p, p2, p3, depth, fill_color=color)
points.append(mid)
# Draw the transitions as simple straight line connecting start- way- and endpoints
glBegin(GL_LINE_STRIP)
for point in points:
glVertex3f(point[0], point[1], depth)
glEnd()
self._set_closest_stroke_width(width / 1.5)
for waypoint in waypoints:
self._draw_circle(waypoint[0], waypoint[1], width / 6., depth + 1, fill_color=color)
glPopName()
return id | 0.004661 |
def detach_lb_from_subnets(self, name, subnets):
"""
Detaches load balancer from one or more subnets.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to detach.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName' : name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
        return self.get_list('DetachLoadBalancerFromSubnets',
params,
None) | 0.007215 |
def add_requirement_libs_from(self, req_libs, platforms=None):
"""Multi-platform dependency resolution for PEX files.
:param builder: Dump the requirements into this builder.
:param interpreter: The :class:`PythonInterpreter` to resolve requirements for.
:param req_libs: A list of :class:`PythonRequirementLibrary` targets to resolve.
:param log: Use this logger.
:param platforms: A list of :class:`Platform`s to resolve requirements for.
Defaults to the platforms specified by PythonSetup.
"""
reqs = [req for req_lib in req_libs for req in req_lib.requirements]
self.add_resolved_requirements(reqs, platforms=platforms) | 0.004399 |
def _get_date(self, decrypted_content):
"""This method is used to decode the packed dates of entries"""
# Just copied from original KeePassX source
date_field = struct.unpack('<5B', decrypted_content[:5])
dw1 = date_field[0]
dw2 = date_field[1]
dw3 = date_field[2]
dw4 = date_field[3]
dw5 = date_field[4]
y = (dw1 << 6) | (dw2 >> 2)
mon = ((dw2 & 0x03) << 2) | (dw3 >> 6)
d = (dw3 >> 1) & 0x1F
h = ((dw3 & 0x01) << 4) | (dw4 >> 4)
min_ = ((dw4 & 0x0F) << 2) | (dw5 >> 6)
s = dw5 & 0x3F
return datetime(y, mon, d, h, min_, s) | 0.004587 |
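For illustration, a sketch of the inverse operation: packing a datetime into the same 5-byte layout (14-bit year, 4-bit month, 5-bit day, 5-bit hour, 6-bit minute, 6-bit second). pack_date is a hypothetical helper, not part of the original module.

import struct
from datetime import datetime

def pack_date(dt):
    # Inverse of the unpacking above: spread year/month/day/hour/minute/second
    # across five bytes using the same bit boundaries.
    y, mon, d, h, mi, s = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second
    dw1 = (y >> 6) & 0xFF
    dw2 = ((y & 0x3F) << 2) | (mon >> 2)
    dw3 = ((mon & 0x03) << 6) | (d << 1) | (h >> 4)
    dw4 = ((h & 0x0F) << 4) | (mi >> 2)
    dw5 = ((mi & 0x03) << 6) | s
    return struct.pack('<5B', dw1, dw2, dw3, dw4, dw5)

packed = pack_date(datetime(2024, 3, 15, 12, 30, 45))
# Running `packed` through the unpacking logic above recovers the same datetime.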
def save(self, idempotency_key=None):
"""Return a deferred."""
updated_params = self.serialize(None)
headers = populate_headers(idempotency_key)
if not updated_params:
util.logger.debug("Trying to save already saved object %r", self)
return defer.succeed(self)
d = self.request('post', self.instance_url(), updated_params, headers)
return d.addCallback(self.refresh_from).addCallback(lambda _: self) | 0.004228 |
def authenticate(self, username, password):
"""
Authenticate user on server.
:param username: Username used to be authenticated.
:type username: six.string_types
:param password: Password used to be authenticated.
:type password: six.string_types
:return: True if successful.
:raises: InvalidCredentials, AuthenticationNotSupported, MemcachedException
:rtype: bool
"""
self._username = username
self._password = password
# Reopen the connection with the new credentials.
self.disconnect()
self._open_connection()
return self.authenticated | 0.004491 |
def handler(self):
"""returns the handler"""
if self._handler is None:
self._handler = self.HTTPSClientAuthHandler(key=self._keyfile,
cert=self._certificatefile)
return self._handler | 0.010949 |
async def nodes(self, *,
dc=None, near=None, watch=None, consistency=None):
"""Lists nodes in a given DC
Parameters:
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
near (str): Sort the node list in ascending order based on the
estimated round trip time from that node.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
CollectionMeta: where value is a list
It returns a body like this::
[
{
"Node": "baz",
"Address": "10.1.10.11",
"TaggedAddresses": {
"lan": "10.1.10.11",
"wan": "10.1.10.11"
}
},
{
"Node": "foobar",
"Address": "10.1.10.12",
"TaggedAddresses": {
"lan": "10.1.10.11",
"wan": "10.1.10.12"
}
}
]
"""
params = {"dc": dc, "near": near}
response = await self._api.get("/v1/catalog/nodes",
params=params,
watch=watch,
consistency=consistency)
return consul(response) | 0.002066 |
def of_file(self, abspath, nbytes=0):
"""Use default hash method to return hash value of a piece of a file
Estimate processing time on:
:param abspath: the absolute path to the file
:param nbytes: only has first N bytes of the file. if 0, hash all file
CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB
1 second can process 0.25GB data
- 0.59G - 2.43 sec
- 1.3G - 5.68 sec
- 1.9G - 7.72 sec
- 2.5G - 10.32 sec
- 3.9G - 16.0 sec
ATTENTION:
if you change the meta data (for example, the title, years
information in audio, video) of a multi-media file, then the hash
value gonna also change.
"""
if not os.path.exists(abspath):
raise FileNotFoundError(
"[Errno 2] No such file or directory: '%s'" % abspath)
m = self.default_hash_method()
with open(abspath, "rb") as f:
if nbytes:
data = f.read(nbytes)
if data:
m.update(data)
else:
while True:
data = f.read(self._chunk_size)
if not data:
break
m.update(data)
if self.return_int:
return int(m.hexdigest(), 16)
else:
return m.hexdigest() | 0.002861 |
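The same chunked-read pattern shown standalone with hashlib for clarity; md5_of_file and its default chunk size are illustrative and not part of the class above.

import hashlib

def md5_of_file(abspath, chunk_size=1 << 20):
    # Read the file in chunks so arbitrarily large files never have to fit in memory.
    m = hashlib.md5()
    with open(abspath, "rb") as f:
        while True:
            data = f.read(chunk_size)
            if not data:
                break
            m.update(data)
    return m.hexdigest()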
def relabel(self, label=None, group=None, depth=1):
"""Clone object and apply new group and/or label.
Applies relabeling to children up to the supplied depth.
Args:
label (str, optional): New label to apply to returned object
group (str, optional): New group to apply to returned object
depth (int, optional): Depth to which relabel will be applied
If applied to container allows applying relabeling to
contained objects up to the specified depth
Returns:
Returns relabelled object
"""
relabelled = super(DynamicMap, self).relabel(label, group, depth)
if depth > 0:
from ..util import Dynamic
def dynamic_relabel(obj, **dynkwargs):
return obj.relabel(group=group, label=label, depth=depth-1)
dmap = Dynamic(self, streams=self.streams, operation=dynamic_relabel)
dmap.data = relabelled.data
with util.disable_constant(dmap):
dmap.group = relabelled.group
dmap.label = relabelled.label
return dmap
return relabelled | 0.00339 |
def json2lte(self, filename):
""" convert json to lte
return tuple of json, lte file content
"""
data_json = open(filename, 'r').read().strip()
latins = lattice.Lattice(data_json)
self.lattice_instance = latins
self.all_beamlines = latins.getAllBl()
if self.use_beamline is None:
            self.use_beamline = 'BL' if 'BL' in self.all_beamlines else self.all_beamlines[0]
bl_ele_list = [latins.getFullBeamline(k, True)
for k in self.all_beamlines]
self.beamlines_dict = dict(zip(self.all_beamlines, bl_ele_list))
data_lte = latins.generateLatticeFile(self.use_beamline, 'sio')
return data_json, data_lte | 0.004011 |
def average_overlap_ratio(ref_intervals, est_intervals, matching):
"""Compute the Average Overlap Ratio between a reference and estimated
note transcription. Given a reference and corresponding estimated note,
their overlap ratio (OR) is defined as the ratio between the duration of
the time segment in which the two notes overlap and the time segment
spanned by the two notes combined (earliest onset to latest offset):
>>> OR = ((min(ref_offset, est_offset) - max(ref_onset, est_onset)) /
... (max(ref_offset, est_offset) - min(ref_onset, est_onset)))
The Average Overlap Ratio (AOR) is given by the mean OR computed over all
matching reference and estimated notes. The metric goes from 0 (worst) to 1
(best).
Note: this function assumes the matching of reference and estimated notes
(see :func:`match_notes`) has already been performed and is provided by the
``matching`` parameter. Furthermore, it is highly recommended to validate
the intervals (see :func:`validate_intervals`) before calling this
function, otherwise it is possible (though unlikely) for this function to
attempt a divide-by-zero operation.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``.
Returns
-------
avg_overlap_ratio : float
The computed Average Overlap Ratio score
"""
ratios = []
for match in matching:
ref_int = ref_intervals[match[0]]
est_int = est_intervals[match[1]]
overlap_ratio = (
(min(ref_int[1], est_int[1]) - max(ref_int[0], est_int[0])) /
(max(ref_int[1], est_int[1]) - min(ref_int[0], est_int[0])))
ratios.append(overlap_ratio)
if len(ratios) == 0:
return 0
else:
return np.mean(ratios) | 0.000461 |
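A worked toy example, assuming the function is importable as defined above: one reference note and one estimated note overlap for 0.5 s within a combined span of 1.5 s, so the AOR is 0.5 / 1.5, roughly 0.333.

import numpy as np

ref = np.array([[0.0, 1.0]])   # one reference note: onset 0.0, offset 1.0
est = np.array([[0.5, 1.5]])   # one estimated note: onset 0.5, offset 1.5
print(average_overlap_ratio(ref, est, matching=[(0, 0)]))  # ~0.3333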
def load_rsa_key(key, key_type, key_encoding):
# (bytes, EncryptionKeyType, KeyEncodingType) -> Any
# TODO: narrow down the output type
"""Load an RSA key object from the provided raw key bytes.
:param bytes key: Raw key bytes to load
:param EncryptionKeyType key_type: Type of key to load
:param KeyEncodingType key_encoding: Encoding used to serialize ``key``
:returns: Loaded key
:rtype: TODO:
:raises ValueError: if ``key_type`` and ``key_encoding`` are not a valid pairing
"""
try:
loader = _RSA_KEY_LOADING[key_type][key_encoding]
except KeyError:
raise ValueError("Invalid key type and encoding: {} and {}".format(key_type, key_encoding))
kwargs = dict(data=key, backend=default_backend())
if key_type is EncryptionKeyType.PRIVATE:
kwargs["password"] = None
loaded_key = loader(**kwargs)
if loaded_key.key_size < MinimumKeySizes.RSA.value:
_LOGGER.warning("RSA keys smaller than %d bits are unsafe" % MinimumKeySizes.RSA.value)
return loaded_key | 0.003784 |
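A plausible shape for the _RSA_KEY_LOADING table referenced above, built from the cryptography package's serialization loaders; the KeyEncodingType members shown and the exact mapping used by the real module are assumptions.

from cryptography.hazmat.primitives import serialization

# Hypothetical lookup table: key type -> encoding -> loader callable.
_RSA_KEY_LOADING = {
    EncryptionKeyType.PRIVATE: {
        KeyEncodingType.PEM: serialization.load_pem_private_key,
        KeyEncodingType.DER: serialization.load_der_private_key,
    },
    EncryptionKeyType.PUBLIC: {
        KeyEncodingType.PEM: serialization.load_pem_public_key,
        KeyEncodingType.DER: serialization.load_der_public_key,
    },
}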
def add_token_to_database(encoded_token, identity_claim):
"""
Adds a new token to the database. It is not revoked when it is added.
:param identity_claim:
"""
decoded_token = decode_token(encoded_token)
jti = decoded_token['jti']
token_type = decoded_token['type']
user_identity = decoded_token[identity_claim]
expires = _epoch_utc_to_datetime(decoded_token['exp'])
revoked = False
db_token = TokenBlacklist(
jti=jti,
token_type=token_type,
user_identity=user_identity,
expires=expires,
revoked=revoked,
)
db.session.add(db_token)
db.session.commit() | 0.001546 |
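One plausible implementation of the _epoch_utc_to_datetime helper used above, assuming the 'exp' claim holds a UTC epoch timestamp.

from datetime import datetime, timezone

def _epoch_utc_to_datetime(epoch_utc):
    # Convert a UTC epoch timestamp (seconds) to a timezone-aware datetime.
    return datetime.fromtimestamp(epoch_utc, tz=timezone.utc)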
def __response_url(self, message_id):
"""
URL for responding to agent requests.
"""
if self.from_.pid != 0:
path = AGENT_RESPONSE_PATH % (self.from_.pid, message_id)
return "http://%s:%s/%s" % (self.host, self.port, path) | 0.007299 |
def resolve(container, expression):
"""
Return the string that is the resolution of the alignment expression
`expression`, which selects ids from `container`.
"""
itemgetter = getattr(container, 'get_item', container.get)
tokens = []
expression = expression.strip()
for sel_delim, _id, _range in selection_re.findall(expression):
tokens.append(delimiters.get(sel_delim, ''))
item = itemgetter(_id)
if item is None:
raise XigtStructureError(
'Referred Item (id: {}) from reference "{}" does not '
'exist in the given container.'
.format(_id, expression)
)
# treat None values as empty strings for resolution
value = item.value() or ''
if _range:
for spn_delim, start, end in span_re.findall(_range):
start = int(start) if start else None
end = int(end) if end else None
tokens.extend([
delimiters.get(spn_delim, ''),
value[start:end]
])
else:
tokens.append(value)
return ''.join(tokens) | 0.00085 |
def serialize(self, raw=False):
        '''Encode the private part of the key in a base64 format by default,
        but when raw is True it will return the raw key bytes.
@return: bytes
'''
if raw:
return self._key.encode()
return self._key.encode(nacl.encoding.Base64Encoder) | 0.006231 |
def variance_at(self, singular_value):
"""get the error variance of all three terms at a singluar value
Parameters
----------
singular_value : int
singular value to test
Returns
-------
dict : dict
dictionary of (err var term,prediction_name), standard_deviation pairs
"""
results = {}
results.update(self.first_prediction(singular_value))
results.update(self.second_prediction(singular_value))
results.update(self.third_prediction(singular_value))
return results | 0.005051 |
def pid(name):
'''
Returns the PID of a container
name
Container name
CLI Example:
.. code-block:: bash
salt myminion nspawn.pid arch1
'''
try:
return int(info(name).get('PID'))
except (TypeError, ValueError) as exc:
raise CommandExecutionError(
'Unable to get PID for container \'{0}\': {1}'.format(name, exc)
) | 0.0025 |
def build(matrix):
"""Yield lines generated from given matrix"""
max_x = max(matrix, key=lambda t: t[0])[0]
min_x = min(matrix, key=lambda t: t[0])[0]
max_y = max(matrix, key=lambda t: t[1])[1]
min_y = min(matrix, key=lambda t: t[1])[1]
yield from (
# '{}:'.format(j).ljust(4) + ''.join(matrix[i, j] for i in range(min_x, max_x+1))
''.join(matrix[i, j] for i in range(min_x, max_x+1))
for j in range(min_y, max_y+1)
) | 0.004264 |
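A short usage sketch, assuming the matrix is a dict keyed by (x, y) tuples that covers every cell of its bounding box; the sample grid is hypothetical.

matrix = {(0, 0): '#', (1, 0): '.',
          (0, 1): '.', (1, 1): '#'}
for line in build(matrix):
    print(line)
# #.
# .#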
def update(self):
""" Update will try to update the target directory
w.r.t source directory. Only files that are common
to both directories will be updated, no new files
or directories are created """
self._copyfiles = False
self._updatefiles = True
self._purge = False
self._creatdirs = False
if self._verbose:
self.log('Updating directory %s with %s\n' %
(self._dir2, self._dir1))
self._dirdiffandupdate(self._dir1, self._dir2) | 0.003676 |
def get_iam_policy(self, client=None):
"""Retrieve the IAM policy for the bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy
If :attr:`user_project` is set, bills the API request to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`google.api_core.iam.Policy`
:returns: the policy instance, based on the resource returned from
the ``getIamPolicy`` API request.
"""
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params["userProject"] = self.user_project
info = client._connection.api_request(
method="GET",
path="%s/iam" % (self.path,),
query_params=query_params,
_target_object=None,
)
return Policy.from_api_repr(info) | 0.001779 |
def _serialize_data(self, data):
"""Return serialized data or list of ids, depending on `hydrate_data` query param."""
if self.request and self.request.query_params.get('hydrate_data', False):
serializer = DataSerializer(data, many=True, read_only=True)
serializer.bind('data', self)
return serializer.data
else:
return [d.id for d in data] | 0.009709 |
def vprint(msg, pretty=False):
"""
Print the provided string {msg}, but only when the --verbose option is on.
:param msg String to print.
:param pretty If on, then pprint() will be used instead of the regular print function.
"""
if not config["verbose"]:
return
if pretty:
pp(msg)
else:
print(msg) | 0.005525 |
def remove_assigned_resource(self, resource_type: str,
value: Union[str, int, float, bool] = None,
parameters: dict = None):
"""Remove assigned resources from the processing block.
All matching resources will be removed. If only type is specified
all resources of the specified type will be removed.
If value and/or parameters are specified they will be used
for matching the resource to remove.
Args:
resource_type (str): Resource type
value: Resource value
parameters (dict, optional): Parameters specific to the resource
"""
resources = DB.get_hash_value(self.key, 'resources_assigned')
resources = ast.literal_eval(resources)
new_resources = []
for resource in resources:
if resource['type'] != resource_type:
new_resources.append(resource)
elif value is not None and resource['value'] != value:
new_resources.append(resource)
elif parameters is not None and \
resource['parameters'] != parameters:
new_resources.append(resource)
DB.set_hash_value(self.key, 'resources_assigned', new_resources) | 0.00307 |
def ids(self):
"""
list[str]: All applicable IDs
"""
ids = []
try:
ids.append(self.election_group_id)
except ValueError:
pass
if isinstance(self.spec.subtypes, tuple):
try:
ids.append(self.subtype_group_id)
except ValueError:
pass
if self.spec.can_have_orgs:
try:
ids.append(self.organisation_group_id)
except ValueError:
pass
try:
if self.ballot_id not in ids:
ids.append(self.ballot_id)
except ValueError:
pass
return ids | 0.00289 |
def prune(self):
"""
On a subtree where the root node's s_center is empty,
return a new subtree with no empty s_centers.
"""
if not self[0] or not self[1]: # if I have an empty branch
direction = not self[0] # graft the other branch here
#if trace:
# print('Grafting {} branch'.format(
# 'right' if direction else 'left'))
result = self[direction]
#if result: result.verify()
return result
else:
# Replace the root node with the greatest predecessor.
heir, self[0] = self[0].pop_greatest_child()
#if trace:
# print('Replacing {} with {}.'.format(
# self.x_center, heir.x_center
# ))
# print('Removed greatest predecessor:')
# self.print_structure()
#if self[0]: self[0].verify()
#if self[1]: self[1].verify()
# Set up the heir as the new root node
(heir[0], heir[1]) = (self[0], self[1])
#if trace: print('Setting up the heir:')
#if trace: heir.print_structure()
# popping the predecessor may have unbalanced this node;
# fix it
heir.refresh_balance()
heir = heir.rotate()
#heir.verify()
#if trace: print('Rotated the heir:')
#if trace: heir.print_structure()
return heir | 0.007937 |
def signature(self):
"""The 32-byte ECC tag signature programmed at chip production. The
signature is provided as a string and can only be read.
The signature attribute is always loaded from the tag when it
is accessed, i.e. it is not cached. If communication with the
tag fails for some reason the signature attribute is set to a
32-byte string of all zeros.
"""
log.debug("read tag signature")
try:
return bytes(self.transceive(b"\x3C\x00"))
except tt2.Type2TagCommandError:
return 32 * b"\0" | 0.003333 |
def _read(self):
"""Read the kube config file.
"""
stream = self.path.read_text()
data = yaml.load(stream)
return data | 0.018868 |
def map_title(self):
"""Get the map title from the layer keywords if possible.
:returns: None on error, otherwise the title.
:rtype: None, str
"""
# noinspection PyBroadException
try:
title = self._keyword_io.read_keywords(
self.impact, 'map_title')
return title
except KeywordNotFoundError:
return None
except Exception: # pylint: disable=broad-except
return None | 0.004049 |
def get_account_metadata(self, prefix=None):
"""
Returns a dictionary containing metadata about the account.
"""
headers = self.get_account_headers()
if prefix is None:
prefix = ACCOUNT_META_PREFIX
low_prefix = prefix.lower()
ret = {}
for hkey, hval in list(headers.items()):
lowkey = hkey.lower()
if lowkey.startswith(low_prefix):
cleaned = hkey.replace(low_prefix, "").replace("-", "_")
ret[cleaned] = hval
return ret | 0.003578 |
def update_translations(condition=None):
"""
Updates FieldTranslations table
"""
if condition is None:
condition = {}
# Number of updated translations
num_translations = 0
# Module caching
FieldTranslation._init_module_cache()
# Current languages dict
LANGUAGES = dict(lang for lang in MODELTRANSLATION_LANG_CHOICES)
if settings.LANGUAGE_CODE in LANGUAGES:
del LANGUAGES[settings.LANGUAGE_CODE]
# For each module, we are going to update the translations
for key in FieldTranslation._modules.keys():
module = FieldTranslation._modules[key]
# Class of the module
clsmembers = inspect.getmembers(sys.modules[key], inspect.isclass)
for cls in clsmembers:
cls = cls[1]
            # If the model declares "translatable_fields" in its Meta, update those fields
if hasattr(cls,"_meta") and not cls._meta.abstract and hasattr(cls._meta,"translatable_fields") and len(cls._meta.translatable_fields)>0:
objects = cls.objects.filter(**condition)
# For each object, language and field are updated
for obj in objects:
for lang in LANGUAGES.keys():
for field in cls._meta.translatable_fields:
if FieldTranslation.update(obj=obj, field=field, lang=lang, context=""):
num_translations += 1
return num_translations | 0.03166 |
def _raise_on_bad_jar_filename(jar_filename):
"""Ensure that jar_filename is a valid path to a jar file."""
if jar_filename is None:
return
if not isinstance(jar_filename, string_type):
raise TypeError("jar_filename is not a string: %r" % jar_filename)
if not os.path.exists(jar_filename):
raise ValueError("jar_filename does not exist: %r" % jar_filename) | 0.004695 |
def cached_property(prop):
"""
A replacement for the property decorator that will only compute the
attribute's value on the first call and serve a cached copy from
then on.
"""
def cache_wrapper(self):
if not hasattr(self, "_cache"):
self._cache = {}
if prop.__name__ not in self._cache:
return_value = prop(self)
if isgenerator(return_value):
return_value = tuple(return_value)
self._cache[prop.__name__] = return_value
return self._cache[prop.__name__]
return property(cache_wrapper) | 0.001664 |
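A usage sketch for cached_property, assuming inspect.isgenerator is imported where the decorator is defined; the Circle class is purely illustrative.

class Circle(object):
    def __init__(self, radius):
        self.radius = radius

    @cached_property
    def area(self):
        print("computing...")        # runs only once per instance
        return 3.14159 * self.radius ** 2

c = Circle(2)
print(c.area)   # prints "computing..." then 12.56636
print(c.area)   # served from self._cache; no recomputation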
def create_stack(
self,
stack_name,
stack_template_name,
parameters=None,
capabilities=None
):
"""Create a stack using Amazon's Cloud formation"""
# Build template_path
stack_template_path = pathlib.Path(
self.template_dir).joinpath(stack_template_name)
# Create stack
create_stack(
stack_name,
stack_template_path,
parameters=parameters,
capabilities=capabilities
) | 0.005725 |
def _setup_metric_group_definitions(self):
"""
Return the dict of MetricGroupDefinition objects for this metrics
context, by processing its 'metric-group-infos' property.
"""
# Dictionary of MetricGroupDefinition objects, by metric group name
metric_group_definitions = dict()
for mg_info in self.properties['metric-group-infos']:
mg_name = mg_info['group-name']
mg_def = MetricGroupDefinition(
name=mg_name,
resource_class=_resource_class_from_group(mg_name),
metric_definitions=dict())
for i, m_info in enumerate(mg_info['metric-infos']):
m_name = m_info['metric-name']
m_def = MetricDefinition(
index=i,
name=m_name,
type=_metric_type(m_info['metric-type']),
unit=_metric_unit_from_name(m_name))
mg_def.metric_definitions[m_name] = m_def
metric_group_definitions[mg_name] = mg_def
return metric_group_definitions | 0.001813 |
def software_breakpoint_set(self, addr, thumb=False, arm=False, flash=False, ram=False):
"""Sets a software breakpoint at the specified address.
If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.
If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if
``ram`` is ``True``, the breakpoint is set in RAM. If both are
``True`` or both are ``False``, then the best option is chosen for
setting the breakpoint in software.
Args:
self (JLink): the ``JLink`` instance
addr (int): the address where the breakpoint will be set
thumb (bool): boolean indicating to set the breakpoint in THUMB mode
arm (bool): boolean indicating to set the breakpoint in ARM mode
flash (bool): boolean indicating to set the breakpoint in flash
ram (bool): boolean indicating to set the breakpoint in RAM
Returns:
            An integer specifying the breakpoint handle. This handle should be
retained for future breakpoint operations.
Raises:
TypeError: if the given address is not an integer.
JLinkException: if the breakpoint could not be set.
"""
if flash and not ram:
flags = enums.JLinkBreakpoint.SW_FLASH
elif not flash and ram:
flags = enums.JLinkBreakpoint.SW_RAM
else:
flags = enums.JLinkBreakpoint.SW
if thumb:
flags = flags | enums.JLinkBreakpoint.THUMB
elif arm:
flags = flags | enums.JLinkBreakpoint.ARM
handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)
if handle <= 0:
raise errors.JLinkException('Software breakpoint could not be set.')
return handle | 0.002103 |
def _add32(ins):
""" Pops last 2 bytes from the stack and adds them.
Then push the result onto the stack.
Optimizations:
* If any of the operands is ZERO,
then do NOTHING: A + 0 = 0 + A = A
"""
op1, op2 = tuple(ins.quad[2:])
if _int_ops(op1, op2) is not None:
o1, o2 = _int_ops(op1, op2)
if int(o2) == 0: # A + 0 = 0 + A = A => Do Nothing
output = _32bit_oper(o1)
output.append('push de')
output.append('push hl')
return output
if op1[0] == '_' and op2[0] != '_':
op1, op2 = op2, op1 # swap them
if op2[0] == '_':
output = _32bit_oper(op1)
output.append('ld bc, (%s)' % op2)
output.append('add hl, bc')
output.append('ex de, hl')
output.append('ld bc, (%s + 2)' % op2)
output.append('adc hl, bc')
output.append('push hl')
output.append('push de')
return output
output = _32bit_oper(op1, op2)
output.append('pop bc')
output.append('add hl, bc')
output.append('ex de, hl')
output.append('pop bc')
output.append('adc hl, bc')
output.append('push hl') # High and low parts are reversed
output.append('push de')
return output | 0.000796 |
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Same as parent but sets the widget for any OrderFields to
HiddenTextInput.
"""
if isinstance(db_field, fields.OrderField):
kwargs['widget'] = widgets.HiddenTextInput
return super(ListView, self).formfield_for_dbfield(db_field, **kwargs) | 0.00565 |
def get_terminal_size(defaultw=80):
""" Checks various methods to determine the terminal size
Methods:
- shutil.get_terminal_size (only Python3)
- fcntl.ioctl
- subprocess.check_output
- os.environ
Parameters
----------
defaultw : int
Default width of terminal.
Returns
-------
width, height : int
Width and height of the terminal. If one of them could not be
found, None is return in its place.
"""
if hasattr(shutil_get_terminal_size, "__call__"):
return shutil_get_terminal_size()
else:
try:
import fcntl, termios, struct
fd = 0
hw = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
return (hw[1], hw[0])
except:
try:
out = sp.check_output(["tput", "cols"])
width = int(out.decode("utf-8").strip())
return (width, None)
except:
try:
hw = (os.environ['LINES'], os.environ['COLUMNS'])
return (hw[1], hw[0])
except:
return (defaultw, None) | 0.004065 |
def delete(cls, object_version, key=None):
"""Delete tags.
:param object_version: The object version instance or id.
:param key: Key of the tag to delete.
Default: delete all tags.
"""
with db.session.begin_nested():
q = cls.query.filter_by(
version_id=as_object_version_id(object_version))
if key:
q = q.filter_by(key=key)
q.delete() | 0.004396 |
def consume(self, queue, consumer, consumer_tag='', no_local=False,
no_ack=True, exclusive=False, nowait=True, ticket=None,
cb=None, cancel_cb=None):
'''Start a queue consumer.
Accepts the following optional arg in addition to those of
`BasicClass.consume()`:
:param cancel_cb: a callable to be called when the broker cancels the
consumer; e.g., when the consumer's queue is deleted. See
www.rabbitmq.com/consumer-cancel.html.
:type cancel_cb: None or callable with signature cancel_cb(consumer_tag)
'''
# Register the consumer's broker-cancel callback entry
if cancel_cb is not None:
if not callable(cancel_cb):
raise ValueError('cancel_cb is not callable: %r' % (cancel_cb,))
if not consumer_tag:
consumer_tag = self._generate_consumer_tag()
self._broker_cancel_cb_map[consumer_tag] = cancel_cb
# Start consumer
super(RabbitBasicClass, self).consume(queue, consumer, consumer_tag,
no_local, no_ack, exclusive,
nowait, ticket, cb) | 0.004934 |
def run(self):
"""Run the monitor.
This function loops forever, checking for messages about dead database
clients and cleaning up state accordingly.
"""
# Initialize the subscription channel.
self.subscribe(ray.gcs_utils.XRAY_HEARTBEAT_BATCH_CHANNEL)
self.subscribe(ray.gcs_utils.XRAY_DRIVER_CHANNEL)
# TODO(rkn): If there were any dead clients at startup, we should clean
# up the associated state in the state tables.
# Handle messages from the subscription channels.
while True:
# Update the mapping from raylet client ID to IP address.
# This is only used to update the load metrics for the autoscaler.
self.update_raylet_map()
# Process autoscaling actions
if self.autoscaler:
self.autoscaler.update()
self._maybe_flush_gcs()
# Process a round of messages.
self.process_messages()
# Wait for a heartbeat interval before processing the next round of
# messages.
time.sleep(ray._config.heartbeat_timeout_milliseconds() * 1e-3) | 0.001706 |
def backward(A, pobs, T=None, beta_out=None, dtype=np.float32):
"""Compute all backward coefficients. With scaling!
Parameters
----------
A : ndarray((N,N), dtype = float)
transition matrix of the hidden states
    pobs : ndarray((T,N), dtype = float)
        pobs[t,i] is the observation probability for observation at time t given hidden state i
    T : int, optional, default = None
        trajectory length; if None, the number of rows of pobs is used
    beta_out : ndarray((T,N), dtype = float), optional, default = None
        container for the beta result variables. If None, a new container will be created.
    dtype : type, optional, default = np.float32
        data type of the result.
    Returns
    -------
    beta : ndarray((T,N), dtype = float)
beta[t,i] is the ith backward coefficient of time t. These can be
used in many different algorithms related to HMMs.
"""
# set T
if T is None:
T = pobs.shape[0] # if not set, use the length of pobs as trajectory length
elif T > pobs.shape[0]:
raise ValueError('T must be at most the length of pobs.')
# set N
N = A.shape[0]
# initialize output if necessary
if beta_out is None:
beta_out = np.zeros((T, N), dtype=dtype)
elif T > beta_out.shape[0]:
raise ValueError('beta_out must at least have length T in order to fit trajectory.')
# initialization
beta_out[T-1, :] = 1.0
# scaling factor
scale = np.sum(beta_out[T-1, :])
# scale
beta_out[T-1, :] /= scale
# induction
for t in range(T-2, -1, -1):
# beta_i(t) = sum_j A_i,j * beta_j(t+1) * B_j,ob(t+1)
np.dot(A, beta_out[t+1, :] * pobs[t+1, :], out=beta_out[t, :])
# scaling factor
scale = np.sum(beta_out[t, :])
# scale
beta_out[t, :] /= scale
return beta_out | 0.002792 |
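A small sanity check on a hypothetical 2-state HMM: because of the per-step scaling, every row of the returned beta matrix sums to 1.

import numpy as np

A = np.array([[0.9, 0.1],
              [0.2, 0.8]])
pobs = np.array([[0.5, 0.1],
                 [0.4, 0.3],
                 [0.1, 0.6]])
beta = backward(A, pobs, dtype=np.float64)
print(beta.shape)        # (3, 2)
print(beta.sum(axis=1))  # [1. 1. 1.]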
def low(data, **kwargs):
'''
Execute a single low data call
This function is mostly intended for testing the state system
CLI Example:
.. code-block:: bash
salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
chunks = [data]
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
for chunk in chunks:
chunk['__id__'] = chunk['name'] if not chunk.get('__id__') else chunk['__id__']
err = st_.state.verify_data(data)
if err:
return err
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__['thin_dir'],
trans_tar_sum,
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
# Read in the JSON data and return the data structure
try:
return salt.utils.json.loads(stdout)
except Exception as e:
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout | 0.001266 |
def add_schedule(self, name, activation_date, day_period='one_time',
final_action='ALERT_FAILURE', activated=True,
minute_period='one_time', day_mask=None,
repeat_until_date=None, comment=None):
"""
Add a schedule to an existing task.
:param str name: name for this schedule
:param int activation_date: when to start this task. Activation date
should be a UTC time represented in milliseconds.
:param str day_period: when this task should be run. Valid options:
'one_time', 'daily', 'weekly', 'monthly', 'yearly'. If 'daily' is
selected, you can also provide a value for 'minute_period'.
(default: 'one_time')
:param str minute_period: only required if day_period is set to 'daily'.
Valid options: 'each_quarter' (15 min), 'each_half' (30 minutes), or
'hourly', 'one_time' (default: 'one_time')
:param int day_mask: If the task day_period=weekly, then specify the day
or days for repeating. Day masks are: sun=1, mon=2, tue=4, wed=8,
thu=16, fri=32, sat=64. To repeat for instance every Monday, Wednesday
and Friday, the value must be 2 + 8 + 32 = 42
:param str final_action: what type of action to perform after the
scheduled task runs. Options are: 'ALERT_FAILURE', 'ALERT', or
'NO_ACTION' (default: ALERT_FAILURE)
:param bool activated: whether to activate the schedule (default: True)
:param str repeat_until_date: if this is anything but a one time task run,
you can specify the date when this task should end. The format is the
same as the `activation_date` param.
:param str comment: optional comment
:raises ActionCommandFailed: failed adding schedule
:return: None
"""
json = {
'name': name,
'activation_date': activation_date,
'day_period': day_period,
'day_mask': day_mask,
'activated': activated,
'final_action': final_action,
'minute_period': minute_period,
'repeat_until_date': repeat_until_date if repeat_until_date else None,
'comment': comment}
if 'daily' in day_period:
minute_period = minute_period if minute_period != 'one_time' else 'hourly'
json['minute_period'] = minute_period
return self.make_request(
ActionCommandFailed,
method='create',
resource='task_schedule',
json=json) | 0.006008 |
def add_volume_bricks(name, bricks):
'''
Add brick(s) to an existing volume
name
Volume name
bricks
List of bricks to add to the volume
.. code-block:: yaml
myvolume:
glusterfs.add_volume_bricks:
- bricks:
- host1:/srv/gluster/drive1
- host2:/srv/gluster/drive2
Replicated Volume:
glusterfs.add_volume_bricks:
- name: volume2
- bricks:
- host1:/srv/gluster/drive2
- host2:/srv/gluster/drive3
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': False}
volinfo = __salt__['glusterfs.info']()
if name not in volinfo:
ret['comment'] = 'Volume {0} does not exist'.format(name)
return ret
if int(volinfo[name]['status']) != 1:
ret['comment'] = 'Volume {0} is not started'.format(name)
return ret
current_bricks = [brick['path'] for brick in volinfo[name]['bricks'].values()]
if not set(bricks) - set(current_bricks):
ret['result'] = True
ret['comment'] = 'Bricks already added in volume {0}'.format(name)
return ret
bricks_added = __salt__['glusterfs.add_volume_bricks'](name, bricks)
if bricks_added:
ret['result'] = True
ret['comment'] = 'Bricks successfully added to volume {0}'.format(name)
new_bricks = [brick['path'] for brick in __salt__['glusterfs.info']()[name]['bricks'].values()]
ret['changes'] = {'new': new_bricks, 'old': current_bricks}
return ret
ret['comment'] = 'Adding bricks to volume {0} failed'.format(name)
return ret | 0.001768 |
def infer_literal(self, args, diagnostic=None):
"""
        Infer type from a LITERAL.
        The type of a literal depends on the language;
        we adopt a basic convention.
"""
literal, t = args
#self.type_node.add(EvalCtx.from_sig(Val(literal, t)))
self.infer_node.scope_node.add(EvalCtx.from_sig(Val(literal, t))) | 0.008571 |
def add_layers_to_canvas_with_custom_orders(
order, impact_function, iface=None):
"""Helper to add layers to the map canvas following a specific order.
From top to bottom in the legend:
[
('FromCanvas', layer name, full layer URI, QML),
('FromAnalysis', layer purpose, layer group, None),
...
]
The full layer URI is coming from our helper.
:param order: Special structure the list of layers to add.
:type order: list
:param impact_function: The multi exposure impact function used.
:type impact_function: MultiExposureImpactFunction
:param iface: QGIS QgisAppInterface instance.
:type iface: QgisAppInterface
"""
root = QgsProject.instance().layerTreeRoot()
# Make all layers hidden.
for child in root.children():
child.setItemVisibilityChecked(False)
group_analysis = root.insertGroup(0, impact_function.name)
group_analysis.setItemVisibilityChecked(True)
group_analysis.setCustomProperty(MULTI_EXPOSURE_ANALYSIS_FLAG, True)
# Insert layers in the good order in the group.
for layer_definition in order:
if layer_definition[0] == FROM_CANVAS['key']:
style = QDomDocument()
style.setContent(layer_definition[3])
layer = load_layer(layer_definition[2], layer_definition[1])[0]
layer.importNamedStyle(style)
QgsProject.instance().addMapLayer(layer, False)
layer_node = group_analysis.addLayer(layer)
layer_node.setItemVisibilityChecked(True)
else:
if layer_definition[2] == impact_function.name:
for layer in impact_function.outputs:
if layer.keywords['layer_purpose'] == layer_definition[1]:
QgsProject.instance().addMapLayer(
layer, False)
layer_node = group_analysis.addLayer(layer)
layer_node.setItemVisibilityChecked(True)
try:
title = layer.keywords['title']
if qgis_version() >= 21800:
layer.setName(title)
else:
layer.setLayerName(title)
except KeyError:
pass
break
else:
for sub_impact_function in impact_function.impact_functions:
# Iterate over each sub impact function used in the
# multi exposure analysis.
if sub_impact_function.name == layer_definition[2]:
for layer in sub_impact_function.outputs:
purpose = layer_definition[1]
if layer.keywords['layer_purpose'] == purpose:
QgsProject.instance().addMapLayer(
layer, False)
layer_node = group_analysis.addLayer(
layer)
layer_node.setItemVisibilityChecked(True)
try:
title = layer.keywords['title']
if qgis_version() >= 21800:
layer.setName(title)
else:
layer.setLayerName(title)
except KeyError:
pass
break
if iface:
iface.setActiveLayer(impact_function.analysis_impacted) | 0.000266 |
def count_cycle(iterable, n=None):
"""Cycle through the items from *iterable* up to *n* times, yielding
the number of completed cycles along with each item. If *n* is omitted the
process repeats indefinitely.
>>> list(count_cycle('AB', 3))
[(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
"""
iterable = tuple(iterable)
if not iterable:
return iter(())
counter = count() if n is None else range(n)
return ((i, item) for i in counter for item in iterable) | 0.001938 |
def get_read_strategy(cls, response):
'''Return the appropriate algorithm of reading response.
Returns:
str: ``chunked``, ``length``, ``close``.
'''
chunked_match = re.match(
r'chunked($|;)',
response.fields.get('Transfer-Encoding', '')
)
if chunked_match:
return 'chunked'
elif 'Content-Length' in response.fields:
return 'length'
else:
return 'close' | 0.004082 |
def get_percentage_relative_to(val, other):
"""Finds percentage between 2 numbers
:param val: number
:param other: number to compare to
:return: percentage of delta between first and second
"""
val = float(val)
other = float(other)
ratio = val / other - 1
return ratio * 100.0 | 0.003185 |
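Two quick hypothetical calls to illustrate the sign convention:

print(get_percentage_relative_to(120, 80))  # 50.0  (120 is 50% above 80)
print(get_percentage_relative_to(60, 80))   # -25.0 (60 is 25% below 80)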
def memoize(max_cache_size=1000):
"""Python 2.4 compatible memoize decorator.
It creates a cache that has a maximum size. If the cache exceeds the max,
it is thrown out and a new one made. With such behavior, it is wise to set
the cache just a little larger that the maximum expected need.
Parameters:
max_cache_size - the size to which a cache can grow
"""
def wrapper(f):
@wraps(f)
def fn(*args, **kwargs):
if kwargs:
key = (args, tuple(kwargs.items()))
else:
key = args
try:
return fn.cache[key]
except KeyError:
if fn.count >= max_cache_size:
fn.cache = {}
fn.count = 0
result = f(*args, **kwargs)
fn.cache[key] = result
fn.count += 1
return result
except TypeError:
return f(*args, **kwargs)
fn.cache = {}
fn.count = 0
return fn
return wrapper | 0.000932 |
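A usage sketch, assuming functools.wraps is imported where memoize is defined; slow_square is a hypothetical function.

@memoize(max_cache_size=100)
def slow_square(x):
    print("computing %d^2" % x)
    return x * x

slow_square(4)   # prints "computing 4^2" and returns 16
slow_square(4)   # cache hit: returns 16 without recomputing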
def dist_abs(self, src, tar):
"""Return the MRA comparison rating of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
int
MRA comparison rating
Examples
--------
>>> cmp = MRA()
>>> cmp.dist_abs('cat', 'hat')
5
>>> cmp.dist_abs('Niall', 'Neil')
6
>>> cmp.dist_abs('aluminum', 'Catalan')
0
>>> cmp.dist_abs('ATCG', 'TAGC')
5
"""
if src == tar:
return 6
if src == '' or tar == '':
return 0
src = list(mra(src))
tar = list(mra(tar))
if abs(len(src) - len(tar)) > 2:
return 0
length_sum = len(src) + len(tar)
if length_sum < 5:
min_rating = 5
elif length_sum < 8:
min_rating = 4
elif length_sum < 12:
min_rating = 3
else:
min_rating = 2
for _ in range(2):
new_src = []
new_tar = []
minlen = min(len(src), len(tar))
for i in range(minlen):
if src[i] != tar[i]:
new_src.append(src[i])
new_tar.append(tar[i])
src = new_src + src[minlen:]
tar = new_tar + tar[minlen:]
src.reverse()
tar.reverse()
similarity = 6 - max(len(src), len(tar))
if similarity >= min_rating:
return similarity
return 0 | 0.001228 |
def references_json(references):
''' Given a list of all models in a graph, return JSON representing
them and their properties.
Args:
references (seq[Model]) :
A list of models to convert to JSON
Returns:
list
'''
references_json = []
for r in references:
ref = r.ref
ref['attributes'] = r._to_json_like(include_defaults=False)
references_json.append(ref)
return references_json | 0.002141 |
def listen_init(self):
"""Setup the service to listen for clients."""
self.dispatcher = ObjectDispatch(self)
self.factory = MsgPackProtocolFactory(self.dispatcher)
self.server = UnixServer(self.loop, self.factory, self.path)
self.server.start() | 0.007042 |
def download_file(save_path, file_url):
""" Download file from http url link """
r = requests.get(file_url) # create HTTP response object
with open(save_path, 'wb') as f:
f.write(r.content)
return save_path | 0.004274 |
def status(self, jobId=None, jobType=None):
"""
Inquire about status when publishing an item, adding an item in
async mode, or adding with a multipart upload. "Partial" is
available for Add Item Multipart, when only a part is uploaded
and the item is not committed.
Input:
jobType The type of asynchronous job for which the status has
to be checked. Default is none, which check the
item's status. This parameter is optional unless
used with the operations listed below.
Values: publish, generateFeatures, export,
and createService
jobId - The job ID returned during publish, generateFeatures,
export, and createService calls.
"""
params = {
"f" : "json"
}
if jobType is not None:
params['jobType'] = jobType
if jobId is not None:
params["jobId"] = jobId
url = "%s/status" % self.root
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url) | 0.005143 |
def getOverlayTransformTrackedDeviceComponent(self, ulOverlayHandle, pchComponentName, unComponentNameSize):
"""Gets the transform information when the overlay is rendering on a component."""
fn = self.function_table.getOverlayTransformTrackedDeviceComponent
punDeviceIndex = TrackedDeviceIndex_t()
result = fn(ulOverlayHandle, byref(punDeviceIndex), pchComponentName, unComponentNameSize)
return result, punDeviceIndex | 0.01087 |
def remove_cmdline_arg(args, arg, n=1):
"""
Removes the command line argument *args* from a list of arguments *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *n* is 1 or less, only the argument is removed. Otherwise,
the following *n-1* values are removed. Example:
.. code-block:: python
args = global_cmdline_values()
# -> ["--local-scheduler", "--workers", "4"]
remove_cmdline_arg(args, "--local-scheduler")
# -> ["--workers", "4"]
remove_cmdline_arg(args, "--workers", 2)
# -> ["--local-scheduler"]
"""
if arg in args:
idx = args.index(arg)
args = list(args)
del args[idx:idx + max(n, 1)]
return args | 0.004104 |
def normal(target, seeds, scale, loc):
r"""
Produces values from a Weibull distribution given a set of random numbers.
Parameters
----------
target : OpenPNM Object
The object with which this function as associated. This argument
is required to (1) set number of values to generate (geom.Np or
geom.Nt) and (2) provide access to other necessary values
(i.e. geom['pore.seed']).
seeds : string, optional
The dictionary key on the Geometry object containing random seed values
(between 0 and 1) to use in the statistical distribution.
scale : float
The standard deviation of the Normal distribution
loc : float
The mean of the Normal distribution
Examples
--------
The following code illustrates the inner workings of this function,
which uses the 'norm' method of the scipy.stats module. This can
be used to find suitable values of 'scale' and 'loc'.
>>> import scipy
>>> func = scipy.stats.norm(scale=.0001, loc=0.001)
>>> import matplotlib.pyplot as plt
>>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50)
"""
seeds = target[seeds]
value = spts.norm.ppf(q=seeds, scale=scale, loc=loc)
return value | 0.000792 |
def post_merge_request(profile, payload):
"""Do a POST request to Github's API to merge.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
payload
A dict of information to pass to Github's API as the payload for
a merge request, something like this::
{ "base": <base>, "head": <head>, "commit_message": <mesg>}
Returns:
The response returned by the ``requests`` library when it does the
POST request.
"""
repo = profile["repo"]
url = GITHUB_API_BASE_URL + "repos/" + repo + "/merges"
headers = get_headers(profile)
response = requests.post(url, json=payload, headers=headers)
return response | 0.001144 |
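A hypothetical call merging a "feature" branch into "master"; the profile values are placeholders, and GitHub's merges endpoint answers 201 on a successful merge.

profile = {"repo": "someuser/somerepo", "token": "<personal-access-token>"}
payload = {"base": "master",
           "head": "feature",
           "commit_message": "Merge feature into master"}
response = post_merge_request(profile, payload)
print(response.status_code)  # 201 when the merge succeeds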
def start_plasma_store(self):
"""Start the plasma store."""
stdout_file, stderr_file = self.new_log_files("plasma_store")
process_info = ray.services.start_plasma_store(
stdout_file=stdout_file,
stderr_file=stderr_file,
object_store_memory=self._ray_params.object_store_memory,
plasma_directory=self._ray_params.plasma_directory,
huge_pages=self._ray_params.huge_pages,
plasma_store_socket_name=self._plasma_store_socket_name)
assert (
ray_constants.PROCESS_TYPE_PLASMA_STORE not in self.all_processes)
self.all_processes[ray_constants.PROCESS_TYPE_PLASMA_STORE] = [
process_info
] | 0.002759 |
async def monitor_mode(self, poll_devices=False, device=None,
workdir=None):
"""Place the IM in monitoring mode."""
print("Running monitor mode")
await self.connect(poll_devices, device, workdir)
self.plm.monitor_mode() | 0.010791 |
def UNIFAC_RQ(groups, subgroup_data=None):
r'''Calculates UNIFAC parameters R and Q for a chemical, given a dictionary
of its groups, as shown in [1]_. Most UNIFAC methods use the same subgroup
values; however, a dictionary of `UNIFAC_subgroup` instances may be
specified as an optional second parameter.
.. math::
r_i = \sum_{k=1}^{n} \nu_k R_k
q_i = \sum_{k=1}^{n}\nu_k Q_k
Parameters
----------
groups : dict[count]
Dictionary of numeric subgroup IDs : their counts
subgroup_data : None or dict[UNIFAC_subgroup]
Optional replacement for standard subgroups; leave as None to use the
original UNIFAC subgroup r and q values.
Returns
-------
R : float
R UNIFAC parameter (normalized Van der Waals Volume) [-]
Q : float
Q UNIFAC parameter (normalized Van der Waals Area) [-]
Notes
-----
These parameters have some predictive value for other chemical properties.
Examples
--------
Hexane
>>> UNIFAC_RQ({1:2, 2:4})
(4.4998000000000005, 3.856)
References
----------
.. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
'''
if subgroup_data is not None:
subgroups = subgroup_data
else:
subgroups = UFSG
ri = 0.
qi = 0.
for group, count in groups.items():
ri += subgroups[group].R*count
qi += subgroups[group].Q*count
return ri, qi | 0.003924 |
def _get_ca_bundle_path(self):
"""
Return a path to the CA bundle which is used for verifying the hosts
SSL certificate.
"""
if self.ca_certs_bundle_path:
# User provided a custom path
return self.ca_certs_bundle_path
# Return first bundle which is available
for file_path in COMMON_CA_LOCATIONS:
if self._is_valid_ca_bundle_file(file_path=file_path):
return file_path
return None | 0.004016 |
def _get_args(args):
"""Argparse logic lives here.
returns: parsed arguments.
"""
parser = argparse.ArgumentParser(
description='A tool to extract features into a simple format.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('--no-cache', action='store_true')
parser.add_argument('--deploy', action='store_true')
parser.add_argument('--cache-path', type=str, default='fex-cache.pckl',
help='Path for cache file')
parser.add_argument('--path', type=str, default='features.csv',
help='Path to write the dataset to')
args = parser.parse_args(args)
if args.no_cache:
args.cache_path = None
return args | 0.001332 |
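A usage sketch parsing an explicit argument list instead of sys.argv; the sample flags are hypothetical.

args = _get_args(['--no-cache', '--path', 'out.csv'])
print(args.cache_path)  # None (cleared because --no-cache was given)
print(args.path)        # 'out.csv'
print(args.deploy)      # False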
def getActiveJobCountForClientInfo(self, clientInfo):
""" Return the number of jobs for the given clientInfo and a status that is
not completed.
"""
with ConnectionFactory.get() as conn:
query = 'SELECT count(job_id) ' \
'FROM %s ' \
'WHERE client_info = %%s ' \
' AND status != %%s' % self.jobsTableName
conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED])
activeJobCount = conn.cursor.fetchone()[0]
return activeJobCount | 0.009709 |
def remove_from_role(server_context, role, user_id=None, email=None, container_path=None):
"""
Remove user/group from security role
:param server_context: A LabKey server context. See utils.create_server_context.
:param role: (from get_roles) to remove user from
:param user_id: to remove permissions from (must supply this or email or both)
:param email: to remove permissions from (must supply this or user_id or both)
:param container_path: additional project path context
:return:
"""
return __make_security_role_api_request(server_context, 'removeAssignment.api', role, user_id=user_id, email=email,
container_path=container_path) | 0.008345 |
def collect_results(self, data_values):
"""Receive the data from the consumers polled and process it.
:param dict data_values: The poll data returned from the consumer
:type data_values: dict
"""
self.last_poll_results['timestamp'] = self.poll_data['timestamp']
# Get the name and consumer name and remove it from what is reported
consumer_name = data_values['consumer_name']
del data_values['consumer_name']
process_name = data_values['name']
del data_values['name']
# Add it to our last poll global data
if consumer_name not in self.last_poll_results:
self.last_poll_results[consumer_name] = dict()
self.last_poll_results[consumer_name][process_name] = data_values
# Calculate the stats
self.stats = self.calculate_stats(self.last_poll_results) | 0.002265 |
def scroll_to_beginning_vertically(self, steps=10, *args,**selectors):
"""
Scroll the object which has *selectors* attributes to *beginning* vertically.
See `Scroll Forward Vertically` for more details.
"""
return self.device(**selectors).scroll.vert.toBeginning(steps=steps) | 0.012658 |
def format_currency_field(__, prec, number, locale):
"""Formats a currency field."""
locale = Locale.parse(locale)
currency = get_territory_currencies(locale.territory)[0]
if prec is None:
pattern, currency_digits = None, True
else:
prec = int(prec)
pattern = locale.currency_formats['standard']
pattern = modify_number_pattern(pattern, frac_prec=(prec, prec))
currency_digits = False
return format_currency(number, currency, pattern, locale=locale,
currency_digits=currency_digits) | 0.001745 |
def get_path_info(self, path):
"""
Get information about ``path`` as a dict of properties.
The return value, based upon ``fs.FileStatus`` from the Java API,
has the following fields:
* ``block_size``: HDFS block size of ``path``
* ``group``: group associated with ``path``
* ``kind``: ``'file'`` or ``'directory'``
* ``last_access``: last access time of ``path``
* ``last_mod``: last modification time of ``path``
* ``name``: fully qualified path name
* ``owner``: owner of ``path``
* ``permissions``: file system permissions associated with ``path``
* ``replication``: replication factor of ``path``
* ``size``: size in bytes of ``path``
:type path: str
:param path: a path in the filesystem
:rtype: dict
:return: path information
:raises: :exc:`~exceptions.IOError`
"""
_complain_ifclosed(self.closed)
return self.fs.get_path_info(path) | 0.001967 |
def get_registered_courses (self):
""" 履修登録済み授業を取得 """
kdb = twins.kdb.Kdb()
_reged = []
for x in ((1, "A"), (2, "A"), (3, "A"), (4, "B"), (5, "B"), (6, "B")):
self.req("RSW0001000-flow")
self.get({
"_eventId": "search",
"moduleCode": x[0],
"gakkiKbnCode": x[1]
})
self.post({"_eventId": "output"}, True)
r = self.post({
"_eventId": "output",
"outputType": "csv",
"fileEncoding": "UTF8",
"logicalDeleteFlg": 0
}, True)
_reged += list(csv.reader(r.text.strip().split("\n")))
if _reged == []:
return []
already_appeared = []
reged = []
for c in [kdb.get_course_info(c[0]) for c in _reged]:
            # Remove duplicates
if c is None or c["id"] in already_appeared:
continue
reged.append(c)
already_appeared.append(c["id"])
return reged | 0.002558 |
def set_property_value(self, name, value, dry_run=False):
"""Set or remove property value.
See DAVResource.set_property_value()
"""
raise DAVError(
HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty
) | 0.007463 |
def download(client, target_dir):
"""Download images from play store into folder herachy."""
print('download image previews')
print(
"Warning! Downloaded images are only previews!"
"They may be to small for upload.")
tree = {}
listings = client.list('listings')
languages = map(lambda listing: listing['language'], listings)
parameters = [{'imageType': image_type, 'language': language}
for image_type in image_types for language in languages]
tree = {image_type: {language: list()
for language in languages}
for image_type in image_types}
for params in parameters:
result = client.list('images', **params)
image_type = params['imageType']
language = params['language']
tree[image_type][language] = map(
lambda r: r['url'], result)
for image_type, language_map in tree.items():
for language, files in language_map.items():
if len(files) > 0:
mkdir_p(
os.path.join(target_dir, 'images', image_type, language))
if image_type in single_image_types:
if len(files) > 0:
image_url = files[0]
path = os.path.join(
target_dir,
'images',
image_type,
language,
image_type)
load_and_save_image(image_url, path)
else:
for idx, image_url in enumerate(files):
path = os.path.join(
target_dir,
'images',
image_type,
language,
image_type + '_' + str(idx))
load_and_save_image(image_url, path) | 0.00053 |
async def playing(self):
"""Return what is currently playing."""
# TODO: This is hack-ish
if self._setstate is None:
await self.protocol.start()
# No SET_STATE_MESSAGE received yet, use default
if self._setstate is None:
return MrpPlaying(protobuf.SetStateMessage(), None)
return MrpPlaying(self._setstate, self._nowplaying) | 0.005025 |
def getSubjectSequence(self, title):
"""
Obtain information about a subject sequence given its title.
This information is cached in self._subjectTitleToSubject. It can
be obtained from either a) an sqlite database (given via the
sqliteDatabaseFilename argument to __init__), b) the FASTA that was
originally given to BLAST (via the databaseFilename argument), or
c) from the BLAST database using blastdbcmd (which can be
unreliable - occasionally failing to find subjects that are in its
database).
@param title: A C{str} sequence title from a BLAST hit. Of the form
'gi|63148399|gb|DQ011818.1| Description...'.
@return: An C{AARead} or C{DNARead} instance, depending on the type of
BLAST database in use.
"""
if self.params.application in {'blastp', 'blastx'}:
readClass = AARead
else:
readClass = DNARead
if self._subjectTitleToSubject is None:
if self._databaseFilename is None:
if self._sqliteDatabaseFilename is None:
# Fall back to blastdbcmd. ncbidb has to be imported
# as below so ncbidb.getSequence can be patched by our
# test suite.
from dark import ncbidb
seq = ncbidb.getSequence(
title, self.params.applicationParams['database'])
return readClass(seq.description, str(seq.seq))
else:
# An Sqlite3 database is used to look up subjects.
self._subjectTitleToSubject = SqliteIndex(
self._sqliteDatabaseFilename,
fastaDirectory=self._databaseDirectory,
readClass=readClass)
else:
# Build an in-memory dict to look up subjects. This only
# works for small databases, obviously.
titles = {}
for read in FastaReads(self._databaseFilename,
readClass=readClass):
titles[read.id] = read
self._subjectTitleToSubject = titles
return self._subjectTitleToSubject[title] | 0.000869 |
def get_object_cache_keys(instance):
"""
Return the cache keys associated with an object.
"""
if instance.pk is None or instance._state.adding:
return []
keys = []
tr_models = instance._parler_meta.get_all_models()
# TODO: performs a query to fetch the language codes. Store that in memcached too.
for language in instance.get_available_languages():
for tr_model in tr_models:
keys.append(get_translation_cache_key(tr_model, instance.pk, language))
return keys | 0.005693 |
def from_xdr_object(cls, op_xdr_object):
"""Creates a :class:`CreateAccount` object from an XDR Operation
object.
"""
if not op_xdr_object.sourceAccount:
source = None
else:
source = encode_check(
'account', op_xdr_object.sourceAccount[0].ed25519).decode()
destination = encode_check(
'account',
op_xdr_object.body.createAccountOp.destination.ed25519).decode()
starting_balance = Operation.from_xdr_amount(
op_xdr_object.body.createAccountOp.startingBalance)
return cls(
source=source,
destination=destination,
starting_balance=starting_balance,
) | 0.002725 |
def get_users(self, omit_empty_organisms=False):
"""
Get all users known to this Apollo instance
:type omit_empty_organisms: bool
:param omit_empty_organisms: Will omit users having no access to any organism
:rtype: list of dicts
:return: list of user info dictionaries
"""
payload = {}
if omit_empty_organisms:
payload['omitEmptyOrganisms'] = omit_empty_organisms
res = self.post('loadUsers', payload)
data = [_fix_user(user) for user in res]
return data | 0.00531 |
def set_assets(self, asset_ids=None):
"""Sets the assets.
arg: assetIds (osid.id.Id): the asset Ids
raise: INVALID_ARGUMENT - assetIds is invalid
raise: NullArgument - assetIds is null
raise: NoAccess - metadata.is_read_only() is true
compliance: mandatory - This method must be implemented.
"""
if asset_ids is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['asset_ids'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(asset_ids, metadata, array=True):
for asset_id in asset_ids:
self._my_map['assetIds'].append(str(asset_id))
else:
            raise InvalidArgument() | 0.002614 |
def all_exist(filepaths):
"""Returns true if all files in the list exist."""
for fname in filepaths:
if not tf.gfile.Exists(fname):
return False
return True | 0.02907 |
def camel_to_snake_case(string):
"""Converts 'string' presented in camel case to snake case.
e.g.: CamelCase => snake_case
"""
s = _1.sub(r'\1_\2', string)
return _2.sub(r'\1_\2', s).lower() | 0.004739 |
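The module-level patterns _1 and _2 are not included in this snippet. A self-contained sketch assuming the usual two-pass regexes (an assumption, not taken from the source):

import re

_1 = re.compile(r'(.)([A-Z][a-z]+)')   # e.g. 'HTTPResponse' -> 'HTTP_Response'
_2 = re.compile(r'([a-z0-9])([A-Z])')  # e.g. 'camelCase' -> 'camel_Case'

def camel_to_snake_case(string):
    s = _1.sub(r'\1_\2', string)
    return _2.sub(r'\1_\2', s).lower()

print(camel_to_snake_case('CamelCase'))     # camel_case
print(camel_to_snake_case('HTTPResponse'))  # http_response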
def to_decimal(alpha_number, alphabet=ALPHABET, default=_marker):
"""Converts an alphanumeric code (e.g AB12) to an integer
:param alpha_number: representation of an alphanumeric code
:param alphabet: alphabet to use when alpha_number is a non-int string
:type number: int, string, Alphanumber, float
:type alphabet: string
"""
num = api.to_int(alpha_number, default=None)
if num is not None:
return num
alpha_number = str(alpha_number)
regex = re.compile(r"([A-Z]+)(\d+)", re.IGNORECASE)
matches = re.findall(regex, alpha_number)
if not matches:
if default is not _marker:
return default
raise ValueError("Not a valid alpha number: {}".format(alpha_number))
alpha = matches[0][0]
number = int(matches[0][1])
max_num = 10 ** len(matches[0][1]) - 1
len_alphabet = len(alphabet)
for pos_char, alpha_char in enumerate(reversed(alpha)):
index_char = alphabet.find(alpha_char)
number += (index_char * max_num * len_alphabet ** pos_char)
return number | 0.000933 |
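A worked example of the positional arithmetic above, assuming ALPHABET is the plain 26-letter uppercase alphabet:

# to_decimal("AB12"), with ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
#   alpha = "AB", digits = "12"  ->  number = 12, max_num = 10**2 - 1 = 99
#   'B' (alphabet index 1) at position 0 adds 1 * 99 * 26**0 = 99
#   'A' (alphabet index 0) at position 1 adds 0 * 99 * 26**1 = 0
#   result: 12 + 99 = 111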
def get_circulations(elements: T) -> Iterable[T]:
"""Iterate over all possible circulations of an ordered collection (tuple or list).
Example:
>>> list(get_circulations([1, 2, 3]))
[[1, 2, 3], [2, 3, 1], [3, 1, 2]]
"""
for i in range(len(elements)):
yield elements[i:] + elements[:i] | 0.006309 |
def register(self, collector):
"""Add a collector to the registry."""
with self._lock:
names = self._get_names(collector)
duplicates = set(self._names_to_collectors).intersection(names)
if duplicates:
raise ValueError(
'Duplicated timeseries in CollectorRegistry: {0}'.format(
duplicates))
for name in names:
self._names_to_collectors[name] = collector
self._collector_to_names[collector] = names | 0.00365 |
def _bdev(dev=None):
'''
Resolve a bcacheX or cache to a real dev
:return: basename of bcache dev
'''
if dev is None:
dev = _fssys('cache0')
else:
dev = _bcpath(dev)
if not dev:
return False
else:
return _devbase(os.path.dirname(dev)) | 0.003344 |
def openlines(image, linelength=10, dAngle=10, mask=None):
"""
Do a morphological opening along lines of different angles.
Return difference between max and min response to different angles for each pixel.
This effectively removes dots and only keeps lines.
image - pixel image to operate on
    linelength - length of the structuring element
    dAngle - angle step (in degrees) for the rotating lines
mask - if present, only use unmasked pixels for operations
"""
nAngles = 180//dAngle
openingstack = np.zeros((nAngles,image.shape[0],image.shape[1]),image.dtype)
for iAngle in range(nAngles):
angle = dAngle * iAngle
se = strel_line(linelength,angle)
openingstack[iAngle,:,:] = opening(image, mask=mask, footprint=se)
imLines = np.max(openingstack,axis=0) - np.min(openingstack,axis=0)
return imLines | 0.012615 |
async def async_open(self) -> None:
"""Opens connection to the LifeSOS ethernet interface."""
await self._loop.create_connection(
lambda: self,
self._host,
self._port) | 0.009091 |
def from_dict(cls, fields, mapping):
"""
Create a Record from a dictionary of field mappings.
The *fields* object is used to determine the column indices
of fields in the mapping.
Args:
fields: the Relation schema for the table of this record
mapping: a dictionary or other mapping from field names to
column values
Returns:
a :class:`Record` object
"""
iterable = [None] * len(fields)
for key, value in mapping.items():
try:
index = fields.index(key)
except KeyError:
raise ItsdbError('Invalid field name(s): ' + key)
iterable[index] = value
return cls(fields, iterable) | 0.002587 |
def check_flags(self, ds):
'''
Check the flag_values, flag_masks and flag_meanings attributes for
variables to ensure they are CF compliant.
CF §3.5 The attributes flag_values, flag_masks and flag_meanings are
intended to make variables that contain flag values self describing.
Status codes and Boolean (binary) condition flags may be expressed with
different combinations of flag_values and flag_masks attribute
definitions.
The flag_values and flag_meanings attributes describe a status flag
consisting of mutually exclusive coded values.
The flag_meanings attribute is a string whose value is a blank
separated list of descriptive words or phrases, one for each flag
value. Each word or phrase should consist of characters from the
alphanumeric set and the following five: '_', '-', '.', '+', '@'.
The flag_masks and flag_meanings attributes describe a number of
independent Boolean conditions using bit field notation by setting
unique bits in each flag_masks value.
The flag_masks, flag_values and flag_meanings attributes, used
together, describe a blend of independent Boolean conditions and
enumerated status codes. A flagged condition is identified by a bitwise
AND of the variable value and each flag_masks value; a result that
matches the flag_values value indicates a true condition.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
ret_val = []
for name in cfutil.get_flag_variables(ds):
variable = ds.variables[name]
flag_values = getattr(variable, "flag_values", None)
flag_masks = getattr(variable, "flag_masks", None)
valid_flags_var = TestCtx(BaseCheck.HIGH, self.section_titles['3.5'])
# Check that the variable defines mask or values
valid_flags_var.assert_true(flag_values is not None or flag_masks is not None,
"{} does not define either flag_masks or flag_values".format(name))
ret_val.append(valid_flags_var.to_result())
valid_meanings = self._check_flag_meanings(ds, name)
ret_val.append(valid_meanings)
# check flag_values
if flag_values is not None:
valid_values = self._check_flag_values(ds, name)
ret_val.append(valid_values)
# check flag_masks
if flag_masks is not None:
valid_masks = self._check_flag_masks(ds, name)
ret_val.append(valid_masks)
if flag_values is not None and flag_masks is not None:
                allv = [v & m == v for v, m in zip(flag_values, flag_masks)]
allvr = Result(BaseCheck.MEDIUM, all(allv), self.section_titles['3.5'])
if not allvr.value:
allvr.msgs = ["flag masks and flag values for '{}' combined don't equal flag value".format(name)]
ret_val.append(allvr)
return ret_val | 0.002514 |
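A small worked example of the final bitwise consistency check in check_flags, using illustrative numbers that are not from the source:

# flag_masks  = [0b0011, 0b0100]   # two independent bit fields
# flag_values = [0b0001, 0b0100]
# For each (value, mask) pair, value & mask == value must hold:
#   0b0001 & 0b0011 == 0b0001  -> True
#   0b0100 & 0b0100 == 0b0100  -> True
# Any False entry triggers the "combined don't equal flag value" message.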
def umount(self, source):
"""
Unmount partion
:param source: Full partition path like /dev/sda1
"""
args = {
'source': source,
}
self._umount_chk.check(args)
response = self._client.raw('disk.umount', args)
result = response.get()
if result.state != 'SUCCESS':
raise RuntimeError('failed to umount partition: %s' % result.stderr) | 0.006865 |
def vlcom3(a, v1, b, v2, c, v3):
"""
This subroutine computes the vector linear combination
a*v1 + b*v2 + c*v3 of double precision, 3-dimensional vectors.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vlcom3_c.html
:param a: Coefficient of v1
:type a: float
:param v1: Vector in 3-space
:type v1: 3-Element Array of floats
:param b: Coefficient of v2
:type b: float
:param v2: Vector in 3-space
:type v2: 3-Element Array of floats
:param c: Coefficient of v3
:type c: float
:param v3: Vector in 3-space
:type v3: 3-Element Array of floats
:return: Linear Vector Combination a*v1 + b*v2 + c*v3
:rtype: 3-Element Array of floats
"""
v1 = stypes.toDoubleVector(v1)
v2 = stypes.toDoubleVector(v2)
v3 = stypes.toDoubleVector(v3)
sumv = stypes.emptyDoubleVector(3)
a = ctypes.c_double(a)
b = ctypes.c_double(b)
c = ctypes.c_double(c)
libspice.vlcom3_c(a, v1, b, v2, c, v3, sumv)
return stypes.cVectorToPython(sumv) | 0.006744 |
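The docstring URL points at the CSPICE routine vlcom3_c, so this is presumably the SpiceyPy wrapper for it; assuming that, a short usage sketch:

import spiceypy as spice

# 1*(1,0,0) + 2*(0,1,0) + 3*(0,0,1) = (1, 2, 3)
combo = spice.vlcom3(1.0, [1.0, 0.0, 0.0],
                     2.0, [0.0, 1.0, 0.0],
                     3.0, [0.0, 0.0, 1.0])
print(combo)  # expected: [1.0, 2.0, 3.0]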
def folderitem(self, obj, item, index):
"""Applies new properties to the item (Batch) that is currently being
rendered as a row in the list
:param obj: client to be rendered as a row in the list
:param item: dict representation of the batch, suitable for the list
:param index: current position of the item within the list
:type obj: ATContentType/DexterityContentType
:type item: dict
:type index: int
:return: the dict representation of the item
:rtype: dict
"""
obj = api.get_object(obj)
url = "{}/analysisrequests".format(api.get_url(obj))
bid = api.get_id(obj)
cbid = obj.getClientBatchID()
title = api.get_title(obj)
client = obj.getClient()
created = api.get_creation_date(obj)
date = obj.getBatchDate()
item["BatchID"] = bid
item["ClientBatchID"] = cbid
item["replace"]["BatchID"] = get_link(url, bid)
item["Title"] = title
item["replace"]["Title"] = get_link(url, title)
item["created"] = self.ulocalized_time(created, long_format=True)
item["BatchDate"] = self.ulocalized_time(date, long_format=True)
if client:
client_url = api.get_url(client)
client_name = client.getName()
client_id = client.getClientID()
item["Client"] = client_name
item["ClientID"] = client_id
item["replace"]["Client"] = get_link(client_url, client_name)
item["replace"]["ClientID"] = get_link(client_url, client_id)
return item | 0.001231 |
def duration(self):
"""
Returns the current value of the counter and then multiplies it by
:attr:`factor`
:rtype: float
"""
d = self.for_attempt(self.cur_attempt)
self.cur_attempt += 1
return d | 0.007752 |
def path(self, which=None):
"""Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
download_html
/api/compliance/arf_reports/:id/download_html
Otherwise, call ``super``.
"""
if which in ('download_html',):
return '{0}/{1}'.format(
super(ArfReport, self).path(which='self'),
which
)
return super(ArfReport, self).path(which) | 0.003914 |