def vms(nictag):
'''
List all VMs connected to the given nictag
nictag : string
name of nictag
CLI Example:
.. code-block:: bash
salt '*' nictagadm.vms admin
'''
ret = {}
cmd = 'nictagadm vms {0}'.format(nictag)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = res['stderr'] if 'stderr' in res else 'Failed to get list of vms.'
else:
ret = res['stdout'].splitlines()
return ret
def getShocks(self):
'''
Gets permanent and transitory shocks (combining idiosyncratic and aggregate shocks), but
only consumers who update their macroeconomic beliefs this period incorporate all previously
unnoticed aggregate permanent shocks. Agents correctly observe the level of all
real variables (market resources, consumption, assets, etc), but misperceive the aggregate
productivity level.
Parameters
----------
None
Returns
-------
None
'''
# The strange syntax here is so that both StickyEconsumerType and StickyEmarkovConsumerType
# run the getShocks method of their first superclass: AggShockConsumerType and
# AggShockMarkovConsumerType respectively. This will be simplified in Python 3.
super(self.__class__,self).getShocks() # Get permanent and transitory combined shocks
newborns = self.t_age == 0
self.TranShkNow[newborns] = self.TranShkAggNow*self.wRteNow # Turn off idiosyncratic shocks for newborns
self.PermShkNow[newborns] = self.PermShkAggNow
self.getUpdaters() # Randomly draw which agents will update their beliefs
# Calculate innovation to the productivity level perception error
pLvlErrNew = self.getpLvlError()
self.pLvlErrNow *= pLvlErrNew # Perception error accumulation
# Calculate (mis)perceptions of the permanent shock
PermShkPcvd = self.PermShkNow/pLvlErrNew
PermShkPcvd[self.update] *= self.pLvlErrNow[self.update] # Updaters see the true permanent shock and all missed news
self.pLvlErrNow[self.update] = 1.0
self.PermShkNow = PermShkPcvd
def verify_checksum(file_id, pessimistic=False, chunk_size=None, throws=True,
checksum_kwargs=None):
"""Verify checksum of a file instance.
:param file_id: The file ID.
"""
f = FileInstance.query.get(uuid.UUID(file_id))
# Anything might happen during the task, so being pessimistic and marking
# the file as unchecked is a reasonable precaution
if pessimistic:
f.clear_last_check()
db.session.commit()
f.verify_checksum(
progress_callback=progress_updater, chunk_size=chunk_size,
throws=throws, checksum_kwargs=checksum_kwargs)
db.session.commit()
def save_file(self, filename = 'StockChart'):
""" save htmlcontent as .html file """
filename = filename + '.html'
with open(filename, 'w') as f:
#self.buildhtml()
f.write(self.htmlcontent)
f.closed
def trades(self, cursor=None, order='asc', limit=10, sse=False):
"""Retrieve the trades JSON from this instance's Horizon server.
Retrieve the trades JSON response for the account associated with
this :class:`Address`.
:param cursor: A paging token, specifying where to start returning records from.
When streaming this can be set to "now" to stream object created since your request time.
:type cursor: int, str
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool sse: Use the SSE client for connecting to Horizon.
"""
return self.horizon.account_trades(
self.address, cursor=cursor, order=order, limit=limit, sse=sse)
def min_or(a, b, c, d, w):
"""
Lower bound of result of ORing 2-intervals.
:param a: Lower bound of first interval
:param b: Upper bound of first interval
:param c: Lower bound of second interval
:param d: Upper bound of second interval
:param w: bit width
:return: Lower bound of ORing 2-intervals
"""
m = (1 << (w - 1))
while m != 0:
if ((~a) & c & m) != 0:
temp = (a | m) & -m
if temp <= b:
a = temp
break
elif (a & (~c) & m) != 0:
temp = (c | m) & -m
if temp <= d:
c = temp
break
m >>= 1
return a | c
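# A minimal sanity check for min_or (not part of the original source): for small bit
# widths the bound can be compared against an exhaustive search over both intervals.
def _brute_min_or(a, b, c, d):
    return min(x | y for x in range(a, b + 1) for y in range(c, d + 1))

# With a = 2..5, c = 8..9 and width 4, both approaches give 10 (0b1010).
assert min_or(2, 5, 8, 9, 4) == _brute_min_or(2, 5, 8, 9) == 10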
def expand(self, basedir, config, sourcedir, targetdir, cwd):
"""
Expand the given paths to absolute, normalized paths.
Args:
basedir (string): Project base directory used to prepend relative
paths. If empty or equal to '.', it will be filled with current
directory path.
config (string): Settings file path.
sourcedir (string): Source directory path.
targetdir (string): Compiled files target directory path.
cwd (string): Current directory path to prepend base dir if empty.
Returns:
tuple: Expanded arguments in the same order
"""
# Expand home directory if any
expanded_basedir = os.path.expanduser(basedir)
expanded_config = os.path.expanduser(config)
expanded_sourcedir = os.path.expanduser(sourcedir)
expanded_targetdir = os.path.expanduser(targetdir)
# If not absolute, base dir is prepended with current directory
if not os.path.isabs(expanded_basedir):
expanded_basedir = os.path.join(cwd, expanded_basedir)
# Prepend paths with base dir if they are not allready absolute
if not os.path.isabs(expanded_config):
expanded_config = os.path.join(expanded_basedir,
expanded_config)
if not os.path.isabs(expanded_sourcedir):
expanded_sourcedir = os.path.join(expanded_basedir,
expanded_sourcedir)
if not os.path.isabs(expanded_targetdir):
expanded_targetdir = os.path.join(expanded_basedir,
expanded_targetdir)
# Normalize paths
expanded_basedir = os.path.normpath(expanded_basedir)
expanded_config = os.path.normpath(expanded_config)
expanded_sourcedir = os.path.normpath(expanded_sourcedir)
expanded_targetdir = os.path.normpath(expanded_targetdir)
return (expanded_basedir, expanded_config, expanded_sourcedir,
expanded_targetdir)
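# Illustrative call (hypothetical paths), showing how relative paths are joined onto
# the expanded base dir and then normalized:
#
#     expand(self, '.', 'settings.json', 'scss', 'css', '/home/user/project')
#     # -> ('/home/user/project',
#     #     '/home/user/project/settings.json',
#     #     '/home/user/project/scss',
#     #     '/home/user/project/css')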
def set_alias(self, alias_hosted_zone_id, alias_dns_name):
"""Make this an alias resource record set"""
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
def get_trackrs(self):
"""
Extract each Trackr device from the trackrApiInterface state.
return a list of all Trackr objects from account.
"""
trackrs = []
for trackr in self.state:
trackrs.append(trackrDevice(trackr, self))
return trackrs
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
ast.alias("dessert.rewrite", "@dessert_ar")]
expect_docstring = True
pos = 0
lineno = 0
for item in mod.body:
if (expect_docstring and isinstance(item, ast.Expr) and
isinstance(item.value, ast.Str)):
doc = item.value.s
if "PYTEST_DONT_REWRITE" in doc:
# The module has disabled assertion rewriting.
return
lineno += len(doc) - 1
expect_docstring = False
elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
item.module != "__future__"):
lineno = item.lineno
break
pos += 1
imports = [ast.Import([alias], lineno=lineno, col_offset=0)
for alias in aliases]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (isinstance(field, ast.AST) and
# Don't recurse into expressions as they can't contain
# asserts.
not isinstance(field, ast.expr)):
nodes.append(field)
def translate_doc(self, d, field_mapping=None, map_identifiers=None, **kwargs):
"""
Translate a solr document (i.e. a single result row)
"""
if field_mapping is not None:
self.map_doc(d, field_mapping)
subject = self.translate_obj(d, M.SUBJECT)
obj = self.translate_obj(d, M.OBJECT)
# TODO: use a more robust method; we need equivalence as separate field in solr
if map_identifiers is not None:
if M.SUBJECT_CLOSURE in d:
subject['id'] = self.map_id(subject, map_identifiers, d[M.SUBJECT_CLOSURE])
else:
logging.info("NO SUBJECT CLOSURE IN: "+str(d))
if M.SUBJECT_TAXON in d:
subject['taxon'] = self.translate_obj(d,M.SUBJECT_TAXON)
if M.OBJECT_TAXON in d:
obj['taxon'] = self.translate_obj(d, M.OBJECT_TAXON)
qualifiers = []
if M.RELATION in d and isinstance(d[M.RELATION],list):
# GO overloads qualifiers and relation
relation = None
for rel in d[M.RELATION]:
if rel.lower() == 'not':
qualifiers.append(rel)
else:
relation = rel
if relation is not None:
d[M.RELATION] = relation
else:
d[M.RELATION] = None
negated = 'not' in qualifiers
assoc = {'id':d.get(M.ID),
'subject': subject,
'object': obj,
'negated': negated,
'relation': self.translate_obj(d,M.RELATION),
'publications': self.translate_objs(d,M.SOURCE), # note 'source' is used in the golr schema
}
if self.invert_subject_object and assoc['relation'] is not None:
assoc['relation']['inverse'] = True
if len(qualifiers) > 0:
assoc['qualifiers'] = qualifiers
if M.OBJECT_CLOSURE in d:
assoc['object_closure'] = d.get(M.OBJECT_CLOSURE)
if M.IS_DEFINED_BY in d:
if isinstance(d[M.IS_DEFINED_BY],list):
assoc['provided_by'] = d[M.IS_DEFINED_BY]
else:
# hack for GO Golr instance
assoc['provided_by'] = [d[M.IS_DEFINED_BY]]
if M.EVIDENCE_OBJECT in d:
assoc['evidence'] = d[M.EVIDENCE_OBJECT]
assoc['types'] = [t for t in d[M.EVIDENCE_OBJECT] if t.startswith('ECO:')]
if self._use_amigo_schema(self.object_category):
for f in M.AMIGO_SPECIFIC_FIELDS:
if f in d:
assoc[f] = d[f]
# solr does not allow nested objects, so evidence graph is json-encoded
if M.EVIDENCE_GRAPH in d:
assoc[M.EVIDENCE_GRAPH] = json.loads(d[M.EVIDENCE_GRAPH])
return assoc
def mod_watch(name, url='http://localhost:8080/manager', timeout=180):
'''
The tomcat watcher, called to invoke the watch command.
When called, it will reload the webapp in question
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered.
'''
msg = __salt__['tomcat.reload'](name, url, timeout)
result = msg.startswith('OK')
ret = {'name': name,
'result': result,
'changes': {name: result},
'comment': msg
}
return ret
def pathIndex(self, path):
'''Return index of item with *path*.'''
if path == self.root.path:
return QModelIndex()
if not path.startswith(self.root.path):
return QModelIndex()
parts = []
while True:
if path == self.root.path:
break
head, tail = os.path.split(path)
if head == path:
if path:
parts.append(path)
break
parts.append(tail)
path = head
parts.reverse()
if parts:
item = self.root
count = 0
for count, part in enumerate(parts):
matched = False
for child in item.children:
if child.name == part:
item = child
matched = True
break
if not matched:
break
if count + 1 == len(parts):
return self.createIndex(item.row, 0, item)
return QModelIndex()
def list_asgs(access_token, subscription_id, resource_group):
'''Get details about the application security groups for a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
Returns:
HTTP response. ASG JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/applicationSecurityGroups',
'?api-version=', NETWORK_API])
return do_get(endpoint, access_token)
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
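# Illustrative use, assuming pyformat-style placeholders and that _escape leaves a
# plain ASCII value unchanged (both assumptions, not taken from this snippet):
#
#     _bind_parameters("SELECT * FROM t WHERE name = %(name)s AND age = %(age)s",
#                      {"name": "alice", "age": 42})
#     # -> "SELECT * FROM t WHERE name = 'alice' AND age = 42"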
def ets(self):
"""Equitable Threat Score, Gilbert Skill Score, v, (a - R)/(a + b + c - R), R=(a+b)(a+c)/N"""
r = (self.table[0, 0] + self.table[0, 1]) * (self.table[0, 0] + self.table[1, 0]) / self.N
return (self.table[0, 0] - r) / (self.table[0, 0] + self.table[0, 1] + self.table[1, 0] - r)
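# Worked example of the ETS formula above, for a 2x2 contingency table with
# hits a = table[0, 0] = 50, false alarms b = table[0, 1] = 20,
# misses c = table[1, 0] = 10 and N = 200 total events:
#   R   = (a + b) * (a + c) / N = 70 * 60 / 200 = 21
#   ETS = (a - R) / (a + b + c - R) = 29 / 59, roughly 0.49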
def json_2_text(inp, out, verbose = False):
"""Convert a Wikipedia article to Text object.
Concatenates the sections in wikipedia file and rearranges other information so it
can be interpreted as a Text object.
Links and other elements with start and end positions are annotated
as layers.
Parameters
----------
inp: directory of parsed et.wikipedia articles in json format
out: output directory of .txt files
verbose: if True, prints every article title and total count of converted files
if False prints every 50th count
Returns
-------
estnltk.text.Text
The Text object.
"""
for root, dirs, filenames in os.walk(inp):
for f in filenames:
log = codecs.open(os.path.join(root, f), 'r')
j_obj = json.load(log)
j_obj = json_format(j_obj)
# not needed, because json_format already takes care of the right structuring
#text = Text(j_obj)
textWriter(j_obj, out, verbose)
def raise_error(error_type: str) -> None:
"""Raise the appropriate error based on error message."""
try:
error = next((v for k, v in ERROR_CODES.items() if k in error_type))
except StopIteration:
error = AirVisualError
raise error(error_type)
def remove_scene(self, scene_id):
"""remove a scene by Scene ID"""
if self.state.activeSceneId == scene_id:
err_msg = "Requested to delete scene {sceneNum}, which is currently active. Cannot delete active scene.".format(sceneNum=scene_id)
logging.info(err_msg)
return(False, 0, err_msg)
try:
del self.state.scenes[scene_id]
logging.debug("Deleted scene {sceneNum}".format(sceneNum=scene_id))
except KeyError:
err_msg = "Requested to delete scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
logging.info(err_msg)
return(False, 0, err_msg)
# if we are here, we deleted a scene, so publish it
sequence_number = self.zmq_publisher.publish_scene_remove(scene_id)
logging.debug("Removed scene {sceneNum}".format(sceneNum=scene_id))
return (True, sequence_number, "OK")
def __set_title(self, value):
"""
Sets title of this axis.
"""
# OpenOffice on Debian "squeeze" ignore value of target.XAxis.String
# unless target.HasXAxisTitle is set to True first. (Despite the
# fact that target.HasXAxisTitle is reported to be False until
# target.XAxis.String is set to non empty value.)
self._target.setPropertyValue(self._has_axis_title_property, True)
target = self._get_title_target()
target.setPropertyValue('String', text_type(value))
def check_process_counts(self):
"""Check for the minimum consumer process levels and start up new
processes needed.
"""
LOGGER.debug('Checking minimum consumer process levels')
for name in self.consumers:
processes_needed = self.process_spawn_qty(name)
if processes_needed:
LOGGER.info('Need to spawn %i processes for %s',
processes_needed, name)
self.start_processes(name, processes_needed)
def check_for_errors(self):
"""Check Connection for errors.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not self.exceptions:
if not self.is_closed:
return
why = AMQPConnectionError('connection was closed')
self.exceptions.append(why)
self.set_state(self.CLOSED)
self.close()
raise self.exceptions[0]
def key_exists(hive, key, use_32bit_registry=False):
'''
Check that the key is found in the registry. This refers to keys and not
value/data pairs. To check value/data pairs, use ``value_exists``
Args:
hive (str): The hive to connect to
key (str): The key to check
use_32bit_registry (bool): Look in the 32bit portion of the registry
Returns:
bool: True if exists, otherwise False
Usage:
.. code-block:: python
import salt.utils.win_reg as reg
reg.key_exists(hive='HKLM', key='SOFTWARE\\Microsoft')
'''
local_hive = _to_unicode(hive)
local_key = _to_unicode(key)
registry = Registry()
try:
hkey = registry.hkeys[local_hive]
except KeyError:
raise CommandExecutionError('Invalid Hive: {0}'.format(local_hive))
access_mask = registry.registry_32[use_32bit_registry]
handle = None
try:
handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)
return True
except pywintypes.error as exc:
if exc.winerror == 2:
return False
raise
finally:
if handle:
win32api.RegCloseKey(handle)
def reset(self, indices=None):
"""Reset the environment and convert the resulting observation.
Args:
indices: The batch indices of environments to reset; defaults to all.
Returns:
Batch of observations.
"""
if indices is None:
indices = np.arange(len(self._envs))
if self._blocking:
observs = [self._envs[index].reset() for index in indices]
else:
observs = [self._envs[index].reset(blocking=False) for index in indices]
observs = [observ() for observ in observs]
observ = np.stack(observs)
return observ
def args(parsed_args, name=None):
"""Interpret parsed args to streams"""
strings = parsed_args.arg_strings(name)
files = [s for s in strings if os.path.isfile(s)]
if files:
streams = [open(f) for f in files]
else:
streams = []
if getattr(parsed_args, 'paste', not files):
streams.append(clipboard_stream())
if getattr(parsed_args, 'stdin', False):
streams.append(sys.stdin)
elif not streams:
streams = [sys.stdin]
return streams
def molmz(df, noise=10000):
"""
The mz of the molecular ion.
"""
d = ((df.values > noise) * df.columns).max(axis=1)
return Trace(d, df.index, name='molmz')
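# Minimal illustration of the selection logic in molmz (Trace comes from the
# surrounding library): columns are m/z values, rows are scans, and each scan
# yields the largest m/z whose intensity exceeds the noise threshold.
#
#     df = pd.DataFrame([[50, 20000, 300], [12000, 80, 15000]],
#                       index=[0.1, 0.2], columns=[41.0, 43.0, 57.0])
#     ((df.values > 10000) * df.columns).max(axis=1)   # -> [43.0, 57.0]
#
# Scan 0 picks 43.0, the only m/z above the threshold; scan 1 picks 57.0, the
# largest of the m/z values above it.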
def get_current_user(self):
"""Get data from the current user endpoint"""
url = self.current_user_url
result = self.get(url)
return result
def write(gctoo, out_fname, data_null="NaN", metadata_null="-666", filler_null="-666", data_float_format="%.4f"):
"""Write a gctoo object to a gct file.
Args:
gctoo (gctoo object)
out_fname (string): filename for output gct file
data_null (string): how to represent missing values in the data (default = "NaN")
metadata_null (string): how to represent missing values in the metadata (default = "-666")
filler_null (string): what value to fill the top-left filler block with (default = "-666")
data_float_format (string): how many decimal points to keep in representing data
(default = 4 digits; None will keep all digits)
Returns:
None
"""
# Create handle for output file
if not out_fname.endswith(".gct"):
out_fname += ".gct"
f = open(out_fname, "w")
# Write first two lines
dims = [str(gctoo.data_df.shape[0]), str(gctoo.data_df.shape[1]),
str(gctoo.row_metadata_df.shape[1]), str(gctoo.col_metadata_df.shape[1])]
write_version_and_dims(VERSION, dims, f)
# Write top half of the gct
write_top_half(f, gctoo.row_metadata_df, gctoo.col_metadata_df,
metadata_null, filler_null)
# Write bottom half of the gct
write_bottom_half(f, gctoo.row_metadata_df, gctoo.data_df,
data_null, data_float_format, metadata_null)
f.close()
logger.info("GCT has been written to {}".format(out_fname)) | Write a gctoo object to a gct file.
Args:
gctoo (gctoo object)
out_fname (string): filename for output gct file
data_null (string): how to represent missing values in the data (default = "NaN")
metadata_null (string): how to represent missing values in the metadata (default = "-666")
filler_null (string): what value to fill the top-left filler block with (default = "-666")
data_float_format (string): how many decimal points to keep in representing data
(default = 4 digits; None will keep all digits)
Returns:
None |
def compile_insert(self, query, values):
"""
Compile insert statement into SQL
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The insert values
:type values: dict or list
:return: The compiled insert
:rtype: str
"""
table = self.wrap_table(query.from__)
if not isinstance(values, list):
values = [values]
# If there is only one row to insert, we just use the normal grammar
if len(values) == 1:
return super(SQLiteQueryGrammar, self).compile_insert(query, values)
names = self.columnize(values[0].keys())
columns = []
# SQLite requires us to build the multi-row insert as a listing of select with
# unions joining them together. So we'll build out this list of columns and
# then join them all together with select unions to complete the queries.
for column in values[0].keys():
columns.append("%s AS %s" % (self.get_marker(), self.wrap(column)))
columns = [", ".join(columns)] * len(values)
return "INSERT INTO %s (%s) SELECT %s" % (
table,
names,
" UNION ALL SELECT ".join(columns),
)
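# Sketch of the SQL shape produced for a multi-row insert, assuming query.from__ is
# 'users', the grammar's placeholder marker is '?' and identifiers are double-quoted
# (all assumptions about the surrounding grammar, not shown in this snippet):
#
#     compile_insert(query, [{'name': 'a'}, {'name': 'b'}])
#     # -> 'INSERT INTO "users" ("name") SELECT ? AS "name" UNION ALL SELECT ? AS "name"'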
def to_fmt(self):
"""
Return an Fmt representation for pretty-printing
"""
params = ""
txt = fmt.sep(" ", ['fun'])
name = self.show_name()
if name != "":
txt.lsdata.append(name)
tparams = []
if self.tparams is not None:
tparams = list(self.tparams)
if self.variadic:
tparams.append('...')
params = '(' + ", ".join(tparams) + ')'
txt.lsdata.append(': ' + params)
txt.lsdata.append('-> ' + self.tret)
return txt
def get_phenotype(self, individual_id):
"""
Return the phenotype of an individual
If individual does not exist return 0
Arguments:
individual_id (str): Represents the individual id
Returns:
int : Integer that represents the phenotype
"""
phenotype = 0 # This is if unknown phenotype
if individual_id in self.individuals:
phenotype = self.individuals[individual_id].phenotype
return phenotype
def read(database, table, key):
"""Does a single read operation."""
with database.snapshot() as snapshot:
result = snapshot.execute_sql('SELECT u.* FROM %s u WHERE u.id="%s"' %
(table, key))
for row in result:
key = row[0]
for i in range(NUM_FIELD):
field = row[i + 1]
def parse_yaml(self, y):
'''Parse a YAML specification of a service port connector into this
object.
'''
self.connector_id = y['connectorId']
self.name = y['name']
if 'transMethod' in y:
self.trans_method = y['transMethod']
else:
self.trans_method = ''
if RTS_EXT_NS_YAML + 'comment' in y:
self.comment = y[RTS_EXT_NS_YAML + 'comment']
else:
self.comment = ''
if RTS_EXT_NS_YAML + 'visible' in y:
visible = y[RTS_EXT_NS_YAML + 'visible']
if visible == True or visible == 'true' or visible == '1':
self.visible = True
else:
self.visible = False
if 'sourceServicePort' not in y:
raise InvalidServicePortConnectorNodeError
self.source_service_port = \
TargetPort().parse_yaml(y['sourceServicePort'])
if 'targetServicePort' not in y:
raise InvalidServicePortConnectorNodeError
self.target_service_port = \
TargetPort().parse_yaml(y['targetServicePort'])
if RTS_EXT_NS_YAML + 'properties' in y:
for p in y[RTS_EXT_NS_YAML + 'properties']:
if 'value' in p:
value = p['value']
else:
value = None
self._properties[p['name']] = value
return self
def trigger(self, id, **kwargs):
"""
Triggers a build of a specific Build Configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.trigger(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build Configuration id (required)
:param str callback_url: Optional Callback URL
:param bool temporary_build: Is it a temporary build or a standard build?
:param bool force_rebuild: DEPRECATED: Use RebuildMode.
:param bool build_dependencies: Should we build also dependencies of this BuildConfiguration?
:param bool keep_pod_on_failure: Should we keep the build container running, if the build fails?
:param bool timestamp_alignment: Should we add a timestamp during the alignment? Valid only for temporary builds.
:param str rebuild_mode: Rebuild Modes: FORCE: always rebuild the configuration; EXPLICIT_DEPENDENCY_CHECK: check if any of user defined dependencies has been update; IMPLICIT_DEPENDENCY_CHECK: check if any captured dependency has been updated;
:return: BuildRecordSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.trigger_with_http_info(id, **kwargs)
else:
(data) = self.trigger_with_http_info(id, **kwargs)
return data
def lock_file(path, maxdelay=.1, lock_cls=LockFile, timeout=10.0):
"""Cooperative file lock. Uses `lockfile.LockFile` polling under the hood.
`maxdelay` defines the interval between individual polls.
"""
lock = lock_cls(path)
max_t = time.time() + timeout
while True:
if time.time() >= max_t:
raise LockTimeout("Timeout waiting to acquire lock for %s" % (path,)) # same exception messages as in lockfile
try:
lock.acquire(timeout=0)
except AlreadyLocked:
sleep(maxdelay)
else:
try:
yield lock
break
finally:
lock.release()
def write_markdown_to_file(self, f):
"""Prints this library to file `f`.
Args:
f: File to write to.
Returns:
Dictionary of documented members.
"""
print("---", file=f)
print("---", file=f)
print("<!-- This file is machine generated: DO NOT EDIT! -->", file=f)
print("", file=f)
# TODO(touts): Do not insert these. Let the doc writer put them in
# the module docstring explicitly.
print("#", self._title, file=f)
if self._prefix:
print(self._prefix, file=f)
print("[TOC]", file=f)
print("", file=f)
if self._module is not None:
self._write_module_markdown_to_file(f, self._module)
def show_replace(self):
"""Show replace widgets"""
self.show(hide_replace=False)
for widget in self.replace_widgets:
widget.show()
def extension (network, session, version, scn_extension, start_snapshot,
end_snapshot, **kwargs):
"""
Function that adds an additional network to the existing network container.
The new network can include every PyPSA-component (e.g. buses, lines, links).
To connect it to the existing network, transformers are needed.
All components and their time series for the additional scenario need to be inserted into the fitting 'model_draft.ego_grid_pf_hv_extension_' table.
The scn_name in these tables has to be labelled with 'extension_' + scn_name (e.g. 'extension_nep2035').
So far, the tables include three additional scenarios:
'nep2035_confirmed': all new lines and needed transformers planned in the 'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the Bundesnetzagentur (BNetzA)
'nep2035_b2': all new lines and needed transformers planned in the NEP 2035 in the scenario 2035 B2
'BE_NO_NEP 2035': DC lines and transformers to connect the upcoming electrical neighbours Belgium and Norway
Generation, loads and their time series in Belgium and Norway for scenario 'NEP 2035'
Parameters
-----
network : The existing network container (e.g. scenario 'NEP 2035')
session : session-data
scn_extension : Name of the additional scenario (WITHOUT 'extension_')
start_snapshot, end_snapshot: Simulation time
Returns
------
network : Network container including existing and additional network
"""
if version is None:
ormcls_prefix = 'EgoGridPfHvExtension'
else:
ormcls_prefix = 'EgoPfHvExtension'
# Adding overlay-network to existing network
scenario = NetworkScenario(session,
version = version,
prefix=ormcls_prefix,
method=kwargs.get('method', 'lopf'),
start_snapshot=start_snapshot,
end_snapshot=end_snapshot,
scn_name='extension_' + scn_extension)
network = scenario.build_network(network)
# Allow lossless links to conduct bidirectional
network.links.loc[network.links.efficiency == 1.0, 'p_min_pu'] = -1
# Set coordinates for new buses
extension_buses = network.buses[network.buses.scn_name ==
'extension_' + scn_extension]
for idx, row in extension_buses.iterrows():
wkt_geom = to_shape(row['geom'])
network.buses.loc[idx, 'x'] = wkt_geom.x
network.buses.loc[idx, 'y'] = wkt_geom.y
return network
def get_document_models():
"""Return dict of index.doc_type: model."""
mappings = {}
for i in get_index_names():
for m in get_index_models(i):
key = "%s.%s" % (i, m._meta.model_name)
mappings[key] = m
return mappings
def parse_seconds(value):
'''
Parse string into Seconds instances.
Handled formats:
HH:MM:SS
HH:MM
SS
'''
svalue = str(value)
colons = svalue.count(':')
if colons == 2:
hours, minutes, seconds = [int(v) for v in svalue.split(':')]
elif colons == 1:
hours, minutes = [int(v) for v in svalue.split(':')]
seconds = 0
elif colons == 0:
hours = 0
minutes = 0
seconds = int(svalue)
else:
raise ValueError('Must be in seconds or HH:MM:SS format')
return Seconds.from_hms(hours, minutes, seconds)
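# Example inputs accepted by parse_seconds (Seconds.from_hms is defined by the
# surrounding module):
#
#     parse_seconds('01:30:05')   # -> Seconds.from_hms(1, 30, 5)
#     parse_seconds('02:15')      # -> Seconds.from_hms(2, 15, 0)
#     parse_seconds(90)           # -> Seconds.from_hms(0, 0, 90)
#     parse_seconds('1:2:3:4')    # raises ValueError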
def get_resource_uri(self, obj):
"""
Return the uri of the given object.
"""
url = 'api:%s:%s-detail' % (
self.api_version,
getattr(
self, 'resource_view_name',
self.Meta.model._meta.model_name
)
)
return reverse(url, request=self.context.get('request', None), kwargs={
self.lookup_field: getattr(obj, self.lookup_field)
})
def get_orderbook(self):
"""Get orderbook for the instrument
:Returns:
orderbook : dict
orderbook dict for the instrument
"""
if self in self.parent.books.keys():
return self.parent.books[self]
return {
"bid": [0], "bidsize": [0],
"ask": [0], "asksize": [0]
}
def _match_data_to_parameter(cls, data):
""" find the appropriate parameter for a parameter field """
in_value = data["in"]
for cls in [QueryParameter, HeaderParameter, FormDataParameter,
PathParameter, BodyParameter]:
if in_value == cls.IN:
return cls
return None
def absent(email, profile="splunk", **kwargs):
'''
Ensure a splunk user is absent
.. code-block:: yaml
ensure example test user 1:
splunk.absent:
- email: '[email protected]'
- name: 'exampleuser'
The following parameters are required:
email
This is the email of the user in splunk
name
This is the splunk username used to identify the user.
'''
user_identity = kwargs.get('name')
ret = {
'name': user_identity,
'changes': {},
'result': None,
'comment': 'User {0} is absent.'.format(user_identity)
}
target = __salt__['splunk.get_user'](email, profile=profile)
if not target:
ret['comment'] = 'User {0} does not exist'.format(user_identity)
ret['result'] = True
return ret
if __opts__['test']:
ret['comment'] = "User {0} is all set to be deleted".format(user_identity)
ret['result'] = None
return ret
result = __salt__['splunk.delete_user'](email, profile=profile)
if result:
ret['comment'] = 'Deleted user {0}'.format(user_identity)
ret['changes'].setdefault('old', 'User {0} exists'.format(user_identity))
ret['changes'].setdefault('new', 'User {0} deleted'.format(user_identity))
ret['result'] = True
else:
ret['comment'] = 'Failed to delete {0}'.format(user_identity)
ret['result'] = False
return ret
def save_new_environment(name, datadir, srcdir, ckan_version,
deploy_target=None, always_prod=False):
"""
Save an environment's configuration to the source dir and data dir
"""
with open(datadir + '/.version', 'w') as f:
f.write('2')
cp = ConfigParser.SafeConfigParser()
cp.read(srcdir + '/.datacats-environment')
if not cp.has_section('datacats'):
cp.add_section('datacats')
cp.set('datacats', 'name', name)
cp.set('datacats', 'ckan_version', ckan_version)
if deploy_target:
if not cp.has_section('deploy'):
cp.add_section('deploy')
cp.set('deploy', 'target', deploy_target)
if always_prod:
cp.set('datacats', 'always_prod', 'true')
with open(srcdir + '/.datacats-environment', 'w') as config:
cp.write(config)
save_srcdir_location(datadir, srcdir)
def count_curves(self, keys=None, alias=None):
"""
Counts the number of curves in the well that will be selected with the
given key list and the given alias dict. Used by Project's curve table.
"""
if keys is None:
keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
else:
keys = utils.flatten_list(keys)
return len(list(filter(None, [self.get_mnemonic(k, alias=alias) for k in keys])))
def save_as(self, new_filename):
"""
Save our file with the name provided.
Args:
new_filename: New name for the workbook file. String.
Returns:
Nothing.
"""
xfile._save_file(self._filename, self._datasourceTree, new_filename)
def wipe_cfg_vals_from_git_cfg(*cfg_opts):
"""Remove a set of options from Git config."""
for cfg_key_suffix in cfg_opts:
cfg_key = f'cherry-picker.{cfg_key_suffix.replace("_", "-")}'
cmd = "git", "config", "--local", "--unset-all", cfg_key
subprocess.check_call(cmd, stderr=subprocess.STDOUT)
def hkeys(self, name, key_start, key_end, limit=10):
"""
Return a list of the top ``limit`` keys between ``key_start`` and
``key_end`` in hash ``name``
Similar to **Redis.HKEYS**
.. note:: The range is (``key_start``, ``key_end``]. The ``key_start``
isn't in the range, but ``key_end`` is.
:param string name: the hash name
:param string key_start: The lower bound(not included) of keys to be
returned, empty string ``''`` means -inf
:param string key_end: The upper bound(included) of keys to be
returned, empty string ``''`` means +inf
:param int limit: number of elements will be returned.
:return: a list of keys
:rtype: list
>>> ssdb.hkeys('hash_1', 'a', 'g', 10)
['b', 'c', 'd', 'e', 'f', 'g']
>>> ssdb.hkeys('hash_2', 'key ', 'key4', 3)
['key1', 'key2', 'key3']
>>> ssdb.hkeys('hash_1', 'f', '', 10)
['g']
>>> ssdb.hkeys('hash_2', 'keys', '', 10)
[]
"""
limit = get_positive_integer('limit', limit)
return self.execute_command('hkeys', name, key_start, key_end, limit)
def get_string_from_data(self, offset, data):
"""Get an ASCII string from data."""
s = self.get_bytes_from_data(offset, data)
end = s.find(b'\0')
if end >= 0:
s = s[:end]
return s
def _add_encoded(self, encoded):
"""Returns E(a + b), given self=E(a) and b.
Args:
encoded (EncodedNumber): an :class:`EncodedNumber` to be added
to `self`.
Returns:
EncryptedNumber: E(a + b), calculated by encrypting b and
taking the product of E(a) and E(b) modulo
:attr:`~PaillierPublicKey.n` ** 2.
Raises:
ValueError: if scalar is out of range or precision.
"""
if self.public_key != encoded.public_key:
raise ValueError("Attempted to add numbers encoded against "
"different public keys!")
# In order to add two numbers, their exponents must match.
a, b = self, encoded
if a.exponent > b.exponent:
a = self.decrease_exponent_to(b.exponent)
elif a.exponent < b.exponent:
b = b.decrease_exponent_to(a.exponent)
# Don't bother to salt/obfuscate in a basic operation, do it
# just before leaving the computer.
encrypted_scalar = a.public_key.raw_encrypt(b.encoding, 1)
sum_ciphertext = a._raw_add(a.ciphertext(False), encrypted_scalar)
return EncryptedNumber(a.public_key, sum_ciphertext, a.exponent)
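# Hedged usage sketch: this method matches the python-paillier ("phe") API, where
# adding a plain number to an EncryptedNumber encodes it and ends up in _add_encoded.
from phe import paillier

_pub, _priv = paillier.generate_paillier_keypair()
_enc_sum = _pub.encrypt(3) + 2          # 2 is encoded, then added via _add_encoded
assert _priv.decrypt(_enc_sum) == 5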
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist and display plugin enable...
if args.disable_process:
msg = "PROCESSES DISABLED (press 'z' to display)"
ret.append(self.curse_add_line(msg))
return ret
if not self.stats:
return ret
# Display the filter (if it exists)
if glances_processes.process_filter is not None:
msg = 'Processes filter:'
ret.append(self.curse_add_line(msg, "TITLE"))
msg = ' {} '.format(glances_processes.process_filter)
if glances_processes.process_filter_key is not None:
msg += 'on column {} '.format(glances_processes.process_filter_key)
ret.append(self.curse_add_line(msg, "FILTER"))
msg = '(\'ENTER\' to edit, \'E\' to reset)'
ret.append(self.curse_add_line(msg))
ret.append(self.curse_new_line())
# Build the string message
# Header
msg = 'TASKS'
ret.append(self.curse_add_line(msg, "TITLE"))
# Compute processes
other = self.stats['total']
msg = '{:>4}'.format(self.stats['total'])
ret.append(self.curse_add_line(msg))
if 'thread' in self.stats:
msg = ' ({} thr),'.format(self.stats['thread'])
ret.append(self.curse_add_line(msg))
if 'running' in self.stats:
other -= self.stats['running']
msg = ' {} run,'.format(self.stats['running'])
ret.append(self.curse_add_line(msg))
if 'sleeping' in self.stats:
other -= self.stats['sleeping']
msg = ' {} slp,'.format(self.stats['sleeping'])
ret.append(self.curse_add_line(msg))
msg = ' {} oth '.format(other)
ret.append(self.curse_add_line(msg))
# Display sort information
try:
sort_human = self.sort_for_human[glances_processes.sort_key]
except KeyError:
sort_human = '?'
if glances_processes.auto_sort:
msg = 'sorted automatically'
ret.append(self.curse_add_line(msg))
msg = ' by {}'.format(sort_human)
else:
msg = 'sorted by {}'.format(sort_human)
ret.append(self.curse_add_line(msg))
# Return the message with decoration
return ret
def _release_info():
"""Check latest fastfood release info from PyPI."""
pypi_url = 'http://pypi.python.org/pypi/fastfood/json'
headers = {
'Accept': 'application/json',
}
request = urllib.Request(pypi_url, headers=headers)
response = urllib.urlopen(request).read().decode('utf_8')
data = json.loads(response)
return data
def sync_matchers(saltenv=None, refresh=False, extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2019.2.0
Sync matcher modules from ``salt://_matchers`` to the minion
saltenv
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
If not passed, then all environments configured in the :ref:`top files
<states-top>` will be checked for engines to sync. If no top files are
found, then the ``base`` environment will be synced.
refresh : True
If ``True``, refresh the available execution modules on the minion.
This refresh will be performed even if no new matcher modules are synced.
Set to ``False`` to prevent this refresh.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Examples:
.. code-block:: bash
salt '*' saltutil.sync_matchers
salt '*' saltutil.sync_matchers saltenv=base,dev
'''
ret = _sync('matchers', saltenv, extmod_whitelist, extmod_blacklist)
if refresh:
refresh_modules()
return ret
def pvariance(data, mu=None):
"""Return the population variance of ``data``.
data should be an iterable of Real-valued numbers, with at least one
value. The optional argument mu, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function to calculate the variance from the entire population.
To estimate the variance from a sample, the ``variance`` function is
usually a better choice.
If you have already calculated the mean of the data, you can pass it as
the optional second argument to avoid recalculating it:
This function does not check that ``mu`` is actually the mean of ``data``.
Giving arbitrary values for ``mu`` may lead to invalid or impossible
results.
Decimals and Fractions are supported:
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('pvariance requires at least one data point')
ss = _ss(data, mu)
return ss / n
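# Example usage (population variance over the full data set):
_data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]
pvariance(_data)                 # -> 1.25
_mu = sum(_data) / len(_data)
pvariance(_data, _mu)            # same result; passing mu avoids recomputing the mean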
def _create_update_tracking_related_event(instance):
"""
Create a TrackingEvent and TrackedFieldModification for an UPDATE event
for each related model.
"""
events = {}
# Create a dict mapping related model field to modified fields
for field, related_fields in instance._tracked_related_fields.items():
if not isinstance(instance._meta.get_field(field), ManyToManyField):
if isinstance(instance._meta.get_field(field), ForeignKey):
# Compare pk
value = getattr(instance, '{0}_id'.format(field))
else:
value = getattr(instance, field)
if instance._original_fields[field] != value:
for related_field in related_fields:
events.setdefault(related_field, []).append(field)
# Create the events from the events dict
for related_field, fields in events.items():
try:
related_instances = getattr(instance, related_field[1])
except ObjectDoesNotExist:
continue
# FIXME: isinstance(related_instances, RelatedManager ?)
if hasattr(related_instances, 'all'):
related_instances = related_instances.all()
else:
related_instances = [related_instances]
for related_instance in related_instances:
event = _create_event(related_instance, UPDATE)
for field in fields:
fieldname = '{0}__{1}'.format(related_field[0], field)
_create_tracked_field(
event, instance, field, fieldname=fieldname
)
def transformer_base_v1():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 256
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_schedule = "legacy"
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.1
hparams.shared_embedding_and_softmax_weights = True
hparams.symbol_modality_num_shards = 16
# Add new ones like this.
hparams.add_hparam("filter_size", 2048)
# Layer-related flags. If zero, these fall back on hparams.num_hidden_layers.
hparams.add_hparam("num_encoder_layers", 0)
hparams.add_hparam("num_decoder_layers", 0)
# Attention-related flags.
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("ffn_layer", "dense_relu_dense")
hparams.add_hparam("parameter_attention_key_channels", 0)
hparams.add_hparam("parameter_attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("attention_dropout_broadcast_dims", "")
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("relu_dropout_broadcast_dims", "")
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("nbr_decoder_problems", 1)
hparams.add_hparam("proximity_bias", False)
hparams.add_hparam("causal_decoder_self_attention", True)
hparams.add_hparam("use_pad_remover", True)
hparams.add_hparam("self_attention_type", "dot_product")
hparams.add_hparam("conv_first_kernel", 3)
hparams.add_hparam("attention_variables_3d", False)
hparams.add_hparam("use_target_space_embedding", True)
# These parameters are only used when ffn_layer=="local_moe_tpu"
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-3
# If specified, use this value instead of problem name in metrics.py.
# This is useful for programs that can automatically compare experiments side
# by side based on the same metric names.
hparams.add_hparam("overload_eval_metric_name", "")
# For making a transformer encoder unidirectional by using masked
# attention.
hparams.add_hparam("unidirectional_encoder", False)
# For hard attention.
hparams.add_hparam("hard_attention_k", 0)
return hparams
def str_is_well_formed(xml_str):
"""
Args:
xml_str : str
DataONE API XML doc.
Returns:
bool: **True** if XML doc is well formed.
"""
try:
str_to_etree(xml_str)
except xml.etree.ElementTree.ParseError:
return False
else:
return True
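# Simple illustration (str_to_etree is the sibling helper this function wraps):
#
#     str_is_well_formed('<resource><id>x</id></resource>')   # -> True
#     str_is_well_formed('<resource><id>x</resource>')        # -> False (mismatched tag)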
def resolve_addresses(self, node):
"""
Resolve addresses of children of Addrmap and Regfile components
"""
# Get alignment based on 'alignment' property
# This remains constant for all children
prop_alignment = self.alignment_stack[-1]
if prop_alignment is None:
# was not specified. Does not contribute to alignment
prop_alignment = 1
prev_node = None
for child_node in node.children(skip_not_present=False):
if not isinstance(child_node, AddressableNode):
continue
if child_node.inst.addr_offset is not None:
# Address is already known. Do not need to infer
prev_node = child_node
continue
if node.env.chk_implicit_addr:
node.env.msg.message(
node.env.chk_implicit_addr,
"Address offset of component '%s' is not explicitly set" % child_node.inst.inst_name,
child_node.inst.inst_src_ref
)
# Get alignment specified by '%=' allocator, if any
alloc_alignment = child_node.inst.addr_align
if alloc_alignment is None:
# was not specified. Does not contribute to alignment
alloc_alignment = 1
# Calculate alignment based on current addressing mode
if self.addressing_mode_stack[-1] == rdltypes.AddressingType.compact:
if isinstance(child_node, RegNode):
# Regs are aligned based on their accesswidth
mode_alignment = child_node.get_property('accesswidth') // 8
else:
# Spec does not specify for other components
# Assuming absolutely compact packing
mode_alignment = 1
elif self.addressing_mode_stack[-1] == rdltypes.AddressingType.regalign:
# Components are aligned to a multiple of their size
# Spec vaguely suggests that alignment is also a power of 2
mode_alignment = child_node.size
mode_alignment = roundup_pow2(mode_alignment)
elif self.addressing_mode_stack[-1] == rdltypes.AddressingType.fullalign:
# Same as regalign except for arrays
# Arrays are aligned to their total size
# Both are rounded to power of 2
mode_alignment = child_node.total_size
mode_alignment = roundup_pow2(mode_alignment)
else:
raise RuntimeError
# Calculate resulting address offset
alignment = max(prop_alignment, alloc_alignment, mode_alignment)
if prev_node is None:
next_offset = 0
else:
next_offset = prev_node.inst.addr_offset + prev_node.total_size
# round next_offset up to alignment
child_node.inst.addr_offset = roundup_to(next_offset, alignment)
prev_node = child_node
# Sort children by address offset
# Non-addressable child components are sorted to be first (signals)
def get_child_sort_key(inst):
if not isinstance(inst, comp.AddressableComponent):
return -1
else:
return inst.addr_offset
node.inst.children.sort(key=get_child_sort_key) | Resolve addresses of children of Addrmap and Regfile components |
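The address arithmetic above relies on two small helpers; a minimal sketch of plausible implementations follows (hypothetical, the real systemrdl-compiler versions may differ):
def roundup_pow2(x):
    # Round x up to the next power of two (0 or 1 stays 1).
    p = 1
    while p < x:
        p <<= 1
    return p

def roundup_to(x, alignment):
    # Round x up to the next multiple of alignment.
    return ((x + alignment - 1) // alignment) * alignment

# e.g. roundup_pow2(6) == 8 and roundup_to(0x13, 0x8) == 0x18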
def GetTSKFileByPathSpec(self, path_spec):
"""Retrieves the SleuthKit file object for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pytsk3.File: TSK file.
Raises:
PathSpecError: if the path specification is missing inode and location.
"""
# Opening a file by inode number is faster than opening a file
# by location.
inode = getattr(path_spec, 'inode', None)
location = getattr(path_spec, 'location', None)
if inode is not None:
tsk_file = self._tsk_file_system.open_meta(inode=inode)
elif location is not None:
tsk_file = self._tsk_file_system.open(location)
else:
raise errors.PathSpecError(
'Path specification missing inode and location.')
return tsk_file | Retrieves the SleuthKit file object for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pytsk3.File: TSK file.
Raises:
PathSpecError: if the path specification is missing inode and location. |
def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Get an rdataset matching the specified properties in the
current node.
None is returned if an rdataset of the specified type and
class does not exist and I{create} is not True.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type.
@type covers: int
@param create: If True, create the rdataset if it is not found.
@type create: bool
@rtype: dns.rdataset.Rdataset object or None
"""
try:
rds = self.find_rdataset(rdclass, rdtype, covers, create)
except KeyError:
rds = None
return rds | Get an rdataset matching the specified properties in the
current node.
None is returned if an rdataset of the specified type and
class does not exist and I{create} is not True.
@param rdclass: The class of the rdataset
@type rdclass: int
@param rdtype: The type of the rdataset
@type rdtype: int
@param covers: The covered type.
@type covers: int
@param create: If True, create the rdataset if it is not found.
@type create: bool
@rtype: dns.rdataset.Rdataset object or None |
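A short usage sketch, assuming this method belongs to dnspython's dns.node.Node:
import dns.node
import dns.rdataclass
import dns.rdatatype

node = dns.node.Node()
# No A rdataset exists yet and create defaults to False, so None is returned.
print(node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A))   # None
# With create=True an empty rdataset is created, attached to the node and returned.
rds = node.get_rdataset(dns.rdataclass.IN, dns.rdatatype.A, create=True)
print(rds.rdclass == dns.rdataclass.IN, rds.rdtype == dns.rdatatype.A)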
def info(torrent_path):
"""Print out information from .torrent file."""
my_torrent = Torrent.from_file(torrent_path)
size = my_torrent.total_size
click.secho('Name: %s' % my_torrent.name, fg='blue')
click.secho('Files:')
for file_tuple in my_torrent.files:
click.secho(file_tuple.name)
click.secho('Hash: %s' % my_torrent.info_hash, fg='blue')
click.secho('Size: %s (%s)' % (humanize_filesize(size), size), fg='blue')
click.secho('Magnet: %s' % my_torrent.get_magnet(), fg='yellow') | Print out information from .torrent file. |

def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if dt.tzinfo is not None and dt.tzinfo is not self:
raise ValueError('fromutc: dt.tzinfo is not self')
return (dt + self._utcoffset).replace(tzinfo=self) | See datetime.tzinfo.fromutc |
def get_rate_limits():
"""Retrieve status (and optionally) version from the API."""
client = get_rates_api()
with catch_raise_api_exception():
data, _, headers = client.rates_limits_list_with_http_info()
ratelimits.maybe_rate_limit(client, headers)
return {
k: RateLimitsInfo.from_dict(v)
for k, v in six.iteritems(data.to_dict().get("resources", {}))
} | Retrieve rate limits from the API.
def monitoring_problems(self):
"""Get Alignak scheduler monitoring status
Returns an object with the scheduler livesynthesis
and the known problems
:return: scheduler live synthesis
:rtype: dict
"""
if self.app.type != 'scheduler':
return {'_status': u'ERR',
'_message': u"This service is only available for a scheduler daemon"}
res = self.identity()
res.update(self.app.get_monitoring_problems())
return res | Get Alignak scheduler monitoring status
Returns an object with the scheduler livesynthesis
and the known problems
:return: scheduler live synthesis
:rtype: dict |
def cublasZhpmv(handle, uplo, n, alpha, AP, x, incx, beta, y, incy):
"""
Matrix-vector product for Hermitian-packed matrix.
"""
status = _libcublas.cublasZhpmv_v2(handle,
_CUBLAS_FILL_MODE[uplo],
n, ctypes.byref(cuda.cuDoubleComplex(alpha.real,
alpha.imag)),
int(AP), int(x), incx,
ctypes.byref(cuda.cuDoubleComplex(beta.real,
beta.imag)),
int(y), incy)
cublasCheckStatus(status) | Matrix-vector product for Hermitian-packed matrix. |
def run_compute(self, compute=None, model=None, detach=False,
times=None, **kwargs):
"""
Run a forward model of the system on the enabled dataset using
a specified set of compute options.
To attach and set custom values for compute options, including choosing
which backend to use, see:
* :meth:`add_compute`
To define the dataset types and times at which the model should be
computed see:
* :meth:`add_dataset`
To disable or enable existing datasets see:
* :meth:`enable_dataset`
* :meth:`disable_dataset`
:parameter str compute: (optional) name of the compute options to use.
If not provided or None, run_compute will use an existing set of
attached compute options if only 1 exists. If more than 1 exist,
then compute becomes a required argument. If no compute options
exist, then this will use default options and create and attach
a new set of compute options with a default label.
:parameter str model: (optional) name of the resulting model. If not
provided this will default to 'latest'. NOTE: existing models
with the same name will be overwritten - including 'latest'
:parameter bool detach: [EXPERIMENTAL] whether to detach from the computation run,
or wait for computations to complete. If detach is True, see
:meth:`get_model` and :meth:`phoebe.parameters.parameters.JobParameter`
for details on how to check the job status and retrieve the results.
Alternatively, you can provide the server location (host and port) as
a string to detach and the bundle will temporarily enter client mode,
submit the job to the server, and leave client mode. The resulting
:meth:`phoebe.parameters.parameters.JobParameter` will then contain
the necessary information to pull the results from the server at anytime
in the future.
:parameter list times: [EXPERIMENTAL] override the times at which to compute the model.
NOTE: this only (temporarily) replaces the time array for datasets
with times provided (ie empty time arrays are still ignored). So if
you attach a rv to a single component, the model will still only
compute for that single component. ALSO NOTE: this option is ignored
if detach=True (at least for now).
:parameter **kwargs: any values in the compute options to temporarily
override for this single compute run (parameter values will revert
after run_compute is finished)
:return: :class:`phoebe.parameters.parameters.ParameterSet` of the
newly-created model containing the synthetic data.
"""
if isinstance(detach, str):
# then we want to temporarily go in to client mode
self.as_client(server=detach)
self.run_compute(compute=compute, model=model, times=times, **kwargs)
self.as_client(False)
return self.get_model(model)
# protomesh and pbmesh were supported kwargs in 2.0.x but are no longer
# so let's raise an error if they're passed here
if 'protomesh' in kwargs.keys():
raise ValueError("protomesh is no longer a valid option")
if 'pbmesh' in kwargs.keys():
raise ValueError("pbmesh is no longer a valid option")
if model is None:
model = 'latest'
if model in self.models:
logger.warning("overwriting model: {}".format(model))
self.remove_model(model)
self._check_label(model)
if isinstance(times, float) or isinstance(times, int):
times = [times]
# handle case where compute is not provided
if compute is None:
computes = self.get_compute(**kwargs).computes
if len(computes)==0:
# NOTE: this doesn't take **kwargs since we want those to be
# temporarily overriden as is the case when the compute options
# are already attached
self.add_compute()
computes = self.get_compute().computes
# now len(computes) should be 1 and will trigger the next
# if statement
if len(computes)==1:
compute = computes[0]
elif len(computes)>1:
raise ValueError("must provide label of compute options since more than one are attached")
# handle the ability to send multiple compute options/backends - here
# we'll just always send a list of compute options
if isinstance(compute, str):
computes = [compute]
else:
computes = compute
# if interactive mode was ever off, let's make sure all constraints
# have been run before running system checks or computing the model
changed_params = self.run_delayed_constraints()
# any kwargs that were used just to filter for get_compute should be
# removed so that they aren't passed on to all future get_value(...
# **kwargs) calls
for k in parameters._meta_fields_filter:
if k in kwargs.keys():
dump = kwargs.pop(k)
# we'll wait to here to run kwargs and system checks so that
# add_compute is already called if necessary
self._kwargs_checks(kwargs, ['skip_checks', 'jobid'])
if not kwargs.get('skip_checks', False):
passed, msg = self.run_checks(computes=computes, **kwargs)
if passed is None:
# then just raise a warning
logger.warning(msg)
if passed is False:
# then raise an error
raise ValueError("system failed to pass checks: {}".format(msg))
# let's first make sure that there is no duplication of enabled datasets
datasets = []
# compute_ so we don't write over compute which we need if detach=True
for compute_ in computes:
# TODO: filter by value instead of if statement once implemented
for enabled_param in self.filter(qualifier='enabled',
compute=compute_,
context='compute').to_list():
if enabled_param.get_value():
item = (enabled_param.dataset, enabled_param.component)
if item in datasets:
raise ValueError("dataset {}@{} is enabled in multiple compute options".format(item[0], item[1]))
datasets.append(item)
# now if we're supposed to detach we'll just prepare the job for submission
# either in another subprocess or through some queuing system
if detach and mpi.within_mpirun:
logger.warning("cannot detach when within mpirun, ignoring")
detach = False
if (detach or mpi.enabled) and not mpi.within_mpirun:
if detach:
logger.warning("detach support is EXPERIMENTAL")
if times is not None:
# TODO: support overriding times with detached - issue here is
# that it isn't necessarilly trivially to send this array
# through the script. May need to convert to list first to
# avoid needing to import numpy?
logger.warning("overriding time is not supported within detach - ignoring")
# we'll track everything through the model name as well as
# a random string, to avoid any conflicts
jobid = kwargs.get('jobid', parameters._uniqueid())
# we'll build a python script that can replicate this bundle as it
# is now, run compute, and then save the resulting model
script_fname = "_{}.py".format(jobid)
f = open(script_fname, 'w')
f.write("import os; os.environ['PHOEBE_ENABLE_PLOTTING'] = 'FALSE'; os.environ['PHOEBE_ENABLE_SYMPY'] = 'FALSE'; os.environ['PHOEBE_ENABLE_ONLINE_PASSBANDS'] = 'FALSE';\n")
f.write("import phoebe; import json\n")
# TODO: can we skip the history context? And maybe even other models
# or datasets (except times and only for run_compute but not run_fitting)
f.write("bdict = json.loads(\"\"\"{}\"\"\")\n".format(json.dumps(self.to_json())))
f.write("b = phoebe.Bundle(bdict)\n")
# TODO: make sure this works with multiple computes
compute_kwargs = kwargs.items()+[('compute', compute), ('model', model)]
compute_kwargs_string = ','.join(["{}={}".format(k,"\'{}\'".format(v) if isinstance(v, str) else v) for k,v in compute_kwargs])
f.write("model_ps = b.run_compute({})\n".format(compute_kwargs_string))
f.write("model_ps.save('_{}.out', incl_uniqueid=True)\n".format(jobid))
f.close()
script_fname = os.path.abspath(script_fname)
cmd = mpi.detach_cmd.format(script_fname)
# TODO: would be nice to catch errors caused by the detached script...
# but that would probably need to be the responsibility of the
# jobparam to return a failed status and message
subprocess.call(cmd, shell=True)
# create model parameter and attach (and then return that instead of None)
job_param = JobParameter(self,
location=os.path.dirname(script_fname),
status_method='exists',
retrieve_method='local',
uniqueid=jobid)
metawargs = {'context': 'model', 'model': model}
self._attach_params([job_param], **metawargs)
if isinstance(detach, str):
self.save(detach)
if not detach:
return job_param.attach()
else:
logger.info("detaching from run_compute. Call get_model('{}').attach() to re-attach".format(model))
# return self.get_model(model)
return job_param
for compute in computes:
computeparams = self.get_compute(compute=compute)
if not computeparams.kind:
raise KeyError("could not recognize backend from compute: {}".format(compute))
logger.info("running {} backend to create '{}' model".format(computeparams.kind, model))
compute_class = getattr(backends, '{}Backend'.format(computeparams.kind.title()))
# compute_func = getattr(backends, computeparams.kind)
metawargs = {'compute': compute, 'model': model, 'context': 'model'} # dataset, component, etc will be set by the compute_func
params = compute_class().run(self, compute, times=times, **kwargs)
# average over any exposure times before attaching parameters
if computeparams.kind == 'phoebe':
# TODO: we could eventually do this for all backends - we would
# just need to copy the computeoption parameters into each backend's
# compute PS, and include similar logic for oversampling that is
# currently in backends._extract_info_from_bundle_by_time into
# backends._extract_info_from_bundle_by_dataset. We'd also
# need to make sure that exptime is not being passed to any
# alternate backend - and ALWAYS handle it here
for dataset in params.datasets:
# not all dataset-types currently support exposure times.
# Once they do, this ugly if statement can be removed
if len(self.filter(dataset=dataset, qualifier='exptime')):
exptime = self.get_value(qualifier='exptime', dataset=dataset, context='dataset', unit=u.d)
if exptime > 0:
if self.get_value(qualifier='fti_method', dataset=dataset, compute=compute, context='compute', **kwargs)=='oversample':
times_ds = self.get_value(qualifier='times', dataset=dataset, context='dataset')
# exptime = self.get_value(qualifier='exptime', dataset=dataset, context='dataset', unit=u.d)
fti_oversample = self.get_value(qualifier='fti_oversample', dataset=dataset, compute=compute, context='compute', check_visible=False, **kwargs)
# NOTE: this is hardcoded for LCs which is the
# only dataset that currently supports oversampling,
# but this will need to be generalized if/when
# we expand that support to other dataset kinds
fluxes = np.zeros(times_ds.shape)
# the oversampled times and fluxes will be
# sorted according to times this may cause
# exposures to "overlap" each other, so we'll
# later need to determine which times (and
# therefore fluxes) belong to which datapoint
times_oversampled_sorted = params.get_value('times', dataset=dataset)
fluxes_oversampled = params.get_value('fluxes', dataset=dataset)
for i,t in enumerate(times_ds):
# rebuild the unsorted oversampled times - see backends._extract_from_bundle_by_time
# TODO: try to optimize this by having these indices returned by the backend itself
times_oversampled_this = np.linspace(t-exptime/2., t+exptime/2., fti_oversample)
sample_inds = np.searchsorted(times_oversampled_sorted, times_oversampled_this)
fluxes[i] = np.mean(fluxes_oversampled[sample_inds])
params.set_value(qualifier='times', dataset=dataset, value=times_ds)
params.set_value(qualifier='fluxes', dataset=dataset, value=fluxes)
self._attach_params(params, **metawargs)
redo_kwargs = deepcopy(kwargs)
redo_kwargs['compute'] = computes if len(computes)>1 else computes[0]
redo_kwargs['model'] = model
self._add_history(redo_func='run_compute',
redo_kwargs=redo_kwargs,
undo_func='remove_model',
undo_kwargs={'model': model})
return self.get_model(model) | Run a forward model of the system on the enabled dataset using
a specified set of compute options.
To attach and set custom values for compute options, including choosing
which backend to use, see:
* :meth:`add_compute`
To define the dataset types and times at which the model should be
computed see:
* :meth:`add_dataset`
To disable or enable existing datasets see:
* :meth:`enable_dataset`
* :meth:`disable_dataset`
:parameter str compute: (optional) name of the compute options to use.
If not provided or None, run_compute will use an existing set of
attached compute options if only 1 exists. If more than 1 exist,
then compute becomes a required argument. If no compute options
exist, then this will use default options and create and attach
a new set of compute options with a default label.
:parameter str model: (optional) name of the resulting model. If not
provided this will default to 'latest'. NOTE: existing models
with the same name will be overwritten - including 'latest'
:parameter bool detach: [EXPERIMENTAL] whether to detach from the computation run,
or wait for computations to complete. If detach is True, see
:meth:`get_model` and :meth:`phoebe.parameters.parameters.JobParameter`
for details on how to check the job status and retrieve the results.
Alternatively, you can provide the server location (host and port) as
a string to detach and the bundle will temporarily enter client mode,
submit the job to the server, and leave client mode. The resulting
:meth:`phoebe.parameters.parameters.JobParameter` will then contain
the necessary information to pull the results from the server at anytime
in the future.
:parameter list times: [EXPERIMENTAL] override the times at which to compute the model.
NOTE: this only (temporarily) replaces the time array for datasets
with times provided (ie empty time arrays are still ignored). So if
you attach a rv to a single component, the model will still only
compute for that single component. ALSO NOTE: this option is ignored
if detach=True (at least for now).
:parameter **kwargs: any values in the compute options to temporarily
override for this single compute run (parameter values will revert
after run_compute is finished)
:return: :class:`phoebe.parameters.parameters.ParameterSet` of the
newly-created model containing the synthetic data. |
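A minimal usage sketch under the standard PHOEBE 2 frontend (the dataset, compute and model labels here are invented for illustration):
import numpy as np
import phoebe

b = phoebe.default_binary()
b.add_dataset('lc', times=np.linspace(0, 1, 51), dataset='lc01')
b.add_compute('phoebe', compute='fast01')
model_ps = b.run_compute(compute='fast01', model='run01')
print(model_ps.filter(qualifier='fluxes'))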
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return OptionNumber(key)
if key not in OptionNumber._member_map_:
extend_enum(OptionNumber, key, default)
return OptionNumber[key] | Backport support for original codes. |
def can_see_members(self, user):
"""Determine if given user can see other group members.
:param user: User to be checked.
:returns: True or False.
"""
if self.privacy_policy == PrivacyPolicy.PUBLIC:
return True
elif self.privacy_policy == PrivacyPolicy.MEMBERS:
return self.is_member(user) or self.is_admin(user)
elif self.privacy_policy == PrivacyPolicy.ADMINS:
return self.is_admin(user) | Determine if given user can see other group members.
:param user: User to be checked.
:returns: True or False. |
def assemble_oligos(dna_list, reference=None):
'''Given a list of DNA sequences, assemble into a single construct.
:param dna_list: List of DNA sequences - they must be single-stranded.
:type dna_list: coral.DNA list
:param reference: Expected sequence - once assembly completed, this will
be used to reorient the DNA (assembly could potentially occur from either
side of a linear DNA construct if oligos are in a random order). If this
fails, an AssemblyError is raised.
:type reference: coral.DNA
:raises: AssemblyError if it can't assemble for any reason.
:returns: A single assembled DNA sequence
:rtype: coral.DNA
'''
# FIXME: this protocol currently only supports 5' ends on the assembly
# Find all matches for every oligo. If more than 2 per side, error.
# Self-oligo is included in case the 3' end is self-complementary.
# 1) Find all unique 3' binders (and non-binders).
match_3 = [bind_unique(seq, dna_list, right=True) for i, seq in
enumerate(dna_list)]
# 2) Find all unique 5' binders (and non-binders).
match_5 = [bind_unique(seq, dna_list, right=False) for i, seq in
enumerate(dna_list)]
# Assemble into 2-tuple
zipped = zip(match_5, match_3)
# 3) If none found, error out with 'oligo n has no binders'
for i, oligo_match in enumerate(zipped):
if not any(oligo_match):
error = 'Oligo {} has no binding partners.'.format(i + 1)
raise AssemblyError(error)
# 4) There should be exactly 2 oligos that bind at 3' end but
# not 5'.
ends = []
for i, (five, three) in enumerate(zipped):
if five is None and three is not None:
ends.append(i)
# 5) If more than 2, error with 'too many ends'.
if len(ends) > 2:
raise AssemblyError('Too many (>2) end oligos found.')
# 6) If fewer than 2, error with 'not enough ends'.
if len(ends) < 2:
raise AssemblyError('Not enough (<2) end oligos found.')
# NOTE:If 1-4 are satisfied, unique linear assembly has been found (proof?)
# 8) Start with first end and build iteratively
last_index = ends[0]
assembly = dna_list[last_index]
flip = True
# This would be slightly less complicated if the sequences were tied to
# their match info in a tuple
# Append next region n - 1 times
for i in range(len(dna_list) - 1):
if flip:
# Next oligo needs to be flipped before concatenation
# Grab 3' match from last oligo's info
current_index, matchlen = zipped[last_index][1]
# Get new oligo sequence, make double-stranded for concatenation
next_oligo = dna_list[current_index].to_ds()
# Reverse complement for concatenation
next_oligo = next_oligo.reverse_complement()
# Don't reverse complement the next one
flip = False
else:
# Grab 5' match from last oligo's info
current_index, matchlen = zipped[last_index][0]
# Get new oligo sequence, make double-stranded for concatenation
next_oligo = dna_list[current_index].to_ds()
# Reverse complement the next one
flip = True
# Trim overlap from new sequence
next_oligo = next_oligo[(matchlen - 1):]
# Concatenate and update last oligo's information
assembly += next_oligo
last_index = current_index
if reference:
if assembly == reference or assembly == reference.reverse_complement():
return assembly
else:
raise AssemblyError('Assembly did not match reference')
else:
return assembly | Given a list of DNA sequences, assemble into a single construct.
:param dna_list: List of DNA sequences - they must be single-stranded.
:type dna_list: coral.DNA list
:param reference: Expected sequence - once assembly completed, this will
be used to reorient the DNA (assembly could potentially occur from either
side of a linear DNA construct if oligos are in a random order). If this
fails, an AssemblyError is raised.
:type reference: coral.DNA
:raises: AssemblyError if it can't assemble for any reason.
:returns: A single assembled DNA sequence
:rtype: coral.DNA |
def bar(self, width, **_):
"""Returns the completed progress bar. Every time this is called the animation moves.
Positional arguments:
width -- the width of the entire bar (including borders).
"""
width -= self._width_offset
self._position += self._direction
# Change direction.
if self._position <= 0 and self._direction < 0:
self._position = 0
self._direction = 1
elif self._position > width:
self._position = width - 1
self._direction = -1
final_bar = (
self.CHAR_LEFT_BORDER +
self.CHAR_EMPTY * self._position +
self.CHAR_ANIMATED +
self.CHAR_EMPTY * (width - self._position) +
self.CHAR_RIGHT_BORDER
)
return final_bar | Returns the completed progress bar. Every time this is called the animation moves.
Positional arguments:
width -- the width of the entire bar (including borders). |
def taskfile_user_data(file_, role):
"""Return the data for user
:param file_: the file that holds the data
:type file_: :class:`jukeboxcore.djadapter.models.File`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the user
:rtype: depending on role
:raises: None
"""
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return file_.user.username | Return the data for user
:param file_: the file that holds the data
:type file_: :class:`jukeboxcore.djadapter.models.File`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the user
:rtype: depending on role
:raises: None |
def _get_requirement_attr(self, attr, path):
"""
Gets the attribute for a given requirement file in path
:param attr: string, attribute
:param path: string, path
:return: The attribute for the requirement, or the global default
"""
for req_file in self.requirements:
if path.strip("/") == req_file.path.strip("/"):
return getattr(req_file, attr)
return getattr(self, attr) | Gets the attribute for a given requirement file in path
:param attr: string, attribute
:param path: string, path
:return: The attribute for the requirement, or the global default |
def update(self, environments):
"""
Method to update environments vip
:param environments vip: List containing environments vip desired
to updated
:return: None
"""
data = {'environments_vip': environments}
environments_ids = [str(env.get('id')) for env in environments]
uri = 'api/v3/environment-vip/%s/' % ';'.join(environments_ids)
return super(ApiEnvironmentVip, self).put(uri, data) | Method to update environments vip
:param environments vip: List containing environments vip desired
to updated
:return: None |
def dropKey(self, key):
'''Drop an attribute/element/key-value pair from all the dictionaries.
If the dictionary key does not exist in a particular dictionary, then
that dictionary is left unchanged.
Side effect: if the key is a number and it matches a list (interpreted
as a dictionary), it will cause the "keys" to shift just as a list
would be expected to.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Jim", "age": 29, "zim": {"zam": "99"} },
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).dropKey("income").returnString()
[
{age: 18, name: 'Jim' , wigs: 68, zim: None },
{age: 18, name: 'Larry', wigs: [3, 2, 9], zim: None },
{age: 20, name: 'Joe' , wigs: [1, 2, 3], zim: None },
{age: 29, name: 'Jim' , wigs: None , zim: {'zam': '99'}},
{age: 19, name: 'Bill' , wigs: None , zim: None }
]
.. versionadded:: 0.1.2
:param key:
The dictionary key (or cascading list of keys point to final key)
that should be removed.
:returns: self
'''
result = []
for row in self.table:
result.append(internal.remove_member(row, key))
self.table = result
return self | Drop an attribute/element/key-value pair from all the dictionaries.
If the dictionary key does not exist in a particular dictionary, then
that dictionary is left unchanged.
Side effect: if the key is a number and it matches a list (interpreted
as a dictionary), it will cause the "keys" to shift just as a list
would be expected to.
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Jim", "age": 29, "zim": {"zam": "99"} },
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).dropKey("income").returnString()
[
{age: 18, name: 'Jim' , wigs: 68, zim: None },
{age: 18, name: 'Larry', wigs: [3, 2, 9], zim: None },
{age: 20, name: 'Joe' , wigs: [1, 2, 3], zim: None },
{age: 29, name: 'Jim' , wigs: None , zim: {'zam': '99'}},
{age: 19, name: 'Bill' , wigs: None , zim: None }
]
.. versionadded:: 0.1.2
:param key:
The dictionary key (or cascading list of keys point to final key)
that should be removed.
:returns: self |
def fit(self, X, y=None, init=None):
"""
Computes the position of the points in the embedding space
Parameters
----------
X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
if dissimilarity='precomputed'
Input data.
init : {None or ndarray, shape (n_samples,)}, optional
If None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array.
"""
self.fit_transform(X, init=init)
return self | Computes the position of the points in the embedding space
Parameters
----------
X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
if dissimilarity='precomputed'
Input data.
init : {None or ndarray, shape (n_samples,)}, optional
If None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array. |
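A hypothetical usage sketch, assuming the estimator follows scikit-learn MDS conventions (the class name, constructor arguments and embedding_ attribute are assumptions, not taken from the source):
import numpy as np

X = np.random.rand(10, 3)
mds = MDS(n_components=2, dissimilarity='euclidean')  # assumed constructor
mds.fit(X)
print(mds.embedding_.shape)  # expected (10, 2)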
def owned_expansions(self):
"""List of expansions owned by the player."""
owned = {}
for el in self.expansion_locations:
def is_near_to_expansion(t):
return t.position.distance_to(el) < self.EXPANSION_GAP_THRESHOLD
th = next((x for x in self.townhalls if is_near_to_expansion(x)), None)
if th:
owned[el] = th
return owned | List of expansions owned by the player. |
def annihilate(predicate: tuple, stack: tuple) -> tuple:
'''Squash and reduce the input stack.
Removes the elements of input that match predicate and only keeps the last
match at the end of the stack.
'''
extra = tuple(filter(lambda x: x not in predicate, stack))
head = reduce(lambda x, y: y if y in predicate else x, stack, None)
return extra + (head,) if head else extra | Squash and reduce the input stack.
Removes the elements of input that match predicate and only keeps the last
match at the end of the stack. |
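A small worked example of the squash behaviour (annihilate relies on functools.reduce being available in its module):
from functools import reduce  # required by annihilate on Python 3

print(annihilate(('a', 'b'), ('x', 'a', 'y', 'b')))  # -> ('x', 'y', 'b')
print(annihilate(('a', 'b'), ('x', 'y')))            # -> ('x', 'y')  no match, so no head is appended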
def followingPrefix(prefix):
"""Returns a String that sorts just after all Strings beginning with a prefix"""
prefixBytes = array('B', prefix)
changeIndex = len(prefixBytes) - 1
while changeIndex >= 0 and prefixBytes[changeIndex] == 0xff:
changeIndex -= 1
if changeIndex < 0:
return None
newBytes = array('B', prefix[0:changeIndex + 1])
newBytes[changeIndex] = newBytes[changeIndex] + 1
return newBytes.tostring() | Returns a String that sorts just after all Strings beginning with a prefix |
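Worked examples (this is Python-2-era code, given array('B', str) and .tostring()):
assert followingPrefix('abc') == 'abd'       # last byte is bumped by one
assert followingPrefix('a\xff') == 'b'       # trailing 0xff bytes are dropped first
assert followingPrefix('\xff\xff') is None   # nothing sorts after an all-0xff prefix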
def set_circuit_breakers(mv_grid, mode='load', debug=False):
""" Calculates the optimal position of a circuit breaker on all routes of mv_grid, adds and connects them to graph.
Args
----
mv_grid: MVGridDing0
Description#TODO
debug: bool, defaults to False
If True, information is printed during process
Notes
-----
According to planning principles of MV grids, a MV ring is run as two strings (half-rings) separated by a
circuit breaker which is open at normal operation [#]_, [#]_.
Assuming a ring (route which is connected to the root node at either sides), the optimal position of a circuit
breaker is defined as the position (virtual cable) between two nodes where the conveyed current is minimal on
the route. Instead of the peak current, the peak load is used here (assuming a constant voltage).
If a ring is dominated by loads (peak load > peak capacity of generators), only loads are used for determining
the location of circuit breaker. If generators are prevailing (peak load < peak capacity of generators),
only generator capacities are considered for relocation.
The core of this function (calculation of the optimal circuit breaker position) is the same as in
ding0.grid.mv_grid.models.Route.calc_circuit_breaker_position but here it is
1. applied to a different data type (NetworkX Graph) and it
2. adds circuit breakers to all rings.
The re-location of circuit breakers is necessary because the original position (calculated during routing with
method mentioned above) shifts during the connection of satellites and therefore it is no longer valid.
References
----------
.. [#] X. Tao, "Automatisierte Grundsatzplanung von Mittelspannungsnetzen", Dissertation, 2006
.. [#] FGH e.V.: "Technischer Bericht 302: Ein Werkzeug zur Optimierung der Störungsbeseitigung
für Planung und Betrieb von Mittelspannungsnetzen", Tech. rep., 2008
"""
# get power factor for loads and generators
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
# iterate over all rings and circuit breakers
for ring, circ_breaker in zip(mv_grid.rings_nodes(include_root_node=False), mv_grid.circuit_breakers()):
nodes_peak_load = []
nodes_peak_generation = []
# iterate over all nodes of ring
for node in ring:
# node is LV station -> get peak load and peak generation
if isinstance(node, LVStationDing0):
nodes_peak_load.append(node.peak_load / cos_phi_load)
nodes_peak_generation.append(node.peak_generation / cos_phi_feedin)
# node is cable distributor -> get all connected nodes of subtree using graph_nodes_from_subtree()
elif isinstance(node, CableDistributorDing0):
nodes_subtree = mv_grid.graph_nodes_from_subtree(node)
nodes_subtree_peak_load = 0
nodes_subtree_peak_generation = 0
for node_subtree in nodes_subtree:
# node is LV station -> get peak load and peak generation
if isinstance(node_subtree, LVStationDing0):
nodes_subtree_peak_load += node_subtree.peak_load / \
cos_phi_load
nodes_subtree_peak_generation += node_subtree.peak_generation / \
cos_phi_feedin
# node is LV station -> get peak load and peak generation
if isinstance(node_subtree, GeneratorDing0):
nodes_subtree_peak_generation += node_subtree.capacity / \
cos_phi_feedin
nodes_peak_load.append(nodes_subtree_peak_load)
nodes_peak_generation.append(nodes_subtree_peak_generation)
else:
raise ValueError('Ring node has got invalid type.')
if mode == 'load':
node_peak_data = nodes_peak_load
elif mode == 'loadgen':
# is ring dominated by load or generation?
# (check if there's more load than generation in ring or vice versa)
if sum(nodes_peak_load) > sum(nodes_peak_generation):
node_peak_data = nodes_peak_load
else:
node_peak_data = nodes_peak_generation
else:
raise ValueError('parameter \'mode\' is invalid!')
# calc optimal circuit breaker position
# set init value
diff_min = 10e6
# check where difference of demand/generation in two half-rings is minimal
for ctr in range(len(node_peak_data)):
# split route and calc demand difference
route_data_part1 = sum(node_peak_data[0:ctr])
route_data_part2 = sum(node_peak_data[ctr:len(node_peak_data)])
diff = abs(route_data_part1 - route_data_part2)
# equality has to be respected, otherwise comparison stops when demand/generation=0
if diff <= diff_min:
diff_min = diff
position = ctr
else:
break
# relocate circuit breaker
node1 = ring[position-1]
node2 = ring[position]
circ_breaker.branch = mv_grid._graph.adj[node1][node2]['branch']
circ_breaker.branch_nodes = (node1, node2)
circ_breaker.branch.circuit_breaker = circ_breaker
circ_breaker.geo_data = calc_geo_centre_point(node1, node2)
if debug:
logger.debug('Ring: {}'.format(ring))
logger.debug('Circuit breaker {0} was relocated to edge {1}-{2} '
'(position on route={3})'.format(
circ_breaker, node1, node2, position)
)
logger.debug('Peak load sum: {}'.format(sum(nodes_peak_load)))
logger.debug('Peak loads: {}'.format(nodes_peak_load)) | Calculates the optimal position of a circuit breaker on all routes of mv_grid, adds and connects them to graph.
Args
----
mv_grid: MVGridDing0
Description#TODO
debug: bool, defaults to False
If True, information is printed during process
Notes
-----
According to planning principles of MV grids, a MV ring is run as two strings (half-rings) separated by a
circuit breaker which is open at normal operation [#]_, [#]_.
Assuming a ring (route which is connected to the root node at either sides), the optimal position of a circuit
breaker is defined as the position (virtual cable) between two nodes where the conveyed current is minimal on
the route. Instead of the peak current, the peak load is used here (assuming a constant voltage).
If a ring is dominated by loads (peak load > peak capacity of generators), only loads are used for determining
the location of circuit breaker. If generators are prevailing (peak load < peak capacity of generators),
only generator capacities are considered for relocation.
The core of this function (calculation of the optimal circuit breaker position) is the same as in
ding0.grid.mv_grid.models.Route.calc_circuit_breaker_position but here it is
1. applied to a different data type (NetworkX Graph) and it
2. adds circuit breakers to all rings.
The re-location of circuit breakers is necessary because the original position (calculated during routing with
method mentioned above) shifts during the connection of satellites and therefore it is no longer valid.
References
----------
.. [#] X. Tao, "Automatisierte Grundsatzplanung von Mittelspannungsnetzen", Dissertation, 2006
.. [#] FGH e.V.: "Technischer Bericht 302: Ein Werkzeug zur Optimierung der Störungsbeseitigung
für Planung und Betrieb von Mittelspannungsnetzen", Tech. rep., 2008 |
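The split search in the loop above can be read in isolation as the following standalone sketch (a hypothetical helper, not part of ding0):
def optimal_split(node_peak_data):
    # Return the index that splits the ring into two halves whose peak
    # demand/generation sums differ the least, mirroring the loop above,
    # including the early break once the difference starts growing again.
    diff_min, position = float('inf'), 0
    for ctr in range(len(node_peak_data)):
        diff = abs(sum(node_peak_data[:ctr]) - sum(node_peak_data[ctr:]))
        if diff <= diff_min:
            diff_min, position = diff, ctr
        else:
            break
    return position

# e.g. optimal_split([4, 1, 1, 4]) == 2, splitting into [4, 1] and [1, 4]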
def extract_run_id(key):
"""Extract date part from run id
Arguments:
key - full key name, such as shredded-archive/run=2012-12-11-01-31-33/
(trailing slash is required)
>>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33/')
'shredded-archive/run=2012-12-11-01-11-33/'
>>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33')
>>> extract_run_id('shredded-archive/run=2012-13-11-01-11-33/')
"""
filename = key.split('/')[-2] # -1 element is empty string
run_id = filename.lstrip('run=')
try:
datetime.strptime(run_id, '%Y-%m-%d-%H-%M-%S')
return key
except ValueError:
return None | Extract date part from run id
Arguments:
key - full key name, such as shredded-archive/run=2012-12-11-01-31-33/
(trailing slash is required)
>>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33/')
'shredded-archive/run=2012-12-11-01-11-33/'
>>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33')
>>> extract_run_id('shredded-archive/run=2012-13-11-01-11-33/') |
def set_activate_user_form(self, card_id, **kwargs):
"""
Set the member card activation form fields.
For details, see
https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1451025283
"6 Activate membership card" -> "6.2 One-click activation" -> "Step 2: set the activation form fields"
Example parameters:
{
"card_id": "pbLatjnrwUUdZI641gKdTMJzHGfc",
"service_statement": {
"name": "会员守则",
"url": "https://www.qq.com"
},
"bind_old_card": {
"name": "老会员绑定",
"url": "https://www.qq.com"
},
"required_form": {
"can_modify":false,
"rich_field_list": [
{
"type": "FORM_FIELD_RADIO",
"name": "兴趣",
"values": [
"钢琴",
"舞蹈",
"足球"
]
},
{
"type": "FORM_FIELD_SELECT",
"name": "喜好",
"values": [
"郭敬明",
"韩寒",
"南派三叔"
]
},
{
"type": "FORM_FIELD_CHECK_BOX",
"name": "职业",
"values": [
"赛车手",
"旅行家"
]
}
],
"common_field_id_list": [
"USER_FORM_INFO_FLAG_MOBILE"
]
},
"optional_form": {
"can_modify":false,
"common_field_id_list": [
"USER_FORM_INFO_FLAG_LOCATION",
"USER_FORM_INFO_FLAG_BIRTHDAY"
],
"custom_field_list": [
"喜欢的电影"
]
}
}
For the allowed common_field_id_list values see the constants in `wechatpy.constants.UserFormInfoFlag`
:param card_id: card ID
:param kwargs: other optional parameters, see the WeChat documentation
"""
kwargs['card_id'] = card_id
return self._post(
'card/membercard/activateuserform/set',
data=kwargs
) | Set the member card activation form fields.
For details, see
https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1451025283
"6 Activate membership card" -> "6.2 One-click activation" -> "Step 2: set the activation form fields"
Example parameters:
{
"card_id": "pbLatjnrwUUdZI641gKdTMJzHGfc",
"service_statement": {
"name": "会员守则",
"url": "https://www.qq.com"
},
"bind_old_card": {
"name": "老会员绑定",
"url": "https://www.qq.com"
},
"required_form": {
"can_modify":false,
"rich_field_list": [
{
"type": "FORM_FIELD_RADIO",
"name": "兴趣",
"values": [
"钢琴",
"舞蹈",
"足球"
]
},
{
"type": "FORM_FIELD_SELECT",
"name": "喜好",
"values": [
"郭敬明",
"韩寒",
"南派三叔"
]
},
{
"type": "FORM_FIELD_CHECK_BOX",
"name": "职业",
"values": [
"赛车手",
"旅行家"
]
}
],
"common_field_id_list": [
"USER_FORM_INFO_FLAG_MOBILE"
]
},
"optional_form": {
"can_modify":false,
"common_field_id_list": [
"USER_FORM_INFO_FLAG_LOCATION",
"USER_FORM_INFO_FLAG_BIRTHDAY"
],
"custom_field_list": [
"喜欢的电影"
]
}
}
For the allowed common_field_id_list values see the constants in `wechatpy.constants.UserFormInfoFlag`
:param card_id: card ID
:param kwargs: other optional parameters, see the WeChat documentation
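A hypothetical call through the wechatpy client (the client.card attribute path and credentials are assumptions):
from wechatpy import WeChatClient

client = WeChatClient('your-appid', 'your-secret')
client.card.set_activate_user_form(
    'pbLatjnrwUUdZI641gKdTMJzHGfc',
    required_form={'common_field_id_list': ['USER_FORM_INFO_FLAG_MOBILE']},
)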
def fcat(*fs):
"""Concatenate a sequence of farrays.
The variadic *fs* input is a homogeneous sequence of functions or arrays.
"""
items = list()
for f in fs:
if isinstance(f, boolfunc.Function):
items.append(f)
elif isinstance(f, farray):
items.extend(f.flat)
else:
raise TypeError("expected Function or farray")
return farray(items) | Concatenate a sequence of farrays.
The variadic *fs* input is a homogeneous sequence of functions or arrays. |
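A short usage sketch with pyeda, where farray and fcat live (the pyeda.inter import path is an assumption):
from pyeda.inter import exprvars, fcat

xs = exprvars('x', 2)          # farray of 2 expression variables
ys = exprvars('y', 3)          # farray of 3 expression variables
zs = fcat(xs, ys)              # concatenated farray of length 5
print(len(zs))                 # 5
print(len(fcat(xs[0], ys)))    # 4 -- a single Function is also accepted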
def artifact_filename(self):
"""Returns the canonical maven-style filename for an artifact pointed at by this coordinate.
:API: public
:rtype: string
"""
def maybe_component(component):
return '-{}'.format(component) if component else ''
return '{org}-{name}{rev}{classifier}.{ext}'.format(org=self.org,
name=self.name,
rev=maybe_component(self.rev),
classifier=maybe_component(self.classifier),
ext=self.ext) | Returns the canonical maven-style filename for an artifact pointed at by this coordinate.
:API: public
:rtype: string |
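Worked examples of the resulting filename pattern (coordinate values invented for illustration):
# org='org.pantsbuild', name='example', rev='1.0.0', classifier=None, ext='jar'
#   -> 'org.pantsbuild-example-1.0.0.jar'
# org='org.pantsbuild', name='example', rev='1.0.0', classifier='sources', ext='jar'
#   -> 'org.pantsbuild-example-1.0.0-sources.jar'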
def set_presence(self, state, status={}, priority=0):
"""
Change the presence broadcast by the client.
:param state: New presence state to broadcast
:type state: :class:`aioxmpp.PresenceState`
:param status: New status information to broadcast
:type status: :class:`dict` or :class:`str`
:param priority: New priority for the resource
:type priority: :class:`int`
:return: Stanza token of the presence stanza or :data:`None` if the
presence is unchanged or the stream is not connected.
:rtype: :class:`~.stream.StanzaToken`
If the client is currently connected, the new presence is broadcast
immediately.
`status` must be either a string or something which can be passed to
the :class:`dict` constructor. If it is a string, it is wrapped into a
dict using ``{None: status}``. The mapping must map
:class:`~.LanguageTag` objects (or :data:`None`) to strings. The
information will be used to generate internationalised presence status
information. If you do not need internationalisation, simply use the
string version of the argument.
"""
if not isinstance(priority, numbers.Integral):
raise TypeError(
"invalid priority: got {}, expected integer".format(
type(priority)
)
)
if not isinstance(state, aioxmpp.PresenceState):
raise TypeError(
"invalid state: got {}, expected aioxmpp.PresenceState".format(
type(state),
)
)
if isinstance(status, str):
new_status = {None: status}
else:
new_status = dict(status)
new_priority = int(priority)
emit_state_event = self._state != state
emit_overall_event = (
emit_state_event or
self._priority != new_priority or
self._status != new_status
)
self._state = state
self._status = new_status
self._priority = new_priority
if emit_state_event:
self.on_presence_state_changed()
if emit_overall_event:
self.on_presence_changed()
return self.resend_presence() | Change the presence broadcast by the client.
:param state: New presence state to broadcast
:type state: :class:`aioxmpp.PresenceState`
:param status: New status information to broadcast
:type status: :class:`dict` or :class:`str`
:param priority: New priority for the resource
:type priority: :class:`int`
:return: Stanza token of the presence stanza or :data:`None` if the
presence is unchanged or the stream is not connected.
:rtype: :class:`~.stream.StanzaToken`
If the client is currently connected, the new presence is broadcast
immediately.
`status` must be either a string or something which can be passed to
the :class:`dict` constructor. If it is a string, it is wrapped into a
dict using ``{None: status}``. The mapping must map
:class:`~.LanguageTag` objects (or :data:`None`) to strings. The
information will be used to generate internationalised presence status
information. If you do not need internationalisation, simply use the
string version of the argument. |
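A minimal aioxmpp sketch, assuming `presence` is an already-summoned PresenceServer service instance:
import aioxmpp

presence.set_presence(
    aioxmpp.PresenceState(available=True, show=aioxmpp.PresenceShow.DND),
    status="in a meeting",
    priority=10,
)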
def is_ancestor_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is an ancestor of a bank.
arg: id (osid.id.Id): an ``Id``
arg: bank_id (osid.id.Id): the ``Id`` of a bank
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``bank_id,`` ``false`` otherwise
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_ancestor_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=bank_id)
return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=bank_id) | Tests if an ``Id`` is an ancestor of a bank.
arg: id (osid.id.Id): an ``Id``
arg: bank_id (osid.id.Id): the ``Id`` of a bank
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``bank_id,`` ``false`` otherwise
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``. |
def get_clusters_representation(chromosome, count_clusters=None):
""" Convert chromosome to cluster representation:
chromosome : [0, 1, 1, 0, 2, 3, 3]
clusters: [[0, 3], [1, 2], [4], [5, 6]]
"""
if count_clusters is None:
count_clusters = ga_math.calc_count_centers(chromosome)
# Initialize empty clusters
clusters = [[] for _ in range(count_clusters)]
# Fill clusters with index of data
for _idx_data in range(len(chromosome)):
clusters[chromosome[_idx_data]].append(_idx_data)
return clusters | Convert chromosome to cluster representation:
chromosome : [0, 1, 1, 0, 2, 3, 3]
clusters: [[0, 3], [1, 2], [4], [5, 6]] |
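The docstring's example, executed directly (count_clusters passed explicitly so the ga_math helper is not needed):
print(get_clusters_representation([0, 1, 1, 0, 2, 3, 3], count_clusters=4))
# -> [[0, 3], [1, 2], [4], [5, 6]]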
def get_sections_2d_nts(self, sortby=None):
"""Get high GO IDs that are actually used to group current set of GO IDs."""
sections_2d_nts = []
for section_name, hdrgos_actual in self.get_sections_2d():
hdrgo_nts = self.gosubdag.get_nts(hdrgos_actual, sortby=sortby)
sections_2d_nts.append((section_name, hdrgo_nts))
return sections_2d_nts | Get high GO IDs that are actually used to group current set of GO IDs. |
def check_key(data_object, key, cardinal=False):
"""
Update the value of an index key by matching values or getting positionals.
"""
itype = (int, np.int32, np.int64)
if not isinstance(key, itype + (slice, tuple, list, np.ndarray)):
raise KeyError("Unknown key type {} for key {}".format(type(key), key))
keys = data_object.index.values
if cardinal and data_object._cardinal is not None:
keys = data_object[data_object._cardinal[0]].unique()
elif isinstance(key, itype) and key in keys:
key = list(sorted(data_object.index.values[key]))
elif isinstance(key, itype) and key < 0:
key = list(sorted(data_object.index.values[key]))
elif isinstance(key, itype):
key = [key]
elif isinstance(key, slice):
key = list(sorted(data_object.index.values[key]))
elif isinstance(key, (tuple, list, pd.Index)) and not all(k in keys for k in key):
key = list(sorted(data_object.index.values[key]))
return key | Update the value of an index key by matching values or getting positionals. |
def text(self, path, wholetext=False, lineSep=None):
"""
Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a
string column named "value", and followed by partitioned columns if there
are any.
The text files must be encoded as UTF-8.
By default, each line in the text file is a new row in the resulting DataFrame.
.. note:: Evolving.
:param paths: string, or list of strings, for input path(s).
:param wholetext: if true, read each file from input path(s) as a single row.
:param lineSep: defines the line separator that should be used for parsing. If None is
set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
>>> text_sdf = spark.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
>>> "value" in str(text_sdf.schema)
True
"""
self._set_opts(wholetext=wholetext, lineSep=lineSep)
if isinstance(path, basestring):
return self._df(self._jreader.text(path))
else:
raise TypeError("path can be only a single string") | Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a
string column named "value", and followed by partitioned columns if there
are any.
The text files must be encoded as UTF-8.
By default, each line in the text file is a new row in the resulting DataFrame.
.. note:: Evolving.
:param paths: string, or list of strings, for input path(s).
:param wholetext: if true, read each file from input path(s) as a single row.
:param lineSep: defines the line separator that should be used for parsing. If None is
set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
>>> text_sdf = spark.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
>>> "value" in str(text_sdf.schema)
True |
def field_function(self, type_code, func_name):
"""Return the field function."""
assert func_name in ('to_json', 'from_json')
name = "field_%s_%s" % (type_code.lower(), func_name)
return getattr(self, name) | Return the field function. |
def script(name,
source,
saltenv='base',
args=None,
template=None,
exec_driver=None,
stdin=None,
python_shell=True,
output_loglevel='debug',
ignore_retcode=False,
use_vt=False,
keep_env=None):
'''
Run :py:func:`cmd.script <salt.modules.cmdmod.script>` within a container
.. note::
While the command is run within the container, it is initiated from the
host. Therefore, the PID in the return dict is from the host, not from
the container.
name
Container name or ID
source
Path to the script. Can be a local path on the Minion or a remote file
from the Salt fileserver.
args
A string containing additional command-line options to pass to the
script.
template : None
Templating engine to use on the script before running.
exec_driver : None
If not passed, the execution driver will be detected as described
:ref:`above <docker-execution-driver>`.
stdin : None
Standard input to be used for the script
output_loglevel : debug
Level at which to log the output from the script. Set to ``quiet`` to
suppress logging.
use_vt : False
Use SaltStack's utils.vt to stream output to console.
keep_env : None
If not passed, only a sane default PATH environment variable will be
set. If ``True``, all environment variables from the container's host
will be kept. Otherwise, a comma-separated list (or Python list) of
environment variable names can be passed, and those environment
variables will be kept.
CLI Example:
.. code-block:: bash
salt myminion docker.script mycontainer salt://docker_script.py
salt myminion docker.script mycontainer salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt myminion docker.script mycontainer salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' output_loglevel=quiet
'''
return _script(name,
source,
saltenv=saltenv,
args=args,
template=template,
exec_driver=exec_driver,
stdin=stdin,
python_shell=python_shell,
output_loglevel=output_loglevel,
ignore_retcode=ignore_retcode,
use_vt=use_vt,
keep_env=keep_env) | Run :py:func:`cmd.script <salt.modules.cmdmod.script>` within a container
.. note::
While the command is run within the container, it is initiated from the
host. Therefore, the PID in the return dict is from the host, not from
the container.
name
Container name or ID
source
Path to the script. Can be a local path on the Minion or a remote file
from the Salt fileserver.
args
A string containing additional command-line options to pass to the
script.
template : None
Templating engine to use on the script before running.
exec_driver : None
If not passed, the execution driver will be detected as described
:ref:`above <docker-execution-driver>`.
stdin : None
Standard input to be used for the script
output_loglevel : debug
Level at which to log the output from the script. Set to ``quiet`` to
suppress logging.
use_vt : False
Use SaltStack's utils.vt to stream output to console.
keep_env : None
If not passed, only a sane default PATH environment variable will be
set. If ``True``, all environment variables from the container's host
will be kept. Otherwise, a comma-separated list (or Python list) of
environment variable names can be passed, and those environment
variables will be kept.
CLI Example:
.. code-block:: bash
salt myminion docker.script mycontainer salt://docker_script.py
salt myminion docker.script mycontainer salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
salt myminion docker.script mycontainer salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n' output_loglevel=quiet |
def _extract_models(cls, apis):
'''An helper function to extract all used models from the apis.'''
# TODO: This would probably be much better if the info would be
# extracted from the classes, rather than from the swagger
# representation...
models = set()
for api in apis:
for op in api.get('operations', []):
models.add(op['type'])
for param in op.get('parameters', []):
models.add(param.get('type', 'void'))
for msg in op['responseMessages']:
models.add(msg.get('responseModel', 'void'))
# Convert from swagger name representation to classes
models = map(lambda m: Model.name_to_cls[m], models)
ret = {}
for model in models:
if model.native_type:
continue
obj = model.schema.copy()
obj['id'] = model.name
ret[model.name] = obj
return ret | An helper function to extract all used models from the apis. |
async def disconnect(self, requested=True):
"""
Disconnects this player from it's voice channel.
"""
if self.state == PlayerState.DISCONNECTING:
return
await self.update_state(PlayerState.DISCONNECTING)
if not requested:
log.debug(
f"Forcing player disconnect for guild {self.channel.guild.id}"
f" due to player manager request."
)
guild_id = self.channel.guild.id
voice_ws = self.node.get_voice_ws(guild_id)
if not voice_ws.closed:
await voice_ws.voice_state(guild_id, None)
await self.node.destroy_guild(guild_id)
await self.close()
self.manager.remove_player(self) | Disconnects this player from it's voice channel. |
def download(self,age=None,metallicity=None,outdir=None,force=False):
"""
Check valid parameter range and download isochrones from:
http://stev.oapd.inaf.it/cgi-bin/cmd
"""
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
if age is None: age = float(self.age)
if metallicity is None: metallicity = float(self.metallicity)
if outdir is None: outdir = './'
basename = self.params2filename(age,metallicity)
outfile = os.path.join(outdir,basename)
if os.path.exists(outfile) and not force:
try:
self.verify(outfile,self.survey,age,metallicity)
logger.info("Found %s; skipping..."%(outfile))
return
except Exception as e:
msg = "Overwriting corrupted %s..."%(outfile)
logger.warn(msg)
os.remove(outfile)
mkdir(outdir)
self.print_info(age,metallicity)
self.query_server(outfile,age,metallicity)
if not os.path.exists(outfile):
raise RuntimeError('Download failed')
try:
self.verify(outfile,self.survey,age,metallicity)
except Exception as e:
msg = "Output file is corrupted."
logger.error(msg)
msg = "Removing %s."%outfile
logger.info(msg)
os.remove(outfile)
raise(e)
return outfile | Check valid parameter range and download isochrones from:
http://stev.oapd.inaf.it/cgi-bin/cmd |
def to_copy(self, column_names=None, selection=None, strings=True, virtual=False, selections=True):
"""Return a copy of the DataFrame, if selection is None, it does not copy the data, it just has a reference
:param column_names: list of column names, to copy, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
:param selection: {selection}
:param strings: argument passed to DataFrame.get_column_names when column_names is None
:param virtual: argument passed to DataFrame.get_column_names when column_names is None
:param selections: copy selections to a new DataFrame
:return: DataFrame
"""
if column_names:
column_names = _ensure_strings_from_expressions(column_names)
df = vaex.from_items(*self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=False))
if virtual:
for name, value in self.virtual_columns.items():
df.add_virtual_column(name, value)
if selections:
# the filter selection does not need copying
for key, value in self.selection_histories.items():
if key != FILTER_SELECTION_NAME:
df.selection_histories[key] = list(value)
for key, value in self.selection_history_indices.items():
if key != FILTER_SELECTION_NAME:
df.selection_history_indices[key] = value
df.functions.update(self.functions)
df.copy_metadata(self)
return df | Return a copy of the DataFrame. If selection is None, the data itself is not copied; the copy just holds a reference to it
:param column_names: list of column names, to copy, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
:param selection: {selection}
:param strings: argument passed to DataFrame.get_column_names when column_names is None
:param virtual: argument passed to DataFrame.get_column_names when column_names is None
:param selections: copy selections to a new DataFrame
:return: DataFrame
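A brief usage sketch with vaex (vaex.example() ships a small demo DataFrame; column names 'x' and 'y' exist there):
import vaex

df = vaex.example()
df2 = df.to_copy(column_names=['x', 'y'], virtual=True)
print(df2.get_column_names())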
def _get_button_label(self):
"""Gets Button label from user and returns string"""
dlg = wx.TextEntryDialog(self, _('Button label:'))
if dlg.ShowModal() == wx.ID_OK:
label = dlg.GetValue()
else:
label = ""
dlg.Destroy()
return label | Gets Button label from user and returns string |
def set_outputs(self, *outputs):
""" Set the outputs of the view
"""
self._outputs = OrderedDict()
for output in outputs:
out_name = None
type_or_serialize = None
if isinstance(output, (list, tuple)):
if len(output) == 1:
out_name = output[0]
elif len(output) == 2:
out_name = output[0]
type_or_serialize = output[1]
else:
raise ValueError("invalid output format")
else:
out_name = output
self.add_output(out_name, type_or_serialize) | Set the outputs of the view |
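A toy stand-in class (not the original view) to exercise the three output formats set_outputs() accepts: a bare name, a 1-tuple, or a (name, type_or_serializer) 2-tuple. The add_output() contract here is an assumption based on the call above.
from collections import OrderedDict

class DemoView:
    """Toy stand-in with an assumed add_output() contract."""
    def __init__(self):
        self._outputs = OrderedDict()

    def add_output(self, name, type_or_serialize=None):
        self._outputs[name] = type_or_serialize

    def set_outputs(self, *outputs):
        # Mirrors the branching in the method above.
        for output in outputs:
            if isinstance(output, (list, tuple)):
                if len(output) == 1:
                    self.add_output(output[0])
                elif len(output) == 2:
                    self.add_output(output[0], output[1])
                else:
                    raise ValueError("invalid output format")
            else:
                self.add_output(output)

view = DemoView()
view.set_outputs('plain_name', ('with_type', int), ('bare_tuple',))
print(view._outputs)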
def _PrintEventLabelsCounter(
self, event_labels_counter, session_identifier=None):
"""Prints the event labels counter.
Args:
event_labels_counter (collections.Counter): number of event tags per
label.
session_identifier (Optional[str]): session identifier.
"""
if not event_labels_counter:
return
title = 'Event tags generated per label'
if session_identifier:
title = '{0:s}: {1:s}'.format(title, session_identifier)
table_view = views.ViewsFactory.GetTableView(
self._views_format_type,
column_names=['Label', 'Number of event tags'], title=title)
for key, value in sorted(event_labels_counter.items()):
if key == 'total':
continue
table_view.AddRow([key, value])
try:
total = event_labels_counter['total']
except KeyError:
total = 'N/A'
table_view.AddRow(['Total', total])
table_view.Write(self._output_writer) | Prints the event labels counter.
Args:
event_labels_counter (collections.Counter): number of event tags per
label.
session_identifier (Optional[str]): session identifier. |
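An illustration of the input shape this method expects: a collections.Counter keyed by label, optionally carrying a 'total' entry that is reported on its own row. The label names below are made up.
import collections

event_labels_counter = collections.Counter({
    'browser_search': 12,
    'malware_hit': 3,
})
event_labels_counter['total'] = sum(event_labels_counter.values())

# Rows the table view would end up with: labels sorted, 'total' skipped
# inside the loop and appended as the final 'Total' row.
rows = [[key, value]
        for key, value in sorted(event_labels_counter.items())
        if key != 'total']
rows.append(['Total', event_labels_counter.get('total', 'N/A')])
print(rows)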
def create_contract(self, price=0, address=None, caller=None, balance=0, init=None, gas=None):
"""
Create a contract account. Sends a transaction to initialize the contract
:param address: the address of the new account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible.
:param balance: the initial balance of the account in Wei
:param init: the initialization code of the contract
The way that the Solidity compiler expects the constructor arguments to
be passed is by appending the arguments to the byte code produced by the
Solidity compiler. The arguments are formatted as defined in the Ethereum
ABI2. The arguments are then copied from the init byte array to the EVM
memory through the CODECOPY opcode with appropriate values on the stack.
This is done when the byte code in the init byte array is actually run
on the network.
"""
expected_address = self.create_account(self.new_address(sender=caller))
if address is None:
address = expected_address
elif caller is not None and address != expected_address:
raise EthereumError(f"Error: contract created from address {hex(caller)} with nonce {self.get_nonce(caller)} was expected to be at address {hex(expected_address)}, but create_contract was called with address={hex(address)}")
self.start_transaction('CREATE', address, price, init, caller, balance, gas=gas)
self._process_pending_transaction()
return address | Create a contract account. Sends a transaction to initialize the contract
:param address: the address of the new account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible.
:param balance: the initial balance of the account in Wei
:param init: the initialization code of the contract
The way that the Solidity compiler expects the constructor arguments to
be passed is by appending the arguments to the byte code produced by the
Solidity compiler. The arguments are formatted as defined in the Ethereum
ABI2. The arguments are then copied from the init byte array to the EVM
memory through the CODECOPY opcode with appropriate values on the stack.
This is done when the byte code in the init byte array is actually run
on the network. |
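A minimal, self-contained illustration of the convention the docstring describes: ABI-encoded constructor arguments are appended to the compiler-produced init bytecode before being passed as init. The bytecode prefix is a placeholder and the encoder only covers a single static uint256 argument.
# Placeholder init-code prefix standing in for compiler output; illustrative only.
compiler_init_code = bytes.fromhex('608060405234801561001057600080fd5b50')

def encode_uint256(value):
    """Minimal ABI encoding for one static uint256 constructor argument."""
    return value.to_bytes(32, byteorder='big')

constructor_arg = 42
init = compiler_init_code + encode_uint256(constructor_arg)
# `init` is what would be handed to create_contract(init=init, ...).
print(init.hex())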