code | docstring
---|---
def run(self):
"""Delete the specified gene and solve using the desired method."""
obj_reaction = self._get_objective()
genes = set()
gene_assoc = {}
for reaction in self._model.reactions:
assoc = None
if reaction.genes is None:
continue
elif isinstance(reaction.genes, string_types):
assoc = boolean.Expression(reaction.genes)
else:
variables = [boolean.Variable(g) for g in reaction.genes]
assoc = boolean.Expression(boolean.And(*variables))
genes.update(v.symbol for v in assoc.variables)
gene_assoc[reaction.id] = assoc
reactions = set(self._mm.reactions)
start_time = time.time()
testing_genes = set(self._args.gene)
deleted_reactions = set()
logger.info('Trying model without genes: {}...'.format(
', '.join(sorted(testing_genes))))
for reaction in reactions:
if reaction not in gene_assoc:
continue
assoc = gene_assoc[reaction]
if any(boolean.Variable(gene) in assoc.variables
for gene in testing_genes):
new_assoc = assoc.substitute(
lambda v: v if v.symbol not in testing_genes else False)
if new_assoc.has_value() and not new_assoc.value:
logger.info('Deleting reaction {}...'.format(reaction))
deleted_reactions.add(reaction)
if self._args.method in ['moma', 'moma2']:
solver = self._get_solver(quadratic=True)
else:
solver = self._get_solver()
if self._args.method == 'fba':
logger.info('Solving using FBA...')
prob = fluxanalysis.FluxBalanceProblem(self._mm, solver)
try:
prob.maximize(obj_reaction)
except fluxanalysis.FluxBalanceError as e:
self.report_flux_balance_error(e)
wild = prob.get_flux(obj_reaction)
for reaction in deleted_reactions:
flux_var = prob.get_flux_var(reaction)
prob.prob.add_linear_constraints(flux_var == 0)
prob.maximize(obj_reaction)
deleteflux = prob.get_flux(obj_reaction)
elif self._args.method in ['lin_moma', 'lin_moma2', 'moma', 'moma2']:
prob = moma.MOMAProblem(self._mm, solver)
wt_fluxes = prob.get_minimal_fba_flux(obj_reaction)
wild = wt_fluxes[obj_reaction]
for reaction in deleted_reactions:
flux_var = prob.get_flux_var(reaction)
prob.prob.add_linear_constraints(flux_var == 0)
try:
if self._args.method == 'moma':
logger.info('Solving using MOMA...')
prob.moma(wt_fluxes)
elif self._args.method == 'lin_moma':
logger.info('Solving using linear MOMA...')
prob.lin_moma(wt_fluxes)
elif self._args.method == 'moma2':
logger.info('Solving using combined-model MOMA...')
prob.moma2(obj_reaction, wild)
elif self._args.method == 'lin_moma2':
logger.info('Solving using combined-model linear MOMA...')
prob.lin_moma2(obj_reaction, wild)
except moma.MOMAError:
self.fail('Error computing the MOMA result.')
deleteflux = prob.get_flux(obj_reaction)
logger.info(
'Solving took {:.2f} seconds'.format(time.time() - start_time))
logger.info(
'Objective reaction after gene deletion has flux {}'.format(
deleteflux + 0))
if wild != 0:
logger.info(
'Objective reaction has {:.2%} flux of wild type flux'.format(
abs(deleteflux / wild))) | Delete the specified gene and solve using the desired method. |
def lipisha_ipn(self):
"""Process lipisha IPN - Initiate/Acknowledge"""
if not (self.request.POST.get('api_key') == LIPISHA_API_KEY and
self.request.POST.get('api_signature') == LIPISHA_API_SIGNATURE):
raise HTTPBadRequest
return process_lipisha_payment(self.request) | Process lipisha IPN - Initiate/Acknowledge |
def process_details():
"""
Returns details about the current process
"""
results = {"argv": sys.argv, "working.directory": os.getcwd()}
# Process ID and execution IDs (UID, GID, Login, ...)
for key, method in {
"pid": "getpid",
"ppid": "getppid",
"login": "getlogin",
"uid": "getuid",
"euid": "geteuid",
"gid": "getgid",
"egid": "getegid",
"groups": "getgroups",
}.items():
try:
results[key] = getattr(os, method)()
except (AttributeError, OSError):
results[key] = None
return results | Returns details about the current process |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AwsContext for this AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsContext
"""
if self._context is None:
self._context = AwsContext(self._version, sid=self._solution['sid'], )
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AwsContext for this AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsContext |
def submit(self, password=''):
"""Submits the participation to the web site.
The password is sent as plain text.
:return: the evaluation results.
"""
url = '{}/api/submit'.format(BASE_URL)
try:
r = requests.post(url,
data=self.dumps(),
headers={'content-type': 'application/json'},
auth=(self['metadata']['email'], password))
response = r.json()
except requests.exceptions.HTTPError as e:
logging.error('Error while submitting the participation. {}'.format(e))
return Job()
if 'error' in response:
logging.error('Error while processing the participation. {}'.format(response['error']))
return Job()
return Job(response) | Submits the participation to the web site.
The password is sent as plain text.
:return: the evaluation results. |
def save_file(fullpath, entry):
""" Save a message file out, without mangling the headers """
with tempfile.NamedTemporaryFile('w', delete=False) as file:
tmpfile = file.name
# we can't just use file.write(str(entry)) because otherwise the
# headers "helpfully" do MIME encoding normalization.
# str(val) is necessary to get around email.header's encoding
# shenanigans
for key, val in entry.items():
print('{}: {}'.format(key, str(val)), file=file)
print('', file=file)
file.write(entry.get_payload())
shutil.move(tmpfile, fullpath) | Save a message file out, without mangling the headers |
def make_list_table(headers, data, title='', columns=None):
"""Build a list-table directive.
:param headers: List of header values.
:param data: Iterable of row data, yielding lists or tuples with rows.
:param title: Optional text to show as the table title.
:param columns: Optional widths for the columns.
"""
results = []
add = results.append
add('.. list-table:: %s' % title)
add(' :header-rows: 1')
if columns:
add(' :widths: %s' % (','.join(str(c) for c in columns)))
add('')
add(' - * %s' % headers[0])
for h in headers[1:]:
add(' * %s' % h)
for row in data:
add(' - * %s' % row[0])
for r in row[1:]:
add(' * %s' % r)
add('')
return '\n'.join(results) | Build a list-table directive.
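A brief illustration of how the helper above might be called, assuming `make_list_table` is in scope; the returned string is a reST ``list-table`` directive built from the rows.

```python
# Hypothetical input data for illustration only.
headers = ['Name', 'Role']
rows = [('alice', 'admin'), ('bob', 'user')]
directive = make_list_table(headers, rows, title='Users', columns=[20, 10])
# `directive` is a ".. list-table:: Users" block with :header-rows: and
# :widths: option lines followed by one nested bullet item per input row.
print(directive)
```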
:param headers: List of header values.
:param data: Iterable of row data, yielding lists or tuples with rows.
:param title: Optional text to show as the table title.
:param columns: Optional widths for the columns. |
def version(versioninfo=False):
'''
.. versionadded:: 2015.8.0
Returns the version of Git installed on the minion
versioninfo : False
If ``True``, return the version in a versioninfo list (e.g. ``[2, 5,
0]``)
CLI Example:
.. code-block:: bash
salt myminion git.version
'''
contextkey = 'git.version'
contextkey_info = 'git.versioninfo'
if contextkey not in __context__:
try:
version_ = _git_run(['git', '--version'])['stdout']
except CommandExecutionError as exc:
log.error(
'Failed to obtain the git version (error follows):\n%s',
exc
)
version_ = 'unknown'
try:
__context__[contextkey] = version_.split()[-1]
except IndexError:
# Somehow git --version returned no stdout while not raising an
# error. Should never happen but we should still account for this
# possible edge case.
log.error('Running \'git --version\' returned no stdout')
__context__[contextkey] = 'unknown'
if not versioninfo:
return __context__[contextkey]
if contextkey_info not in __context__:
# Set ptr to the memory location of __context__[contextkey_info] to
# prevent repeated dict lookups
ptr = __context__.setdefault(contextkey_info, [])
for part in __context__[contextkey].split('.'):
try:
ptr.append(int(part))
except ValueError:
ptr.append(part)
return __context__[contextkey_info] | .. versionadded:: 2015.8.0
Returns the version of Git installed on the minion
versioninfo : False
If ``True``, return the version in a versioninfo list (e.g. ``[2, 5,
0]``)
CLI Example:
.. code-block:: bash
salt myminion git.version |
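The ``versioninfo`` branch above just splits the cached version string on dots, keeping any non-numeric piece as a string. A standalone sketch of that parsing idea (not Salt code, no `__context__`):

```python
def parse_versioninfo(version_string):
    """Split a git version string into ints, keeping non-numeric parts."""
    parts = []
    for part in version_string.split('.'):
        try:
            parts.append(int(part))
        except ValueError:
            parts.append(part)
    return parts

print(parse_versioninfo('2.5.0'))              # [2, 5, 0]
print(parse_versioninfo('2.20.1.windows.1'))   # [2, 20, 1, 'windows', 1]
```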
def enforce_reset(self):
"""enforce parameter bounds on the ensemble by resetting
violating vals to bound
"""
ub = (self.ubnd * (1.0+self.bound_tol)).to_dict()
lb = (self.lbnd * (1.0 - self.bound_tol)).to_dict()
#for iname,name in enumerate(self.columns):
#self.loc[self.loc[:,name] > ub[name],name] = ub[name] * (1.0 + self.bound_tol)
#self.loc[self.loc[:,name] < lb[name],name] = lb[name].copy() * (1.0 - self.bound_tol)
# self.loc[self.loc[:,name] > ub[name],name] = ub[name]
# self.loc[self.loc[:,name] < lb[name],name] = lb[name]
val_arr = self.values
for iname, name in enumerate(self.columns):
val_arr[val_arr[:,iname] > ub[name],iname] = ub[name]
val_arr[val_arr[:, iname] < lb[name],iname] = lb[name] | enforce parameter bounds on the ensemble by resetting
violating vals to bound |
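The loop above clips each column of the ensemble to its tolerance-widened bounds. A self-contained numpy sketch of the same reset, using made-up bounds and values rather than a pyemu ensemble:

```python
import numpy as np

vals = np.array([[0.5, 12.0],
                 [3.0, -4.0]])
lb = {'p1': 1.0, 'p2': 0.0}    # hypothetical lower bounds per parameter
ub = {'p1': 2.0, 'p2': 10.0}   # hypothetical upper bounds per parameter
for i, name in enumerate(['p1', 'p2']):
    # reset values that violate a bound to the bound itself
    vals[vals[:, i] > ub[name], i] = ub[name]
    vals[vals[:, i] < lb[name], i] = lb[name]
print(vals)  # [[ 1. 10.] [ 2.  0.]]
```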
def _rtg_add_summary_file(eval_files, base_dir, data):
"""Parse output TP FP and FN files to generate metrics for plotting.
"""
out_file = os.path.join(base_dir, "validate-summary.csv")
if not utils.file_uptodate(out_file, eval_files.get("tp", eval_files.get("fp", eval_files["fn"]))):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["sample", "caller", "vtype", "metric", "value"])
base = _get_sample_and_caller(data)
for metric in ["tp", "fp", "fn"]:
for vtype, bcftools_types in [("SNPs", "--types snps"),
("Indels", "--exclude-types snps")]:
in_file = eval_files.get(metric)
if in_file and os.path.exists(in_file):
cmd = ("bcftools view {bcftools_types} {in_file} | grep -v ^# | wc -l")
count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
else:
count = 0
writer.writerow(base + [vtype, metric, count])
eval_files["summary"] = out_file
return eval_files | Parse output TP FP and FN files to generate metrics for plotting. |
def plot_lnp(fignum, s, datablock, fpars, direction_type_key):
"""
plots lines and planes on a great circle with alpha 95 and mean
Parameters
_________
fignum : number of plt.figure() object
datablock : nested list of dictionaries with keys in 3.0 or 2.5 format
3.0 keys: dir_dec, dir_inc, dir_tilt_correction = [-1,0,100], direction_type_key =['p','l']
2.5 keys: dec, inc, tilt_correction = [-1,0,100],direction_type_key =['p','l']
fpars : Fisher parameters calculated by, e.g., pmag.dolnp() or pmag.dolnp3_0()
direction_type_key : key for dictionary direction_type ('specimen_direction_type')
Effects
_______
plots the site level figure
"""
# make the stereonet
plot_net(fignum)
#
# plot on the data
#
dec_key, inc_key, tilt_key = 'dec', 'inc', 'tilt_correction'
if 'dir_dec' in datablock[0].keys(): # this is data model 3.0
dec_key, inc_key, tilt_key = 'dir_dec', 'dir_inc', 'dir_tilt_correction'
coord = datablock[0][tilt_key]
title = s
if coord == '-1':
title = title + ": specimen coordinates"
if coord == '0':
title = title + ": geographic coordinates"
if coord == '100':
title = title + ": tilt corrected coordinates"
DIblock, GCblock = [], []
for plotrec in datablock:
if plotrec[direction_type_key] == 'p': # direction is pole to plane
GCblock.append((float(plotrec[dec_key]), float(plotrec[inc_key])))
else: # assume direction is a directed line
DIblock.append((float(plotrec[dec_key]), float(plotrec[inc_key])))
if len(DIblock) > 0:
plot_di(fignum, DIblock) # plot directed lines
if len(GCblock) > 0:
for pole in GCblock:
plot_circ(fignum, pole, 90., 'g') # plot directed lines
#
# put on the mean direction
#
x, y = [], []
XY = pmag.dimap(float(fpars["dec"]), float(fpars["inc"]))
x.append(XY[0])
y.append(XY[1])
plt.figure(num=fignum)
plt.scatter(x, y, marker='d', s=80, c='g')
plt.title(title)
#
# get the alpha95
#
Xcirc, Ycirc = [], []
Da95, Ia95 = pmag.circ(float(fpars["dec"]), float(
fpars["inc"]), float(fpars["alpha95"]))
for k in range(len(Da95)):
XY = pmag.dimap(Da95[k], Ia95[k])
Xcirc.append(XY[0])
Ycirc.append(XY[1])
plt.plot(Xcirc, Ycirc, 'g') | plots lines and planes on a great circle with alpha 95 and mean
Parameters
_________
fignum : number of plt.figure() object
datablock : nested list of dictionaries with keys in 3.0 or 2.5 format
3.0 keys: dir_dec, dir_inc, dir_tilt_correction = [-1,0,100], direction_type_key =['p','l']
2.5 keys: dec, inc, tilt_correction = [-1,0,100],direction_type_key =['p','l']
fpars : Fisher parameters calculated by, e.g., pmag.dolnp() or pmag.dolnp3_0()
direction_type_key : key for dictionary direction_type ('specimen_direction_type')
Effects
_______
plots the site level figure |
def write(series, output, scale=None):
"""Write a `TimeSeries` to a WAV file
Parameters
----------
series : `TimeSeries`
the series to write
output : `file`, `str`
the file object or filename to write to
scale : `float`, optional
the factor to apply to scale the data to (-1.0, 1.0),
pass `scale=1` to not apply any scale, otherwise
the data will be auto-scaled
See also
--------
scipy.io.wavfile.write
for details on how the WAV file is actually written
Examples
--------
>>> from gwpy.timeseries import TimeSeries
>>> t = TimeSeries([1, 2, 3, 4, 5])
>>> t.write('test.wav')
"""
fsamp = int(series.sample_rate.decompose().value)
if scale is None:
scale = 1 / numpy.abs(series.value).max()
data = (series.value * scale).astype('float32')
return wavfile.write(output, fsamp, data) | Write a `TimeSeries` to a WAV file
Parameters
----------
series : `TimeSeries`
the series to write
output : `file`, `str`
the file object or filename to write to
scale : `float`, optional
the factor to apply to scale the data to (-1.0, 1.0),
pass `scale=1` to not apply any scale, otherwise
the data will be auto-scaled
See also
--------
scipy.io.wavfile.write
for details on how the WAV file is actually written
Examples
--------
>>> from gwpy.timeseries import TimeSeries
>>> t = TimeSeries([1, 2, 3, 4, 5])
>>> t.write('test.wav') |
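The only non-trivial step above is the auto-scaling. A sketch of the same idea with plain numpy and `scipy.io.wavfile`, without gwpy:

```python
import numpy as np
from scipy.io import wavfile

# A 440 Hz tone whose amplitude exceeds 1.0, so it needs scaling.
rate = 44100
samples = 3.0 * np.sin(2 * np.pi * 440 * np.arange(rate) / rate)
scale = 1 / np.abs(samples).max()          # bring samples into (-1.0, 1.0)
wavfile.write('tone.wav', rate, (samples * scale).astype('float32'))
```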
def command(
commands: str or list,
prefix: str or list = "/",
separator: str = " ",
case_sensitive: bool = False
):
"""Filter commands, i.e.: text messages starting with "/" or any other custom prefix.
Args:
commands (``str`` | ``list``):
The command or list of commands as string the filter should look for.
Examples: "start", ["start", "help", "settings"]. When a message text containing
a command arrives, the command itself and its arguments will be stored in the *command*
field of the :class:`Message <pyrogram.Message>`.
prefix (``str`` | ``list``, *optional*):
A prefix or a list of prefixes as string the filter should look for.
Defaults to "/" (slash). Examples: ".", "!", ["/", "!", "."].
Can be None or "" (empty string) to allow commands with no prefix at all.
separator (``str``, *optional*):
The command arguments separator. Defaults to " " (white space).
Examples: /start first second, /start-first-second, /start.first.second.
case_sensitive (``bool``, *optional*):
Pass True if you want your command(s) to be case sensitive. Defaults to False.
Examples: when True, command="Start" would trigger /Start but not /start.
"""
def func(flt, message):
text = message.text or message.caption
if text:
for p in flt.p:
if text.startswith(p):
s = text.split(flt.s)
c, a = s[0][len(p):], s[1:]
c = c if flt.cs else c.lower()
message.command = ([c] + a) if c in flt.c else None
break
return bool(message.command)
commands = commands if type(commands) is list else [commands]
commands = {c if case_sensitive else c.lower() for c in commands}
prefixes = set(prefix) if prefix else {""}
return create("Command", func=func, c=commands, p=prefixes, s=separator, cs=case_sensitive) | Filter commands, i.e.: text messages starting with "/" or any other custom prefix.
Args:
commands (``str`` | ``list``):
The command or list of commands as string the filter should look for.
Examples: "start", ["start", "help", "settings"]. When a message text containing
a command arrives, the command itself and its arguments will be stored in the *command*
field of the :class:`Message <pyrogram.Message>`.
prefix (``str`` | ``list``, *optional*):
A prefix or a list of prefixes as string the filter should look for.
Defaults to "/" (slash). Examples: ".", "!", ["/", "!", "."].
Can be None or "" (empty string) to allow commands with no prefix at all.
separator (``str``, *optional*):
The command arguments separator. Defaults to " " (white space).
Examples: /start first second, /start-first-second, /start.first.second.
case_sensitive (``bool``, *optional*):
Pass True if you want your command(s) to be case sensitive. Defaults to False.
Examples: when True, command="Start" would trigger /Start but not /start. |
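A standalone sketch of the parsing performed by the inner `func` above: strip a recognized prefix, split on the separator, and lower-case the command when matching is case-insensitive. This is an illustration only, not pyrogram code:

```python
def parse_command(text, commands={'start', 'help'}, prefixes={'/'},
                  separator=' ', case_sensitive=False):
    for p in prefixes:
        if text.startswith(p):
            parts = text.split(separator)
            cmd, args = parts[0][len(p):], parts[1:]
            cmd = cmd if case_sensitive else cmd.lower()
            return [cmd] + args if cmd in commands else None
    return None

print(parse_command('/Start hello world'))  # ['start', 'hello', 'world']
print(parse_command('!start'))              # None (prefix not recognized)
```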
def get_objects_for_subject(subject=None,
object_category=None,
relation=None,
**kwargs):
"""
Convenience method: Given a subject (e.g. gene, disease, variant), return all associated objects (phenotypes, functions, interacting genes, etc)
"""
searchresult = search_associations(subject=subject,
fetch_objects=True,
rows=0,
object_category=object_category,
relation=relation,
**kwargs
)
objs = searchresult['objects']
return objs | Convenience method: Given a subject (e.g. gene, disease, variant), return all associated objects (phenotypes, functions, interacting genes, etc) |
def wrap_make_secure_channel(make_secure_channel_func, tracer=None):
"""Wrap the google.cloud._helpers.make_secure_channel."""
def call(*args, **kwargs):
channel = make_secure_channel_func(*args, **kwargs)
try:
host = kwargs.get('host')
tracer_interceptor = OpenCensusClientInterceptor(tracer, host)
intercepted_channel = grpc.intercept_channel(
channel, tracer_interceptor)
return intercepted_channel # pragma: NO COVER
except Exception:
log.warning(
'Failed to wrap secure channel, '
'clientlibs grpc calls not traced.')
return channel
return call | Wrap the google.cloud._helpers.make_secure_channel. |
def sample_categorical(prob, rng):
"""Sample from independent categorical distributions
Each batch is an independent categorical distribution.
Parameters
----------
prob : numpy.ndarray
Probability of the categorical distribution. Shape --> (batch_num, category_num)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
Sampling result. Shape --> (batch_num,)
"""
ret = numpy.empty(prob.shape[0], dtype=numpy.float32)
for ind in range(prob.shape[0]):
ret[ind] = numpy.searchsorted(numpy.cumsum(prob[ind]), rng.rand()).clip(
min=0.0, max=prob.shape[1] - 0.5)
return ret | Sample from independent categorical distributions
Each batch is an independent categorical distribution.
Parameters
----------
prob : numpy.ndarray
Probability of the categorical distribution. Shape --> (batch_num, category_num)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
Sampling result. Shape --> (batch_num,) |
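The sampler above is the inverse-CDF trick: cumulative-sum each row and locate a uniform draw with `searchsorted`. A small numpy illustration of just that idea (plain integer indices, without the float32/clip details of the original):

```python
import numpy as np

rng = np.random.RandomState(0)
prob = np.array([[0.1, 0.2, 0.7],
                 [0.5, 0.5, 0.0]])
draws = np.array([np.searchsorted(np.cumsum(p), rng.rand()) for p in prob])
print(draws)  # e.g. [2 1] -- one category index per batch row
```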
def from_zip(cls, src='/tmp/app.zip', dest='/app'):
"""
Unzips a zipped app project file and instantiates it.
:param src: zipfile path
:param dest: destination folder to extract the zipfile content
Returns
A project instance.
"""
try:
zf = zipfile.ZipFile(src, 'r')
except FileNotFoundError:
raise errors.InvalidPathError(src)
except zipfile.BadZipFile:
raise errors.InvalidZipFileError(src)
[zf.extract(file, dest) for file in zf.namelist()]
zf.close()
return cls.from_path(dest) | Unzips a zipped app project file and instantiates it.
:param src: zipfile path
:param dest: destination folder to extract the zipfile content
Returns
A project instance. |
def text_pixels(self, text, clear_screen=True, x=0, y=0, text_color='black', font=None):
"""
Display `text` starting at pixel (x, y).
The EV3 display is 178x128 pixels
- (0, 0) would be the top left corner of the display
- (89, 64) would be right in the middle of the display
'text_color' : PIL says it supports "common HTML color names". There
are 140 HTML color names listed here that are supported by all modern
browsers. This is probably a good list to start with.
https://www.w3schools.com/colors/colors_names.asp
'font' : can be any font displayed here
http://ev3dev-lang.readthedocs.io/projects/python-ev3dev/en/ev3dev-stretch/display.html#bitmap-fonts
- If font is a string, it is the name of a font to be loaded.
- If font is a Font object, returned from :meth:`ev3dev2.fonts.load`, then it is
used directly. This is desirable for faster display times.
"""
if clear_screen:
self.clear()
if font is not None:
if isinstance(font, str):
assert font in fonts.available(), "%s is an invalid font" % font
font = fonts.load(font)
return self.draw.text((x, y), text, fill=text_color, font=font)
else:
return self.draw.text((x, y), text, fill=text_color) | Display `text` starting at pixel (x, y).
The EV3 display is 178x128 pixels
- (0, 0) would be the top left corner of the display
- (89, 64) would be right in the middle of the display
'text_color' : PIL says it supports "common HTML color names". There
are 140 HTML color names listed here that are supported by all modern
browsers. This is probably a good list to start with.
https://www.w3schools.com/colors/colors_names.asp
'font' : can be any font displayed here
http://ev3dev-lang.readthedocs.io/projects/python-ev3dev/en/ev3dev-stretch/display.html#bitmap-fonts
- If font is a string, it is the name of a font to be loaded.
- If font is a Font object, returned from :meth:`ev3dev2.fonts.load`, then it is
used directly. This is desirable for faster display times. |
def parse(
args: typing.List[str] = None,
arg_parser: ArgumentParser = None
) -> dict:
"""Parses the arguments for the cauldron server"""
parser = arg_parser or create_parser()
return vars(parser.parse_args(args)) | Parses the arguments for the cauldron server |
def authenticate(user=None): # noqa: E501
"""Authenticate
Authenticate with the API # noqa: E501
:param user: The user authentication object.
:type user: dict | bytes
:rtype: UserAuth
"""
if connexion.request.is_json:
user = UserAuth.from_dict(connexion.request.get_json()) # noqa: E501
credentials = mapUserAuthToCredentials(user)
auth = ApitaxAuthentication.login(credentials)
if(not auth):
return ErrorResponse(status=401, message="Invalid credentials")
access_token = create_access_token(identity={'username': user.username, 'role': auth['role']})
refresh_token = create_refresh_token(identity={'username': user.username, 'role': auth['role']})
return AuthResponse(status=201, message='User ' + user.username + ' was authenticated as ' + auth['role'], access_token=access_token, refresh_token=refresh_token, auth=UserAuth(username=auth['credentials'].username, api_token=auth['credentials'].token)) | Authenticate
Authenticate with the API # noqa: E501
:param user: The user authentication object.
:type user: dict | bytes
:rtype: UserAuth |
def remove_security_group(self, name):
"""
Remove a security group from container
"""
for group in self.security_groups:
if group.isc_name == name:
group.delete() | Remove a security group from container |
def pformat_dict_summary_html(dict):
"""
Briefly print the dictionary keys.
"""
if not dict:
return ' {}'
html = []
for key, value in sorted(six.iteritems(dict)):
if not isinstance(value, DICT_EXPANDED_TYPES):
value = '...'
html.append(_format_dict_item(key, value))
return mark_safe(u'<br/>'.join(html)) | Briefly print the dictionary keys. |
def main():
'''
entry point of the application.
Parses the CLI commands and runs the actions.
'''
args = CLI.parse_args(__doc__)
if args['--verbose']:
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
if not args['-k']:
print("No API key given. Please create an API key on <https://octopart.com/api/dashboard>")
return ReturnValues.NO_APIKEY
if args['-t'] == 'octopart':
engine = PyPartsOctopart(args['-k'], verbose=args['--verbose'])
elif args['-t'] == 'parts.io':
engine = PyPartsPartsIO(args['-k'], verbose=args['--verbose'])
else:
engine = PyPartsBase(args['-k'], verbose=args['--verbose'])
try:
if 'lookup' in args or 'search' in args:
return engine.part_search(args['<part>'])
elif 'specs' in args:
return engine.part_specs(args['<part>'])
elif 'datasheet' in args:
if args['<action>'] == 'open':
if args['--output']:
return engine.part_datasheet(args['<part>'], command=args['--command'], path=args['--output'])
else:
return engine.part_datasheet(args['<part>'], command=args['--command'])
elif args['<action>'] == 'save':
return engine.part_datasheet(args['<part>'], path=args['--output'])
elif 'show' in args:
return engine.part_show(args['<part>'], printout=args['--print'])
except OctopartException as err:
print(err)
return ReturnValues.RUNTIME_ERROR | entry point of the application.
Parses the CLI commands and runs the actions. |
def prob_t_profiles(self, profile_pair, multiplicity, t,
return_log=False, ignore_gaps=True):
'''
Calculate the probability of observing a node pair at a distance t
Parameters
----------
profile_pair: numpy arrays
Probability distributions of the nucleotides at either
end of the branch. pp[0] = parent, pp[1] = child
multiplicity : numpy array
The number of times an alignment pattern is observed
t : float
Length of the branch separating parent and child
ignore_gaps: bool
If True, ignore mutations to and from gaps in distance calculations
return_log : bool
If True, return the log-probability; otherwise return the probability itself
'''
if t<0:
logP = -ttconf.BIG_NUMBER
else:
Qt = self.expQt(t)
if len(Qt.shape)==3:
res = np.einsum('ai,ija,aj->a', profile_pair[1], Qt, profile_pair[0])
else:
res = np.einsum('ai,ij,aj->a', profile_pair[1], Qt, profile_pair[0])
if ignore_gaps and (self.gap_index is not None): # calculate the probability that neither outgroup/node has a gap
non_gap_frac = (1-profile_pair[0][:,self.gap_index])*(1-profile_pair[1][:,self.gap_index])
# weigh log LH by the non-gap probability
logP = np.sum(multiplicity*np.log(res)*non_gap_frac)
else:
logP = np.sum(multiplicity*np.log(res))
return logP if return_log else np.exp(logP) | Calculate the probability of observing a node pair at a distance t
Parameters
----------
profile_pair: numpy arrays
Probability distributions of the nucleotides at either
end of the branch. pp[0] = parent, pp[1] = child
multiplicity : numpy array
The number of times an alignment pattern is observed
t : float
Length of the branch separating parent and child
ignore_gaps: bool
If True, ignore mutations to and from gaps in distance calculations
return_log : bool
If True, return the log-probability; otherwise return the probability itself |
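The `einsum` subscripts `'ai,ij,aj->a'` evaluate, for every alignment position `a`, the quadratic form child·Qt·parent. A quick check with made-up 2-state profiles (not TreeTime data):

```python
import numpy as np

child = np.array([[1.0, 0.0], [0.5, 0.5]])    # one row per alignment position
parent = np.array([[0.0, 1.0], [0.5, 0.5]])
Qt = np.array([[0.9, 0.1], [0.1, 0.9]])

fast = np.einsum('ai,ij,aj->a', child, Qt, parent)
slow = np.array([c @ Qt @ p for c, p in zip(child, parent)])
print(np.allclose(fast, slow))  # True
```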
def normalize_lcdict_byinst(
lcdict,
magcols='all',
normto='sdssr',
normkeylist=('stf','ccd','flt','fld','prj','exp'),
debugmode=False,
quiet=False
):
'''This is a function to normalize light curves across all instrument
combinations present.
Use this to normalize a light curve containing a variety of:
- HAT station IDs ('stf')
- camera IDs ('ccd')
- filters ('flt')
- observed field names ('fld')
- HAT project IDs ('prj')
- exposure times ('exp')
Parameters
----------
lcdict : dict
The input lcdict to process.
magcols : 'all' or list of str
If this is 'all', all of the columns in the lcdict that are indicated to
be magnitude measurement columns are normalized. If this is a list of
str, must contain the keys of the lcdict specifying which magnitude
columns will be normalized.
normto : {'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
This indicates which column will be the normalization target. If this is
'zero', will normalize to 0.0 for each LC column. Otherwise, will
normalize to the value of one of the other keys in the
lcdict['objectinfo'][magkey], meaning the normalization will be to some
form of catalog magnitude.
normkeylist : list of str
These are the column keys to use to form the normalization
index. Measurements in the specified `magcols` with identical
normalization index values will be considered as part of a single
measurement 'era', and will be normalized to zero. Once all eras have
been normalized this way, the final light curve will be re-normalized as
specified in `normto`.
debugmode : bool
If True, will indicate progress as time-groups are found and processed.
quiet : bool
If True, will not emit any messages when processing.
Returns
-------
dict
Returns the lcdict with the magnitude measurements normalized as
specified. The normalization happens IN PLACE.
'''
# check if this lc has been normalized already. return as-is if so
if 'lcinstnormcols' in lcdict and len(lcdict['lcinstnormcols']) > 0:
if not quiet:
LOGWARNING('this lightcurve is already '
'normalized by instrument keys, '
'returning...')
return lcdict
# generate the normalization key
normkeycols = []
availablenormkeys = []
for key in normkeylist:
if key in lcdict and lcdict[key] is not None:
normkeycols.append(lcdict[key])
availablenormkeys.append(key)
# transpose to turn these into rows
normkeycols = list(zip(*normkeycols))
# convert to a string rep for each key and post-process for simplicity
allkeys = [repr(x) for x in normkeycols]
allkeys = [a.replace('(','').replace(')','').replace("'",'').replace(' ','')
for a in allkeys]
# turn these into a numpy array and get the unique values
allkeys = np.array(allkeys)
normkeys = np.unique(allkeys)
# figure out the apertures
# HATLC V2 format
if 'lcapertures' in lcdict:
apertures = sorted(lcdict['lcapertures'].keys())
# LCC-CSV-V1 format HATLC
elif 'objectinfo' in lcdict and 'lcapertures' in lcdict['objectinfo']:
apertures = sorted(lcdict['objectinfo']['lcapertures'].keys())
# put together the column names
aimcols = [('aim_%s' % x) for x in apertures if ('aim_%s' % x) in lcdict]
armcols = [('arm_%s' % x) for x in apertures if ('arm_%s' % x) in lcdict]
aepcols = [('aep_%s' % x) for x in apertures if ('aep_%s' % x) in lcdict]
atfcols = [('atf_%s' % x) for x in apertures if ('atf_%s' % x) in lcdict]
psimcols = [x for x in ['psim','psrm','psep','pstf'] if x in lcdict]
irmcols = [('irm_%s' % x) for x in apertures if ('irm_%s' % x) in lcdict]
iepcols = [('iep_%s' % x) for x in apertures if ('iep_%s' % x) in lcdict]
itfcols = [('itf_%s' % x) for x in apertures if ('itf_%s' % x) in lcdict]
# next, find all the mag columns to normalize
if magcols == 'all':
cols_to_normalize = (aimcols + armcols + aepcols + atfcols +
psimcols + irmcols + iepcols + itfcols)
elif magcols == 'redmags':
cols_to_normalize = (armcols + (['psrm'] if 'psrm' in lcdict else []) +
irmcols)
elif magcols == 'epdmags':
cols_to_normalize = (aepcols + (['psep'] if 'psep' in lcdict else []) +
iepcols)
elif magcols == 'tfamags':
cols_to_normalize = (atfcols + (['pstf'] if 'pstf' in lcdict else []) +
itfcols)
elif magcols == 'epdtfa':
cols_to_normalize = (aepcols + (['psep'] if 'psep' in lcdict else []) +
iepcols + atfcols +
(['pstf'] if 'pstf' in lcdict else []) +
itfcols)
else:
cols_to_normalize = magcols.split(',')
cols_to_normalize = [x.strip() for x in cols_to_normalize]
colsnormalized = []
# go through each column and normalize them
for col in cols_to_normalize:
if col in lcdict:
# note: this requires the columns in ndarray format
# unlike normalize_lcdict
thismags = lcdict[col]
# go through each key in normusing
for nkey in normkeys:
thisind = allkeys == nkey
# make sure we have at least 3 elements in the matched set of
# magnitudes corresponding to this key. also make sure that the
# magnitudes corresponding to this key aren't all nan.
thismagsize = thismags[thisind].size
thismagfinite = np.where(np.isfinite(thismags[thisind]))[0].size
if thismagsize > 2 and thismagfinite > 2:
# do the normalization and update the thismags in the lcdict
medmag = np.nanmedian(thismags[thisind])
lcdict[col][thisind] = lcdict[col][thisind] - medmag
if debugmode:
LOGDEBUG('magcol: %s, currkey: "%s", nelem: %s, '
'medmag: %s' %
(col, nkey, len(thismags[thisind]), medmag))
# we remove mags that correspond to keys with less than 3
# (finite) elements because we can't get the median mag
# correctly and renormalizing them to zero would just set them
# to zero
else:
lcdict[col][thisind] = np.nan
# everything should now be normalized to zero
# add back the requested normto
if normto in ('jmag', 'hmag', 'kmag',
'bmag', 'vmag',
'sdssg', 'sdssr', 'sdssi'):
if (normto in lcdict['objectinfo'] and
lcdict['objectinfo'][normto] is not None):
lcdict[col] = lcdict[col] + lcdict['objectinfo'][normto]
else:
if not quiet:
LOGWARNING('no %s available in lcdict, '
'normalizing to 0.0' % normto)
normto = 'zero'
# update the colsnormalized list
colsnormalized.append(col)
else:
if not quiet:
LOGWARNING('column %s is not present, skipping...' % col)
continue
# add the lcnormcols key to the lcdict
lcinstnormcols = ('cols normalized: %s - '
'normalized to: %s - '
'norm keys used: %s') % (repr(colsnormalized),
normto,
repr(availablenormkeys))
lcdict['lcinstnormcols'] = lcinstnormcols
return lcdict | This is a function to normalize light curves across all instrument
combinations present.
Use this to normalize a light curve containing a variety of:
- HAT station IDs ('stf')
- camera IDs ('ccd')
- filters ('flt')
- observed field names ('fld')
- HAT project IDs ('prj')
- exposure times ('exp')
Parameters
----------
lcdict : dict
The input lcdict to process.
magcols : 'all' or list of str
If this is 'all', all of the columns in the lcdict that are indicated to
be magnitude measurement columns are normalized. If this is a list of
str, must contain the keys of the lcdict specifying which magnitude
columns will be normalized.
normto : {'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
This indicates which column will be the normalization target. If this is
'zero', will normalize to 0.0 for each LC column. Otherwise, will
normalize to the value of one of the other keys in the
lcdict['objectinfo'][magkey], meaning the normalization will be to some
form of catalog magnitude.
normkeylist : list of str
These are the column keys to use to form the normalization
index. Measurements in the specified `magcols` with identical
normalization index values will be considered as part of a single
measurement 'era', and will be normalized to zero. Once all eras have
been normalized this way, the final light curve will be re-normalized as
specified in `normto`.
debugmode : bool
If True, will indicate progress as time-groups are found and processed.
quiet : bool
If True, will not emit any messages when processing.
Returns
-------
dict
Returns the lcdict with the magnitude measurements normalized as
specified. The normalization happens IN PLACE. |
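Stripped of the lcdict bookkeeping, the normalization above groups magnitudes by an instrument-key string and subtracts each group's median, blanking groups with fewer than three finite points. A minimal numpy sketch with made-up data:

```python
import numpy as np

mags = np.array([10.1, 10.3, 10.2, 12.0, 12.2, 12.1])
keys = np.array(['5,8,r', '5,8,r', '5,8,r', '7,6,r', '7,6,r', '7,6,r'])
for key in np.unique(keys):
    idx = keys == key
    if np.isfinite(mags[idx]).sum() > 2:
        mags[idx] -= np.nanmedian(mags[idx])   # centre this "era" on zero
    else:
        mags[idx] = np.nan                     # too few points to normalize
print(mags)  # both eras are now centred on zero
```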
def get_event_timelines(self, event_ids, session=None, lightweight=None):
"""
Returns a list of event timelines based on event id's
supplied.
:param list event_ids: List of event id's to return
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.EventTimeline]
"""
url = '%s%s' % (self.url, 'eventTimelines')
params = {
'eventIds': ','.join(str(x) for x in event_ids),
'alt': 'json',
'regionCode': 'UK',
'locale': 'en_GB'
}
(response, elapsed_time) = self.request(params=params, session=session, url=url)
return self.process_response(response, resources.EventTimeline, elapsed_time, lightweight) | Returns a list of event timelines based on event id's
supplied.
:param list event_ids: List of event id's to return
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.EventTimeline] |
def make_if_statement(instr, queue, stack, context):
"""
Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE.
"""
test_expr = make_expr(stack)
if isinstance(instr, instrs.POP_JUMP_IF_TRUE):
test_expr = ast.UnaryOp(op=ast.Not(), operand=test_expr)
first_block = popwhile(op.is_not(instr.arg), queue, side='left')
if isinstance(first_block[-1], instrs.RETURN_VALUE):
body = instrs_to_body(first_block, context)
return ast.If(test=test_expr, body=body, orelse=[])
jump_to_end = expect(
first_block.pop(), instrs.JUMP_FORWARD, "at end of if-block"
)
body = instrs_to_body(first_block, context)
# First instruction after the whole if-block.
end = jump_to_end.arg
if instr.arg is jump_to_end.arg:
orelse = []
else:
orelse = instrs_to_body(
popwhile(op.is_not(end), queue, side='left'),
context,
)
return ast.If(test=test_expr, body=body, orelse=orelse) | Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE. |
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents has been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents) | Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it. |
def provides(self):
"""
A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings.
"""
plist = self.metadata.provides
s = '%s (%s)' % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist | A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings. |
def page_models(self, constructor, paging, constraints=None, *, columns=None, order_by=None):
"""Specialization of DataAccess.page that returns models instead of cursor objects."""
records, count = self.page(constructor.table_name, paging, constraints, columns=columns,
order_by=order_by)
return ([constructor(r) for r in records], count) | Specialization of DataAccess.page that returns models instead of cursor objects. |
def insert(self, packet, **kwargs):
''' Insert a packet into the database
Arguments
packet
The :class:`ait.core.tlm.Packet` instance to insert into
the database
'''
values = [ ]
pd = packet._defn
for defn in pd.fields:
val = getattr(packet.raw, defn.name)
if val is None and defn.name in pd.history:
val = getattr(packet.history, defn.name)
values.append(val)
qmark = ['?'] * len(values)
sql = 'INSERT INTO %s VALUES (%s)' % (pd.name, ', '.join(qmark))
self._conn.execute(sql, values) | Insert a packet into the database
Arguments
packet
The :class:`ait.core.tlm.Packet` instance to insert into
the database |
def _get_db_version(self):
""" Get the schema version of the nipap psql db.
"""
dbname = self._cfg.get('nipapd', 'db_name')
self._execute("SELECT description FROM pg_shdescription JOIN pg_database ON objoid = pg_database.oid WHERE datname = '%s'" % dbname)
comment = self._curs_pg.fetchone()
if comment is None:
raise NipapDatabaseNoVersionError("Could not find comment of psql database %s" % dbname)
db_version = None
m = re.match('NIPAP database - schema version: ([0-9]+)', comment[0])
if m:
db_version = int(m.group(1))
else:
raise NipapError("Could not match schema version database comment")
return db_version | Get the schema version of the nipap psql db. |
def slot_availability_array(events, slots):
"""
Return a numpy array mapping events to slots
- Rows correspond to events
- Columns correspond to slots
Array has value 0 if event cannot be scheduled in a given slot
(1 otherwise)
"""
array = np.ones((len(events), len(slots)))
for row, event in enumerate(events):
for col, slot in enumerate(slots):
if slot in event.unavailability or event.duration > slot.duration:
array[row, col] = 0
return array | Return a numpy array mapping events to slots
- Rows correspond to events
- Columns correspond to slots
Array has value 0 if event cannot be scheduled in a given slot
(1 otherwise) |
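A hypothetical call to the function above with minimal stand-in objects that expose only the attributes it reads (`unavailability`, `duration`); the real conference-scheduler Event/Slot classes carry more fields:

```python
from collections import namedtuple

Event = namedtuple('Event', ['unavailability', 'duration'])
Slot = namedtuple('Slot', ['duration'])

slots = [Slot(duration=30), Slot(duration=60)]
events = [
    Event(unavailability=[], duration=45),          # too long for the 30-min slot
    Event(unavailability=[slots[1]], duration=30),  # barred from the 60-min slot
]
print(slot_availability_array(events, slots))
# [[0. 1.]
#  [1. 0.]]
```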
def build_text_part(name, thread, struct):
"""
create an urwid.Text widget (wrapped in appropriate Attributes)
to display a plain text part in a threadline.
create an urwid.Columns widget (wrapped in appropriate Attributes)
to display a list of tag strings, as part of a threadline.
:param name: id of part to build
:type name: str
:param thread: the thread to get local info for
:type thread: :class:`alot.db.thread.Thread`
:param struct: theming attributes for this part, as provided by
:class:`alot.settings.theme.Theme.get_threadline_theming`
:type struct: dict
:return: overall width (in characters) and a widget.
:rtype: tuple[int, AttrFlipWidget]
"""
part_w = None
width = None
# extract min and max allowed width from theme
minw = 0
maxw = None
width_tuple = struct['width']
if width_tuple is not None:
if width_tuple[0] == 'fit':
minw, maxw = width_tuple[1:]
content = prepare_string(name, thread, maxw)
# pad content if not long enough
if minw:
alignment = struct['alignment']
if alignment == 'left':
content = content.ljust(minw)
elif alignment == 'center':
content = content.center(minw)
else:
content = content.rjust(minw)
# define width and part_w
text = urwid.Text(content, wrap='clip')
width = text.pack()[0]
part_w = AttrFlipWidget(text, struct)
return width, part_w | create an urwid.Text widget (wrapped in appropriate Attributes)
to display a plain text part in a threadline.
create an urwid.Columns widget (wrapped in appropriate Attributes)
to display a list of tag strings, as part of a threadline.
:param name: id of part to build
:type name: str
:param thread: the thread to get local info for
:type thread: :class:`alot.db.thread.Thread`
:param struct: theming attributes for this part, as provided by
:class:`alot.settings.theme.Theme.get_threadline_theming`
:type struct: dict
:return: overall width (in characters) and a widget.
:rtype: tuple[int, AttrFlipWidget] |
def mul(mean1, var1, mean2, var2):
"""
Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean, var).
Strictly speaking the product of two Gaussian PDFs is a Gaussian
function, not Gaussian PDF. It is, however, proportional to a Gaussian
PDF, so it is safe to treat the output as a PDF for any filter using
Bayes equation, which normalizes the result anyway.
Parameters
----------
mean1 : scalar
mean of first Gaussian
var1 : scalar
variance of first Gaussian
mean2 : scalar
mean of second Gaussian
var2 : scalar
variance of second Gaussian
Returns
-------
mean : scalar
mean of product
var : scalar
variance of product
Examples
--------
>>> mul(1, 2, 3, 4)
(1.6666666666666667, 1.3333333333333333)
References
----------
Bromiley. "Products and Convolutions of Gaussian Probability Functions",
Tina Memo No. 2003-003.
http://www.tina-vision.net/docs/memos/2003-003.pdf
"""
mean = (var1*mean2 + var2*mean1) / (var1 + var2)
var = 1 / (1/var1 + 1/var2)
return (mean, var) | Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean, var).
Strictly speaking the product of two Gaussian PDFs is a Gaussian
function, not Gaussian PDF. It is, however, proportional to a Gaussian
PDF, so it is safe to treat the output as a PDF for any filter using
Bayes equation, which normalizes the result anyway.
Parameters
----------
mean1 : scalar
mean of first Gaussian
var1 : scalar
variance of first Gaussian
mean2 : scalar
mean of second Gaussian
var2 : scalar
variance of second Gaussian
Returns
-------
mean : scalar
mean of product
var : scalar
variance of product
Examples
--------
>>> mul(1, 2, 3, 4)
(1.6666666666666667, 1.3333333333333333)
References
----------
Bromiley. "Products and Convolutions of Gaussian Probability Functions",
Tina Memo No. 2003-003.
http://www.tina-vision.net/docs/memos/2003-003.pdf |
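A numeric sanity check of the closed form, under the assumption that `mul` above is in scope: normalize the pointwise product of the two densities and compare its moments with the returned values.

```python
import numpy as np
from scipy.stats import norm

mean, var = mul(1, 2, 3, 4)                      # (1.666..., 1.333...)
x, dx = np.linspace(-10, 15, 200001, retstep=True)
prod = norm.pdf(x, 1, np.sqrt(2)) * norm.pdf(x, 3, np.sqrt(4))
dens = prod / (prod.sum() * dx)                  # renormalize to a PDF
num_mean = (x * dens).sum() * dx
num_var = ((x - num_mean) ** 2 * dens).sum() * dx
print(np.allclose([num_mean, num_var], [mean, var]))  # True
```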
def EnumerateFilesystemsFromClient(args):
"""List all local filesystems mounted on this system."""
del args # Unused.
for drive in win32api.GetLogicalDriveStrings().split("\x00"):
if not drive:
continue
try:
volume = win32file.GetVolumeNameForVolumeMountPoint(drive).rstrip("\\")
label, _, _, _, fs_type = win32api.GetVolumeInformation(drive)
except win32api.error:
continue
yield rdf_client_fs.Filesystem(
device=volume,
mount_point="/%s:/" % drive[0],
type=fs_type,
label=UnicodeFromCodePage(label)) | List all local filesystems mounted on this system. |
def DecodeValueFromAttribute(self, attribute_name, value, ts):
"""Given a serialized value, decode the attribute.
Only attributes which have been previously defined are permitted.
Args:
attribute_name: The string name of the attribute.
value: The serialized attribute value.
ts: The timestamp of this attribute.
"""
try:
# Get the Attribute object from our schema.
attribute = Attribute.PREDICATES[attribute_name]
cls = attribute.attribute_type
self._AddAttributeToCache(attribute, LazyDecoder(cls, value, ts),
self.synced_attributes)
except KeyError:
pass
except (ValueError, rdfvalue.DecodeError):
logging.debug("%s: %s invalid encoding. Skipping.", self.urn,
attribute_name) | Given a serialized value, decode the attribute.
Only attributes which have been previously defined are permitted.
Args:
attribute_name: The string name of the attribute.
value: The serialized attribute value.
ts: The timestamp of this attribute. |
def fill_tree_from_xml(tag, ar_tree, namespace):
# type: (_Element, ArTree, str) -> None
"""Parse the xml tree into ArTree objects."""
for child in tag: # type: _Element
name_elem = child.find('./' + namespace + 'SHORT-NAME')
# long_name = child.find('./' + namespace + 'LONG-NAME')
if name_elem is not None and child is not None:
fill_tree_from_xml(child, ar_tree.append_child(name_elem.text, child), namespace)
if name_elem is None and child is not None:
fill_tree_from_xml(child, ar_tree, namespace) | Parse the xml tree into ArTree objects. |
def tournament_selection(random, population, args):
"""Return a tournament sampling of individuals from the population.
This function selects ``num_selected`` individuals from the population.
It selects each one by using random sampling without replacement
to pull ``tournament_size`` individuals and adds the best of the
tournament as its selection. If ``tournament_size`` is greater than
the population size, the population size is used instead as the size
of the tournament.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *num_selected* -- the number of individuals to be selected (default 1)
- *tournament_size* -- the tournament size (default 2)
"""
num_selected = args.setdefault('num_selected', 1)
tournament_size = args.setdefault('tournament_size', 2)
if tournament_size > len(population):
tournament_size = len(population)
selected = []
for _ in range(num_selected):
tourn = random.sample(population, tournament_size)
selected.append(max(tourn))
return selected | Return a tournament sampling of individuals from the population.
This function selects ``num_selected`` individuals from the population.
It selects each one by using random sampling without replacement
to pull ``tournament_size`` individuals and adds the best of the
tournament as its selection. If ``tournament_size`` is greater than
the population size, the population size is used instead as the size
of the tournament.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *num_selected* -- the number of individuals to be selected (default 1)
- *tournament_size* -- the tournament size (default 2) |
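An illustrative call, assuming `tournament_selection` from the entry above is in scope. inspyred normally passes Individual objects, but any mutually comparable items work, so plain integers are enough for a demonstration:

```python
import random

rng = random.Random(42)
population = [3, 11, 7, 2, 9, 5]
picked = tournament_selection(rng, population,
                              {'num_selected': 3, 'tournament_size': 2})
print(picked)  # three winners, each the larger of a randomly drawn pair
```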
def delete_service(self, service_name, params=None):
"""
Delete the service of the given name. It may fail if there are
any service keys or app bindings. Use purge() if you want
to delete it all.
"""
if not self.space.has_service_with_name(service_name):
logging.warning("Service not found so... succeeded?")
return True
guid = self.get_instance_guid(service_name)
logging.info("Deleting service %s with guid %s" % (service_name, guid))
# MAINT: this endpoint changes in newer version of api
return self.api.delete("/v2/service_instances/%s?accepts_incomplete=true" %
(guid), params=params) | Delete the service of the given name. It may fail if there are
any service keys or app bindings. Use purge() if you want
to delete it all. |
def suggest_next_locations(self, context = None, pending_X = None, ignored_X = None):
"""
Run a single optimization step and return the next locations to evaluate the objective.
The number of suggested locations equals batch_size.
:param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
:param pending_X: matrix of input configurations that are in a pending state (i.e., do not have an evaluation yet) (default, None).
:param ignored_X: matrix of input configurations that the user black-lists, i.e., those configurations will not be suggested again (default, None).
"""
self.model_parameters_iterations = None
self.num_acquisitions = 0
self.context = context
self._update_model(self.normalization_type)
suggested_locations = self._compute_next_evaluations(pending_zipped_X = pending_X, ignored_zipped_X = ignored_X)
return suggested_locations | Run a single optimization step and return the next locations to evaluate the objective.
The number of suggested locations equals batch_size.
:param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
:param pending_X: matrix of input configurations that are in a pending state (i.e., do not have an evaluation yet) (default, None).
:param ignored_X: matrix of input configurations that the user black-lists, i.e., those configurations will not be suggested again (default, None). |
def sojourn_time(p):
"""
Calculate sojourn time based on a given transition probability matrix.
Parameters
----------
p : array
(k, k), a Markov transition probability matrix.
Returns
-------
: array
(k, ), sojourn times. Each element is the expected time a Markov
chain spends in each states before leaving that state.
Notes
-----
Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
chains.
Examples
--------
>>> from giddy.markov import sojourn_time
>>> import numpy as np
>>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
>>> sojourn_time(p)
array([2., 1., 2.])
"""
p = np.asarray(p)
pii = p.diagonal()
if not (1 - pii).all():
print("Sojourn times are infinite for absorbing states!")
return 1 / (1 - pii) | Calculate sojourn time based on a given transition probability matrix.
Parameters
----------
p : array
(k, k), a Markov transition probability matrix.
Returns
-------
: array
(k, ), sojourn times. Each element is the expected time a Markov
chain spends in each states before leaving that state.
Notes
-----
Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
chains.
Examples
--------
>>> from giddy.markov import sojourn_time
>>> import numpy as np
>>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
>>> sojourn_time(p)
array([2., 1., 2.]) |
def set_breakpoint(self, file_name, line_number, condition=None, enabled=True):
""" Create a breakpoint, register it in the class's lists and returns
a tuple of (error_message, break_number)
"""
c_file_name = self.canonic(file_name)
import linecache
line = linecache.getline(c_file_name, line_number)
if not line:
return "Line %s:%d does not exist." % (c_file_name, line_number), None
bp = IKBreakpoint(c_file_name, line_number, condition, enabled)
if self.pending_stop or IKBreakpoint.any_active_breakpoint:
self.enable_tracing()
else:
self.disable_tracing()
return None, bp.number | Create a breakpoint, register it in the class's lists and returns
a tuple of (error_message, break_number) |
def setup(self, app):
'''
Setup properties from parent app on the command
'''
self.logger = app.logger
self.shell.logger = self.logger
if not self.command_name:
raise EmptyCommandNameException()
self.app = app
self.arguments_declaration = self.arguments
self.arguments = app.arguments
if self.use_subconfig:
_init_config(self)
else:
self.config = self.app.config | Setup properties from parent app on the command |
def pre_init(self, value, obj):
"""Convert a string value to JSON only if it needs to be deserialized.
SubfieldBase metaclass has been modified to call this method instead of
to_python so that we can check the obj state and determine if it needs to be
deserialized"""
try:
if obj._state.adding:
# Make sure the primary key actually exists on the object before
# checking if it's empty. This is a special case for South datamigrations
# see: https://github.com/bradjasper/django-jsonfield/issues/52
if getattr(obj, "pk", None) is not None:
if isinstance(value, six.string_types):
try:
return json.loads(value, **self.load_kwargs)
except ValueError:
raise ValidationError(_("Enter valid JSON"))
except AttributeError:
# south fake meta class doesn't create proper attributes
# see this:
# https://github.com/bradjasper/django-jsonfield/issues/52
pass
return value | Convert a string value to JSON only if it needs to be deserialized.
SubfieldBase metaclass has been modified to call this method instead of
to_python so that we can check the obj state and determine if it needs to be
deserialized |
def bounds(self):
"""Finds min/max for bounds across blocks
Returns:
tuple(float):
length 6 tuple of floats containing min/max along each axis
"""
bounds = [np.inf,-np.inf, np.inf,-np.inf, np.inf,-np.inf]
def update_bounds(ax, nb, bounds):
"""internal helper to update bounds while keeping track"""
if nb[2*ax] < bounds[2*ax]:
bounds[2*ax] = nb[2*ax]
if nb[2*ax+1] > bounds[2*ax+1]:
bounds[2*ax+1] = nb[2*ax+1]
return bounds
# get bounds for each block and update
for i in range(self.n_blocks):
try:
bnds = self[i].GetBounds()
for a in range(3):
bounds = update_bounds(a, bnds, bounds)
except AttributeError:
# Data object doesn't have bounds or is None
pass
return bounds | Finds min/max for bounds across blocks
Returns:
tuple(float):
length 6 tuple of floats containing min/max along each axis |
def prior_from_config(cp, variable_params, prior_section,
constraint_section):
"""Gets arguments and keyword arguments from a config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
variable_params : list
List of model parameter names.
prior_section : str
Section to read prior(s) from.
constraint_section : str
Section to read constraint(s) from.
Returns
-------
pycbc.distributions.JointDistribution
The prior.
"""
# get prior distribution for each variable parameter
logging.info("Setting up priors for each parameter")
dists = distributions.read_distributions_from_config(cp, prior_section)
constraints = distributions.read_constraints_from_config(
cp, constraint_section)
return distributions.JointDistribution(variable_params, *dists,
constraints=constraints) | Gets arguments and keyword arguments from a config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
variable_params : list
List of of model parameter names.
prior_section : str
Section to read prior(s) from.
constraint_section : str
Section to read constraint(s) from.
Returns
-------
pycbc.distributions.JointDistribution
The prior. |
def WriteSerialized(cls, attribute_container):
"""Writes an attribute container to serialized form.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
str: A JSON string containing the serialized form.
"""
json_dict = cls.WriteSerializedDict(attribute_container)
return json.dumps(json_dict) | Writes an attribute container to serialized form.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
str: A JSON string containing the serialized form. |
def packageipa(env, console):
"""
Package the built app as an ipa for distribution in iOS App Store
"""
ipa_path, app_path = _get_ipa(env)
output_dir = path.dirname(ipa_path)
if path.exists(ipa_path):
console.quiet('Removing %s' % ipa_path)
os.remove(ipa_path)
zf = zipfile.ZipFile(ipa_path, mode='w')
payload_dir = 'Payload'
for (dirpath, dirnames, filenames) in os.walk(app_path):
for filename in filenames:
filepath = path.join(dirpath, filename)
prefix = path.commonprefix([filepath, path.dirname(app_path)])
write_path = path.join(payload_dir, filepath[len(prefix) + 1:])
console.quiet('Write %s' % write_path)
zf.write(filepath, write_path)
zf.close()
console.quiet('Packaged %s' % ipa_path) | Package the built app as an ipa for distribution in iOS App Store |
def _default_output_dir():
"""Default output directory."""
try:
dataset_name = gin.query_parameter("inputs.dataset_name")
except ValueError:
dataset_name = "random"
dir_name = "{model_name}_{dataset_name}_{timestamp}".format(
model_name=gin.query_parameter("train.model").configurable.name,
dataset_name=dataset_name,
timestamp=datetime.datetime.now().strftime("%Y%m%d_%H%M"),
)
dir_path = os.path.join("~", "trax", dir_name)
print()
trax.log("No --output_dir specified")
return dir_path | Default output directory. |
def thaicheck(word: str) -> bool:
"""
Check if a word is an "authentic Thai word"
:param str word: word
:return: True or False
"""
pattern = re.compile(r"[ก-ฬฮ]", re.U)  # pattern for checking consonants
res = re.findall(pattern, word)  # extract all the consonants
if res == []:
return False
if _check1(res[len(res) - 1]) or len(res) == 1:
if _check2(word):
word2 = list(word)
i = 0
thai = True
if word in [
"ฆ่า",
"เฆี่ยน",
"ศึก",
"ศอก",
"เศิก",
"เศร้า",
"ธ",
"ณ",
"ฯพณฯ",
"ใหญ่",
"หญ้า",
"ควาย",
"ความ",
"กริ่งเกรง",
"ผลิ",
]:  # exceptions: these words are authentic Thai words
return True
while i < len(word2) and thai:
thai = _check3(word2[i])
if not thai:
return False
i += 1
return True
return False
if word in ["กะ", "กระ", "ปะ", "ประ"]:
return True
return False | Check if a word is an "authentic Thai word"
:param str word: word
:return: True or False |
def _do_connect(self):
""" Connect to the remote. """
self.load_system_host_keys()
if self.username is None or self.port is None:
self._configure()
try:
self.connect(hostname=self.hostname,
port=self.port,
username=self.username,
key_filename=self.key_filename,
sock=self.proxy)
except socket.error as e:
raise GerritError("Failed to connect to server: %s" % e)
try:
version_string = self._transport.remote_version
pattern = re.compile(r'^.*GerritCodeReview_([a-z0-9-\.]*) .*$')
self.remote_version = _extract_version(version_string, pattern)
except AttributeError:
self.remote_version = None | Connect to the remote. |
def _unsign_data(self, data, options):
'''Verify and remove signature'''
if options['signature_algorithm_id'] not in self.signature_algorithms:
raise Exception('Unknown signature algorithm id: %d'
% options['signature_algorithm_id'])
signature_algorithm = \
self.signature_algorithms[options['signature_algorithm_id']]
algorithm = self._get_algorithm_info(signature_algorithm)
key_salt = ''
if algorithm['salt_size']:
key_salt = data[-algorithm['salt_size']:]
data = data[:-algorithm['salt_size']]
key = self._generate_key(options['signature_passphrase_id'],
self.signature_passphrases, key_salt, algorithm)
data = self._decode(data, algorithm, key)
return data | Verify and remove signature |
def node_list_to_coordinate_lines(G, node_list, use_geom=True):
"""
Given a list of nodes, return a list of lines that together follow the path
defined by the list of nodes.
Parameters
----------
G : networkx multidigraph
node_list : list
the route as a list of nodes
use_geom : bool
if True, use the spatial geometry attribute of the edges to draw
geographically accurate edges, rather than just lines straight from node
to node
Returns
-------
lines : list of lines given as pairs ( (x_start, y_start), (x_stop, y_stop) )
"""
edge_nodes = list(zip(node_list[:-1], node_list[1:]))
lines = []
for u, v in edge_nodes:
# if there are parallel edges, select the shortest in length
data = min(G.get_edge_data(u, v).values(), key=lambda x: x['length'])
# if it has a geometry attribute (ie, a list of line segments)
if 'geometry' in data and use_geom:
# add them to the list of lines to plot
xs, ys = data['geometry'].xy
lines.append(list(zip(xs, ys)))
else:
# if it doesn't have a geometry attribute, the edge is a straight
# line from node to node
x1 = G.nodes[u]['x']
y1 = G.nodes[u]['y']
x2 = G.nodes[v]['x']
y2 = G.nodes[v]['y']
line = [(x1, y1), (x2, y2)]
lines.append(line)
return lines | Given a list of nodes, return a list of lines that together follow the path
defined by the list of nodes.
Parameters
----------
G : networkx multidigraph
node_list : list
the route as a list of nodes
use_geom : bool
if True, use the spatial geometry attribute of the edges to draw
geographically accurate edges, rather than just lines straight from node
to node
Returns
-------
lines : list of lines given as pairs ( (x_start, y_start), (x_stop, y_stop) ) |
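A hedged usage sketch, assuming node_list_to_coordinate_lines above is importable and networkx is installed; the two-node graph below is synthetic and only carries the 'x'/'y' node attributes and 'length' edge attribute the function expects:
import networkx as nx

G = nx.MultiDiGraph()
G.add_node(1, x=0.0, y=0.0)
G.add_node(2, x=1.0, y=1.0)
G.add_edge(1, 2, length=1.4)  # no 'geometry' attribute, so a straight line is used

lines = node_list_to_coordinate_lines(G, [1, 2], use_geom=True)
print(lines)  # [[(0.0, 0.0), (1.0, 1.0)]]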
def with_timeout(timeout, d, reactor=reactor):
"""Returns a `Deferred` that is in all respects equivalent to `d`, e.g. when `cancel()` is called on the returned `Deferred`,
the wrapped `Deferred` will also be cancelled; however, a `Timeout` will be fired after the `timeout` number of
seconds if `d` has not fired by that time.
When a `Timeout` is raised, `d` will be cancelled. It is up to the caller to worry about how `d` handles
cancellation, i.e. whether it has full/true support for cancelling, or does cancelling it just prevent its callbacks
from being fired but doesn't cancel the underlying operation.
"""
if timeout is None or not isinstance(d, Deferred):
return d
ret = Deferred(canceller=lambda _: (
d.cancel(),
timeout_d.cancel(),
))
timeout_d = sleep(timeout, reactor)
timeout_d.addCallback(lambda _: (
d.cancel(),
ret.errback(Failure(Timeout())) if not ret.called else None,
))
timeout_d.addErrback(lambda f: f.trap(CancelledError))
d.addCallback(lambda result: (
timeout_d.cancel(),
ret.callback(result),
))
d.addErrback(lambda f: (
if_(not f.check(CancelledError), lambda: (
timeout_d.cancel(),
ret.errback(f),
)),
))
return ret | Returns a `Deferred` that is in all respects equivalent to `d`, e.g. when `cancel()` is called on the returned `Deferred`,
the wrapped `Deferred` will also be cancelled; however, a `Timeout` will be fired after the `timeout` number of
seconds if `d` has not fired by that time.
When a `Timeout` is raised, `d` will be cancelled. It is up to the caller to worry about how `d` handles
cancellation, i.e. whether it has full/true support for cancelling, or does cancelling it just prevent its callbacks
from being fired but doesn't cancel the underlying operation. |
def reset(self):
""" (re)set all instance attributes to default.
Every attribute is set to ``None``, except :attr:`author`
and :attr:`failures` which are set to ``set()``.
"""
self.config = None
self.html = None
self.parsed_tree = None
self.tidied = False
self.next_page_link = None
self.title = None
self.author = set()
self.language = None
self.date = None
self.body = None
self.failures = set()
self.success = False
LOGGER.debug(u'Reset extractor instance to defaults/empty.') | (re)set all instance attributes to default.
Every attribute is set to ``None``, except :attr:`author`
and :attr:`failures` which are set to ``set()``. |
async def getiter(self, url: str, url_vars: Dict[str, str] = {},
*, accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> AsyncGenerator[Any, None]:
"""Return an async iterable for all the items at a specified endpoint."""
data, more = await self._make_request("GET", url, url_vars, b"", accept,
jwt=jwt, oauth_token=oauth_token)
if isinstance(data, dict) and "items" in data:
data = data["items"]
for item in data:
yield item
if more:
# `yield from` is not supported in coroutines.
async for item in self.getiter(more, url_vars, accept=accept,
jwt=jwt, oauth_token=oauth_token):
yield item | Return an async iterable for all the items at a specified endpoint. |
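A hedged usage sketch for getiter, assuming a gidgethub-style client such as the aiohttp backend; the repository coordinates and token are placeholders:
import aiohttp
from gidgethub.aiohttp import GitHubAPI

async def print_issue_titles(owner, repo, token):
    async with aiohttp.ClientSession() as session:
        gh = GitHubAPI(session, "example-agent", oauth_token=token)
        # getiter walks paginated results transparently via the recursive call above
        async for issue in gh.getiter("/repos/{owner}/{repo}/issues",
                                      {"owner": owner, "repo": repo}):
            print(issue["title"])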
def serialize(ad_objects, output_format='json', indent=2, attributes_only=False):
"""Serialize the object to the specified format
:param ad_objects list: A list of ADObjects to serialize
:param output_format str: The output format, json or yaml. Defaults to json
:param indent int: The number of spaces to indent, defaults to 2
:param attributes_only bool: Only serialize the attributes found in the first record of the list
of ADObjects
:return: A serialized, formatted representation of the list of ADObjects
:rtype: str
"""
# If the request is to only show attributes for objects returned
# in the query, overwrite ad_objects with only those attributes present in
# the first object in the list
if attributes_only:
ad_objects = [key for key in sorted(ad_objects[0].keys())]
if output_format == 'json':
return json.dumps(ad_objects, indent=indent, ensure_ascii=False, sort_keys=True)
elif output_format == 'yaml':
return yaml.dump(sorted(ad_objects), indent=indent) | Serialize the object to the specified format
:param ad_objects list: A list of ADObjects to serialize
:param output_format str: The output format, json or yaml. Defaults to json
:param indent int: The number of spaces to indent, defaults to 2
:param attributes_only bool: Only serialize the attributes found in the first record of the list
of ADObjects
:return: A serialized, formatted representation of the list of ADObjects
:rtype: str |
def linear_trend_timewise(x, param):
"""
Calculate a linear least-squares regression for the values of the time series versus the sequence from 0 to
length of the time series minus one.
This feature uses the index of the time series to fit the model, which must be of a datetime
dtype.
The parameters control which of the characteristics are returned.
Possible extracted attributes are "pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of
linregress for more information.
:param x: the time series to calculate the feature of. The index must be datetime.
:type x: pandas.Series
:param param: contains dictionaries {"attr": x} with x a string, the attribute name of the regression model
:type param: list
:return: the different feature values
:return type: list
"""
ix = x.index
# Get differences between each timestamp and the first timestamp in seconds.
# Then convert to hours and reshape for linear regression
times_seconds = (ix - ix[0]).total_seconds()
times_hours = np.asarray(times_seconds / float(3600))
linReg = linregress(times_hours, x.values)
return [("attr_\"{}\"".format(config["attr"]), getattr(linReg, config["attr"]))
for config in param] | Calculate a linear least-squares regression for the values of the time series versus the sequence from 0 to
length of the time series minus one.
This feature uses the index of the time series to fit the model, which must be of a datetime
dtype.
The parameters control which of the characteristics are returned.
Possible extracted attributes are "pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of
linregress for more information.
:param x: the time series to calculate the feature of. The index must be datetime.
:type x: pandas.Series
:param param: contains dictionaries {"attr": x} with x a string, the attribute name of the regression model
:type param: list
:return: the different feature values
:return type: list |
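A minimal sketch, assuming linear_trend_timewise above is importable (it matches tsfresh's feature calculator of the same name); the hourly series below is synthetic, so the fitted slope is 1.0 per hour:
import numpy as np
import pandas as pd

idx = pd.date_range("2020-01-01", periods=5, freq="H")
x = pd.Series(np.arange(5, dtype=float), index=idx)
param = [{"attr": "slope"}, {"attr": "rvalue"}]
print(linear_trend_timewise(x, param))
# [('attr_"slope"', 1.0), ('attr_"rvalue"', 1.0)] (approximately)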
def __build_cmd_maps(cls):
"""Build the mapping from command names to method names.
One command name maps to at most one method.
Multiple command names can map to the same method.
Only used by __init__() to initialize self._cmd_map. MUST NOT be used
elsewhere.
Returns:
A tuple (cmd_map_all, cmd_map_visible, cmd_map_internal).
"""
cmd_map_all = {}
cmd_map_visible = {}
cmd_map_internal = {}
for name in dir(cls):
obj = getattr(cls, name)
if iscommand(obj):
for cmd in getcommands(obj):
if cmd in cmd_map_all.keys():
raise PyShellError("The command '{}' already has cmd"
" method '{}', cannot register a"
" second method '{}'.".format( \
cmd, cmd_map_all[cmd], obj.__name__))
cmd_map_all[cmd] = obj.__name__
if isvisiblecommand(obj):
cmd_map_visible[cmd] = obj.__name__
if isinternalcommand(obj):
cmd_map_internal[cmd] = obj.__name__
return cmd_map_all, cmd_map_visible, cmd_map_internal | Build the mapping from command names to method names.
One command name maps to at most one method.
Multiple command names can map to the same method.
Only used by __init__() to initialize self._cmd_map. MUST NOT be used
elsewhere.
Returns:
A tuple (cmd_map_all, cmd_map_visible, cmd_map_internal). |
def load(self, filename, params=None, force=False, depthrange=None, timerange=None, output_is_dict=True, **kwargs):
"""
NetCDF data loader
:parameter filename: file name
:parameter params: a list of variables to load (default : load ALL variables).
:parameter depthrange: if a depth dimension is found, subset along this dimension.
:parameter timerange: if a time dimension is found, subset along this dimension.
.. note:: using :attr:`altimetry.tools.nctools.limit` allows subsetting to a given region.
:parameter kwargs: additional arguments for subsetting along given dimensions.
.. note:: You can index along any dimension by providing the name of the dimensions to subsample along. Values associated to the provided keywords should be a length 2 or 3 tuple (min,max,<step>) (cf. :func:`altimetry.data.nctools.load_ncVar`).
:keyword output_is_dict: data structures are dictionaries (eg. my_hydro_data.variable['data']). If False, uses an object with attributes (eg. my_hydro_data.variable.data).
:return {type:dict} outStr: Output data structure containing all recorded parameters as specified by the NetCDF file PARAMETER list.
:author: Renaud Dussurget
"""
if (params is not None) & isinstance(params,str): params=[params]
#Open file
self._filename = filename
try:
ncf = ncfile(self._filename, "r")
except Exception as e:
warn(repr(e),stacklevel=2)
return {}
#Load global attributes
akeys = ncf.ncattrs()
attrStr=OrderedDict()
for A in akeys : attrStr.update({A:ncf.getncattr(A)})
#Get list of recorded parameters:
dum = ncf.variables.keys()
nparam = np.shape(dum)[0]
# par_list=np.array([''.join(ncf.variables.keys()[0,i,:].compressed()) for i in np.arange(nparam)])
par_list = np.array(['{0}'.format(v) for v in ncf.variables.keys()])
#remove empty items and update nparam
par_list = par_list.compress([len(par) != 0 for par in par_list])
nparam = par_list.size
if nparam == 0 : self.Error('File has no data ({0})'.format(self._filename))
#Get dimensions
ncdimlist = np.array(['{0}'.format(d) for d in ncf.dimensions.keys()])
ndims = len(ncdimlist)
dimStr = OrderedDict()
dimStr.update({'_ndims':ndims})
if ndims == 0 : self.Error('File has no dimensions ({0})'.format(self._filename))
#Check for the presence of strategic dimensions
checkedDims = np.array(['lon', 'lat', 'time', 'depth'])
existDim = -np.ones(4,dtype=int)
if not self.use_local_dims :
for i,d in enumerate(ncdimlist) :
if ( (d.lower().startswith('lon')) | (d.lower().find('longitude') != -1) ) & (d.find('LatLon') ==-1) : existDim[0]=i
if ( (d.lower().startswith('lat')) | (d.lower().find('latitude') != -1) ) & (d.find('LatLon') ==-1): existDim[1]=i
if (d.lower().startswith('time')) | (d.lower().startswith('date')) : existDim[2]=i
if (d.lower().startswith('lev')) | (d.lower().startswith('dep')) : existDim[3]=i
# existDim[0] = np.where([d.lower().startswith('lon') | (d.lower().find('longitude') != -1) for d in ncdimlist])[0]
# existDim[1] = np.where([d.lower().startswith('lat') | (d.lower().find('latitude') != -1) for d in ncdimlist])[0]
# existDim[2] = np.where([(d.lower().startswith('time')) | (d.lower().startswith('date')) for d in ncdimlist])[0]
# existDim[3] = np.where([(d.lower().startswith('lev')) | (d.lower().startswith('dep')) for d in ncdimlist])[0]
identified = existDim > -1
# checkedDims[identified]=checkedDims[identified][existDim.compress(identified).astype(int)]
# checkedDims=checkedDims[identified]
# for cn, vn in enumerate(checkedDims) : dimStr.update({cn:len(ncf.dimensions[vn])})
#Update dimension structure with identified dimensions
#Load dimensional variables
#TODO : Add scaling here in case...
for i,d in enumerate(existDim) :
if identified[i] :
dimStr.update({ncdimlist[d]:len(ncf.dimensions[ncdimlist[d]])}) #Append dimension
cmd = 'load_ncVar(\'' + ncdimlist[d] + '\',nc=ncf)'
self.message(4, 'loading : {0}={1}'.format(checkedDims[i],cmd))
locals()[checkedDims[i]]=load_ncVar(ncdimlist[d], nc=ncf,**kwargs)
missdims=set(ncdimlist)
missdims.difference_update(ncdimlist[existDim[identified]])
missdims=list(missdims)
for i,d in enumerate(missdims) :
dimStr.update({d:len(ncf.dimensions[d])})
if ncf.variables.has_key(d) :
cmd = 'load_ncVar(\'' + d + '\',nc=ncf)'
self.message(4, 'loading : {0}={1}'.format(d,cmd))
locals()[d]=load_ncVar(d, nc=ncf,**kwargs)
#If the variable associated to the dimension do not exist, generate it
else :
self.message(1, '[WARNING] NetCDF file not standard - creating data for {0} dimension'.format(d))
ndim=len(ncf.dimensions[d])
cmd = '=var'
self.message(4, 'loading : {0}={1}'.format(d,cmd))
locals()[d]={'_dimensions':{'_ndims':1,d:ndim}, 'data':np.arange(ndim)}
# #Update dimension structure with identified dimensions
# for cn, vn in zip(*(checkedDims[identified], ncdimlist[identified])) : dimStr.update({cn:len(ncf.dimensions[vn])})
# for vn in ncdimlist[~identified] : dimStr.update({vn:len(ncf.dimensions[vn])})
#Load dimensional variables
#TODO : Add scaling here in case...
# for cn, vn in zip(*(checkedDims[identified], ncdimlist[identified])) :
## cmd = 'self.'+cn+'=ncf.variables[\''+vn+'\'][:]'
# cmd = cn + '=load_ncVar(\'' + vn + '\',nc=ncf)'
# self.message(4, 'exec : ' + cmd)
# exec(cmd)
#
# for vn in ncdimlist[~identified] :
## cmd = 'self.'+vn+'=ncf.variables[\''+vn+'\'][:]'
# cmd = vn + '=load_ncVar(\'' + vn + '\',nc=ncf)'
# self.message(4, 'exec : ' + cmd)
# exec(cmd)
#Update dimlist with dimensions present in the object
# dimlist = np.append(checkedDims[identified], ncdimlist[~identified])
dimlist=ncdimlist.copy()
# dimlist[existDim[identified]]=checkedDims[identified]
if identified.sum() > 0 : dimlist[existDim[identified]]=checkedDims[identified]
else : dimlist = dimlist[[]]
# for d in ncdimlist[checkedDims[identified]] :
# if not d.startswith('_') : dimlist = np.append(dimlist,d)
# dimlist=[(d if not d.startswith('_') else None) for d in dimStr.keys()]
if params is not None :
if force : par_list = [i.upper() for i in params]
else :par_list = list(set(params).intersection(par_list))
else : par_list = par_list.tolist()
#remove dimensional variable
for d in ncdimlist[existDim[identified]] :
par_list.pop(par_list.index(d))
self.message(2, 'Recorded parameters : ' + str(nparam) + ' -> ' + str(par_list))
#Extract within limits
if (existDim[0] > -1) & (existDim[1] > -1):
llind, flag = in_limits(lon['data'],lat['data'], limit=self.limit)
if isinstance(flag,tuple) :
lon['data'] = recale(lon['data'].compress(flag[0]),degrees=True)
lon['_dimensions'][lon['_dimensions'].keys()[1]] = flag[0].sum()
lat['data'] = lat['data'].compress(flag[1])
lat['_dimensions'][lat['_dimensions'].keys()[1]] = flag[1].sum()
else :
lon['data'] = recale(lon['data'].compress(flag),degrees=True)
lon['_dimensions'][lon['_dimensions'].keys()[1]] = flag.sum()
lat['data'] = lat['data'].compress(flag)
lat['_dimensions'][lat['_dimensions'].keys()[1]] = flag.sum()
locals()[ncdimlist[existDim[0]]]=lon.copy()
locals()[ncdimlist[existDim[1]]]=lat.copy()
dimStr.update({ncdimlist[existDim[0]]:len(lon['data'])})
dimStr.update({ncdimlist[existDim[1]]:len(lat['data'])})
# self.message(4, 'self.lon & self.lat updated')
if (existDim[2] > -1):
if (timerange is not None) : timeflag = (time['data'] >= np.min(timerange)) & (time['data'] <= np.max(timerange))
else : timeflag = np.ones(len(time['data']), dtype=bool)
if timeflag.sum() == 0 : self.Error('No data within specified depth range (min/max = {0}/{1})'.format(np.min(time), np.max(time)))
time['data'] = time['data'].compress(timeflag)
time['_dimensions'][time['_dimensions'].keys()[1]] = timeflag.sum()
locals()[ncdimlist[existDim[2]]]=time.copy()
dimStr.update({ncdimlist[existDim[2]]:len(time['data'])})
# self.message(4, 'self.lon & self.lat updated')
#Extract within depth range
if (existDim[3] > -1):
if (depthrange is not None) : depthflag = (depth['data'] >= np.min(depthrange)) & (depth['data'] <= np.max(depthrange))
else : depthflag = np.ones(len(depth['data']), dtype=bool)
if depthflag.sum() == 0 : self.Error('No data within specified depth range (min/max = {0}/{1})'.format(np.min(depth), np.max(depth)))
depth['data'] = depth['data'].compress(depthflag)
depth['_dimensions'][depth['_dimensions'].keys()[1]] = depthflag.sum()
locals()[ncdimlist[existDim[3]]]=depth.copy()
dimStr.update({ncdimlist[existDim[3]]:len(depth['data'])})
#Create output data structure
outStr = OrderedDict()
outStr.update({'_dimensions':dimStr})
outStr.update({'_attributes':attrStr})
if (existDim[0] > -1) : outStr.update({ncdimlist[existDim[0]]:lon})
if (existDim[1] > -1) : outStr.update({ncdimlist[existDim[1]]:lat})
if (existDim[2] > -1) : outStr.update({ncdimlist[existDim[2]]:time})
if (existDim[3] > -1) : outStr.update({ncdimlist[existDim[3]]:depth})
#Update object with remaining variables
for d in dimlist.compress([not outStr.has_key(f) for f in dimlist]) :
# cmd = 'outStr.update({\''+d+'\':'+d+'[\'data\']})'
cmd = 'outStr.update({\''+d+'\':'+d+'})'
self.message(4, 'exec : '+cmd)
exec(cmd)
ncdimStr=outStr.copy()
#Get dimension lengths
shape=()
for d in dimlist: shape += np.shape(locals()[d]['data'])
ndims = np.size(shape)
# #Create dimension structure
# curDim = [str(dimname) for dimname in dimStr.keys()[1:]] #[str(dimname) for dimname in ncf.variables['LONGITUDE'].dimensions]
# curDimval = [dimStr[dim] for dim in curDim] #[len(ncf.dimensions[dimname]) for dimname in curDim]
#
# outStr={'_dimensions':{'_ndims':ndims,'nbpoints':sz[0]},'lon':lon,'lat':lat,'date':date}
# for d in dimlist : outStr.update({d:self.__dict__[d]})
#Sort NCDIMLIST to match DIMLIST
# ncdimlist[np.sort(existDim.astype(np.int)[identified])]=ncdimlist[existDim[identified].tolist()]
#Setup kwargs with current dimensionnal properties
for d, ncd in zip(*(dimlist,ncdimlist)):
if not kwargs.has_key(ncd) :
if kwargs.has_key(d) :
kwargs.update({ncd:kwargs[d]})
del kwargs[d]
else :
dvar=ncdimStr[d]['data']
if isinstance(dvar,np.ma.masked_array) : kwargs.update({ncd:(np.nanmin(dvar.data),np.nanmax(dvar.data))})
else : kwargs.update({ncd:(np.nanmin(dvar),np.nanmax(dvar))})
# else :
# outStr['NbLatitudes']['data']
for param in par_list :
# dumVar = load_ncVar(param, nc=ncf, lon=llind[0], lat=llind[1], time=np.arange(len(time['data'])).compress(timeflag),**kwargs) #Load variables
# dumVar = load_ncVar(param, nc=ncf, longitude=(self.limit[1],self.limit[3]), latitude=(self.limit[0],self.limit[2]), time=(self.time.min(),self.time.max()),**kwargs) #Load variables
dumVar = load_ncVar(param, nc=ncf, **kwargs) #Load variables
# dimStr = dumVar['_dimensions']
#update dimensions
# curDim = [str(dimname) for dimname in dimStr.keys()[1:]] #[str(dimname) for dimname in ncf.variables['LONGITUDE'].dimensions]
# curDimval = [dimStr[dim] for dim in curDim] #[len(ncf.dimensions[dimname]) for dimname in curDim]
# curDim = dimlist[where_list(curDim, ncdimlist.tolist())] #Convert to object dimension names
# curDim = dimlist[where_list(curDim, dimlist.tolist())] #Convert to object dimension names (???)
## curDim = [str(dimname) for dimname in ncf.variables[param].dimensions]
## curDimval = [len(ncf.dimensions[dimname]) for dimname in curDim]
# flag = [(np.array(dimname) == outStr['_dimensions'].keys()).sum() == 0 for dimname in curDim] #find dimensions to update
# dimUpdate = np.array(curDim).compress(flag)
# for enum in enumerate(dimUpdate) :
# self.message(2, 'Appending dimensions {0}:{1} to dataStructure'.format(enum[1], np.array(curDimval).compress(flag)[enum[0]]))
# outStr['_dimensions'].update({enum[1]:np.array(curDimval).compress(flag)[enum[0]]}) #Append new dimension
# outStr['_dimensions']['_ndims'] += 1 #update dimension counts
# cmd = 'dumStr = {\'' + param + '\':dumVar[\'data\']}'
#Set list as variable with attributes
# if (not output_is_dict):
# var=dumVar.pop('data')
# for k in dumVar.keys():
# setattr(var, k, dumVar.pop(k))
# dumVar=var.copy()
cmd = 'dumStr = {\'' + param + '\':dumVar}'
self.message(4, 'exec : ' + cmd)
exec(cmd)
outStr.update(dumStr)
#Update output dimensions with extracted dimensions
for ddum in dumStr[param]['_dimensions'].keys()[1:] :
if outStr['_dimensions'].get(ddum) != dumStr[param]['_dimensions'][ddum] : outStr['_dimensions'][ddum]=dumStr[param]['_dimensions'][ddum]
# cmd = 'self.'+param+'='
ncf.close()
return outStr | NetCDF data loader
:parameter filename: file name
:parameter params: a list of variables to load (default : load ALL variables).
:parameter depthrange: if a depth dimension is found, subset along this dimension.
:parameter timerange: if a time dimension is found, subset along this dimension.
.. note:: using :attr:`altimetry.tools.nctools.limit` allows subsetting to a given region.
:parameter kwargs: additional arguments for subsetting along given dimensions.
.. note:: You can index along any dimension by providing the name of the dimensions to subsample along. Values associated to the provided keywords should be a length 2 or 3 tuple (min,max,<step>) (cf. :func:`altimetry.data.nctools.load_ncVar`).
:keyword output_is_dict: data structures are dictionaries (eg. my_hydro_data.variable['data']). If False, uses an object with attributes (eg. my_hydro_data.variable.data).
:return {type:dict} outStr: Output data structure containing all recorded parameters as specified by the NetCDF file PARAMETER list.
:author: Renaud Dussurget |
def minor_releases(self, manager):
"""
Return all minor release line labels found in ``manager``.
"""
# TODO: yea deffo need a real object for 'manager', heh. E.g. we do a
# very similar test for "do you have any actual releases yet?"
# elsewhere. (This may be fodder for changing how we roll up
# pre-major-release features though...?)
return [
key for key, value in six.iteritems(manager)
if any(x for x in value if not x.startswith('unreleased'))
] | Return all minor release line labels found in ``manager``. |
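A toy sketch of the data shape assumed above: manager maps release-line labels to buckets of keys, and only lines with at least one entry not prefixed by 'unreleased' are kept. The sample dict is hypothetical:
import six

manager = {
    "1.0": ["1.0.1", "unreleased_1.0_bugfix"],  # has a real release, so it is kept
    "1.1": ["unreleased_1.1_bugfix"],           # only unreleased buckets, so it is dropped
}
print([key for key, value in six.iteritems(manager)
       if any(x for x in value if not x.startswith("unreleased"))])
# ['1.0']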
def splitter(structured):
"""
Separates structured data into a list of actives and a list of decoys. Actives are labeled with a '1' in their status
fields, while decoys are labeled with a '0' in their status fields.
:param structured: either roc_structure or score_structure.
roc_structure: list [(id, best_score, best_query, status, fpf, tpf), ..., ]
score_structure: list [(id, best_score, best_query, status, net decoy count, net active count), ...,]
:return: actives: list [(id, best_score, best_query, status = 1, fpf/net decoy count, tpf/net active count), ..., ]
:return decoys: list [(id, best_score, best_query, status = 0, fpf/net decoy count, tpf/net active count), ..., ]
"""
actives = []
decoys = []
for mol in structured:
status = mol[3]
if status == '1':
actives.append(mol)
elif status == '0':
decoys.append(mol)
return actives, decoys | Separates structured data into a list of actives and a list of decoys. Actives are labeled with a '1' in their status
fields, while decoys are labeled with a '0' in their status fields.
:param structured: either roc_structure or score_structure.
roc_structure: list [(id, best_score, best_query, status, fpf, tpf), ..., ]
score_structure: list [(id, best_score, best_query, status, net decoy count, net active count), ...,]
:return: actives: list [(id, best_score, best_query, status = 1, fpf/net decoy count, tpf/net active count), ..., ]
:return decoys: list [(id, best_score, best_query, status = 0, fpf/net decoy count, tpf/net active count), ..., ] |
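A minimal usage sketch with synthetic rows in the documented tuple layout, assuming splitter above is importable:
structured = [
    ("mol1", 9.1, "query1", "1", 0.0, 0.5),  # active (status '1')
    ("mol2", 3.2, "query1", "0", 0.5, 0.5),  # decoy (status '0')
]
actives, decoys = splitter(structured)
print(len(actives), len(decoys))  # 1 1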
def scope(self, *args, **kwargs):
# type: (*Any, **Any) -> Scope
"""Return a single scope based on the provided name.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:return: a single :class:`models.Scope`
:raises NotFoundError: When no `Scope` is found
:raises MultipleFoundError: When more than a single `Scope` is found
"""
_scopes = self.scopes(*args, **kwargs)
if len(_scopes) == 0:
raise NotFoundError("No scope fits criteria")
if len(_scopes) != 1:
raise MultipleFoundError("Multiple scopes fit criteria")
return _scopes[0] | Return a single scope based on the provided name.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:return: a single :class:`models.Scope`
:raises NotFoundError: When no `Scope` is found
:raises MultipleFoundError: When more than a single `Scope` is found |
def get_sub_extractors_by_property(extractor, property_name, return_property_list=False):
'''Divides Recording or Sorting Extractor based on the property_name (e.g. group)
Parameters
----------
extractor: RecordingExtractor or SortingExtractor
The extractor to be subdivided into subextractors
property_name: str
The property used to subdivide the extractor
return_property_list: bool
If True the property list is returned
Returns
-------
List of subextractors
'''
if isinstance(extractor, RecordingExtractor):
if property_name not in extractor.get_channel_property_names():
raise ValueError("'property_name' must be a property of the recording channels")
else:
sub_list = []
recording = extractor
properties = np.array([recording.get_channel_property(chan, property_name)
for chan in recording.get_channel_ids()])
prop_list = np.unique(properties)
for prop in prop_list:
prop_idx = np.where(prop == properties)
chan_idx = list(np.array(recording.get_channel_ids())[prop_idx])
sub_list.append(SubRecordingExtractor(recording, channel_ids=chan_idx))
if return_property_list:
return sub_list, prop_list
else:
return sub_list
elif isinstance(extractor, SortingExtractor):
if property_name not in extractor.get_unit_property_names():
raise ValueError("'property_name' must be a property of the units")
else:
sub_list = []
sorting = extractor
properties = np.array([sorting.get_unit_property(unit, property_name)
for unit in sorting.get_unit_ids()])
prop_list = np.unique(properties)
for prop in prop_list:
prop_idx = np.where(prop == properties)
unit_idx = list(np.array(sorting.get_unit_ids())[prop_idx])
sub_list.append(SubSortingExtractor(sorting, unit_ids=unit_idx))
if return_property_list:
return sub_list, prop_list
else:
return sub_list
else:
raise ValueError("'extractor' must be a RecordingExtractor or a SortingExtractor") | Divides Recording or Sorting Extractor based on the property_name (e.g. group)
Parameters
----------
extractor: RecordingExtractor or SortingExtractor
The extractor to be subdivided into subextractors
property_name: str
The property used to subdivide the extractor
return_property_list: bool
If True the property list is returned
Returns
-------
List of subextractors |
def processRequest(self, arg, **kw):
"""
Parameters:
arg -- XML Soap data string
"""
if self.debug:
log.msg('===>PROCESS RESPONSE: %s' %str(arg), debug=1)
if arg is None:
return
for h in self.handlers:
arg = h.processRequest(arg, **kw)
s = str(arg)
if self.debug:
log.msg(s, debug=1)
return s | Parameters:
arg -- XML Soap data string |
def pop(self, count=1):
"""
Return new deque with rightmost element removed. Popping the empty queue
will return the empty queue. An optional count can be given to indicate the
number of elements to pop. Popping with a negative index is the same as
popleft. Executes in amortized O(k) where k is the number of elements to pop.
>>> pdeque([1, 2]).pop()
pdeque([1])
>>> pdeque([1, 2]).pop(2)
pdeque([])
>>> pdeque([1, 2]).pop(-1)
pdeque([2])
"""
if count < 0:
return self.popleft(-count)
new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count)
return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen) | Return new deque with rightmost element removed. Popping the empty queue
will return the empty queue. An optional count can be given to indicate the
number of elements to pop. Popping with a negative index is the same as
popleft. Executes in amortized O(k) where k is the number of elements to pop.
>>> pdeque([1, 2]).pop()
pdeque([1])
>>> pdeque([1, 2]).pop(2)
pdeque([])
>>> pdeque([1, 2]).pop(-1)
pdeque([2]) |
def GetZipInfo(self):
"""Retrieves the ZIP info object.
Returns:
zipfile.ZipInfo: a ZIP info object or None if not available.
Raises:
PathSpecError: if the path specification is incorrect.
"""
if not self._zip_info:
location = getattr(self.path_spec, 'location', None)
if location is None:
raise errors.PathSpecError('Path specification missing location.')
if not location.startswith(self._file_system.LOCATION_ROOT):
raise errors.PathSpecError('Invalid location in path specification.')
if len(location) == 1:
return None
zip_file = self._file_system.GetZipFile()
try:
self._zip_info = zip_file.getinfo(location[1:])
except KeyError:
pass
return self._zip_info | Retrieves the ZIP info object.
Returns:
zipfile.ZipInfo: a ZIP info object or None if not available.
Raises:
PathSpecError: if the path specification is incorrect. |
def check_url (self):
"""Try to get URL data from queue and check it."""
try:
url_data = self.urlqueue.get(timeout=QUEUE_POLL_INTERVALL_SECS)
if url_data is not None:
try:
self.check_url_data(url_data)
finally:
self.urlqueue.task_done(url_data)
self.setName(self.origname)
except urlqueue.Empty:
pass
except Exception:
self.internal_error() | Try to get URL data from queue and check it. |
def get_sequences_from_cluster(c1, c2, data):
"""Get all sequences from two clusters."""
seqs1 = data[c1]['seqs']
seqs2 = data[c2]['seqs']
seqs = list(set(seqs1 + seqs2))
names = []
for s in seqs:
if s in seqs1 and s in seqs2:
names.append("both")
elif s in seqs1:
names.append(c1)
else:
names.append(c2)
return seqs, names | Get all sequences from two clusters.
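A small usage sketch with a hypothetical data dict shaped the way the function expects, assuming get_sequences_from_cluster above is importable:
data = {
    "c1": {"seqs": ["seq1", "seq2"]},
    "c2": {"seqs": ["seq2", "seq3"]},
}
seqs, names = get_sequences_from_cluster("c1", "c2", data)
print(sorted(zip(seqs, names)))
# [('seq1', 'c1'), ('seq2', 'both'), ('seq3', 'c2')]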
def render(self, fname=''):
"""Render the circuit expression and store the result in a file
Args:
fname (str): Path to an image file to store the result in.
Returns:
str: The path to the image file
"""
import qnet.visualization.circuit_pyx as circuit_visualization
from tempfile import gettempdir
from time import time, sleep
if not fname:
tmp_dir = gettempdir()
fname = os.path.join(tmp_dir, "tmp_{}.png".format(hash(time())))
if circuit_visualization.draw_circuit(self, fname):
done = False
for k in range(20):
if os.path.exists(fname):
done = True
break
else:
sleep(.5)
if done:
return fname
raise CannotVisualize() | Render the circuit expression and store the result in a file
Args:
fname (str): Path to an image file to store the result in.
Returns:
str: The path to the image file |
def _local_to_shape(self, local_x, local_y):
"""Translate local coordinates point to shape coordinates.
Shape coordinates have the same unit as local coordinates, but are
offset such that the origin of the shape coordinate system (0, 0) is
located at the top-left corner of the shape bounding box.
"""
return (
local_x - self.shape_offset_x,
local_y - self.shape_offset_y
) | Translate local coordinates point to shape coordinates.
Shape coordinates have the same unit as local coordinates, but are
offset such that the origin of the shape coordinate system (0, 0) is
located at the top-left corner of the shape bounding box. |
def modis_filename2modisdate(modis_fname):
"""
#+
# MODIS_FILENAME2DATE : Convert MODIS file name to MODIS date
#
# @author: Renaud DUSSURGET (LER PAC/IFREMER)
# @history: Created by RD on 29/10/2012
#
#-
"""
if not isinstance(modis_fname,list) : modis_fname=[modis_fname]
return [os.path.splitext(os.path.basename(m))[0][1:12] for m in modis_fname] | #+
# MODIS_FILENAME2DATE : Convert MODIS file name to MODIS date
#
# @author: Renaud DUSSURGET (LER PAC/IFREMER)
# @history: Created by RD on 29/10/2012
#
#- |
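A hedged usage sketch; the granule name below is hypothetical, and the slice keeps the 11 characters after the leading sensor letter (year, day of year, HHMM):
fname = "/data/modis/A2012177134500.L2_LAC_OC.nc"  # hypothetical file path
print(modis_filename2modisdate(fname))
# ['20121771345']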
def windyields(self, ini, end, delta, **keyw):
"""
This function returns the wind yields and ejected masses.
X_i, E_i = data.windyields(ini, end, delta)
Parameters
----------
ini : integer
The starting cycle.
end : integer
The finishing cycle.
delta : integer
The cycle interval.
keyw : dict
A dict of key word arguments.
Returns
-------
list
The function returns a list of the wind yields(X_i) and
a list of the ejected masses(E_i) in the mass units that
were used (usually solar masses).
Notes
-----
The following keywords can also be used:
+------------------+---------------+
| Keyword Argument | Default Value |
+==================+===============+
| abund | "iso_massf" |
+------------------+---------------+
| tmass | "mass" |
+------------------+---------------+
| cycle | "cycle" |
+------------------+---------------+
The keyword arguments are used when the variables within the
input file differ in name from their default values typically
found in an MPPNP output file. If the data table differs in
name, use these keywords. For example, if the table for the
abundances is called "abundances" instead of "iso_massf", then
use abund = "abundances" as a keyword argument.
"""
if ("tmass" in keyw) == False:
keyw["tmass"] = "mass"
if ("abund" in keyw) == False:
keyw["abund"] = "iso_massf"
if ("cycle" in keyw) == False:
keyw["cycle"] = "cycle"
print("Windyields() initialised. Reading files...")
ypsinit = []
niso = 0
X_i = []
E_i = []
totalmass = []
ypssurf = []
cycles = []
first = True
# The following statements copy global functions into local memory,
# which is called faster, speeding up the code slightly
wc = self._windcalc
cycleret = self.se.cycles
retrieve = self.se.get
capp = cycles.extend
tapp = totalmass.extend
yapp = ypssurf.extend
# Retrieve the data from the files
for i in range(ini,end+1,delta):
step = int(i)
capp([int(cycleret[i-ini])])
tapp([retrieve(step,keyw["tmass"])])
yapp([retrieve(step,keyw["abund"])])
print("Reading complete. Calculating yields and ejected masses...")
nsteps = len(cycles)-1
niso = len(ypssurf[0])
X_i = np.zeros([niso], float)
E_i = np.zeros([niso], float)
# Call the windyields calculator
X_i, E_i = wc(first, totalmass, nsteps, niso, ypssurf, \
ypsinit, X_i, E_i, cycles)
return X_i, E_i | This function returns the wind yields and ejected masses.
X_i, E_i = data.windyields(ini, end, delta)
Parameters
----------
ini : integer
The starting cycle.
end : integer
The finishing cycle.
delta : integer
The cycle interval.
keyw : dict
A dict of key word arguments.
Returns
-------
list
The function returns a list of the wind yields(X_i) and
a list of the ejected masses(E_i) in the mass units that
were used (usually solar masses).
Notes
-----
The following keywords can also be used:
+------------------+---------------+
| Keyword Argument | Default Value |
+==================+===============+
| abund | "iso_massf" |
+------------------+---------------+
| tmass | "mass" |
+------------------+---------------+
| cycle | "cycle" |
+------------------+---------------+
The keyword arguments are used when the variables within the
input file differ in name from their default values typically
found in an MPPNP output file. If the data table differs in
name, use these keywords. For example, if the table for the
abundances is called "abundances" instead of "iso_massf", then
use abund = "abundances" as a keyword argument. |
def create_organisation(self, organisation_json):
'''
Create an Organisation object from a JSON object
Returns:
Organisation: The organisation from the given `organisation_json`.
'''
return trolly.organisation.Organisation(
trello_client=self,
organisation_id=organisation_json['id'],
name=organisation_json['name'],
data=organisation_json,
) | Create an Organisation object from a JSON object
Returns:
Organisation: The organisation from the given `organisation_json`. |
def on_peer_down(self, peer):
"""Peer down handler.
Cleans up the paths in the global tables that were received from this peer.
"""
LOG.debug('Cleaning obsolete paths whose source/version: %s/%s',
peer.ip_address, peer.version_num)
# Launch clean-up for each global tables.
self._table_manager.clean_stale_routes(peer) | Peer down handler.
Cleans up the paths in the global tables that were received from this peer. |
def main():
"""main
Entrypoint to this script. This will execute the functionality as a standalone
element
"""
src_dir = sys.argv[1]
os.chdir(src_dir)
config = get_config(src_dir)
cmd = 'python -c "import f5;print(f5.__version__)"'
version = \
subprocess.check_output([cmd], shell=True).strip()
tmp_dist = "/var/deb_dist"
project = config['project']
os_version = "1404"
deb_dir = "%s/deb_dist" % config['dist_dir']
print("Building %s debian packages..." % project)
shutil.copyfile("%s/stdeb.cfg" % (deb_dir), "./stdeb.cfg")
shutil.copytree(deb_dir, tmp_dist)
cmd = 'python setup.py --command-packages=stdeb.command sdist_dsc ' + \
'--dist-dir=%s' % tmp_dist
print(subprocess.check_output([cmd], shell=True))
os.chdir("%s/%s-%s" % (tmp_dist, project, version))
cmd = 'dpkg-buildpackage -rfakeroot -uc -us'.split()
subprocess.check_output(cmd) # args will not show up in ps...
os.chdir(src_dir)
pkg = "python-%s_%s-1" % (project, version)
os_pkg = pkg + "_%s_all.deb" % os_version
pkg = pkg + "_all.deb"
shutil.copyfile("%s/%s" % (tmp_dist, pkg), "%s/%s" % (deb_dir, os_pkg))
cmd = "python %s/add_pkg_name.py deb_pkg %s/%s" % \
(config['scripts'], deb_dir, os_pkg) | main
Entrypoint to this script. This will execute the functionality as a standalone
element |
def key_expand(self, key):
"""
Derive public key and account number from **private key**
:param key: Private key to generate account and public key of
:type key: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.key_expand(
key="781186FB9EF17DB6E3D1056550D9FAE5D5BBADA6A6BC370E4CBB938B1DC71DA3"
)
{
"private": "781186FB9EF17DB6E3D1056550D9FAE5D5BBADA6A6BC370E4CBB938B1DC71DA3",
"public": "3068BB1CA04525BB0E416C485FE6A67FD52540227D267CC8B6E8DA958A7FA039",
"account": "xrb_1e5aqegc1jb7qe964u4adzmcezyo6o146zb8hm6dft8tkp79za3sxwjym5rx"
}
"""
key = self._process_value(key, 'privatekey')
payload = {"key": key}
resp = self.call('key_expand', payload)
return resp | Derive public key and account number from **private key**
:param key: Private key to generate account and public key of
:type key: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.key_expand(
key="781186FB9EF17DB6E3D1056550D9FAE5D5BBADA6A6BC370E4CBB938B1DC71DA3"
)
{
"private": "781186FB9EF17DB6E3D1056550D9FAE5D5BBADA6A6BC370E4CBB938B1DC71DA3",
"public": "3068BB1CA04525BB0E416C485FE6A67FD52540227D267CC8B6E8DA958A7FA039",
"account": "xrb_1e5aqegc1jb7qe964u4adzmcezyo6o146zb8hm6dft8tkp79za3sxwjym5rx"
} |
def binary_report(self, sha256sum, apikey):
"""
retrieve report from file scan
"""
url = self.base_url + "file/report"
params = {"apikey": apikey, "resource": sha256sum}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, data=params)
if response.status_code == self.HTTP_OK:
json_response = response.json()
response_code = json_response['response_code']
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.warning("retrieve report: %s, HTTP code: %d", sha256sum, response.status_code)
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
data = '\n'.join(map(self.make_relative, self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename, 'wt')
f.write(data)
f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False | Write changed .pth file back to disk |
def encrypt(self, txt, key):
"""
XOR ciphering with a PBKDF2 checksum
"""
# log.debug("encrypt(txt='%s', key='%s')", txt, key)
assert isinstance(txt, six.text_type), "txt: %s is not text type!" % repr(txt)
assert isinstance(key, six.text_type), "key: %s is not text type!" % repr(key)
if len(txt) != len(key):
raise SecureJSLoginError("encrypt error: %s and '%s' must have the same length!" % (txt, key))
pbkdf2_hash = PBKDF2SHA1Hasher1().get_salt_hash(txt)
txt=force_bytes(txt)
key=force_bytes(key)
crypted = self.xor(txt, key)
crypted = binascii.hexlify(crypted)
crypted = six.text_type(crypted, "ascii")
return "%s$%s" % (pbkdf2_hash, crypted) | XOR ciphering with a PBKDF2 checksum |
def gct2gctx_main(args):
""" Separate from main() in order to make a command-line tool. """
in_gctoo = parse_gct.parse(args.filename, convert_neg_666=False)
if args.output_filepath is None:
basename = os.path.basename(args.filename)
out_name = os.path.splitext(basename)[0] + ".gctx"
else:
out_name = args.output_filepath
""" If annotations are supplied, parse table and set metadata_df """
if args.row_annot_path is None:
pass
else:
row_metadata = pd.read_csv(args.row_annot_path, sep='\t', index_col=0, header=0, low_memory=False)
assert all(in_gctoo.data_df.index.isin(row_metadata.index)), \
"Row ids in matrix missing from annotations file"
in_gctoo.row_metadata_df = row_metadata.loc[row_metadata.index.isin(in_gctoo.data_df.index)]
if args.col_annot_path is None:
pass
else:
col_metadata = pd.read_csv(args.col_annot_path, sep='\t', index_col=0, header=0, low_memory=False)
assert all(in_gctoo.data_df.columns.isin(col_metadata.index)), \
"Column ids in matrix missing from annotations file"
in_gctoo.col_metadata_df = col_metadata.loc[col_metadata.index.isin(in_gctoo.data_df.columns)]
write_gctx.write(in_gctoo, out_name) | Separate from main() in order to make a command-line tool.
def patch_namespaced_role(self, name, namespace, body, **kwargs):
"""
partially update the specified Role
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_role(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Role (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Role
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_role_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_role_with_http_info(name, namespace, body, **kwargs)
return data | partially update the specified Role
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_role(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Role (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Role
If the method is called asynchronously,
returns the request thread. |
def query(self):
""" Main accounts query """
query = (
self.book.session.query(Account)
.join(Commodity)
.filter(Commodity.namespace != "template")
.filter(Account.type != AccountType.root.value)
)
return query | Main accounts query |
def _premis_version_from_data(data):
"""Given tuple ``data`` encoding a PREMIS element, attempt to return the
PREMIS version it is using. If none can be found, return the default PREMIS
version.
"""
for child in data:
if isinstance(child, dict):
version = child.get("version")
if version:
return version
return utils.PREMIS_VERSION | Given tuple ``data`` encoding a PREMIS element, attempt to return the
PREMIS version it is using. If none can be found, return the default PREMIS
version. |
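A small sketch of the tuple layout this helper walks: the element name followed by an attribute dict and child tuples. The data below is illustrative:
data = (
    "premis:object",
    {"xmlns:premis": "http://www.loc.gov/premis/v3", "version": "3.0"},
    ("premis:objectIdentifier",),
)
print(_premis_version_from_data(data))  # '3.0'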
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist and display plugin enable...
if not self.stats or self.is_disable():
return ret
# Max size for the interface name
name_max_width = max_width - 12
# Header
msg = '{:{width}}'.format('SENSORS', width=name_max_width)
ret.append(self.curse_add_line(msg, "TITLE"))
# Stats
for i in self.stats:
# Do not display anything if no batteries are detected
if i['type'] == 'battery' and i['value'] == []:
continue
# New line
ret.append(self.curse_new_line())
msg = '{:{width}}'.format(i["label"][:name_max_width],
width=name_max_width)
ret.append(self.curse_add_line(msg))
if i['value'] in (b'ERR', b'SLP', b'UNK', b'NOS'):
msg = '{:>13}'.format(i['value'])
ret.append(self.curse_add_line(
msg, self.get_views(item=i[self.get_key()],
key='value',
option='decoration')))
else:
if (args.fahrenheit and i['type'] != 'battery' and
i['type'] != 'fan_speed'):
value = to_fahrenheit(i['value'])
unit = 'F'
else:
value = i['value']
unit = i['unit']
try:
msg = '{:>13.0f}{}'.format(value, unit)
ret.append(self.curse_add_line(
msg, self.get_views(item=i[self.get_key()],
key='value',
option='decoration')))
except (TypeError, ValueError):
pass
return ret | Return the dict to display in the curse interface. |
def write(self):
'''Write signature file with signature of script, input, output and dependent files.
Because local input and output files can only be determined after the execution
of workflow. They are not part of the construction.
'''
if not self.output_files.valid():
raise ValueError(
f'Cannot write signature with undetermined output {self.output_files}'
)
else:
if 'TARGET' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file(
'TARGET',
f'write signature {self.sig_id} with output {self.output_files}'
)
ret = super(RuntimeInfo, self).write()
if ret is False:
env.logger.debug(f'Failed to write signature {self.sig_id}')
return ret
send_message_to_controller(['step_sig', self.sig_id, ret])
send_message_to_controller([
'workflow_sig', 'tracked_files', self.sig_id,
repr({
'input_files': [
str(f.resolve())
for f in self.input_files
if isinstance(f, file_target)
],
'dependent_files': [
str(f.resolve())
for f in self.dependent_files
if isinstance(f, file_target)
],
'output_files': [
str(f.resolve())
for f in self.output_files
if isinstance(f, file_target)
]
})
])
return True | Write signature file with signature of script, input, output and dependent files.
Because local input and output files can only be determined after the execution
of workflow. They are not part of the construction. |
def save(self, *args, **kwargs):
"""
call synchronizer "after_external_layer_saved" method
for any additional operation that must be executed after save
"""
after_save = kwargs.pop('after_save', True)
super(LayerExternal, self).save(*args, **kwargs)
# call after_external_layer_saved method of synchronizer
if after_save:
try:
synchronizer = self.synchronizer
except ImproperlyConfigured:
pass
else:
if synchronizer:
synchronizer.after_external_layer_saved(self.config)
# reload schema
self._reload_schema() | call synchronizer "after_external_layer_saved" method
for any additional operation that must be executed after save |
def saelgv(vec1, vec2):
"""
Find semi-axis vectors of an ellipse generated by two arbitrary
three-dimensional vectors.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/saelgv_c.html
:param vec1: First vector used to generate an ellipse.
:type vec1: 3-Element Array of floats
:param vec2: Second vector used to generate an ellipse.
:type vec2: 3-Element Array of floats
:return: Semi-major axis of ellipse, Semi-minor axis of ellipse.
:rtype: tuple
"""
vec1 = stypes.toDoubleVector(vec1)
vec2 = stypes.toDoubleVector(vec2)
smajor = stypes.emptyDoubleVector(3)
sminor = stypes.emptyDoubleVector(3)
libspice.saelgv_c(vec1, vec2, smajor, sminor)
return stypes.cVectorToPython(smajor), stypes.cVectorToPython(sminor) | Find semi-axis vectors of an ellipse generated by two arbitrary
three-dimensional vectors.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/saelgv_c.html
:param vec1: First vector used to generate an ellipse.
:type vec1: 3-Element Array of floats
:param vec2: Second vector used to generate an ellipse.
:type vec2: 3-Element Array of floats
:return: Semi-major axis of ellipse, Semi-minor axis of ellipse.
:rtype: tuple |
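A usage sketch through SpiceyPy's public wrapper of the same name (no SPICE kernels are needed for this routine); the generating vectors are arbitrary:
import spiceypy

smajor, sminor = spiceypy.saelgv([1.0, 1.0, 1.0], [1.0, -1.0, 1.0])
print(smajor, sminor)  # semi-major and semi-minor axis vectors of the generated ellipse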
def stateDict(self):
"""Saves internal values to be loaded later
:returns: dict -- {'parametername': value, ...}
"""
state = {
'duration' : self._duration,
'intensity' : self._intensity,
'risefall' : self._risefall,
'stim_type' : self.name
}
return state | Saves internal values to be loaded later
:returns: dict -- {'parametername': value, ...} |
def hash_array(vals, encoding='utf8', hash_key=None, categorize=True):
"""
Given a 1d array, return an array of deterministic integers.
.. versionadded:: 0.19.2
Parameters
----------
vals : ndarray, Categorical
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
.. versionadded:: 0.20.0
Returns
-------
1d uint64 numpy array of hash values, same length as the vals
"""
if not hasattr(vals, 'dtype'):
raise TypeError("must pass a ndarray-like")
dtype = vals.dtype
if hash_key is None:
hash_key = _default_hash_key
# For categoricals, we hash the categories, then remap the codes to the
# hash values. (This check is above the complex check so that we don't ask
# numpy if categorical is a subdtype of complex, as it will choke).
if is_categorical_dtype(dtype):
return _hash_categorical(vals, encoding, hash_key)
elif is_extension_array_dtype(dtype):
vals, _ = vals._values_for_factorize()
dtype = vals.dtype
# we'll be working with everything as 64-bit values, so handle this
# 128-bit value early
if np.issubdtype(dtype, np.complex128):
return hash_array(vals.real) + 23 * hash_array(vals.imag)
# First, turn whatever array this is into unsigned 64-bit ints, if we can
# manage it.
elif isinstance(dtype, np.bool):
vals = vals.astype('u8')
elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
vals = vals.view('i8').astype('u8', copy=False)
elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
else:
# With repeated values, its MUCH faster to categorize object dtypes,
# then hash and rename categories. We allow skipping the categorization
# when the values are known/likely to be unique.
if categorize:
from pandas import factorize, Categorical, Index
codes, categories = factorize(vals, sort=False)
cat = Categorical(codes, Index(categories),
ordered=False, fastpath=True)
return _hash_categorical(cat, encoding, hash_key)
try:
vals = hashing.hash_object_array(vals, hash_key, encoding)
except TypeError:
# we have mixed types
vals = hashing.hash_object_array(vals.astype(str).astype(object),
hash_key, encoding)
# Then, redistribute these 64-bit ints within the space of 64-bit ints
vals ^= vals >> 30
vals *= np.uint64(0xbf58476d1ce4e5b9)
vals ^= vals >> 27
vals *= np.uint64(0x94d049bb133111eb)
vals ^= vals >> 31
return vals | Given a 1d array, return an array of deterministic integers.
.. versionadded:: 0.19.2
Parameters
----------
vals : ndarray, Categorical
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
.. versionadded:: 0.20.0
Returns
-------
1d uint64 numpy array of hash values, same length as the vals |
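A short usage sketch via the public pandas entry point of the same name; the input array is arbitrary:
import numpy as np
from pandas.util import hash_array

hashes = hash_array(np.array([1, 2, 3], dtype=np.int64))
print(hashes.dtype, hashes.shape)  # uint64 (3,)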
def _start_again(self, message=None):
"""Simple method to form a start again message and give the answer in readable form."""
logging.debug("Start again message delivered: {}".format(message))
the_answer = self._get_text_answer()
return "{0} The correct answer was {1}. Please start a new game.".format(
message,
the_answer
) | Simple method to form a start again message and give the answer in readable form. |
def seqToKV(seq, strict=False):
"""Represent a sequence of pairs of strings as newline-terminated
key:value pairs. The pairs are generated in the order given.
@param seq: The pairs
@type seq: [(str, (unicode|str))]
@return: A string representation of the sequence
@rtype: bytes
"""
def err(msg):
formatted = 'seqToKV warning: %s: %r' % (msg, seq)
if strict:
raise KVFormError(formatted)
else:
logging.warning(formatted)
lines = []
for k, v in seq:
if isinstance(k, bytes):
k = k.decode('utf-8')
elif not isinstance(k, str):
err('Converting key to string: %r' % k)
k = str(k)
if '\n' in k:
raise KVFormError(
'Invalid input for seqToKV: key contains newline: %r' % (k, ))
if ':' in k:
raise KVFormError(
'Invalid input for seqToKV: key contains colon: %r' % (k, ))
if k.strip() != k:
err('Key has whitespace at beginning or end: %r' % (k, ))
if isinstance(v, bytes):
v = v.decode('utf-8')
elif not isinstance(v, str):
err('Converting value to string: %r' % (v, ))
v = str(v)
if '\n' in v:
raise KVFormError(
'Invalid input for seqToKV: value contains newline: %r' %
(v, ))
if v.strip() != v:
err('Value has whitespace at beginning or end: %r' % (v, ))
lines.append(k + ':' + v + '\n')
return ''.join(lines).encode('utf-8') | Represent a sequence of pairs of strings as newline-terminated
key:value pairs. The pairs are generated in the order given.
@param seq: The pairs
@type seq: [(str, (unicode|str))]
@return: A string representation of the sequence
@rtype: bytes |
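A round-trip illustration for the helper above (a minimal sketch; the import path assumes this is seqToKV from the openid.kvform module of python-openid, and the key/value data is made up):
from openid.kvform import seqToKV  # assumed module; matches the definition above

pairs = [('mode', 'error'), ('error', 'This is an example message')]
encoded = seqToKV(pairs)

# each pair becomes a newline-terminated "key:value" line, UTF-8 encoded
assert encoded == b'mode:error\nerror:This is an example message\n'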
def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex() | Load one or more CADA CSV files matching patterns |
def rotation(f, line = 'fast'):
""" Find rotation of the survey
Find the clock-wise rotation and origin of `line` as ``(rot, cdpx, cdpy)``
The clock-wise rotation is defined as the angle in radians between the line
given by the first and last trace of the first line and the axis that gives
increasing CDP-Y, in the direction that gives increasing CDP-X.
By default, the first line is the 'fast' direction, which is inlines if the
file is inline sorted, and crossline if it's crossline sorted.
Parameters
----------
f : SegyFile
line : { 'fast', 'slow', 'iline', 'xline' }
Returns
-------
rotation : float
cdpx : int
cdpy : int
Notes
-----
.. versionadded:: 1.2
"""
if f.unstructured:
raise ValueError("Rotation requires a structured file")
lines = { 'fast': f.fast,
'slow': f.slow,
'iline': f.iline,
'xline': f.xline,
}
if line not in lines:
error = "Unknown line {}".format(line)
solution = "Must be any of: {}".format(' '.join(lines.keys()))
raise ValueError('{} {}'.format(error, solution))
l = lines[line]
origin = f.header[0][segyio.su.cdpx, segyio.su.cdpy]
cdpx, cdpy = origin[segyio.su.cdpx], origin[segyio.su.cdpy]
rot = f.xfd.rotation( len(l),
l.stride,
len(f.offsets),
np.fromiter(l.keys(), dtype = np.intc) )
return rot, cdpx, cdpy | Find rotation of the survey
Find the clock-wise rotation and origin of `line` as ``(rot, cdpx, cdpy)``
The clock-wise rotation is defined as the angle in radians between the line
given by the first and last trace of the first line and the axis that gives
increasing CDP-Y, in the direction that gives increasing CDP-X.
By default, the first line is the 'fast' direction, which is inlines if the
file is inline sorted, and crossline if it's crossline sorted.
Parameters
----------
f : SegyFile
line : { 'fast', 'slow', 'iline', 'xline' }
Returns
-------
rotation : float
cdpx : int
cdpy : int
Notes
-----
.. versionadded:: 1.2 |
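A minimal usage sketch, assuming this function is exposed as segyio.tools.rotation and that 'survey.sgy' stands in for any structured (sorted) SEG-Y file:
import segyio

with segyio.open('survey.sgy') as f:            # hypothetical file name
    rot, cdpx, cdpy = segyio.tools.rotation(f, line='fast')
    print('clock-wise rotation (radians):', rot)
    print('origin CDP-X/CDP-Y:', cdpx, cdpy)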
def _add_https(self, q):
'''for push, pull, and other api interactions, the user can optionally
define a custom registry. If the registry name doesn't include http
or https, add it.
Parameters
==========
q: the parsed image query (names), including the original
'''
# If image uses http or https, add back
if not q['registry'].startswith('http'):
if q['original'].startswith('http:'):
q['registry'] = 'http://%s' % q['registry']
elif q['original'].startswith('https:'):
q['registry'] = 'https://%s' % q['registry']
# Otherwise, guess from the user's environment
else:
prefix = 'https://'
# The user can set an environment variable to specify nohttps
nohttps = os.environ.get('SREGISTRY_REGISTRY_NOHTTPS')
if nohttps != None:
prefix = 'http://'
q['registry'] = '%s%s' %(prefix, q['registry'])
return q | for push, pull, and other api interactions, the user can optionally
define a custom registry. If the registry name doesn't include http
or https, add it.
Parameters
==========
q: the parsed image query (names), including the original |
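A standalone sketch of the same prefixing rule, written as a free function for illustration (the q dict layout follows the docstring above; the registry name and image path are made up):
import os

def add_https(q):
    # mirror of the method above: only touch registries without a scheme
    if not q['registry'].startswith('http'):
        if q['original'].startswith('http:'):
            q['registry'] = 'http://%s' % q['registry']
        elif q['original'].startswith('https:'):
            q['registry'] = 'https://%s' % q['registry']
        else:
            # fall back to https unless SREGISTRY_REGISTRY_NOHTTPS is set
            prefix = 'https://'
            if os.environ.get('SREGISTRY_REGISTRY_NOHTTPS') is not None:
                prefix = 'http://'
            q['registry'] = '%s%s' % (prefix, q['registry'])
    return q

q = {'registry': 'myregistry.io', 'original': 'myregistry.io/user/image:tag'}
print(add_https(q)['registry'])  # https://myregistry.io (unless NOHTTPS is set)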
def list_virtual_machine_scale_set_vm_network_interfaces(scale_set,
vm_index,
resource_group,
**kwargs):
'''
.. versionadded:: 2019.2.0
Get information about all network interfaces in a specific virtual machine within a scale set.
:param scale_set: The name of the scale set to query.
:param vm_index: The virtual machine index.
:param resource_group: The resource group name assigned to the
scale set.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.list_virtual_machine_scale_set_vm_network_interfaces testset testvm testgroup
'''
result = {}
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
nics = __utils__['azurearm.paged_object_to_list'](
netconn.network_interfaces.list_virtual_machine_scale_set_vm_network_interfaces(
virtual_machine_scale_set_name=scale_set,
virtualmachine_index=vm_index,
resource_group_name=resource_group
)
)
for nic in nics:
result[nic['name']] = nic
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | .. versionadded:: 2019.2.0
Get information about all network interfaces in a specific virtual machine within a scale set.
:param scale_set: The name of the scale set to query.
:param vm_index: The virtual machine index.
:param resource_group: The resource group name assigned to the
scale set.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.list_virtual_machine_scale_set_vm_network_interfaces testset testvm testgroup |
def remove(self, component):
# type: (str) -> None
"""
Kills/Removes the component with the given name
:param component: A component name
:raise KeyError: Unknown component
"""
with self.__lock:
# Find its factory
factory = self.__names.pop(component)
components = self.__queue[factory]
# Clear the queue
del components[component]
if not components:
# No more component for this factory
del self.__queue[factory]
# Kill the component
try:
with use_ipopo(self.__context) as ipopo:
# Try to instantiate the component right now
ipopo.kill(component)
except (BundleException, ValueError):
# iPOPO not yet started or component not instantiated
pass | Kills/Removes the component with the given name
:param component: A component name
:raise KeyError: Unknown component |
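A standalone sketch of the two-dict bookkeeping this method relies on (names and shapes are illustrative, not pelix/iPOPO's actual internals; the locking and the kill() call are omitted):
# component name -> factory name, and factory name -> {component name: properties}
names = {'comp.A': 'factory.x', 'comp.B': 'factory.x'}
queue = {'factory.x': {'comp.A': {}, 'comp.B': {}}}

def remove(component):
    factory = names.pop(component)    # raises KeyError for unknown components
    components = queue[factory]
    del components[component]
    if not components:                # last queued component for this factory
        del queue[factory]

remove('comp.A')
remove('comp.B')
assert 'factory.x' not in queue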
def _get_nets_lacnic(self, *args, **kwargs):
"""
Deprecated. This will be removed in a future release.
"""
from warnings import warn
warn('Whois._get_nets_lacnic() has been deprecated and will be '
'removed. You should now use Whois.get_nets_lacnic().')
return self.get_nets_lacnic(*args, **kwargs) | Deprecated. This will be removed in a future release. |
def increase_route_count(self, crawled_request):
"""Increase the count that determines how many times a URL of a certain route has been crawled.
Args:
crawled_request (:class:`nyawc.http.Request`): The request that possibly matches a route.
"""
for route in self.__routing_options.routes:
if re.compile(route).match(crawled_request.url):
count_key = str(route) + crawled_request.method
if count_key in self.__routing_count.keys():
self.__routing_count[count_key] += 1
else:
self.__routing_count[count_key] = 1
break | Increase the count that determines how many times a URL of a certain route has been crawled.
Args:
crawled_request (:class:`nyawc.http.Request`): The request that possibly matches a route. |
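A standalone sketch of the counting idea (the route pattern, URL, and storage dict are illustrative, not nyawc's actual options objects):
import re
from collections import defaultdict

routes = [r'https?://example\.com/articles/[0-9]+']
routing_count = defaultdict(int)

def increase_route_count(url, method):
    # bump the counter for the first route whose regex matches the crawled URL
    for route in routes:
        if re.compile(route).match(url):
            routing_count[route + method] += 1
            break

increase_route_count('https://example.com/articles/42', 'GET')
increase_route_count('https://example.com/articles/7', 'GET')
assert sum(routing_count.values()) == 2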