text (string, lengths 78 – 104k) | score (float64, 0 – 0.18)
---|---|
def bool_env(key, default=False):
"""Parse an environment variable as a boolean switch
`True` is returned if the variable value matches one of the following:
- ``'1'``
- ``'y'``
- ``'yes'``
- ``'true'``
The match is case-insensitive (so ``'Yes'`` will match as `True`)
Parameters
----------
key : `str`
the name of the environment variable to find
default : `bool`
the default return value if the key is not found
Returns
-------
True
if the environment variable matches as 'yes' or similar
False
otherwise
Examples
--------
>>> import os
>>> from gwpy.utils.env import bool_env
>>> os.environ['GWPY_VALUE'] = 'yes'
>>> print(bool_env('GWPY_VALUE'))
True
>>> os.environ['GWPY_VALUE'] = 'something else'
>>> print(bool_env('GWPY_VALUE'))
False
>>> print(bool_env('GWPY_VALUE2'))
False
"""
try:
return os.environ[key].lower() in TRUE
except KeyError:
return default | 0.000963 |
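The snippet above relies on a module-level `os` import and a `TRUE` constant that sit outside the excerpt; a minimal sketch of what they are assumed to look like, derived from the accepted values listed in the docstring (the actual definitions in gwpy.utils.env may differ):

import os

# Assumed set of accepted truthy strings; values are lower-cased before the lookup.
TRUE = {'1', 'y', 'yes', 'true'}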
def optimize(thumbnail_file, jpg_command=None, png_command=None,
gif_command=None):
"""
A post processing function to optimize file size. Accepts commands
to optimize JPG, PNG and GIF images as arguments. Example:
THUMBNAILS = {
# Other options...
'POST_PROCESSORS': [
{
'processor': 'thumbnails.post_processors.optimize',
'png_command': 'optipng -force -o3 "%(filename)s"',
'jpg_command': 'jpegoptim -f --strip-all "%(filename)s"',
},
],
}
    Note: using output redirection in commands may cause unpredictable results.
    For example 'optipng -force -o3 "%(filename)s" &> /dev/null' may cause the
    optimize command to fail on some systems.
"""
temp_dir = get_or_create_temp_dir()
thumbnail_filename = os.path.join(temp_dir, "%s" % shortuuid.uuid())
f = open(thumbnail_filename, 'wb')
f.write(thumbnail_file.read())
f.close()
# Detect filetype
filetype = imghdr.what(thumbnail_filename)
# Construct command to optimize image based on filetype
command = None
if filetype == "jpg" or filetype == "jpeg":
command = jpg_command
elif filetype == "png":
command = png_command
elif filetype == "gif":
command = gif_command
# Run Command
if command:
command = command % {'filename': thumbnail_filename}
call(command, shell=True)
optimized_file = File(open(thumbnail_filename, 'rb'))
os.remove(thumbnail_filename)
return optimized_file | 0.000635 |
def damping(temp, relhum, freq, pres=101325):
"""
Calculates the damping factor for sound in dB/m
depending on temperature, humidity and sound frequency.
Source: http://www.sengpielaudio.com/LuftdaempfungFormel.htm
    temp: Temperature in degrees Celsius
    relhum: Relative humidity as a percentage, e.g. 50
    freq: Sound frequency in hertz
    pres: Atmospheric pressure in pascals (default: 101325, standard atmospheric pressure)
"""
temp += 273.15 # convert to kelvin
pres = pres / 101325.0 # convert to relative pressure
c_humid = 4.6151 - 6.8346 * pow((273.15 / temp), 1.261)
hum = relhum * pow(10.0, c_humid) * pres
tempr = temp / 293.15 # convert to relative air temp (re 20 deg C)
frO = pres * (24.0 + 4.04e4 * hum * (0.02 + hum) / (0.391 + hum))
frN = (pres * pow(tempr, -0.5) * (9.0 + 280.0 * hum * math.exp(-4.17 *
(pow(tempr, (-1.0 / 3.0)) - 1.0))))
damp = (8.686 * freq * freq * (
1.84e-11 * (1.0 / pres) * math.sqrt(tempr) +
pow(tempr, -2.5) *
(
0.01275 * (math.exp(-2239.1 / temp) * 1.0 /
(frO + freq * freq / frO)) +
0.1068 * (
math.exp(-3352 / temp) * 1.0 /
(frN + freq * freq / frN)
)
)
)
)
return damp | 0.003823 |
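A short usage sketch of the formula above (the function itself needs the standard-library `math` module in scope); the concrete numbers are only illustrative:

# Attenuation of a 1 kHz tone at 20 degrees Celsius, 50 % relative humidity
# and the default standard pressure of 101325 Pa.
alpha = damping(temp=20, relhum=50, freq=1000)
print('attenuation: %.6f dB/m' % alpha)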
def _resolve_memory_references(self, expression: Expression) -> Union[float, int]:
"""
Traverse the given Expression, and replace any Memory References with whatever values
have been so far provided by the user for those memory spaces. Declared memory defaults
to zero.
:param expression: an Expression
"""
if isinstance(expression, BinaryExp):
left = self._resolve_memory_references(expression.op1)
right = self._resolve_memory_references(expression.op2)
return expression.fn(left, right)
elif isinstance(expression, Function):
return expression.fn(self._resolve_memory_references(expression.expression))
elif isinstance(expression, Parameter):
raise ValueError(f"Unexpected Parameter in gate expression: {expression}")
elif isinstance(expression, float) or isinstance(expression, int):
return expression
elif isinstance(expression, MemoryReference):
return self._variables_shim.get(ParameterAref(name=expression.name, index=expression.offset), 0)
else:
raise ValueError(f"Unexpected expression in gate parameter: {expression}") | 0.007365 |
def infer_format(filename:str) -> str:
"""Return extension identifying format of given filename"""
_, ext = os.path.splitext(filename)
return ext | 0.012739 |
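Because `os.path.splitext` keeps the leading dot and only splits off the last suffix, callers get values such as '.csv' rather than 'csv'; a quick sketch:

assert infer_format('data/table.csv') == '.csv'
assert infer_format('archive.tar.gz') == '.gz'  # only the final extension is returned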
def _extract_stats_from_line(self, txt, stats_delim=' ', val_delim=':'):
"""
        extracts the stats from a line of text and returns them as a dict, e.g.
        STR:7 AGI:9 STA:5 INT:5 Health:21 CON:8 max_health:21
"""
result = {}
stats_txt = txt.split(stats_delim)
for s in stats_txt:
#print('s = ', s)
if s.strip(' ').strip('\n') != '':
k,v = s.split(val_delim)
result[k.strip(' ')] = v.strip(' ').strip('\n')
return result | 0.007692 |
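Running the sample line from the docstring through the parser yields a plain string-to-string dict (sketch, assuming `p` is an instance of the class this method belongs to):

line = 'STR:7 AGI:9 STA:5 INT:5 Health:21 CON:8 max_health:21\n'
stats = p._extract_stats_from_line(line)
# stats == {'STR': '7', 'AGI': '9', 'STA': '5', 'INT': '5',
#           'Health': '21', 'CON': '8', 'max_health': '21'}
# Note that the values stay strings; callers must cast them if numbers are needed.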
def _setup_root_filesystem(self, root_dir):
"""Setup the filesystem layout in the given root directory.
Create a copy of the existing proc- and dev-mountpoints in the specified root
directory. Afterwards we chroot into it.
@param root_dir: The path of the root directory that is used to execute the process.
"""
root_dir = root_dir.encode()
# Create an empty proc folder into the root dir. The grandchild still needs a view of
# the old /proc, therefore we do not mount a fresh /proc here.
proc_base = os.path.join(root_dir, b"proc")
util.makedirs(proc_base, exist_ok=True)
dev_base = os.path.join(root_dir, b"dev")
util.makedirs(dev_base, exist_ok=True)
# Create a copy of the host's dev- and proc-mountpoints.
# They are marked as private in order to not being changed
# by existing mounts during run execution.
container.make_bind_mount(b"/dev/", dev_base, recursive=True, private=True)
container.make_bind_mount(b"/proc/", proc_base, recursive=True, private=True)
os.chroot(root_dir) | 0.006162 |
def _dtype(cls, tensor: tf.Tensor) -> tf.Tensor:
'''Converts `tensor` to tf.float32 datatype if needed.'''
if tensor.dtype != tf.float32:
tensor = tf.cast(tensor, tf.float32)
return tensor | 0.008929 |
def plot_phens(phen_grid, **kwargs):
"""
Plots circles colored according to the values in phen_grid.
-1 serves as a sentinel value, indicating that a circle should not be
plotted in that location.
"""
denom, palette = get_kwargs(phen_grid, kwargs, True)
grid = color_grid(phen_grid, palette, denom)
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] != -1 and tuple(grid[i][j]) != -1:
plt.gca().add_patch(plt.Circle((j, i),
radius=.3, lw=1, ec="black",
facecolor=grid[i][j], zorder=2)) | 0.001543 |
def get_netconf_client_capabilities_output_session_host_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_netconf_client_capabilities = ET.Element("get_netconf_client_capabilities")
config = get_netconf_client_capabilities
output = ET.SubElement(get_netconf_client_capabilities, "output")
session = ET.SubElement(output, "session")
host_ip = ET.SubElement(session, "host-ip")
host_ip.text = kwargs.pop('host_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.00495 |
def _function_contents(func):
"""
The signature is as follows (should be byte/chars):
< _code_contents (see above) from func.__code__ >
,( comma separated _object_contents for function argument defaults)
,( comma separated _object_contents for any closure contents )
See also: https://docs.python.org/3/reference/datamodel.html
- func.__code__ - The code object representing the compiled function body.
- func.__defaults__ - A tuple containing default argument values for those arguments that have defaults, or None if no arguments have a default value
- func.__closure__ - None or a tuple of cells that contain bindings for the function's free variables.
:Returns:
Signature contents of a function. (in bytes)
"""
contents = [_code_contents(func.__code__, func.__doc__)]
# The function contents depends on the value of defaults arguments
if func.__defaults__:
function_defaults_contents = [_object_contents(cc) for cc in func.__defaults__]
defaults = bytearray(b',(')
defaults.extend(bytearray(b',').join(function_defaults_contents))
defaults.extend(b')')
contents.append(defaults)
else:
contents.append(b',()')
# The function contents depends on the closure captured cell values.
closure = func.__closure__ or []
try:
closure_contents = [_object_contents(x.cell_contents) for x in closure]
except AttributeError:
closure_contents = []
contents.append(b',(')
contents.append(bytearray(b',').join(closure_contents))
contents.append(b')')
retval = bytearray(b'').join(contents)
return retval | 0.002978 |
def get_parent_var(name, global_ok=False, default=None, skip_frames=0):
"""
Directly gets a variable from a parent frame-scope.
Returns
--------
Any
        The content of the variable found by the given name, or the given
        ``default`` if it was not found.
"""
scope = get_parent_scope_from_var(name, global_ok=global_ok, skip_frames=skip_frames + 1)
if not scope:
return default
if name in scope.locals:
return scope.locals.get(name, default)
return scope.globals.get(name, default) | 0.003953 |
def _set_env_from_extras(self, extras):
"""
Sets the environment variable `GOOGLE_APPLICATION_CREDENTIALS` with either:
- The path to the keyfile from the specified connection id
- A generated file's path if the user specified JSON in the connection id. The
file is assumed to be deleted after the process dies due to how mkstemp()
works.
The environment variable is used inside the gcloud command to determine correct
service account to use.
"""
key_path = self._get_field(extras, 'key_path', False)
keyfile_json_str = self._get_field(extras, 'keyfile_dict', False)
if not key_path and not keyfile_json_str:
self.log.info('Using gcloud with application default credentials.')
elif key_path:
os.environ[G_APP_CRED] = key_path
else:
# Write service account JSON to secure file for gcloud to reference
service_key = tempfile.NamedTemporaryFile(delete=False)
service_key.write(keyfile_json_str)
os.environ[G_APP_CRED] = service_key.name
# Return file object to have a pointer to close after use,
# thus deleting from file system.
return service_key | 0.004702 |
def _from_dict(cls, _dict):
"""Initialize a Conversions object from a json dictionary."""
args = {}
if 'pdf' in _dict:
args['pdf'] = PdfSettings._from_dict(_dict.get('pdf'))
if 'word' in _dict:
args['word'] = WordSettings._from_dict(_dict.get('word'))
if 'html' in _dict:
args['html'] = HtmlSettings._from_dict(_dict.get('html'))
if 'segment' in _dict:
args['segment'] = SegmentSettings._from_dict(_dict.get('segment'))
if 'json_normalizations' in _dict:
args['json_normalizations'] = [
NormalizationOperation._from_dict(x)
for x in (_dict.get('json_normalizations'))
]
return cls(**args) | 0.002646 |
def predictions(dev_dataset,
all_results,
tokenizer,
max_answer_length=64,
null_score_diff_threshold=0.0,
n_best_size=10,
version_2=False):
"""Get prediction results
Parameters
----------
dev_dataset: dataset
Examples of transform.
all_results: dict
A dictionary containing model prediction results.
tokenizer: callable
Tokenizer function.
max_answer_length: int, default 64
Maximum length of the answer tokens.
null_score_diff_threshold: float, default 0.0
If null_score - best_non_null is greater than the threshold predict null.
n_best_size: int, default 10
The total number of n-best predictions.
version_2: bool, default False
If true, the SQuAD examples contain some that do not have an answer.
Returns
-------
all_predictions: dict
All final predictions.
all_nbest_json: dict
All n-best predictions.
scores_diff_json: dict
        Records the difference between the null score and the score of the best
        non-null prediction, when version_2 is True.
"""
_PrelimPrediction = namedtuple('PrelimPrediction',
['feature_index', 'start_index', 'end_index',
'start_logit', 'end_logit'])
_NbestPrediction = namedtuple(
'NbestPrediction', ['text', 'start_logit', 'end_logit'])
all_predictions = OrderedDict()
all_nbest_json = OrderedDict()
scores_diff_json = OrderedDict()
for features in dev_dataset:
results = all_results[features[0].example_id]
example_qas_id = features[0].qas_id
prelim_predictions = []
score_null = 1000000 # large and positive
        min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for features_id, (result, feature) in enumerate(zip(results, features)):
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
if version_2:
feature_null_score = result.start_logits[0] + \
result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = features_id
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=features_id,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(
pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = feature.doc_tokens[orig_doc_start:(
orig_doc_end + 1)]
tok_text = ' '.join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(' ##', '')
tok_text = tok_text.replace('##', '')
# Clean whitespace
tok_text = tok_text.strip()
tok_text = ' '.join(tok_text.split())
orig_text = ' '.join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, tokenizer)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ''
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
        # if we didn't include the empty option in the n-best, include it
if version_2:
if '' not in seen_predictions:
nbest.append(
_NbestPrediction(
text='',
start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = nd.softmax(nd.array(total_scores)).asnumpy()
nbest_json = []
for (i, entry) in enumerate(nbest):
output = OrderedDict()
output['text'] = entry.text
output['probability'] = float(probs[i])
output['start_logit'] = entry.start_logit
output['end_logit'] = entry.end_logit
nbest_json.append(output)
if not version_2:
all_predictions[example_qas_id] = nbest_json[0]['text']
else:
# predict '' iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - \
best_non_null_entry.end_logit
scores_diff_json[example_qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example_qas_id] = ''
else:
all_predictions[example_qas_id] = best_non_null_entry.text
all_nbest_json[example_qas_id] = nbest_json
return all_predictions, all_nbest_json, scores_diff_json | 0.001226 |
def notify_admin_of_new_comment(comID):
"""
Sends an email to the admin with details regarding comment with ID = comID
"""
comment = query_get_comment(comID)
if len(comment) > 0:
(comID2,
id_bibrec,
id_user,
body,
date_creation,
star_score, nb_votes_yes, nb_votes_total,
title,
nb_abuse_reports, round_name, restriction) = comment
else:
return
user_info = query_get_user_contact_info(id_user)
if len(user_info) > 0:
(nickname, email, last_login) = user_info
if not len(nickname) > 0:
nickname = email.split('@')[0]
else:
nickname = email = "ERROR: Could not retrieve"
review_stuff = '''
Star score = %s
Title = %s''' % (star_score, title)
washer = EmailWasher()
try:
body = washer.wash(body)
    except Exception:
body = cgi.escape(body)
record_info = webcomment_templates.tmpl_email_new_comment_admin(id_bibrec)
out = '''
The following %(comment_or_review)s has just been posted (%(date)s).
AUTHOR:
Nickname = %(nickname)s
Email = %(email)s
User ID = %(uid)s
RECORD CONCERNED:
Record ID = %(recID)s
URL = <%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(comments_or_reviews)s/>
%(record_details)s
%(comment_or_review_caps)s:
%(comment_or_review)s ID = %(comID)s %(review_stuff)s
Body =
<--------------->
%(body)s
<--------------->
ADMIN OPTIONS:
To moderate the %(comment_or_review)s go to %(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(comments_or_reviews)s/display?%(arguments)s
''' % \
{'comment_or_review': star_score > 0 and 'review' or 'comment',
'comment_or_review_caps': star_score > 0 and 'REVIEW' or 'COMMENT',
'comments_or_reviews': star_score > 0 and 'reviews' or 'comments',
'date': date_creation,
'nickname': nickname,
'email': email,
'uid': id_user,
'recID': id_bibrec,
'record_details': record_info,
'comID': comID2,
'review_stuff': star_score > 0 and review_stuff or "",
'body': body.replace('<br />', '\n'),
'siteurl': CFG_SITE_URL,
'CFG_SITE_RECORD': CFG_SITE_RECORD,
'arguments': 'ln=en&do=od#%s' % comID
}
from_addr = '%s WebComment <%s>' % (
CFG_SITE_NAME, CFG_WEBALERT_ALERT_ENGINE_EMAIL)
comment_collection = get_comment_collection(comID)
to_addrs = get_collection_moderators(comment_collection)
rec_collection = guess_primary_collection_of_a_record(id_bibrec)
report_nums = get_fieldvalues(id_bibrec, "037__a")
report_nums += get_fieldvalues(id_bibrec, "088__a")
report_nums = ', '.join(report_nums)
subject = "A new comment/review has just been posted [%s|%s]" % (
rec_collection, report_nums)
send_email(from_addr, to_addrs, subject, out) | 0.00135 |
def dump_environment(self):
""" Try to dump memory
Not currently implemented feature
:return: None
"""
# Dump the Alignak configuration to a temporary ini file
path = os.path.join(tempfile.gettempdir(),
'dump-env-%s-%s-%d.ini' % (self.type, self.name, int(time.time())))
try:
with open(path, "w") as out_file:
self.alignak_env.write(out_file)
except Exception as exp: # pylint: disable=broad-except
logger.error("Dumping daemon environment raised an error: %s. ", exp) | 0.006623 |
def _dissociate_gene(self, cobra_gene):
"""Dissociates a cobra.Gene object with a cobra.Reaction.
Parameters
----------
cobra_gene : cobra.core.Gene.Gene
"""
self._genes.discard(cobra_gene)
cobra_gene._reaction.discard(self) | 0.007092 |
def factory(ec, code=None, token=None, refresh=None, **kwargs):
"""
Create a token handler
:param code:
:param token:
:param refresh:
:return: TokenHandler instance
"""
TTYPE = {'code': 'A', 'token': 'T', 'refresh': 'R'}
args = {}
if code:
args['code_handler'] = init_token_handler(ec, code, TTYPE['code'])
if token:
args['access_token_handler'] = init_token_handler(ec, token, TTYPE['token'])
if refresh:
        args['refresh_token_handler'] = init_token_handler(ec, refresh, TTYPE['refresh'])
return TokenHandler(**args) | 0.005051 |
def hash(value, chars=None):
'Get N chars (default: all) of secure hash hexdigest of value.'
value = hash_func(value).hexdigest()
if chars: value = value[:chars]
return mark_safe(value) | 0.031746 |
def reinforce_grid(self):
""" Performs grid reinforcement measures for all MV and LV grids
Args:
Returns:
"""
# TODO: Finish method and enable LV case
for grid_district in self.mv_grid_districts():
# reinforce MV grid
grid_district.mv_grid.reinforce_grid()
# reinforce LV grids
for lv_load_area in grid_district.lv_load_areas():
if not lv_load_area.is_aggregated:
for lv_grid_district in lv_load_area.lv_grid_districts():
lv_grid_district.lv_grid.reinforce_grid() | 0.003205 |
def gene_id_check(genes, errors, columns, row_number):
"""
Validate gene identifiers against a known set.
Parameters
----------
genes : set
The known set of gene identifiers.
errors :
Passed by goodtables.
columns :
Passed by goodtables.
row_number :
Passed by goodtables.
"""
message = ("Gene '{value}' in column {col} and row {row} does not "
"appear in the metabolic model.")
for column in columns:
if "gene" in column['header'] and column['value'] not in genes:
            # Format into a separate variable so the unformatted template
            # remains available for any further offending columns.
            formatted = message.format(
                value=column['value'],
                row=row_number,
                col=column['number'])
            errors.append({
                'code': 'bad-value',
                'message': formatted,
'row-number': row_number,
'column-number': column['number'],
}) | 0.001085 |
def get_shell(self):
"""
Return shell which is currently bound to Help,
or another running shell if it has been terminated
"""
if (not hasattr(self.shell, 'get_doc') or
(hasattr(self.shell, 'is_running') and
not self.shell.is_running())):
self.shell = None
if self.main.ipyconsole is not None:
shell = self.main.ipyconsole.get_current_shellwidget()
if shell is not None and shell.kernel_client is not None:
self.shell = shell
if self.shell is None:
self.shell = self.internal_shell
return self.shell | 0.002861 |
def _get_list(self, value, context=None):
"""
Get a configuration value. The result is None if "value" is None,
otherwise the result is a list.
"value" may be a list, dict, or str value.
If a list, each element of the list may be a list, dict, or
str value, and the value extraction proceeds recursively.
During processing, if a dict is encountered, each element of
the dict is checked for existence in the context. If it
exists the associated value will be processed recursively as
before.
The final result will be the flattened list resulting from the
recursion. Even if the initial "value" is a str, the result
will be a list, with one element.
"""
log = self._params.get('log', self._discard)
res = []
if value is None:
return res
if context is None:
context = self._context
if isinstance(value, list):
log.debug("Processing list %s", value)
for v in value:
res.extend(self._get_list(v, context=context))
elif isinstance(value, dict):
log.debug("Processing dict %s", value)
for k in value:
if k in context:
res.extend(self._get_list(value[k], context=context))
else:
log.debug("Processing value '%s'", value)
res.append(value)
return res | 0.001362 |
def bothify(self, text='## ??', letters=string.ascii_letters):
"""
Replaces all placeholders with random numbers and letters.
:param text: string to be parsed
:returns: string with all numerical and letter placeholders filled in
"""
return self.lexify(self.numerify(text), letters=letters) | 0.005917 |
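A usage sketch through Faker's public API; the output is random, the sample value only illustrates the substitution of '#' with digits and '?' with letters:

from faker import Faker

fake = Faker()
print(fake.bothify('ID-##-??'))  # e.g. 'ID-37-qx' (random each call)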
def _load_script(self):
"""Loads the script from the filesystem
:raises exceptions.IOError: if the script file could not be opened
"""
script_text = filesystem.read_file(self.path, self.filename)
if not script_text:
raise IOError("Script file could not be opened or was empty: {0}"
"".format(os.path.join(self.path, self.filename)))
self.script = script_text | 0.004484 |
def team_players(self, team):
"""Store output of team players to a CSV file"""
headers = ['Jersey Number', 'Name', 'Position', 'Nationality',
'Date of Birth']
result = [headers]
result.extend([player['shirtNumber'],
player['name'],
player['position'],
player['nationality'],
player['dateOfBirth']]
for player in team)
self.generate_output(result) | 0.003846 |
def SetCacheMode(mode):
"""Set the Configure cache mode. mode must be one of "auto", "force",
or "cache"."""
global cache_mode
if mode == "auto":
cache_mode = AUTO
elif mode == "force":
cache_mode = FORCE
elif mode == "cache":
cache_mode = CACHE
else:
raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode) | 0.002646 |
def position(self):
"""
Returns the current position of the motor in pulses of the rotary
encoder. When the motor rotates clockwise, the position will increase.
Likewise, rotating counter-clockwise causes the position to decrease.
Writing will set the position to that value.
"""
self._position, value = self.get_attr_int(self._position, 'position')
return value | 0.004695 |
def get_number_of_partitions_for(self, ar):
"""Return the number of selected partitions
"""
# fetch the number of partitions from the request
uid = api.get_uid(ar)
num = self.request.get("primary", {}).get(uid)
if num is None:
# get the number of partitions from the template
template = ar.getTemplate()
if template:
num = len(template.getPartitions())
else:
num = DEFAULT_NUMBER_OF_PARTITIONS
try:
num = int(num)
except (TypeError, ValueError):
num = DEFAULT_NUMBER_OF_PARTITIONS
return num | 0.002994 |
def set_logfile(path, instance):
"""Specify logfile path"""
global logfile
logfile = os.path.normpath(path) + '/hfos.' + instance + '.log' | 0.006623 |
def update_payload(self, fields=None):
"""Wrap submitted data within an extra dict."""
payload = super(JobTemplate, self).update_payload(fields)
effective_user = payload.pop(u'effective_user', None)
if effective_user:
payload[u'ssh'] = {u'effective_user': effective_user}
return {u'job_template': payload} | 0.005602 |
def split(self, spike_ids=None, spike_clusters_rel=0):
"""Split the selected spikes."""
if spike_ids is None:
spike_ids = self.emit('request_split', single=True)
spike_ids = np.asarray(spike_ids, dtype=np.int64)
assert spike_ids.dtype == np.int64
assert spike_ids.ndim == 1
if len(spike_ids) == 0:
msg = ("You first need to select spikes in the feature "
"view with a few Ctrl+Click around the spikes "
"that you want to split.")
self.emit('error', msg)
return
self.clustering.split(spike_ids,
spike_clusters_rel=spike_clusters_rel)
self._global_history.action(self.clustering) | 0.002601 |
def channel(self, rpc_timeout=60, lazy=False):
"""Open Channel.
:param int rpc_timeout: Timeout before we give up waiting for an RPC
response from the server.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
"""
LOGGER.debug('Opening a new Channel')
if not compatibility.is_integer(rpc_timeout):
raise AMQPInvalidArgument('rpc_timeout should be an integer')
elif self.is_closed:
raise AMQPConnectionError('socket/connection closed')
with self.lock:
channel_id = self._get_next_available_channel_id()
channel = Channel(channel_id, self, rpc_timeout,
on_close_impl=self._cleanup_channel)
self._channels[channel_id] = channel
if not lazy:
channel.open()
LOGGER.debug('Channel #%d Opened', channel_id)
return self._channels[channel_id] | 0.001721 |
def _kill(self, variable, code_loc): # pylint:disable=no-self-use
"""
Kill previous defs. addr_list is a list of normalized addresses.
"""
# Case 1: address perfectly match, we kill
# Case 2: a is a subset of the original address
# Case 3: a is a superset of the original address
# the previous definition is killed. mark it in data graph.
if variable in self._live_defs:
for loc in self._live_defs.lookup_defs(variable):
pv = ProgramVariable(variable, loc, arch=self.project.arch)
self._data_graph_add_edge(pv, ProgramVariable(variable, code_loc, arch=self.project.arch), type='kill')
self._live_defs.kill_def(variable, code_loc) | 0.003995 |
def query(self, time_indices):
"""Query the values at given time indices.
Args:
time_indices: 0-based time indices to query, as a `list` of `int`.
Returns:
Values as a list of `numpy.ndarray` (for time indices in memory) or
`None` (for time indices discarded).
"""
if self._disposed:
raise ValueError(
'Cannot query: this _WatchStore instance is already disposed')
if not isinstance(time_indices, (tuple, list)):
time_indices = [time_indices]
output = []
for time_index in time_indices:
if isinstance(self._data[time_index], _TensorValueDiscarded):
output.append(None)
else:
data_item = self._data[time_index]
if (hasattr(data_item, 'dtype') and
tensor_helper.translate_dtype(data_item.dtype) == 'string'):
_, _, data_item = tensor_helper.array_view(data_item)
data_item = np.array(
tensor_helper.process_buffers_for_display(data_item),
dtype=np.object)
output.append(data_item)
return output | 0.007442 |
def row_contributions(self, X):
"""Returns the row contributions towards each principal component.
Each row contribution towards each principal component is equivalent to the amount of
inertia it contributes. This is calculated by dividing the squared row coordinates by the
eigenvalue associated to each principal component.
"""
utils.validation.check_is_fitted(self, 's_')
return np.square(self.row_coordinates(X)).div(self.eigenvalues_, axis='columns') | 0.009766 |
def iMath_propagate_labels_through_mask(image, labels, stopping_value=100, propagation_method=0):
"""
>>> import ants
>>> wms = ants.image_read('~/desktop/wms.nii.gz')
>>> thal = ants.image_read('~/desktop/thal.nii.gz')
>>> img2 = ants.iMath_propagate_labels_through_mask(wms, thal, 500, 0)
"""
return iMath(image, 'PropagateLabelsThroughMask', labels, stopping_value, propagation_method) | 0.007212 |
def remove_observer(self, callback):
"""Remove an observer from this event.
Args:
callback: A function or coroutine callback to remove from this
event.
Raises:
ValueError: If the callback is not an observer of this event.
"""
if callback not in self._observers:
raise ValueError('{} is not an observer of {}'
.format(callback, self))
self._observers.remove(callback) | 0.004024 |
def _get_params_for_optimizer(self, prefix, named_parameters):
"""Parse kwargs configuration for the optimizer identified by
the given prefix. Supports param group assignment using wildcards:
optimizer__lr=0.05,
optimizer__param_groups=[
('rnn*.period', {'lr': 0.3, 'momentum': 0}),
('rnn0', {'lr': 0.1}),
]
The first positional argument are the param groups.
"""
kwargs = self._get_params_for(prefix)
params = list(named_parameters)
pgroups = []
for pattern, group in kwargs.pop('param_groups', []):
matches = [i for i, (name, _) in enumerate(params) if
fnmatch.fnmatch(name, pattern)]
if matches:
p = [params.pop(i)[1] for i in reversed(matches)]
pgroups.append({'params': p, **group})
if params:
pgroups.append({'params': [p for _, p in params]})
return [pgroups], kwargs | 0.001965 |
def _pack_prms():
"""if you introduce new 'save-able' parameter dictionaries, then you have
to include them here"""
config_dict = {
"Paths": prms.Paths.to_dict(),
"FileNames": prms.FileNames.to_dict(),
"Db": prms.Db.to_dict(),
"DbCols": prms.DbCols.to_dict(),
"DataSet": prms.DataSet.to_dict(),
"Reader": prms.Reader.to_dict(),
"Instruments": prms.Instruments.to_dict(),
# "excel_db_cols": prms.excel_db_cols.to_dict(),
# "excel_db_filename_cols": prms.excel_db_filename_cols.to_dict(),
"Batch": prms.Batch.to_dict(),
}
return config_dict | 0.001565 |
def get_links(self, request=None):
"""
Return a dictionary containing all the links that should be
included in the API schema.
"""
links = LinkNode()
# Generate (path, method, view) given (path, method, callback).
paths = []
view_endpoints = []
for path, method, callback in self.endpoints:
view = self.create_view(callback, method, request)
if getattr(view, 'exclude_from_schema', False):
continue
path = self.coerce_path(path, method, view)
paths.append(path)
view_endpoints.append((path, method, view))
# Only generate the path prefix for paths that will be included
if not paths:
return None
prefix = self.determine_path_prefix(paths)
for path, method, view in view_endpoints:
if not self.has_view_permissions(path, method, view):
continue
link = self.get_link(path, method, view, version=getattr(request, 'version', None))
subpath = path[len(prefix):]
keys = self.get_keys(subpath, method, view)
try:
insert_into(links, keys, link)
except Exception:
continue
return links | 0.002309 |
def add_resource(self, resource):
'''Perform an atomic prepend for a new resource'''
resource.validate()
self.update(__raw__={
'$push': {
'resources': {
'$each': [resource.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self,
resource_added=resource.id) | 0.004338 |
def phrase_contains_special_keys(expansion: model.Expansion) -> bool:
"""
Determine if the expansion contains any special keys, including those resulting from any processed macros
(<script>, <file>, etc). If any are found, the phrase cannot be undone.
Python Zen: »In the face of ambiguity, refuse the temptation to guess.«
The question 'What does the phrase expansion "<ctrl>+a<shift>+<insert>" do?' cannot be answered. Because the key
bindings cannot be assumed to result in the actions "select all text, then replace with clipboard content",
the undo operation can not be performed. Thus always disable undo, when special keys are found.
"""
found_special_keys = KEY_FIND_RE.findall(expansion.string.lower())
return bool(found_special_keys) | 0.007282 |
def datetime2unix(T):
"""
converts datetime to UT1 unix epoch time
"""
T = atleast_1d(T)
ut1_unix = empty(T.shape, dtype=float)
for i, t in enumerate(T):
if isinstance(t, (datetime, datetime64)):
pass
elif isinstance(t, str):
try:
ut1_unix[i] = float(t) # it was ut1_unix in a string
continue
except ValueError:
t = parse(t) # datetime in a string
elif isinstance(t, (float, int)): # assuming ALL are ut1_unix already
return T
else:
raise TypeError('I only accept datetime or parseable date string')
# ut1 seconds since unix epoch, need [] for error case
ut1_unix[i] = forceutc(t).timestamp()
return ut1_unix | 0.001253 |
def sign(self, keys: list) -> None:
"""
Sign the current document.
Warning : current signatures will be replaced with the new ones.
:param keys: List of libnacl key instances
:return:
"""
if not isinstance(self.identity, Identity):
raise MalformedDocumentError("Can not return full revocation document created from inline")
self.signatures = []
for key in keys:
signing = base64.b64encode(key.signature(bytes(self.raw(), 'ascii')))
self.signatures.append(signing.decode("ascii")) | 0.006791 |
def _generate_ascii(self, matrix, foreground, background):
"""
Generates an identicon "image" in the ASCII format. The image will just
output the matrix used to generate the identicon.
Arguments:
matrix - Matrix describing which blocks in the identicon should be
painted with foreground (background if inverted) colour.
foreground - Character which should be used for representing
foreground.
background - Character which should be used for representing
background.
Returns:
ASCII representation of an identicon image, where one block is one
character.
"""
return "\n".join(["".join([foreground if cell else background for cell in row]) for row in matrix]) | 0.003755 |
def time_series(
self,
start_date='-30d',
end_date='now',
precision=None,
distrib=None,
tzinfo=None):
"""
Returns a generator yielding tuples of ``(<datetime>, <value>)``.
The data points will start at ``start_date``, and be at every time interval specified by
``precision``.
``distrib`` is a callable that accepts ``<datetime>`` and returns ``<value>``
"""
start_date = self._parse_date_time(start_date, tzinfo=tzinfo)
end_date = self._parse_date_time(end_date, tzinfo=tzinfo)
if end_date < start_date:
raise ValueError("`end_date` must be greater than `start_date`.")
if precision is None:
precision = (end_date - start_date) / 30
precision = self._parse_timedelta(precision)
if distrib is None:
def distrib(dt): return self.generator.random.uniform(0, precision) # noqa
if not callable(distrib):
raise ValueError(
"`distrib` must be a callable. Got {} instead.".format(distrib))
datapoint = start_date
while datapoint < end_date:
dt = timestamp_to_datetime(datapoint, tzinfo)
datapoint += precision
yield (dt, distrib(dt)) | 0.003774 |
def share(self, plotters, keys=None, draw=None, auto_update=False):
"""
Share the formatoptions of this plotter with others
This method shares the formatoptions of this :class:`Plotter` instance
with others to make sure that, if the formatoption of this changes,
those of the others change as well
Parameters
----------
plotters: list of :class:`Plotter` instances or a :class:`Plotter`
The plotters to share the formatoptions with
keys: string or iterable of strings
The formatoptions to share, or group names of formatoptions to
share all formatoptions of that group (see the
:attr:`fmt_groups` property). If None, all formatoptions of this
plotter are unshared.
%(InteractiveBase.start_update.parameters.draw)s
%(InteractiveBase.update.parameters.auto_update)s
See Also
--------
unshare, unshare_me"""
auto_update = auto_update or not self.no_auto_update
if isinstance(plotters, Plotter):
plotters = [plotters]
keys = self._set_sharing_keys(keys)
for plotter in plotters:
for key in keys:
fmto = self._shared.get(key, getattr(self, key))
if not getattr(plotter, key) == fmto:
plotter._shared[key] = getattr(self, key)
fmto.shared.add(getattr(plotter, key))
# now exit if we are not initialized
if self._initialized:
self.update(force=keys, auto_update=auto_update, draw=draw)
for plotter in plotters:
if not plotter._initialized:
continue
old_registered = plotter._registered_updates.copy()
plotter._registered_updates.clear()
try:
plotter.update(force=keys, auto_update=auto_update, draw=draw)
except:
raise
finally:
plotter._registered_updates.clear()
plotter._registered_updates.update(old_registered)
if draw is None:
draw = rcParams['auto_draw']
if draw:
self.draw()
if rcParams['auto_show']:
self.show() | 0.001321 |
def hangup(self):
""" End the phone call.
Does nothing if the call is already inactive.
"""
if self.active:
self._gsmModem.write('ATH')
self.answered = False
self.active = False
if self.id in self._gsmModem.activeCalls:
del self._gsmModem.activeCalls[self.id] | 0.008427 |
def _cnvkit_segment(cnr_file, cov_interval, data, items, out_file=None, detailed=False):
"""Perform segmentation and copy number calling on normalized inputs
"""
if not out_file:
out_file = "%s.cns" % os.path.splitext(cnr_file)[0]
if not utils.file_uptodate(out_file, cnr_file):
with file_transaction(data, out_file) as tx_out_file:
if not _cna_has_values(cnr_file):
with open(tx_out_file, "w") as out_handle:
out_handle.write("chromosome\tstart\tend\tgene\tlog2\tprobes\tCN1\tCN2\tbaf\tweight\n")
else:
# Scale cores to avoid memory issues with segmentation
# https://github.com/etal/cnvkit/issues/346
if cov_interval == "genome":
cores = max(1, dd.get_cores(data) // 2)
else:
cores = dd.get_cores(data)
cmd = [_get_cmd(), "segment", "-p", str(cores), "-o", tx_out_file, cnr_file]
small_vrn_files = _compatible_small_variants(data, items)
if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != "genome":
cmd += ["--vcf", small_vrn_files[0].name, "--sample-id", small_vrn_files[0].sample]
if small_vrn_files[0].normal:
cmd += ["--normal-id", small_vrn_files[0].normal]
resources = config_utils.get_resources("cnvkit_segment", data["config"])
user_options = resources.get("options", [])
cmd += [str(x) for x in user_options]
if cov_interval == "genome" and "--threshold" not in user_options:
cmd += ["--threshold", "0.00001"]
# For tumors, remove very low normalized regions, avoiding upcaptured noise
# https://github.com/bcbio/bcbio-nextgen/issues/2171#issuecomment-348333650
# unless we want detailed segmentation for downstream tools
paired = vcfutils.get_paired(items)
if paired:
#if detailed:
# cmd += ["-m", "hmm-tumor"]
if "--drop-low-coverage" not in user_options:
cmd += ["--drop-low-coverage"]
# preferentially use conda installed Rscript
export_cmd = ("%s && export TMPDIR=%s && "
% (utils.get_R_exports(), os.path.dirname(tx_out_file)))
do.run(export_cmd + " ".join(cmd), "CNVkit segment")
return out_file | 0.004262 |
def received_raw(self):
"""
Return a list of all received headers in raw format
"""
output = []
for i in self.message.get_all("received", []):
output.append(decode_header_part(i))
return output | 0.007905 |
def OnRefreshSelectedCells(self, event):
"""Event handler for refreshing the selected cells via menu"""
self.grid.actions.refresh_selected_frozen_cells()
self.grid.ForceRefresh()
event.Skip() | 0.008889 |
def add_state_segments(self, *args, **kwargs):
"""DEPRECATED: use :meth:`Plot.add_segments_bar`
"""
warnings.warn('add_state_segments() was renamed add_segments_bar(), '
'this warning will result in an error in the future',
DeprecationWarning)
return self.add_segments_bar(*args, **kwargs) | 0.005479 |
def alwaysCalledWith(cls, spy, *args, **kwargs): #pylint: disable=invalid-name
"""
        Checks that the spy was always called with the provided args/kwargs (possibly among others)
Args: SinonSpy, args/kwargs
"""
cls.__is_spy(spy)
if not (spy.alwaysCalledWith(*args, **kwargs)):
raise cls.failException(cls.message) | 0.011696 |
def create_tar (archive, compression, cmd, verbosity, interactive, filenames):
"""Create a TAR archive with the tarfile Python module."""
mode = get_tar_mode(compression)
try:
with tarfile.open(archive, mode) as tfile:
for filename in filenames:
tfile.add(filename)
except Exception as err:
msg = "error creating %s: %s" % (archive, err)
raise util.PatoolError(msg)
return None | 0.004454 |
def GET(self, url):
"""returns text content of HTTP GET response."""
r = requests.get(url)
if self.verbose:
sys.stdout.write("%s %s\n" % (r.status_code, r.encoding))
sys.stdout.write(str(r.headers) + "\n")
self.encoding = r.encoding
return r.text | 0.006452 |
def request_help(self, req, msg):
"""Return help on the available requests.
Return a description of the available requests using a sequence of
#help informs.
Parameters
----------
request : str, optional
The name of the request to return help for (the default is to
return help for all requests).
Informs
-------
request : str
The name of a request.
description : str
Documentation for the named request.
Returns
-------
success : {'ok', 'fail'}
Whether sending the help succeeded.
informs : int
Number of #help inform messages sent.
Examples
--------
::
?help
#help halt ...description...
#help help ...description...
...
!help ok 5
?help halt
#help halt ...description...
!help ok 1
"""
if not msg.arguments:
for name, method in sorted(self._request_handlers.items()):
doc = method.__doc__
req.inform(name, doc)
num_methods = len(self._request_handlers)
return req.make_reply("ok", str(num_methods))
else:
name = msg.arguments[0]
if name in self._request_handlers:
method = self._request_handlers[name]
doc = method.__doc__.strip()
req.inform(name, doc)
return req.make_reply("ok", "1")
return req.make_reply("fail", "Unknown request method.") | 0.001213 |
def mean_interval(data, alpha=_alpha):
"""
    Interval assuming a Gaussian posterior.
    """
    mean = np.mean(data)
sigma = np.std(data)
scale = scipy.stats.norm.ppf(1-alpha/2.)
return interval(mean,mean-scale*sigma,mean+scale*sigma) | 0.016 |
def clear(self):
""" Clear any existing values from this queue. """
logger.debug('Clearing queue: "%s"', self.name)
return self.redis.delete(self.name) | 0.011429 |
def _create_session(
self,
alias,
url,
headers,
cookies,
auth,
timeout,
max_retries,
backoff_factor,
proxies,
verify,
debug,
disable_warnings):
""" Create Session: create a HTTP session to a server
``url`` Base url of the server
``alias`` Robot Framework alias to identify the session
``headers`` Dictionary of default headers
``cookies`` Dictionary of cookies
``auth`` List of username & password for HTTP Basic Auth
``timeout`` Connection timeout
``max_retries`` The maximum number of retries each connection should attempt.
``backoff_factor`` The pause between for each retry
``proxies`` Dictionary that contains proxy urls for HTTP and HTTPS communication
``verify`` Whether the SSL cert will be verified. A CA_BUNDLE path can also be provided.
``debug`` Enable http verbosity option more information
https://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.set_debuglevel
        ``disable_warnings`` Disable requests warnings; useful when you have a large number of test cases
"""
self.builtin.log('Creating session: %s' % alias, 'DEBUG')
s = session = requests.Session()
s.headers.update(headers)
s.auth = auth if auth else s.auth
s.proxies = proxies if proxies else s.proxies
try:
max_retries = int(max_retries)
except ValueError as err:
raise ValueError("Error converting max_retries parameter: %s" % err)
if max_retries > 0:
http = requests.adapters.HTTPAdapter(max_retries=Retry(total=max_retries, backoff_factor=backoff_factor))
https = requests.adapters.HTTPAdapter(max_retries=Retry(total=max_retries, backoff_factor=backoff_factor))
# Replace the session's original adapters
s.mount('http://', http)
s.mount('https://', https)
        # Disable requests warnings; useful when you have a large number of test cases,
        # otherwise you will observe drastic growth in Robot log.html and output.xml file size
if disable_warnings:
logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests
logging.getLogger().setLevel(logging.ERROR)
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.ERROR)
requests_log.propagate = True
if not verify:
requests.packages.urllib3.disable_warnings()
# verify can be a Boolean or a String
if isinstance(verify, bool):
s.verify = verify
elif isinstance(verify, str) or isinstance(verify, unicode):
if verify.lower() == 'true' or verify.lower() == 'false':
s.verify = self.builtin.convert_to_boolean(verify)
else:
# String for CA_BUNDLE, not a Boolean String
s.verify = verify
else:
# not a Boolean nor a String
s.verify = verify
        # can't pass these into the Session anymore
self.timeout = float(timeout) if timeout is not None else None
self.cookies = cookies
self.verify = verify if self.builtin.convert_to_boolean(verify) != True else None
s.url = url
# Enable http verbosity
if int(debug) >= 1:
self.debug = int(debug)
httplib.HTTPConnection.debuglevel = self.debug
self._cache.register(session, alias=alias)
return session | 0.004305 |
def show_ordering_diagram(analyser, amount_clusters = None):
"""!
@brief Display cluster-ordering (reachability-plot) diagram.
@param[in] analyser (ordering_analyser): cluster-ordering analyser whose ordering diagram should be displayed.
    @param[in] amount_clusters (uint): if it is not 'None' then it displays the connectivity radius line that can be used for allocation of the specified amount of clusters
and colorize diagram by corresponding cluster colors.
Example demonstrates general abilities of 'ordering_visualizer' class:
@code
# Display cluster-ordering diagram with connectivity radius is used for allocation of three clusters.
ordering_visualizer.show_ordering_diagram(analyser, 3);
# Display cluster-ordering diagram without radius.
ordering_visualizer.show_ordering_diagram(analyser);
@endcode
"""
ordering = analyser.cluster_ordering
axis = plt.subplot(111)
if amount_clusters is not None:
radius, borders = analyser.calculate_connvectivity_radius(amount_clusters)
# divide into cluster groups to visualize by colors
left_index_border = 0
current_index_border = 0
for index_border in range(len(borders)):
right_index_border = borders[index_border]
axis.bar(range(left_index_border, right_index_border), ordering[left_index_border:right_index_border], width = 1.0, color = color_list.TITLES[index_border])
left_index_border = right_index_border
current_index_border = index_border
axis.bar(range(left_index_border, len(ordering)), ordering[left_index_border:len(ordering)], width = 1.0, color = color_list.TITLES[current_index_border + 1])
plt.xlim([0, len(ordering)])
plt.axhline(y = radius, linewidth = 2, color = 'black')
plt.text(0, radius + radius * 0.03, " Radius: " + str(round(radius, 4)) + ";\n Clusters: " + str(amount_clusters), color = 'b', fontsize = 10)
else:
axis.bar(range(0, len(ordering)), ordering[0:len(ordering)], width = 1.0, color = 'black')
plt.xlim([0, len(ordering)])
plt.show() | 0.018465 |
def resize(im, short, max_size):
"""
only resize input image to target size and return scale
:param im: BGR image input by opencv
:param short: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:return: resized image (NDArray) and scale (float)
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(short) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
return im, im_scale | 0.002639 |
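A usage sketch of the resize helper; the image path is hypothetical and only OpenCV (plus the NumPy the function already uses) is assumed to be available:

import cv2

img = cv2.imread('example.jpg')  # hypothetical input image (BGR, as noted above)
resized, scale = resize(img, short=600, max_size=1000)
print(resized.shape, scale)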
def _windows_system_appdata():
"""
Return the path to the Windows Common App Data folder.
On Windows 7, for example, this is C:\\ProgramData
    :return: String representation of the path to the Windows Common
    App Data folder
"""
# Could also use os.environ['ALLUSERSPROFILE'] - maybe?
csidl_common_appdata = 35
sh_get_folder_path = windll.shell32.SHGetFolderPathW
sh_get_folder_path.argtypes = [wintypes.HWND,
ctypes.c_int,
wintypes.HANDLE,
wintypes.DWORD,
wintypes.LPCWSTR]
path_buf = ctypes.create_unicode_buffer(wintypes.MAX_PATH)
result = sh_get_folder_path(0, csidl_common_appdata, 0, 0, path_buf)
return str(path_buf.value) | 0.001214 |
def set_webhook_handler(self, scope, callback):
"""
Allows adding a webhook_handler as an alternative to the decorators
"""
scope = scope.lower()
if scope == 'after_send':
self._after_send = callback
return
if scope not in Page.WEBHOOK_ENDPOINTS:
raise ValueError("The 'scope' argument must be one of {}.".format(Page.WEBHOOK_ENDPOINTS))
self._webhook_handlers[scope] = callback | 0.006342 |
def list_logdir(self, id, filter=None, sort=None): # pylint: disable=invalid-name,redefined-builtin
"""Get a list of logdir files.
:param id: Result ID as an int.
:param filter: Filter to apply as string.
:param sort: Sort field to apply as string.
:return: :class:`results.LogDirFile <results.LogDirFile>` list
"""
schema = LogDirFileSchema()
resp = self.service.list(self.base+str(id)+'/logdir/', filter, sort)
return self.service.decode(schema, resp, many=True) | 0.007449 |
async def execute_async(self, operation, op_type, message, timeout=0):
"""Execute a request and wait on a response asynchronously.
:param operation: The type of operation to be performed. This value will
be service-specific, but common values include READ, CREATE and UPDATE.
This value will be added as an application property on the message.
:type operation: bytes
:param op_type: The type on which to carry out the operation. This will
be specific to the entities of the service. This value will be added as
an application property on the message.
:type op_type: bytes
:param message: The message to send in the management request.
:type message: ~uamqp.message.Message
:param timeout: Provide an optional timeout in milliseconds within which a response
to the management request must be received.
:type timeout: int
:rtype: ~uamqp.message.Message
"""
start_time = self._counter.get_current_ms()
operation_id = str(uuid.uuid4())
self._responses[operation_id] = None
def on_complete(operation_result, status_code, description, wrapped_message):
result = constants.MgmtExecuteResult(operation_result)
if result != constants.MgmtExecuteResult.Ok:
_logger.error(
"Failed to complete mgmt operation.\nStatus code: %r\nMessage: %r",
status_code, description)
message = Message(message=wrapped_message) if wrapped_message else None
self._responses[operation_id] = (status_code, message, description)
self._mgmt_op.execute(operation, op_type, None, message.get_message(), on_complete)
while not self._responses[operation_id] and not self.mgmt_error:
if timeout > 0:
now = self._counter.get_current_ms()
if (now - start_time) >= timeout:
raise TimeoutException("Failed to receive mgmt response in {}ms".format(timeout))
await self.connection.work_async()
if self.mgmt_error:
raise self.mgmt_error
response = self._responses.pop(operation_id)
return response | 0.004887 |
def buy_market_order(self, amount):
"""Place a buy order at market price.
:param amount: Amount of major currency to buy at market price.
:type amount: int | float | str | unicode | decimal.Decimal
:return: Order details.
:rtype: dict
"""
amount = str(amount)
self._log("buy {} {} at market price".format(amount, self.major))
return self._rest_client.post(
endpoint='/buy',
payload={'book': self.name, 'amount': amount}
) | 0.003802 |
def cudnnGetConvolution2dDescriptor(convDesc):
""""
Get a convolution descriptor.
This function queries a previously initialized 2D convolution descriptor object.
Parameters
----------
convDesc : cudnnConvolutionDescriptor
Handle to a previously created convolution descriptor.
Returns
-------
pad_h : int
zero-padding height: number of rows of zeros implicitly concatenated onto
the top and onto the bottom of input images.
pad_w : int
zero-padding width: number of columns of zeros implicitly concatenated
onto the left and onto the right of input images.
u : int
Vertical filter stride.
v : int
Horizontal filter stride.
dilation_h : int
Filter height dilation.
dilation_w : int
Filter width dilation.
mode : cudnnConvolutionMode
Either CUDNN_CONVOLUTION or CUDNN_CROSS_CORRELATION.
computeType : cudnnDataType
Compute precision
"""
pad_h = ctypes.c_int()
pad_w = ctypes.c_int()
u = ctypes.c_int()
v = ctypes.c_int()
dilation_h = ctypes.c_int()
dilation_w = ctypes.c_int()
mode = ctypes.c_int()
computeType = ctypes.c_int()
status = _libcudnn.cudnnGetConvolution2dDescriptor(convDesc, ctypes.byref(pad_h),
ctypes.byref(pad_w), ctypes.byref(u),
ctypes.byref(v), ctypes.byref(dilation_h),
ctypes.byref(dilation_w),
ctypes.byref(mode), ctypes.byref(computeType))
cudnnCheckStatus(status)
    return (pad_h.value, pad_w.value, u.value, v.value, dilation_h.value, dilation_w.value,
            mode.value, computeType.value) | 0.006565 |
def combine_comments(comments):
'''
Given a list of comments, or a comment submitted as a string, return a
single line of text containing all of the comments.
'''
if isinstance(comments, list):
for idx in range(len(comments)):
if not isinstance(comments[idx], six.string_types):
comments[idx] = six.text_type(comments[idx])
else:
if not isinstance(comments, six.string_types):
comments = [six.text_type(comments)]
else:
comments = [comments]
return ' '.join(comments).strip() | 0.001727 |
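A behaviour sketch: list and scalar inputs both collapse to one space-joined string, with non-string items coerced via six.text_type.

assert combine_comments(['added rule', 'retry later']) == 'added rule retry later'
assert combine_comments('single comment') == 'single comment'
assert combine_comments([1, 'two']) == '1 two'  # numbers are coerced to text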
async def viewers_js(request):
'''
Viewers determines the viewers installed based on settings, then uses the
conversion infrastructure to convert all these JS files into a single JS
bundle, that is then served. As with media, it will simply serve a cached
version if necessary.
'''
# Generates single bundle as such:
# BytesResource -> ViewerNodePackageBuilder -> nodepackage -> ... -> min.js
response = singletons.server.response
# Create a viewers resource, which is simply a JSON encoded description of
# the viewers necessary for this viewers bundle.
viewers_resource = singletons.viewers.get_resource()
url_string = viewers_resource.url_string
target_ts = TypeString('min.js') # get a minified JS bundle
target_resource = TypedResource(url_string, target_ts)
if target_resource.cache_exists():
return await response.file(target_resource.cache_path, headers={
'Content-Type': 'application/javascript',
})
# Otherwise, does not exist, save this descriptor to cache and kick off
# conversion process
if not viewers_resource.cache_exists():
viewers_resource.save()
# Queue up a single function that will in turn queue up conversion process
await singletons.workers.async_enqueue_sync(
enqueue_conversion_path,
url_string,
str(target_ts),
singletons.workers.enqueue_convert
)
return response.text(NOT_LOADED_JS, headers={
'Content-Type': 'application/javascript',
}) | 0.000647 |
def approximation(self, latitude, longitude):
"""
        Dummy approximation using the nearest points. The nearer the neighbour,
        the more important its elevation will be.
"""
d = 1. / self.square_side
d_meters = d * mod_utils.ONE_DEGREE
# Since the less the distance => the more important should be the
# distance of the point, we'll use d-distance as importance coef
# here:
importance_1 = d_meters - mod_utils.distance(latitude + d, longitude, latitude, longitude)
elevation_1 = self.geo_elevation_data.get_elevation(latitude + d, longitude, approximate=False)
importance_2 = d_meters - mod_utils.distance(latitude - d, longitude, latitude, longitude)
elevation_2 = self.geo_elevation_data.get_elevation(latitude - d, longitude, approximate=False)
importance_3 = d_meters - mod_utils.distance(latitude, longitude + d, latitude, longitude)
elevation_3 = self.geo_elevation_data.get_elevation(latitude, longitude + d, approximate=False)
importance_4 = d_meters - mod_utils.distance(latitude, longitude - d, latitude, longitude)
elevation_4 = self.geo_elevation_data.get_elevation(latitude, longitude - d, approximate=False)
# TODO(TK) Check if coordinates inside the same file, and only then decide if to call
# self.geo_elevation_data.get_elevation or just self.get_elevation
        if elevation_1 is None or elevation_2 is None or elevation_3 is None or elevation_4 is None:
elevation = self.get_elevation(latitude, longitude, approximate=False)
if not elevation:
return None
elevation_1 = elevation_1 or elevation
elevation_2 = elevation_2 or elevation
elevation_3 = elevation_3 or elevation
elevation_4 = elevation_4 or elevation
# Normalize importance:
sum_importances = float(importance_1 + importance_2 + importance_3 + importance_4)
# Check normalization:
assert abs(importance_1 / sum_importances + \
importance_2 / sum_importances + \
importance_3 / sum_importances + \
importance_4 / sum_importances - 1 ) < 0.000001
result = importance_1 / sum_importances * elevation_1 + \
importance_2 / sum_importances * elevation_2 + \
importance_3 / sum_importances * elevation_3 + \
importance_4 / sum_importances * elevation_4
return result | 0.011444 |
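The weighting scheme is inverse-distance in spirit: each of the four d-offset neighbours contributes with weight (d_meters - its distance to the query point), normalised so the weights sum to 1. A small self-contained sketch of that normalisation step, with made-up importances and elevations:
# Hypothetical neighbour data: (importance = d_meters - distance, elevation)
neighbours = [(30.0, 120.0), (25.0, 118.0), (28.0, 121.5), (27.0, 119.0)]
total = float(sum(w for w, _ in neighbours))
estimate = sum(w / total * elev for w, elev in neighbours)
print(round(estimate, 3))   # weighted average of the four elevations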
def menu_callback(m):
'''called on menu selection'''
if m.returnkey.startswith('# '):
cmd = m.returnkey[2:]
if m.handler is not None:
if m.handler_result is None:
return
cmd += m.handler_result
process_stdin(cmd)
elif m.returnkey == 'menuSettings':
wxsettings.WXSettings(mestate.settings)
elif m.returnkey.startswith("mode-"):
idx = int(m.returnkey[5:])
mestate.flightmode_selections[idx] = m.IsChecked()
else:
print('Unknown menu selection: %s' % m.returnkey) | 0.001736 |
def _construct_regex(cls, fmt):
"""Given a format string, construct the regex with class attributes."""
return re.compile(fmt.format(**vars(cls)), flags=re.U) | 0.011494 |
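A self-contained sketch of the classmethod idiom above: the format placeholders are filled from the class attributes exposed by vars(cls). The Parser class and its attributes here are hypothetical, introduced only for illustration:
import re

class Parser:
    WORD = r'[A-Za-z]+'
    NUM = r'\d+'

    @classmethod
    def _construct_regex(cls, fmt):
        # fill {WORD} and {NUM} from the class attributes
        return re.compile(fmt.format(**vars(cls)), flags=re.U)

pattern = Parser._construct_regex(r'^{WORD}-{NUM}$')
print(bool(pattern.match('ticket-42')))   # True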
def AddNEP5Token(self, token):
"""
Add a NEP-5 compliant token to the wallet.
Args:
token (NEP5Token): an instance of type neo.Wallets.NEP5Token.
Note:
            Logs an error if the token already exists in the wallet.
"""
if token.ScriptHash.ToBytes() in self._tokens.keys():
logger.error("Token already in wallet")
return
self._tokens[token.ScriptHash.ToBytes()] = token | 0.006173 |
def pop(self):
"""
Removes the last traversal path node from this traversal path.
"""
node = self.nodes.pop()
self.__keys.remove(node.key) | 0.011236 |
def get_version():
"""Return the version of the Sparser executable on the path.
Returns
-------
version : str
The version of Sparser that is found on the Sparser path.
"""
assert sparser_path is not None, "Sparser path is not defined."
with open(os.path.join(sparser_path, 'version.txt'), 'r') as f:
version = f.read().strip()
return version | 0.002564 |
def initialize_time(self, control_freq):
"""
Initializes the time constants used for simulation.
"""
self.cur_time = 0
self.model_timestep = self.sim.model.opt.timestep
if self.model_timestep <= 0:
raise XMLError("xml model defined non-positive time step")
self.control_freq = control_freq
if control_freq <= 0:
raise SimulationError(
"control frequency {} is invalid".format(control_freq)
)
self.control_timestep = 1. / control_freq | 0.003591 |
def lock_retention_policy(self, client=None):
"""Lock the bucket's retention policy.
:raises ValueError:
if the bucket has no metageneration (i.e., new or never reloaded);
if the bucket has no retention policy assigned;
if the bucket's retention policy is already locked.
"""
if "metageneration" not in self._properties:
raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
policy = self._properties.get("retentionPolicy")
if policy is None:
raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
if policy.get("isLocked"):
raise ValueError("Bucket's retention policy is already locked.")
client = self._require_client(client)
query_params = {"ifMetagenerationMatch": self.metageneration}
if self.user_project is not None:
query_params["userProject"] = self.user_project
path = "/b/{}/lockRetentionPolicy".format(self.name)
api_response = client._connection.api_request(
method="POST", path=path, query_params=query_params, _target_object=self
)
self._set_properties(api_response) | 0.004039 |
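A hedged usage sketch with google-cloud-storage, assuming default credentials and a bucket whose retention policy has already been set; reload() is what populates the metageneration that the lock call checks for. The bucket name is made up:
from google.cloud import storage

client = storage.Client()                          # assumes default credentials
bucket = client.get_bucket('my-archive-bucket')    # hypothetical bucket name
bucket.retention_period = 7 * 24 * 3600            # one week, in seconds
bucket.patch()                                     # persist the policy
bucket.reload()                                    # refresh metageneration
bucket.lock_retention_policy()                     # irreversible once locked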
def createPen(self, altStyle=None, altWidth=None):
""" Creates a pen from the config values with the style overridden by altStyle if the
None-option is selected in the combo box.
"""
pen = self.configValue
if pen is not None:
style = self.findByNodePath('style').configValue
if style is None and altStyle is not None:
pen.setStyle(altStyle)
width = self.findByNodePath('width').configValue
if width == 0.0 and altWidth is not None:
#logger.debug("Setting altWidth = {!r}".format(altWidth))
pen.setWidthF(altWidth)
return pen | 0.005926 |
def run(self):
"""Run command."""
for ext in ['*.so', '*.pyd']:
for file in glob.glob('./pybase64/' + ext):
log.info("removing '%s'", file)
if self.dry_run:
continue
os.remove(file) | 0.00722 |
def register_group(self, name, policies=None, mount_point=DEFAULT_MOUNT_POINT):
"""Register a new group and maps a set of policies to it.
Supported methods:
POST: /auth/{mount_point}/groups/{name}. Produces: 204 (empty body)
:param name: The name of the group.
:type name: str | unicode
:param policies: The list or comma-separated string of policies associated with the group.
:type policies: list
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {
'policies': policies,
}
api_path = '/v1/auth/{mount_point}/groups/{name}'.format(
mount_point=mount_point,
name=name,
)
return self._adapter.post(
url=api_path,
json=params,
) | 0.003119 |
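A hedged call sketch, assuming this method lives on an hvac-style auth-method class already wired to an authenticated adapter; the instance name, group name, policies, and mount point below are all made up:
# Hypothetical usage; `okta_auth` stands for an instance of the class this
# method is defined on, backed by an authenticated adapter.
response = okta_auth.register_group(
    name='engineering',
    policies=['default', 'dev-read'],
    mount_point='okta',
)
print(response.status_code)   # 204 on success, per the docstring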
def save_dispatcher(dsp, path):
"""
Write Dispatcher object in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param path:
File or filename to write.
File names ending in .gz or .bz2 will be compressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_data('a', default_value=1)
'a'
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_dispatcher(dsp, file_name)
"""
import dill
with open(path, 'wb') as f:
dill.dump(dsp, f) | 0.001068 |
def get(self, key, *, encoding=_NOTSET):
"""Get the value of a key."""
return self.execute(b'GET', key, encoding=encoding) | 0.014493 |
def construct_datapipeline(env='',
generated=None,
previous_env=None,
region='us-east-1',
settings=None,
pipeline_data=None):
"""Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
        settings (dict): Environment settings from configurations.
        pipeline_data (dict): Pipeline-level settings, e.g. promote
            restrictions and the owner email address.
Returns:
dict: Pipeline JSON template rendered with configurations.
"""
LOG.info('%s block for [%s].', env, region)
if env.startswith('prod'):
template_name = 'pipeline/pipeline_{}_datapipeline.json.j2'.format(env)
else:
template_name = 'pipeline/pipeline_stages_datapipeline.json.j2'
LOG.debug('%s info:\n%s', env, pformat(settings))
gen_app_name = generated.app_name()
data = copy.deepcopy(settings)
data['app'].update({
'appname': gen_app_name,
'repo_name': generated.repo,
'group_name': generated.project,
'environment': env,
'region': region,
'previous_env': previous_env,
'promote_restrict': pipeline_data['promote_restrict'],
'owner_email': pipeline_data['owner_email']
})
LOG.debug('Block data:\n%s', pformat(data))
pipeline_json = get_template(template_file=template_name, data=data, formats=generated)
return pipeline_json | 0.001134 |
def bitsBitOp(self, other, op, getVldFn, reduceCheckFn):
"""
:attention: If other is Bool signal, convert this to bool
(not ideal, due VHDL event operator)
"""
other = toHVal(other)
iamVal = isinstance(self, Value)
otherIsVal = isinstance(other, Value)
if iamVal and otherIsVal:
other = other._auto_cast(self._dtype)
return bitsBitOp__val(self, other, op, getVldFn)
else:
if other._dtype == BOOL:
self = self._auto_cast(BOOL)
return op._evalFn(self, other)
elif self._dtype == other._dtype:
pass
else:
raise TypeError("Can not apply operator %r (%r, %r)" %
(op, self._dtype, other._dtype))
if otherIsVal:
r = reduceCheckFn(self, other)
if r is not None:
return r
elif iamVal:
r = reduceCheckFn(other, self)
if r is not None:
return r
return Operator.withRes(op, [self, other], self._dtype) | 0.000949 |
def store(self, transient_file, persistent_file):
'''Makes PersistentFile from TransientFile'''
#for i in range(5):
# persistent_file = PersistentFile(self.persistent_root,
# persistent_name, self)
# if not os.path.exists(persistent_file.path):
# break
#else:
# raise Exception('Unable to find free file name')
dirname = os.path.dirname(persistent_file.path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
os.rename(transient_file.path, persistent_file.path)
return persistent_file | 0.006192 |
def get_version_naive(cls, name, ignore=''):
""" Checks a string for a possible version of an object (no prefix, no suffix) without filtering date out
Assumes only up to 4 digit padding
:param name: str, string that represents a possible name of an object
:return: (float, int, list(str), None), gets the version number then the string matches
"""
match = cls._get_regex_search(name, cls.REGEX_VERSION.format(SEP=cls.REGEX_SEPARATORS), ignore=ignore)
if match is not None:
if len(match) > 1:
for m in match:
m.update({'version': int(m['match'].upper().replace('V', ''))})
compound_version = '.'.join([str(m['version']) for m in match])
compound_version = float(compound_version) if compound_version.count('.') == 1 else compound_version
return {'compound_matches': match,
'compound_version': compound_version,
'pattern': match[0]['pattern'],
'input': match[0]['input']}
elif len(match) == 1:
match = match[0]
match.update({'version': int(match['match'].upper().replace('V', ''))})
return match
return None | 0.00613 |
def randombytes(n):
"""Return n random bytes."""
# Use /dev/urandom if it is available. Fall back to random module
# if not. It might be worthwhile to extend this function to use
# other platform-specific mechanisms for getting random bytes.
if os.path.exists("/dev/urandom"):
f = open("/dev/urandom")
s = f.read(n)
f.close()
return s
else:
L = [chr(random.randrange(0, 256)) for i in range(n)]
return "".join(L) | 0.002058 |
def _database_from_key(self, key):
"""
gets the database name for the given key. Should ensure a uniform spread
of keys over the databases in order to minimize waiting times. Since the
database has to be locked for updates and multiple processes want to write,
each process has to wait until the lock has been released.
By default the LRU databases will be stored in a sub directory "traj_info_usage"
lying next to the main database.
:param key: hash of the TrajInfo instance
:return: str, database path
"""
if not self.filename:
return None
from pyemma.util.files import mkdir_p
hash_value_long = int(key, 16)
        # bin the hash into one of 10 different databases
# TODO: make a configuration parameter out of this number
db_name = str(hash_value_long)[-1] + '.db'
directory = os.path.dirname(self.filename) + os.path.sep + 'traj_info_usage'
mkdir_p(directory)
return os.path.join(directory, db_name) | 0.006573 |
def _sqlfile_to_statements(sql):
"""
Takes a SQL string containing 0 or more statements and returns a
list of individual statements as strings. Comments and
empty statements are ignored.
"""
statements = (sqlparse.format(stmt, strip_comments=True).strip() for stmt in sqlparse.split(sql))
return [stmt for stmt in statements if stmt] | 0.008287 |
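A quick usage sketch, assuming sqlparse is installed and the helper above is in scope; the comment line should be stripped and each statement returned separately (output shown is the roughly expected result):
sql = """
-- schema bootstrap
CREATE TABLE users (id INTEGER PRIMARY KEY);

INSERT INTO users VALUES (1);
"""
for stmt in _sqlfile_to_statements(sql):
    print(stmt)
# CREATE TABLE users (id INTEGER PRIMARY KEY);
# INSERT INTO users VALUES (1);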
def print_minized_help(self, *, no_pager=False):
"""Return the formatted help text.
Override default ArgumentParser to just include the usage and the
description. The `help` action is used for provide more detail.
"""
formatter = self._get_formatter()
formatter.add_usage(
self.usage, self._actions,
self._mutually_exclusive_groups)
formatter.add_text(self.description)
if no_pager:
print(formatter.format_help())
else:
print_with_pager(formatter.format_help()) | 0.003436 |
def get_loc(self, key, method=None, tolerance=None):
"""Adapted from pandas.tseries.index.DatetimeIndex.get_loc"""
if isinstance(key, str):
return self._get_string_slice(key)
else:
return pd.Index.get_loc(self, key, method=method,
tolerance=tolerance) | 0.00597 |
def clicks(times=None, frames=None, sr=22050, hop_length=512,
click_freq=1000.0, click_duration=0.1, click=None, length=None):
"""Returns a signal with the signal `click` placed at each specified time
Parameters
----------
times : np.ndarray or None
times to place clicks, in seconds
frames : np.ndarray or None
frame indices to place clicks
sr : number > 0
desired sampling rate of the output signal
hop_length : int > 0
if positions are specified by `frames`, the number of samples between frames.
click_freq : float > 0
frequency (in Hz) of the default click signal. Default is 1KHz.
click_duration : float > 0
duration (in seconds) of the default click signal. Default is 100ms.
click : np.ndarray or None
optional click signal sample to use instead of the default blip.
length : int > 0
desired number of samples in the output signal
Returns
-------
click_signal : np.ndarray
Synthesized click signal
Raises
------
ParameterError
- If neither `times` nor `frames` are provided.
- If any of `click_freq`, `click_duration`, or `length` are out of range.
Examples
--------
>>> # Sonify detected beat events
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> y_beats = librosa.clicks(frames=beats, sr=sr)
>>> # Or generate a signal of the same length as y
>>> y_beats = librosa.clicks(frames=beats, sr=sr, length=len(y))
>>> # Or use timing instead of frame indices
>>> times = librosa.frames_to_time(beats, sr=sr)
>>> y_beat_times = librosa.clicks(times=times, sr=sr)
>>> # Or with a click frequency of 880Hz and a 500ms sample
>>> y_beat_times880 = librosa.clicks(times=times, sr=sr,
... click_freq=880, click_duration=0.5)
Display click waveform next to the spectrogram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S = librosa.feature.melspectrogram(y=y, sr=sr)
>>> ax = plt.subplot(2,1,2)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,1, sharex=ax)
>>> librosa.display.waveplot(y_beat_times, sr=sr, label='Beat clicks')
>>> plt.legend()
>>> plt.xlim(15, 30)
>>> plt.tight_layout()
"""
# Compute sample positions from time or frames
if times is None:
if frames is None:
raise ParameterError('either "times" or "frames" must be provided')
positions = frames_to_samples(frames, hop_length=hop_length)
else:
# Convert times to positions
positions = time_to_samples(times, sr=sr)
if click is not None:
# Check that we have a well-formed audio buffer
util.valid_audio(click, mono=True)
else:
# Create default click signal
if click_duration <= 0:
raise ParameterError('click_duration must be strictly positive')
if click_freq <= 0:
raise ParameterError('click_freq must be strictly positive')
angular_freq = 2 * np.pi * click_freq / float(sr)
click = np.logspace(0, -10,
num=int(np.round(sr * click_duration)),
base=2.0)
click *= np.sin(angular_freq * np.arange(len(click)))
# Set default length
if length is None:
length = positions.max() + click.shape[0]
else:
if length < 1:
raise ParameterError('length must be a positive integer')
# Filter out any positions past the length boundary
positions = positions[positions < length]
# Pre-allocate click signal
click_signal = np.zeros(length, dtype=np.float32)
# Place clicks
for start in positions:
# Compute the end-point of this click
end = start + click.shape[0]
if end >= length:
click_signal[start:] += click[:length - start]
else:
# Normally, just add a click here
click_signal[start:end] += click
return click_signal | 0.000707 |
def create_key_id(vault, name, version=None):
"""
:param vault: The vault uri.
:type vault: str
:param name: The key name.
:type name: str
:param version: The key version.
:type version: str
:rtype: KeyVaultId
"""
return KeyId(vault=vault, name=name, version=version) | 0.005764 |
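A hedged usage sketch in the Azure Key Vault style; the vault URL, key name, and version below are made up, and KeyId is assumed to build the full key identifier from its parts as the docstring implies:
# Hypothetical values only.
key_id = create_key_id(
    vault='https://myvault.vault.azure.net',
    name='app-signing-key',
    version='5c9b6fce8a1c4a2b',   # omit to refer to the latest version
)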
def downsample(time_series: DataFrame, freq: str) -> DataFrame:
"""
Downsample the given route, stop, or feed time series,
(outputs of :func:`.routes.compute_route_time_series`,
:func:`.stops.compute_stop_time_series`, or
:func:`.miscellany.compute_feed_time_series`,
respectively) to the given Pandas frequency string (e.g. '15Min').
Return the given time series unchanged if the given frequency is
shorter than the original frequency.
"""
f = time_series.copy()
# Can't downsample to a shorter frequency
if f.empty or pd.tseries.frequencies.to_offset(freq) < f.index.freq:
return f
result = None
if "stop_id" in time_series.columns.names:
# It's a stops time series
result = f.resample(freq).sum()
else:
# It's a route or feed time series.
inds = [
"num_trips",
"num_trip_starts",
"num_trip_ends",
"service_distance",
"service_duration",
]
frames = []
# Resample num_trips in a custom way that depends on
# num_trips and num_trip_ends
def agg_num_trips(group):
return (
group["num_trips"].iloc[-1]
+ group["num_trip_ends"].iloc[:-1].sum()
)
num_trips = f.groupby(pd.Grouper(freq=freq)).apply(agg_num_trips)
frames.append(num_trips)
# Resample the rest of the indicators via summing
frames.extend([f[ind].resample(freq).agg("sum") for ind in inds[1:]])
g = pd.concat(frames, axis=1, keys=inds)
# Calculate speed and add it to f. Can't resample it.
speed = g["service_distance"] / g["service_duration"]
speed = pd.concat({"service_speed": speed}, axis=1)
result = pd.concat([g, speed], axis=1)
# Reset column names and sort the hierarchical columns to allow slicing;
# see http://pandas.pydata.org/pandas-docs/stable/advanced.html#sorting-a-multiindex
result.columns.names = f.columns.names
result = result.sort_index(axis=1, sort_remaining=True)
return result | 0.000944 |
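A hedged usage sketch, assuming `ts` is a route or feed time series of the kind referenced in the docstring (e.g. computed at 5-minute frequency elsewhere); both calls below are illustrative:
# Hypothetical: `ts` is a route time series indexed at 5-minute frequency.
hourly = downsample(ts, '1H')        # aggregate up to hourly bins
unchanged = downsample(ts, '1Min')   # shorter than the original -> returned as-is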
def get_lb_pkgs(self):
"""Retrieves the local load balancer packages.
:returns: A dictionary containing the load balancer packages
"""
_filter = {'items': {'description':
utils.query_filter('*Load Balancer*')}}
packages = self.prod_pkg.getItems(id=0, filter=_filter)
pkgs = []
for package in packages:
if not package['description'].startswith('Global'):
pkgs.append(package)
return pkgs | 0.003922 |
def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.service_windows``
"""
table = 'service_windows'
problems = []
# Preliminary checks
if pfeed.service_windows is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.service_windows.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service window ID
problems = gt.check_column_id(problems, table, f,
'service_window_id')
# Check start_time and end_time
for column in ['start_time', 'end_time']:
problems = gt.check_column(problems, table, f, column, gt.valid_time)
# Check weekday columns
v = lambda x: x in range(2)
for col in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
problems = gt.check_column(problems, table, f, col, v)
return gt.format_problems(problems, as_df=as_df) | 0.003425 |
def main():
"""
This is the main body of the process that does the work.
Summary:
- load the raw data
- read in rules list
- create log events for AIKIF according to rules [map]
- create new facts / reports based on rules [report]
OUTPUT =
AIKIF mapping : Date_of_transaction => event
AIKIF mapping : Amount => fact
AIKIF mapping : Details => location
New column : trans_type = DB WHERE amount > 0 ELSE CR
summing : details contains "CALTEX" into Travel Expense
Done
"""
print('AIKIF example: Processing Finance data\n')
data = read_bank_statements('your_statement.csv')
print(data)
maps = load_column_maps()
rules = load_rules()
for m in maps:
print('AIKIF mapping : ' + m[0] + ' => ' + m[1])
for rule in rules:
#print(rule)
if rule[0] == 'agg':
print('summing : ' + rule[1] + ' into ' + rule[2] )
elif rule[0] == 'derive':
print('New column : ' + rule[1] + ' = ' + rule[2] + ' WHERE ' + rule[1] + ' ELSE ' + rule[3] )
print('Done\n') | 0.010708 |
def _output_current_byte(self):
"""
Prints out the ASCII value of the current byte
"""
if self.tape[self.pointer] is None:
print "{}".format(chr(0)),
else:
print "{}".format(chr(int(self.tape[self.pointer]))), | 0.007326 |