content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
import unittest
def test():
"""Runs the unit tests without test coverage."""
tests = unittest.TestLoader().discover('cabotage/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1 | bcb57638bea41f3823cd22aa7a43b159591ad99b | 4,000 |
def nm_to_uh(s):
"""Get the userhost part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("!")[1] | 5e6c07b7b287000a401ba81117bc55be47cc9a24 | 4,001 |
def upload_example_done(request, object_id):
"""
This view is a callback that receives POST data
from uploadify when the upload is complete.
See also /media/js/uploadify_event_handlers.js.
"""
example = get_object_or_404(Example, id=object_id)
#
# Grab the post data sent by our OnComplete handler and parse it. Set the fields
# on our example object as appropriate and save.
#
if request.method == 'POST':
post_response = request.POST['s3_response']
location_rexp = '<Location>(.*)</Location>'
example.file_url = unquote_plus(re.search(location_rexp, post_response).group(1))
example.file_name = request.POST['file_obj[name]']
example.file_size = request.POST['file_obj[size]']
example.file_upload_speed = request.POST['upload_data[speed]']
example.file_uploaded = datetime.now()
example.save()
print(example.file_url)
print(example.file_name)
print(example.file_uploaded)
return HttpResponse((reverse('examples_example_detail', args=[example.id]))) | b748cb1708ddfdd2959f723902c45668c8774df2 | 4,002 |
def epoch_in_milliseconds(epoch):
"""
>>> epoch_in_milliseconds(datetime_from_seconds(-12345678999.0001))
-12345679000000
"""
return epoch_in_seconds(epoch) * 1000 | 75ae0779ae2f6d1987c1fdae0a6403ef725a6893 | 4,003 |
def get_workspaces(clue, workspaces):
"""
Imports all workspaces if none were provided.
Returns list of workspace names
"""
if workspaces is None:
logger.info("no workspaces specified, importing all toggl workspaces...")
workspaces = clue.get_toggl_workspaces()
logger.info("The following workspaces will be imported: %s", str(workspaces))
return workspaces | aae8b5f2585a7865083b433f4aaf9874d5ec500e | 4,004 |
import pprint
def create_hparams(hparams_string=None, hparams_json=None, verbose=True):
"""Create model hyperparameters. Parse nondefault from given string."""
hparams = tf.contrib.training.HParams(
training_stage='train_style_extractor',#['train_text_encoder','train_style_extractor','train_style_attention','train_refine_layernorm']
full_refine=False,
################################
# Experiment Parameters #
################################
epochs=500,
iters=1000000,
iters_per_checkpoint=5000,
log_per_checkpoint=1,
seed=1234,
dynamic_loss_scaling=True,
fp16_run=False,
distributed_run=False,
dist_backend="nccl",
dist_url="tcp://localhost:54321",
cudnn_enabled=True,
cudnn_benchmark=False,
numberworkers=8,
ignore_layers=['embedding.weight'],
################################
# Data Parameters #
################################
load_mel=True,
training_files='../../../spk_ttsdatafull_libri500_unpacked/training_with_mel_frame.txt',
mel_dir='../../../spk_ttsdatafull_libri500_unpacked/',
text_cleaners=['english_cleaners'],
is_partial_refine=False,
is_refine_style=False,
use_GAN=False,
GAN_type='wgan-gp',#['lsgan', 'wgan-gp']
GAN_alpha=1.0,
GP_beata=10.0,
Generator_pretrain_step=1,
add_noise=False,
################################
# Audio Parameters #
################################
max_wav_value=32768.0,
num_mels=80,
num_freq=1025,
min_mel_freq=0,
max_mel_freq=8000,
sample_rate=16000,
frame_length_ms=50,
frame_shift_ms=12.5,
preemphasize=0.97,
min_level_db=-100,
ref_level_db=0, # suggest use 20 for griffin-lim and 0 for wavenet
max_abs_value=4,
symmetric_specs=True, # if true, suggest use 4 as max_abs_value
# Eval:
griffin_lim_iters=60,
power=1.5, # Power to raise magnitudes to prior to Griffin-Lim
threshold=0.5, # for stop token
minlenratio=0.0, # Minimum length ratio in inference.
maxlenratio=50.0, # Maximum length ratio in inference.
use_phone=True,
phone_set_file="../../../spk_ttsdatafull_libri500_unpacked/phone_set.json",
n_symbols=5000, # len(symbols),
embed_dim=512, # Dimension of character embedding.
pretrained_model=None,
# VQVAE
use_vqvae=False,
aux_encoder_kernel_size=3,
aux_encoder_n_convolutions=2,
aux_encoder_embedding_dim=512,
speaker_embedding_dim=256,
commit_loss_weight=1.0, # Contribution of commitment loss, between 0.1 and 2.0 (default: 1.0)
eprenet_conv_layers=3, # Number of encoder prenet convolution layers.
eprenet_conv_chans=512, # Number of encoder prenet convolution channels.
eprenet_conv_filts=5, # Filter size of encoder prenet convolution.
dprenet_layers=2, # Number of decoder prenet layers.
dprenet_units=256, # Number of decoder prenet hidden units.
positionwise_layer_type="linear", # FFN or conv or (conv+ffn) in encoder after self-attention
positionwise_conv_kernel_size=1, # Filter size of conv
elayers=6, # Number of encoder layers.
eunits=1536, # Number of encoder hidden units.
adim=384, # Number of attention transformation dimensions.
aheads=4, # Number of heads for multi head attention.
dlayers=6, # Number of decoder layers.
dunits=1536, # Number of decoder hidden units.
duration_predictor_layers=2,
duration_predictor_chans=384,
duration_predictor_kernel_size=3,
use_gaussian_upsampling=False,
postnet_layers=5, # Number of postnet layers.
postnet_chans=512, # Number of postnet channels.
postnet_filts=5, # Filter size of postnet.
use_scaled_pos_enc=True, # Whether to use trainable scaled positional encoding.
use_batch_norm=True, # Whether to use batch normalization in postnet.
encoder_normalize_before=True, # Whether to perform layer normalization before encoder block.
decoder_normalize_before=True, # Whether to perform layer normalization before decoder block.
encoder_concat_after=False, # Whether to concatenate attention layer's input and output in encoder.
decoder_concat_after=False, # Whether to concatenate attention layer's input and output in decoder.
reduction_factor=1, # Reduction factor.
is_multi_speakers=True,
is_spk_layer_norm=True,
pretrained_spkemb_dim=512,
n_speakers=8000,
spk_embed_dim=128, # Number of speaker embedding dimensions.
spk_embed_integration_type="concat", # concat or add, How to integrate speaker embedding.
use_ssim_loss=True,
use_f0=False,
log_f0=False,
f0_joint_train=False,
f0_alpha=0.1,
stop_gradient_from_pitch_predictor=False,
pitch_predictor_layers=2,
pitch_predictor_chans=384,
pitch_predictor_kernel_size=3,
pitch_predictor_dropout=0.5,
pitch_embed_kernel_size=9,
pitch_embed_dropout=0.5,
is_multi_styles=False,
n_styles=6,
style_embed_dim=128, # Number of style embedding dimensions.
style_embed_integration_type="concat", # concat or add, How to integrate style embedding.
style_vector_type='mha',#gru or mha, How to generate style vector.
style_query_level='sentence',#phone or sentence
# value: pytorch, xavier_uniform, xavier_normal, kaiming_uniform, kaiming_normal
transformer_init="pytorch", # How to initialize transformer parameters.
initial_encoder_alpha=1.0,
initial_decoder_alpha=1.0,
transformer_enc_dropout_rate=0.1, # Dropout rate in encoder except attention & positional encoding.
transformer_enc_positional_dropout_rate=0.1, # Dropout rate after encoder positional encoding.
transformer_enc_attn_dropout_rate=0.1, # Dropout rate in encoder self-attention module.
transformer_dec_dropout_rate=0.1, # Dropout rate in decoder except attention & positional encoding.
transformer_dec_positional_dropout_rate=0.1, # Dropout rate after decoder positional encoding.
transformer_dec_attn_dropout_rate=0.1, # Dropout rate in decoder self-attention module.
transformer_enc_dec_attn_dropout_rate=0.1, # Dropout rate in encoder-decoder attention module.
duration_predictor_dropout_rate=0.1,
eprenet_dropout_rate=0.5, # Dropout rate in encoder prenet.
dprenet_dropout_rate=0.5, # Dropout rate in decoder prenet.
postnet_dropout_rate=0.5, # Dropout rate in postnet.
use_masking=True, # Whether to apply masking for padded part in loss calculation.
use_weighted_masking=False, # Whether to apply weighted masking in loss calculation.
bce_pos_weight=1.0, # Positive sample weight in bce calculation (only for use_masking=true).
loss_type="L2", # L1, L2, L1+L2, How to calculate loss.
# Reference:
# Efficiently Trainable Text-to-Speech System Based on Deep Convolutional Networks with Guided Attention
# https://arxiv.org/abs/1710.08969
use_gst=False,
use_mutual_information=False,
mutual_information_lambda=0.1,
mi_loss_type='unbias',#['bias','unbias']
style_extractor_presteps=300000,
choosestl_steps=100000,
gst_train_att=False,
att_name='100k_noshuffle_gru',
shuffle=False,
gst_reference_encoder='multiheadattention',#'multiheadattention' or 'convs'
gst_reference_encoder_mha_layers=4,
gst_tokens=10,
gst_heads=4,
gst_conv_layers=6,
gst_conv_chans_list=(32, 32, 64, 64, 128, 128),
gst_conv_kernel_size=3,
gst_conv_stride=2,
gst_gru_layers=1,
gst_gru_units=128,
step_use_predicted_dur=20000,
################################
# Optimization Hyperparameters #
################################
learning_rate_decay_scheme='noam',
use_saved_learning_rate=True,
warmup_steps=10000, # Optimizer warmup steps.
decay_steps=12500, # halves the learning rate every 12.5k steps
decay_rate=0.5, # learning rate decay rate
# decay_end=300000,
# decay_rate=0.01,
initial_learning_rate=0.5, # Initial value of learning rate.
final_learning_rate=1e-5,
weight_decay=1e-6,
grad_clip_thresh=1.0,
batch_criterion='utterance',
batch_size=2,
mask_padding=True # set model's padded outputs to padded values
)
if hparams_json:
print('Parsing hparams in json # {}'.format(hparams_json))
with open(hparams_json) as json_file:
hparams.parse_json(json_file.read())
if hparams_string:
print('Parsing command line hparams # {}'.format(hparams_string))
hparams.parse(hparams_string)
# if hparams.use_phone:
# from text.phones import Phones
# phone_class = Phones(hparams.phone_set_file)
# hparams.n_symbols = len(phone_class._symbol_to_id)
# del phone_class
if verbose:
print('Final parsed hparams:')
pprint.pprint(hparams.values())
return hparams | d4d255241b7322a10369bc313a0ddc971c0115a6 | 4,005 |
def ChromiumFetchSync(name, work_dir, git_repo, checkout='origin/master'):
"""Some Chromium projects want to use gclient for clone and dependencies."""
if os.path.isdir(work_dir):
print('%s directory already exists' % name)
else:
# Create Chromium repositories one deeper, separating .gclient files.
parent = os.path.split(work_dir)[0]
Mkdir(parent)
proc.check_call(['gclient', 'config', git_repo], cwd=parent)
proc.check_call(['git', 'clone', git_repo], cwd=parent)
proc.check_call(['git', 'fetch'], cwd=work_dir)
proc.check_call(['git', 'checkout', checkout], cwd=work_dir)
proc.check_call(['gclient', 'sync'], cwd=work_dir)
return (name, work_dir) | 8bb0f593eaf874ab1a6ff95a913ef34b566a47bc | 4,006 |
def kld_error(res, error='simulate', rstate=None, return_new=False,
approx=False):
"""
Computes the `Kullback-Leibler (KL) divergence
<https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`_ *from* the
discrete probability distribution defined by `res` *to* the discrete
probability distribution defined by a **realization** of `res`.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
:class:`~dynesty.results.Results` instance for the distribution we
are computing the KL divergence *from*.
error : {`'jitter'`, `'resample'`, `'simulate'`}, optional
The error method employed, corresponding to :meth:`jitter_run`,
:meth:`resample_run`, and :meth:`simulate_run`, respectively.
Default is `'simulate'`.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
return_new : bool, optional
Whether to return the realization of the run used to compute the
KL divergence. Default is `False`.
approx : bool, optional
Whether to approximate all sets of uniform order statistics by their
associated marginals (from the Beta distribution). Default is `False`.
Returns
-------
kld : `~numpy.ndarray` with shape (nsamps,)
The cumulative KL divergence defined *from* `res` *to* a
random realization of `res`.
new_res : :class:`~dynesty.results.Results` instance, optional
The :class:`~dynesty.results.Results` instance corresponding to
the random realization we computed the KL divergence *to*.
"""
# Define our original importance weights.
logp2 = res.logwt - res.logz[-1]
# Compute a random realization of our run.
if error == 'jitter':
new_res = jitter_run(res, rstate=rstate, approx=approx)
elif error == 'resample':
new_res, samp_idx = resample_run(res, rstate=rstate, return_idx=True)
logp2 = logp2[samp_idx] # re-order our original results to match
elif error == 'simulate':
new_res, samp_idx = resample_run(res, rstate=rstate, return_idx=True)
new_res = jitter_run(new_res)
logp2 = logp2[samp_idx] # re-order our original results to match
else:
raise ValueError("Input `'error'` option '{0}' is not valid."
.format(error))
# Define our new importance weights.
logp1 = new_res.logwt - new_res.logz[-1]
# Compute the KL divergence.
kld = np.cumsum(np.exp(logp1) * (logp1 - logp2))
if return_new:
return kld, new_res
else:
return kld | 430ad6cb1c25c0489343717d7cf8a44bdfb5725d | 4,007 |
def article_detail(request, slug):
"""
Show details of the article
"""
article = get_article_by_slug(slug=slug, annotate=True)
comment_form = CommentForm()
total_views = r.incr(f'article:{article.id}:views')
return render(request, 'articles/post/detail.html',
{'article': article,
'section': article.category,
'comment_form': comment_form,
'total_views': total_views}) | 079d31509fe573ef207600e007e51ac14e9121c4 | 4,008 |
def deactivate(userid, tfa_response):
"""
Deactivate 2FA for a specified user.
Turns off 2FA by nulling-out the ``login.twofa_secret`` field for the user record,
and clearing any remaining recovery codes.
Parameters:
userid: The user for which 2FA should be disabled.
tfa_response: User-supplied response. May be either the Google Authenticator
(or other app) supplied code, or a recovery code.
Returns: Boolean True if 2FA was successfully disabled, otherwise Boolean False if the
verification of `tfa_response` failed (bad challenge-response or invalid recovery code).
"""
# Sanity checking for length requirement of recovery code/TOTP is performed in verify() function
if verify(userid, tfa_response):
# Verification passed, so disable 2FA
force_deactivate(userid)
return True
else:
return False | 1ac0f5e716675ccb118c9656bcc7c9b4bd3f9606 | 4,009 |
def hsi_normalize(data, max_=4096, min_ = 0, denormalize=False):
"""
Custom normalizer for RGB and HSI images.
Normalizes to the range [-1, 1]. It also denormalizes when denormalize=True.
"""
HSI_MAX = max_
HSI_MIN = min_
NEW_MAX = 1
NEW_MIN = -1
if(denormalize):
scaled = (data - NEW_MIN) * (HSI_MAX - HSI_MIN)/(NEW_MAX - NEW_MIN) + HSI_MIN
return scaled.astype(np.float32)
scaled = (data - HSI_MIN) * (NEW_MAX - NEW_MIN)/(HSI_MAX - HSI_MIN) + NEW_MIN
return scaled.astype(np.float32) | 7f276e90843c81bc3cf7715e54dadd4a78162f93 | 4,010 |
from typing import Optional
def safe_elem_text(elem: Optional[ET.Element]) -> str:
"""Return the stripped text of an element if available. If not available, return the empty string"""
text = getattr(elem, "text", "")
return (text or "").strip()
def resource(filename):
"""Returns the URL a static resource, including versioning."""
return "/static/{0}/{1}".format(app.config["VERSION"], filename) | b330c052180cfd3d1b622c18cec7e633fa7a7910 | 4,012 |
import os
def get_legacy_description(location):
"""
Return the text of a legacy DESCRIPTION.rst.
"""
location = os.path.join(location, 'DESCRIPTION.rst')
if os.path.exists(location):
with open(location) as i:
return i.read() | 1d91c9875d6a6de862bed586c5bf4b67eb6e8886 | 4,013 |
import csv
def read_q_stats(csv_path):
"""Return list of Q stats from file"""
q_list = []
with open(csv_path, newline='') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
q_list.append(float(row['q']))
return q_list | f5bee4859dc4bac45c4c3e8033da1b4aba5d2818 | 4,014 |
def _validate(config):
"""Validate the configuation.
"""
diff = set(REQUIRED_CONFIG_KEYS) - set(config.keys())
if len(diff) > 0:
raise ValueError(
"config is missing required keys".format(diff))
elif config['state_initial']['status'] not in config['status_values']:
raise ValueError(
"initial status '{}' is not among the allowed status values"
.format(config['state_initial']['status']))
else:
return config | 07c92e5a5cc722efbbdc684780b1edb66aea2532 | 4,015 |
def exp_by_squaring(x, n):
"""
Compute x**n using exponentiation by squaring.
"""
if n == 0:
return 1
if n == 1:
return x
if n % 2 == 0:
return exp_by_squaring(x * x, n // 2)
return exp_by_squaring(x * x, (n - 1) // 2) * x | ef63d2bf6f42690fd7c5975af0e961e2a3c6172f | 4,016 |
def _compare(expected, actual):
"""
Compare SslParams object with dictionary
"""
if expected is None and actual is None:
return True
if isinstance(expected, dict) and isinstance(actual, SslParams):
return expected == actual.__dict__
return False | 4a82d1631b97960ecc44028df4c3e43dc664d3e5 | 4,017 |
def update_token(refresh_token, user_id):
"""
Refresh the tokens for a given user
:param: refresh_token
Refresh token of the user
:param: user_id
ID of the user for whom the token is to be generated
:returns:
Generated JWT token
"""
token = Token.query.filter_by(refresh_token=refresh_token).first()
token.access_token = Token.encode_token(user_id, "access").decode("utf-8")
token.refresh_token = Token.encode_token(user_id, "refresh").decode(
"utf-8"
)
db.session.commit()
return token | 0b91cf19f808067a9c09b33d7497548743debe14 | 4,018 |
from re import X
def minimax(board):
"""
Returns the optimal action for the current player on the board.
"""
def max_value(state, depth=0):
if ttt.terminal(state):
return (None, ttt.utility(state))
v = (None, -2)
for action in ttt.actions(state):
v = max(v, (action, min_value(ttt.result(state, action), depth+1)[1] - (depth/10)), key=lambda x: x[1])
return v
def min_value(state, depth=0):
if ttt.terminal(state):
return (None, ttt.utility(state))
v = (None, 2)
for action in ttt.actions(state):
v = min(v, (action, max_value(ttt.result(state, action), depth+1)[1] + (depth/10)), key=lambda x: x[1])
return v
if ttt.player(board) == X:
return max_value(board)[0]
elif ttt.player(board) == O:
return min_value(board)[0] | 8de42db3ad40d597bf9600bcd5fec7c7f775f84d | 4,019 |
import random
def random_order_dic_keys_into_list(in_dic):
"""
Read in dictionary keys, and return random order list of IDs.
"""
id_list = []
for key in in_dic:
id_list.append(key)
random.shuffle(id_list)
return id_list | d18ac34f983fbaff59bfd90304cd8a4a5ebad42e | 4,020 |
from typing import Union
from pathlib import Path
from typing import Dict
import json
def read_json(json_path: Union[str, Path]) -> Dict:
"""
Read json file from a path.
Args:
json_path: File path to a json file.
Returns:
Python dictionary
"""
with open(json_path, "r") as fp:
data = json.load(fp)
return data | c0b55e5363a134282977ee8a01083490e9908fcf | 4,021 |
def igraph_to_csc(g, save=False, fn="csc_matlab"):
"""
Convert an igraph to scipy.sparse.csc.csc_matrix
Positional arguments:
=====================
g - the igraph graph
Optional arguments:
===================
save - save file to disk
fn - the file name to be used when writing (appendmat = True by default)
"""
assert isinstance(g, igraph.Graph), "Arg1 'g' must be an igraph graph"
print "Creating CSC from igraph object ..."
gs = csc_matrix(g.get_adjacency().data) # Equiv of calling to_dense so may case MemError
print "CSC creation complete ..."
if save:
print "Saving to MAT file ..."
sio.savemat(fn, {"data":gs}, True) # save as MAT format only. No other options!
return gs | 12ea73531599cc03525e898e39b88f2ed0ad97c3 | 4,022 |
def xml2dict(data):
"""Turn XML into a dictionary."""
converter = XML2Dict()
if hasattr(data, 'read'):
# Then it's a file.
data = data.read()
return converter.fromstring(data) | 0c73989b4ea83b2b1c126b7f1b39c6ebc9e18115 | 4,023 |
def balance_dataset(data, size=60000):
"""Implements upsampling and downsampling for the three classes (low, medium, and high)
Parameters
----------
data : pandas DataFrame
A dataframe containing the labels indicating the different nightlight intensity bins
size : int
The number of samples per class for upsampling and downsampling
Returns
-------
pandas DataFrame
The data with relabelled and balanced nightlight intensity classes
"""
bin_labels = data.label.unique()
classes = []
for label in bin_labels:
class_ = data[data.label == label].reset_index()
if len(class_) >= size:
sample = class_.sample(
n=size, replace=False, random_state=SEED
)
elif len(class_) < size:
sample = class_.sample(
n=size, replace=True, random_state=SEED
)
classes.append(sample)
data_balanced = pd.concat(classes)
data_balanced = data_balanced.sample(
frac=1, random_state=SEED
).reset_index(drop=True)
data_balanced = data_balanced.iloc[:, 1:]
return data_balanced | 93cd5888c28f9e208379d7745790b7e1e0cb5b79 | 4,024 |
def updateStopList(userId, newStop):
"""
Updates the list of stops for the user in the dynamodb table
"""
response = dynamodb_table.query(
KeyConditionExpression=Key('userId').eq(userId))
if response and len(response["Items"]) > 0:
stops = response["Items"][0]['stops']
else:
stops = {}
if newStop['code'] in stops:
existingStop = stops[newStop['code']]
if 'buses' in existingStop:
newStop['buses'] = list(
set(existingStop['buses'] + newStop['buses']))
stops[newStop['code']] = newStop
response = dynamodb_table.update_item(
Key={
'userId': userId
},
UpdateExpression="set stops = :s",
ExpressionAttributeValues={
':s': stops
}
)
card_title = render_template('card_title')
responseText = render_template(
"add_bus_success", stop=newStop['code'], route=",".join(newStop['buses']))
return statement(responseText).simple_card(card_title, responseText) | 433ae6c4562f6a6541fc262925ce0bba6fb742ec | 4,025 |
import re
def is_blacklisted_module(module: str) -> bool:
"""Return `True` if the given module matches a blacklisted pattern."""
# Exclude stdlib modules such as the built-in "_thread"
if is_stdlib_module(module):
return False
# Allow user specified exclusions via CLI
blacklist = set.union(MODULE_BLACKLIST_PATTERNS, config.excluded_imports)
return any(re.fullmatch(p, module) for p in blacklist) | 391d63a4d8a4f24d1d3ba355745ffe0079143e68 | 4,026 |
def api_detach(sess, iqn):
"""
Detach the given volume from the instance using OCI API calls.
Parameters
----------
sess: OCISession
The OCISEssion instance..
iqn: str
The iSCSI qualified name.
Returns
-------
bool
True on success, False otherwise.
"""
if sess is None:
_logger.error("Need OCI Service to detach volume.\n"
"Make sure to install and configure "
"OCI Python SDK (python-oci-sdk)\n")
return False
for v in sess.this_instance().all_volumes():
if v.get_iqn() == iqn:
try:
print "Detaching volume"
v.detach()
return True
except OCISDKError as e:
_logger.debug("Failed to disconnect volume", exc_info=True)
_logger.error("Failed to disconnect volume %s from this instance: %s" % (iqn, e))
return False
_logger.error("Volume not found...\n")
return False | f52baa2a647c112fdde386afd46fc04fe4fe9da3 | 4,027 |
def ComponentLibrary(self, lib_name, *args, **kwargs):
"""Pseudo-builder for library to handle platform-dependent type.
Args:
self: Environment in which we were called.
lib_name: Library name.
args: Positional arguments.
kwargs: Keyword arguments.
Returns:
Passthrough return code from env.StaticLibrary() or env.SharedLibrary().
"""
# Clone and modify environment
env = _ComponentPlatformSetup(self, 'ComponentLibrary', **kwargs)
# Make appropriate library type
if env.get('COMPONENT_STATIC'):
lib_outputs = env.StaticLibrary(lib_name, *args, **kwargs)
else:
lib_outputs = env.SharedLibrary(lib_name, *args, **kwargs)
# Add dependencies on includes
env.Depends(lib_outputs, env['INCLUDES'])
# Scan library outputs for files we need to link against this library, and
# files we need to run executables linked against this library.
need_for_link = []
need_for_debug = []
need_for_run = []
for o in lib_outputs:
if o.suffix in env['COMPONENT_LIBRARY_LINK_SUFFIXES']:
need_for_link.append(o)
if o.suffix in env['COMPONENT_LIBRARY_DEBUG_SUFFIXES']:
need_for_debug.append(o)
if o.suffix == env['SHLIBSUFFIX']:
need_for_run.append(o)
all_outputs = lib_outputs
# Install library in intermediate directory, so other libs and programs can
# link against it
all_outputs += env.Replicate('$LIB_DIR', need_for_link)
# Publish output
env.Publish(lib_name, 'link', need_for_link)
env.Publish(lib_name, 'run', need_for_run)
env.Publish(lib_name, 'debug', need_for_debug)
# Add an alias to build and copy the library, and add it to the right groups
a = self.Alias(lib_name, all_outputs)
for group in env['COMPONENT_LIBRARY_GROUPS']:
SCons.Script.Alias(group, a)
# Store list of components for this library
env._StoreComponents(lib_name)
# Let component_targets know this target is available in the current mode.
env.SetTargetProperty(lib_name, TARGET_PATH=lib_outputs[0])
# If library should publish itself, publish as if it was a program
if env.get('COMPONENT_LIBRARY_PUBLISH'):
env['PROGRAM_BASENAME'] = lib_name
env.Defer(ComponentProgramDeferred)
# Return the library
return lib_outputs[0] | 8169be26c5ac30e090809fa80e3d5657a3616772 | 4,028 |
def _build_geojson_query(query):
"""
See usages below.
"""
# this is basically a translation of the postgis ST_AsGeoJSON example into sqlalchemy/geoalchemy2
return func.json_build_object(
"type",
"FeatureCollection",
"features",
func.json_agg(func.ST_AsGeoJSON(query.subquery(), maxdecimaldigits=5).cast(JSON)),
) | dd7a0893258cf95e1244458ba9bd74c5239f65c5 | 4,029 |
import urllib
def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
separator='&'):
"""URL encode a dict/`MultiDict`. If a value is `None` it will not appear
in the result string. Per default only values are encoded into the target
charset strings. If `encode_keys` is set to ``True`` unicode keys are
supported too.
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm.
.. versionadded:: 0.5
`sort`, `key`, and `separator` were added.
:param obj: the object to encode into a query string.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys.
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
if isinstance(obj, MultiDict):
items = obj.lists()
elif isinstance(obj, dict):
items = []
for k, v in obj.iteritems():
if not isinstance(v, (tuple, list)):
v = [v]
items.append((k, v))
else:
items = obj or ()
if sort:
items.sort(key=key)
tmp = []
for key, values in items:
if encode_keys and isinstance(key, unicode):
key = key.encode(charset)
else:
key = str(key)
for value in values:
if value is None:
continue
elif isinstance(value, unicode):
value = value.encode(charset)
else:
value = str(value)
tmp.append('%s=%s' % (urllib.quote(key),
urllib.quote_plus(value)))
return separator.join(tmp) | 2106032d6a4cf895c525e9db702655af3439a95c | 4,030 |
from datetime import datetime
def create_export_and_wait_for_completion(name, bucket, prefix, encryption_config, role_arn=None):
"""
Request QLDB to export the contents of the journal for the given time period and S3 configuration. Before calling
this function the S3 bucket should be created, see
:py:class:`pyqldbsamples.export_journal.create_s3_bucket_if_not_exists`
:type name: str
:param name: Name of the ledger to create a journal export for.
:type bucket: str
:param bucket: S3 bucket to write the data to.
:type prefix: str
:param prefix: S3 prefix to be prefixed to the files being written.
:type encryption_config: dict
:param encryption_config: Encryption configuration for S3.
:type role_arn: str
:param role_arn: The IAM role ARN to be used when exporting the journal.
:rtype: dict
:return: The result of the request.
"""
if role_arn is None:
role_arn = create_export_role(EXPORT_ROLE_NAME, encryption_config.get('KmsKeyArn'), ROLE_POLICY_NAME, bucket)
try:
start_time = datetime.utcnow() - timedelta(minutes=JOURNAL_EXPORT_TIME_WINDOW_MINUTES)
end_time = datetime.utcnow()
result = create_export(name, start_time, end_time, bucket, prefix, encryption_config, role_arn)
wait_for_export_to_complete(Constants.LEDGER_NAME, result.get('ExportId'))
logger.info('JournalS3Export for exportId {} is completed.'.format(result.get('ExportId')))
return result
except Exception as e:
logger.exception('Unable to create an export!')
raise e | 9fb6f66dc02d70ffafe1c388188b99b9695a6900 | 4,031 |
def sample_student(user, **kwargs):
"""create and return sample student"""
return models.Student.objects.create(user=user, **kwargs) | a70c3a181b1ee0627465f016953952e082a51c27 | 4,032 |
from datetime import datetime
def normalise_field_value(value):
""" Converts a field value to a common type/format to make comparable to another. """
if isinstance(value, datetime):
return make_timezone_naive(value)
elif isinstance(value, Decimal):
return decimal_to_string(value)
return value | 3cbc4c4d7ae027c030e70a1a2bd268bdd0ebe556 | 4,033 |
import sys
def edit_wn_list(item_list, list_name, all_values, tenant_file_name):
"""
Edit WAN network list
:param item_list: Item list to save
:param list_name: Name of List
:param all_values: All values
:param tenant_file_name: File-system friendly tenant_name
:return: shallow copy of item_list.
"""
loop = True
while loop:
action = [
("View list", 'view'),
("Add to list", 'add'),
("Remove items from list", 'remove'),
("Load/Save list", 'file'),
("Go Back", 'back')
]
banner = "\nSelect Action:"
line_fmt = "{0}: {1}"
# just pull 2nd value
selected_action = menus.quick_menu(banner, line_fmt, action)[1]
if selected_action == 'view':
print("\n{0} ({1} entries):".format(list_name, len(item_list)))
for item in item_list:
print("\t{0}".format(item))
elif selected_action == 'add':
item_list = add_to_list(item_list, list_name, all_values)
elif selected_action == 'remove':
item_list = remove_from_list(item_list, list_name, all_values)
elif selected_action == 'file':
item_list = load_save_list(item_list, list_name, all_values, tenant_file_name)
elif selected_action == 'back':
loop = False
else:
sys.exit()
# return a shallow copy of site list
return item_list[:] | 624b383d9a5aa6a925c790787de01b14dfa50dce | 4,034 |
from typing import Any
from typing import Tuple
from typing import List
import collections
import yaml
def parse_yaml(stream: Any) -> Tuple[Swagger, List[str]]:
"""
Parse the Swagger specification from the given text.
:param stream: YAML representation of the Swagger spec satisfying file interface
:return: (parsed Swagger specification, parsing errors if any)
"""
# adapted from https://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
# and https://stackoverflow.com/questions/13319067/parsing-yaml-return-with-line-number
object_pairs_hook = collections.OrderedDict
class OrderedLoader(yaml.SafeLoader):
def compose_node(self, parent, index):
# the line number where the previous token has ended (plus empty lines)
node = Composer.compose_node(self, parent, index)
node.__lineno__ = self.line + 1
return node
def construct_mapping(loader, node, deep=False):
loader.flatten_mapping(node)
mapping = Constructor.construct_pairs(loader, node, deep=deep)
ordered_hook = object_pairs_hook(mapping)
# assert not hasattr(ordered_hook, "__lineno__"), \
# "Expected ordered mapping to have no __lineno__ attribute set before"
# setattr(ordered_hook, "__lineno__", node.__lineno__)
return RawDict(adict=ordered_hook, source=stream.name, lineno=node.__lineno__)
OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
raw_dict = yaml.load(stream, OrderedLoader)
swagger = Swagger()
errors = [] # type: List[str]
adict = raw_dict.adict
tag_exists: bool = False
if 'tags' in adict:
if len(adict['tags']) > 0:
tag_exists = True
for tag in adict['tags']:
for key, value in tag.adict.items():
if key == 'name':
swagger.name = value
if swagger.name == '':
if not (OptionKey.PermitAbsenseOfTagNameIfNoTagsExist in parse_options
and not tag_exists):
errors.append('missing tag "name" in the swagger specification')
swagger.base_path = adict.get('basePath', '')
for path_id, path_dict in adict.get('paths', RawDict()).adict.items():
path, path_errors = _parse_path(raw_dict=path_dict)
path.identifier = path_id
path.swagger = swagger
errors.extend(['in path {!r}: {}'.format(path_id, error) for error in path_errors])
if not path_errors:
swagger.paths[path_id] = path
for def_id, def_dict in adict.get('definitions', RawDict()).adict.items():
typedef, def_errors = _parse_typedef(raw_dict=def_dict)
errors.extend(['in definition {!r}: {}'.format(def_id, error) for error in def_errors])
adef = Definition()
adef.swagger = swagger
adef.identifier = def_id
adef.typedef = typedef
if not def_errors:
swagger.definitions[def_id] = adef
for param_id, param_dict in adict.get('parameters', RawDict()).adict.items():
param, param_errors = _parse_parameter(raw_dict=param_dict)
errors.extend(['in parameter {!r}: {}'.format(param_id, error) for error in param_errors])
if not param_errors:
swagger.parameters[param_id] = param
swagger.raw_dict = raw_dict
return swagger, errors | 7155520db98bf1d884ba46f22b46297a901f4411 | 4,035 |
import itertools
def dataset_first_n(dataset, n, show_classes=False, class_labels=None, **kw):
"""
Plots first n images of a dataset containing tensor images.
"""
# [(img0, cls0), ..., # (imgN, clsN)]
first_n = list(itertools.islice(dataset, n))
# Split (image, class) tuples
first_n_images, first_n_classes = zip(*first_n)
if show_classes:
titles = first_n_classes
if class_labels:
titles = [class_labels[cls] for cls in first_n_classes]
else:
titles = []
return tensors_as_images(first_n_images, titles=titles, **kw) | ed8394fc2a1b607597599f36545c9182a9bc8187 | 4,036 |
def unit_conversion(thing, units, length=False):
"""converts base data between metric, imperial, or nautical units"""
if 'n/a' == thing:
return 'n/a'
try:
thing = round(thing * CONVERSION[units][0 + length], 2)
except TypeError:
thing = 'fubar'
return thing, CONVERSION[units][2 + length] | 96bfb9cda575a8b2efc959b6053284bec1d286a6 | 4,037 |
import functools
def timed(func):
"""Decorate function to print elapsed time upon completion."""
@functools.wraps(func)
def wrap(*args, **kwargs):
t1 = default_timer()
result = func(*args, **kwargs)
t2 = default_timer()
print('func:{} args:[{}, {}] took: {:.4f} sec'.format(
func.__name__, args, kwargs, t2 - t1))
return result
return wrap | d572488c674607b94e2b80235103d6f0bb27738f | 4,038 |
from typing import List
from typing import Optional
import os
def plot_rollouts_segment_wise(
segments_ground_truth: List[List[StepSequence]],
segments_multiple_envs: List[List[List[StepSequence]]],
segments_nominal: List[List[StepSequence]],
use_rec: bool,
idx_iter: int,
idx_round: Optional[int] = None,
state_labels: Optional[List[str]] = None,
save_dir: Optional[str] = None,
) -> List[plt.Figure]:
"""
Plot the different rollouts in separate figures and the different state dimensions along the columns.
:param segments_ground_truth: list of lists containing rollout segments from the ground truth environment
:param segments_multiple_envs: list of lists of lists containing rollout segments from different environment
instances, e.g. samples from a posterior coming from `NDPR`
:param segments_nominal: list of lists containing rollout segments from the nominal environment
:param use_rec: `True` if pre-recorded actions have been used to generate the rollouts
:param idx_iter: selected iteration
:param idx_round: selected round
:param state_labels: y-axes labels to override the default value which is extracted from the state space's labels
:param save_dir: if not `None` create a subfolder plots in `save_dir` and save the plots in there
:return: list of handles to the created figures
"""
# Extract the state dimension, and the number of most likely samples from the data
dim_state = segments_ground_truth[0][0].get_data_values("states")[0, :].size
num_samples = len(segments_multiple_envs[0][0])
# Extract the state labels if not explicitly given
if state_labels is None:
env_spec = segments_ground_truth[0][0].rollout_info.get("env_spec", None)
state_labels = env_spec.state_space.labels if env_spec is not None else np.empty(dim_state, dtype=object)
else:
if len(state_labels) != dim_state:
raise pyrado.ShapeErr(given=state_labels, expected_match=(dim_state,))
colors = plt.get_cmap("Reds")(np.linspace(0.5, 1.0, num_samples))
fig_list = []
for idx_r in range(len(segments_ground_truth)):
fig, axs = plt.subplots(nrows=dim_state, figsize=(16, 9), tight_layout=True, sharex="col")
for idx_state in range(dim_state):
# Plot the real segments
cnt_step = [0]
for segment_real in segments_ground_truth[idx_r]:
axs[idx_state].plot(
np.arange(cnt_step[-1], cnt_step[-1] + segment_real.length),
segment_real.get_data_values("states", truncate_last=True)[:, idx_state],
c="black",
label="real" if cnt_step[-1] == 0 else "", # only print once
)
cnt_step.append(cnt_step[-1] + segment_real.length)
# Plot the maximum likely simulated segments
for idx_seg, sml in enumerate(segments_multiple_envs[idx_r]):
for idx_dp, smdp in enumerate(sml):
axs[idx_state].plot(
np.arange(cnt_step[idx_seg], cnt_step[idx_seg] + smdp.length),
smdp.get_data_values("states", truncate_last=True)[:, idx_state],
c=colors[idx_dp],
ls="--",
label=f"sim ml {idx_dp}" if cnt_step[idx_seg] == 0 else "", # only print once for each dp set
)
# Plot the nominal simulation's segments
for idx_seg, sn in enumerate(segments_nominal[idx_r]):
axs[idx_state].plot(
np.arange(cnt_step[idx_seg], cnt_step[idx_seg] + sn.length),
sn.get_data_values("states", truncate_last=True)[:, idx_state],
c="steelblue",
ls="-.",
label="sim nom" if cnt_step[idx_seg] == 0 else "", # only print once
)
axs[idx_state].set_ylabel(state_labels[idx_state])
# Set window title and the legend, placing the latter above the plot expanding and expanding it fully
use_rec = ", using rec actions" if use_rec else ""
rnd = f"round {idx_round}, " if idx_round is not None else ""
fig.canvas.set_window_title(
f"Target Domain and Simulated Rollouts (iteration {idx_iter}, {rnd}rollout {idx_r}{use_rec})"
)
lg = axs[0].legend(
ncol=2 + num_samples,
bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),
loc="lower left",
mode="expand",
borderaxespad=0.0,
)
# Save if desired
if save_dir is not None:
for fmt in ["pdf", "pgf"]:
os.makedirs(os.path.join(save_dir, "plots"), exist_ok=True)
use_rec = "_use_rec" if use_rec else ""
rnd = f"_round_{idx_round}" if idx_round is not None else ""
fig.savefig(
os.path.join(save_dir, "plots", f"posterior_iter_{idx_iter}{rnd}_rollout_{idx_r}{use_rec}.{fmt}"),
bbox_extra_artists=(lg,),
dpi=500,
)
# Append current figure
fig_list.append(fig)
return fig_list | e5ea2a8b9007be06599e9946aa536f93a72624e2 | 4,039 |
def fade_out(s, fade=cf.output.fade_out):
"""
Apply fade-out to waveform time signal.
Arguments:
ndarray:s -- Audio time series
float:fade (cf.output.fade_out) -- Fade-out length in seconds
Returns faded waveform.
"""
length = int(fade * sr)
shape = [1] * len(s.shape)
shape[0] = length
win = np.hanning(length * 2)[length:]
win = win.reshape(shape)
if length < len(s):
s[-length:] = s[-length:] * win
return s | 0b554dbb1da7253e39c651ccdc38ba91e67a1ee4 | 4,040 |
def create_arch(T, D, units=64, alpha=0, dr_rate=.3):
"""Creates the architecture of miint"""
X = K.Input(shape=(T, D))
active_mask = K.Input(shape=(T, 1))
edges = K.Input(shape=(T, None))
ycell = netRNN(T=T, D=D, units=units, alpha=alpha, dr_rate=dr_rate)
yrnn = K.layers.RNN(ycell, return_sequences=True)
Y = yrnn((X, edges, active_mask))
return K.Model(inputs=[X, active_mask, edges], outputs=Y) | cc9723657a7a0822d73cc78f6e1698b33257f9e0 | 4,041 |
def redact(str_to_redact, items_to_redact):
""" return str_to_redact with items redacted
"""
if items_to_redact:
for item_to_redact in items_to_redact:
str_to_redact = str_to_redact.replace(item_to_redact, '***')
return str_to_redact | f86f24d3354780568ec2f2cbf5d32798a43fdb6a | 4,042 |
def FibreDirections(mesh):
"""
Routine dedicated to compute the fibre direction of components in integration point for
the Material in Florence and for the auxiliar routines in this script. First three directions
are taken into the code for Rotation matrix, so always it should be present in this order,
Normal, Tangential, Axial.
"""
ndim = mesh.InferSpatialDimension()
nfibre = 2
# Geometric definitions per element
divider = mesh.elements.shape[1]
directrix = [0.,1.,0.]
fibre_direction = np.zeros((mesh.nelem,nfibre,ndim),dtype=np.float64)
# Loop through the elements in the mesh
for elem in range(mesh.nelem):
# Geometric definitions per element
center = np.sum(mesh.points[mesh.elements[elem,:],:],axis=0)/divider
tangential = np.cross(directrix,center)
tangential = tangential/np.linalg.norm(tangential)
normal = np.cross(tangential,directrix)
# Define the anisotropic orientations
fibre_direction[elem,0,:]=np.multiply(directrix,np.cos(np.pi/4.)) + np.multiply(tangential,np.sin(np.pi/4.))
fibre_direction[elem,1,:]=np.multiply(directrix,np.cos(np.pi/4.)) - np.multiply(tangential,np.sin(np.pi/4.))
return fibre_direction | 9408702e72dde7586f42137ad25a0a944ed28a93 | 4,043 |
def put(consul_url=None, token=None, key=None, value=None, **kwargs):
"""
Put values into Consul
:param consul_url: The Consul server URL.
:param key: The key to use as the starting point for the list.
:param value: The value to set the key to.
:param flags: This can be used to specify an unsigned value
between 0 and 2^64-1. Clients can choose to use
this however makes sense for their application.
:param cas: This flag is used to turn the PUT into a
Check-And-Set operation.
:param acquire: This flag is used to turn the PUT into a
lock acquisition operation.
:param release: This flag is used to turn the PUT into a
lock release operation.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.put key='web/key1' value="Hello there"
salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592'
salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592'
"""
ret = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error("No Consul URL found.")
ret["message"] = "No Consul URL found."
ret["res"] = False
return ret
if not key:
raise SaltInvocationError('Required argument "key" is missing.')
# Invalid to specify these together
conflicting_args = ["cas", "release", "acquire"]
for _l1 in conflicting_args:
for _l2 in conflicting_args:
if _l1 in kwargs and _l2 in kwargs and _l1 != _l2:
raise SaltInvocationError(
"Using arguments `{}` and `{}` together is invalid.".format(
_l1, _l2
)
)
query_params = {}
available_sessions = session_list(consul_url=consul_url, return_list=True)
_current = get(consul_url=consul_url, token=token, key=key)
if "flags" in kwargs:
if kwargs["flags"] >= 0 and kwargs["flags"] <= 2 ** 64:
query_params["flags"] = kwargs["flags"]
if "cas" in kwargs:
if _current["res"]:
if kwargs["cas"] == 0:
ret["message"] = "Key {} exists, index must be non-zero.".format(key)
ret["res"] = False
return ret
if kwargs["cas"] != _current["data"]["ModifyIndex"]:
ret["message"] = "Key {} exists, but indexes do not match.".format(key)
ret["res"] = False
return ret
query_params["cas"] = kwargs["cas"]
else:
ret[
"message"
] = "Key {} does not exists, CAS argument can not be used.".format(key)
ret["res"] = False
return ret
if "acquire" in kwargs:
if kwargs["acquire"] not in available_sessions:
ret["message"] = "{} is not a valid session.".format(kwargs["acquire"])
ret["res"] = False
return ret
query_params["acquire"] = kwargs["acquire"]
if "release" in kwargs:
if _current["res"]:
if "Session" in _current["data"]:
if _current["data"]["Session"] == kwargs["release"]:
query_params["release"] = kwargs["release"]
else:
ret["message"] = "{} locked by another session.".format(key)
ret["res"] = False
return ret
else:
ret["message"] = "{} is not a valid session.".format(kwargs["acquire"])
ret["res"] = False
else:
log.error("Key {0} does not exist. Skipping release.")
data = value
function = "kv/{}".format(key)
method = "PUT"
res = _query(
consul_url=consul_url,
token=token,
function=function,
method=method,
data=data,
query_params=query_params,
)
if res["res"]:
ret["res"] = True
ret["data"] = "Added key {} with value {}.".format(key, value)
else:
ret["res"] = False
ret["data"] = "Unable to add key {} with value {}.".format(key, value)
if "error" in res:
ret["error"] = res["error"]
return ret | 3b80283da426e5515026fb8dc0db619b2a471f41 | 4,044 |
def prepare_w16():
"""
Prepare a 16-qubit W state using sqrt(iswaps) and local gates,
respecting linear topology
"""
ket = qf.zero_state(16)
circ = w16_circuit()
ket = circ.run(ket)
return ket | 74d0599e1520aab44088480616e2062153a789aa | 4,045 |
import requests
def get_All_Endpoints(config):
"""
:return:
"""
url = 'https://{}:9060/ers/config/endpoint'.format(config['hostname'])
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
body = {}
response = requests.request('GET', url, headers=headers, data=body, auth=HTTPBasicAuth('Admin', 'C1sco12345'), verify=False)
result = response.json()
return result['SearchResult']['resources'] | f74ad8ba7d65de11851b71318b2818776c5dc29b | 4,046 |
import os
import filelock
import tempfile
import zipfile
import shutil
def download_if_needed(folder_name):
""" Folder name will be saved as `.cache/textattack/[folder name]`. If it
doesn't exist on disk, the zip file will be downloaded and extracted.
Args:
folder_name (str): path to folder or file in cache
Returns:
str: path to the downloaded folder or file on disk
"""
cache_dest_path = path_in_cache(folder_name)
os.makedirs(os.path.dirname(cache_dest_path), exist_ok=True)
# Use a lock to prevent concurrent downloads.
cache_dest_lock_path = cache_dest_path + '.lock'
cache_file_lock = filelock.FileLock(cache_dest_lock_path)
cache_file_lock.acquire()
# Check if already downloaded.
if os.path.exists(cache_dest_path):
cache_file_lock.release()
return cache_dest_path
# If the file isn't found yet, download the zip file to the cache.
downloaded_file = tempfile.NamedTemporaryFile(
dir=config('CACHE_DIR'),
suffix='.zip', delete=False)
http_get(folder_name, downloaded_file)
# Move or unzip the file.
downloaded_file.close()
if zipfile.is_zipfile(downloaded_file.name):
unzip_file(downloaded_file.name, cache_dest_path)
else:
get_logger().info(f'Copying {downloaded_file.name} to {cache_dest_path}.')
shutil.copyfile(downloaded_file.name, cache_dest_path)
cache_file_lock.release()
# Remove the temporary file.
os.remove(downloaded_file.name)
get_logger().info(f'Successfully saved {folder_name} to cache.')
return cache_dest_path | efbb4e024c217a946ce8d35d26b94c33aa5f0615 | 4,047 |
import os
def get_local_repository_directory():
""" Return settins.LOCAL_REPO_DIR.
Ruturn None on any errors.
"""
if os.path.isdir(settings.LOCAL_REPO_DIR):
return settings.LOCAL_REPO_DIR
else:
logger.error("Local repository directory not found. LOCAL_REPO_DIR: '{}'.".format(settings.LOCAL_REPO_DIR))
return None | 8c9730687a933ddf8ec8049cd25f6393bd1a33d5 | 4,048 |
import pandas
import numpy
def synthetic_peptides_by_subsequence(
num_peptides,
fraction_binders=0.5,
lengths=range(8, 20),
binding_subsequences=["A?????Q"]):
"""
Generate a toy dataset where each peptide is a binder if and only if it
has one of the specified subsequences.
Parameters
----------
num_peptides : int
Number of rows in result
fraction_binders : float
Fraction of rows in result where "binder" col is 1
lengths : dict, Series, or list
If a dict or Series, then this should map lengths to the fraction of the
result to have the given peptide length. If it's a list of lengths then
all lengths are given equal weight.
binding_subsequences : list of string
Peptides with any of the given subsequences will be considered binders.
Question marks ("?") in these sequences will be replaced by random
amino acids.
Returns
----------
pandas.DataFrame, indexed by peptide sequence. The "binder" column is a
binary indicator for whether the peptide is a binder.
"""
if not isinstance(lengths, dict):
lengths = dict((length, 1.0) for length in lengths)
lengths_series = pandas.Series(lengths)
lengths_series /= len(lengths)
num_binders = int(round(num_peptides * fraction_binders))
num_non_binders = num_peptides - num_binders
print(num_binders, num_non_binders)
peptides = []
# Generate non-binders
for (length, weight) in lengths_series.iteritems():
peptides.extend(
random_peptides(round(weight * num_non_binders), round(length)))
for binding_core in binding_subsequences:
# Generate binders
lengths_binders = lengths_series.ix[
lengths_series.index >= len(binding_core)
]
normalized_lengths_binders = (
lengths_binders /
lengths_binders.sum() /
len(binding_subsequences))
for (length, weight) in normalized_lengths_binders.iteritems():
if length >= len(binding_core):
num_peptides_to_make = int(round(weight * num_binders))
if length == len(binding_core):
start_positions = [0] * num_peptides_to_make
else:
start_positions = numpy.random.choice(
length - len(binding_core), num_peptides_to_make)
peptides.extend(
"".join([
random_peptides(1, length=start_position)[0],
binding_core,
random_peptides(1, length=length - len(
binding_core) - start_position)[0],
])
for start_position in start_positions)
df = pandas.DataFrame(index=set(peptides))
df["binder"] = False
for binding_core in binding_subsequences:
df["binder"] = df["binder"] | df.index.str.contains(
binding_core,
regex=False)
def replace_question_marks(s):
while "?" in s:
s = s.replace("?", numpy.random.choice(AMINO_ACIDS))
return s
df.index = df.index.map(replace_question_marks)
df_shuffled = df.sample(frac=1)
return df_shuffled | d4cfa202043e3a98a7960246a7d8775ff147201c | 4,049 |
def gce_zones() -> list:
"""Returns the list of GCE zones"""
_bcds = dict.fromkeys(['us-east1', 'europe-west1'], ['b', 'c', 'd'])
_abcfs = dict.fromkeys(['us-central1'], ['a', 'b', 'c', 'f'])
_abcs = dict.fromkeys(
[
'us-east4',
'us-west1',
'europe-west4',
'europe-west3',
'europe-west2',
'asia-east1',
'asia-southeast1',
'asia-northeast1',
'asia-south1',
'australia-southeast1',
'southamerica-east1',
'asia-east2',
'asia-northeast2',
'europe-north1',
'europe-west6',
'northamerica-northeast1',
'us-west2',
],
['a', 'b', 'c'],
)
_zones_combo = {**_bcds, **_abcfs, **_abcs}
zones = [f'{loc}-{zone}' for loc, zones in _zones_combo.items() for zone in zones]
return zones | 10e684b2f458fe54699eb9886af148b092ec604d | 4,050 |
def empty_netbox_query():
"""Return an empty list to a list query."""
value = {
"count": 0,
"next": None,
"previous": None,
"results": [],
}
return value | 9b017c34a3396a82edc269b10b6bfc6b7f878bc3 | 4,051 |
import psutil
import time
def get_process_metrics(proc):
""" Extracts CPU times, memory infos and connection infos about a given
process started via Popen(). Also obtains the return code. """
p = psutil.Process(proc.pid)
max_cpu = [0, 0]
max_mem = [0, 0]
conns = []
while proc.poll() is None:
try:
cpu = list(p.cpu_times())
mem = list(p.memory_info())
conns = p.connections('all')
for child in p.children(recursive=True):
c_cpu = list(child.cpu_times())
c_mem = list(child.memory_info())
cpu[0] += c_cpu[0]
cpu[1] += c_cpu[1]
mem[0] += c_mem[0]
mem[1] += c_mem[1]
if max_cpu[0] < cpu[0]:
max_cpu = cpu
if max_mem[0] < mem[0]:
max_mem = mem
except (psutil.AccessDenied, psutil.NoSuchProcess):
pass
time.sleep(1)
retcode = proc.wait()
return retcode, max_cpu, max_mem, conns | 7be8688debbde33bbcfb43b483d8669241e029d6 | 4,052 |
def tau_from_T(Tobs, Tkin):
"""
Line optical depth from observed temperature and excitation temperature in Kelvin
"""
tau = -np.log(1.-(Tobs/Tkin))
return tau | 089cdc9ae3692037fa886b5c168e03ee2b6ec9ce | 4,053 |
def create_classes_names_list(training_set):
"""
:param training_set: dict(list, list)
:return: (list, list)
"""
learn_classes_list = []
for k, v in training_set.items():
learn_classes_list.extend([str(k)] * len(v))
return learn_classes_list | 0b30153afb730d4e0c31e87635c9ece71c530a41 | 4,054 |
def getSpecialDistribution(queries, kind, burstCount=1, requestsPerBurst=1,
pauseLength=1.0):
"""get a distribution that virtualizes some specifique network situation.
totalTime is the total amount of time the query transmission will take.
Used parameters for the distributions:
- bursts: burstCount, requestsPerBurst, pauseLength(pause between bursts)"""
ret = []
i = 0
query = None
c = 0
if burstCount < 1 or requestsPerBurst < 1:
raise Exception("Invalid parameter for bursts mode")
if kind == "bursts":
for i in range(burstCount):
for j in range(requestsPerBurst):
if len(queries) != 0:
query = queries.pop()
else:
c += 1
if j == requestsPerBurst - 1:
ret.append( (query, pauseLength) )
else:
ret.append( (query, 0) )
if c > 0:
log.warning("Filled up with the last name {} times".format(c))
return ret
elif kind == "infinite":
# return a generator
return loopList([(query, 0.0001) for query in queries])
elif kind == "file":
# TODO: take timestamps from some kind of file
raise Exception("Not yet implemented")
else:
raise Exception("Invalid kind of distribution: {}".format(kind)) | 8e95c7de1f0699f31f975cbe57915cef74016a22 | 4,055 |
from petastorm.spark import SparkDatasetConverter
def get_petastorm_dataset(cache_dir: str, partitions: int=4):
"""
This Dataloader assumes that the dataset has been converted to Delta table already
The Delta Table Schema is:
root
|-- sample_id: string (nullable = true)
|-- value: string (nullable = true)
|-- sample_label: string (nullable = true)
|-- filepath: string (nullable = true)
|-- filename: string (nullable = true)
|-- extension: string (nullable = true)
|-- set: string (nullable = true)
|-- label: integer (nullable = true)
See: TBD to Load and convert the aclImdb dataset from the tf sample dataset lib
Args:
cache_dir: Cache Directory for Peatstorm
partitions: Num Partitions for Petastorm partitions need to match num horovod threads / gpus (TO CHECK)
Returns:
df_train: spark df of training data
df_val: spark df of val data
size_train: size of the training dataset for use in batch step calcs
size_val: size of the val dataset for use in validation batch step calcs
"""
spark.conf.set(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, cache_dir)
train_frame = spark.sql("select value, `label` \
from brian_petastorm_datasets.aclImdb_label \
where `set` = 'train'")
df_test = spark.sql("select value, `label` \
from brian_petastorm_datasets.aclImdb_label \
where `set` = 'test'")
df_train, df_val = train_frame.randomSplit([0.8,0.2], seed=12345)
df_train.repartition(partitions)
df_val.repartition(partitions)
df_test.repartition(partitions)
size_train = df_train.count()
size_val = df_val.count()
size_test = df_test.count()
return df_train, df_val, df_test, size_train, size_val, size_test | 27f6777b2c1cebad08f8a416d360a2b7096febec | 4,056 |
def get_corners(square_to_edges, edge_to_squares):
"""Get squares ids of squares which place in grid in corner."""
return get_squares_with_free_edge(square_to_edges, edge_to_squares, 2) | 1370f472aedf83f7aa17b64a813946a3a760968d | 4,057 |
async def get_untagged_joke():
"""
Gets an untagged joke from the jokes table and returns it
:return: json = {joke_id, joke}
"""
df = jokes.get_untagged_joke()
if not df.empty:
response = {"joke": df["joke"][0], "joke_id": int(df["id"][0])}
else:
response = {"joke": "No more jokes to tag", "joke_id": -1}
return response | a2dde1d5ddba47beb5e7e51b9f512f0a861336da | 4,058 |
def map_parallel(function, xs):
"""Apply a remote function to each element of a list."""
if not isinstance(xs, list):
raise ValueError('The xs argument must be a list.')
if not hasattr(function, 'remote'):
raise ValueError('The function argument must be a remote function.')
# EXERCISE: Modify the list comprehension below to invoke "function"
# remotely on each element of "xs". This should essentially submit
# one remote task for each element of the list and then return the
# resulting list of ObjectIDs.
return [function.remote(x) for x in xs] | 1fe75868d5ff12a361a6aebd9e4e49bf92c32126 | 4,059 |
def log_interp(x,y,xnew):
"""
Apply interpolation in logarithmic space for both x and y.
Beyond the input x range, returns 10^0 = 1.
"""
ynew = 10**ius(np.log10(x), np.log10(y), ext=3)(np.log10(xnew))
return ynew | 16ef0cc494f61c031f9fd8f8e820a17bb6c83df8 | 4,060 |
def inten_sat_compact(args):
"""
Memory saving version of inten_scale followed by saturation.
Useful for multiprocessing.
Parameters
----------
im : numpy.ndarray
Image of dtype np.uint8.
Returns
-------
numpy.ndarray
Intensity scale and saturation of input.
"""
return ((inten_scale(args[0]) * saturation(args[0])) ** 2).astype(np.float32) | 9624891f9d09c13d107907fcd30e2f102ff00ee2 | 4,061 |
def masseuse_memo(A, memo, ind=0):
"""
Return the max with memo
:param A:
:param memo:
:param ind:
:return:
"""
    # Stop once the index runs past the end of the list
if ind > len(A)-1:
return 0
if ind not in memo:
memo[ind] = max(masseuse_memo(A, memo, ind + 2) + A[ind], masseuse_memo(A, memo, ind + 1))
return memo[ind] | 03d108cb551f297fc4fa53cf9575d03af497ee38 | 4,062 |
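# Hypothetical usage (not part of the original snippet): maximum sum of
# non-adjacent appointment values, passing an empty dict as the memo.
print(masseuse_memo([5, 1, 1, 5], {}))  # 10 (take indices 0 and 3)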
import numpy as np
import torch
def unique_pairs(bonded_nbr_list):
"""
Reduces the bonded neighbor list to only include unique pairs of bonds. For example,
if atoms 3 and 5 are bonded, then `bonded_nbr_list` will have items [3, 5] and also
[5, 3]. This function will reduce the pairs only to [3, 5] (i.e. only the pair in which
the first index is lower).
Args:
bonded_nbr_list (list): list of arrays of bonded pairs for each molecule.
Returns:
sorted_pairs (list): same as bonded_nbr_list but without duplicate pairs.
"""
unique_pairs = []
for pair in bonded_nbr_list:
# sort according to the first item in the pair
sorted_pair = torch.sort(pair)[0].numpy().tolist()
if sorted_pair not in unique_pairs:
unique_pairs.append(sorted_pair)
# now make sure that the sorting is still good (this may be unnecessary but I added
# it just to make sure)
idx = list(range(len(unique_pairs)))
# first_arg = list of the the first node in each pair
first_arg = [pair[0] for pair in unique_pairs]
# sorted_idx = sort the indices of unique_pairs by the first node in each pair
sorted_idx = [item[-1] for item in sorted(zip(first_arg, idx))]
# re-arrange by sorted_idx
sorted_pairs = torch.LongTensor(np.array(unique_pairs)[sorted_idx])
return sorted_pairs | e974728ad831a956f1489b83bb77b15833ae9b82 | 4,063 |
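# Hypothetical usage (not part of the original snippet): duplicate (i, j)/(j, i)
# bond entries collapse to a single sorted pair, ordered by the first index.
import torch

nbrs = [torch.LongTensor([3, 5]), torch.LongTensor([5, 3]), torch.LongTensor([1, 2])]
print(unique_pairs(nbrs))
# tensor([[1, 2],
#         [3, 5]])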
from functools import wraps

import discord

def permission(*perms: str):
""" Decorator that runs the command only if the author has the specified permissions.
perms must be a string matching any property of discord.Permissions.
NOTE: this function is deprecated. Use the command 'permissions' attribute instead.
"""
def decorator(func):
@wraps(func)
async def wrapped(message: discord.Message, *args, **kwargs):
member_perms = message.channel.permissions_for(message.author)
if all(getattr(member_perms, perm, False) for perm in perms):
await func(message, *args, **kwargs)
return wrapped
return decorator | b0ef0dfec36a243152dff4ca11ab779d2c417ab8 | 4,064 |
import sabnzbd
import sabnzbd.filesystem
# Assumption: "T" is SABnzbd's runtime-injected translation helper; the original
# "from re import T" was a spurious auto-import and has been dropped.
def validate_script(value):
"""Check if value is a valid script"""
if not sabnzbd.__INITIALIZED__ or (value and sabnzbd.filesystem.is_valid_script(value)):
return None, value
elif (value and value == "None") or not value:
return None, "None"
return T("%s is not a valid script") % value, None | d4a5d6922fb14524bc9d11f57807d9a7f0e937f1 | 4,065 |
async def post_user(ctx: Context, user: MemberOrUser) -> t.Optional[dict]:
"""
Create a new user in the database.
Used when an infraction needs to be applied on a user absent in the guild.
"""
log.trace(f"Attempting to add user {user.id} to the database.")
payload = {
'discriminator': int(user.discriminator),
'id': user.id,
'in_guild': False,
'name': user.name,
'roles': []
}
try:
response = await ctx.bot.api_client.post('bot/users', json=payload)
log.info(f"User {user.id} added to the DB.")
return response
except ResponseCodeError as e:
log.error(f"Failed to add user {user.id} to the DB. {e}")
await ctx.send(f":x: The attempt to add the user to the DB failed: status {e.status}") | 25a6a710d7d94cc9837d9d67408a09fd6ff48596 | 4,066 |
from typing import List

from fastapi import Body, HTTPException
def delete(ids: List = Body(...)):
"""
Deletes from an embeddings index. Returns list of ids deleted.
Args:
ids: list of ids to delete
Returns:
ids deleted
"""
try:
return application.get().delete(ids)
except ReadOnlyError as e:
raise HTTPException(status_code=403, detail=e.args[0]) from e | 9075db7c7dd174b850d1d4acbe1cdb4001162b5d | 4,067 |
def main():
""" Simple Event Viewer """
events = None
try:
events = remote('127.0.0.1', EventOutServerPort, ssl=False, timeout=5)
while True:
event_data = ''
while True:
tmp = len(event_data)
event_data += events.recv(numb=8192, timeout=1).decode('latin-1')
if tmp == len(event_data):
break
if len(event_data):
# fix the JSON mess
event_data = fix_json(event_data)
if not len(event_data):
log.warning('[Simple Event Viewer]: callback data invalid!\n')
return False
for event in event_data:
log.info('[Event From]: {}\n{}'.format(color(event.get('host'), GREEN), event))
except (PwnlibException, EOFError, KeyboardInterrupt):
log.warning("[Simple Event Viewer]")
if events:
events.close()
return False | d96500a3114785dbb408681e96d7ffb7a5c59d04 | 4,068 |
import numpy as np

def fsi_acm_up_profiler_descending(vp1, vp2, vp3):
"""
Description:
Calculates the VEL3D Series A and L upwards velocity data product VELPTMN-VLU-DSC_L1
for the Falmouth Scientific (FSI) Acoustic Current Meter (ACM) mounted on a McLane
profiler.
Because of the orientation of the ACM stinger fingers (see Notes) upward
current velocity can be calculated in several different ways. This function
calculates the vertical velocity to be used when the profiler is descending,
avoiding the use of data from vp4 which will be contaminated by the sheet-flow
wake of the stinger's central post.
Usage:
w_fsi_dsc = fsi_acm_up_profiler_descending(vp1, vp2, vp3)
where
w_fsi_dsc = velocity up; VELPTMN-VLU-DSC_L1 [m/s]
vp1 = raw beam velocity from the port stinger finger; VELPTMN-VP1_L0 [cm/s]
vp2 = raw beam velocity from the lower stinger finger; VELPTMN-VP2_L0 [cm/s]
vp3 = raw beam velocity from the starboard stinger finger; VELPTMN-VP3_L0 [cm/s]
Implemented by:
2015-02-13: Russell Desiderio. Initial code.
Notes:
The VEL3D series A and L instruments are FSI current meters modified for use on a
McLane profiler. The FSI ACM has 4 raw beam velocities. The correspondences between
the MMP manual designations and the IDD designations are:
(Xplus, Yplus, Xminus, Yminus) (MMP manual, page G-22)
(va , vb , vc , vd ) (IDD, VEL3D series A)
(vp1 , vp2 , vp3 , vp4 ) (IDD, VEL3D series L)
(left , down , right , up ) (spatial orientation)
This is also the ordering of these parameters in telemetered and recovered data.
The MMP manual Rev E, page 8-30, incorrectly calculates the upward velocities wU and wD.
For more information see the Notes to worker function fsi_acm_horz_vel.
References:
OOI (2015). Data Product Specification for Mean Point Water Velocity
Data from FSI Acoustic Current Meters. Document Control Number
1341-00792. https://alfresco.oceanobservatories.org/ (See:
Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-00792_Data_Product_SPEC_VELPTMN_ACM_OOI.pdf)
OOI (2015). 1341-00792_VELPTMN Artifact: McLane Moored Profiler User Manual.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >>
>> REFERENCE >> Data Product Specification Artifacts >> 1341-00792_VELPTMN >>
MMP-User Manual-Rev-E-WEB.pdf)
"""
# find the x-velocity in the instrument coordinate system
x = -(vp1 + vp3) / np.sqrt(2.0)
# the z-velocity in the instrument coordinate system is also the w velocity in the
# earth coordinate system because the effects of pitch and roll are negligible.
w = -x + np.sqrt(2.0) * vp2
# change units from cm/s to m/s
return w / 100.0 | e176ff1b23bf4b5624cdc8a698d03c8d2ee1947a | 4,069 |
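# Hypothetical numeric check (not part of the original snippet): with zero signal
# on the port/starboard fingers and 10 cm/s on the lower finger, the upward
# velocity is sqrt(2) * 0.1 m/s.
import numpy as np

print(fsi_acm_up_profiler_descending(np.array([0.0]), np.array([10.0]), np.array([0.0])))
# [0.14142136]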
from typing import Tuple
def colour_name(colour: Tuple[int, int, int]) -> str:
"""Return the colour name associated with this colour value, or the empty
string if this colour value isn't in our colour list.
>>> colour_name((1, 128, 181))
'Pacific Point'
>>> colour_name(PACIFIC_POINT)
'Pacific Point'
"""
colour_names = {
PACIFIC_POINT: 'Pacific Point',
REAL_RED: 'Real Red',
OLD_OLIVE: 'Old Olive',
DAFFODIL_DELIGHT: 'Daffodil Delight'
}
if colour in colour_names:
return colour_names[colour]
else:
return '' | e596bf802b8f168e6c8d9bd9b8e4113b61e7fd58 | 4,070 |
def score_fn(subj_score, comp_score):
"""
Generates the TextStim with the updated score values
Parameters
----------
subj_score : INT
The subjects score at the moment
comp_score : INT
        The computer's score at the moment
Returns
-------
score_stim : psychopy.visual.text.TextStim
The visual stimulus ready to be drawn.
e.g.
5 - 4
Spacebar to continue
"""
score = stimuli.score_text.format(subj_score, comp_score)
#To edit the score_text go to the stimuli.py module
score_stim = visual.TextStim(win, text = score, pos = (0, -.6))
return score_stim | 2d52b4c8d47543c6c1c98e5aa7feb8c3341ff7a4 | 4,071 |
def parse_write_beam(line):
"""
Write_beam (type -2)
If btype = −2, output particle phase-space coordinate information at given location V3(m)
    into filename fort.Bmpstp with particle sample frequency Bnseg. Here, the maximum number
    of phase-space files which can be output is 100. Note that 40 and 50 should be avoided
    since these are used for initial and final phase-space output.
"""
x = line.split()
v = v_from_line(line)
d={}
d['filename']='fort.'+x[2]
d['sample_frequency'] = int(x[1])
d['s'] = float(v[3])
if int(x[2]) in [40, 50]:
print('warning, overwriting file fort.'+x[2])
return d | 7ce86ae39a51ea8d4636e37bea26edd3caae19e8 | 4,072 |
def get_tone(pinyin):
"""Renvoie le ton du pinyin saisi par l'utilisateur.
Args:
pinyin {str}:
l'entrée pinyin par l'utilisateur
Returns:
number/None :
Si pas None, la partie du ton du pinyin (chiffre)
"""
# Prenez le dernier chaine du pinyin
tone = pinyin[-1]
# Déterminer s'il s'agit d'un type numérique
if tone.isdigit():
return tone
else:
return None | fc0b02902053b3f2470acf952812573f5125c4cf | 4,073 |
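# Hypothetical usage (not part of the original snippet): a trailing digit is
# returned as the tone, toneless input yields None.
print(get_tone("ma3"))  # '3'
print(get_tone("ma"))   # None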
def authIfV2(sydent, request, requireTermsAgreed=True):
"""For v2 APIs check that the request has a valid access token associated with it
:returns Account|None: The account object if there is correct auth, or None for v1 APIs
:raises MatrixRestError: If the request is v2 but could not be authed or the user has not accepted terms
"""
if request.path.startswith('/_matrix/identity/v2'):
token = tokenFromRequest(request)
if token is None:
raise MatrixRestError(401, "M_UNAUTHORIZED", "Unauthorized")
accountStore = AccountStore(sydent)
account = accountStore.getAccountByToken(token)
if account is None:
raise MatrixRestError(401, "M_UNAUTHORIZED", "Unauthorized")
if requireTermsAgreed:
terms = get_terms(sydent)
if (
terms.getMasterVersion() is not None and
account.consentVersion != terms.getMasterVersion()
):
raise MatrixRestError(403, "M_TERMS_NOT_SIGNED", "Terms not signed")
return account
return None | 6c3f60df233cc030dfc3ec2658bd2a70c5a20aed | 4,074 |
def gen_rho(K):
"""The Ideal Soliton Distribution, we precompute
an array for speed
"""
return [1.0/K] + [1.0/(d*(d-1)) for d in range(2, K+1)] | 40382af047d0f2efba0eb6db17c28b92e47d3c92 | 4,075 |
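# Hypothetical check (not part of the original snippet): the Ideal Soliton
# probabilities 1/K, 1/(2*1), ..., 1/(K*(K-1)) telescope and sum to 1 for any K.
rho = gen_rho(10)
print(len(rho), round(sum(rho), 10))  # 10 1.0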
import numpy as np
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
"""
Check that all items of arrays differ in at most N Units in the Last Place.
Parameters
----------
a, b : array_like
Input arrays to be compared.
maxulp : int, optional
The maximum number of units in the last place that elements of `a` and
`b` can differ. Default is 1.
dtype : dtype, optional
Data-type to convert `a` and `b` to if given. Default is None.
Returns
-------
ret : ndarray
Array containing number of representable floating point numbers between
items in `a` and `b`.
Raises
------
AssertionError
If one or more elements differ by more than `maxulp`.
Notes
-----
For computing the ULP difference, this API does not differentiate between
various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
is zero).
See Also
--------
assert_array_almost_equal_nulp : Compare two arrays relatively to their
spacing.
Examples
--------
>>> a = np.linspace(0., 1., 100)
>>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
"""
__tracebackhide__ = True # Hide traceback for py.test
ret = nulp_diff(a, b, dtype)
if not np.all(ret <= maxulp):
raise AssertionError("Arrays are not almost equal up to %g "
"ULP (max difference is %g ULP)" %
(maxulp, np.max(ret)))
return ret | 8ca9698e5b213f753002535061b17aeb59f12e83 | 4,076 |
def Ambient_Switching(crop_PPFDmin, Trans, consumption):
"""
Inputs: consumption (returned from Light_Sel)
"""
#How much energy can you save if you switch off when ambient lighting is enough for plant needs?
#Assume that when ambient is higher than max recommended PPFD, that the greenhouse is cloaked, allowing it to still rely on outside light.
#Assume that peak solar always happens in the afternoons
#Inputs are Detroit 2010 data for solar insolation, consumption in J, and transmissivity.
count = 0
for i in Detroit['PPFD (Micromoles/m^2/s)']:
if (i*Trans) > (crop_PPFDmin):
count = count + 1
energy_savings = count *consumption
#print("If lights are strageically shut off during highly sunny hours, then", energy_savings, "J will be saved")
return energy_savings | 5a8ab9d6eb0c6b3ddd7bb3f7efb1599b952aa345 | 4,077 |
from typing import Callable

import numpy as np
def get_numerical_gradient(position: np.ndarray, function: Callable[[np.ndarray], float],
delta_magnitude: float = 1e-6) -> np.ndarray:
""" Returns the numerical derivative of an input function at the specified position."""
dimension = position.shape[0]
vec_low = np.zeros(dimension)
vec_high = np.zeros(dimension)
for ii in range(dimension):
delta_vec = np.zeros(dimension)
delta_vec[ii] = delta_magnitude/2.0
vec_low[ii] = function(position-delta_vec)
vec_high[ii] = function(position+delta_vec)
return (vec_high-vec_low)/delta_magnitude | a439acd3934006e2b8f9188e3204e12ef3885ae5 | 4,078 |
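# Hypothetical usage (not part of the original snippet): the central-difference
# gradient of f(x, y) = x**2 + 3*y at (1, 2) is approximately (2, 3).
import numpy as np

f = lambda p: p[0] ** 2 + 3.0 * p[1]
print(get_numerical_gradient(np.array([1.0, 2.0]), f))  # ~[2. 3.]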
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import periodogram
from .dipole import Dipole
def plot_psd(dpl, *, fmin=0, fmax=None, tmin=None, tmax=None, layer='agg',
ax=None, show=True):
"""Plot power spectral density (PSD) of dipole time course
Applies `~scipy.signal.periodogram` from SciPy with ``window='hamming'``.
Note that no spectral averaging is applied across time, as most
``hnn_core`` simulations are short-duration. However, passing a list of
`Dipole` instances will plot their average (Hamming-windowed) power, which
resembles the `Welch`-method applied over time.
Parameters
----------
dpl : instance of Dipole | list of Dipole instances
The Dipole object.
fmin : float
Minimum frequency to plot (in Hz). Default: 0 Hz
fmax : float
Maximum frequency to plot (in Hz). Default: None (plot up to Nyquist)
tmin : float or None
Start time of data to include (in ms). If None, use entire simulation.
tmax : float or None
End time of data to include (in ms). If None, use entire simulation.
layer : str, default 'agg'
The layer to plot. Can be one of 'agg', 'L2', and 'L5'
ax : instance of matplotlib figure | None
The matplotlib axis.
show : bool
If True, show the figure
Returns
-------
fig : instance of matplotlib Figure
The matplotlib figure handle.
"""
if ax is None:
_, ax = plt.subplots(1, 1, constrained_layout=True)
if isinstance(dpl, Dipole):
dpl = [dpl]
scale_applied = dpl[0].scale_applied
sfreq = dpl[0].sfreq
trial_power = []
for dpl_trial in dpl:
if dpl_trial.scale_applied != scale_applied:
raise RuntimeError('All dipoles must be scaled equally!')
if dpl_trial.sfreq != sfreq:
raise RuntimeError('All dipoles must be sampled equally!')
data, _ = _get_plot_data_trange(dpl_trial.times,
dpl_trial.data[layer],
tmin, tmax)
freqs, Pxx = periodogram(data, sfreq, window='hamming', nfft=len(data))
trial_power.append(Pxx)
    # plot the power averaged across trials (trial_power), per the docstring
    ax.plot(freqs, np.mean(np.array(trial_power, ndmin=2), axis=0))
if fmax is not None:
ax.set_xlim((fmin, fmax))
ax.ticklabel_format(axis='both', scilimits=(-2, 3))
ax.set_xlabel('Frequency (Hz)')
if scale_applied == 1:
ylabel = 'Power spectral density\n(nAm' + r'$^2 \ Hz^{-1}$)'
else:
ylabel = 'Power spectral density\n' +\
r'([nAm$\times$ {:.0f}]'.format(scale_applied) +\
r'$^2 \ Hz^{-1}$)'
ax.set_ylabel(ylabel, multialignment='center')
plt_show(show)
return ax.get_figure() | 2fdcb69c88991bb4137587e98b068a912bd66f75 | 4,079 |
import numpy as np

def stable_point(r):
"""
    Repeat the iteration n times to make sure the map has reached its fixed points,
    then return the last 200 values.
    """
n = 1500
x = np.zeros(n)
x[0] = np.random.uniform(0, 0.5)
for i in range(n - 1):
x[i + 1] = f(x[i], r)
print(x[-200:])
return x[-200:] | 9d9c32abfb0fea74abb32ec8cebd8c76738669b1 | 4,080 |
def vary_on_headers(*headers):
"""
A view decorator that adds the specified headers to the Vary header of the
response. Usage:
@vary_on_headers('Cookie', 'Accept-language')
def index(request):
...
Note that the header names are not case-sensitive.
"""
def decorator(func):
@wraps(func)
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, headers)
return response
return inner_func
return decorator | 9892ac00aa31b0e294f79b2d1539d6d79f3eaed7 | 4,081 |
import logging
import pathlib
from typing import Dict, Union

import imageio
import pandas as pd
def load_frames(
frame_dir: pathlib.Path,
df_frames: pd.DataFrame,
) -> Dict[int, Dict[str, Union[str, np.ndarray]]]:
"""Load frame files from a directory.
Args:
frame_dir: Path to directory where frames are stored in a target
class folder or background class folder
df_frames: Dataframe with frame information.
Returns:
Dictionary where key is frame index and value is a dictionary
with the target class and frame image
"""
logger = logging.getLogger(__name__)
logger.debug("Loading frames at %s", frame_dir)
frame_filepaths = frame_dir.rglob("*.jpeg")
frames_dict = {}
df_video_frames = df_frames[frame_dir.name == df_frames["video_name"]]
for frame_filepath in frame_filepaths:
frame_img = imageio.imread(frame_filepath)
_, frame_index = frame_filepath.stem.split("___")
frame_index = int(frame_index)
target = df_video_frames.loc[df_video_frames["frame_ind"] == frame_index, "target"].item()
logger.debug("Frame %s target class is %s", frame_filepath.name, target)
frames_dict[frame_index] = {"image": frame_img, "target": target}
return frames_dict | 81de670c0b42d40cf71ca7d46e63be57dc9c3f12 | 4,082 |
import logging
from multiprocessing import Process, Queue

logger = logging.getLogger(__name__)

def non_daemonic_process_pool_map(func, jobs, n_workers, timeout_per_job=None):
"""
function for calculating in parallel a function that may not be run
a in a regular pool (due to forking processes for example)
:param func: a function that accepts one input argument
:param jobs: a list of input arguments to func
:param n_workers: number of parallel workers
:param timeout_per_job: timeout for processing a single job
:return: list of results in the order of the "jobs" list
"""
END_TOKEN = 'END'
q_in = Queue()
q_out = Queue()
def queue_worker(q_in, q_out):
arg_in = q_in.get()
while arg_in != END_TOKEN:
try:
result = func(arg_in)
except Exception as e:
logger.exception(e)
logger.error(f'Queue worker failed on input: {arg_in}, with {str(e)}')
result = None
q_out.put((arg_in, result))
arg_in = q_in.get()
q_out.put(END_TOKEN)
# put jobs
[q_in.put(c) for c in jobs + n_workers * [END_TOKEN]]
# start workers
workers = [Process(target=queue_worker, args=(q_in, q_out))
for _ in range(n_workers)]
[w.start() for w in workers]
# wait for results
n_finished = 0
outputs = []
while n_finished < n_workers:
output = q_out.get(timeout=timeout_per_job)
logger.info(f'queue out, got: {output}')
if output == END_TOKEN:
n_finished += 1
logger.info(f'{n_finished}/{n_workers} queue workers done')
else:
outputs.append(output)
# wait for workers to join
logger.info('Joining queue workers')
[w.join() for w in workers]
logger.info('Joined all queue workers')
# sort in original order
results = [output[1] for output in
sorted(outputs, key=lambda output: jobs.index(output[0]))]
return results | 41fbdaae1e584839692eae4d5034ffd6828eb5c7 | 4,083 |
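# Hypothetical usage (not part of the original snippet): squaring numbers across
# two worker processes. Assumes a fork-based start method (the Linux default),
# since queue_worker is a closure and the lambda is inherited by the children.
if __name__ == "__main__":
    print(non_daemonic_process_pool_map(lambda x: x * x, [1, 2, 3, 4], n_workers=2))
    # [1, 4, 9, 16]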
import random
import textwrap
import unicodedata as UD  # assumption: "UD" is the unicodedata module (used as UD.bidirectional below)
from datetime import datetime, timedelta

import emoji
def result_list_handler(*args: list, **kwargs) -> str:
"""
    Handles the main search result for each query. It checks whether there are any results for this query or not.
    1. If there are results, it sorts and decorates them.
    2. Otherwise it shows a message stating that there were no results for this query.
:param args: 1. *[0] -> query
2. *[1] -> a list of search results objects
:param kwargs:
:return: Final decorated search results
"""
query = args[0]
search_res = args[1]
print(UD.bidirectional(u'\u0688'))
x = len([None for ch in query if UD.bidirectional(ch) in ('R', 'AL')]) / float(len(query))
# print('{t} => {c}'.format(t=query.encode('utf-8'), c='RTL' if x > 0.5 else 'LTR'))
# print(UD.bidirectional("dds".decode('utf-8')))
# direction = 'RTL' if x > 0.5 else 'LTR'
dir_str = "‏" if x > 0.5 else '‎'
fruit = random.choice(fruit_list)
print(search_res)
if int(search_res["hits"]["total"]["value"]) > 0:
text = f"<b>{_search_emoji} نتایج جستجو برای: {textwrap.shorten(query, width=100, placeholder='...')}</b>\n"
text += f"{_checkmark_emoji} نتایج بهتر پایین لیست هستند.\n\n\n"
_headphone_emoji = emoji.EMOJI_ALIAS_UNICODE[':headphone:']
for index, hit in reversed(list(enumerate(search_res['hits']['hits']))):
duration = timedelta(seconds=int(hit['_source']['duration']))
d = datetime(1, 1, 1) + duration
_performer = hit['_source']['performer']
_title = hit['_source']['title']
_file_name = hit['_source']['file_name']
if not (len(_title) < 2 or len(_performer) < 2):
name = f"{_performer} - {_title}"
elif not len(_performer) < 2:
name = f"{_performer} - {_file_name}"
else:
name = _file_name
# name = f"{_file_name if (_performer == 'None' and _title == 'None') else (_performer if _title == 'None' else _title)}".replace(
# ".mp3", "")
text += f"<b>{str(index + 1)}. {dir_str} {_headphone_emoji} {fruit if index == 0 else ''}</b>" \
f"<b>{textwrap.shorten(name, width=35, placeholder='...')}</b>\n" \
f"{dir_str} {_floppy_emoji} | {round(int(hit['_source']['file_size']) / 1000_000, 1)} {'مگابایت' if x > 0.5 else 'MB'} " \
f"{_clock_emoji} | {str(d.hour) + ':' if d.hour > 0 else ''}{d.minute}:{d.second}\n{dir_str}" \
f" دانلود: " \
f" /dl_{hit['_id']} \n" \
f" {34 * '-' if not index == 0 else ''}{dir_str} \n\n"
else:
text = f"{_traffic_light} هیچ نتیجه ای برای این عبارت پیدا نشد:" \
f"\n<pre>{textwrap.shorten(query, width=200, placeholder='...')}</pre>"
return text | c0fb0db46e3c47d24b06c290ba9d0129eb436edf | 4,084 |
import numpy as np

def flip_mask(mask, x_flip, y_flip):
    """
    Args:
        mask: binary mask of shape [height, width]
    """
mask = mask.copy()
if y_flip:
mask = np.flip(mask, axis=0)
if x_flip:
mask = np.flip(mask, axis=1)
return mask | d3d783fb3e5913448f4e9d06f1f96d89559a686c | 4,085 |
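# Hypothetical usage (not part of the original snippet): horizontal flip of a
# small binary mask.
import numpy as np

mask = np.array([[1, 0],
                 [0, 0]])
print(flip_mask(mask, x_flip=True, y_flip=False))
# [[0 1]
#  [0 0]]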
def sbn2journal(sbn_record, permalink_template="http://id.sbn.it/bid/%s"):
"""
Creates a `dbmodels.Journal` instance out of a dictionary with metadata.
:param record: the dictionary returned by `resolution.supporting_functions.enrich_metadata()`
:return: an instance of `dbmodels.Journal`
"""
bid = normalize_bid(sbn_record["codiceIdentificativo"])
metadata = {
'short_title' : sbn_record["titolo"].split(":")[0].split("/")[0].strip()
, 'full_title' : sbn_record["titolo"]
, 'bid' : bid
, 'sbn_link' : permalink_template % bid
, 'identifiers' : []
, "provenance" : "lbcatalogue"
}
if "numeri" in sbn_record:
identifiers = sbn_record["numeri"]
for identifier in identifiers:
tmp = [{
"identifier_type" : key
,"value": identifier[key]
} for key in identifier.keys()][0]
metadata["identifiers"].append(SBN_Identifier(**tmp))
return Journal(**metadata) | 059b00aeb81dd1bdbc987f31c045b6eb5aedc3b3 | 4,086 |
def median(data):
"""Calculates the median value from |data|."""
data = sorted(data)
n = len(data)
    if n % 2 == 1:
        return data[n // 2]  # integer division so the index is an int
    else:
        n2 = n // 2
return (data[n2 - 1] + data[n2]) / 2.0 | ad2b3f7eb3f5446c81c6c400bc16c7833e75c05c | 4,087 |
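# Hypothetical usage (not part of the original snippet):
print(median([5, 1, 3]))     # 3
print(median([4, 1, 3, 2]))  # 2.5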
import logging
import time
from collections import OrderedDict

import torch
from tqdm import tqdm
def evaluation(eval_loader,
model,
criterion,
num_classes,
batch_size,
ep_idx,
progress_log,
scale,
vis_params,
batch_metrics=None,
dataset='val',
device=None,
debug=False):
"""
Evaluate the model and return the updated metrics
:param eval_loader: data loader
:param model: model to evaluate
:param criterion: loss criterion
:param num_classes: number of classes
:param batch_size: number of samples to process simultaneously
:param ep_idx: epoch index (for hypertrainer log)
:param progress_log: progress log file (for hypertrainer log)
:param scale: Scale to which values in sat img have been redefined. Useful during visualization
:param vis_params: (Dict) Parameters useful during visualization
:param batch_metrics: (int) Metrics computed every (int) batches. If left blank, will not perform metrics.
:param dataset: (str) 'val or 'tst'
:param device: device used by pytorch (cpu ou cuda)
:param debug: if True, debug functions will be performed
:return: (dict) eval_metrics
"""
eval_metrics = create_metrics_dict(num_classes)
model.eval()
for batch_index, data in enumerate(tqdm(eval_loader, dynamic_ncols=True, desc=f'Iterating {dataset} '
f'batches with {device.type}')):
progress_log.open('a', buffering=1).write(tsv_line(ep_idx, dataset, batch_index, len(eval_loader), time.time()))
with torch.no_grad():
try: # For HPC when device 0 not available. Error: RuntimeError: CUDA error: invalid device ordinal
inputs = data['sat_img'].to(device)
labels = data['map_img'].to(device)
except RuntimeError:
logging.exception(f'Unable to use device {device}. Trying "cuda:0"')
device = torch.device('cuda')
inputs = data['sat_img'].to(device)
labels = data['map_img'].to(device)
labels_flatten = flatten_labels(labels)
outputs = model(inputs)
if isinstance(outputs, OrderedDict):
outputs = outputs['out']
# vis_batch_range: range of batches to perform visualization on. see README.md for more info.
# vis_at_eval: (bool) if True, will perform visualization at eval time, as long as vis_batch_range is valid
if vis_params['vis_batch_range'] and vis_params['vis_at_eval']:
min_vis_batch, max_vis_batch, increment = vis_params['vis_batch_range']
if batch_index in range(min_vis_batch, max_vis_batch, increment):
vis_path = progress_log.parent.joinpath('visualization')
if ep_idx == 0 and batch_index == min_vis_batch:
logging.info(
f'Visualizing on {dataset} outputs for batches in range {vis_params["vis_batch_range"]} '
f'images will be saved to {vis_path}\n')
vis_from_batch(vis_params, inputs, outputs,
batch_index=batch_index,
vis_path=vis_path,
labels=labels,
dataset=dataset,
ep_num=ep_idx + 1,
scale=scale)
outputs_flatten = flatten_outputs(outputs, num_classes)
loss = criterion(outputs, labels)
eval_metrics['loss'].update(loss.item(), batch_size)
if (dataset == 'val') and (batch_metrics is not None):
# Compute metrics every n batches. Time consuming.
if not batch_metrics <= len(eval_loader):
logging.error(f"Batch_metrics ({batch_metrics}) is smaller than batch size "
f"{len(eval_loader)}. Metrics in validation loop won't be computed")
if (batch_index + 1) % batch_metrics == 0: # +1 to skip val loop at very beginning
a, segmentation = torch.max(outputs_flatten, dim=1)
eval_metrics = iou(segmentation, labels_flatten, batch_size, num_classes, eval_metrics)
eval_metrics = report_classification(segmentation, labels_flatten, batch_size, eval_metrics,
ignore_index=eval_loader.dataset.dontcare)
elif dataset == 'tst':
a, segmentation = torch.max(outputs_flatten, dim=1)
eval_metrics = iou(segmentation, labels_flatten, batch_size, num_classes, eval_metrics)
eval_metrics = report_classification(segmentation, labels_flatten, batch_size, eval_metrics,
ignore_index=eval_loader.dataset.dontcare)
logging.debug(OrderedDict(dataset=dataset, loss=f'{eval_metrics["loss"].avg:.4f}'))
if debug and device.type == 'cuda':
res, mem = gpu_stats(device=device.index)
logging.debug(OrderedDict(device=device, gpu_perc=f'{res.gpu} %',
gpu_RAM=f'{mem.used / (1024 ** 2):.0f}/{mem.total / (1024 ** 2):.0f} MiB'))
logging.info(f"{dataset} Loss: {eval_metrics['loss'].avg}")
if batch_metrics is not None:
logging.info(f"{dataset} precision: {eval_metrics['precision'].avg}")
logging.info(f"{dataset} recall: {eval_metrics['recall'].avg}")
logging.info(f"{dataset} fscore: {eval_metrics['fscore'].avg}")
logging.info(f"{dataset} iou: {eval_metrics['iou'].avg}")
return eval_metrics | cb42241366cc1b5672c8953fd78e5bbcacf879da | 4,088 |
import logging
from typing import Optional

logger = logging.getLogger(__name__)

def split_text_by_length(text: str,
                         length: Optional[int] = None,  # option 1: length +/- delta
                         delta: Optional[int] = 30,
                         max_length: Optional[int] = None,  # option 2: explicit upper/lower bounds
                         min_length: Optional[int] = None,
                         ignore_=False):
    """
    Split the text into chunks according to the given length limits.
    :param text: the text to split
    :param delta: allowed deviation around ``length``
    :param length: target chunk length (used together with ``delta``)
    :param max_length: maximum allowed chunk length
    :param min_length: minimum allowed chunk length; anything shorter is dropped
    :return : generator that yields one chunk at a time
    :param ignore_: whether to drop the final chunk if it is too short (kept by default)
    """
if length:
max_length = length + delta
min_length = length - delta
if not max_length or not min_length:
logger.error(f"split_text_by_length 缺少必要参数!!!")
return None
while len(text) > max_length:
s = text[:max_length]
index = search_split_pos(s) # 上策
if index < min_length:
index = search_split_pos(s, keys=",") # 中策
if index == -1:
index = (max_length + min_length) // 2 # 直接切分,下下策
yield text[:index]
text = text[index:]
else:
if len(text) < min_length and ignore_:
            return  # stop iteration
yield text | 60bf713a2cbe3eff85237d9637a303668a9f436b | 4,089 |
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer

def _tfidf_fit_transform(vectors: np.ndarray):
""" Train TF-IDF (Term Frequency — Inverse Document Frequency)
Transformer & Extract TF-IDF features on training data
"""
transformer = TfidfTransformer()
features = transformer.fit_transform(vectors).toarray()
return features, transformer | c38aa629d11258291f306052ac0e4c9c2a474ebd | 4,090 |
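# Hypothetical usage (not part of the original snippet): turning a small matrix
# of raw term counts into TF-IDF features.
import numpy as np

counts = np.array([[3, 0, 1],
                   [2, 0, 0],
                   [3, 0, 0]])
features, transformer = _tfidf_fit_transform(counts)
print(features.shape)  # (3, 3)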
from typing import List
def _is_missing_sites(spectra: List[XAS]):
"""
    Determines if the collection of spectra is missing any indices for the given element
"""
structure = spectra[0].structure
element = spectra[0].absorbing_element
    # Find missing symmetrically inequivalent sites
symm_sites = SymmSites(structure)
absorption_indicies = {spectrum.absorbing_index for spectrum in spectra}
missing_site_spectra_indicies = set(structure.indices_from_symbol(element)) - absorption_indicies
for site_index in absorption_indicies:
missing_site_spectra_indicies -= set(symm_sites.get_equivalent_site_indices(site_index))
return len(missing_site_spectra_indicies) != 0 | 0ae7ad0622e8ec398306e05def214b0ad40fd90f | 4,091 |
def get_objects(params, meta):
"""
Retrieve a list of objects based on their upas.
params:
guids - list of string - KBase IDs (upas) to fetch
post_processing - object of post-query filters (see PostProcessing def at top of this module)
output:
objects - list of ObjectData - see the ObjectData type description in the module docstring above.
search_time - int - time it took to perform the search on ES
access_group_narrative_info - dict of {access_group_id: narrative_info} -
Information about the workspaces in which the objects in the
results reside. This data only applies to workspace objects.
"""
# KBase convention is to wrap params in an array
if isinstance(params, list) and len(params) == 1:
params = params[0]
post_processing = params.get('post_processing', {})
search_results = _search_objects({'query': {'terms': {'_id': params['guids']}}}, meta)
objects = _get_object_data_from_search_results(search_results, post_processing)
(narrative_infos, ws_infos) = _fetch_narrative_info(search_results, meta)
return [{
'search_time': search_results['search_time'],
'objects': objects,
'access_group_narrative_info': narrative_infos,
'access_groups_info': ws_infos
}] | 17d38a1a5e09847700537076c0bfefdd55947682 | 4,092 |
import os
def heat_transfer_delta():
"""
:return: net - OpenModelica network converted to a pandapipes network
:rtype: pandapipesNet
:Example:
>>> pandapipes.networks.simple_water_networks.heat_transfer_delta()
"""
return from_json(os.path.join(heat_tranfer_modelica_path, "delta.json")) | 276c691ef5e6cdbb7a8fa7a0ce878d0962cc06a1 | 4,093 |
import os

import rarfile
def extract(file_path, extract_path):
"""
Extract if exists.
Args:
file_path (str): Path of the file to be extracted
extract_path (str): Path to copy the extracted files
Returns:
True if extracted successfully, False otherwise
"""
if (os.path.exists(file_path) and os.path.isfile(file_path)):
with rarfile.RarFile(file_path, 'r') as compressed:
compressed.extractall(extract_path)
compressed.close()
return True
return False | 8887e0796db6980d1b06610e9d95f29faf232c75 | 4,094 |
# Assumption: "ms" is the music21 package (not imported in the original snippet).
import music21 as ms

def parseMidi(midifile):
    """Take a MIDI file and return the list of chords and interval vectors.
    The file is first parsed (MIDI or XML). Then, with chordify and
    PC-Set, we compute a list of PC-chords and interval vectors.
    """
mfile = ms.converter.parse(midifile)
mChords = mfile.chordify()
chordList = []
chordVectors = []
for c in mChords.recurse().getElementsByClass('Chord'):
chordList.append(c.orderedPitchClasses)
chordVectors.append(c.intervalVector)
# print('The number of chords found is : ', len(chordList))
return chordList, chordVectors | 8c803c297eee5cc29a78d6c8b864a85e8bfd3d52 | 4,095 |
# Assumption: "SM" is difflib.SequenceMatcher (not imported in the original snippet).
from difflib import SequenceMatcher as SM

def get_similarity(s1, s2):
"""
Return similarity of both strings as a float between 0 and 1
"""
return SM(None, s1, s2).ratio() | 3964670a69a135fbc6837e9c68a2e7ac713d67dc | 4,096 |
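# Hypothetical usage (not part of the original snippet): SequenceMatcher ratio
# of two classic example strings.
print(round(get_similarity("kitten", "sitting"), 3))  # 0.615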
from itertools import chain
from typing import Sequence, TypeVar, Union

# Assumption: "T" is the generic TypeVar used with IterableValue; the original
# "from re import T" was a spurious auto-import.
T = TypeVar("T")
def concrete_values_from_iterable(
value: Value, ctx: CanAssignContext
) -> Union[None, Value, Sequence[Value]]:
"""Return the exact values that can be extracted from an iterable.
Three possible return types:
- ``None`` if the argument is not iterable
- A sequence of :class:`Value` if we know the exact types in the iterable
- A single :class:`Value` if we just know that the iterable contains this
value, but not the precise number of them.
Examples:
- ``int`` -> ``None``
- ``tuple[int, str]`` -> ``(int, str)``
- ``tuple[int, ...]`` -> ``int``
"""
if isinstance(value, MultiValuedValue):
subvals = [concrete_values_from_iterable(val, ctx) for val in value.vals]
if any(subval is None for subval in subvals):
return None
value_subvals = [subval for subval in subvals if isinstance(subval, Value)]
seq_subvals = [
subval
for subval in subvals
if subval is not None and not isinstance(subval, Value)
]
if not value_subvals and len(set(map(len, seq_subvals))) == 1:
return [unite_values(*vals) for vals in zip(*seq_subvals)]
return unite_values(*value_subvals, *chain.from_iterable(seq_subvals))
elif isinstance(value, AnnotatedValue):
return concrete_values_from_iterable(value.value, ctx)
value = replace_known_sequence_value(value)
if isinstance(value, SequenceIncompleteValue) and value.typ is tuple:
return value.members
tv_map = IterableValue.can_assign(value, ctx)
if not isinstance(tv_map, CanAssignError):
return tv_map.get(T, UNRESOLVED_VALUE)
return None | 3acdda92df4e27d4eecf39630570b90049580d6d | 4,097 |
import os
def ReadSimInfo(basefilename):
"""
Reads in the information in .siminfo and returns it as a dictionary
"""
filename = basefilename + ".siminfo"
if (os.path.isfile(filename)==False):
print("file not found")
return []
cosmodata = {}
siminfofile = open(filename,"r")
line = siminfofile.readline().strip().split(" : ")
while(line[0]!=""):
cosmodata[line[0]] = float(line[1])
line = siminfofile.readline().strip().split(" : ")
siminfofile.close()
return cosmodata | 74b568d953c155f8998e051a83b35b003377ac26 | 4,098 |
def quat_conjugate(quat_a):
"""Create quatConjugate-node to conjugate a quaternion.
Args:
quat_a (NcNode or NcAttrs or str or list or tuple): Quaternion to
conjugate.
Returns:
NcNode: Instance with quatConjugate-node and output-attribute(s)
Example:
::
Op.quat_conjugate(create_node("decomposeMatrix").outputQuat)
"""
created_node = _create_operation_node("quat_conjugate", quat_a)
return created_node | 2bff8b1e472ad2975ba96084843004ce86205f9f | 4,099 |