def author(repo, subset, x):
"""``author(string)``
Alias for ``user(string)``.
"""
# i18n: "author" is a keyword
n = encoding.lower(getstring(x, _("author requires a string")))
return [r for r in subset if n in encoding.lower(repo[r].user())] | ee7bd62d52bd0e36ab910e53ca8e029780f4d6c6 | 15,809 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
def pianoroll_plot_setup(figsize=None, side_piano_ratio=0.025,
faint_pr=True, xlim=None):
"""Makes a tiny piano left of the y-axis and a faint piano on the main figure.
This function sets up the figure for pretty plotting a piano roll. It makes a
small imshow plot to the left of the main plot that looks like a piano. This
piano side plot is aligned along the y-axis of the main plot, such that y
values align with MIDI note numbers (y=0 is the lowest C-1, y=12 is C0, etc).
Additionally, a main figure is set up that shares the y-axis of the piano side
plot. Optionally, a set of faint horizontal lines are drawn on the main figure
that correspond to the black keys on the piano (and a line separating B & C
and E & F). This function returns the formatted figure, the side piano axis,
and the main axis for plotting your data.
By default, this will draw 11 octaves of piano keys along the y-axis; you will
probably want to reduce what is visible using `ax.set_ylim()` on either returned
axis.
Using with imshow piano roll data:
A common use case is for using imshow() on the main axis to display a piano
roll alongside the piano side plot AND the faint piano roll behind your
data. In this case, if your data is a 2D array you have to use a masked
numpy array to make certain values invisible on the plot, and therefore make
the faint piano roll visible. Here's an example:
midi = np.flipud([
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
])
midi_masked = np.ma.masked_values(midi, 0.0) # Mask out all 0.0's
fig, ax, sp = plotting.pianoroll_plot_setup()
ax.imshow(midi_masked, origin='lower', aspect='auto') # main subplot axis
sp.set_ylabel('My favorite MIDI data') # side piano axis
fig.show()
The other option is to use imshow in RGBA mode, where your data is split
into 4 channels. Every alpha value that is 0.0 will be transparent and show
the faint piano roll below your data.
Args:
figsize: Size of the matplotlib figure. Will be passed to `plt.figure()`.
Defaults to None.
side_piano_ratio: Width of the y-axis piano as a ratio of the whole
figure. Defaults to 1/40th.
faint_pr: Whether to draw faint black & white keys across the main plot.
Defaults to True.
xlim: Tuple containing the min and max of the x values for the main plot.
Only used to determine the x limits for the faint piano roll in the main
plot. Defaults to (0, 1000).
Returns:
(figure, main_axis, left_piano_axis)
figure: A matplotlib figure object containing both subplots set up with an
aligned piano roll.
main_axis: A matplotlib axis object to be used for plotting. Optionally
has a faint piano roll in the background.
left_piano_axis: A matplotlib axis object that has a small, aligned piano
along the left side y-axis of the main_axis subplot.
"""
octaves = 11
# Setup figure and gridspec.
fig = plt.figure(figsize=figsize)
gs_ratio = int(1 / side_piano_ratio)
gs = gridspec.GridSpec(1, 2, width_ratios=[1, gs_ratio])
left_piano_ax = fig.add_subplot(gs[0])
# Make a piano on the left side of the y-axis with imshow().
keys = np.array(
[0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0] # notes in descending order; B -> C
)
keys = np.tile(keys, octaves)[:, None]
left_piano_ax.imshow(keys, cmap='binary', aspect='auto',
extent=[0, 0.625, -0.5, octaves*12-0.5])
# Make the lines between keys.
for i in range(octaves):
left_piano_ax.hlines(i*12 - 0.5, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 1.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 3.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 4.5, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 6.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 8.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 10.0, -0.5, 1, colors='black', linewidth=0.5)
# Set the limits of the side piano and remove ticks so it looks nice.
left_piano_ax.set_xlim(0, 0.995)
left_piano_ax.set_xticks([])
# Create the aligned axis we'll return to the user.
main_ax = fig.add_subplot(gs[1], sharey=left_piano_ax)
# Draw a faint piano roll behind the main axes (if the user wants).
if faint_pr:
xlim = (0, 1000) if xlim is None else xlim
x_min, x_max = xlim
x_delta = x_max - x_min
main_ax.imshow(np.tile(keys, x_delta), cmap='binary', aspect='auto',
alpha=0.05, extent=[x_min, x_max, -0.5, octaves*12-0.5])
for i in range(octaves):
main_ax.hlines(i * 12 + 4.5, x_min, x_max, colors='black',
linewidth=0.5, alpha=0.25)
main_ax.hlines(i * 12 - 0.5, x_min, x_max, colors='black',
linewidth=0.5, alpha=0.25)
main_ax.set_xlim(*xlim)
# Some final cosmetic tweaks before returning the axis obj's and figure.
plt.setp(main_ax.get_yticklabels(), visible=False)
gs.tight_layout(fig)
return fig, main_ax, left_piano_ax | dc2a43be63d77ee99230399b687e86c09570db6c | 15,810 |
def exercise(request, exercisename):
"""Show single sport and its totals."""
e = exercisename
cur_user = request.user
exercises = Exercise.objects.filter(owner=cur_user, sport=e).order_by('-date')
context = {'exercises': exercises, 'total': Stats.total(cur_user, sport=e),
'totaltime': Stats.totaltime(cur_user, sport=e)}
return render(request, 'distances/exercises.html', context) | 8648673d6bdb3997d9b9d38155e2cb2039ff4f1b | 15,811 |
import random
def randomBinaryMatrix(scale, type):
"""
Generates a pseudo-random BinaryMatrix of a given scale ('small' or 'large')
and datatype ('int').
"""
if(scale == "small" and type == "int"):
nrow = random.randint(1, 10)
ncol = random.randint(1, 10)
data = []
for i in range(nrow):
data.append([])
for _j in range(ncol):
data[i].append(random.randint(0, 1))
return BinaryMatrix(
nrow=nrow,
ncol=ncol,
data=data
)
if(scale == "large" and type == "int"):
nrow = random.randint(10, 100)
ncol = random.randint(10, 100)
data = []
for i in range(nrow):
data.append([])
for _j in range(ncol):
data[i].append(random.randint(0, 1))
return BinaryMatrix(
nrow=nrow,
ncol=ncol,
data=data
) | 289d266eee4f6244774f7138e9efbe18970545f4 | 15,812 |
from typing import Optional
def load_batch(server_context: ServerContext, assay_id: int, batch_id: int) -> Optional[Batch]:
"""
Loads a batch from the server.
:param server_context: A LabKey server context. See utils.create_server_context.
:param assay_id: The protocol id of the assay from which to load a batch.
:param batch_id: The id of the batch to load.
:return: The loaded Batch, or None if the server returned no data.
"""
load_batch_url = server_context.build_url("assay", "getAssayBatch.api")
loaded_batch = None
payload = {"assayId": assay_id, "batchId": batch_id}
json_body = server_context.make_request(load_batch_url, json=payload)
if json_body is not None:
loaded_batch = Batch(**json_body["batch"])
return loaded_batch | 731d463bc1e0380107390caabae8c57c7e6cff02 | 15,813 |
def canvas_compose(mode, dst, src):
"""Compose two alpha premultiplied images
https://ciechanow.ski/alpha-compositing/
http://ssp.impulsetrain.com/porterduff.html
"""
src_a = src[..., -1:] if len(src.shape) == 3 else src
dst_a = dst[..., -1:] if len(dst.shape) == 3 else dst
if mode == COMPOSE_OVER:
return src + dst * (1 - src_a)
elif mode == COMPOSE_OUT:
return src * (1 - dst_a)
elif mode == COMPOSE_IN:
return src * dst_a
elif mode == COMPOSE_ATOP:
return src * dst_a + dst * (1 - src_a)
elif mode == COMPOSE_XOR:
return src * (1 - dst_a) + dst * (1 - src_a)
elif isinstance(mode, tuple) and len(mode) == 4:
k1, k2, k3, k4 = mode
return (k1 * src * dst + k2 * src + k3 * dst + k4).clip(0, 1)
raise ValueError(f"invalid compose mode: {mode}") | 9d95b840f814a77077050cb43a081c01c496640b | 15,814 |
from sqlmodel import select
async def get_timelog_user_id(
*,
user_id: int,
epic_id: int,
month: int,
year: int,
session: Session = Depends(get_session),
):
"""
Get list of timelogs by user_id, epic_id, month and year.
Parameters
----------
user_id : int
ID of user from which to pull timelogs.
epic_id : int
ID of the epic from which to pull timelogs.
month : int
Month from which to pull timelog(s).
year : int
Year from which to pull timelog(s).
session : Session
SQL session that is to be used to get the timelogs.
Defaults to creating a dependency on the running SQL model session.
"""
statement = (
select(
TimeLog.id,
AppUser.username.label("username"),
Epic.name.label("epic_name"),
EpicArea.name.label("epic_area_name"),
TimeLog.start_time,
TimeLog.end_time,
TimeLog.count_hours,
TimeLog.count_days,
)
.join(AppUser)
.join(EpicArea)
.join(Epic)
.where(TimeLog.user_id == user_id)
.where(TimeLog.epic_id == epic_id)
.where(TimeLog.month == month)
.where(TimeLog.year == year)
.order_by(TimeLog.end_time.desc())
)
results = session.exec(statement).all()
return results | fe4bdcbda40c2d32b743262cb14139e89890b237 | 15,815 |
import numpy as np
def _cross(
vec1,
vec2,
):
"""Cross product between vec1 and vec2 in R^3"""
vec3 = np.zeros((3,))
vec3[0] = +(vec1[1] * vec2[2] - vec1[2] * vec2[1])
vec3[1] = -(vec1[0] * vec2[2] - vec1[2] * vec2[0])
vec3[2] = +(vec1[0] * vec2[1] - vec1[1] * vec2[0])
return vec3 | 2958a7365908bbd38c75f79e489d136f21fcc011 | 15,816 |
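A quick illustrative sanity check (assuming the _cross function above is in scope): the hand-rolled formula matches numpy's built-in cross product.

import numpy as np
vec1 = np.array([1.0, 2.0, 3.0])
vec2 = np.array([4.0, 5.0, 6.0])
assert np.allclose(_cross(vec1, vec2), np.cross(vec1, vec2))  # both give [-3., 6., -3.]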
def _simplex_dot3D(g, x, y, z):
""" 3D dot product """
return g[0] * x + g[1] * y + g[2] * z | fcc48153b34af7cef0811f21fc04d22e6536797a | 15,817 |
import inspect
def __get_report_failures(test_data: TestData) -> str:
"""
Gets test report with all failed test soft asserts
:param test_data: test data from yaml file
:return: str test report with all soft asserts
"""
test_id = __get_test_id()
failed_assert_reports = __FAILED_EXPECTATIONS.get(test_id)
meta_info = inspect.stack()[2][1:4]
expectation_report = ExpectationReport(test_data.test_name, failed_assert_reports, meta_info)
return expectation_report.get_report_message() | cedb24569acedf9bd251c233f220c44a1bb05772 | 15,818 |
def initialized_sm(registrations, uninitialized_sm):
""" The equivalent of an app with commit """
uninitialized_sm.initialize()
return uninitialized_sm | 491e4366b81379b053d1dea203d338766c3afa86 | 15,819 |
def coordinateToIndex(coordinate):
"""Return a raw index (e.g [4, 4]) from board coordinate (e.g. e4)"""
return [abs(int(coordinate[1]) - 8), ("a", "b", "c", "d", "e", "f", "g", "h").index(coordinate[0])] | d3dcf6d01c4bec2058cffef88867d45ba51ea560 | 15,821 |
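A few illustrative checks (assuming coordinateToIndex above is in scope): row 0 corresponds to rank 8 and column 0 to file 'a'.

assert coordinateToIndex("a8") == [0, 0]  # top-left square
assert coordinateToIndex("e4") == [4, 4]
assert coordinateToIndex("h1") == [7, 7]  # bottom-right square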
import re
import logging
def parse_page(url):
"""parge the page and get all the links of images, max number is 100 due to limit by google
Args:
url (str): url of the page
Returns:
A set containing the urls of images
"""
page_content = download_page(url)
if page_content:
link_list = re.findall('src="(.*?)"', page_content)
if len(link_list) == 0:
print('get 0 links from page {0}'.format(url))
logging.info('get 0 links from page {0}'.format(url))
return set()
else:
return set(link_list)
else:
return set() | 5833e0092650488e8ef430de0eafd79f6e5d2ffa | 15,822 |
def json_page_resp(name, page, paginator):
"""
Returns a standardized page response
"""
page_rows = paginator.get_page(page)
return JsonResponse({'page':page, 'pages':paginator.num_pages, name:[x['json'] for x in page_rows], 'size':len(page_rows)}, safe=False) | d615cfeaa2fafdb35333eee6aa6d63e1511a1dd3 | 15,824 |
from .interpreters import ScriptRunnerPlugin
def get_script_runner():
"""
Gets the script runner plugin instance if any otherwise returns None.
:rtype: hackedit.api.interpreters.ScriptRunnerPlugin
"""
return _window().get_plugin_instance(ScriptRunnerPlugin) | 2f76f46dd502fbd6ce9bd0a5f90eb7eed8bb64ca | 15,825 |
def latest_version():
"""
Returns the latest version, as specified by the Git tags.
"""
versions = []
for t in tags():
assert t == t.strip()
parts = t.split(".")
assert len(parts) == 3, t
parts[0] = parts[0].lstrip("v")
v = tuple(map(int, parts))
versions.append((v, t))
_, latest = max(versions)
assert latest in tags()
return latest | 346edcc6d087ca1511411b52de20b90f1a993f3a | 15,827 |
import string
import pandas as pd
def get_age_group(df, n: int = 10):
"""Assigns an age-group category to the Age column of the DataFrame
Parameters
----------
df : Dataframe
n : number of categories
Returns
-------
Dataset with Age_group column
"""
df["Age_group"] = pd.cut(df["Age"], n, labels = list(string.ascii_uppercase)[:n])
return df | f793316c7c494adec1bfedf8613edf6c4ed5e2e2 | 15,828 |
def transformer_encoder_layer(query_input,
key_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name=''):
"""The encoder layers that can be stacked to form a deep encoder.
This module consists of a multi-head (self) attention followed by
position-wise feed-forward networks, with both components accompanied
by the post_process_layer to add residual connection, layer normalization
and dropout.
"""
key_input = pre_process_layer(
key_input,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_att') if key_input else None
value_input = key_input if key_input else None
attn_output = multi_head_attention(
pre_process_layer(
query_input,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_att'),
key_input,
value_input,
attn_bias,
d_key,
d_value,
d_model,
n_head,
attention_dropout,
param_initializer=param_initializer,
name=name + '_multi_head_att')
attn_output = post_process_layer(
query_input,
attn_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_att')
ffd_output = positionwise_feed_forward(
pre_process_layer(
attn_output,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_ffn'),
d_inner_hid,
d_model,
relu_dropout,
hidden_act,
param_initializer=param_initializer,
name=name + '_ffn')
return post_process_layer(
attn_output,
ffd_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_ffn') | fe300a8c72c39c7e847f400f8f874dabab80b6e6 | 15,829 |
import numpy as np
def generate(ode, lenght=int(2e4)):
"""
Time series generation from an ODE.
:param ode: ODE object;
:param lenght: series length;
:return: time series.
"""
state = ode.initial_state
data = np.zeros([int(state.shape[0]), lenght])
for i in range(5000):
state = runge_kutta(ode, state)
for i in range(lenght):
state = runge_kutta(ode, state)
data[:, i] = state
return data | 442f5359e3225d00cf0396e710e3978d1a6e37f8 | 15,830 |
import numpy as np
def complexFormatToRealImag(complexVec):
"""
A reformatting function which converts a complex vector into real valued array.
Let the values in the input array be [r1+j*i1, r2+j*i2, ..., rN+j*iN]
then the output array will be [r1, i1, r2, i2,..., rN, iN]
:param complexVec: complex numpy ndarray
:return: a 1D real-valued numpy array of length 2N.
"""
N = len(complexVec)
ret = np.empty((2*N,), dtype=np.real(complexVec).dtype)
ret[0::2] = complexVec.real
ret[1::2] = complexVec.imag
return ret | d955f2b31581036594ca79cd4755327eaa8b2446 | 15,831 |
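An illustrative call (assuming complexFormatToRealImag above is in scope) showing the real/imaginary interleaving:

import numpy as np
vec = np.array([1 + 2j, 3 - 4j])
print(complexFormatToRealImag(vec))  # [ 1.  2.  3. -4.]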
import xml.etree.ElementTree as ET
def displayaction(uid):
""" Display the command from the xml file
"""
tree = ET.parse(OPENSTRIATOFILE)
root = tree.getroot()
textaction = root.findall("./action[@uid='"+uid+"']")
if len(textaction) == 0:
return "This UID does not exist!"
else:
return "UID %s action: %s" % (uid, textaction[0].text) | e34133a168b20cc9b018175ee5b4363fd2ff9690 | 15,832 |
def get_parent(inst, rel_type='cloudify.relationships.contained_in'):
"""
Gets the parent of an instance
:param `cloudify.context.NodeInstanceContext` inst: Cloudify instance
:param string rel_type: Relationship type
:returns: Parent context
:rtype: :class:`cloudify.context.RelationshipSubjectContext` or None
"""
for rel in inst.relationships:
if rel_type in rel.type_hierarchy:
return rel.target
return None | 06bc76ec55735a47a3cf26df2daa4346290671ee | 15,833 |
def _query_param(key, value):
"""ensure that a query parameter's value is a string
of bytes in UTF-8 encoding.
"""
if isinstance(value, unicode):
pass
elif isinstance(value, str):
value = value.decode('utf-8')
else:
value = unicode(value)
return key, value.encode('utf-8') | 9c89517afd8d1684b1bb954f66cd2072296dee82 | 15,834 |
def _create_or_get_dragonnet(embedding, is_training, treatment, outcome, split, getter=None):
"""
Make predictions for the outcome, using the treatment and embedding,
and predictions for the treatment, using the embedding
Both outcome and treatment are assumed to be binary
Note that we return the loss as a sum (and not a mean). This makes more sense for training dynamics
Parameters
----------
embedding
is_training
treatment
outcome
label_dict
split
getter: custom getter, for Polyak averaging support
Returns
-------
"""
treatment_float = tf.cast(treatment, tf.float32)
with tf.variable_scope('dragon_net', reuse=tf.AUTO_REUSE, custom_getter=getter):
with tf.variable_scope('treatment'):
loss_t, per_example_loss_t, logits_t, expectation_t = _make_feedforward_classifier(
embedding, treatment, 2, split, num_hidden_layers=2)
with tf.variable_scope('outcome_st_treatment'):
loss_ot1, per_example_loss_ot1, logits_ot1, expectation_ot1 = _make_feedforward_classifier(
embedding, outcome, 2, split=split*treatment_float, num_hidden_layers=0)
with tf.variable_scope('outcome_st_no_treatment'):
loss_ot0, per_example_loss_ot0, logits_ot0, expectation_ot0 = _make_feedforward_classifier(
embedding, outcome, 2, split=split*(1.-treatment_float), num_hidden_layers=0)
tf.losses.add_loss(loss_ot0)
tf.losses.add_loss(loss_ot1)
tf.losses.add_loss(loss_t)
training_loss = loss_ot0 + loss_ot1 + loss_t
outcome_st_treat = {'per_example_loss': per_example_loss_ot1,
'logits': logits_ot1,
'expectations': expectation_ot1}
outcome_st_no_treat = {'per_example_loss': per_example_loss_ot0,
'logits': logits_ot0,
'expectations': expectation_ot0}
treat = {'per_example_loss': per_example_loss_t,
'logits': logits_t,
'expectations': expectation_t}
return training_loss, outcome_st_treat, outcome_st_no_treat, treat | 7fc7fead338ac2c33bcfa016f9d66e34d15ac59c | 15,835 |
import statsmodels.api as sm
def fit_ols(Y, X):
"""Fit OLS model to both Y and X"""
model = sm.OLS(Y, X)
model = model.fit()
return model | dcc86cab7fe15400130febd36d5aa8139a68c64f | 15,836 |
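A minimal usage sketch with synthetic data (assuming fit_ols above is in scope and statsmodels is importable); the data and coefficients are illustrative only.

import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(100, 1)))  # intercept column plus one regressor
Y = X @ np.array([1.0, 2.0]) + rng.normal(scale=0.1, size=100)
model = fit_ols(Y, X)
print(model.params)  # approximately [1.0, 2.0]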
import numpy as np
def compute_modularity_per_code(mutual_information):
"""Computes the modularity from mutual information."""
# Mutual information has shape [num_codes, num_factors].
squared_mi = np.square(mutual_information)
max_squared_mi = np.max(squared_mi, axis=1)
numerator = np.sum(squared_mi, axis=1) - max_squared_mi
denominator = max_squared_mi * (squared_mi.shape[1] - 1.)
delta = numerator / denominator
modularity_score = 1. - delta
index = (max_squared_mi == 0.)
modularity_score[index] = 0.
return modularity_score | 5c81b583c6313818da435dd367a3d53933025227 | 15,837 |
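An illustrative call (assuming compute_modularity_per_code above is in scope): a code informative about a single factor scores 1, a code spread evenly over two factors scores 0.

import numpy as np
mi = np.array([[0.9, 0.0], [0.5, 0.5]])  # row 0: one factor only; row 1: split evenly
print(compute_modularity_per_code(mi))  # [1. 0.]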
import logging
def post_new_attending():
"""Posts attending physician information to the server
This method generates the new attending physician’s
dictionary with all of his/her information, then validates
that all of the information is the correct type. If the
validation stage is satisfied, then the attending’s
dictionary is added to the database.
Parameters
----------
N/A
Returns
-------
String
result of adding a new attending
"""
new_dict = request.get_json()
validate = validate_new_attending(new_dict)
if validate is not True:
return validate, 400
attending = add_new_attending(new_dict["attending_username"],
new_dict["attending_email"],
new_dict["attending_phone"])
if attending is True:
logging.info("New Attending Physician Added!")
logging.info("Physician User Name: {}".format(
new_dict["attending_username"]))
logging.info("Physician Email: {}".format(
new_dict["attending_email"]))
return "New Attending Physician Successfully Added", 200
else:
return "Failed to Add New Attending Physician", 400 | 18ddcb3bfcc601a22abccac7828ed6ac36368a33 | 15,839 |
def submitFeatureWeightedGridStatistics(geoType, dataSetURI, varID, startTime, endTime, attribute, value,
gmlIDs, verbose, coverage, delim, stat, grpby, timeStep, summAttr,
weighted, wfs_url, outputfname, sleepSecs, async=False):
"""
Makes a featureWeightedGridStatistics algorithm call.
The web service interface implemented is summarized here:
https://my.usgs.gov/confluence/display/GeoDataPortal/Generating+Area+Weighted+Statistics+Of+A+Gridded+Dataset+For+A+Set+Of+Vector+Polygon+Features
Note that varID and stat can be a list of strings.
"""
# test for dods:
dataSetURI = dodsReplace(dataSetURI)
log.info('Generating feature collection.')
featureCollection = _getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs, wfs_url)
if featureCollection is None:
return
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
if not weighted:
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureGridStatisticsAlgorithm'
solo_inputs = [("FEATURE_ATTRIBUTE_NAME", attribute),
("DATASET_URI", dataSetURI),
("TIME_START", startTime),
("TIME_END", endTime),
("REQUIRE_FULL_COVERAGE", str(coverage).lower()),
("DELIMITER", delim),
("GROUP_BY", grpby),
("SUMMARIZE_TIMESTEP", str(timeStep).lower()),
("SUMMARIZE_FEATURE_ATTRIBUTE", str(summAttr).lower()),
("FEATURE_COLLECTION", featureCollection)]
if isinstance(stat, list):
num_stats = len(stat)
if num_stats > 7:
raise Exception('Too many statistics were submitted.')
else:
num_stats = 1
if isinstance(varID, list):
num_varids = len(varID)
else:
num_varids = 1
inputs = [('', '')] * (len(solo_inputs) + num_varids + num_stats)
count = 0
rm_cnt = 0
for solo_input in solo_inputs:
if solo_input[1] is not None:
inputs[count] = solo_input
count += 1
else:
rm_cnt += 1
del inputs[count:count + rm_cnt]
if num_stats > 1:
for stat_in in stat:
if stat_in not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
raise Exception(('The statistic {} is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", ' +
'"VARIANCE", "STD_DEV", "SUM", "COUNT"').format(stat_in))
inputs[count] = ("STATISTICS", stat_in)
count += 1
elif num_stats == 1:
if stat not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
raise Exception(('The statistic {} is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", ' +
'"VARIANCE", "STD_DEV", "SUM", "COUNT"').format(stat))
inputs[count] = ("STATISTICS", stat)
count += 1
if num_varids > 1:
for var in varID:
inputs[count] = ("DATASET_ID", var)
count += 1
elif num_varids == 1:
inputs[count] = ("DATASET_ID", varID)
output = "OUTPUT"
return _executeRequest(processid, inputs, output, verbose, outputfname, sleepSecs, async=async) | ab6f1cbeee1943f75aa16c9153d2a317113d2398 | 15,840 |
from urllib.request import urlopen
def fetch_words(url):
"""
Fetch a list of words from a URL
Args:
url: the url of any text document (no decoding to utf-8 added)
Returns:
A list of strings containing the words in the document
"""
with urlopen(url) as story:
story_words = []
for line in story:
line_words = line.split()
for word in line_words:
story_words.append(word)
return story_words | 6679425f5f3680bd0b47888d59530a55b4c23443 | 15,841 |
def generate_fgsm_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):
"""
Untargeted attack. Y is not needed.
"""
fgsm = FastGradientMethod(model, back='tf', sess=sess)
fgsm_params = {'eps': 0.1, 'ord': np.inf, 'y': None, 'clip_min': 0, 'clip_max': 1}
fgsm_params = override_params(fgsm_params, attack_params)
X_adv = fgsm.generate_np(X, **fgsm_params)
return X_adv | 7b682622f843dd2c5d421c3d52b15e3a204edb0a | 15,842 |
def s3_bucket_for(bucket_prefix, path):
"""returns s3 bucket for path"""
suffix = s3_bucket_suffix_for(path)
return "{}-{}".format(bucket_prefix, suffix) | a59145474d2965a9e5f98d4728a6ac90d0d42cdf | 15,843 |
def regrid_create_operator(regrid, name, parameters):
"""Create a new `RegridOperator` instance.
:Parameters:
regrid: `ESMF.Regrid`
The `ESMF` regridding operator between two fields.
name: `str`
A descriptive name for the operator.
parameters: `dict`
Parameters that describe the complete coordinate system of
the destination grid.
:Returns:
`RegridOperator`
The new regrid operator.
"""
return RegridOperator(regrid, name, **parameters) | 1c44dbe1c4826ee566cfb6b95ac704c9af19fc30 | 15,844 |
def _decode_hmc_values(hmc_ref):
"""Decrypts any sensitive HMC values that were encrypted in the DB"""
if hmc_ref is not None:
hmc_ref = jsonutils.to_primitive(hmc_ref)
#Make sure to DeCrypt the Password after retrieving from the database
## del two lines by lixx
#if hmc_ref.get('password') is not None:
# hmc_ref['password'] = EncryptHandler().decode(hmc_ref['password'])
return hmc_ref | 7e1b33265811d79f245853cb016e5acd45627028 | 15,845 |
import logging
def dtensor_shutdown_tpu_system():
"""Shutdown TPU system."""
@def_function.function
def _shutdown_tpu_system():
return gen_dtensor_ops.shutdown_tpu_system()
success = _shutdown_tpu_system() if context.is_tfrt_enabled() else True
if success:
logging.info("TPU system shut down.")
else:
logging.warning("TPU system fails to shut down.") | 23140407222646fd9adb845ae5e04ca4a3a9cc5a | 15,846 |
import json
import math
def edit_comment(request):
"""
Edit an existing comment
"""
response = {"status": "success",
"data": {}}
if "char_id" in request.POST:
char_id = request.POST["char_id"]
else:
response["status"] = "fail"
response["data"]["message"] = "Paste ID was not provided (POST parameter 'char_id')"
return HttpResponse(json.dumps(response), status=422)
try:
paste = Paste.objects.get(char_id=char_id)
except ObjectDoesNotExist:
response["status"] = "fail"
response["data"]["message"] = "The paste couldn't be found."
return HttpResponse(json.dumps(response))
if "id" in request.POST:
id = int(request.POST["id"])
else:
response["status"] = "fail"
response["data"]["message"] = "Comment ID was not provided (POST parameter 'id')"
return HttpResponse(json.dumps(response), status=422)
if "page" in request.POST:
page = int(request.POST["page"])
else:
page = 0
if not request.user.is_authenticated():
response["status"] = "fail"
response["data"]["message"] = "You are not logged in."
return HttpResponse(json.dumps(response), status=422)
try:
comment = Comment.objects.get(id=id)
except ObjectDoesNotExist:
response["status"] = "fail"
response["data"]["message"] = "The comment doesn't exist."
return HttpResponse(json.dumps(response), status=400)
if comment.user != request.user:
response["status"] = "fail"
response["data"]["message"] = "You are trying to edit someone else's comment."
return HttpResponse(json.dumps(response), status=422)
submit_form = SubmitCommentForm(request.POST or None)
if submit_form.is_valid():
comment_data = submit_form.cleaned_data
comment.text = comment_data["text"]
comment.save()
total_comment_count = Comment.objects.filter(paste=paste).count()
start = page * Comment.COMMENTS_PER_PAGE
end = start + Comment.COMMENTS_PER_PAGE
response["data"]["edited_comment_id"] = comment.id
response["data"]["comments"] = queryset_to_list(Comment.objects.filter(paste=paste) \
.select_related("user") \
[start:end],
fields=["id", "text", "submitted", "edited", "user__username=username"])
response["data"]["page"] = page
response["data"]["pages"] = math.ceil(float(total_comment_count) / float(Comment.COMMENTS_PER_PAGE))
if response["data"]["pages"] == 0:
response["data"]["pages"] = 1
response["data"]["total_comment_count"] = total_comment_count
else:
response["status"] = "fail"
response["data"]["message"] = "Provided text wasn't valid."
return HttpResponse(json.dumps(response)) | b56f0b3f3c0d0635b4faa9a06320bc4b715ea0d1 | 15,847 |
import torch
def make_positions(tensor, padding_idx, onnx_trace=False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (
(torch.cumsum(mask, dim=1) - 1).type_as(mask) * mask
).long() | e5d117d64669f514b5cab4ad08ec526dd421493e | 15,849 |
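An illustrative call (assuming make_positions above is in scope); note that, as written, this variant numbers the first non-padding token 0 rather than padding_idx + 1, and padded slots stay at 0.

import torch
tensor = torch.tensor([[7, 8, 9, 1, 1]])  # 1 is the padding index here
print(make_positions(tensor, padding_idx=1))  # tensor([[0, 1, 2, 0, 0]])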
import warnings
import numpy as np
def taubin_curv(coords, resolution):
"""Curvature calculation based on algebraic circle fit by Taubin.
Adapted from: "https://github.com/PmagPy/PmagPy/blob/2efd4a92ddc19c26b953faaa5c08e3d8ebd305c9/SPD/lib
/lib_curvature.py"
G. Taubin, "Estimation Of Planar Curves, Surfaces And Nonplanar
Space Curves Defined By Implicit Equations, With
Applications To Edge And Range Image Segmentation",
IEEE Trans. PAMI, Vol. 13, pages 1115-1138, (1991)
Parameters
----------
coords : list
Nested list of paired x and y coordinates for each point of the line where a curve needs to be fited.
[[x_1, y_1], [x_2, y_2], ....]
resolution : float or int
Number of pixels per mm in original image.
Returns
-------
float or int(0)
If the radius of the fitted circle is finite, it will return the curvature (1/radius).
If the radius is infinite, it will return 0.
"""
warnings.filterwarnings("ignore") # suppress RuntimeWarnings from dividing by zero
xy = np.array(coords)
x = xy[:, 0] - np.mean(xy[:, 0]) # norming points by x avg
y = xy[:, 1] - np.mean(xy[:, 1]) # norming points by y avg
# centroid = [np.mean(xy[:, 0]), np.mean(xy[:, 1])]
z = x * x + y * y
zmean = np.mean(z)
z0 = ((z - zmean) / (2. * np.sqrt(zmean))) # changed from using old_div to Python 3 native division
zxy = np.array([z0, x, y]).T
u, s, v = np.linalg.svd(zxy, full_matrices=False) #
v = v.transpose()
a = v[:, 2]
a[0] = (a[0]) / (2. * np.sqrt(zmean))
a = np.concatenate([a, [(-1. * zmean * a[0])]], axis=0)
# a, b = (-1 * a[1:3]) / a[0] / 2 + centroid
r = np.sqrt(a[1] * a[1] + a[2] * a[2] - 4 * a[0] * a[3]) / abs(a[0]) / 2
if np.isfinite(r):
curv = 1 / (r / resolution)
if curv >= 0.00001:
return curv
else:
return 0
else:
return 0 | f3728528dbec5681b3915683af22b8e9838e73ce | 15,850 |
import hmac
from base64 import b64encode, urlsafe_b64encode
def calculate_mac(mac_type, credentials, options, url_encode=False):
"""Calculates a message authentication code (MAC)."""
normalized = normalize_string(mac_type, options)
digestmod = module_for_algorithm(credentials['algorithm'])
result = hmac.new(credentials['key'], normalized, digestmod)
if url_encode:
mac = urlsafe_b64encode(result.digest())
else:
mac = b64encode(result.digest())
return mac | 0701dbe3881ab500a70f3895af64d2ca6cb2905d | 15,852 |
import azlmbr.math as math  # assumed: the stdlib math module has no Vector3; O3DE editor scripts expose it via azlmbr.math
def run():
"""
Test Case - Fbx mesh group Import scaling in Atom:
1. Creates a new level called MeshScalingTemporaryLevel
2. Has a list of meshes, and for each one it will:
- Create an entity and attach the mesh to it.
- Sets it with an initial offset of x:-15, y:0, z:0
- For each additional mesh the x offset is modified by +3.0
3. Enters game mode to take a screenshot for comparison, then exits game mode.
4. Prints general.log("FBX mesh group scaling test has completed.")
5. Exit the Editor and ends the test.
Tests will fail immediately if any of these log lines are found:
1. Trace::Assert
2. Trace::Error
3. Traceback (most recent call last):
:return: None
"""
def after_level_load():
"""Function to call after creating/opening a level to ensure it loads."""
# Give everything a second to initialize.
general.idle_enable(True)
general.update_viewport()
general.idle_wait(0.5) # half a second is more than enough for updating the viewport.
# Close out problematic windows, FPS meters, and anti-aliasing.
if general.is_helpers_shown(): # Turn off the helper gizmos if visible
general.toggle_helpers()
if general.is_pane_visible("Error Report"): # Close Error Report windows that block focus.
general.close_pane("Error Report")
if general.is_pane_visible("Error Log"): # Close Error Log windows that block focus.
general.close_pane("Error Log")
general.run_console("r_displayInfo=0")
general.run_console("r_antialiasingmode=0")
return True
# Create a new test level
test_level_name = 'MeshGroupingTemporaryLevel'
heightmap_resolution = 128
heightmap_meters_per_pixel = 1
terrain_texture_resolution = 128
use_terrain = False
# Return codes are ECreateLevelResult defined in CryEdit.h
return_code = general.create_level_no_prompt(
test_level_name, heightmap_resolution, heightmap_meters_per_pixel, terrain_texture_resolution, use_terrain)
if return_code == 1:
general.log(f"{test_level_name} level already exists")
elif return_code == 2:
general.log("Failed to create directory")
elif return_code == 3:
general.log("Directory length is too long")
elif return_code != 0:
general.log("Unknown error, failed to create level")
else:
general.log(f"{test_level_name} level created successfully")
after_level_load()
helper.init_idle()
helper.open_level(test_level_name)
general.idle_wait_frames(1)
# These are the meshes that are used to test FBX mesh import scaling.
meshes = [
"cube_group.azmodel",
"cube_parent.azmodel",
"cube_parent_plus_locator.azmodel",
"cube_parent_plus_locator_rotatez_90.azmodel",
"cube_parent__rotatez_90_locator.azmodel",
"cube_parent__scaley_2_locator.azmodel",
"cube_parent__transx_100_locator.azmodel"
]
# Initial offset values to iterate off of for mesh scaling of meshes.
offset = math.Vector3()
offset.x = -15.0
offset.y = 0.0
offset.z = 0.0
# For each mesh, create an entity and attach the mesh to it, then scale it using the values in offset.
meshIndex = 0
for mesh in meshes:
meshIndex = meshIndex + 1
offset.x += 3.0
entityName = "TestEntity{}".format(meshIndex)
helper_create_entity_with_mesh("dag_hierarchy/" + mesh, offset, entityName)
helper.enter_game_mode(["", ""])
# Example: how to capture a screenshot
general.set_viewport_size(1280, 720)
general.set_cvar_integer('r_DisplayInfo', 0)
general.idle_wait_frames(1)
ScreenshotHelper(general.idle_wait_frames).capture_screenshot_blocking(
"screenshot_atom_FBXMeshGroupImportScaling.dds")
helper.exit_game_mode(["", ""])
general.log("FBX mesh group scaling test has completed.")
helper.close_editor() | fb7c5194d755e277e14f778c6fe52f9c5d1a36be | 15,853 |
def get_prime(num_dict):
"""获取字典里所有的素数"""
prime_dict = {}
for key, value in num_dict.items():
if value:
prime_dict.update({key: key})
return prime_dict | 49c62ae43bfe5af15f191cd8d831e82ae56c766d | 15,854 |
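An illustrative call (assuming get_prime above is in scope); the input maps each number to a truthy flag marking whether it is prime.

num_dict = {2: True, 3: True, 4: False, 5: True, 6: False}
print(get_prime(num_dict))  # {2: 2, 3: 3, 5: 5}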
def get_shared_keys(param_list):
"""
For the given list of parameter dictionaries, return a list of the dictionary
keys that appear in every parameter dictionary
>>> get_shared_keys([{'a':0, 'b':1, 'c':2, 'd':3}, {'a':0, 'b':1, 'c':3}, {'a':0, 'b':'beta'}])
['a', 'b']
>>> get_shared_keys([{'a':0, 'd':3}, {'a':0, 'b':1, 'c':2, 'd':3}, {'a':0, 'b':1, 'c':2}])
['a']
"""
if not param_list:
return
keys = set(param_list[0].keys())
for i in range(1, len(param_list)):
keys = keys.intersection(param_list[i].keys())
keys = list(keys)
keys.sort()
return keys | 0f6aa0df4d61ba166ac7d660be80a98fdbc29080 | 15,855 |
def labeledTest(*labels):
"""This decorator mark a class as an integrationTest
this is used in the test call for filtering integrationTest
and unittest.
We mark the difference by the usage of service dependency:
* An unittest can run without additional services.
* An integration test need additional services (such as
redis or postgres).
Usage:
@labeledTest("integration")
class FakeOutputTest(BaseApiTest):
pass
"""
def wrapper(cl):
cl._label = set(labels)
return cl
return wrapper | 4cb5adab516b19517066104d547d8efb0ae90cbd | 15,856 |
def birth(sim):
"""Similar to create agent, but just one individual"""
age = 0
qualification = int(sim.seed.gammavariate(3, 3))
qualification = [qualification if qualification < 21 else 20][0]
money = sim.seed.randrange(20, 40)
month = sim.seed.randrange(1, 13, 1)
gender = sim.seed.choice(['Male', 'Female'])
sim.total_pop += 1
a = Agent((sim.total_pop - 1), gender, age, qualification, money, month)
return a | c44323bb36b5807e4b25a12bb739150bd70e1b98 | 15,857 |
def offer_better_greeting():
"""Give player optional compliments."""
player = request.args["person"]
# if they didn't tick box, `wants_compliments` won't be
# in query args -- so let's use safe `.get()` method of
# dict-like things
wants = request.args.get("wants_compliments")
nice_things = sample(COMPLIMENTS, 3) if wants else []
return render_template("compliments.html",
compliments=nice_things,
name=player) | 9f65f9a1169262020f6ec227d44e0160a904f00f | 15,858 |
def get_trip_length(grouped_counts):
"""
Gets the frequency of the length of a trip for a customer
Args:
grouped_counts (Pandas.DataFrame): The grouped dataframe returned from
a get_trips method call
Returns:
Pandas.DataFrame: the dataframe containing the frequencies for each
trip length (in days)
"""
return frequency(grouped_counts, 0) | 974bb0fc7f0430d0e6605857dba22f7b036e3945 | 15,859 |
def extract_begin_end(data):
""" Finds nif:beginIndex and nif:endIndex values.
:param data: Data sent by the client.
:return: Begin index and end index, -1 if error.
"""
try:
begin = data.split("nif:beginIndex")[1].split("\"")[1]
end = data.split("nif:endIndex")[1].split("\"")[1]
return int(begin), int(end)
except IndexError:
return -1, -1 | d5f5ce211f645f10d6a0aed1c6446963f0c3fe3e | 15,860 |
def setup_parameters():
"""
Helper routine to fill in all relevant parameters
Note that this file will be used for all versions of SDC, containing more than necessary for each individual run
Returns:
description (dict)
controller_params (dict)
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-08
level_params['dt'] = 1E-02
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU']
sweeper_params['QE'] = ['EE']
sweeper_params['initial_guess'] = 'zero'
# This comes as read-in for the problem class
problem_params = dict()
problem_params['nu'] = 2
problem_params['L'] = 1.0
problem_params['nvars'] = [(256, 256), (64, 64)]
problem_params['eps'] = [0.04, 0.16]
problem_params['radius'] = 0.25
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
controller_params['hook_class'] = monitor
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = None # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = None # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh_fft2d
return description, controller_params | 6cadf729f8f796c6b07c94bf8b74913e6a893799 | 15,861 |
def get_operation(op, inplanes, outplanes, stride, conv_type):
"""Set up conv and pool operations."""
kernel_size = Ops.ops_to_kernel_size[op]
padding = [(k - 1) // 2 for k in kernel_size]
if op in Ops.pooling_ops:
if inplanes == outplanes:
return nn.AvgPool2d(kernel_size, stride=stride, padding=padding)
else:
return nn.Sequential(nn.Conv2d(inplanes, outplanes, 1, 1, 0),
nn.AvgPool2d(kernel_size, stride=stride, padding=padding))
else:
if conv_type == 'depthwise_separable':
return depthwise_separable_conv_general(inplanes, outplanes, stride, kernel_size, padding)
else:
return nn.Conv2d(inplanes, outplanes, kernel_size, stride, padding=padding) | f561db9c230236f3ead248e04cae198dbd9d4415 | 15,862 |
def encode(
structure_klifs_ids, fingerprints_filepath=None, local_klifs_download_path=None, n_cores=1
):
"""
Encode structures.
Parameters
----------
structure_klifs_ids : list of int
Structure KLIFS IDs.
fingerprints_filepath : str or pathlib.Path
Path to output json file. Default None.
local_klifs_download_path : str or None
If path to local KLIFS download is given, set up local KLIFS session.
If None is given, set up remote KLIFS session.
n_cores : int
Number of cores used to generate fingerprints.
Returns
-------
kissim.encoding.FingerprintGenerator
Fingerprints.
"""
# Set up KLIFS session
klifs_session = _setup_klifs_session(local_klifs_download_path)
# Generate fingerprints
fingerprints = FingerprintGenerator.from_structure_klifs_ids(
structure_klifs_ids, klifs_session, n_cores
)
# Optionally: Save fingerprints to json file
if fingerprints_filepath:
logger.info(f"Write fingerprints to file: {fingerprints_filepath}")
fingerprints.to_json(fingerprints_filepath)
return fingerprints | 7b5d3400455bdffc25e88cc07f58292f56ac6e12 | 15,863 |
def log_like_repressed(params, data_rep):
"""Conv wrapper for log likelihood for 2-state promoter w/
transcription bursts and repression.
data_rep: a list of arrays, each of which is n x 2, of form
data[:, 0] = SORTED unique mRNA counts
data[:, 1] = frequency of each mRNA count
Note the data pre-processing here, credit to Manuel for this observation:
'The likelihood asks for unique mRNA entries and their corresponding
counts to speed up the process of computing the probability distribution.
Instead of computing the probability of 3 mRNAs n times, it computes it
once and multiplies the value by n.'
This also reduces the size of the data arrays by ~10-fold,
which reduces the time penalty of emcee's pickling
to share the data within the multiprocessing Pool.
"""
# kR_list contains, in order, kRon_0p5, kRon_1, kRon_2, kRon_10, kRoff
k_burst, mean_burst, *kR_list = params
params_local = np.array([k_burst, mean_burst, 0, kR_list[-1]])
target = 0
for i, expt in enumerate(data_rep):
max_m = expt[0].max()
# kRoff is never plugged in below b/c loop terminates first
params_local[2] = kR_list[i]
# note log_probs contains values for ALL m < max_m,
# not just those in the data set...
log_probs = srep.models.log_prob_m_bursty_rep(max_m, *params_local)
# ...so extract just the ones we want & * by their occurence
target += np.sum(expt[1] * log_probs[expt[0]])
return target | 8451de8f1c578c8343bd1c91d1dc0326b51cc5a3 | 15,864 |
import numpy as np
def freq_mask(spec, F=30, num_masks=1, pad_value=0.):
"""Frequency masking
Args:
spec (torch.Tensor): input tensor of shape `(dim, T)`
F (int): maximum width of each mask
num_masks (int): number of masks
pad_value (float): value for padding
Returns:
freq masked tensor (torch.Tensor): output tensor of shape `(dim, T)`
"""
cloned = spec.clone()
num_mel_channels = cloned.size(0)
for i in range(num_masks):
f = np.random.randint(0, F + 1)
f_zero = np.random.randint(0, num_mel_channels - f + 1)
if f == 0:
continue
cloned[f_zero:f_zero + f] = pad_value
return cloned | 714dac7127e4dd1e790df016296321f97cfe37c7 | 15,865 |
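A minimal usage sketch (assuming freq_mask above is in scope, with numpy and torch available); the spectrogram is random and only illustrates the (dim, T) shape convention.

import torch
spec = torch.randn(80, 120)  # 80 mel channels, 120 frames
masked = freq_mask(spec, F=15, num_masks=2)  # zeroes out up to two frequency bands
print(masked.shape)  # torch.Size([80, 120])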
def prepare_file_hierarchy(path):
"""
Create a temporary folder structure like the following:
test_find_dotenv0/
└── child1
├── child2
│ └── child3
│ └── child4
└── .env
Then try to automatically `find_dotenv` starting in `child4`
"""
curr_dir = path
dirs = []
for f in ['child1', 'child2', 'child3', 'child4']:
curr_dir /= f
dirs.append(curr_dir)
curr_dir.mkdir()
return (dirs[0], dirs[-1]) | 25b66a7bc728f8f4b90cd9d8e678c914d2d60be9 | 15,866 |
def cmd2dict(cmd):
"""Returns a dictionary of what to replace each value by."""
pixel_count = cmd[cmd.shape[0] - 1, cmd.shape[1] - 1]
scaling_dict = dict()
for i in range(0, cmd.shape[0]):
scaling_dict[cmd[i, 0]] = round(
((cmd[i, 1] - cmd[0, 1]) / (pixel_count - cmd[0, 1])) * 255
)
return scaling_dict | 17f28fdcc5497c7d8d6aa55bbc61460e988586eb | 15,867 |
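An illustrative call (assuming cmd2dict above is in scope): cmd is read as a table whose first column holds pixel values and whose second column holds cumulative counts, and the mapping rescales those counts to 0-255.

import numpy as np
cmd = np.array([[0, 10], [100, 64], [255, 100]])
print(cmd2dict(cmd))  # maps 0 -> 0, 100 -> 153, 255 -> 255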
def cached_part(query, cache=None):
"""Get cached part of the query.
Use either supplied cache object or global cache object (default).
In the process, the query is split into two parts: the beginning of the query
and the remainder. Function tries to find longest possible beginning of the query
which is cached, then returns the cached state and the remainder of the query.
(query == state.query + "/" + remainder)
"""
if cache is None:
cache = get_cache()
if isinstance(
cache, NoCache
): # Just an optimization - to avoid looping over all query splits
return State(), encode(decode(query))
for key, remainder in all_splits(query):
if key == "":
return State(), remainder
if cache.contains(key):
state = cache.get(key)
if state is None:
continue
return state, remainder
# Should never get here, but this is a sensible default:
return State(), encode(decode(query)) | c1b8d9589b12171ae11e2f49911142252f54d9cd | 15,868 |
def exist_key(bucket: str, key: str) -> bool:
"""Exist key or not.
Args:
bucket (str): S3 bucket name.
key (str): Object key.
Returns:
bool: Exist or not.
"""
try:
s3.Object(bucket, key).get()
except s3.meta.client.exceptions.NoSuchKey:
return False
return True | 1e47467c85d0461d76f0d562a2ee9c7cff5dbf4e | 15,869 |
def calculate_bleu_score(candidate_file: str, reference_file: str) -> float:
"""
Calculates the average BLEU score of the given files, interpreting each line as a sentence.
Partially taken from https://stackoverflow.com/a/49886758/3918865.
Args:
candidate_file: the name of the file that contains the candidate sentences (hypotheses)
reference_file: the name of the file that contains the reference sentences (targets)
Returns:
the average BLEU score
"""
candidate = open(candidate_file, 'r').readlines()
reference = open(reference_file, 'r').readlines()
num_candidates = len(candidate)
reference = reference[:num_candidates]
assert len(reference) == len(candidate), 'Make sure there are at least as many references as candidates.'
score = 0.
for i in range(len(reference)):
ref = reference[i].strip()
cand = candidate[i].strip()
score_i = sentence_bleu([ref.split()], cand.split(), weights=(0.5, 0.5))
score += score_i
score /= num_candidates
return score | 00e6f6a852171f34b92598193fe1b08c60ba328b | 15,871 |
def _read_id_not_in_dict(read_ids, read_dict):
"""Return True if all read_ids in a list are not in the read_dict keys, otherwise False"""
for read_id in read_ids:
if read_id not in read_dict.keys():
return True
return False | 3a0e0926ed33f65cc67139311af1c860f3e371ae | 15,872 |
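Two illustrative checks (assuming _read_id_not_in_dict above is in scope): the function reports whether any id in the list is missing from the dict.

read_dict = {"r1": "ACGT", "r2": "TTGA"}
assert _read_id_not_in_dict(["r1", "r2"], read_dict) is False
assert _read_id_not_in_dict(["r1", "r3"], read_dict) is True  # "r3" is missing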
def generate_spectra_products(dataset, prdcfg):
"""
generates spectra products. Accepted product types:
'AMPLITUDE_PHASE_ANGLE_DOPPLER': Makes an angle Doppler plot of
complex spectra or IQ data. The plot can be along azimuth or along
range. The module and the phase of the signal are plotted
separately.
User defined parameters:
along_azi : bool
If true the plot is performed along azimuth, otherwise
along elevation. Default true
ang : float
The fixed angle (deg). Default 0.
rng : float
The fixed range (m). Default 0.
ang_tol : float
The fixed angle tolerance (deg). Default 1.
rng_tol : float
The fixed range tolerance (m). Default 50.
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
'AMPLITUDE_PHASE_DOPPLER': Plots a complex Doppler spectrum or IQ data
making two separate plots for the module and phase of the signal
User defined parameters:
azi, ele, rng : float
azimuth and elevation (deg) and range (m) of the ray to
plot
azi_tol, ele_tol, rng_tol : float
azimuth and elevation (deg) and range (m) tolerance
respect to nominal position to plot. Default 1, 1, 50.
ind_ray, ind_rng : int
index of the ray and range to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
'AMPLITUDE_PHASE_RANGE_DOPPLER': Plots a complex spectra or IQ data
range-Doppler making two separate plots for the module and phase
of the signal User defined parameters:
azi, ele : float
azimuth and elevation (deg) of the ray to plot
azi_tol, ele_tol : float
azimuth and elevation (deg) tolerance respect to nominal
position to plot. Default 1, 1.
ind_ray : int
index of the ray to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
'AMPLITUDE_PHASE_TIME_DOPPLER': Plots a complex spectra or IQ data
time-Doppler making two separate plots for the module and phase of
the signal
User defined parameters:
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity' or
'Doppler frequency'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
plot_type : str
Can be 'final' or 'temporal'. If final the data is only
plotted at the end of the processing
'ANGLE_DOPPLER': Makes an angle Doppler plot. The plot can be along
azimuth or along range
User defined parameters:
along_azi : bool
If true the plot is performed along azimuth, otherwise
along elevation. Default true
ang : float
The fixed angle (deg). Default 0.
rng : float
The fixed range (m). Default 0.
ang_tol : float
The fixed angle tolerance (deg). Default 1.
rng_tol : float
The fixed range tolerance (m). Default 50.
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_ANGLE_DOPPLER': Makes an angle Doppler plot of complex
spectra or IQ data. The plot can be along azimuth or along range.
The real and imaginary parts are plotted separately
User defined parameters:
along_azi : bool
If true the plot is performed along azimuth, otherwise
along elevation. Default true
ang : float
The fixed angle (deg). Default 0.
rng : float
The fixed range (m). Default 0.
ang_tol : float
The fixed angle tolerance (deg). Default 1.
rng_tol : float
The fixed range tolerance (m). Default 50.
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_DOPPLER': Plots a complex Doppler spectrum or IQ data making
two separate plots for the real and imaginary parts
User defined parameters:
azi, ele, rng : float
azimuth and elevation (deg) and range (m) of the ray to
plot
azi_tol, ele_tol, rng_tol : float
azimuth and elevation (deg) and range (m) tolerance
respect to nominal position to plot. Default 1, 1, 50.
ind_ray, ind_rng : int
index of the ray and range to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_RANGE_DOPPLER': Plots the complex spectra or IQ data
range-Doppler making two separate plots for the real and imaginary
parts
User defined parameters:
azi, ele : float
azimuth and elevation (deg) of the ray to plot
azi_tol, ele_tol : float
azimuth and elevation (deg) tolerance respect to nominal
position to plot. Default 1, 1.
ind_ray : int
index of the ray to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_TIME_DOPPLER': Plots the complex spectra or IQ data
time-Doppler making two separate plots for the real and imaginary
parts
User defined parameters:
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity' or
'Doppler frequency'
vmin, vmax : float or None
Minimum and maximum of the color scale
plot_type : str
Can be 'final' or 'temporal'. If final the data is only
plotted at the end of the processing
'DOPPLER': Plots a Doppler spectrum variable or IQ data variable
User defined parameters:
azi, ele, rng : float
azimuth and elevation (deg) and range (m) of the ray to
plot
azi_to, ele_tol, rng_tol : float
azimuth and elevation (deg) and range (m) tolerance
respect to nominal position to plot. Default 1, 1, 50.
ind_ray, ind_rng : int
index of the ray and range to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'RANGE_DOPPLER': Makes a range-Doppler plot of spectral or IQ data
User defined parameters:
azi, ele : float
azimuth and elevation (deg) of the ray to plot
azi_tol, ele_tol : float
azimuth and elevation (deg) tolerance respect to nominal
position to plot. Default 1, 1.
ind_ray : int
index of the ray to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'SAVEALL': Saves radar spectra or IQ volume data including all or a
list of user-defined fields in a netcdf file
User defined parameters:
datatypes: list of str or None
The list of data types to save. If it is None, all fields
in the radar object will be saved
physical: Bool
If True the data will be saved in physical units (floats).
Otherwise it will be quantized and saved as binary
'SAVEVOL': Saves one field of a radar spectra or IQ volume data in a
netcdf file
User defined parameters:
physical: Bool
If True the data will be saved in physical units (floats).
Otherwise it will be quantized and saved as binary
'TIME_DOPPLER': Makes a time-Doppler plot of spectral or IQ data at a
point of interest.
User defined parameters:
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
plot_type : str
Can be 'final' or 'temporal'. If final the data is only
plotted at the end of the processing
Parameters
----------
dataset : spectra
spectra object
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
None or name of generated files
"""
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
if prdcfg['type'] == 'RANGE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
if azi is None or ele is None:
ind_ray = prdcfg.get('ind_ray', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray is None:
warn('Ray azi='+str(azi)+', ele='+str(ele) +
' out of radar coverage')
return None
gateinfo = 'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].ngates == 1:
plot_Doppler(
dataset['radar_out'], field_name, ind_ray, 0, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_range_Doppler(
dataset['radar_out'], field_name, ind_ray, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'ANGLE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
along_azi = prdcfg.get('along_azi', True)
ang = prdcfg.get('ang', 0)
rng = prdcfg.get('rng', 0)
ang_tol = prdcfg.get('ang_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No data at rng='+str(rng))
return None
if along_azi:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].elevation['data'] <= ang+ang_tol,
dataset['radar_out'].elevation['data'] >= ang-ang_tol))[0]
else:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].azimuth['data'] <= ang+ang_tol,
dataset['radar_out'].azimuth['data'] >= ang-ang_tol))[0]
if ind_rays.size == 0:
warn('No data for angle '+str(ang))
return None
# sort angles
if along_azi:
ang_selected = dataset['radar_out'].azimuth['data'][ind_rays]
else:
ang_selected = dataset['radar_out'].elevation['data'][ind_rays]
ind_rays = ind_rays[np.argsort(ang_selected)]
if along_azi:
gateinfo = 'azi'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
else:
gateinfo = 'ele'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if ind_rays.size == 1:
plot_Doppler(
dataset['radar_out'], field_name, ind_rays, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_angle_Doppler(
dataset['radar_out'], field_name, ang, ind_rays, ind_rng,
prdcfg, fname_list, xaxis_info=xaxis_info,
along_azi=along_azi, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'TIME_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
xmin = prdcfg.get('xmin', None)
xmax = prdcfg.get('xmax', None)
ymin = prdcfg.get('ymin', None)
ymax = prdcfg.get('ymax', None)
plot_type = prdcfg.get('plot_type', 'final')
if plot_type == 'final' and not dataset['final']:
return None
if 'antenna_coordinates_az_el_r' in dataset:
az = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][0])
el = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][1])
r = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][2])
gateinfo = ('az'+az+'r'+r+'el'+el)
else:
lon = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][0])
lat = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][1])
alt = '{:.1f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][2])
gateinfo = ('lon'+lon+'lat'+lat+'alt'+alt)
time_info = datetime_from_radar(dataset['radar_out'])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=time_info)
fname_list = make_filename(
'time_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=time_info, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].nrays == 1:
plot_Doppler(
dataset['radar_out'], field_name, 0, 0, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_time_Doppler(
dataset['radar_out'], field_name, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax, xmin=xmin,
xmax=xmax, ymin=ymin, ymax=ymax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
rng = prdcfg.get('rng', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
if azi is None or ele is None or rng is None:
ind_ray = prdcfg.get('ind_ray', 0)
ind_rng = prdcfg.get('ind_rng', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
rng = dataset['radar_out'].range['data'][ind_rng]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None or ind_ray is None:
warn('Point azi='+str(azi)+', ele='+str(ele)+', rng='+str(rng) +
' out of radar coverage')
return None
gateinfo = (
'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele) +
'r'+'{:.1f}'.format(rng))
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
plot_Doppler(
dataset['radar_out'], field_name, ind_ray, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_RANGE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
if azi is None or ele is None:
ind_ray = prdcfg.get('ind_ray', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray is None:
warn('Ray azi='+str(azi)+', ele='+str(ele) +
' out of radar coverage')
return None
gateinfo = 'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'c_range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].ngates == 1:
plot_complex_Doppler(
dataset['radar_out'], field_name, ind_ray, 0, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_complex_range_Doppler(
dataset['radar_out'], field_name, ind_ray, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_ANGLE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
along_azi = prdcfg.get('along_azi', True)
ang = prdcfg.get('ang', 0)
rng = prdcfg.get('rng', 0)
ang_tol = prdcfg.get('ang_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No data at rng='+str(rng))
return None
if along_azi:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].elevation['data'] <= ang+ang_tol,
dataset['radar_out'].elevation['data'] >= ang-ang_tol))[0]
else:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].azimuth['data'] <= ang+ang_tol,
dataset['radar_out'].azimuth['data'] >= ang-ang_tol))[0]
if ind_rays.size == 0:
warn('No data for angle '+str(ang))
return None
# sort angles
if along_azi:
ang_selected = dataset['radar_out'].azimuth['data'][ind_rays]
else:
ang_selected = dataset['radar_out'].elevation['data'][ind_rays]
ind_rays = ind_rays[np.argsort(ang_selected)]
if along_azi:
gateinfo = 'azi'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
else:
gateinfo = 'ele'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if ind_rays.size == 1:
plot_complex_Doppler(
dataset['radar_out'], field_name, ind_rays, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_complex_angle_Doppler(
dataset['radar_out'], field_name, ang, ind_rays, ind_rng,
prdcfg, fname_list, xaxis_info=xaxis_info,
along_azi=along_azi, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_TIME_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
plot_type = prdcfg.get('plot_type', 'final')
if plot_type == 'final' and not dataset['final']:
return None
if 'antenna_coordinates_az_el_r' in dataset:
az = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][0])
el = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][1])
r = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][2])
gateinfo = ('az'+az+'r'+r+'el'+el)
else:
lon = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][0])
lat = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][1])
alt = '{:.1f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][2])
gateinfo = ('lon'+lon+'lat'+lat+'alt'+alt)
time_info = datetime_from_radar(dataset['radar_out'])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=time_info)
fname_list = make_filename(
'c_time_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=time_info, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].nrays == 1:
plot_complex_Doppler(
dataset['radar_out'], field_name, 0, 0, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_complex_time_Doppler(
dataset['radar_out'], field_name, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
rng = prdcfg.get('rng', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
if azi is None or ele is None or rng is None:
ind_ray = prdcfg.get('ind_ray', 0)
ind_rng = prdcfg.get('ind_rng', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
rng = dataset['radar_out'].range['data'][ind_rng]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None or ind_ray is None:
warn('Point azi='+str(azi)+', ele='+str(ele)+', rng='+str(rng) +
' out of radar coverage')
return None
gateinfo = (
'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele) +
'r'+'{:.1f}'.format(rng))
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'c_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
plot_complex_Doppler(
dataset['radar_out'], field_name, ind_ray, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
rng = prdcfg.get('rng', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
if azi is None or ele is None or rng is None:
ind_ray = prdcfg.get('ind_ray', 0)
ind_rng = prdcfg.get('ind_rng', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
rng = dataset['radar_out'].range['data'][ind_rng]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None or ind_ray is None:
warn('Point azi='+str(azi)+', ele='+str(ele)+', rng='+str(rng) +
' out of radar coverage')
return None
gateinfo = (
'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele) +
'r'+'{:.1f}'.format(rng))
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'ap_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, ind_ray, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_RANGE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
if azi is None or ele is None:
ind_ray = prdcfg.get('ind_ray', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray is None:
warn('Ray azi='+str(azi)+', ele='+str(ele) +
' out of radar coverage')
return None
gateinfo = 'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'ap_range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].ngates == 1:
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, ind_ray, 0, prdcfg,
fname_list, xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
else:
plot_amp_phase_range_Doppler(
dataset['radar_out'], field_name, ind_ray, prdcfg, fname_list,
xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_ANGLE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
along_azi = prdcfg.get('along_azi', True)
ang = prdcfg.get('ang', 0)
rng = prdcfg.get('rng', 0)
ang_tol = prdcfg.get('ang_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No data at rng='+str(rng))
return None
if along_azi:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].elevation['data'] <= ang+ang_tol,
dataset['radar_out'].elevation['data'] >= ang-ang_tol))[0]
else:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].azimuth['data'] <= ang+ang_tol,
dataset['radar_out'].azimuth['data'] >= ang-ang_tol))[0]
if ind_rays.size == 0:
warn('No data for angle '+str(ang))
return None
# sort angles
if along_azi:
ang_selected = dataset['radar_out'].azimuth['data'][ind_rays]
else:
ang_selected = dataset['radar_out'].elevation['data'][ind_rays]
ind_rays = ind_rays[np.argsort(ang_selected)]
if along_azi:
gateinfo = 'azi'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
else:
gateinfo = 'ele'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if ind_rays.size == 1:
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, ind_rays, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
else:
plot_amp_phase_angle_Doppler(
dataset['radar_out'], field_name, ang, ind_rays, ind_rng,
prdcfg, fname_list, xaxis_info=xaxis_info,
along_azi=along_azi, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_TIME_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
plot_type = prdcfg.get('plot_type', 'final')
if plot_type == 'final' and not dataset['final']:
return None
if 'antenna_coordinates_az_el_r' in dataset:
az = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][0])
el = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][1])
r = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][2])
gateinfo = ('az'+az+'r'+r+'el'+el)
else:
lon = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][0])
lat = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][1])
alt = '{:.1f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][2])
gateinfo = ('lon'+lon+'lat'+lat+'alt'+alt)
time_info = datetime_from_radar(dataset['radar_out'])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=time_info)
fname_list = make_filename(
'ap_time_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=time_info, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].nrays == 1:
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, 0, 0, prdcfg, fname_list,
xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
else:
plot_amp_phase_time_Doppler(
dataset['radar_out'], field_name, prdcfg, fname_list,
xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'SAVEVOL':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
file_type = prdcfg.get('file_type', 'nc')
physical = prdcfg.get('physical', True)
new_dataset = deepcopy(dataset['radar_out'])
new_dataset.fields = dict()
new_dataset.add_field(
field_name, dataset['radar_out'].fields[field_name])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname = make_filename(
'savevol', prdcfg['dstype'], prdcfg['voltype'], [file_type],
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])[0]
fname = savedir+fname
pyart.aux_io.write_spectra(fname, new_dataset, physical=physical)
print('saved file: '+fname)
return fname
if prdcfg['type'] == 'SAVEALL':
file_type = prdcfg.get('file_type', 'nc')
datatypes = prdcfg.get('datatypes', None)
physical = prdcfg.get('physical', True)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname = make_filename(
'savevol', prdcfg['dstype'], 'all_fields', [file_type],
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])[0]
fname = savedir+fname
field_names = None
if datatypes is not None:
field_names = []
for datatype in datatypes:
field_names.append(get_fieldname_pyart(datatype))
if field_names is not None:
radar_aux = deepcopy(dataset['radar_out'])
radar_aux.fields = dict()
for field_name in field_names:
if field_name not in dataset['radar_out'].fields:
warn(field_name+' not in radar object')
else:
radar_aux.add_field(
field_name,
dataset['radar_out'].fields[field_name])
else:
radar_aux = dataset['radar_out']
pyart.aux_io.write_spectra(fname, radar_aux, physical=physical)
print('saved file: '+fname)
return fname
warn(' Unsupported product type: ' + prdcfg['type'])
return None | ba279b7331fda0fdcb2ef506e91d13bd11f37d2f | 15,873 |
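For orientation, a minimal product configuration that would exercise the 'DOPPLER' branch above could look as follows; only the key names are taken from the code, every value is a hypothetical placeholder:
# Hypothetical configuration for the 'DOPPLER' single-gate product; all values are placeholders.
prdcfg = {
    'type': 'DOPPLER',
    'voltype': 'dBZ',              # datatype translated by get_fieldname_pyart
    'dsname': 'spectra_dataset',   # used as dssavedir unless 'dssavename' is given
    'dstype': 'RAW',
    'basepath': '/tmp/products/',
    'procname': 'test_proc',
    'prdname': 'doppler_gate',
    'imgformat': ['png'],
    'timeinfo': None,              # normally the datetime of the scan
    'runinfo': '',
    'azi': 45., 'ele': 2., 'rng': 5000.,              # gate selection
    'azi_tol': 0.5, 'ele_tol': 0.5, 'rng_tol': 100.,
    'xaxis_info': 'Doppler_velocity',
    'vmin': None, 'vmax': None,
}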
def odds_or_evens(my_bool, nums):
"""Returns all of the odd or
even numbers from a list"""
return_list = []
for num in nums:
if my_bool:
if num % 2 == 0:
return_list.append(num)
else:
if num % 2 != 0:
return_list.append(num)
return return_list | 02b3b12acbaae10b2b0e05eec059f6571c576e80 | 15,874 |
import numpy
def local_mass_diagonal(quad_data, basis):
"""Constructs the elemental mass matrix, diagonal version
Arguments:
quad_data - Quadrature points and weights
basis - Basis and respective derivatives
Returns:
Mass matrix M, where m_ii = \int_k psi_i psi_i
"""
return numpy.sum(quad_data.w*basis.psi.T**2, axis=1) | ffaf34df758e73dea0db3ecb66de658991d8de58 | 15,875 |
def create_saved_group(uuid=None):
"""Create and save a Sample Group with all the fixings (plus gravy)."""
if uuid is None:
uuid = uuid4()
analysis_result = AnalysisResultMeta().save()
group_description = 'Includes factory-produced analysis results from all display_modules'
sample_group = SampleGroup(name='Fuzz Testing',
analysis_result=analysis_result,
description=group_description)
sample_group.id = uuid
db.session.add(sample_group)
db.session.commit()
# Add the results
analysis_result.average_genome_size = wrap_result(AGSFactory())
analysis_result.card_amr_genes = wrap_result(CARDGenesFactory())
analysis_result.functional_genes = wrap_result(FunctionalGenesFactory())
analysis_result.hmp = wrap_result(HMPFactory())
analysis_result.macrobe_abundance = wrap_result(MacrobeFactory())
analysis_result.methyltransferases = wrap_result(MethylsFactory())
analysis_result.microbe_directory = wrap_result(MicrobeDirectoryFactory())
analysis_result.pathways = wrap_result(PathwayFactory())
analysis_result.read_stats = wrap_result(ReadStatsFactory())
analysis_result.reads_classified = wrap_result(ReadsClassifiedFactory())
analysis_result.sample_similarity = wrap_result(create_mvp_sample_similarity())
# analysis_result.taxon_abundance =
analysis_result.virulence_factors = wrap_result(VFDBFactory())
analysis_result.save()
return sample_group | 7a9929518b44f6266f32300177385040b3da41c0 | 15,876 |
import copy
def words_to_indexes(tree):
"""Return a new tree based on the original tree, such that the leaf values
    are replaced by their 1-based indexes."""
out = copy.deepcopy(tree)
leaves = out.leaves()
for index in range(0, len(leaves)):
path = out.leaf_treeposition(index)
out[path] = index + 1
return out | 99e4ad2aa1d318af21d934aee2128b8d7b51a99f | 15,877 |
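A quick sketch of words_to_indexes on an nltk.Tree (the function relies on the tree API used above, i.e. leaves() and leaf_treeposition()):
# Leaves 'the', 'dog', 'barks' are replaced by positions 1, 2, 3.
from nltk import Tree
tree = Tree.fromstring('(S (NP the dog) (VP barks))')
print(words_to_indexes(tree).leaves())   # [1, 2, 3]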
def get_stoplist_names():
"""Return list of stoplist names"""
config = configuration()
return [name for name, value in config.items('stoplists')] | a93dec87fe840a1fab9d63527e7b54ae8a1c7cf5 | 15,878 |
def any_(criterions):
"""Return a stop criterion that given a list `criterions` of stop criterions
only returns True, if any of the criterions returns True.
This basically implements a logical OR for stop criterions.
"""
def inner(info):
return any(c(info) for c in criterions)
return inner | 600e7c1516cba6f0cd73812bcd43d5e194aa33d2 | 15,879 |
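A minimal usage sketch for any_; the 'n_iter' and 'loss' keys of the info dict are hypothetical and only illustrate the shape of a stop criterion:
# Combine two simple stop criteria with a logical OR.
def max_iterations(info):
    return info['n_iter'] >= 100

def small_loss(info):
    return info['loss'] < 1e-6

stop = any_([max_iterations, small_loss])
print(stop({'n_iter': 10, 'loss': 0.5}))    # False
print(stop({'n_iter': 150, 'loss': 0.5}))   # True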
def validate_basic(params, length, allow_infnan=False, title=None):
"""
Validate parameter vector for basic correctness.
Parameters
----------
params : array_like
Array of parameters to validate.
length : int
Expected length of the parameter vector.
allow_infnan : bool, optional
Whether or not to allow `params` to contain -np.Inf, np.Inf, and
np.nan. Default is False.
title : str, optional
Description of the parameters (e.g. "autoregressive") to use in error
messages.
Returns
-------
params : ndarray
Array of validated parameters.
Notes
-----
Basic check that the parameters are numeric and that they are the right
shape. Optionally checks for NaN / infinite values.
"""
title = '' if title is None else ' for %s' % title
# Check for invalid type and coerce to non-integer
try:
params = np.array(params, dtype=object)
is_complex = [isinstance(p, complex) for p in params.ravel()]
dtype = complex if any(is_complex) else float
params = np.array(params, dtype=dtype)
except TypeError:
raise ValueError('Parameters vector%s includes invalid values.'
% title)
# Check for NaN, inf
if not allow_infnan and (np.any(np.isnan(params)) or
np.any(np.isinf(params))):
raise ValueError('Parameters vector%s includes NaN or Inf values.'
% title)
params = np.atleast_1d(np.squeeze(params))
# Check for right number of parameters
if params.shape != (length,):
plural = '' if length == 1 else 's'
raise ValueError('Specification%s implies %d parameter%s, but'
' values with shape %s were provided.'
% (title, length, plural, params.shape))
return params | c3567a7f08656c3b815eded0a6788d904b5820a5 | 15,880 |
import torch
def unsorted_segment_sum(data, segment_ids, num_segments):
"""
Computes the sum along segments of a tensor. Analogous to tf.unsorted_segment_sum.
:param data: A tensor whose segments are to be summed.
:param segment_ids: The segment indices tensor.
:param num_segments: The number of segments.
:return: A tensor of same data type as the data argument.
"""
assert all([i in data.shape for i in segment_ids.shape]), "segment_ids.shape should be a prefix of data.shape"
# segment_ids is a 1-D tensor repeat it to have the same shape as data
if len(segment_ids.shape) == 1:
s = torch.prod(torch.tensor(data.shape[1:])).long()
segment_ids = segment_ids.repeat_interleave(s).view(segment_ids.shape[0], *data.shape[1:])
assert data.shape == segment_ids.shape, "data.shape and segment_ids.shape should be equal"
shape = [num_segments] + list(data.shape[1:])
tensor = torch.zeros(*shape).scatter_add(0, segment_ids, data.float())
tensor = tensor.type(data.dtype)
return tensor | 7d8686d35afab975bff05d3cda50d1ceae537ab9 | 15,881 |
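A toy check of unsorted_segment_sum, mirroring the tf.unsorted_segment_sum semantics mentioned in the docstring:
import torch

# Rows 0 and 2 are summed into segment 0, row 1 into segment 1.
data = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])
segment_ids = torch.tensor([0, 1, 0])
print(unsorted_segment_sum(data, segment_ids, num_segments=2))
# tensor([[6., 8.],
#         [3., 4.]])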
def create_parameters(address: str) -> dict:
"""Create parameters for address.
this function create parameters for having request from geocoder
and than return dictionary of parameters
Args:
address (str): the address for create parameters
Returns:
        dict: the request parameters containing the api key (imported above) and the geocode address
"""
address_to_string = address.replace(" ", "+")
params = {'apikey': developer_key,
'geocode': address_to_string}
return params | 9ad1723cf2bec66e366e83b814ee746cdddf8289 | 15,882 |
import re
def standardizeName(name):
"""
Remove stuff not used by bngl
"""
name2 = name
sbml2BnglTranslationDict = {
"^": "",
"'": "",
"*": "m",
" ": "_",
"#": "sh",
":": "_",
"α": "a",
"β": "b",
"γ": "g",
" ": "",
"+": "pl",
"/": "_",
":": "_",
"-": "_",
".": "_",
"?": "unkn",
",": "_",
"(": "",
")": "",
"[": "",
"]": "",
# "(": "__",
# ")": "__",
# "[": "__",
# "]": "__",
">": "_",
"<": "_",
}
for element in sbml2BnglTranslationDict:
name = name.replace(element, sbml2BnglTranslationDict[element])
name = re.sub("[\W]", "", name)
return name | 33caf35feb0c9dcc042add501a4470b1ccbd3b1c | 15,883 |
def _learning_rate_decay(hparams, warmup_steps=0):
"""Learning rate decay multiplier."""
scheme = hparams.learning_rate_decay_scheme
warmup_steps = tf.to_float(warmup_steps)
global_step = tf.to_float(tf.train.get_or_create_global_step())
if not scheme or scheme == "none":
return tf.constant(1.)
tf.logging.info("Applying learning rate decay: %s.", scheme)
if scheme == "exp":
decay_steps = hparams.learning_rate_decay_steps
p = (global_step - warmup_steps) / decay_steps
if hparams.learning_rate_decay_staircase:
p = tf.floor(p)
return tf.pow(hparams.learning_rate_decay_rate, p)
if scheme == "piecewise":
return _piecewise_learning_rate(global_step,
hparams.learning_rate_boundaries,
hparams.learning_rate_multiples)
if scheme == "cosine":
cycle_steps = hparams.learning_rate_cosine_cycle_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position)
return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps))
if scheme == "cyclelinear10x":
# Cycle the rate linearly by 10x every warmup_steps, up and down.
cycle_steps = warmup_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = tf.to_float( # Normalize to the interval [-1, 1].
cycle_position - cycle_steps) / float(cycle_steps)
cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0.
return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3).
if scheme == "sqrt":
return _legacy_sqrt_decay(global_step - warmup_steps)
raise ValueError("Unrecognized learning rate decay scheme: %s" %
hparams.learning_rate_decay_scheme) | 4d171ef2cb13d2f103ac722be12cd594b6533c60 | 15,884 |
import re
def Register_User():
"""Validates register form data and saves it to the database"""
# Check if the fields are filled out
if not (request.form['username'] and request.form['email'] and request.form['password'] and request.form['passwordConf']):
return redirect(url_for('Register', message = "Please fill out all the fields"))
else:
# Ensure passwords match
if request.form['password'] != request.form['passwordConf']:
return redirect(url_for('Register', message = "Passwords do not match"))
# Ensure name is only _, a-z, A-Z, 0-9, and space
if not re.search(r'^[\w_ ]+$', request.form['username']):
return redirect(url_for('Register', message = "Username can only contain _, a-z, A-Z, 0-9 and spaces."))
# Ensure a valid email
if not re.search(r'^[a-zA-Z0-9]+[\._]?[a-zA-Z0-9]+[@]\w+[.]\w+$', request.form['email']):
return redirect(url_for('Register', message = "Invalid email"))
# Connect to DB
with engine.connect() as con:
# Check if username is taken
try:
statement = text("SELECT COUNT(1) FROM user WHERE (username = :username)")
result = con.execute(statement, username = request.form['username']).scalar()
except SQLAlchemyError as e:
return redirect(url_for('Error', title = "Error: Validating user availability", msg = type(e), back = "Register_User"))
except:
return redirect(url_for('Error', title = "Error", msg = "<class 'blog.UnhandledError'>", back = "Register_User"))
if result > 0:
return redirect(url_for('Register', message = "Username is already taken"))
# Check if email is taken
try:
statement = text("SELECT COUNT(1) FROM user WHERE (email = :email)")
result = con.execute(statement, email = request.form['email']).scalar()
except SQLAlchemyError as e:
return redirect(url_for('Error', title = "Error: Validating user availability", msg = type(e), back = "Register_User"))
except:
return redirect(url_for('Error', title = "Error", msg = "<class 'blog.UnhandledError'>", back = "Register_User"))
if result > 0:
return redirect(url_for('Register', message = "Email is already taken"))
# Create new user and add to the database
try:
new_user = User(request.form['username'], request.form['email'], request.form['password'])
db.session.add(new_user)
db.session.commit()
except:
return redirect(url_for('Error', title = "Error", msg = "<class 'blog.UnhandledError'>", back = "Register_User"))
# Get the new user's ID to log them in
try:
statement = text("SELECT id FROM user WHERE (username = :username)")
result = con.execute(statement, username = request.form['username']).scalar()
except:
return redirect(url_for('Error', title = "Error: Login failed", msg = "REGISTRATION WAS SUCCESSFUL. Something went wrong loging you in. Please login."))
# Log the new user in with a session
session['user_id'] = result
# Redirect to the new user's profile
return redirect(url_for('Own_Profile')) | 6a3ed4e99436845791d8025014f4fa4ddbaec86e | 15,886 |
def extreme_rank(df, col, n, bottom=True, keep=[]):
"""
Calculate the n top or bottom of a given series
"""
    t = df[list(keep)+[col]].sort_values(col, ascending=bottom).iloc[:n]
count = t['NO_MUNICIPIO'].value_counts()
count.name = '#'
perc = t['NO_MUNICIPIO'].value_counts(normalize=True)
perc.name = '%'
return pd.concat([count, perc], axis=1), t | 976395ceb26f72300cbc24b9cb849b0e47f45ba8 | 15,887 |
def ss_octile(y):
"""Obtain the octile summary statistic.
The statistic reaches the optimal performance upon a high number of
observations. According to Allingham et al. (2009), it is more stable than ss_robust.
Parameters
----------
y : array_like
Yielded points.
Returns
-------
array_like of the shape (batch_size, dim_ss=8, dim_ss_point)
"""
octiles = np.linspace(12.5, 87.5, 7)
E1, E2, E3, E4, E5, E6, E7 = np.percentile(y, octiles, axis=1)
# Combining the summary statistics.
ss_octile = np.hstack((E1, E2, E3, E4, E5, E6, E7))
ss_octile = ss_octile[:, :, np.newaxis]
return ss_octile | a38256c3fa3e2d3c5d756883524d65a48b0585f5 | 15,888 |
def englishToFrench(englishText):
"""Translates English to French"""
model_id='en-fr'
fr_text = language_translator.translate(
text=englishText,
model_id=model_id).get_result()
return(fr_text['translations'][0]['translation']) | f1ebb6195d09230c1bac2b4351b0157813e6ca80 | 15,889 |
def calc_out_of_plane_angle(a, b, c, d):
"""
Calculate the out of plane angle of the A-D vector
to the A-B-C plane
Returns the value in radians and a boolean telling if b-a-c are near-collinear
"""
collinear_cutoff = 175./180.
collinear = 0
if abs(calc_angle(b, a, c)) > np.pi * collinear_cutoff:
collinear = 1
rab = b - a
rac = c - a
rad = d - a
rab /= np.linalg.norm(rab)
rac /= np.linalg.norm(rac)
rad /= np.linalg.norm(rad)
n = np.cross(rab,rac)
n /= np.linalg.norm(n)
sin = np.dot(n,rad)
ang = np.arcsin(sin)
return ang, collinear | e24c70e210cb8a454af07a1757864b9c241acaff | 15,890 |
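A small sanity check, assuming the companion calc_angle(b, a, c) helper used above is available: with B and C spanning the xy-plane and D on the z-axis, the A-D vector is perpendicular to the A-B-C plane:
import numpy as np

a = np.array([0., 0., 0.])
b = np.array([1., 0., 0.])
c = np.array([0., 1., 0.])
d = np.array([0., 0., 1.])
ang, collinear = calc_out_of_plane_angle(a, b, c, d)
print(np.degrees(ang), collinear)   # 90.0 0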
def compute_distances(X, Y):
"""
Computes the Mahalanobis distances between X and Y, for the special case
where covariance between components is 0.
Args:
X (np.ndarray):
3D array that represents our population of gaussians. It is
assumed that X[0] is the 2D matrix containing the coordinates
of the centroids and X[1] represents the 2D matrix of variances.
Y (np.ndarray):
2D or 3D array that can represent either a data matrix or a
DE population. If it represents a population, only the centroids
are taken into consideration.
Returns: np.ndarray
A matrix that contains all distances for each row of X to all rows
of Y, computed with the variances found in X.
"""
assert X.ndim == 3 and X.shape[0] == 2, \
'X must have shape (2,_,_)'
assert Y.ndim == 2 or (Y.ndim == 3 and Y.shape[0] == 2), \
'Y must have shape (_,_) or (2,_,_)'
m = X.shape[1]
if Y.ndim == 2:
n = Y.shape[0]
points = Y
else:
n = Y.shape[1]
points = Y[0]
centers = X[0]
sigmas = X[1]
dist_matrix = np.empty((m, n), dtype=X.dtype)
for i in range(m):
# Broadcasting
diff = (centers[i] - points) / sigmas[i]
# This computes the sum of the pairwise products of the rows. In other
# words, it computes sum([x[i] * y[i] for i in range(x.shape[0])]).
dist_matrix[i, :] = np.einsum('ij,ij->i', diff, diff)
return dist_matrix | da994051b2eb4cc614368ed2a035d7a8bf9dcade | 15,891 |
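A small sketch of compute_distances with two diagonal Gaussians and three points; note that, as written, the entries are squared standardized distances (no square root is taken):
import numpy as np

X = np.array([[[0.0, 0.0], [1.0, 1.0]],   # centroids
              [[1.0, 2.0], [0.5, 0.5]]])  # per-dimension sigmas
Y = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])
print(compute_distances(X, Y))
# [[0.   1.25 4.  ]
#  [8.   0.   8.  ]]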
def op_item_info():
"""Helper that compiles item info spec and all common module specs
:return dict
"""
item_spec = dict(
item=dict(
type="str",
required=True
),
flatten_fields_by_label=dict(
type="bool",
default=True
),
# Direct users to field_info module instead
field=dict(
type="str",
removed_from_collection="onepassword.connect",
removed_in_version="3.0.0",
),
vault=dict(
type="str"
)
)
item_spec.update(common_options())
return item_spec | 5bc4d3ff959e9642304dff31b910ea8a6c8a9d52 | 15,892 |
import collections.abc
def unpack_condition(tup):
"""
Convert a condition to a list of values.
Notes
-----
Rules for keys of conditions dicts:
(1) If it's numeric, treat as a point value
(2) If it's a tuple with one element, treat as a point value
(3) If it's a tuple with two elements, treat as lower/upper limits and guess a step size.
(4) If it's a tuple with three elements, treat as lower/upper/step
(5) If it's a list, ndarray or other non-tuple ordered iterable, use those values directly.
"""
if isinstance(tup, tuple):
if len(tup) == 1:
return [float(tup[0])]
elif len(tup) == 2:
            return np.arange(tup[0], tup[1], dtype=float)
        elif len(tup) == 3:
            return np.arange(tup[0], tup[1], tup[2], dtype=float)
        else:
            raise ValueError('Condition tuple is length {}'.format(len(tup)))
    elif isinstance(tup, collections.abc.Iterable):
return [float(x) for x in tup]
else:
return [float(tup)] | c07e651031850896d46a94e6060c79a955ad10fd | 15,893 |
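The docstring rules of unpack_condition, exercised directly:
print(unpack_condition(300.0))             # [300.0]               rule (1): point value
print(unpack_condition((300,)))            # [300.0]               rule (2): 1-tuple
print(unpack_condition((300, 302, 1)))     # array([300., 301.])   rule (4): lower/upper/step
print(unpack_condition([300, 350, 400]))   # [300.0, 350.0, 400.0] rule (5): iterable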
from aiida.common.datastructures import wf_data_types
from aiida.orm.workflow import Workflow
def get_wfs_with_parameter(parameter, wf_class='Workflow'):
"""
Find workflows of a given class, with a given parameter (which must be a
node)
:param parameter: an AiiDA node
:param wf_class: the name of the workflow class
:return: an AiiDA query set with all workflows that have this parameter
"""
    try:
        from aiida.backends.djsite.db import models
    except ImportError:
        from aiida.djsite.db import models
# Find attributes with this name
qdata = models.DbWorkflowData.objects.filter(aiida_obj=parameter,
data_type=wf_data_types.PARAMETER)
# Find workflows with those attributes
if wf_class == 'Workflow':
qwf = Workflow.query(data__in=qdata)
else:
qwf = Workflow.query(module_class=wf_class,data__in=qdata)
#q2 = wf_class.query(data__in=q1)
# return a Django QuerySet with the resulting class instances
return qwf.distinct().order_by('ctime') | 7ae1c11b9b6495341da853d67d3d38df2c7838cd | 15,895 |
def map_iou(boxes_true, boxes_pred, scores, thresholds = [0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75]):
"""
    Mean average precision at different intersection over union (IoU) thresholds
input:
boxes_true: Mx4 numpy array of ground true bounding boxes of one image.
bbox format: (x1, y1, w, h)
boxes_pred: Nx4 numpy array of predicted bounding boxes of one image.
bbox format: (x1, y1, w, h)
scores: length N numpy array of scores associated with predicted bboxes
        thresholds: IoU thresholds to evaluate mean average precision on
output:
map: mean average precision of the image
"""
# According to the introduction, images with no ground truth bboxes will not be
# included in the map score unless there is a false positive detection (?)
# return None if both are empty, don't count the image in final evaluation (?)
if len(boxes_true) == 0 and len(boxes_pred) == 0:
return None
assert boxes_true.shape[1] == 4 or boxes_pred.shape[1] == 4, "boxes should be 2D arrays with shape[1]=4"
if len(boxes_pred):
assert len(scores) == len(boxes_pred), "boxes_pred and scores should be same length"
# sort boxes_pred by scores in decreasing order
boxes_pred = boxes_pred[np.argsort(scores)[::-1], :]
map_total = 0
# loop over thresholds
for t in thresholds:
matched_bt = set()
tp, fn = 0, 0
for i, bt in enumerate(boxes_true):
matched = False
for j, bp in enumerate(boxes_pred):
miou = calculate_iou(bt, bp)
if miou >= t and not matched and j not in matched_bt:
matched = True
tp += 1 # bt is matched for the first time, count as TP
matched_bt.add(j)
if not matched:
fn += 1 # bt has no match, count as FN
fp = len(boxes_pred) - len(matched_bt) # FP is the bp that not matched to any bt
m = tp / (tp + fn + fp)
map_total += m
return map_total / len(thresholds) | b33f6acf90a24ac473d36de4ceb06563bc1523f6 | 15,897 |
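A toy call of map_iou; it assumes the calculate_iou(box_a, box_b) helper referenced above is defined for (x1, y1, w, h) boxes:
import numpy as np

boxes_true = np.array([[10., 10., 50., 50.]])
boxes_pred = np.array([[12., 12., 50., 50.],     # strong overlap -> TP at every default threshold
                       [200., 200., 30., 30.]])  # spurious box -> FP
scores = np.array([0.9, 0.4])
print(map_iou(boxes_true, boxes_pred, scores))   # 0.5 with the default thresholds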
def draw_output_summary(model):
""" reads the data saved in the model class and depending on this data
chooses a visualization method to present the results with the help
of draw_optimization_overview """
if 'time_series' in model.log:
# no optimization has happend.
# hence, cost/predictions/parameters is 0-dim
fig = plt.figure()
ax = plt.subplot(1,1,1)
ax = draw_model_output(ax,model)
ax.title.set_text('Model Output')
else:
fig = draw_optimization_overview(model)
return fig | c996bf588f0aa31f32e80d80d352e6a81203a84f | 15,898 |
def general_spline_interpolation(xs, ys, p, knots=None):
"""
NOTE: SLOW SINCE IT USES B()
xs,ys: interpolation points
p: degree
    knots: If None, use p+1-regular from xs[0] to slightly past xs[-1]
returns cs, knots
"""
# number of interpolation points (and also control points)
m = len(xs)
assert(len(ys) == m)
# use p+1-regular knot vector with ends equal to first sample and slightly
# past last sample
    if knots is None:
knots = uniform_regular_knot_vector(m, p, t0=xs[0], t1=xs[-1]+0.001)
# create matrix A
A = np.zeros((m,m))
for row in range(m):
for col in range(m):
A[row, col] = B(col, p, xs[row], knots)
# compute control points
cs = np.linalg.inv(A).dot(np.array(ys))
return cs, knots | fce53b173b6e8234d0c35418ec1455793a62fc61 | 15,899 |
def number_from_string(s):
"""
Parse and return number from string.
Return float only if number is not an int. Assume number can be parsed from
string.
"""
try:
return int(s)
except ValueError:
return float(s) | 50cc7defe7c60b536d184aaf91c2831ab63043e1 | 15,900 |
def ennAvgPool(inplanes,
kernel_size=1,
stride=None,
padding=0,
ceil_mode=False):
"""enn Average Pooling."""
in_type = build_enn_divide_feature(inplanes)
return enn.PointwiseAvgPool(
in_type,
kernel_size,
stride=stride,
padding=padding,
ceil_mode=ceil_mode) | ea48e911a48237dd7ba19f0515ca4cb2e02f2fa3 | 15,901 |
def acceptable(*args, acceptables):
"""
    Return True if every character in the StringVars passed as arguments is in acceptables, else return False
"""
for arg in args:
for char in arg:
if char.lower() not in acceptables:
return False
return True | 607cc752fb61e8a9348bfdd889afcbb8a8ee5189 | 15,902 |
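For instance, restricting form input to hexadecimal characters:
hex_chars = '0123456789abcdef'
print(acceptable('1A2b', '3C', acceptables=hex_chars))   # True  (checked case-insensitively)
print(acceptable('12g4', acceptables=hex_chars))         # False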
from typing import Optional
from typing import List
from typing import Union
import warnings
def get_confusion_matrix(
ground_truth: np.ndarray,
predictions: np.ndarray,
labels: Optional[List[Union[str, float]]] = None) -> np.ndarray:
"""
Computes a confusion matrix based on predictions and ground truth vectors.
The confusion matrix (a.k.a. contingency table) has predictions in rows
and ground truth in columns. If the value order is not provide via the
``labels`` parameter, the ordering is based on the alphanumeric sorting
of the unique values in both of the input arrays.
Parameters
----------
ground_truth : numpy.ndarray
An array holding the *true* target values.
predictions : numpy.ndarray
An array holding *predictions* of the target values.
labels : List[string, number], optional (default=None)
If a certain ordering of the labels in the confusion matrix is desired,
it can be specified via this parameter. By default alphanumeric sorting
is used.
Warns
-----
UserWarning
Some of the labels provided by the user are not present in either of
the input arrays.
Raises
------
IncorrectShapeError
The ``ground_truth`` and/or ``labels`` vectors are not 1-dimensional.
The length of these two arrays does not agree.
TypeError
The ``labels`` parameter is not a list.
ValueError
The ``labels`` list empty, it contains duplicate entries or some of the
labels present in either of the input array are not accounted for by
the ``labels`` list.
Returns
-------
confusion_matrix : numpy.ndarray
A confusion matrix.
"""
if not fuav.is_1d_array(ground_truth):
raise IncorrectShapeError('The ground truth vector has to be '
'1-dimensional numpy array.')
if not fuav.is_1d_array(predictions):
raise IncorrectShapeError('The predictions vector has to be '
'1-dimensional numpy array.')
if ground_truth.shape[0] != predictions.shape[0]:
raise IncorrectShapeError('Both the ground truth and the predictions '
'vectors have to have the same length.')
all_values = np.concatenate([ground_truth, predictions])
if labels is None:
ordering = np.sort(np.unique(all_values)).tolist()
elif isinstance(labels, list):
if not labels:
raise ValueError('The labels list cannot be empty.')
labels_set = set(labels)
if len(labels_set) != len(labels):
raise ValueError('The labels list contains duplicates.')
extra_labels = labels_set.difference(all_values)
if extra_labels:
warnings.warn(
'Some of the given labels are not present in either of the '
'input arrays: {}.'.format(extra_labels), UserWarning)
unaccounted_labels = set(all_values).difference(labels_set)
if unaccounted_labels:
raise ValueError('The following labels are present in the input '
'arrays but were not given in the labels '
'parameter: {}.'.format(unaccounted_labels))
ordering = labels
else:
raise TypeError('The labels parameter has to either a list or None.')
confusion_matrix_list = []
for pred in ordering:
pdt = predictions == pred
row = [np.logical_and(pdt, ground_truth == i).sum() for i in ordering]
confusion_matrix_list.append(row)
confusion_matrix = np.array(confusion_matrix_list)
return confusion_matrix | e6e45bd987345c1fc773fc1d0eccf752b8ee637c | 15,903 |
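A small example of get_confusion_matrix, assuming the fatf helpers used above (fuav, IncorrectShapeError) are importable; predictions index the rows and ground truth the columns:
import numpy as np

y_true = np.array(['a', 'b', 'a', 'c', 'b'])
y_pred = np.array(['a', 'b', 'b', 'c', 'b'])
print(get_confusion_matrix(y_true, y_pred, labels=['a', 'b', 'c']))
# [[1 0 0]
#  [1 2 0]
#  [0 0 1]]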
def atom_explicit_hydrogen_valences(gra):
""" explicit hydrogen valences, by atom
"""
return dict_.transform_values(atom_explicit_hydrogen_keys(gra), len) | 2f37bfd890c0f15014b17c6bd32981231104055f | 15,904 |
def get_average(pixels):
"""
Given a list of pixels, finds the average red, blue, and green values
Input:
pixels (List[Pixel]): list of pixels to be averaged
Returns:
rgb (List[int]): list of average red, green, blue values across pixels respectively
Assumes you are returning in the order: [red, green, blue]
"""
# rgb of each pixel
pixel_r = 0
pixel_g = 0
pixel_b = 0
# how many pixels in the list[pixels]
n = 0
for pixel in pixels:
n += 1
pixel_r += pixel.red
pixel_g += pixel.green
pixel_b += pixel.blue
pixel_avg = [pixel_r//n, pixel_g//n, pixel_b//n]
return pixel_avg | 9cd694505f8d445732bc178b5d645ff273b298d1 | 15,905 |
def _leading_space_count(line):
"""Return number of leading spaces in line."""
i = 0
while i < len(line) and line[i] == ' ':
i += 1
return i | b28daa2845618df5030a79129bb7cec1167b149a | 15,906 |
def _get_marker_indices(marker, line):
""" method to find the start and end parameter markers
on a template file line. Used by write_to_template()
"""
indices = [i for i, ltr in enumerate(line) if ltr == marker]
start = indices[0:-1:2]
end = [i + 1 for i in indices[1::2]]
assert len(start) == len(end)
return start, end | 4e68f6629fd94920ddc6290c75d92e8de7b467bb | 15,907 |
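For a template line with '~' parameter markers, the returned lists give the slice bounds of each marked parameter:
line = 'rch ~  rch_1  ~ ~  rch_2  ~'
start, end = _get_marker_indices('~', line)
print(start, end)              # [4, 16] [15, 27]
print(line[start[0]:end[0]])   # ~  rch_1  ~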
def get_wrapper_depth(wrapper):
"""Return depth of wrapper function.
.. versionadded:: 3.0
"""
return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__) | c1c31c45a059c4ee56b39322e966d30b742ef86e | 15,909 |
def apiTest():
"""Tests the API connection to lmessage. Returns true if it is connected."""
try:
result = api.add(2, 3)
except:
return False
return result == 5 | 5d63720e78fe5e1bcecd2b1792a0f9bf6345595d | 15,910 |
from scipy import stats as dists
def get_distribution(dist_name):
"""Fetches a scipy distribution class by name"""
if dist_name not in dists.__all__:
return None
cls = getattr(dists, dist_name)
return cls | bebdb2578dd191b1d0ee1aea96e88d6be4bc144c | 15,911 |
def ece(y_probs, y_preds, y_true, balanced=False, bins="fd", **bin_args):
"""Compute the expected calibration error (ECE).
Parameters:
y_probs (np.array): predicted class probabilities
y_preds (np.array): predicted class labels
y_true (np.array): true class labels
Returns:
exp_ce (float): expected calibration error
"""
sklearn.utils.check_consistent_length(y_preds, y_true)
# define the bin function
def bin_func(y_probs_bin, y_preds_bin, y_true_bin):
acc = (y_preds_bin == y_true_bin).mean()
conf = y_probs_bin.mean()
return abs(acc - conf)
# define the balanced bin function
def balanced_bin_func(y_probs_bin, y_preds_bin, y_true_bin):
balacc = sklearn.metrics.balanced_accuracy_score(y_true_bin, y_preds_bin)
conf = y_probs_bin.mean()
return abs(balacc - conf)
# compute the full result
bin_indices = utils.get_bin_indices(y_probs, bins=bins, lower=0, upper=1, **bin_args)
func = balanced_bin_func if balanced else bin_func
return utils.binning(y_probs, y_preds, y_true, bin_indices, func) | 073d1190d71808de03002322679bb29d75a31258 | 15,912 |
def _call_or_get(value, menu=None, choice=None, string=None, obj=None, caller=None):
"""
Call the value, if appropriate, or just return it.
Args:
value (any): the value to obtain. It might be a callable (see note).
Keyword Args:
menu (BuildingMenu, optional): the building menu to pass to value
if it is a callable.
choice (Choice, optional): the choice to pass to value if a callable.
string (str, optional): the raw string to pass to value if a callback.
obj (Object): the object to pass to value if a callable.
caller (Account or Object, optional): the caller to pass to value
if a callable.
Returns:
The value itself. If the argument is a function, call it with
specific arguments (see note).
Note:
If `value` is a function, call it with varying arguments. The
list of arguments will depend on the argument names in your callable.
- An argument named `menu` will contain the building menu or None.
- The `choice` argument will contain the choice or None.
- The `string` argument will contain the raw string or None.
- The `obj` argument will contain the object or None.
- The `caller` argument will contain the caller or None.
- Any other argument will contain the object (`obj`).
Thus, you could define callbacks like this:
def on_enter(menu, caller, obj):
def on_nomatch(string, choice, menu):
def on_leave(caller, room): # note that room will contain `obj`
"""
if callable(value):
# Check the function arguments
kwargs = {}
spec = getargspec(value)
args = spec.args
if spec.keywords:
kwargs.update(dict(menu=menu, choice=choice, string=string, obj=obj, caller=caller))
else:
if "menu" in args:
kwargs["menu"] = menu
if "choice" in args:
kwargs["choice"] = choice
if "string" in args:
kwargs["string"] = string
if "obj" in args:
kwargs["obj"] = obj
if "caller" in args:
kwargs["caller"] = caller
# Fill missing arguments
for arg in args:
if arg not in kwargs:
kwargs[arg] = obj
# Call the function and return its return value
return value(**kwargs)
return value | b5ebf790913bbdaab980ae7f050a96748f1fd3e6 | 15,913 |
import re
def is_shared_object(s):
"""
Return True if s looks like a shared object file.
Example: librt.so.1
"""
    so = re.compile(r'^[\w_\-]+\.so\.[0-9]+(\.[0-9]+)*$', re.IGNORECASE).match
return so(s) | f6d2f5f589c468613004d06c7d213f899f31b7c4 | 15,914 |
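A few quick checks (the function returns a match object, which is truthy, rather than a literal True):
print(bool(is_shared_object('librt.so.1')))      # True
print(bool(is_shared_object('libssl.so.1.1')))   # True
print(bool(is_shared_object('readme.txt')))      # False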
def get_name(properties, lang):
"""Return the Place name from the properties field of the elastic response
Here 'name' corresponds to the POI name in the language of the user request (i.e. 'name:{lang}' field).
If lang is None or if name:lang is not in the properties
Then name receives the local name value
'local_name' corresponds to the name in the language of the country where the POI is located.
>>> get_name({}, 'fr') is None
True
>>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, None)
'spontini'
>>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, 'cz')
'spontini'
>>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, 'fr')
'spontinifr'
"""
name = properties.get(f"name:{lang}")
if name is None:
name = properties.get("name")
return name | 82bd6b0fe7e35dae39767b899b56b24ff91f01cb | 15,915 |
def get_task(name):
"""Return the chosen task."""
tasks_json = load_json('tasks.json')
return tasks_json[name] | 44e39dd9757247212e8e9923fd3f7756fd3b0b9a | 15,916 |
def aws_credentials(request: pytest.fixture, aws_utils: pytest.fixture, profile_name: str):
"""
Fixture for setting up temporary AWS credentials from assume role.
:param request: _pytest.fixtures.SubRequest class that handles getting
a pytest fixture from a pytest function/fixture.
:param aws_utils: aws_utils fixture.
:param profile_name: Named AWS profile to store temporary credentials.
"""
aws_credentials_obj = AwsCredentials(profile_name)
original_access_key, original_secret_access_key, original_token = aws_credentials_obj.get_aws_credentials()
aws_credentials_obj.set_aws_credentials_by_session(aws_utils.assume_session())
def teardown():
# Reset to the named profile using the original AWS credentials
aws_credentials_obj.set_aws_credentials(original_access_key, original_secret_access_key, original_token)
request.addfinalizer(teardown)
return aws_credentials_obj | 13d1549b74b597cf3b00f98a5012c4bae111eeeb | 15,917 |
def mean_predictions(predicted):
"""
    Calculate the mean of predictions that overlap. This is done mostly to be able to plot what the model is doing.
-------------------------------------------------------
Args:
predicted : numpy array
Numpy array with shape (Number points to predict - prediction length -1, predictions length)
-------------------------------------------------------
return:
predictions_mean : list
list with len of number to predict where each position is the mean of all predictions to that step
"""
array_global = [[] for _ in range((predicted.shape[0] + predicted.shape[1]))]
for i in range(predicted.shape[0]):
for l, value in enumerate(predicted[i]):
array_global[i + l].append((float(value)))
predictions_mean = []
for i in range(len(array_global) - 1):
predictions_mean.append(np.array(array_global[i]).mean())
return predictions_mean | 7ee19312ad17b97b27fe74a35df43ea4fa1ec709 | 15,918 |
def find_best_classifier(data, possible_classifiers, target_classifier):
"""Given a list of points, a list of possible Classifiers to use as tests,
and a Classifier for determining the true classification of each point,
finds and returns the classifier with the lowest disorder. Breaks ties by
preferring classifiers that appear earlier in the list. If the best
classifier has only one branch, raises NoGoodClassifiersError."""
min_disorder = INF
for test in possible_classifiers:
avg_disorder = average_test_disorder(data, test, target_classifier)
if avg_disorder < min_disorder:
best_test = test
min_disorder = avg_disorder
if len(split_on_classifier(data, best_test))==1:
raise NoGoodClassifiersError
return best_test | 7c3dc1f8fc0933f238b372fcd3bf3133c2958398 | 15,920 |