content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
---|---|---|
def EG(d1,d2,P):
"""
    Compute player 1's expected gain when rolling d1 dice while
    player 2 rolls d2 dice.
    ----------------------------------------------------
    Args:
        - d1 : number of dice rolled by player 1
        - d2 : number of dice rolled by player 2
        - P : probability matrix
"""
s = 0
L = np.arange(1,6*d2+1)
for k in range(1,6*d1+1):
s += np.sum(P[d1,k]*P[d2,L[L<k]]) - np.sum(P[d1,k]*P[d2,L[L>k]])
return s | 7032bbff4bcf721727c2cb86d6e6f480aa520ee2 | 15,800 |
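A minimal usage sketch for EG, assuming numpy as np and that P[d, k] holds the probability that d dice sum to k (which is what the indexing above suggests):

import numpy as np

# Hypothetical setup: build P by repeated convolution of the single-die pmf.
D = 3                                  # maximum number of dice considered
P = np.zeros((D + 1, 6 * D + 1))
die = np.full(6, 1 / 6)                # pmf of one die over faces 1..6
dist = np.array([1.0])                 # pmf of the sum of zero dice
for d in range(1, D + 1):
    dist = np.convolve(dist, die)      # add one die
    P[d, d:6 * d + 1] = dist           # sums of d dice range from d to 6d

print(EG(1, 1, P))   # ~0 by symmetry (same number of dice)
print(EG(2, 1, P))   # > 0: player 1 rolls more dice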
def get_tx_data(request):
"""
JSON Needed:
1. txid
E.g.:
{"txid": "hgjsyher6ygfdg"}
"""
txid = request.data['txid']
try:
# req_hex_data = get_tx_data(txid)
req_hex_data = api.gettxoutdata(txid,0) # TODO
except:
return Response(status=status.HTTP_403_FORBIDDEN, data={'status': 'failure',
'message': 'Request Unsuccessful. Error while connecting with blockchain node'})
try:
# get requested data from txid
req_json_data = hex_to_json(req_hex_data) # TODO: this is LIST
return Response(data=req_json_data, status=status.HTTP_202_ACCEPTED)
except Exception as e:
return Response(data={"status":"failure", "message": "Something Wrong Occurred."
# ,"exception":e
},
status=status.HTTP_403_FORBIDDEN) | 189f3a6cd9de5d035df0f438e42480e98a0b5f3d | 15,801 |
def roi_max_counts(images_sets, label_array):
"""
Return the brightest pixel in any ROI in any image in the image set.
Parameters
----------
images_sets : array
iterable of 4D arrays
        shape is (len(images_sets), )
label_array : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
Returns
-------
max_counts : int
maximum pixel counts
"""
max_cts = 0
for img_set in images_sets:
for img in img_set:
max_cts = max(max_cts, ndim.maximum(img, label_array))
return max_cts | 2a8993ddb417ac9852ac8a85a4b021cd3db46b66 | 15,802 |
import unicodedata
def normalize_full_width(text):
"""
a function to normalize full width characters
"""
return unicodedata.normalize('NFKC', text) | f8b443089e7083e11f6539f4103ce05f616170c4 | 15,803 |
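A quick usage check: NFKC normalization folds full-width characters to their half-width equivalents.

print(normalize_full_width("ＡＢＣ１２３"))  # -> "ABC123"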
import random
def make_definitions(acronym, words_by_letter, limit=1):
"""Find definitions an acronym given groupings of words by letters"""
definitions = []
for _ in range(limit):
definition = []
for letter in acronym.lower():
opts = words_by_letter.get(letter.lower(), [])
definition.append(random.choice(opts).title() if opts else "?")
definitions.append(" ".join(definition))
return definitions | bc0af7b4e81a443c0afe62c2d77ace15bd1ab306 | 15,804 |
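A small usage sketch with a hypothetical grouping of words by first letter:

words_by_letter = {
    "g": ["great", "grand"],
    "p": ["python", "parser"],
    "u": ["utility", "universal"],
}
print(make_definitions("gpu", words_by_letter, limit=2))
# e.g. ['Great Python Utility', 'Grand Parser Universal'] -- choices are random;
# letters with no candidate words come back as "?"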
def plot_effective_area_from_file(file, all_cuts=False, ax=None, **kwargs):
""" """
ax = plt.gca() if ax is None else ax
if all_cuts:
names = ["", "_NO_CUTS", "_ONLY_GH", "_ONLY_THETA"]
else:
names = tuple([""])
label_basename = kwargs["label"] if "label" in kwargs else ""
kwargs.setdefault("ls", "")
for name in names:
area = QTable.read(file, hdu="EFFECTIVE_AREA" + name)[0]
kwargs["label"] = label_basename + name.replace("_", " ")
ax.errorbar(
0.5 * (area["ENERG_LO"] + area["ENERG_HI"]).to_value(u.TeV)[1:-1],
area["EFFAREA"].to_value(u.m ** 2).T[1:-1, 0],
xerr=0.5 * (area["ENERG_LO"] - area["ENERG_HI"]).to_value(u.TeV)[1:-1],
**kwargs,
)
# Style settings
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel("True energy / TeV")
ax.set_ylabel("Effective collection area / m²")
ax.grid(which="both")
ax.legend()
ax.grid(True, which="both")
return ax | b2627c767dfe8abf64eba1b8b1c1f14a4bf52d87 | 15,805 |
def get_spreading_coefficient(dist):
"""Calculate the spreading coefficient.
Args:
dist: A Distribution from a direct (GC) spreading simulation.
Returns:
The dimensionless spreading coefficient (beta*s*A).
"""
potential = -dist.log_probs
valley = np.amin(potential)
split = int(0.5 * len(potential))
plateau = np.mean(potential[split:])
return valley - plateau | 549a0052400466f64f707588e313a9e88829a4d7 | 15,806 |
from pathlib import Path
def get_config_path() -> Path:
"""Returns path to the root of the project"""
return Path(__file__).parent / "config" | b66ece2bc77717b59e88ac65746a2e3b3e8576a2 | 15,807 |
def round(x):
"""
Return ``x`` rounded to an ``Integer``.
"""
return create_RealNumber(x).round() | 403f5f0b4316ef2f06f45885d21fe352f003e193 | 15,808 |
def author(repo, subset, x):
"""``author(string)``
Alias for ``user(string)``.
"""
# i18n: "author" is a keyword
n = encoding.lower(getstring(x, _("author requires a string")))
return [r for r in subset if n in encoding.lower(repo[r].user())] | ee7bd62d52bd0e36ab910e53ca8e029780f4d6c6 | 15,809 |
def pianoroll_plot_setup(figsize=None, side_piano_ratio=0.025,
faint_pr=True, xlim=None):
"""Makes a tiny piano left of the y-axis and a faint piano on the main figure.
This function sets up the figure for pretty plotting a piano roll. It makes a
small imshow plot to the left of the main plot that looks like a piano. This
piano side plot is aligned along the y-axis of the main plot, such that y
values align with MIDI values (y=0 is the lowest C-1, y=11 is C0, etc).
Additionally, a main figure is set up that shares the y-axis of the piano side
plot. Optionally, a set of faint horizontal lines are drawn on the main figure
that correspond to the black keys on the piano (and a line separating B & C
and E & F). This function returns the formatted figure, the side piano axis,
and the main axis for plotting your data.
By default, this will draw 11 octaves of piano keys along the y-axis; you will
  probably want to reduce what is visible using `ax.set_ylim()` on either returned
axis.
Using with imshow piano roll data:
A common use case is for using imshow() on the main axis to display a piano
roll alongside the piano side plot AND the faint piano roll behind your
data. In this case, if your data is a 2D array you have to use a masked
numpy array to make certain values invisible on the plot, and therefore make
the faint piano roll visible. Here's an example:
midi = np.flipud([
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
])
midi_masked = np.ma.masked_values(midi, 0.0) # Mask out all 0.0's
fig, ax, sp = plotting.pianoroll_plot_setup()
ax.imshow(midi_masked, origin='lower', aspect='auto') # main subplot axis
sp.set_ylabel('My favorite MIDI data') # side piano axis
fig.show()
The other option is to use imshow in RGBA mode, where your data is split
into 4 channels. Every alpha value that is 0.0 will be transparent and show
the faint piano roll below your data.
Args:
    figsize: Size of the matplotlib figure. Will be passed to `plt.figure()`.
Defaults to None.
    side_piano_ratio: Width of the y-axis piano as a ratio of the whole
figure. Defaults to 1/40th.
faint_pr: Whether to draw faint black & white keys across the main plot.
Defaults to True.
xlim: Tuple containing the min and max of the x values for the main plot.
Only used to determine the x limits for the faint piano roll in the main
plot. Defaults to (0, 1000).
Returns:
(figure, main_axis, left_piano_axis)
figure: A matplotlib figure object containing both subplots set up with an
aligned piano roll.
main_axis: A matplotlib axis object to be used for plotting. Optionally
has a faint piano roll in the background.
left_piano_axis: A matplotlib axis object that has a small, aligned piano
along the left side y-axis of the main_axis subplot.
"""
octaves = 11
# Setup figure and gridspec.
fig = plt.figure(figsize=figsize)
gs_ratio = int(1 / side_piano_ratio)
gs = gridspec.GridSpec(1, 2, width_ratios=[1, gs_ratio])
left_piano_ax = fig.add_subplot(gs[0])
# Make a piano on the left side of the y-axis with imshow().
keys = np.array(
[0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0] # notes in descending order; B -> C
)
keys = np.tile(keys, octaves)[:, None]
left_piano_ax.imshow(keys, cmap='binary', aspect='auto',
extent=[0, 0.625, -0.5, octaves*12-0.5])
# Make the lines between keys.
for i in range(octaves):
left_piano_ax.hlines(i*12 - 0.5, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 1.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 3.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 4.5, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 6.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 8.0, -0.5, 1, colors='black', linewidth=0.5)
left_piano_ax.hlines(i*12 + 10.0, -0.5, 1, colors='black', linewidth=0.5)
# Set the limits of the side piano and remove ticks so it looks nice.
left_piano_ax.set_xlim(0, 0.995)
left_piano_ax.set_xticks([])
# Create the aligned axis we'll return to the user.
main_ax = fig.add_subplot(gs[1], sharey=left_piano_ax)
# Draw a faint piano roll behind the main axes (if the user wants).
if faint_pr:
xlim = (0, 1000) if xlim is None else xlim
x_min, x_max = xlim
x_delta = x_max - x_min
main_ax.imshow(np.tile(keys, x_delta), cmap='binary', aspect='auto',
alpha=0.05, extent=[x_min, x_max, -0.5, octaves*12-0.5])
for i in range(octaves):
main_ax.hlines(i * 12 + 4.5, x_min, x_max, colors='black',
linewidth=0.5, alpha=0.25)
main_ax.hlines(i * 12 - 0.5, x_min, x_max, colors='black',
linewidth=0.5, alpha=0.25)
main_ax.set_xlim(*xlim)
# Some final cosmetic tweaks before returning the axis obj's and figure.
plt.setp(main_ax.get_yticklabels(), visible=False)
gs.tight_layout(fig)
return fig, main_ax, left_piano_ax | dc2a43be63d77ee99230399b687e86c09570db6c | 15,810 |
def exercise(request, exercisename):
"""Show single sport and its totals."""
e = exercisename
cur_user = request.user
exercises = Exercise.objects.filter(owner=cur_user, sport=e).order_by('-date')
context = {'exercises': exercises, 'total': Stats.total(cur_user, sport=e),
'totaltime': Stats.totaltime(cur_user, sport=e)}
return render(request, 'distances/exercises.html', context) | 8648673d6bdb3997d9b9d38155e2cb2039ff4f1b | 15,811 |
import random
def randomBinaryMatrix(scale, type):
"""
Generates a pseudo random BinaryMatrix of a given scale(small,large) and
datatype(int).
"""
if(scale == "small" and type == "int"):
nrow = random.randint(1, 10)
ncol = random.randint(1, 10)
data = []
for i in range(nrow):
data.append([])
for _j in range(ncol):
data[i].append(random.randint(0, 1))
return BinaryMatrix(
nrow=nrow,
ncol=ncol,
data=data
)
if(scale == "large" and type == "int"):
nrow = random.randint(10, 100)
ncol = random.randint(10, 100)
data = []
for i in range(nrow):
data.append([])
for _j in range(ncol):
data[i].append(random.randint(0, 1))
return BinaryMatrix(
nrow=nrow,
ncol=ncol,
data=data
) | 289d266eee4f6244774f7138e9efbe18970545f4 | 15,812 |
from typing import Optional
def load_batch(server_context: ServerContext, assay_id: int, batch_id: int) -> Optional[Batch]:
"""
Loads a batch from the server.
:param server_context: A LabKey server context. See utils.create_server_context.
:param assay_id: The protocol id of the assay from which to load a batch.
    :param batch_id: The id of the batch to load.
    :return: The loaded Batch, or None if the batch could not be loaded.
"""
load_batch_url = server_context.build_url("assay", "getAssayBatch.api")
loaded_batch = None
payload = {"assayId": assay_id, "batchId": batch_id}
json_body = server_context.make_request(load_batch_url, json=payload)
if json_body is not None:
loaded_batch = Batch(**json_body["batch"])
return loaded_batch | 731d463bc1e0380107390caabae8c57c7e6cff02 | 15,813 |
def canvas_compose(mode, dst, src):
"""Compose two alpha premultiplied images
https://ciechanow.ski/alpha-compositing/
http://ssp.impulsetrain.com/porterduff.html
"""
src_a = src[..., -1:] if len(src.shape) == 3 else src
dst_a = dst[..., -1:] if len(dst.shape) == 3 else dst
if mode == COMPOSE_OVER:
return src + dst * (1 - src_a)
elif mode == COMPOSE_OUT:
return src * (1 - dst_a)
elif mode == COMPOSE_IN:
return src * dst_a
elif mode == COMPOSE_ATOP:
return src * dst_a + dst * (1 - src_a)
elif mode == COMPOSE_XOR:
return src * (1 - dst_a) + dst * (1 - src_a)
elif isinstance(mode, tuple) and len(mode) == 4:
k1, k2, k3, k4 = mode
return (k1 * src * dst + k2 * src + k3 * dst + k4).clip(0, 1)
raise ValueError(f"invalid compose mode: {mode}") | 9d95b840f814a77077050cb43a081c01c496640b | 15,814 |
from sqlmodel import select
async def get_timelog_user_id(
*,
user_id: int,
epic_id: int,
month: int,
year: int,
session: Session = Depends(get_session),
):
"""
    Get list of timelogs by user_id, epic_id, month and year.
    Parameters
    ----------
    user_id : int
        ID of the user from which to pull timelogs.
    epic_id : int
        ID of the epic from which to pull timelogs.
    month : int
        Month from which to pull timelog(s).
    year : int
        Year from which to pull timelog(s).
session : Session
SQL session that is to be used to get the timelogs.
Defaults to creating a dependency on the running SQL model session.
"""
statement = (
select(
TimeLog.id,
AppUser.username.label("username"),
Epic.name.label("epic_name"),
EpicArea.name.label("epic_area_name"),
TimeLog.start_time,
TimeLog.end_time,
TimeLog.count_hours,
TimeLog.count_days,
)
.join(AppUser)
.join(EpicArea)
.join(Epic)
.where(TimeLog.user_id == user_id)
.where(TimeLog.epic_id == epic_id)
.where(TimeLog.month == month)
.where(TimeLog.year == year)
.order_by(TimeLog.end_time.desc())
)
results = session.exec(statement).all()
return results | fe4bdcbda40c2d32b743262cb14139e89890b237 | 15,815 |
def _cross(
vec1,
vec2,
):
"""Cross product between vec1 and vec2 in R^3"""
vec3 = np.zeros((3,))
vec3[0] = +(vec1[1] * vec2[2] - vec1[2] * vec2[1])
vec3[1] = -(vec1[0] * vec2[2] - vec1[2] * vec2[0])
vec3[2] = +(vec1[0] * vec2[1] - vec1[1] * vec2[0])
return vec3 | 2958a7365908bbd38c75f79e489d136f21fcc011 | 15,816 |
def _simplex_dot3D(g, x, y, z):
""" 3D dot product """
return g[0] * x + g[1] * y + g[2] * z | fcc48153b34af7cef0811f21fc04d22e6536797a | 15,817 |
import inspect
def __get_report_failures(test_data: TestData) -> str:
"""
Gets test report with all failed test soft asserts
:param test_data: test data from yaml file
:return: str test report with all soft asserts
"""
test_id = __get_test_id()
failed_assert_reports = __FAILED_EXPECTATIONS.get(test_id)
meta_info = inspect.stack()[2][1:4]
expectation_report = ExpectationReport(test_data.test_name, failed_assert_reports, meta_info)
return expectation_report.get_report_message() | cedb24569acedf9bd251c233f220c44a1bb05772 | 15,818 |
def initialized_sm(registrations, uninitialized_sm):
""" The equivalent of an app with commit """
uninitialized_sm.initialize()
return uninitialized_sm | 491e4366b81379b053d1dea203d338766c3afa86 | 15,819 |
from pathlib import Path
from typing import Optional
import os
import sys
def compute_options(
platform: PlatformName,
package_dir: Path,
output_dir: Path,
config_file: Optional[str],
args_archs: Optional[str],
prerelease_pythons: bool,
) -> BuildOptions:
"""
Compute the options from the environment and configuration file.
"""
manylinux_identifiers = {
f"manylinux-{build_platform}-image" for build_platform in MANYLINUX_ARCHS
}
musllinux_identifiers = {
f"musllinux-{build_platform}-image" for build_platform in MUSLLINUX_ARCHS
}
disallow = {
"linux": {"dependency-versions"},
"macos": manylinux_identifiers | musllinux_identifiers,
"windows": manylinux_identifiers | musllinux_identifiers,
}
options = ConfigOptions(package_dir, config_file, platform=platform, disallow=disallow)
build_config = options("build", env_plat=False, sep=" ") or "*"
skip_config = options("skip", env_plat=False, sep=" ")
test_skip = options("test-skip", env_plat=False, sep=" ")
prerelease_pythons = prerelease_pythons or strtobool(
os.environ.get("CIBW_PRERELEASE_PYTHONS", "0")
)
deprecated_selectors("CIBW_BUILD", build_config, error=True)
deprecated_selectors("CIBW_SKIP", skip_config)
deprecated_selectors("CIBW_TEST_SKIP", test_skip)
package_files = {"setup.py", "setup.cfg", "pyproject.toml"}
if not any(package_dir.joinpath(name).exists() for name in package_files):
names = ", ".join(sorted(package_files, reverse=True))
msg = f"cibuildwheel: Could not find any of {{{names}}} at root of package"
print(msg, file=sys.stderr)
sys.exit(2)
# This is not supported in tool.cibuildwheel, as it comes from a standard location.
# Passing this in as an environment variable will override pyproject.toml, setup.cfg, or setup.py
requires_python_str: Optional[str] = os.environ.get(
"CIBW_PROJECT_REQUIRES_PYTHON"
) or get_requires_python_str(package_dir)
requires_python = None if requires_python_str is None else SpecifierSet(requires_python_str)
build_selector = BuildSelector(
build_config=build_config,
skip_config=skip_config,
requires_python=requires_python,
prerelease_pythons=prerelease_pythons,
)
test_selector = TestSelector(skip_config=test_skip)
return _compute_single_options(
options, args_archs, build_selector, test_selector, platform, package_dir, output_dir
) | ab233556f1099b1d29877dab84a83452a0cfe7ff | 15,820 |
def coordinateToIndex(coordinate):
"""Return a raw index (e.g [4, 4]) from board coordinate (e.g. e4)"""
return [abs(int(coordinate[1]) - 8), ("a", "b", "c", "d", "e", "f", "g", "h").index(coordinate[0])] | d3dcf6d01c4bec2058cffef88867d45ba51ea560 | 15,821 |
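Quick usage check (row 0 corresponds to rank 8, column 0 to file 'a'):

print(coordinateToIndex("e4"))  # [4, 4]
print(coordinateToIndex("a8"))  # [0, 0]
print(coordinateToIndex("h1"))  # [7, 7]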
import re
import logging
def parse_page(url):
"""parge the page and get all the links of images, max number is 100 due to limit by google
Args:
url (str): url of the page
Returns:
A set containing the urls of images
"""
page_content = download_page(url)
if page_content:
link_list = re.findall('src="(.*?)"', page_content)
if len(link_list) == 0:
print('get 0 links from page {0}'.format(url))
logging.info('get 0 links from page {0}'.format(url))
return set()
else:
return set(link_list)
else:
return set() | 5833e0092650488e8ef430de0eafd79f6e5d2ffa | 15,822 |
import os
def full_file_names(file_dir):
"""
List all full file names(with extension) in target directory.
:param file_dir:
target directory.
:return:
a list containing full file names.
"""
for _, _, files in os.walk(file_dir):
return files | e70947c2ce1ff3eab5f7dd1074ba378498aedbf7 | 15,823 |
def json_page_resp(name, page, paginator):
"""
Returns a standardized page response
"""
page_rows = paginator.get_page(page)
return JsonResponse({'page':page, 'pages':paginator.num_pages, name:[x['json'] for x in page_rows], 'size':len(page_rows)}, safe=False) | d615cfeaa2fafdb35333eee6aa6d63e1511a1dd3 | 15,824 |
from .interpreters import ScriptRunnerPlugin
def get_script_runner():
"""
Gets the script runner plugin instance if any otherwise returns None.
:rtype: hackedit.api.interpreters.ScriptRunnerPlugin
"""
return _window().get_plugin_instance(ScriptRunnerPlugin) | 2f76f46dd502fbd6ce9bd0a5f90eb7eed8bb64ca | 15,825 |
def detect_peak(a, thresh=0.3):
"""
Detect the extent of the peak in the array by looking for where the slope
changes to flat. The highest peak is detected and data and followed until
the slope flattens to a threshold.
"""
iPk = np.argmax(a)
d = np.diff(a)
g1 = np.gradient(a)
g2 = np.gradient(g1)
threshPos = np.nanmax(d) * thresh
threshNeg = -1 * threshPos
# Start searching away from the peak zone
g2L = np.flipud(g2[:iPk])
g2R = g2[iPk+1:]
iL = iPk - np.min(np.where(g2L>=0))
iR = iPk + np.min(np.where(g2R>=0)) + 1
g1[iL:iR] = np.nan
# Search for the threshold crossing point
g1L = np.flipud(g1[:iPk])
g1R = g1[iPk+1:]
iL = iPk - np.min(np.where(g1L<=threshPos))
iR = iPk + np.min(np.where(g1R>=threshNeg))
msk = np.zeros_like(a)
msk[iL:iR] = 1
# DEBUG
if False:
pl.plot(a, marker='.')
pl.plot(g1, marker='^')
pl.plot(msk, marker='o')
pl.plot(np.ones_like(a)*threshPos, color='k')
pl.plot(np.ones_like(a)*threshNeg, color='k')
pl.axhline(0, color='grey')
pl.show()
return msk | e4b520a4dd932992fb1cf20b34e253ecbaac9614 | 15,826 |
def latest_version():
"""
Returns the latest version, as specified by the Git tags.
"""
versions = []
for t in tags():
assert t == t.strip()
parts = t.split(".")
assert len(parts) == 3, t
parts[0] = parts[0].lstrip("v")
v = tuple(map(int, parts))
versions.append((v, t))
_, latest = max(versions)
assert latest in tags()
return latest | 346edcc6d087ca1511411b52de20b90f1a993f3a | 15,827 |
import string
import pandas as pd
def get_age_group(df,n: int=10):
"""Assigns a category to the age DR
Parameters
----------
df : Dataframe
n : number of categories
Returns
-------
Dataset with Age_group column
"""
df["Age_group"] = pd.cut(df["Age"], n, labels = list(string.ascii_uppercase)[:n])
return df | f793316c7c494adec1bfedf8613edf6c4ed5e2e2 | 15,828 |
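A small sketch on toy data, assuming pandas as pd: ages are cut into n equal-width bins labelled 'A', 'B', and so on:

import pandas as pd

df = pd.DataFrame({"Age": [4, 17, 25, 33, 48, 62, 71, 80]})
print(get_age_group(df, n=4)["Age_group"].tolist())
# ['A', 'A', 'B', 'B', 'C', 'D', 'D', 'D']  (equal-width bins over the 4-80 range)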
def transformer_encoder_layer(query_input,
key_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name=''):
"""The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention followed by
    position-wise feed-forward networks, with both components accompanied
    by the post_process_layer to add residual connection, layer normalization
    and dropout.
"""
key_input = pre_process_layer(
key_input,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_att') if key_input else None
value_input = key_input if key_input else None
attn_output = multi_head_attention(
pre_process_layer(
query_input,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_att'),
key_input,
value_input,
attn_bias,
d_key,
d_value,
d_model,
n_head,
attention_dropout,
param_initializer=param_initializer,
name=name + '_multi_head_att')
attn_output = post_process_layer(
query_input,
attn_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_att')
ffd_output = positionwise_feed_forward(
pre_process_layer(
attn_output,
preprocess_cmd,
prepostprocess_dropout,
name=name + '_pre_ffn'),
d_inner_hid,
d_model,
relu_dropout,
hidden_act,
param_initializer=param_initializer,
name=name + '_ffn')
return post_process_layer(
attn_output,
ffd_output,
postprocess_cmd,
prepostprocess_dropout,
name=name + '_post_ffn') | fe300a8c72c39c7e847f400f8f874dabab80b6e6 | 15,829 |
def generate(ode, lenght=int(2e4)):
"""
    Time series generation from an ODE.
    :param ode: ODE object;
    :param lenght: series length;
    :return: time series.
"""
state = ode.initial_state
data = np.zeros([int(state.shape[0]), lenght])
for i in range(5000):
state = runge_kutta(ode, state)
for i in range(lenght):
state = runge_kutta(ode, state)
data[:, i] = state
return data | 442f5359e3225d00cf0396e710e3978d1a6e37f8 | 15,830 |
def complexFormatToRealImag(complexVec):
"""
A reformatting function which converts a complex vector into real valued array.
    Let the values in the input array be [r1+j*i1, r2+j*i2, ..., rN+j*iN];
    then the output array will be [r1, i1, r2, i2, ..., rN, iN].
    :param complexVec: complex numpy ndarray
    :return: a 1D real-valued numpy array of length 2*N with the real and imaginary parts interleaved.
"""
N = len(complexVec)
ret = np.empty((2*N,), dtype=np.real(complexVec).dtype)
ret[0::2] = complexVec.real
ret[1::2] = complexVec.imag
return ret | d955f2b31581036594ca79cd4755327eaa8b2446 | 15,831 |
def displayaction(uid):
""" Display the command from the xml file
"""
tree = ET.parse(OPENSTRIATOFILE)
root = tree.getroot()
textaction = root.findall("./action[@uid='"+uid+"']")
if len(textaction) == 0:
return "This UID does not exist!"
else:
return "UID %s action: %s" % (uid, textaction[0].text) | e34133a168b20cc9b018175ee5b4363fd2ff9690 | 15,832 |
def get_parent(inst, rel_type='cloudify.relationships.contained_in'):
"""
Gets the parent of an instance
:param `cloudify.context.NodeInstanceContext` inst: Cloudify instance
:param string rel_type: Relationship type
:returns: Parent context
:rtype: :class:`cloudify.context.RelationshipSubjectContext` or None
"""
for rel in inst.relationships:
if rel_type in rel.type_hierarchy:
return rel.target
return None | 06bc76ec55735a47a3cf26df2daa4346290671ee | 15,833 |
def _query_param(key, value):
"""ensure that a query parameter's value is a string
of bytes in UTF-8 encoding.
"""
if isinstance(value, unicode):
pass
elif isinstance(value, str):
value = value.decode('utf-8')
else:
value = unicode(value)
return key, value.encode('utf-8') | 9c89517afd8d1684b1bb954f66cd2072296dee82 | 15,834 |
def _create_or_get_dragonnet(embedding, is_training, treatment, outcome, split, getter=None):
"""
Make predictions for the outcome, using the treatment and embedding,
and predictions for the treatment, using the embedding
Both outcome and treatment are assumed to be binary
Note that we return the loss as a sum (and not a mean). This makes more sense for training dynamics
Parameters
----------
    embedding
    is_training
    treatment
    outcome
    split
    getter: custom getter, for polyak averaging support
Returns
-------
"""
treatment_float = tf.cast(treatment, tf.float32)
with tf.variable_scope('dragon_net', reuse=tf.AUTO_REUSE, custom_getter=getter):
with tf.variable_scope('treatment'):
loss_t, per_example_loss_t, logits_t, expectation_t = _make_feedforward_classifier(
embedding, treatment, 2, split, num_hidden_layers=2)
with tf.variable_scope('outcome_st_treatment'):
loss_ot1, per_example_loss_ot1, logits_ot1, expectation_ot1 = _make_feedforward_classifier(
embedding, outcome, 2, split=split*treatment_float, num_hidden_layers=0)
with tf.variable_scope('outcome_st_no_treatment'):
loss_ot0, per_example_loss_ot0, logits_ot0, expectation_ot0 = _make_feedforward_classifier(
embedding, outcome, 2, split=split*(1.-treatment_float), num_hidden_layers=0)
tf.losses.add_loss(loss_ot0)
tf.losses.add_loss(loss_ot1)
tf.losses.add_loss(loss_t)
training_loss = loss_ot0 + loss_ot1 + loss_t
outcome_st_treat = {'per_example_loss': per_example_loss_ot1,
'logits': logits_ot1,
'expectations': expectation_ot1}
outcome_st_no_treat = {'per_example_loss': per_example_loss_ot0,
'logits': logits_ot0,
'expectations': expectation_ot0}
treat = {'per_example_loss': per_example_loss_t,
'logits': logits_t,
'expectations': expectation_t}
return training_loss, outcome_st_treat, outcome_st_no_treat, treat | 7fc7fead338ac2c33bcfa016f9d66e34d15ac59c | 15,835 |
def fit_ols(Y, X):
"""Fit OLS model to both Y and X"""
model = sm.OLS(Y, X)
model = model.fit()
return model | dcc86cab7fe15400130febd36d5aa8139a68c64f | 15,836 |
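A minimal sketch, assuming statsmodels is imported as sm as the function body suggests: recover slope 2 and intercept 1 from noisy data.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 50)
y = 2 * x + 1 + rng.normal(scale=0.1, size=x.size)
model = fit_ols(y, sm.add_constant(x))   # add_constant supplies the intercept column
print(model.params)                      # approximately [1., 2.]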
def compute_modularity_per_code(mutual_information):
"""Computes the modularity from mutual information."""
# Mutual information has shape [num_codes, num_factors].
squared_mi = np.square(mutual_information)
max_squared_mi = np.max(squared_mi, axis=1)
numerator = np.sum(squared_mi, axis=1) - max_squared_mi
denominator = max_squared_mi * (squared_mi.shape[1] - 1.)
delta = numerator / denominator
modularity_score = 1. - delta
index = (max_squared_mi == 0.)
modularity_score[index] = 0.
return modularity_score | 5c81b583c6313818da435dd367a3d53933025227 | 15,837 |
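A toy check: a code whose mutual information is concentrated on a single factor scores 1, while one spread evenly over all factors scores 0.

import numpy as np

mi = np.array([[0.8, 0.0, 0.0],    # specialised to one factor
               [0.3, 0.3, 0.3]])   # evenly spread over three factors
print(compute_modularity_per_code(mi))  # -> [1. 0.]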
def _CheckSemanticColorsReferences(input_api, output_api):
"""
Checks colors defined in semantic_colors_non_adaptive.xml only referencing
resources in color_palette.xml.
"""
errors = []
color_palette = None
for f in IncludedFiles(input_api):
if not f.LocalPath().endswith('/semantic_colors_non_adaptive.xml'):
continue
if color_palette is None:
color_palette = _colorXml2Dict(
input_api.ReadFile(helpers.COLOR_PALETTE_PATH))
for line_number, line in f.ChangedContents():
r = helpers.COLOR_REFERENCE_PATTERN.search(line)
if not r:
continue
color = r.group()
if _removePrefix(color) not in color_palette:
errors.append(
' %s:%d\n \t%s' % (f.LocalPath(), line_number, line.strip()))
if errors:
return [
output_api.PresubmitError(
'''
Android Semantic Color Reference Check failed:
Your new color values added in semantic_colors_non_adaptive.xml are not
defined in ui/android/java/res/values/color_palette.xml, listed below.
This is banned. Colors in semantic colors can only reference
the existing color resource from color_palette.xml.
See https://crbug.com/775198 for more information.
''', errors)
]
return [] | 63243cbdea3ad1ae4bc50d856c97350ffafc1407 | 15,838 |
import logging
def post_new_attending():
"""Posts attending physician information to the server
This method generates the new attending physician’s
dictionary with all of his/her information, then validates
that all of the information is the correct type. If the
validation stage is satisfied, then the attending’s
dictionary is added to the database.
Parameters
----------
N/A
Returns
-------
String
result of adding a new attending
"""
new_dict = request.get_json()
validate = validate_new_attending(new_dict)
if validate is not True:
return validate, 400
attending = add_new_attending(new_dict["attending_username"],
new_dict["attending_email"],
new_dict["attending_phone"])
if attending is True:
logging.info("New Attending Physician Added!")
logging.info("Physician User Name: {}".format(
new_dict["attending_username"]))
logging.info("Physician Email: {}".format(
new_dict["attending_email"]))
return "New Attending Physician Successfully Added", 200
else:
return "Failed to Add New Attending Physician", 400 | 18ddcb3bfcc601a22abccac7828ed6ac36368a33 | 15,839 |
def submitFeatureWeightedGridStatistics(geoType, dataSetURI, varID, startTime, endTime, attribute, value,
gmlIDs, verbose, coverage, delim, stat, grpby, timeStep, summAttr,
weighted, wfs_url, outputfname, sleepSecs, async=False):
"""
Makes a featureWeightedGridStatistics algorithm call.
The web service interface implemented is summarized here:
https://my.usgs.gov/confluence/display/GeoDataPortal/Generating+Area+Weighted+Statistics+Of+A+Gridded+Dataset+For+A+Set+Of+Vector+Polygon+Features
Note that varID and stat can be a list of strings.
"""
# test for dods:
dataSetURI = dodsReplace(dataSetURI)
log.info('Generating feature collection.')
featureCollection = _getFeatureCollectionGeoType(geoType, attribute, value, gmlIDs, wfs_url)
if featureCollection is None:
return
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
if not weighted:
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureGridStatisticsAlgorithm'
solo_inputs = [("FEATURE_ATTRIBUTE_NAME", attribute),
("DATASET_URI", dataSetURI),
("TIME_START", startTime),
("TIME_END", endTime),
("REQUIRE_FULL_COVERAGE", str(coverage).lower()),
("DELIMITER", delim),
("GROUP_BY", grpby),
("SUMMARIZE_TIMESTEP", str(timeStep).lower()),
("SUMMARIZE_FEATURE_ATTRIBUTE", str(summAttr).lower()),
("FEATURE_COLLECTION", featureCollection)]
if isinstance(stat, list):
num_stats = len(stat)
if num_stats > 7:
raise Exception('Too many statistics were submitted.')
else:
num_stats = 1
if isinstance(varID, list):
num_varids = len(varID)
else:
num_varids = 1
inputs = [('', '')] * (len(solo_inputs) + num_varids + num_stats)
count = 0
rm_cnt = 0
for solo_input in solo_inputs:
if solo_input[1] is not None:
inputs[count] = solo_input
count += 1
else:
rm_cnt += 1
del inputs[count:count + rm_cnt]
if num_stats > 1:
for stat_in in stat:
if stat_in not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
                raise Exception(('The statistic {} is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", ' +
                                 '"VARIANCE", "STD_DEV", "SUM", "COUNT"').format(stat_in))
inputs[count] = ("STATISTICS", stat_in)
count += 1
elif num_stats == 1:
if stat not in ["MEAN", "MINIMUM", "MAXIMUM", "VARIANCE", "STD_DEV", "SUM", "COUNT"]:
            raise Exception(('The statistic {} is not in the allowed list: "MEAN", "MINIMUM", "MAXIMUM", ' +
                             '"VARIANCE", "STD_DEV", "SUM", "COUNT"').format(stat))
inputs[count] = ("STATISTICS", stat)
count += 1
if num_varids > 1:
for var in varID:
inputs[count] = ("DATASET_ID", var)
count += 1
elif num_varids == 1:
inputs[count] = ("DATASET_ID", varID)
output = "OUTPUT"
return _executeRequest(processid, inputs, output, verbose, outputfname, sleepSecs, async=async) | ab6f1cbeee1943f75aa16c9153d2a317113d2398 | 15,840 |
from urllib.request import urlopen
def fetch_words(url):
"""
Fetch a list of words from a URL
Args:
url: the url of any text document (no decoding to utf-8 added)
Returns:
A list of strings containing the words in the document
"""
with urlopen(url) as story:
story_words = []
for line in story:
line_words = line.split()
for word in line_words:
story_words.append(word)
return story_words | 6679425f5f3680bd0b47888d59530a55b4c23443 | 15,841 |
def generate_fgsm_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):
"""
Untargeted attack. Y is not needed.
"""
fgsm = FastGradientMethod(model, back='tf', sess=sess)
fgsm_params = {'eps': 0.1, 'ord': np.inf, 'y': None, 'clip_min': 0, 'clip_max': 1}
fgsm_params = override_params(fgsm_params, attack_params)
X_adv = fgsm.generate_np(X, **fgsm_params)
return X_adv | 7b682622f843dd2c5d421c3d52b15e3a204edb0a | 15,842 |
def s3_bucket_for(bucket_prefix, path):
"""returns s3 bucket for path"""
suffix = s3_bucket_suffix_for(path)
return "{}-{}".format(bucket_prefix, suffix) | a59145474d2965a9e5f98d4728a6ac90d0d42cdf | 15,843 |
def regrid_create_operator(regrid, name, parameters):
"""Create a new `RegridOperator` instance.
:Parameters:
regrid: `ESMF.Regrid`
The `ESMF` regridding operator between two fields.
name: `str`
A descriptive name for the operator.
parameters: `dict`
Parameters that describe the complete coordinate system of
the destination grid.
:Returns:
`RegridOperator`
The new regrid operator.
"""
return RegridOperator(regrid, name, **parameters) | 1c44dbe1c4826ee566cfb6b95ac704c9af19fc30 | 15,844 |
def _decode_hmc_values(hmc_ref):
"""Decrypts any sensitive HMC values that were encrypted in the DB"""
if hmc_ref is not None:
hmc_ref = jsonutils.to_primitive(hmc_ref)
#Make sure to DeCrypt the Password after retrieving from the database
## del two lines by lixx
#if hmc_ref.get('password') is not None:
# hmc_ref['password'] = EncryptHandler().decode(hmc_ref['password'])
return hmc_ref | 7e1b33265811d79f245853cb016e5acd45627028 | 15,845 |
import logging
def dtensor_shutdown_tpu_system():
"""Shutdown TPU system."""
@def_function.function
def _shutdown_tpu_system():
return gen_dtensor_ops.shutdown_tpu_system()
success = _shutdown_tpu_system() if context.is_tfrt_enabled() else True
if success:
logging.info("TPU system shut down.")
else:
logging.warning("TPU system fails to shut down.") | 23140407222646fd9adb845ae5e04ca4a3a9cc5a | 15,846 |
import json
import math
def edit_comment(request):
"""
Edit an existing comment
"""
response = {"status": "success",
"data": {}}
if "char_id" in request.POST:
char_id = request.POST["char_id"]
else:
response["status"] = "fail"
response["data"]["message"] = "Paste ID was not provided (POST parameter 'char_id')"
return HttpResponse(json.dumps(response), status=422)
try:
paste = Paste.objects.get(char_id=char_id)
except ObjectDoesNotExist:
response["status"] = "fail"
response["data"]["message"] = "The paste couldn't be found."
return HttpResponse(json.dumps(response))
if "id" in request.POST:
id = int(request.POST["id"])
else:
response["status"] = "fail"
response["data"]["message"] = "Comment ID was not provided (POST parameter 'id')"
return HttpResponse(json.dumps(response), status=422)
if "page" in request.POST:
page = int(request.POST["page"])
else:
page = 0
if not request.user.is_authenticated():
response["status"] = "fail"
response["data"]["message"] = "You are not logged in."
return HttpResponse(json.dumps(response), status=422)
try:
comment = Comment.objects.get(id=id)
except ObjectDoesNotExist:
response["status"] = "fail"
response["data"]["message"] = "The comment doesn't exist."
return HttpResponse(json.dumps(response), status=400)
if comment.user != request.user:
response["status"] = "fail"
response["data"]["message"] = "You are trying to edit someone else's comment."
return HttpResponse(json.dumps(response), status=422)
submit_form = SubmitCommentForm(request.POST or None)
if submit_form.is_valid():
comment_data = submit_form.cleaned_data
comment.text = comment_data["text"]
comment.save()
total_comment_count = Comment.objects.filter(paste=paste).count()
start = page * Comment.COMMENTS_PER_PAGE
end = start + Comment.COMMENTS_PER_PAGE
response["data"]["edited_comment_id"] = comment.id
response["data"]["comments"] = queryset_to_list(Comment.objects.filter(paste=paste) \
.select_related("user") \
[start:end],
fields=["id", "text", "submitted", "edited", "user__username=username"])
response["data"]["page"] = page
response["data"]["pages"] = math.ceil(float(total_comment_count) / float(Comment.COMMENTS_PER_PAGE))
if response["data"]["pages"] == 0:
response["data"]["pages"] = 1
response["data"]["total_comment_count"] = total_comment_count
else:
response["status"] = "fail"
response["data"]["message"] = "Provided text wasn't valid."
return HttpResponse(json.dumps(response)) | b56f0b3f3c0d0635b4faa9a06320bc4b715ea0d1 | 15,847 |
import sys
def aws_get_size(size):
""" Get Node Size - Ex: (cmd:<size>)"""
conn = util_get_connection()
sizes = [i for i in conn.list_sizes()]
if size:
for i in sizes:
if str(i.ram) == size or i.id == size:
print >> sys.stderr, ' - '.join([i.id, str(i.ram), str(i.price)])
return i
return None | 7ed1868d4512e660d94d474a907b8b4490123b51 | 15,848 |
import torch
def make_positions(tensor, padding_idx, onnx_trace=False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (
(torch.cumsum(mask, dim=1) - 1).type_as(mask) * mask
).long() | e5d117d64669f514b5cab4ad08ec526dd421493e | 15,849 |
import warnings
def taubin_curv(coords, resolution):
"""Curvature calculation based on algebraic circle fit by Taubin.
Adapted from: "https://github.com/PmagPy/PmagPy/blob/2efd4a92ddc19c26b953faaa5c08e3d8ebd305c9/SPD/lib
/lib_curvature.py"
G. Taubin, "Estimation Of Planar Curves, Surfaces And Nonplanar
Space Curves Defined By Implicit Equations, With
Applications To Edge And Range Image Segmentation",
IEEE Trans. PAMI, Vol. 13, pages 1115-1138, (1991)
Parameters
----------
coords : list
        Nested list of paired x and y coordinates for each point of the line where a curve needs to be fitted.
[[x_1, y_1], [x_2, y_2], ....]
resolution : float or int
Number of pixels per mm in original image.
Returns
-------
float or int(0)
If the radius of the fitted circle is finite, it will return the curvature (1/radius).
If the radius is infinite, it will return 0.
"""
warnings.filterwarnings("ignore") # suppress RuntimeWarnings from dividing by zero
xy = np.array(coords)
x = xy[:, 0] - np.mean(xy[:, 0]) # norming points by x avg
y = xy[:, 1] - np.mean(xy[:, 1]) # norming points by y avg
# centroid = [np.mean(xy[:, 0]), np.mean(xy[:, 1])]
z = x * x + y * y
zmean = np.mean(z)
z0 = ((z - zmean) / (2. * np.sqrt(zmean))) # changed from using old_div to Python 3 native division
zxy = np.array([z0, x, y]).T
u, s, v = np.linalg.svd(zxy, full_matrices=False) #
v = v.transpose()
a = v[:, 2]
a[0] = (a[0]) / (2. * np.sqrt(zmean))
a = np.concatenate([a, [(-1. * zmean * a[0])]], axis=0)
# a, b = (-1 * a[1:3]) / a[0] / 2 + centroid
r = np.sqrt(a[1] * a[1] + a[2] * a[2] - 4 * a[0] * a[3]) / abs(a[0]) / 2
if np.isfinite(r):
curv = 1 / (r / resolution)
if curv >= 0.00001:
return curv
else:
return 0
else:
return 0 | f3728528dbec5681b3915683af22b8e9838e73ce | 15,850 |
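A quick sanity check on synthetic data: points sampled from a circle of radius 50 px at 10 px/mm should give a curvature of about 1 / (50 px / 10 px/mm) = 0.2 per mm.

import numpy as np

theta = np.linspace(0, np.pi / 2, 50)                      # a quarter arc is enough to fit
coords = [[50 * np.cos(t), 50 * np.sin(t)] for t in theta]
print(taubin_curv(coords, resolution=10))                  # ~0.2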
def periodic_general(box: Box,
fractional_coordinates: bool=True,
wrapped: bool=True) -> Space:
"""Periodic boundary conditions on a parallelepiped.
This function defines a simulation on a parallelepiped, $X$, formed by
applying an affine transformation, $T$, to the unit hypercube
$U = [0, 1]^d$ along with periodic boundary conditions across all
of the faces.
Formally, the space is defined such that $X = {Tu : u \in [0, 1]^d}$.
The affine transformation, $T$, can be specified in a number of different
ways. For a parallelepiped that is: 1) a cube of side length $L$, the affine
transformation can simply be a scalar; 2) an orthorhombic unit cell can be
specified by a vector `[Lx, Ly, Lz]` of lengths for each axis; 3) a general
triclinic cell can be specified by an upper triangular matrix.
There are a number of ways to parameterize a simulation on $X$.
`periodic_general` supports two parametrizations of $X$ that can be selected
using the `fractional_coordinates` keyword argument.
1) When `fractional_coordinates=True`, particle positions are stored in the
unit cube, $u\in U$. Here, the displacement function computes the
displacement between $x, y \in X$ as $d_X(x, y) = Td_U(u, v)$ where
$d_U$ is the displacement function on the unit cube, $U$, $x = Tu$, and
$v = Tv$ with $u, v\in U$. The derivative of the displacement function
is defined so that derivatives live in $X$ (as opposed to being
backpropagated to $U$). The shift function, `shift_fn(R, dR)` is defined
so that $R$ is expected to lie in $U$ while $dR$ should lie in $X$. This
combination enables code such as `shift_fn(R, force_fn(R))` to work as
intended.
2) When `fractional_coordinates=False`, particle positions are stored in
the parallelepiped $X$. Here, for $x, y\in X$, the displacement function
is defined as $d_X(x, y) = Td_U(T^{-1}x, T^{-1}y)$. Since there is an
extra multiplication by $T^{-1}$, this parameterization is typically
    slower than `fractional_coordinates=True`. As in 1), the displacement
function is defined to compute derivatives in $X$. The shift function
is defined so that $R$ and $dR$ should both lie in $X$.
Example:
```python
from jax import random
side_length = 10.0
disp_frac, shift_frac = periodic_general(side_length,
fractional_coordinates=True)
disp_real, shift_real = periodic_general(side_length,
fractional_coordinates=False)
# Instantiate random positions in both parameterizations.
R_frac = random.uniform(random.PRNGKey(0), (4, 3))
R_real = side_length * R_frac
    # Make some shift vectors.
dR = random.normal(random.PRNGKey(0), (4, 3))
disp_real(R_real[0], R_real[1]) == disp_frac(R_frac[0], R_frac[1])
transform(side_length, shift_frac(R_frac, 1.0)) == shift_real(R_real, 1.0)
```
It is often desirable to deform a simulation cell either: using a finite
deformation during a simulation, or using an infinitesimal deformation while
computing elastic constants. To do this using fractional coordinates, we can
supply a new affine transformation as `displacement_fn(Ra, Rb, box=new_box)`.
When using real coordinates, we can specify positions in a space $X$ defined
by an affine transformation $T$ and compute displacements in a deformed space
$X'$ defined by an affine transformation $T'$. This is done by writing
`displacement_fn(Ra, Rb, new_box=new_box)`.
There are a few caveats when using `periodic_general`. `periodic_general`
uses the minimum image convention, and so it will fail for potentials whose
cutoff is longer than the half of the side-length of the box. It will also
fail to find the correct image when the box is too deformed. We hope to add a
more robust box for small simulations soon (TODO) along with better error
checking. In the meantime caution is recommended.
Args:
box: A `(spatial_dim, spatial_dim)` affine transformation.
fractional_coordinates: A boolean specifying whether positions are stored
in the parallelepiped or the unit cube.
wrapped: A boolean specifying whether or not particle positions are
remapped back into the box after each step
Returns:
(displacement_fn, shift_fn) tuple.
"""
inv_box = inverse(box)
def displacement_fn(Ra, Rb, **kwargs):
_box, _inv_box = box, inv_box
if 'box' in kwargs:
_box = kwargs['box']
if not fractional_coordinates:
_inv_box = inverse(_box)
if 'new_box' in kwargs:
_box = kwargs['new_box']
if not fractional_coordinates:
Ra = transform(_inv_box, Ra)
Rb = transform(_inv_box, Rb)
dR = periodic_displacement(f32(1.0), pairwise_displacement(Ra, Rb))
return transform(_box, dR)
def u(R, dR):
if wrapped:
return periodic_shift(f32(1.0), R, dR)
return R + dR
def shift_fn(R, dR, **kwargs):
if not fractional_coordinates and not wrapped:
return R + dR
_box, _inv_box = box, inv_box
if 'box' in kwargs:
_box = kwargs['box']
_inv_box = inverse(_box)
if 'new_box' in kwargs:
_box = kwargs['new_box']
dR = transform(_inv_box, dR)
if not fractional_coordinates:
R = transform(_inv_box, R)
R = u(R, dR)
if not fractional_coordinates:
R = transform(_box, R)
return R
return displacement_fn, shift_fn | 35dc501b0d5d897fb4e68d41d4e83dc6727a5feb | 15,851 |
import hmac
def calculate_mac(mac_type, credentials, options, url_encode=False):
"""Calculates a message authentication code (MAC)."""
normalized = normalize_string(mac_type, options)
digestmod = module_for_algorithm(credentials['algorithm'])
result = hmac.new(credentials['key'], normalized, digestmod)
if url_encode:
mac = urlsafe_b64encode(result.digest())
else:
mac = b64encode(result.digest())
return mac | 0701dbe3881ab500a70f3895af64d2ca6cb2905d | 15,852 |
import math
def run():
"""
Test Case - Fbx mesh group Import scaling in Atom:
    1. Creates a new level called MeshGroupingTemporaryLevel
    2. Has a list of meshes, and for each one:
        - Creates an entity and attaches the mesh to it.
        - Sets it with an initial offset of x:-15, y:0, z:0
        - For each additional mesh the x offset is modified by +3.0
    3. Enters game mode to take a screenshot for comparison, then exits game mode.
4. Prints general.log("FBX mesh group scaling test has completed.")
5. Exit the Editor and ends the test.
Tests will fail immediately if any of these log lines are found:
1. Trace::Assert
2. Trace::Error
3. Traceback (most recent call last):
:return: None
"""
def after_level_load():
"""Function to call after creating/opening a level to ensure it loads."""
# Give everything a second to initialize.
general.idle_enable(True)
general.update_viewport()
general.idle_wait(0.5) # half a second is more than enough for updating the viewport.
# Close out problematic windows, FPS meters, and anti-aliasing.
if general.is_helpers_shown(): # Turn off the helper gizmos if visible
general.toggle_helpers()
if general.is_pane_visible("Error Report"): # Close Error Report windows that block focus.
general.close_pane("Error Report")
if general.is_pane_visible("Error Log"): # Close Error Log windows that block focus.
general.close_pane("Error Log")
general.run_console("r_displayInfo=0")
general.run_console("r_antialiasingmode=0")
return True
# Create a new test level
test_level_name = 'MeshGroupingTemporaryLevel'
heightmap_resolution = 128
heightmap_meters_per_pixel = 1
terrain_texture_resolution = 128
use_terrain = False
# Return codes are ECreateLevelResult defined in CryEdit.h
return_code = general.create_level_no_prompt(
test_level_name, heightmap_resolution, heightmap_meters_per_pixel, terrain_texture_resolution, use_terrain)
if return_code == 1:
general.log(f"{test_level_name} level already exists")
elif return_code == 2:
general.log("Failed to create directory")
elif return_code == 3:
general.log("Directory length is too long")
elif return_code != 0:
general.log("Unknown error, failed to create level")
else:
general.log(f"{test_level_name} level created successfully")
after_level_load()
helper.init_idle()
helper.open_level(test_level_name)
general.idle_wait_frames(1)
# These are the meshes that are used to test FBX mesh import scaling.
meshes = [
"cube_group.azmodel",
"cube_parent.azmodel",
"cube_parent_plus_locator.azmodel",
"cube_parent_plus_locator_rotatez_90.azmodel",
"cube_parent__rotatez_90_locator.azmodel",
"cube_parent__scaley_2_locator.azmodel",
"cube_parent__transx_100_locator.azmodel"
]
# Initial offset values to iterate off of for mesh scaling of meshes.
offset = math.Vector3()
offset.x = -15.0
offset.y = 0.0
offset.z = 0.0
# For each mesh, create an entity and attach the mesh to it, then scale it using the values in offset.
meshIndex = 0
for mesh in meshes:
meshIndex = meshIndex + 1
offset.x += 3.0
entityName = "TestEntity{}".format(meshIndex)
helper_create_entity_with_mesh("dag_hierarchy/" + mesh, offset, entityName)
helper.enter_game_mode(["", ""])
# Example: how to capture a screenshot
general.set_viewport_size(1280, 720)
general.set_cvar_integer('r_DisplayInfo', 0)
general.idle_wait_frames(1)
ScreenshotHelper(general.idle_wait_frames).capture_screenshot_blocking(
"screenshot_atom_FBXMeshGroupImportScaling.dds")
helper.exit_game_mode(["", ""])
general.log("FBX mesh group scaling test has completed.")
helper.close_editor() | fb7c5194d755e277e14f778c6fe52f9c5d1a36be | 15,853 |
def get_prime(num_dict):
"""获取字典里所有的素数"""
prime_dict = {}
for key, value in num_dict.items():
if value:
prime_dict.update({key: key})
return prime_dict | 49c62ae43bfe5af15f191cd8d831e82ae56c766d | 15,854 |
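Usage sketch: the input maps numbers to a primality flag, and entries with a truthy value are kept.

print(get_prime({2: True, 3: True, 4: False, 5: True, 6: False}))
# -> {2: 2, 3: 3, 5: 5}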
def get_shared_keys(param_list):
"""
For the given list of parameter dictionaries, return a list of the dictionary
keys that appear in every parameter dictionary
>>> get_shared_keys([{'a':0, 'b':1, 'c':2, 'd':3}, {'a':0, 'b':1, 'c':3}, {'a':0, 'b':'beta'}])
['a', 'b']
>>> get_shared_keys([{'a':0, 'd':3}, {'a':0, 'b':1, 'c':2, 'd':3}, {'a':0, 'b':1, 'c':2}])
['a']
"""
if not param_list:
return
keys = set(param_list[0].keys())
for i in range(1, len(param_list)):
keys = keys.intersection(param_list[i].keys())
keys = list(keys)
keys.sort()
return keys | 0f6aa0df4d61ba166ac7d660be80a98fdbc29080 | 15,855 |
def labeledTest(*labels):
"""This decorator mark a class as an integrationTest
this is used in the test call for filtering integrationTest
and unittest.
We mark the difference by the usage of service dependency:
* An unittest can run without additional services.
* An integration test need additional services (such as
redis or postgres).
Usage:
@labeledTest("integration")
class FakeOutputTest(BaseApiTest):
pass
"""
def wrapper(cl):
cl._label = set(labels)
return cl
return wrapper | 4cb5adab516b19517066104d547d8efb0ae90cbd | 15,856 |
def birth(sim):
"""Similar to create agent, but just one individual"""
age = 0
qualification = int(sim.seed.gammavariate(3, 3))
qualification = [qualification if qualification < 21 else 20][0]
money = sim.seed.randrange(20, 40)
month = sim.seed.randrange(1, 13, 1)
gender = sim.seed.choice(['Male', 'Female'])
sim.total_pop += 1
a = Agent((sim.total_pop - 1), gender, age, qualification, money, month)
return a | c44323bb36b5807e4b25a12bb739150bd70e1b98 | 15,857 |
def offer_better_greeting():
"""Give player optional compliments."""
player = request.args["person"]
# if they didn't tick box, `wants_compliments` won't be
# in query args -- so let's use safe `.get()` method of
# dict-like things
wants = request.args.get("wants_compliments")
nice_things = sample(COMPLIMENTS, 3) if wants else []
return render_template("compliments.html",
compliments=nice_things,
name=player) | 9f65f9a1169262020f6ec227d44e0160a904f00f | 15,858 |
def get_trip_length(grouped_counts):
"""
Gets the frequency of the length of a trip for a customer
Args:
grouped_counts (Pandas.DataFrame): The grouped dataframe returned from
a get_trips method call
Returns:
Pandas.DataFrame: the dataframe containing the frequencies for each
trip length (in days)
"""
return frequency(grouped_counts, 0) | 974bb0fc7f0430d0e6605857dba22f7b036e3945 | 15,859 |
def extract_begin_end(data):
""" Finds nif:beginIndex and nif:endIndex values.
:param data: Data sent by the client.
:return: Begin index and end index, -1 if error.
"""
try:
begin = data.split("nif:beginIndex")[1].split("\"")[1]
end = data.split("nif:endIndex")[1].split("\"")[1]
return int(begin), int(end)
except IndexError:
return -1, -1 | d5f5ce211f645f10d6a0aed1c6446963f0c3fe3e | 15,860 |
def setup_parameters():
"""
Helper routine to fill in all relevant parameters
Note that this file will be used for all versions of SDC, containing more than necessary for each individual run
Returns:
description (dict)
controller_params (dict)
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-08
level_params['dt'] = 1E-02
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU']
sweeper_params['QE'] = ['EE']
sweeper_params['initial_guess'] = 'zero'
# This comes as read-in for the problem class
problem_params = dict()
problem_params['nu'] = 2
problem_params['L'] = 1.0
problem_params['nvars'] = [(256, 256), (64, 64)]
problem_params['eps'] = [0.04, 0.16]
problem_params['radius'] = 0.25
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
controller_params['hook_class'] = monitor
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = None # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = None # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh_fft2d
return description, controller_params | 6cadf729f8f796c6b07c94bf8b74913e6a893799 | 15,861 |
def get_operation(op, inplanes, outplanes, stride, conv_type):
"""Set up conv and pool operations."""
kernel_size = Ops.ops_to_kernel_size[op]
padding = [(k - 1) // 2 for k in kernel_size]
if op in Ops.pooling_ops:
if inplanes == outplanes:
return nn.AvgPool2d(kernel_size, stride=stride, padding=padding)
else:
return nn.Sequential(nn.Conv2d(inplanes, outplanes, 1, 1, 0),
nn.AvgPool2d(kernel_size, stride=stride, padding=padding))
else:
if conv_type == 'depthwise_separable':
return depthwise_separable_conv_general(inplanes, outplanes, stride, kernel_size, padding)
else:
return nn.Conv2d(inplanes, outplanes, kernel_size, stride, padding=padding) | f561db9c230236f3ead248e04cae198dbd9d4415 | 15,862 |
def encode(
structure_klifs_ids, fingerprints_filepath=None, local_klifs_download_path=None, n_cores=1
):
"""
Encode structures.
Parameters
----------
structure_klifs_ids : list of int
Structure KLIFS IDs.
fingerprints_filepath : str or pathlib.Path
Path to output json file. Default None.
local_klifs_download_path : str or None
If path to local KLIFS download is given, set up local KLIFS session.
If None is given, set up remote KLIFS session.
n_cores : int
Number of cores used to generate fingerprints.
Returns
-------
kissim.encoding.FingerprintGenerator
Fingerprints.
"""
# Set up KLIFS session
klifs_session = _setup_klifs_session(local_klifs_download_path)
# Generate fingerprints
fingerprints = FingerprintGenerator.from_structure_klifs_ids(
structure_klifs_ids, klifs_session, n_cores
)
# Optionally: Save fingerprints to json file
if fingerprints_filepath:
logger.info(f"Write fingerprints to file: {fingerprints_filepath}")
fingerprints.to_json(fingerprints_filepath)
return fingerprints | 7b5d3400455bdffc25e88cc07f58292f56ac6e12 | 15,863 |
def log_like_repressed(params, data_rep):
"""Conv wrapper for log likelihood for 2-state promoter w/
transcription bursts and repression.
data_rep: a list of arrays, each of which is n x 2, of form
data[:, 0] = SORTED unique mRNA counts
data[:, 1] = frequency of each mRNA count
Note the data pre-processing here, credit to Manuel for this observation:
'The likelihood asks for unique mRNA entries and their corresponding
counts to speed up the process of computing the probability distribution.
Instead of computing the probability of 3 mRNAs n times, it computes it
once and multiplies the value by n.'
This also reduces the size of the data arrays by ~10-fold,
which reduces the time penalty of emcee's pickling
to share the data within the multiprocessing Pool.
"""
# kR_list contains, in order, kRon_0p5, kRon_1, kRon_2, kRon_10, kRoff
k_burst, mean_burst, *kR_list = params
params_local = np.array([k_burst, mean_burst, 0, kR_list[-1]])
target = 0
for i, expt in enumerate(data_rep):
max_m = expt[0].max()
# kRoff is never plugged in below b/c loop terminates first
params_local[2] = kR_list[i]
# note log_probs contains values for ALL m < max_m,
# not just those in the data set...
log_probs = srep.models.log_prob_m_bursty_rep(max_m, *params_local)
# ...so extract just the ones we want & * by their occurence
target += np.sum(expt[1] * log_probs[expt[0]])
return target | 8451de8f1c578c8343bd1c91d1dc0326b51cc5a3 | 15,864 |
import numpy as np
def freq_mask(spec, F=30, num_masks=1, pad_value=0.):
"""Frequency masking
Args:
spec (torch.Tensor): input tensor of shape `(dim, T)`
F (int): maximum width of each mask
num_masks (int): number of masks
pad_value (float): value for padding
Returns:
freq masked tensor (torch.Tensor): output tensor of shape `(dim, T)`
"""
cloned = spec.clone()
num_mel_channels = cloned.size(0)
for i in range(num_masks):
f = np.random.randint(0, F + 1)
f_zero = np.random.randint(0, num_mel_channels - f + 1)
if f == 0:
continue
cloned[f_zero:f_zero + f] = pad_value
return cloned | 714dac7127e4dd1e790df016296321f97cfe37c7 | 15,865 |
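A minimal sketch, assuming torch is available: mask up to two frequency bands of an 80-bin, 100-frame spectrogram.

import torch

spec = torch.randn(80, 100)
masked = freq_mask(spec, F=15, num_masks=2, pad_value=0.)
print(masked.shape)                      # torch.Size([80, 100])
print((masked == 0.).all(dim=1).sum())   # how many frequency bins were fully masked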
def prepare_file_hierarchy(path):
"""
Create a temporary folder structure like the following:
test_find_dotenv0/
└── child1
├── child2
│ └── child3
│ └── child4
└── .env
Then try to automatically `find_dotenv` starting in `child4`
"""
curr_dir = path
dirs = []
for f in ['child1', 'child2', 'child3', 'child4']:
curr_dir /= f
dirs.append(curr_dir)
curr_dir.mkdir()
return (dirs[0], dirs[-1]) | 25b66a7bc728f8f4b90cd9d8e678c914d2d60be9 | 15,866 |
def cmd2dict(cmd):
"""Returns a dictionary of what to replace each value by."""
pixel_count = cmd[cmd.shape[0] - 1, cmd.shape[1] - 1]
scaling_dict = dict()
for i in range(0, cmd.shape[0]):
scaling_dict[cmd[i, 0]] = round(
((cmd[i, 1] - cmd[0, 1]) / (pixel_count - cmd[0, 1])) * 255
)
return scaling_dict | 17f28fdcc5497c7d8d6aa55bbc61460e988586eb | 15,867 |
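# Worked example for `cmd2dict`, assuming `cmd` is an (N, 2) array whose first
# column is the pixel value and whose second column is the cumulative pixel
# count (a CDF); the numbers below are illustrative only.
import numpy as np

cmd = np.array([
    [0,  10],    # 10 pixels have value <= 0
    [1,  40],
    [2, 100],    # total pixel count
])
print(cmd2dict(cmd))   # maps 0 -> 0, 1 -> 85, 2 -> 255 (histogram-equalisation style)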
def cached_part(query, cache=None):
"""Get cached part of the query.
Use either supplied cache object or global cache object (default).
    In the process, the query is split into two parts: the beginning of the query
    and the remainder. The function tries to find the longest possible beginning of the query
which is cached, then returns the cached state and the remainder of the query.
(query == state.query + "/" + remainder)
"""
if cache is None:
cache = get_cache()
if isinstance(
cache, NoCache
): # Just an optimization - to avoid looping over all query splits
return State(), encode(decode(query))
for key, remainder in all_splits(query):
if key == "":
return State(), remainder
if cache.contains(key):
state = cache.get(key)
if state is None:
continue
return state, remainder
# Should never get here, but this is a sensible default:
return State(), encode(decode(query)) | c1b8d9589b12171ae11e2f49911142252f54d9cd | 15,868 |
def exist_key(bucket: str, key: str) -> bool:
"""Exist key or not.
Args:
bucket (str): S3 bucket name.
key (str): Object key.
Returns:
bool: Exist or not.
"""
try:
s3.Object(bucket, key).get()
except s3.meta.client.exceptions.NoSuchKey:
return False
return True | 1e47467c85d0461d76f0d562a2ee9c7cff5dbf4e | 15,869 |
import os
import fnmatch
def load_all_files(dir):
"""Returns all of the csharp source files."""
result = []
for root, dirnames, filenames in os.walk(dir):
if 'obj\\' not in root and 'bin\\' not in root:
for name in fnmatch.filter(filenames, '*.cs'):
result.append(SourceFile(os.path.join(root, name)))
return result | 5b829d588053f1a464edf4537aff168ab9f801a5 | 15,870 |
from nltk.translate.bleu_score import sentence_bleu
def calculate_bleu_score(candidate_file: str, reference_file: str) -> float:
"""
Calculates the average BLEU score of the given files, interpreting each line as a sentence.
Partially taken from https://stackoverflow.com/a/49886758/3918865.
Args:
candidate_file: the name of the file that contains the candidate sentences (hypotheses)
reference_file: the name of the file that contains the reference sentences (targets)
Returns:
the average BLEU score
"""
candidate = open(candidate_file, 'r').readlines()
reference = open(reference_file, 'r').readlines()
num_candidates = len(candidate)
reference = reference[:num_candidates]
assert len(reference) == len(candidate), 'Make sure there are at least as many references as candidates.'
score = 0.
for i in range(len(reference)):
ref = reference[i].strip()
cand = candidate[i].strip()
score_i = sentence_bleu([ref.split()], cand.split(), weights=(0.5, 0.5))
score += score_i
score /= num_candidates
return score | 00e6f6a852171f34b92598193fe1b08c60ba328b | 15,871 |
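# Hedged usage example for `calculate_bleu_score`: score one candidate sentence
# against an identical reference via temporary files (contents are made up).
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as cand, \
        tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as ref:
    cand.write("the cat sat on the mat\n")
    ref.write("the cat sat on the mat\n")
print(calculate_bleu_score(cand.name, ref.name))   # 1.0 for an exact match
os.remove(cand.name)
os.remove(ref.name)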
def _read_id_not_in_dict(read_ids, read_dict):
"""Return True if all read_ids in a list are not in the read_dict keys, otherwise False"""
for read_id in read_ids:
if read_id not in read_dict.keys():
return True
return False | 3a0e0926ed33f65cc67139311af1c860f3e371ae | 15,872 |
def generate_spectra_products(dataset, prdcfg):
"""
generates spectra products. Accepted product types:
'AMPLITUDE_PHASE_ANGLE_DOPPLER': Makes an angle Doppler plot of
            complex spectra or IQ data. The plot can be along azimuth or along
            elevation. The module and the phase of the signal are plotted
            separately.
User defined parameters:
along_azi : bool
If true the plot is performed along azimuth, otherwise
along elevation. Default true
ang : float
The fixed angle (deg). Default 0.
rng : float
The fixed range (m). Default 0.
ang_tol : float
The fixed angle tolerance (deg). Default 1.
rng_tol : float
The fixed range tolerance (m). Default 50.
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
'AMPLITUDE_PHASE_DOPPLER': Plots a complex Doppler spectrum or IQ data
making two separate plots for the module and phase of the signal
User defined parameters:
azi, ele, rng : float
azimuth and elevation (deg) and range (m) of the ray to
plot
                azi_tol, ele_tol, rng_tol : float
azimuth and elevation (deg) and range (m) tolerance
respect to nominal position to plot. Default 1, 1, 50.
ind_ray, ind_rng : int
index of the ray and range to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
'AMPLITUDE_PHASE_RANGE_DOPPLER': Plots a complex spectra or IQ data
range-Doppler making two separate plots for the module and phase
            of the signal.
            User defined parameters:
azi, ele : float
azimuth and elevation (deg) of the ray to plot
                azi_tol, ele_tol : float
azimuth and elevation (deg) tolerance respect to nominal
position to plot. Default 1, 1.
ind_ray : int
index of the ray to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
'AMPLITUDE_PHASE_TIME_DOPPLER': Plots a complex spectra or IQ data
time-Doppler making two separate plots for the module and phase of
the signal
User defined parameters:
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity' or
'Doppler frequency'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
plot_type : str
Can be 'final' or 'temporal'. If final the data is only
plotted at the end of the processing
'ANGLE_DOPPLER': Makes an angle Doppler plot. The plot can be along
            azimuth or along elevation
User defined parameters:
along_azi : bool
If true the plot is performed along azimuth, otherwise
along elevation. Default true
ang : float
The fixed angle (deg). Default 0.
rng : float
The fixed range (m). Default 0.
ang_tol : float
The fixed angle tolerance (deg). Default 1.
rng_tol : float
The fixed range tolerance (m). Default 50.
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_ANGLE_DOPPLER': Makes an angle Doppler plot of complex
spectra or IQ data. The plot can be along azimuth or along range.
The real and imaginary parts are plotted separately
User defined parameters:
along_azi : bool
If true the plot is performed along azimuth, otherwise
along elevation. Default true
ang : float
The fixed angle (deg). Default 0.
rng : float
The fixed range (m). Default 0.
ang_tol : float
The fixed angle tolerance (deg). Default 1.
rng_tol : float
The fixed range tolerance (m). Default 50.
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_DOPPLER': Plots a complex Doppler spectrum or IQ data making
two separate plots for the real and imaginary parts
User defined parameters:
azi, ele, rng : float
azimuth and elevation (deg) and range (m) of the ray to
plot
                azi_tol, ele_tol, rng_tol : float
azimuth and elevation (deg) and range (m) tolerance
respect to nominal position to plot. Default 1, 1, 50.
ind_ray, ind_rng : int
index of the ray and range to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_RANGE_DOPPLER': Plots the complex spectra or IQ data
range-Doppler making two separate plots for the real and imaginary
parts
User defined parameters:
azi, ele : float
azimuth and elevation (deg) of the ray to plot
                azi_tol, ele_tol : float
azimuth and elevation (deg) tolerance respect to nominal
position to plot. Default 1, 1.
ind_ray : int
index of the ray to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_TIME_DOPPLER': Plots the complex spectra or IQ data
time-Doppler making two separate plots for the real and imaginary
parts
User defined parameters:
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity' or
'Doppler frequency'
vmin, vmax : float or None
Minimum and maximum of the color scale
plot_type : str
Can be 'final' or 'temporal'. If final the data is only
plotted at the end of the processing
'DOPPLER': Plots a Doppler spectrum variable or IQ data variable
User defined parameters:
azi, ele, rng : float
azimuth and elevation (deg) and range (m) of the ray to
plot
                azi_tol, ele_tol, rng_tol : float
azimuth and elevation (deg) and range (m) tolerance
respect to nominal position to plot. Default 1, 1, 50.
ind_ray, ind_rng : int
index of the ray and range to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'RANGE_DOPPLER': Makes a range-Doppler plot of spectral or IQ data
User defined parameters:
azi, ele : float
azimuth and elevation (deg) of the ray to plot
                azi_tol, ele_tol : float
azimuth and elevation (deg) tolerance respect to nominal
position to plot. Default 1, 1.
ind_ray : int
index of the ray to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'SAVEALL': Saves radar spectra or IQ volume data including all or a
            list of user-defined fields in a netcdf file
User defined parameters:
datatypes: list of str or None
The list of data types to save. If it is None, all fields
in the radar object will be saved
physical: Bool
If True the data will be saved in physical units (floats).
Otherwise it will be quantized and saved as binary
'SAVEVOL': Saves one field of a radar spectra or IQ volume data in a
netcdf file
User defined parameters:
physical: Bool
If True the data will be saved in physical units (floats).
Otherwise it will be quantized and saved as binary
'TIME_DOPPLER': Makes a time-Doppler plot of spectral or IQ data at a
point of interest.
User defined parameters:
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
plot_type : str
Can be 'final' or 'temporal'. If final the data is only
plotted at the end of the processing
Parameters
----------
dataset : spectra
spectra object
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
None or name of generated files
"""
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
if prdcfg['type'] == 'RANGE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
if azi is None or ele is None:
ind_ray = prdcfg.get('ind_ray', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray is None:
warn('Ray azi='+str(azi)+', ele='+str(ele) +
' out of radar coverage')
return None
gateinfo = 'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].ngates == 1:
plot_Doppler(
dataset['radar_out'], field_name, ind_ray, 0, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_range_Doppler(
dataset['radar_out'], field_name, ind_ray, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'ANGLE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
along_azi = prdcfg.get('along_azi', True)
ang = prdcfg.get('ang', 0)
rng = prdcfg.get('rng', 0)
ang_tol = prdcfg.get('ang_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No data at rng='+str(rng))
return None
if along_azi:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].elevation['data'] <= ang+ang_tol,
dataset['radar_out'].elevation['data'] >= ang-ang_tol))[0]
else:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].azimuth['data'] <= ang+ang_tol,
dataset['radar_out'].azimuth['data'] >= ang-ang_tol))[0]
if ind_rays.size == 0:
warn('No data for angle '+str(ang))
return None
# sort angles
if along_azi:
ang_selected = dataset['radar_out'].azimuth['data'][ind_rays]
else:
ang_selected = dataset['radar_out'].elevation['data'][ind_rays]
ind_rays = ind_rays[np.argsort(ang_selected)]
if along_azi:
gateinfo = 'azi'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
else:
gateinfo = 'ele'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if ind_rays.size == 1:
plot_Doppler(
dataset['radar_out'], field_name, ind_rays, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_angle_Doppler(
dataset['radar_out'], field_name, ang, ind_rays, ind_rng,
prdcfg, fname_list, xaxis_info=xaxis_info,
along_azi=along_azi, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'TIME_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
xmin = prdcfg.get('xmin', None)
xmax = prdcfg.get('xmax', None)
ymin = prdcfg.get('ymin', None)
ymax = prdcfg.get('ymax', None)
plot_type = prdcfg.get('plot_type', 'final')
if plot_type == 'final' and not dataset['final']:
return None
if 'antenna_coordinates_az_el_r' in dataset:
az = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][0])
el = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][1])
r = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][2])
gateinfo = ('az'+az+'r'+r+'el'+el)
else:
lon = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][0])
lat = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][1])
alt = '{:.1f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][2])
gateinfo = ('lon'+lon+'lat'+lat+'alt'+alt)
time_info = datetime_from_radar(dataset['radar_out'])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=time_info)
fname_list = make_filename(
'time_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=time_info, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].nrays == 1:
plot_Doppler(
dataset['radar_out'], field_name, 0, 0, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_time_Doppler(
dataset['radar_out'], field_name, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax, xmin=xmin,
xmax=xmax, ymin=ymin, ymax=ymax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
rng = prdcfg.get('rng', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
if azi is None or ele is None or rng is None:
ind_ray = prdcfg.get('ind_ray', 0)
ind_rng = prdcfg.get('ind_rng', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
rng = dataset['radar_out'].range['data'][ind_rng]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None or ind_ray is None:
warn('Point azi='+str(azi)+', ele='+str(ele)+', rng='+str(rng) +
' out of radar coverage')
return None
gateinfo = (
'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele) +
'r'+'{:.1f}'.format(rng))
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
plot_Doppler(
dataset['radar_out'], field_name, ind_ray, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_RANGE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
if azi is None or ele is None:
ind_ray = prdcfg.get('ind_ray', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray is None:
warn('Ray azi='+str(azi)+', ele='+str(ele) +
' out of radar coverage')
return None
gateinfo = 'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'c_range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].ngates == 1:
plot_complex_Doppler(
dataset['radar_out'], field_name, ind_ray, 0, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_complex_range_Doppler(
dataset['radar_out'], field_name, ind_ray, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_ANGLE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
along_azi = prdcfg.get('along_azi', True)
ang = prdcfg.get('ang', 0)
rng = prdcfg.get('rng', 0)
ang_tol = prdcfg.get('ang_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No data at rng='+str(rng))
return None
if along_azi:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].elevation['data'] <= ang+ang_tol,
dataset['radar_out'].elevation['data'] >= ang-ang_tol))[0]
else:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].azimuth['data'] <= ang+ang_tol,
dataset['radar_out'].azimuth['data'] >= ang-ang_tol))[0]
if ind_rays.size == 0:
warn('No data for angle '+str(ang))
return None
# sort angles
if along_azi:
ang_selected = dataset['radar_out'].azimuth['data'][ind_rays]
else:
ang_selected = dataset['radar_out'].elevation['data'][ind_rays]
ind_rays = ind_rays[np.argsort(ang_selected)]
if along_azi:
gateinfo = 'azi'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
else:
gateinfo = 'ele'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if ind_rays.size == 1:
plot_complex_Doppler(
dataset['radar_out'], field_name, ind_rays, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_complex_angle_Doppler(
dataset['radar_out'], field_name, ang, ind_rays, ind_rng,
prdcfg, fname_list, xaxis_info=xaxis_info,
along_azi=along_azi, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_TIME_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
plot_type = prdcfg.get('plot_type', 'final')
if plot_type == 'final' and not dataset['final']:
return None
if 'antenna_coordinates_az_el_r' in dataset:
az = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][0])
el = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][1])
r = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][2])
gateinfo = ('az'+az+'r'+r+'el'+el)
else:
lon = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][0])
lat = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][1])
alt = '{:.1f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][2])
gateinfo = ('lon'+lon+'lat'+lat+'alt'+alt)
time_info = datetime_from_radar(dataset['radar_out'])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=time_info)
fname_list = make_filename(
'c_time_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=time_info, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].nrays == 1:
plot_complex_Doppler(
dataset['radar_out'], field_name, 0, 0, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_complex_time_Doppler(
dataset['radar_out'], field_name, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
rng = prdcfg.get('rng', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
if azi is None or ele is None or rng is None:
ind_ray = prdcfg.get('ind_ray', 0)
ind_rng = prdcfg.get('ind_rng', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
rng = dataset['radar_out'].range['data'][ind_rng]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None or ind_ray is None:
warn('Point azi='+str(azi)+', ele='+str(ele)+', rng='+str(rng) +
' out of radar coverage')
return None
gateinfo = (
'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele) +
'r'+'{:.1f}'.format(rng))
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'c_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
plot_complex_Doppler(
dataset['radar_out'], field_name, ind_ray, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
rng = prdcfg.get('rng', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
if azi is None or ele is None or rng is None:
ind_ray = prdcfg.get('ind_ray', 0)
ind_rng = prdcfg.get('ind_rng', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
rng = dataset['radar_out'].range['data'][ind_rng]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None or ind_ray is None:
warn('Point azi='+str(azi)+', ele='+str(ele)+', rng='+str(rng) +
' out of radar coverage')
return None
gateinfo = (
'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele) +
'r'+'{:.1f}'.format(rng))
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'ap_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, ind_ray, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_RANGE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
if azi is None or ele is None:
ind_ray = prdcfg.get('ind_ray', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray is None:
warn('Ray azi='+str(azi)+', ele='+str(ele) +
' out of radar coverage')
return None
gateinfo = 'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'ap_range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].ngates == 1:
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, ind_ray, 0, prdcfg,
fname_list, xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
else:
plot_amp_phase_range_Doppler(
dataset['radar_out'], field_name, ind_ray, prdcfg, fname_list,
xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_ANGLE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
along_azi = prdcfg.get('along_azi', True)
ang = prdcfg.get('ang', 0)
rng = prdcfg.get('rng', 0)
ang_tol = prdcfg.get('ang_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No data at rng='+str(rng))
return None
if along_azi:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].elevation['data'] <= ang+ang_tol,
dataset['radar_out'].elevation['data'] >= ang-ang_tol))[0]
else:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].azimuth['data'] <= ang+ang_tol,
dataset['radar_out'].azimuth['data'] >= ang-ang_tol))[0]
if ind_rays.size == 0:
warn('No data for angle '+str(ang))
return None
# sort angles
if along_azi:
ang_selected = dataset['radar_out'].azimuth['data'][ind_rays]
else:
ang_selected = dataset['radar_out'].elevation['data'][ind_rays]
ind_rays = ind_rays[np.argsort(ang_selected)]
if along_azi:
gateinfo = 'azi'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
else:
gateinfo = 'ele'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if ind_rays.size == 1:
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, ind_rays, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
else:
plot_amp_phase_angle_Doppler(
dataset['radar_out'], field_name, ang, ind_rays, ind_rng,
prdcfg, fname_list, xaxis_info=xaxis_info,
along_azi=along_azi, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_TIME_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
plot_type = prdcfg.get('plot_type', 'final')
if plot_type == 'final' and not dataset['final']:
return None
if 'antenna_coordinates_az_el_r' in dataset:
az = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][0])
el = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][1])
r = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][2])
gateinfo = ('az'+az+'r'+r+'el'+el)
else:
lon = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][0])
lat = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][1])
alt = '{:.1f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][2])
gateinfo = ('lon'+lon+'lat'+lat+'alt'+alt)
time_info = datetime_from_radar(dataset['radar_out'])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=time_info)
fname_list = make_filename(
'ap_time_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=time_info, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].nrays == 1:
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, 0, 0, prdcfg, fname_list,
xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
else:
plot_amp_phase_time_Doppler(
dataset['radar_out'], field_name, prdcfg, fname_list,
xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'SAVEVOL':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
file_type = prdcfg.get('file_type', 'nc')
physical = prdcfg.get('physical', True)
new_dataset = deepcopy(dataset['radar_out'])
new_dataset.fields = dict()
new_dataset.add_field(
field_name, dataset['radar_out'].fields[field_name])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname = make_filename(
'savevol', prdcfg['dstype'], prdcfg['voltype'], [file_type],
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])[0]
fname = savedir+fname
pyart.aux_io.write_spectra(fname, new_dataset, physical=physical)
print('saved file: '+fname)
return fname
if prdcfg['type'] == 'SAVEALL':
file_type = prdcfg.get('file_type', 'nc')
datatypes = prdcfg.get('datatypes', None)
physical = prdcfg.get('physical', True)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname = make_filename(
'savevol', prdcfg['dstype'], 'all_fields', [file_type],
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])[0]
fname = savedir+fname
field_names = None
if datatypes is not None:
field_names = []
for datatype in datatypes:
field_names.append(get_fieldname_pyart(datatype))
if field_names is not None:
radar_aux = deepcopy(dataset['radar_out'])
radar_aux.fields = dict()
for field_name in field_names:
if field_name not in dataset['radar_out'].fields:
warn(field_name+' not in radar object')
else:
radar_aux.add_field(
field_name,
dataset['radar_out'].fields[field_name])
else:
radar_aux = dataset['radar_out']
pyart.aux_io.write_spectra(fname, radar_aux, physical=physical)
print('saved file: '+fname)
return fname
warn(' Unsupported product type: ' + prdcfg['type'])
return None | ba279b7331fda0fdcb2ef506e91d13bd11f37d2f | 15,873 |
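# Illustrative product configuration for the 'DOPPLER' branch above. The key
# names follow what generate_spectra_products reads; every value is a
# placeholder (assumption), not taken from a real pyrad configuration.
example_prdcfg = {
    'type': 'DOPPLER', 'dstype': 'RAW_IQ', 'dsname': 'spectra_dataset',
    'voltype': 'sPhhADU', 'imgformat': ['png'],
    'basepath': '/tmp/pyrad/', 'procname': 'test_proc', 'prdname': 'doppler_plot',
    'runinfo': 'TEST', 'timeinfo': None,                 # normally a datetime object
    # user-defined parameters documented for this product type:
    'azi': 45., 'ele': 2.5, 'rng': 5000.,
    'azi_tol': 1., 'ele_tol': 1., 'rng_tol': 50.,
    'xaxis_info': 'Doppler_velocity', 'vmin': None, 'vmax': None,
}
# fname_list = generate_spectra_products(dataset, example_prdcfg)  # needs a pyrad spectra dataset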
def odds_or_evens(my_bool, nums):
"""Returns all of the odd or
even numbers from a list"""
return_list = []
for num in nums:
if my_bool:
if num % 2 == 0:
return_list.append(num)
else:
if num % 2 != 0:
return_list.append(num)
return return_list | 02b3b12acbaae10b2b0e05eec059f6571c576e80 | 15,874 |
import numpy
def local_mass_diagonal(quad_data, basis):
"""Constructs the elemental mass matrix, diagonal version
Arguments:
quad_data - Quadrature points and weights
basis - Basis and respective derivatives
Returns:
Mass matrix M, where m_ii = \int_k psi_i psi_i
"""
return numpy.sum(quad_data.w*basis.psi.T**2, axis=1) | ffaf34df758e73dea0db3ecb66de658991d8de58 | 15,875 |
def create_saved_group(uuid=None):
"""Create and save a Sample Group with all the fixings (plus gravy)."""
if uuid is None:
uuid = uuid4()
analysis_result = AnalysisResultMeta().save()
group_description = 'Includes factory-produced analysis results from all display_modules'
sample_group = SampleGroup(name='Fuzz Testing',
analysis_result=analysis_result,
description=group_description)
sample_group.id = uuid
db.session.add(sample_group)
db.session.commit()
# Add the results
analysis_result.average_genome_size = wrap_result(AGSFactory())
analysis_result.card_amr_genes = wrap_result(CARDGenesFactory())
analysis_result.functional_genes = wrap_result(FunctionalGenesFactory())
analysis_result.hmp = wrap_result(HMPFactory())
analysis_result.macrobe_abundance = wrap_result(MacrobeFactory())
analysis_result.methyltransferases = wrap_result(MethylsFactory())
analysis_result.microbe_directory = wrap_result(MicrobeDirectoryFactory())
analysis_result.pathways = wrap_result(PathwayFactory())
analysis_result.read_stats = wrap_result(ReadStatsFactory())
analysis_result.reads_classified = wrap_result(ReadsClassifiedFactory())
analysis_result.sample_similarity = wrap_result(create_mvp_sample_similarity())
# analysis_result.taxon_abundance =
analysis_result.virulence_factors = wrap_result(VFDBFactory())
analysis_result.save()
return sample_group | 7a9929518b44f6266f32300177385040b3da41c0 | 15,876 |
import copy
def words_to_indexes(tree):
"""Return a new tree based on the original tree, such that the leaf values
    are replaced by their 1-based indexes."""
out = copy.deepcopy(tree)
leaves = out.leaves()
for index in range(0, len(leaves)):
path = out.leaf_treeposition(index)
out[path] = index + 1
return out | 99e4ad2aa1d318af21d934aee2128b8d7b51a99f | 15,877 |
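# Example for `words_to_indexes` on a small (hypothetical) nltk parse tree.
from nltk.tree import Tree

t = Tree.fromstring("(S (NP the cat) (VP sat))")
print(words_to_indexes(t))   # (S (NP 1 2) (VP 3)) -- leaves become 1-based indexes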
def get_stoplist_names():
"""Return list of stoplist names"""
config = configuration()
return [name for name, value in config.items('stoplists')] | a93dec87fe840a1fab9d63527e7b54ae8a1c7cf5 | 15,878 |
def any_(criterions):
"""Return a stop criterion that given a list `criterions` of stop criterions
only returns True, if any of the criterions returns True.
This basically implements a logical OR for stop criterions.
"""
def inner(info):
return any(c(info) for c in criterions)
return inner | 600e7c1516cba6f0cd73812bcd43d5e194aa33d2 | 15,879 |
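# Tiny example for `any_`: combine two stop criterions into a logical OR.
# `info` is assumed to be a dict; the keys below are made up for illustration.
max_iter_reached = lambda info: info['n_iter'] >= 100
loss_converged = lambda info: info['loss'] < 1e-4
stop = any_([max_iter_reached, loss_converged])
print(stop({'n_iter': 3, 'loss': 0.5}))    # False
print(stop({'n_iter': 3, 'loss': 1e-6}))   # True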
import numpy as np
def validate_basic(params, length, allow_infnan=False, title=None):
"""
Validate parameter vector for basic correctness.
Parameters
----------
params : array_like
Array of parameters to validate.
length : int
Expected length of the parameter vector.
allow_infnan : bool, optional
Whether or not to allow `params` to contain -np.Inf, np.Inf, and
np.nan. Default is False.
title : str, optional
Description of the parameters (e.g. "autoregressive") to use in error
messages.
Returns
-------
params : ndarray
Array of validated parameters.
Notes
-----
Basic check that the parameters are numeric and that they are the right
shape. Optionally checks for NaN / infinite values.
"""
title = '' if title is None else ' for %s' % title
# Check for invalid type and coerce to non-integer
try:
params = np.array(params, dtype=object)
is_complex = [isinstance(p, complex) for p in params.ravel()]
dtype = complex if any(is_complex) else float
params = np.array(params, dtype=dtype)
except TypeError:
raise ValueError('Parameters vector%s includes invalid values.'
% title)
# Check for NaN, inf
if not allow_infnan and (np.any(np.isnan(params)) or
np.any(np.isinf(params))):
raise ValueError('Parameters vector%s includes NaN or Inf values.'
% title)
params = np.atleast_1d(np.squeeze(params))
# Check for right number of parameters
if params.shape != (length,):
plural = '' if length == 1 else 's'
raise ValueError('Specification%s implies %d parameter%s, but'
' values with shape %s were provided.'
% (title, length, plural, params.shape))
return params | c3567a7f08656c3b815eded0a6788d904b5820a5 | 15,880 |
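# Hedged usage example for `validate_basic`; the parameter values are arbitrary.
print(validate_basic([0.5, -0.2], 2, title="autoregressive"))  # [ 0.5 -0.2]
try:
    validate_basic([0.5], 2, title="autoregressive")
except ValueError as exc:
    print(exc)  # Specification for autoregressive implies 2 parameters, but ...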
import torch
def unsorted_segment_sum(data, segment_ids, num_segments):
"""
Computes the sum along segments of a tensor. Analogous to tf.unsorted_segment_sum.
:param data: A tensor whose segments are to be summed.
:param segment_ids: The segment indices tensor.
:param num_segments: The number of segments.
:return: A tensor of same data type as the data argument.
"""
assert all([i in data.shape for i in segment_ids.shape]), "segment_ids.shape should be a prefix of data.shape"
# segment_ids is a 1-D tensor repeat it to have the same shape as data
if len(segment_ids.shape) == 1:
s = torch.prod(torch.tensor(data.shape[1:])).long()
segment_ids = segment_ids.repeat_interleave(s).view(segment_ids.shape[0], *data.shape[1:])
assert data.shape == segment_ids.shape, "data.shape and segment_ids.shape should be equal"
shape = [num_segments] + list(data.shape[1:])
tensor = torch.zeros(*shape).scatter_add(0, segment_ids, data.float())
tensor = tensor.type(data.dtype)
return tensor | 7d8686d35afab975bff05d3cda50d1ceae537ab9 | 15,881 |
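# Usage example for `unsorted_segment_sum`, mirroring tf.unsorted_segment_sum:
# rows 0 and 2 share segment 0, row 1 goes to segment 1.
import torch

data = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])
segment_ids = torch.tensor([0, 1, 0])
print(unsorted_segment_sum(data, segment_ids, num_segments=2))
# tensor([[6., 8.],
#         [3., 4.]])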
def create_parameters(address: str) -> dict:
"""Create parameters for address.
    This function builds the request parameters for the geocoder
    and then returns them as a dictionary.
    Args:
        address (str): the address to build parameters for
    Returns:
        dict: a dictionary with the API key and the geocode for the given address
"""
address_to_string = address.replace(" ", "+")
params = {'apikey': developer_key,
'geocode': address_to_string}
return params | 9ad1723cf2bec66e366e83b814ee746cdddf8289 | 15,882 |
import re
def standardizeName(name):
"""
Remove stuff not used by bngl
"""
name2 = name
sbml2BnglTranslationDict = {
"^": "",
"'": "",
"*": "m",
" ": "_",
"#": "sh",
":": "_",
"α": "a",
"β": "b",
"γ": "g",
" ": "",
"+": "pl",
"/": "_",
":": "_",
"-": "_",
".": "_",
"?": "unkn",
",": "_",
"(": "",
")": "",
"[": "",
"]": "",
# "(": "__",
# ")": "__",
# "[": "__",
# "]": "__",
">": "_",
"<": "_",
}
for element in sbml2BnglTranslationDict:
name = name.replace(element, sbml2BnglTranslationDict[element])
name = re.sub("[\W]", "", name)
return name | 33caf35feb0c9dcc042add501a4470b1ccbd3b1c | 15,883 |
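# Quick example of `standardizeName` cleaning an SBML-style species name;
# characters not covered by the translation dict are stripped by the final regex.
print(standardizeName("IkB-alpha/NF-kB"))   # -> IkB_alpha_NF_kB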
def _learning_rate_decay(hparams, warmup_steps=0):
"""Learning rate decay multiplier."""
scheme = hparams.learning_rate_decay_scheme
warmup_steps = tf.to_float(warmup_steps)
global_step = tf.to_float(tf.train.get_or_create_global_step())
if not scheme or scheme == "none":
return tf.constant(1.)
tf.logging.info("Applying learning rate decay: %s.", scheme)
if scheme == "exp":
decay_steps = hparams.learning_rate_decay_steps
p = (global_step - warmup_steps) / decay_steps
if hparams.learning_rate_decay_staircase:
p = tf.floor(p)
return tf.pow(hparams.learning_rate_decay_rate, p)
if scheme == "piecewise":
return _piecewise_learning_rate(global_step,
hparams.learning_rate_boundaries,
hparams.learning_rate_multiples)
if scheme == "cosine":
cycle_steps = hparams.learning_rate_cosine_cycle_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position)
return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps))
if scheme == "cyclelinear10x":
# Cycle the rate linearly by 10x every warmup_steps, up and down.
cycle_steps = warmup_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = tf.to_float( # Normalize to the interval [-1, 1].
cycle_position - cycle_steps) / float(cycle_steps)
cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0.
return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3).
if scheme == "sqrt":
return _legacy_sqrt_decay(global_step - warmup_steps)
raise ValueError("Unrecognized learning rate decay scheme: %s" %
hparams.learning_rate_decay_scheme) | 4d171ef2cb13d2f103ac722be12cd594b6533c60 | 15,884 |
import math
import os
def new_model(save_dir, integer_tokens, batch_size=128,
vocab_size=50000, embedding_size=128,
num_negative=64, num_steps=100001,
num_skips=2, skip_window=1):
"""
Create a new Word2Vec model with token
vectors generated in the 'tokens' step.
Parameters
----------
save_dir : str
Path to the output directory where model will be saved
integer_tokens : str
Path to the 1D token vectors
"""
# Create TF graph
with tf.device('/gpu:0'):
graph = tf.Graph()
with graph.as_default():
# If we are on the first run, initialize everything as normal
train_inputs = tf.placeholder(tf.int32, shape=[batch_size],
name="train_inputs")
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1],
name="train_labels")
with tf.device('/cpu:0'):
# Start embeddings w/ values uniformly distributed
# between -1 and 1
embeddings = tf.Variable(tf.random_uniform([
vocab_size,
embedding_size
], -1.0, 1.0), name="embeddings")
# Translates the train_inputs into the corresponding embedding
embed = tf.nn.embedding_lookup(embeddings, train_inputs,
name="embedding_op")
# Construct the variables for the noise contrastive estimation
nce_weights = tf.Variable(tf.truncated_normal([
vocab_size,
embedding_size
], stddev=1.0 / math.sqrt(embedding_size)), name="nce_weights")
nce_biases = tf.Variable(tf.zeros([vocab_size]), name="nce_biases")
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_negative,
num_classes=vocab_size
), name="loss")
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1,
keep_dims=True), name="norm")
normalized_embeddings = embeddings / norm
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
init.run()
tf.add_to_collection('optimizer', optimizer)
data_index = 0
average_loss = 0
        for step in range(num_steps):
good_batch = False
while not good_batch:
data_index, batch_inputs, batch_labels = generate_batch(
integer_tokens,
data_index,
batch_size,
num_skips,
skip_window
)
good_batch = is_batch_good(batch_inputs)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
final_embeddings = normalized_embeddings.eval()
saver.save(session, os.path.join(save_dir, 'embeddings_model'))
return final_embeddings | 40371578558d11dcd9e198f8b02503b3b3cdbe5d | 15,885 |
import re
def Register_User():
"""Validates register form data and saves it to the database"""
# Check if the fields are filled out
if not (request.form['username'] and request.form['email'] and request.form['password'] and request.form['passwordConf']):
return redirect(url_for('Register', message = "Please fill out all the fields"))
else:
# Ensure passwords match
if request.form['password'] != request.form['passwordConf']:
return redirect(url_for('Register', message = "Passwords do not match"))
# Ensure name is only _, a-z, A-Z, 0-9, and space
if not re.search(r'^[\w_ ]+$', request.form['username']):
return redirect(url_for('Register', message = "Username can only contain _, a-z, A-Z, 0-9 and spaces."))
# Ensure a valid email
if not re.search(r'^[a-zA-Z0-9]+[\._]?[a-zA-Z0-9]+[@]\w+[.]\w+$', request.form['email']):
return redirect(url_for('Register', message = "Invalid email"))
# Connect to DB
with engine.connect() as con:
# Check if username is taken
try:
statement = text("SELECT COUNT(1) FROM user WHERE (username = :username)")
result = con.execute(statement, username = request.form['username']).scalar()
except SQLAlchemyError as e:
return redirect(url_for('Error', title = "Error: Validating user availability", msg = type(e), back = "Register_User"))
except:
return redirect(url_for('Error', title = "Error", msg = "<class 'blog.UnhandledError'>", back = "Register_User"))
if result > 0:
return redirect(url_for('Register', message = "Username is already taken"))
# Check if email is taken
try:
statement = text("SELECT COUNT(1) FROM user WHERE (email = :email)")
result = con.execute(statement, email = request.form['email']).scalar()
except SQLAlchemyError as e:
return redirect(url_for('Error', title = "Error: Validating user availability", msg = type(e), back = "Register_User"))
except:
return redirect(url_for('Error', title = "Error", msg = "<class 'blog.UnhandledError'>", back = "Register_User"))
if result > 0:
return redirect(url_for('Register', message = "Email is already taken"))
# Create new user and add to the database
try:
new_user = User(request.form['username'], request.form['email'], request.form['password'])
db.session.add(new_user)
db.session.commit()
except:
return redirect(url_for('Error', title = "Error", msg = "<class 'blog.UnhandledError'>", back = "Register_User"))
# Get the new user's ID to log them in
try:
statement = text("SELECT id FROM user WHERE (username = :username)")
result = con.execute(statement, username = request.form['username']).scalar()
except:
return redirect(url_for('Error', title = "Error: Login failed", msg = "REGISTRATION WAS SUCCESSFUL. Something went wrong loging you in. Please login."))
# Log the new user in with a session
session['user_id'] = result
# Redirect to the new user's profile
return redirect(url_for('Own_Profile')) | 6a3ed4e99436845791d8025014f4fa4ddbaec86e | 15,886 |
import pandas as pd
def extreme_rank(df, col, n, bottom=True, keep=[]):
"""
Calculate the n top or bottom of a given series
"""
    t = df[list(keep)+[col]].sort_values(col, ascending=bottom).iloc[:n]
count = t['NO_MUNICIPIO'].value_counts()
count.name = '#'
perc = t['NO_MUNICIPIO'].value_counts(normalize=True)
perc.name = '%'
return pd.concat([count, perc], axis=1), t | 976395ceb26f72300cbc24b9cb849b0e47f45ba8 | 15,887 |
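# Hedged toy example for `extreme_rank`. Note that the function hardcodes the
# 'NO_MUNICIPIO' column, so `keep` must include it; the data below are made up.
import pandas as pd

df = pd.DataFrame({
    'NO_MUNICIPIO': ['A', 'A', 'B', 'C'],
    'NOTA': [450.0, 700.0, 380.0, 510.0],
})
summary, rows = extreme_rank(df, 'NOTA', n=2, bottom=True, keep=['NO_MUNICIPIO'])
print(summary)   # counts and shares of each municipality among the 2 lowest scores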
import numpy as np
def ss_octile(y):
"""Obtain the octile summary statistic.
The statistic reaches the optimal performance upon a high number of
observations. According to Allingham et al. (2009), it is more stable than ss_robust.
Parameters
----------
y : array_like
Yielded points.
Returns
-------
    array_like of the shape (batch_size, dim_ss=7, dim_ss_point)
"""
octiles = np.linspace(12.5, 87.5, 7)
E1, E2, E3, E4, E5, E6, E7 = np.percentile(y, octiles, axis=1)
# Combining the summary statistics.
ss_octile = np.hstack((E1, E2, E3, E4, E5, E6, E7))
ss_octile = ss_octile[:, :, np.newaxis]
return ss_octile | a38256c3fa3e2d3c5d756883524d65a48b0585f5 | 15,888 |
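# Hedged usage example for `ss_octile`: synthetic draws shaped as
# (batch_size, n_obs, 1), which is what the stacking above expects;
# the values themselves are random placeholders.
import numpy as np

y = np.random.randn(3, 1000, 1)      # batch_size=3, 1000 observations per row
print(ss_octile(y).shape)            # (3, 7, 1): seven octile cut points per row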
def englishToFrench(englishText):
"""Translates English to French"""
model_id='en-fr'
fr_text = language_translator.translate(
text=englishText,
model_id=model_id).get_result()
return(fr_text['translations'][0]['translation']) | f1ebb6195d09230c1bac2b4351b0157813e6ca80 | 15,889 |
def calc_out_of_plane_angle(a, b, c, d):
"""
Calculate the out of plane angle of the A-D vector
to the A-B-C plane
Returns the value in radians and a boolean telling if b-a-c are near-collinear
"""
collinear_cutoff = 175./180.
collinear = 0
if abs(calc_angle(b, a, c)) > np.pi * collinear_cutoff:
collinear = 1
rab = b - a
rac = c - a
rad = d - a
rab /= np.linalg.norm(rab)
rac /= np.linalg.norm(rac)
rad /= np.linalg.norm(rad)
n = np.cross(rab,rac)
n /= np.linalg.norm(n)
sin = np.dot(n,rad)
ang = np.arcsin(sin)
return ang, collinear | e24c70e210cb8a454af07a1757864b9c241acaff | 15,890 |
import numpy as np
def compute_distances(X, Y):
"""
Computes the Mahalanobis distances between X and Y, for the special case
where covariance between components is 0.
Args:
X (np.ndarray):
3D array that represents our population of gaussians. It is
assumed that X[0] is the 2D matrix containing the coordinates
of the centroids and X[1] represents the 2D matrix of variances.
Y (np.ndarray):
2D or 3D array that can represent either a data matrix or a
DE population. If it represents a population, only the centroids
are taken into consideration.
Returns: np.ndarray
A matrix that contains all distances for each row of X to all rows
of Y, computed with the variances found in X.
"""
assert X.ndim == 3 and X.shape[0] == 2, \
'X must have shape (2,_,_)'
assert Y.ndim == 2 or (Y.ndim == 3 and Y.shape[0] == 2), \
'Y must have shape (_,_) or (2,_,_)'
m = X.shape[1]
if Y.ndim == 2:
n = Y.shape[0]
points = Y
else:
n = Y.shape[1]
points = Y[0]
centers = X[0]
sigmas = X[1]
dist_matrix = np.empty((m, n), dtype=X.dtype)
for i in range(m):
# Broadcasting
diff = (centers[i] - points) / sigmas[i]
# This computes the sum of the pairwise products of the rows. In other
# words, it computes sum([x[i] * y[i] for i in range(x.shape[0])]).
dist_matrix[i, :] = np.einsum('ij,ij->i', diff, diff)
return dist_matrix | da994051b2eb4cc614368ed2a035d7a8bf9dcade | 15,891 |
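# Hedged usage example for `compute_distances`: two axis-aligned Gaussians in X
# (row 0: centroids, row 1: per-component sigmas) against three points in Y.
# Note that the returned values are squared distances (no square root is taken).
import numpy as np

X = np.array([
    [[0., 0.], [1., 1.]],    # centroids
    [[1., 1.], [2., 2.]],    # sigmas
])
Y = np.array([[0., 0.], [1., 1.], [2., 0.]])
print(compute_distances(X, Y))
# [[0.  2.  4. ]
#  [0.5 0.  0.5]]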
def op_item_info():
"""Helper that compiles item info spec and all common module specs
:return dict
"""
item_spec = dict(
item=dict(
type="str",
required=True
),
flatten_fields_by_label=dict(
type="bool",
default=True
),
# Direct users to field_info module instead
field=dict(
type="str",
removed_from_collection="onepassword.connect",
removed_in_version="3.0.0",
),
vault=dict(
type="str"
)
)
item_spec.update(common_options())
return item_spec | 5bc4d3ff959e9642304dff31b910ea8a6c8a9d52 | 15,892 |
import collections.abc
import numpy as np
def unpack_condition(tup):
"""
Convert a condition to a list of values.
Notes
-----
Rules for keys of conditions dicts:
(1) If it's numeric, treat as a point value
(2) If it's a tuple with one element, treat as a point value
(3) If it's a tuple with two elements, treat as lower/upper limits and guess a step size.
(4) If it's a tuple with three elements, treat as lower/upper/step
(5) If it's a list, ndarray or other non-tuple ordered iterable, use those values directly.
"""
if isinstance(tup, tuple):
if len(tup) == 1:
return [float(tup[0])]
elif len(tup) == 2:
            return np.arange(tup[0], tup[1], dtype=float)
elif len(tup) == 3:
            return np.arange(tup[0], tup[1], tup[2], dtype=float)
else:
raise ValueError('Condition tuple is length {}'.format(len(tup)))
    elif isinstance(tup, collections.abc.Iterable):
return [float(x) for x in tup]
else:
return [float(tup)] | c07e651031850896d46a94e6060c79a955ad10fd | 15,893 |
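# Examples of the five rules documented above (values are arbitrary).
print(unpack_condition(300))             # [300.0]                      rule (1)
print(unpack_condition((300,)))          # [300.0]                      rule (2)
print(unpack_condition((300, 305)))      # [300. 301. 302. 303. 304.]   rule (3)
print(unpack_condition((300, 310, 5)))   # [300. 305.]                  rule (4)
print(unpack_condition([290, 300]))      # [290.0, 300.0]               rule (5)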
import logging
def run(doc, preset_mc: bool):
"""Create Graph through classfication values."""
mc = doc._.MajorClaim
adus = doc._.ADU_Sents
if isinstance(mc, list):
mc = mc[0]
if mc == []:
mc = adus.pop(0)
elif not mc:
mc = adus.pop(0)
relations = compare_all(adus, mc)
if config["adu"]["MC"]["method"] == "relations" and not preset_mc:
mc = mc_from_relations.run_spacy(adus, relations)
relations = compare_all(adus, mc)
graph = ag.Graph(name=doc._.key.split("/")[-1])
mc_node = ag.Node(
key=graph.keygen(), text=mc, category=ag.NodeCategory.I, major_claim=True
)
graph.add_node(mc_node)
outer_adus = [a for a in adus if not a == mc]
inner_adus = []
nodes = dict()
connections = dict()
connections[mc] = []
for adu in outer_adus:
main_rel = relations[adu]["main"]
if relations[adu][mc].probability >= main_rel.probability * 0.90:
logging.debug("MC Match")
if relations[adu][mc].classification == RelationClass.ATTACK:
snode = ag.Node(
key=graph.keygen(),
text="Default Conflict",
category=ag.NodeCategory.CA,
)
elif relations[adu][mc].classification == RelationClass.SUPPORT:
snode = ag.Node(
key=graph.keygen(),
text="Default Inference",
category=ag.NodeCategory.RA,
)
else:
snode = None
if snode:
cnode = ag.Node(
key=graph.keygen(), text=adu, category=ag.NodeCategory.I
)
nodes[adu] = cnode
graph.add_edge(ag.Edge(key=graph.keygen(), start=cnode, end=snode))
graph.add_edge(ag.Edge(key=graph.keygen(), start=snode, end=mc_node))
outer_adus.remove(adu)
inner_adus.append(adu)
if len(graph.incoming_nodes[mc_node]) == 0:
iterator = 0
snode = None
designated_adu = None
        while snode is None and iterator < len(outer_adus):
designated_adu = outer_adus[iterator]
if relations[designated_adu][mc].classification == RelationClass.ATTACK:
snode = ag.Node(
key=graph.keygen(),
text="Default Conflict",
category=ag.NodeCategory.CA,
)
elif relations[designated_adu][mc].classification == RelationClass.SUPPORT:
snode = ag.Node(
key=graph.keygen(),
text="Default Inference",
category=ag.NodeCategory.RA,
)
else:
iterator += 1
snode = None
if not snode or not designated_adu:
if outer_adus == []:
logging.info("No ADUs classified, aborting")
return graph
else:
designated_adu = outer_adus[0]
                snode = ag.Node(
key=graph.keygen(),
text="Default Inference",
category=ag.NodeCategory.RA,
)
cnode = ag.Node(
key=graph.keygen(), text=designated_adu, category=ag.NodeCategory.I
)
nodes[designated_adu] = cnode
graph.add_edge(ag.Edge(key=graph.keygen(), start=cnode, end=snode))
graph.add_edge(ag.Edge(key=graph.keygen(), start=snode, end=mc_node))
outer_adus.remove(designated_adu)
inner_adus.append(designated_adu)
max_iter = 0
while len(outer_adus) > 0 and max_iter < 40000:
max_iter += 1
for adu in outer_adus:
inner_found = False
for adu2 in inner_adus:
if adu2 == adu:
pass
elif (
relations[adu][adu2].probability
>= relations[adu]["main"].probability * 0.98
):
logging.debug("Match")
if relations[adu][adu2].classification == RelationClass.ATTACK:
snode = ag.Node(
key=graph.keygen(),
text="Default Conflict",
category=ag.NodeCategory.CA,
)
elif relations[adu][adu2].classification == RelationClass.SUPPORT:
snode = ag.Node(
key=graph.keygen(),
text="Default Inference",
category=ag.NodeCategory.RA,
)
else:
snode = None
if snode:
if adu in nodes:
cnode1 = nodes[adu]
else:
cnode1 = ag.Node(
key=graph.keygen(),
text=adu,
category=ag.NodeCategory.I,
)
nodes[adu] = cnode1
if adu2 in nodes:
cnode2 = nodes[adu2]
else:
cnode2 = ag.Node(
key=graph.keygen(),
text=adu2,
category=ag.NodeCategory.I,
)
nodes[adu2] = cnode2
graph.add_edge(
ag.Edge(key=graph.keygen(), start=cnode1, end=snode)
)
graph.add_edge(
ag.Edge(key=graph.keygen(), start=snode, end=cnode2)
)
inner_found = True
if inner_found:
outer_adus.remove(adu)
inner_adus.append(adu)
if len(outer_adus) > 0:
for adu in outer_adus:
snode = ag.Node(
key=graph.keygen(),
text="Default Inference",
category=ag.NodeCategory.RA,
)
cnode = ag.Node(key=graph.keygen(), text=adu, category=ag.NodeCategory.I,)
graph.add_edge(ag.Edge(key=graph.keygen(), start=cnode, end=snode))
graph.add_edge(ag.Edge(key=graph.keygen(), start=snode, end=mc_node))
return graph | bd1907bd452c78f2fb7f262af173c4743e1af725 | 15,894 |
from aiida.common.datastructures import wf_data_types
from aiida.orm.workflow import Workflow
def get_wfs_with_parameter(parameter, wf_class='Workflow'):
"""
Find workflows of a given class, with a given parameter (which must be a
node)
:param parameter: an AiiDA node
:param wf_class: the name of the workflow class
:return: an AiiDA query set with all workflows that have this parameter
"""
    try:
        from aiida.backends.djsite.db import models
    except ImportError:
        from aiida.djsite.db import models
# Find attributes with this name
qdata = models.DbWorkflowData.objects.filter(aiida_obj=parameter,
data_type=wf_data_types.PARAMETER)
# Find workflows with those attributes
if wf_class == 'Workflow':
qwf = Workflow.query(data__in=qdata)
else:
qwf = Workflow.query(module_class=wf_class,data__in=qdata)
#q2 = wf_class.query(data__in=q1)
# return a Django QuerySet with the resulting class instances
return qwf.distinct().order_by('ctime') | 7ae1c11b9b6495341da853d67d3d38df2c7838cd | 15,895 |
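A brief usage sketch, assuming a loaded legacy AiiDA profile and an already stored node (the PK and attribute access are hypothetical):

# from aiida.orm import load_node
# node = load_node(1234)  # hypothetical PK
# for wf in get_wfs_with_parameter(node, wf_class='Workflow'):
#     print(wf.pk, wf.ctime)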
import glob
import os

import tensorflow as tf
def find_checkpoint_in_dir(model_dir):
"""tf.train.latest_checkpoint will find checkpoints if
'checkpoint' file is present in the directory.
"""
checkpoint_path = tf.train.latest_checkpoint(model_dir)
if checkpoint_path:
return checkpoint_path
# tf.train.latest_checkpoint did not find anything. Find .ckpt file
# manually.
files = glob.glob(os.path.join(model_dir, "*.ckpt*"))
if len(files) == 0:
return None
# Use last file for consistency if more than one (may not actually be
# "latest").
checkpoint_path = sorted(files)[-1]
# Trim after .ckpt-* segment. For example:
# model.ckpt-257706.data-00000-of-00002 -> model.ckpt-257706
parts = checkpoint_path.split(".")
ckpt_index = [i for i in range(len(parts)) if "ckpt" in parts[i]][0]
checkpoint_path = ".".join(parts[: ckpt_index + 1])
return checkpoint_path | 99eca445bae058692068983a12375cae8fb6478d | 15,896 |
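A usage sketch of the glob fallback (the directory layout and path are hypothetical):

# Directory without a 'checkpoint' bookkeeping file:
#   /tmp/model_dir/model.ckpt-257706.index
#   /tmp/model_dir/model.ckpt-257706.data-00000-of-00002
# tf.train.latest_checkpoint returns None, so the glob fallback finds the files
# and trims everything after the '.ckpt-257706' segment:
path = find_checkpoint_in_dir("/tmp/model_dir")
# -> '/tmp/model_dir/model.ckpt-257706' (or None when no *.ckpt* files exist)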
import numpy as np


def map_iou(boxes_true, boxes_pred, scores, thresholds=(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75)):
    """
    Mean average precision at different intersection over union (IoU) thresholds
    input:
        boxes_true: Mx4 numpy array of ground-truth bounding boxes of one image.
                    bbox format: (x1, y1, w, h)
        boxes_pred: Nx4 numpy array of predicted bounding boxes of one image.
                    bbox format: (x1, y1, w, h)
        scores: length N numpy array of scores associated with predicted bboxes
        thresholds: IoU thresholds to evaluate mean average precision on
    output:
        map: mean average precision of the image
    """
# According to the introduction, images with no ground truth bboxes will not be
# included in the map score unless there is a false positive detection (?)
# return None if both are empty, don't count the image in final evaluation (?)
if len(boxes_true) == 0 and len(boxes_pred) == 0:
return None
assert boxes_true.shape[1] == 4 or boxes_pred.shape[1] == 4, "boxes should be 2D arrays with shape[1]=4"
if len(boxes_pred):
assert len(scores) == len(boxes_pred), "boxes_pred and scores should be same length"
# sort boxes_pred by scores in decreasing order
boxes_pred = boxes_pred[np.argsort(scores)[::-1], :]
map_total = 0
# loop over thresholds
for t in thresholds:
matched_bt = set()
tp, fn = 0, 0
for i, bt in enumerate(boxes_true):
matched = False
for j, bp in enumerate(boxes_pred):
miou = calculate_iou(bt, bp)
if miou >= t and not matched and j not in matched_bt:
matched = True
tp += 1 # bt is matched for the first time, count as TP
matched_bt.add(j)
if not matched:
fn += 1 # bt has no match, count as FN
fp = len(boxes_pred) - len(matched_bt) # FP is the bp that not matched to any bt
m = tp / (tp + fn + fp)
map_total += m
return map_total / len(thresholds) | b33f6acf90a24ac473d36de4ceb06563bc1523f6 | 15,897 |
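map_iou relies on a calculate_iou helper that is not shown; a minimal sketch for the (x1, y1, w, h) box format used in the docstring:

def calculate_iou(box_a, box_b):
    # Hypothetical IoU helper for boxes given as (x1, y1, w, h).
    xa1, ya1, wa, ha = box_a
    xb1, yb1, wb, hb = box_b
    inter_w = max(0.0, min(xa1 + wa, xb1 + wb) - max(xa1, xb1))
    inter_h = max(0.0, min(ya1 + ha, yb1 + hb) - max(ya1, yb1))
    inter = inter_w * inter_h
    union = wa * ha + wb * hb - inter
    return inter / union if union > 0 else 0.0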
import matplotlib.pyplot as plt


def draw_output_summary(model):
    """Reads the data saved in the model class and, depending on that data,
    chooses a visualization method to present the results with the help
    of draw_optimization_overview."""
if 'time_series' in model.log:
        # No optimization has happened,
        # hence cost/predictions/parameters are 0-dimensional.
fig = plt.figure()
ax = plt.subplot(1,1,1)
ax = draw_model_output(ax,model)
ax.title.set_text('Model Output')
else:
fig = draw_optimization_overview(model)
return fig | c996bf588f0aa31f32e80d80d352e6a81203a84f | 15,898 |
import numpy as np


def general_spline_interpolation(xs, ys, p, knots=None):
"""
NOTE: SLOW SINCE IT USES B()
xs,ys: interpolation points
p: degree
    knots: If None, use a p+1-regular knot vector from xs[0] to slightly past xs[-1]
returns cs, knots
"""
# number of interpolation points (and also control points)
m = len(xs)
assert(len(ys) == m)
# use p+1-regular knot vector with ends equal to first sample and slightly
# past last sample
    if knots is None:
knots = uniform_regular_knot_vector(m, p, t0=xs[0], t1=xs[-1]+0.001)
# create matrix A
A = np.zeros((m,m))
for row in range(m):
for col in range(m):
A[row, col] = B(col, p, xs[row], knots)
# compute control points
cs = np.linalg.inv(A).dot(np.array(ys))
return cs, knots | fce53b173b6e8234d0c35418ec1455793a62fc61 | 15,899 |
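The function assumes two helpers that are not shown: a p+1-regular (clamped) knot vector and the B-spline basis B(). The sketches below are illustrative reconstructions under those assumptions, with B() implemented via the Cox-de Boor recursion (which is why the docstring calls the routine slow):

import numpy as np

def uniform_regular_knot_vector(m, p, t0=0.0, t1=1.0):
    # Hypothetical p+1-regular knot vector: p + 1 copies of each end value plus
    # m - p - 1 uniformly spaced interior knots (m + p + 1 knots in total).
    n_interior = m - p - 1
    interior = list(np.linspace(t0, t1, n_interior + 2))[1:-1] if n_interior > 0 else []
    return [t0] * (p + 1) + interior + [t1] * (p + 1)

def B(i, p, x, knots):
    # Hypothetical Cox-de Boor recursion for the i-th B-spline basis of degree p.
    if p == 0:
        return 1.0 if knots[i] <= x < knots[i + 1] else 0.0
    left = 0.0
    if knots[i + p] > knots[i]:
        left = (x - knots[i]) / (knots[i + p] - knots[i]) * B(i, p - 1, x, knots)
    right = 0.0
    if knots[i + p + 1] > knots[i + 1]:
        right = (knots[i + p + 1] - x) / (knots[i + p + 1] - knots[i + 1]) * B(i + 1, p - 1, x, knots)
    return left + right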