content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def my_dice_metric_hemp(label, pred):
""" Converts dice score metric to tensorflow graph, only hemp
Args:
label: ground truth mask [batchsize, height, width, classes]
pred: prediction mask [batchsize, height, width, classes]
Returns:
dice value as tensor
"""
return tf.py_function(get_dice_score, [label > 0.5, pred > 0.5], tf.float32)
|
8c170ebde9002e4c8ea832f4310a0d1247617f73
| 31,898 |
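A hedged note on the record above: it relies on a TensorFlow import (tf) and a get_dice_score helper that are not shown. A minimal NumPy sketch of such a helper, assuming it receives boolean masks of equal shape, might look like the following; it is an illustration, not the original implementation.

import numpy as np

def get_dice_score(label, pred, eps=1e-7):
    """Dice coefficient 2*|A∩B| / (|A| + |B|) for boolean masks (sketch only)."""
    label = np.asarray(label, dtype=bool)
    pred = np.asarray(pred, dtype=bool)
    intersection = np.logical_and(label, pred).sum()
    return np.float32(2.0 * intersection / (label.sum() + pred.sum() + eps))

tf.py_function then wraps such a Python function so it can run inside a TensorFlow graph on the thresholded tensors.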
def largest_second_derivative_2d(gxx, gxy, gyy):
"""This function determines the largest eigenvalue and corresponding eigen vector of an image based on Gaussian
derivatives. It returns the direction and magnitude of the largest second derivative. For lines, this direction
will correspond to the normal vector.
Parameters
----------
gxx: array_like
Second order Gaussian derivative in x direction.
gyy: array_like
Second order Gaussian derivative in y direction.
gxy: array_like
Gaussian derivative w.r.t. x and y."""
eigenvalues = eigenvalues_2d_symmetric(gxx, gxy, gyy)
# Largest negative eigenvalue always seems to be the first eigenvalue, but evaluate just to be sure.
max_eig = np.expand_dims(np.argmax(np.abs(eigenvalues), axis=2), axis=2)
# Line strength (second derivative along steepest principal axis) is given by:
# S = nx * nx * gxx + 2 * nx * ny * gxy + ny * ny * gyy
# This is the same as the largest eigenvalue which we already computed.
largest_eigenvalue = np.squeeze(np.take_along_axis(eigenvalues, max_eig, axis=2))
# Normal perpendicular to the line
nx, ny = eigenvector_2d_symmetric(gxx, gxy, gyy, largest_eigenvalue)
return nx, ny, largest_eigenvalue
|
de054fa7a3e2d47c25c6e7f8756c624add5d1cf8
| 31,899 |
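The helpers eigenvalues_2d_symmetric and eigenvector_2d_symmetric are not included in the record above. A plausible NumPy sketch, using the closed-form eigendecomposition of the per-pixel symmetric matrix [[gxx, gxy], [gxy, gyy]], is given below; the names and behaviour are assumptions, not the original code.

import numpy as np

def eigenvalues_2d_symmetric(gxx, gxy, gyy):
    """Per-pixel eigenvalues of [[gxx, gxy], [gxy, gyy]], stacked along a new last axis (sketch)."""
    mean = 0.5 * (gxx + gyy)
    root = np.sqrt(0.25 * (gxx - gyy) ** 2 + gxy ** 2)
    return np.stack([mean + root, mean - root], axis=-1)

def eigenvector_2d_symmetric(gxx, gxy, gyy, eigenvalue):
    """Unit eigenvector (nx, ny) belonging to the given per-pixel eigenvalue (sketch)."""
    # Generic case: (A - lambda*I) v = 0 gives v = (gxy, lambda - gxx).
    # Degenerate case gxy == 0: the matrix is already diagonal, so pick (1, 0) or (0, 1).
    diagonal = np.isclose(gxy, 0.0)
    nx = np.where(diagonal, np.where(np.isclose(eigenvalue, gxx), 1.0, 0.0), gxy)
    ny = np.where(diagonal, np.where(np.isclose(eigenvalue, gxx), 0.0, 1.0), eigenvalue - gxx)
    norm = np.sqrt(nx ** 2 + ny ** 2)
    return nx / norm, ny / norm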
def schema_to_entity_names(class_string):
"""
Mapping from classes path to entity names (used by the SQLA import/export)
This could have been written much simpler if it is only for SQLA but there
is an attempt the SQLA import/export code to be used for Django too.
"""
if class_string is None or len(class_string) == 0:
return
if(class_string == "aiida.backends.djsite.db.models.DbNode" or
class_string == "aiida.backends.sqlalchemy.models.node.DbNode"):
return NODE_ENTITY_NAME
if(class_string == "aiida.backends.djsite.db.models.DbLink" or
class_string == "aiida.backends.sqlalchemy.models.node.DbLink"):
return LINK_ENTITY_NAME
if(class_string == "aiida.backends.djsite.db.models.DbGroup" or
class_string ==
"aiida.backends.sqlalchemy.models.group.DbGroup"):
return GROUP_ENTITY_NAME
if(class_string == "aiida.backends.djsite.db.models.DbComputer" or
class_string ==
"aiida.backends.sqlalchemy.models.computer.DbComputer"):
return COMPUTER_ENTITY_NAME
if (class_string == "aiida.backends.djsite.db.models.DbUser" or
class_string == "aiida.backends.sqlalchemy.models.user.DbUser"):
return USER_ENTITY_NAME
|
c7a6aabde74e3639f39b8e56ad5f82c6684dd2ba
| 31,900 |
def csm_data(csm):
"""
Return the data field of the sparse variable.
"""
return csm_properties(csm)[0]
|
c0f6993f9fb005f5659539deb89b459039801ea1
| 31,901 |
def plot_hypnogram(resampled_hypno, cycles=None, label="", fig=None, ax=None):
"""
Plot an aesthetically pleasing hypnogram with optional cycle markers.
Parameters
----------
resampled_hypno : pd.DataFrame
Hypnogram resampled to epoch time units.
    cycles : pd.DataFrame, optional
Tabulated estimates for onsets, offsets and durations of detected cycles.
The default is None.
label : str, optional
        Custom label to add in plot title, usually a file identifier. The default is "".
    fig : Figure object, optional
        Existing figure to draw on; a new one is created if both fig and ax are None.
    ax : Axis object, optional
        Existing axis to draw on; a new one is created if both fig and ax are None.
Returns
-------
fig : Figure object
Main figure on which hypnogram has been plotted.
ax : Axis object
Axis on figure used by hypnogram.
"""
if (fig is None) and (ax is None):
# Initialize figure and axes
fig, ax = plt.subplots(1, 1, figsize=(16, 4.5), tight_layout=True, sharex=True)
# Ordering of stages and corresponding colors
stages = ["N3", "N2", "N1", "W", "R"]
colors = ["#002D72", "#005EB8", "#307FE2", "#30B700", "#BE3A34"]
# Step 1: Plot colored markers for each stage
# Iterate over each stage-color pair
for stage, color in zip(stages, colors):
# Filter subset of data
dat = resampled_hypno.loc[resampled_hypno["Stage"] == stage]
# Plot markers for each stage
ax.plot(
dat["Epoch_number"],
dat["Stage"],
linestyle="",
marker="s",
color=color,
markersize=2.5,
alpha=0.5,
# mec='w',
zorder=10,
)
# Step 2: Plot stage changes - classic hypnogram style
# Plot all stages across all epochs as a line
ax.plot(
resampled_hypno["Epoch_number"],
resampled_hypno["Stage"],
color="k",
alpha=0.4,
zorder=1,
linestyle="-",
linewidth=0.75,
)
# If cycle information provided
if cycles is not None:
# Iterate over cycles
for k, row in cycles.iterrows():
# Plot background tint
ax.axvspan(
xmin=2 * row["Onset"], # Convert minutes to epochs
xmax=2 * row["Offset"], # Convert minutes to epochs
ymin=0.025,
ymax=0.975,
color="#98B6E4",
alpha=0.16,
)
# Plot cycle onset and offset, and add text label for cycle number
ax.axvline(2 * row["Onset"], color="#98A4AE", alpha=0.25, linestyle="-")
ax.axvline(2 * row["Offset"], color="#98A4AE", alpha=0.25, linestyle="-")
ax.text(2 * row["Onset"] + 5, "R", f"C$_{k+1}$", va="top", alpha=0.75)
# Prepare secondary title with durations
cst = cycles["Duration"].sum()
tst = resampled_hypno["Epoch_number"].iloc[-1] / 2
N = len(cycles)
covg = 100 * cst / tst
title = f"{N} Sleep Cycles - Coverage: {covg:.1f}% of TST ({cst} of {tst} min)"
ax.set_title(title, loc="right")
# Adjust x-axis limits for cleaner fit, and add grid markers, ticks every 100 epochs
ax.set_xlim(-5, resampled_hypno["Epoch_number"].iloc[-1] + 5)
ax.xaxis.set_major_locator(MultipleLocator(100))
# Sane axis labels and primary title
ax.set_ylabel("Sleep Stage")
ax.set_xlabel("Epoch Number")
ax.set_title(f"Hypnogram for PSG {label}", loc="left", fontweight="bold")
return fig, ax
|
5c9be12d2aa9553d4a0084fd6734e5847eabcb57
| 31,902 |
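A hedged usage sketch for plot_hypnogram with a toy hypnogram. It assumes the function and its module-level imports (matplotlib.pyplot as plt, MultipleLocator, pandas) are available, and that the DataFrame uses the Epoch_number and Stage columns expected by the function.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
toy_hypno = pd.DataFrame({
    "Epoch_number": np.arange(600),          # 600 thirty-second epochs = 5 hours
    "Stage": rng.choice(["W", "N1", "N2", "N3", "R"], size=600),
})
fig, ax = plot_hypnogram(toy_hypno, cycles=None, label="toy example")
plt.show()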
from typing import Callable
def _scroll_screen(direction: int) -> Callable:
"""
Scroll to the next/prev group of the subset allocated to a specific screen.
This will rotate between e.g. 1->2->3->1 when the first screen is focussed.
"""
def _inner(qtile):
if len(qtile.screens) == 1:
current = qtile.groups.index(qtile.current_group)
destination = (current + direction) % 6
qtile.groups[destination].cmd_toscreen()
return
current = qtile.groups.index(qtile.current_group)
if current < 3:
destination = (current + direction) % 3
else:
destination = ((current - 3 + direction) % 3) + 3
qtile.groups[destination].cmd_toscreen()
return _inner
|
e778b6ef8a07fe8609a5f3332fa7c44d1b34c17a
| 31,903 |
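A sketch of how such a helper is commonly bound to keys in a qtile config file; the key names and modifier below are illustrative assumptions.

from libqtile.config import Key
from libqtile.lazy import lazy

mod = "mod4"
keys = [
    # Cycle forward / backward through the groups assigned to the focused screen
    Key([mod], "period", lazy.function(_scroll_screen(1))),
    Key([mod], "comma", lazy.function(_scroll_screen(-1))),
]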
def _gate_altitude_data_factory(radar):
""" Return a function which returns the gate altitudes. """
def _gate_altitude_data():
""" The function which returns the gate altitudes. """
try:
return radar.altitude['data'] + radar.gate_z['data']
except ValueError:
return np.mean(radar.altitude['data']) + radar.gate_z['data']
return _gate_altitude_data
|
2a13cd44a6c50e6cbe7272e5628926aa4e76c71b
| 31,904 |
from typing import Callable
import pathlib
import pytest
def test_posix_message_queue_ee(
monkeypatch: pytest.MonkeyPatch,
is_dir: bool,
ee_support: bool,
engine: str,
generate_config: Callable,
):
"""Confirm error messages related to missing ``/dev/mqueue/`` and ``podman``.
Test using all possible combinations of container_engine, ee_support, and ``is_dir``.
:param monkeypatch: Fixture for patching
:param is_dir: The return value to set for ``pathlib.Path.is_dir``
:param ee_support: The value to set for ``--ee``
:param engine: The value to set for ``--ce``
:param generate_config: The configuration generator fixture
"""
message_queue_msg = (
"Execution environment support while using podman requires a '/dev/mqueue/' directory."
)
unpatched_is_dir = pathlib.Path.is_dir
def mock_is_dir(path):
"""Override the result for ``Path('/dev/mqueue/')`` to ``is_dir``.
:param path: The provided path to check
:returns: ``is_dir`` if the path is ``/dev/mqueue/``, else the real result
"""
if path == pathlib.Path("/dev/mqueue/"):
return is_dir
return unpatched_is_dir(path)
monkeypatch.setattr("pathlib.Path.is_dir", mock_is_dir)
response = generate_config(params=["--ce", engine, "--ee", str(ee_support)])
should_error = ee_support and engine == "podman" and not is_dir
message_queue_msg_exists = any(
exit_msg.message == message_queue_msg for exit_msg in response.exit_messages
)
assert should_error == message_queue_msg_exists
|
7dddd24953261324b982da0ed7adbf632cf47779
| 31,905 |
import random
def RandHexColor(length=6):
"""Generates a random color using hexadecimal digits.
Args:
length: The number of hex digits in the color. Defaults to 6.
"""
result = [random.choice(HEX_DIGITS) for _ in range(length)]
return '#' + ''.join(result)
|
7e196fa2b0666dc9aee67bcac83d0186812d9335
| 31,906 |
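RandHexColor relies on a module-level HEX_DIGITS constant that is not shown in the record; the usage sketch below defines a hypothetical stand-in for it.

import random

HEX_DIGITS = '0123456789abcdef'  # hypothetical stand-in for the missing module constant

random.seed(42)
print(RandHexColor())          # e.g. '#1a8f3c' (exact value depends on the seed)
print(RandHexColor(length=3))  # shorthand three-digit color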
def _backtest2(prediction, price, acct_num,):
"""
    Calculate daily return in % form
    :param prediction: series/array of directional signals; only the sign is used
    :param price: price series
    :param acct_num: number of staggered trading accounts
    :return: daily returns in % form
"""
# starting net val for trading account
mat = np.ones((acct_num, len(price)))
# liquidate or build position time
_idx = np.arange(len(price))
# price change
_chg = price.pct_change()
for i in range(acct_num):
adjust_time = _idx[i::acct_num]
for j, k in zip(adjust_time, np.hstack((adjust_time[1:], [-1]))):
sign = np.sign(prediction[j])
if k != -1:
mat[i][j+1:k+1] = 1+sign * _chg[j+1: k+1]
else:
mat[i][j+1:] = 1+ sign * _chg[j+1: ]
mat = mat.cumprod(1).sum(0)
mat /= mat[0]
# daily return in % form.
return 100 * np.diff(mat)/mat[:-1]
|
09e230945daf2f745b39523e5fba676e88629e09
| 31,907 |
def check_players(instance):
""" Checks to see if any of the starting players have left.
Args:
instance: The GameInstance model for this operation.
    If a player has left the game, they are invited back and a
    message describing this is returned.
    Returns:
        A message string if a player has left the game, otherwise False.
    """
if len(instance.players) < len(instance.starting_players):
for starting_player in instance.starting_players:
if starting_player not in instance.players:
instance.invited.append(starting_player)
return ('%s left during your game. They have ' %
starting_player +
'been invited and must rejoin before continuing.')
return False
|
d3a31f17cf5d3dee2e3fd075cea2e31d8a806952
| 31,908 |
def list_descendant(input_list):
"""Function that orders a list on descendant order using insertion sort
Args:
        input_list (list): List with the values that we want to order
Returns:
list: The ordered list
"""
for i in range(1, len(input_list)):
j = i-1
next_element = input_list[i]
# Compare the current element with next one
        while (j >= 0) and (input_list[j] < next_element):
input_list[j+1] = input_list[j]
j = j-1
input_list[j+1] = next_element
return input_list
|
8d2c452a513ccf67c4b179bf989fac5cc0108d09
| 31,909 |
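A quick usage check for the insertion sort above (the list is sorted in place and also returned):

values = [3, 1, 4, 1, 5, 9, 2, 6]
print(list_descendant(values))  # [9, 6, 5, 4, 3, 2, 1, 1]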
from io import StringIO
import csv
from aiohttp.web import Request, Response
async def chargify_invoices_csv(request: Request):
"""
Output Chargify invoices data.
"""
output = StringIO()
writer = csv.writer(output)
if not request.app['chargify_invoices_data']:
raise Exception("data not loaded yet")
writer.writerows(request.app['chargify_invoices_data'])
return Response(text=output.getvalue(), content_type="text/csv")
|
5558b1b885f52edccc45a3819cab0353f84578fa
| 31,910 |
def generate_base_reference(header,waveform="cosine",period=24,phase=0,width=12):
"""
This will generate a waveform with a given phase and period based on the header,
"""
ZTs = header
tpoints = np.zeros(len(ZTs))
coef = 2.0 * np.pi / float(period)
w = float(width) * coef
for i,ZT in enumerate(ZTs):
z = float(ZT[2:].split("_")[0])
tpoints[i] = (z-phase) * coef
if waveform == "cosine":
def cosine(x,w):
x = x % (2*np.pi)
w = w % (2*np.pi)
if x <= w:
y = np.cos(x/(w/np.pi))
elif x > w:
y = np.cos( (x+2.*(np.pi-w))*np.pi/ (2*np.pi - w) )
return y
#fcos = lambda tp : cosine(tp,w)
reference= [cosine(tpoint,w) for tpoint in tpoints]
elif waveform == "trough":
def trough(x,w):
x = x % (2*np.pi)
w = w % (2*np.pi)
if x <= w:
y = 1 + -x/w
elif x > w:
y = (x-w)/(2*np.pi - w)
return y
#ftro = lambda tp : trough(tp,w)
reference= [trough(tpoint,w) for tpoint in tpoints]
return reference
|
b1178e07d9d628654902b6198acac1f791ba2064
| 31,911 |
def from_string(dlstr):
"""Factory method taking the string as appearing in FIELD input"""
input_key = dlstr.split()[0].lower()
for subcls in Interaction.__subclasses__():
if subcls.key == input_key:
return subcls.from_string(dlstr)
raise ValueError("No interaction available for {!r}".format(str))
|
875eb21e3773f47fdc77d3177a5f5253d0656546
| 31,912 |
def assign_cat(plugin):
"""Assigns `fonts` module keywords to the `Warp` plugin."""
meta = [
[KEYWORD_MATHCAL, "Script (or calligraphy): 𝒜ℬ𝒞𝒶𝒷𝒸𝒜𝒞𝒶𝒷𝒸"],
[KEYWORD_MATHBB, "Double-struck: 𝔸𝔹ℂ𝕒𝕓𝕔𝟙𝟚𝟛𝔸𝔹𝕒𝕓𝕔𝟙𝟚𝟛"],
[KEYWORD_MATHFRAK, "Fraktur: 𝔄𝔅ℭ𝔞𝔟𝔠𝔄𝔅𝔞𝔟𝔠"],
[KEYWORD_MATHSF, "Sans-serif: 𝖠𝖡𝖢𝖺𝖻𝖼𝟣𝟤𝟥𝖠𝖡𝖢𝖺𝖻𝖼𝟣𝟤𝟥"],
[KEYWORD_TEXTSF, "Sans-serif: 𝖠𝖡𝖢𝖺𝖻𝖼𝟣𝟤𝟥𝖠𝖡𝖢𝖺𝖻𝖼𝟣𝟤𝟥"],
[KEYWORD_MATHBF, "Serif Bold: 𝐀𝐁𝐂𝐚𝐛𝐜𝟏𝟐𝟑𝐀𝐁𝐂𝐚𝐛𝐜𝟏𝟐𝟑"],
[KEYWORD_TEXTBF, "Serif Bold: 𝐀𝐁𝐂𝐚𝐛𝐜𝟏𝟐𝟑𝐀𝐁𝐂𝐚𝐛𝐜𝟏𝟐𝟑"],
[KEYWORD_MATHBI, "Serif Bold italic: 𝑨𝑩𝑪𝒂𝒃𝒄𝟏𝟐𝟑𝑨𝑩𝑪𝒂𝒃𝒄𝟏𝟐𝟑"],
[KEYWORD_TEXTIT, "Serif Italic: 𝐴𝐵𝐶𝑎𝑏𝑐123𝐴𝐵𝐶𝑎𝑏𝑐"],
[KEYWORD_TEXTTT, "Mono-space: 𝙰𝙱𝙲𝚊𝚋𝚌𝟷𝟸𝟹𝙰𝙱𝙲𝚊𝚋𝚌𝟷𝟸𝟹"]]
items = [plugin.create_item(
category=plugin.CATEGORY_FONTS,
label=el[0],
short_desc=el[1],
target=el[0],
args_hint=kp.ItemArgsHint.REQUIRED,
hit_hint=kp.ItemHitHint.IGNORE) for el in meta]
return items
|
a1168624fa330ce19a3c8e78346bb7a893bb74e3
| 31,913 |
def get_holidays(startYear = 2018, endYear = 2025, countryCode = 'ZA'):
"""
    Takes a start year, an end year (exclusive) and a country code.
    Produces a dataframe with one row per public holiday and columns:
        day - date of the holiday
        holidayName - name of the holiday
    Returns a dataframe
"""
holidayDict = {}
for i in range(startYear, endYear):
for date, name in sorted(holidays.CountryHoliday(countryCode,years=[i]).items()):
holidayDict[date] = name
holiday_df = pd.DataFrame(list(holidayDict.items()),columns = ['day','holidayName'])
holiday_df['day'] = pd.to_datetime(holiday_df['day']).dt.date
return holiday_df
|
6d7d83389bee2a83c6cc2d565f7c04b8d131555a
| 31,915 |
import struct
def _testHeadCheckSum(header, tableDirectory):
"""
>>> header = dict(sfntVersion="OTTO")
>>> tableDirectory = [
... dict(tag="head", offset=100, length=100, checkSum=123, data="00000000"+struct.pack(">L", 925903070)),
... dict(tag="aaab", offset=200, length=100, checkSum=456),
... dict(tag="aaac", offset=300, length=100, checkSum=789),
... ]
>>> bool(_testHeadCheckSum(header, tableDirectory))
"""
flavor = header["sfntVersion"]
tables = {}
for entry in tableDirectory:
tables[entry["tag"]] = entry
data = tables["head"]["data"][8:12]
checkSumAdjustment = struct.unpack(">L", data)[0]
shouldBe = calcHeadCheckSumAdjustment(flavor, tables)
if checkSumAdjustment != shouldBe:
return ["The head checkSumAdjustment value is incorrect."]
return []
|
4391c4d6dd6c3fedf99315a2d6995962251ca10f
| 31,916 |
def svn_ra_do_status(*args):
"""
svn_ra_do_status(svn_ra_session_t session, svn_ra_reporter2_t reporter,
void report_baton, char status_target, svn_revnum_t revision,
svn_boolean_t recurse, svn_delta_editor_t status_editor,
void status_baton,
apr_pool_t pool) -> svn_error_t
"""
return apply(_ra.svn_ra_do_status, args)
|
30d2d310bf2e047653c99ba4add68b3e2d07ee00
| 31,918 |
def clear_spaces(comp):
"""
'A + D' -> 'A+D'
"""
r = ''
for c in comp:
if c != ' ':
r += c
return r
|
cf8f28cf3633eb31d0c934dd9fc3035b3c1539f4
| 31,919 |
def _load_metabolite_linkouts(session, cobra_metabolite, metabolite_database_id):
"""Load new linkouts even ones that are pointing to previously created universal
metabolites.
The only scenario where we don't load a linkout is if the external id and
metabolite is exactly the same as a previous linkout.
"""
# parse the notes
def parse_linkout_str(id):
if id is None:
return None
id_string = str(id)
        for s in ['{', '}', '[', ']', '"', "'"]:
id_string = id_string.replace(s, '')
return id_string.strip()
data_source_fix = {'KEGG_ID' : 'KEGGID', 'CHEBI_ID': 'CHEBI'}
db_xref_data_source_id = { data_source.name: data_source.id for data_source
in session.query(base.DataSource).all() }
for external_source, v in cobra_metabolite.notes.iteritems():
# ignore formulas
if external_source.lower() in ['formula', 'formula1', 'none']:
continue
# check if linkout matches the list
external_source = external_source.upper()
v = v[0]
if external_source in data_source_fix:
            external_source = data_source_fix[external_source]
if '&apos' in v:
ids = [parse_linkout_str(x) for x in v.split(',')]
else:
ids = [parse_linkout_str(v)]
for external_id in ids:
if external_id.lower() in ['na', 'none']:
continue
exists = (session
.query(base.Synonym)
.filter(base.Synonym.synonym == external_id)
.filter(base.Synonym.type == 'component')
.filter(base.Synonym.ome_id == metabolite_database_id)
.count() > 0)
if not exists:
ome_linkout = {'type': 'component'}
ome_linkout['ome_id'] = metabolite_database_id
ome_linkout['synonym'] = external_id
try:
data_source_id = db_xref_data_source_id[external_source]
except KeyError:
data_source_id = create_data_source(session, external_source)
db_xref_data_source_id[external_source] = data_source_id
check_and_update_url(session, data_source_id)
ome_linkout['synonym_data_source_id'] = data_source_id
synonym = base.Synonym(**ome_linkout)
session.add(synonym)
|
cda0a2fef212b091d39b45d2c8603eedc0f3b99a
| 31,920 |
import random
def gen_ascii_captcha(symbols, length=6, max_h=10, noise_level=0, noise_char="."):
"""
Return a string of the specified length made by random symbols.
Print the ascii-art representation of it.
Example:
symbols = gen_ascii_symbols(input_file='ascii_symbols.txt',
chars = string.ascii_lowercase+string.ascii_uppercase+'0123456789')
while True:
captcha = gen_ascii_captcha(symbols, noise_level=0.2)
x = input('captcha: ')
if x == captcha:
print('\ncorrect')
break
print('\ninvalid captcha, please retry')
"""
assert noise_level <= 1
# max_h = 10
# noise_level = 0
captcha = "".join(random.sample(chars, length))
# print(code)
pool = [symbols[c].split("\n") for c in captcha]
for n in range(max_h, 0, -1):
line = ""
for item in pool:
try:
next_line = item[-n]
except IndexError:
next_line = "".join(
[" " for i in range(max([len(_item) for _item in item]))]
)
if noise_level:
# if random.random() < noise_level:
# next_line = next_line.replace(' ', noise_char)
next_line = "".join(
[
c
if random.random() > noise_level
else random.choice(noise_char)
for c in next_line
]
)
line += next_line
print(line)
return captcha
|
b6f6f02bbbe3cbdfce007ea13e26836c21e20ef2
| 31,921 |
def get_app_name_cache_file(file_path: str) -> str:
"""Returns path to app name cache file"""
return f'{file_path}.{APP_NAME_FILE_EXTENSION}'
|
16bdfe57e28eb5ea97e317fea98285af494ff0a9
| 31,922 |
def make_matrix_A(x_r, simulations, ξ, Δ_t=1, λ=1):
"""
Equation (9) from paper
:param x_r: reference output
:param simulations: simulation outputs
    :param Δ_t: the intervals between each observation (series or constant)
:param ξ: squash control parameter
:return: the position/trend matrix
"""
A = []
for x_s in simulations:
a_i1 = position_metric(x_r, x_s, ξ)
a_i2 = trend_metric(x_r, x_s, ξ, Δ_t, λ)
A.append([a_i1, a_i2])
return np.array(A)
|
f4cb5e4dd5f79385ee4c945b4b7982200f2fa474
| 31,923 |
def generate_markov_content(content):
""" Generate Markov Content
Parameters
----------
content : str
Corpus to generate markov sequences from
"""
# Build the model.
text_model = markovify.Text(content, state_size=3)
    # return randomly-generated sentence of no more than 200 characters
return text_model.make_short_sentence(200)
|
c0503dca553712f8563c53676d3216f8206808ab
| 31,924 |
def _quadratic_system_matrix(x):
"""
Create system matrix of a model with linear, quadratic and 1st-order 2-way interaction terms
Parameters
----------
x : array_like, shape (n, m)
Explanatory variable of n data points and m variables
Returns
-------
A : ndarray, shape (n, 1 + 1.5 m + 0.5 m ** 2)
System matrix
"""
n, m = x.shape
nc = int(1. + 1.5 * m + .5 * m ** 2.) # number of coefficients
if n < nc:
raise ValueError('Insufficient data points to fit full quadratic model.')
A = _allocate_system_matrix((n, nc))
_assign_linear_terms(A, x)
_assign_quadratic_terms(A, x)
_assign_interaction_terms(A, x, 2 * m + 1)
return A
|
6d958cccaf5127fdc9f3fc6bcc472ceafd2947d6
| 31,925 |
def pytest_pycollect_makeitem(collector, name, obj):
"""A pytest hook to collect curio coroutines."""
if collector.funcnamefilter(name) and curio.meta.iscoroutinefunction(obj):
item = pytest.Function.from_parent(collector, name=name)
if "curio" in item.keywords:
return list(collector._genfunctions(name, obj))
|
95d18bc4575fc31c938dae6d487cf4cd058d89c4
| 31,926 |
from math import ceil
def eta_hms(seconds, always_show_hours=False, always_show_minutes=False, hours_leading_zero=False):
"""Converts seconds remaining into a human readable timestamp (e.g. hh:mm:ss, h:mm:ss, mm:ss, or ss).
Positional arguments:
seconds -- integer/float indicating seconds remaining.
Keyword arguments:
always_show_hours -- don't hide the 0 hours.
always_show_minutes -- don't hide the 0 minutes.
hours_leading_zero -- show 01:00:00 instead of 1:00:00.
Returns:
Human readable string.
"""
# Convert seconds to other units.
final_hours, final_minutes, final_seconds = 0, 0, seconds
if final_seconds >= 3600:
final_hours = int(final_seconds / 3600.0)
final_seconds -= final_hours * 3600
if final_seconds >= 60:
final_minutes = int(final_seconds / 60.0)
final_seconds -= final_minutes * 60
final_seconds = int(ceil(final_seconds))
# Determine which string template to use.
if final_hours or always_show_hours:
if hours_leading_zero:
template = '{hour:02.0f}:{minute:02.0f}:{second:02.0f}'
else:
template = '{hour}:{minute:02.0f}:{second:02.0f}'
elif final_minutes or always_show_minutes:
template = '{minute:02.0f}:{second:02.0f}'
else:
template = '{second:02.0f}'
return template.format(hour=final_hours, minute=final_minutes, second=final_seconds)
|
db85099a0a1c19391ed8abc1ea2bcf4b867a93af
| 31,927 |
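A few illustrative calls to eta_hms showing how the string templates are selected:

print(eta_hms(3661))                           # '1:01:01'
print(eta_hms(3661, hours_leading_zero=True))  # '01:01:01'
print(eta_hms(90))                             # '01:30'
print(eta_hms(9))                              # '09'
print(eta_hms(9, always_show_minutes=True))    # '00:09'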
def apply_Hc(C, A_L, A_R, Hlist):
"""
Compute C' via eq 16 of vumps paper (132 of tangent space methods).
"""
H, LH, RH = Hlist
A_Lstar = np.conj(A_L)
A_C = ct.rightmult(A_L, C)
to_contract = [A_C, A_Lstar, A_R, np.conj(A_R), H]
idxs = [(4, 1, 3),
(6, 1, -1),
(5, 3, 2),
(7, -2, 2),
(6, 7, 4, 5)]
term1 = tn.ncon(to_contract, idxs)
term2 = np.dot(LH, C)
term3 = np.dot(C, RH.T)
C_prime = term1 + term2 + term3
return C_prime
|
ae43b1fe1484ee70b2245474f03a3e9676b44929
| 31,928 |
import ipywidgets as ipyw
def make_basic_gui(container):
"""Create a basic GUI layout.
Parameters
----------
container : RenderContainer
Returns
-------
ipywidgets.GridspecLayout
"""
element_controls = [
ipyw.HTML(value="<b>Elements</b>", layout=ipyw.Layout(align_self="center"))
]
for key, descript in [
("group_atoms", "Atoms"),
("cell_lines", "Unit Cell"),
("group_labels", "Labels"),
("bond_lines", "Bonds"),
("group_millers", "Planes"),
("group_ghosts", "Ghosts"),
]:
if key not in container:
continue
toggle = ipyw.ToggleButton(
description=descript,
icon="eye",
button_style="primary",
value=False if key == "group_labels" else container[key].visible,
layout=ipyw.Layout(width="auto"),
)
ipyw.jslink((toggle, "value"), (container[key], "visible"))
element_controls.append(toggle)
control_box_elements = ipyw.Box(
element_controls, layout=ipyw.Layout(flex_flow="column")
)
container["control_box_elements"] = control_box_elements
background_controls = [
ipyw.HTML(value="<b>Background</b>", layout=ipyw.Layout(align_self="center"))
]
background_color = ipyw.ColorPicker(
concise=True,
description="Color",
description_tooltip="Background Color",
value=container.element_renderer.clearColor,
layout=ipyw.Layout(align_items="center"),
)
background_color.style.description_width = "40px"
ipyw.jslink((background_color, "value"), (container.element_renderer, "clearColor"))
background_controls.append(background_color)
background_opacity = ipyw.FloatSlider(
value=container.element_renderer.clearOpacity,
min=0,
max=1,
step=0.1,
orientation="horizontal",
readout=False,
description_tooltip="Background Opacity",
)
background_opacity.layout.max_width = "100px"
ipyw.jslink(
(background_opacity, "value"), (container.element_renderer, "clearOpacity")
)
background_controls.append(background_opacity)
# other_controls.append(ipyw.Label(value="Opacity", layout=ipyw.Layout(align_self="center")))
control_box_background = ipyw.Box(
background_controls, layout=ipyw.Layout(flex_flow="column")
)
container["control_box_background"] = control_box_background
axes = [container.axes_renderer] if "axes_renderer" in container else []
info_box = ipyw.HTML(
value="", # "Double-click atom for info (requires active kernel).",
color="grey",
layout=ipyw.Layout(
max_height="10px", margin="0px 0px 0px 0px", align_self="flex-start"
),
)
def on_click(change):
obj = change["new"]
if obj is None:
container.atom_pointer.visible = False
info_box.value = ""
else:
info_box.value = obj.name
# container.atom_pointer.position = container.atom_picker.point
container.atom_pointer.position = obj.position
container.atom_pointer.visible = True
container.atom_picker.observe(on_click, names=["object"])
if axes and container.element_renderer.height > 200:
grid = ipyw.GridspecLayout(
2,
2,
width=f"{container.element_renderer.width + 100}px",
height=f"{container.element_renderer.height + 35}px",
)
grid[0, 0] = container.element_renderer
grid[1, 0] = info_box
grid[:, 1] = ipyw.Box(
axes + [control_box_elements, control_box_background],
layout=ipyw.Layout(align_self="flex-start", flex_flow="column"),
)
else:
grid = ipyw.GridspecLayout(
2,
3,
width=f"{container.element_renderer.width + 200}px",
height=f"{container.element_renderer.height + 35}px",
)
grid[:, 0] = ipyw.Box(
axes, layout=ipyw.Layout(align_self="flex-end", flex_flow="column")
)
grid[0, 1] = container.element_renderer
grid[1, 1] = info_box
grid[:, 2] = ipyw.Box(
[control_box_elements, control_box_background],
layout=ipyw.Layout(align_self="flex-start", flex_flow="column"),
)
return grid
|
5a01c70304910b1b20193c507a348ec90dd6a514
| 31,929 |
def parse_middleware_mkdir_response(mkdir_resp):
"""
Parse a response from RpcMiddlewareMkdir.
Returns (mtime in nanoseconds, inode number, number of writes)
"""
return (_ctime_or_mtime(mkdir_resp),
mkdir_resp["InodeNumber"],
mkdir_resp["NumWrites"])
|
17336084895bbad579785e7e918674e4367f05c6
| 31,932 |
import torch
def decorate_batch(batch, device='cpu'):
"""Decorate the input batch with a proper device
Parameters
----------
batch : {[torch.Tensor | list | dict]}
The input batch, where the list or dict can contain non-tensor objects
device: str, optional
'cpu' or 'cuda'
Raises:
----------
Exception: Unsupported data type
Return
----------
torch.Tensor | list | dict
Maintain the same structure as the input batch, but with tensors moved to a proper device.
"""
if isinstance(batch, torch.Tensor):
batch = batch.to(device)
return batch
elif isinstance(batch, dict):
for key, value in batch.items():
if isinstance(value, torch.Tensor):
batch[key] = value.to(device)
elif isinstance(value, dict) or isinstance(value, list):
batch[key] = decorate_batch(value, device)
# retain other value types in the batch dict
return batch
elif isinstance(batch, list):
new_batch = []
for value in batch:
if isinstance(value, torch.Tensor):
new_batch.append(value.to(device))
elif isinstance(value, dict) or isinstance(value, list):
new_batch.append(decorate_batch(value, device))
else:
# retain other value types in the batch list
new_batch.append(value)
return new_batch
else:
raise Exception('Unsupported batch type {}'.format(type(batch)))
|
a0bd4a5dff0b5cf6e304aede678c5d56cb93d1dd
| 31,933 |
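A small usage sketch for decorate_batch with a nested batch; device "cpu" is used so the example also runs without a GPU.

import torch

batch = {
    "input_ids": torch.zeros(2, 8, dtype=torch.long),
    "meta": {"lengths": torch.tensor([8, 5]), "ids": ["a", "b"]},  # non-tensor values are kept as-is
}
batch = decorate_batch(batch, device="cpu")
print(batch["input_ids"].device, batch["meta"]["lengths"].device)  # cpu cpu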
import yaml
def get_config(filename: str = CONFIG_FILENAME) -> dict:
"""
Get config as a dictionary
Parameters
----------
filename: str
The filename with all the configuration
Returns
-------
dict
A dictionary containing all the entries from the config YAML
"""
global config
    if config is None:
        with open(filename) as config_file:
            config = yaml.load(config_file, Loader=yaml.FullLoader)
return config
|
79572679204c53f6ecdf91a120bb6a08ced9afd1
| 31,934 |
def partition_trace(vtrace):
"""partition a trace based on its types """
partition = {}
for v in vtrace:
vty = get_vt_type(v)
if vty not in partition:
partition[vty] = []
partition[vty].append(v)
return partition
|
6fa30f6180a122cc6cb74bf7dcdfbbfa2b4a8627
| 31,935 |
def test_text_justify_bottom_right_and_top_left(region, projection):
"""
Print text justified at bottom right and top left.
"""
fig = Figure()
fig.text(
region=region,
projection=projection,
x=1.2,
y=0.2,
text="text justified bottom right",
justify="BR",
)
fig.text(
region=region,
projection=projection,
x=1.2,
y=0.2,
text="text justified top left",
justify="TL",
)
return fig
|
a8b1735e002f2c310226142bfcbde9bd1fea9f95
| 31,936 |
from pathlib import Path
def get_bench(
lx: tuple[int, int, int], data_dir: Path, tests: list[str], comp_lvls: list[int]
) -> dict[str, pd.DataFrame]:
"""get writing benchmark HDF5 files to pandas dataframe"""
data = {
"write_dr": pd.DataFrame(index=comp_lvls, columns=tests),
"read_dr": pd.DataFrame(index=comp_lvls, columns=tests),
"write_t": pd.DataFrame(index=comp_lvls, columns=tests),
"read_t": pd.DataFrame(index=comp_lvls, columns=tests),
}
hdf5_vers = None
mpi_api = None
for t in tests:
for c in comp_lvls:
tail = f"{lx[0]}_{lx[1]}_{lx[2]}_comp{c}"
h5fn = data_dir / f"{t}_{tail}.h5.write_stat.h5"
try:
with h5py.File(h5fn, "r") as f:
ca = f["/comp_lvl"][()]
data["write_dr"][t][ca] = f["/median_MBsec"][()]
data["write_t"][t][ca] = np.median(f["/t_ms"][:])
except FileNotFoundError:
print(f"ERROR: {t}: write benchmark {h5fn}")
h5fn = data_dir / f"{t}_{tail}.h5.read_stat.h5"
try:
with h5py.File(h5fn, "r") as f:
# read benchmarks refer to write compression level
data["read_dr"][t][ca] = f["/median_MBsec"][()]
data["read_t"][t][ca] = np.median(f["/t_ms"][:])
if "Ncpu" not in data and "mpi" in t:
data["Ncpu"] = f["/Ncpu"][()]
data["compiler"] = f["/compiler"].asstr()[()]
data["os"] = f["/os"].asstr()[()]
hdf5_vers = f["/hdf5version"][:]
mpi_api = f["/mpi_api_version"][:]
data["mpi_lib_version"] = f["/mpi_lib_version"].asstr()[()][:16]
# limit length for title
except FileNotFoundError:
print(f"ERROR: {t}: read benchmark {h5fn}")
if hdf5_vers is None:
raise FileNotFoundError(f"No data files were found in {data_dir}")
data["hdf5version"] = f"{hdf5_vers[0]}.{hdf5_vers[1]}.{hdf5_vers[2]}"
data["mpi_api_version"] = f"{mpi_api[0]}.{mpi_api[1]}"
return data
|
d135f58a3793cf161f338118060b16106bd6c96a
| 31,937 |
def custom_token(deploy_tester_contract, custom_token_params):
"""Deploy CustomToken contract"""
return deploy_tester_contract(
CONTRACT_CUSTOM_TOKEN,
[],
custom_token_params
)
|
175f8b49545e422ae691631b361f7bb62e9f62f5
| 31,938 |
def parse_values(reports, criteria1, criteria2, steps, crit1_name, crit2_name, first=False, cpus=1):
"""
Description: Parse the 'reports' and create a sorted array
of size n_structs following the criteria chosen by the user.
"""
info_reports = [ retrieve_report_data(report) for report in reports]
data = pd.concat(info_reports)
data.drop_duplicates(subset=[crit1_name, crit2_name], inplace=True)
print("Simulation data {}".format(data.shape))
return data
|
9d22756ecdfe50f7e5563a4fa855ddb5a3a4cd21
| 31,939 |
import io
def read_bytes(n: int, reader: io.IOBase) -> bytes:
"""
Reads the specified number of bytes from the reader. It raises an
`EOFError` if the specified number of bytes is not available.
Parameters:
- `n`: The number of bytes to read;
- `reader`: The reader;
Returns the bytes read.
"""
buff = reader.read(n)
if not isinstance(buff, bytes):
raise ValueError('The reader is expected to return bytes.')
if len(buff) != n:
raise EOFError(f'Unable to read {n} bytes from the stream.')
return buff
|
bb3d00fc7667839864f4104a94a26e682f058fdc
| 31,940 |
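A usage sketch for read_bytes with an in-memory stream:

import io

stream = io.BytesIO(b"\x01\x02\x03\x04")
print(read_bytes(2, stream))  # b'\x01\x02'
print(read_bytes(2, stream))  # b'\x03\x04'
try:
    read_bytes(2, stream)     # stream is exhausted
except EOFError as exc:
    print(exc)                # Unable to read 2 bytes from the stream.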
import json
def _format_full_payload(_json_field_name, _json_payload, _files_payload):
"""This function formats the full payload for a ``multipart/form-data`` API request including attachments.
.. versionadded:: 2.8.0
:param _json_field_name: The name of the highest-level JSON field used in the JSON payload
:type _json_field_name: str
:param _json_payload: The JSON payload data as a dictionary
:type _json_payload: dict
:param _files_payload: The payload for the attachments containing the IO stream for the file(s)
:type _files_payload: dict
:returns: The full payload as a dictionary
:raises: :py:exc:`TypeError`
"""
_full_payload = {
_json_field_name: (None, json.dumps(_json_payload, default=str), 'application/json')
}
_full_payload.update(_files_payload)
return _full_payload
|
feacd27be3e6fcbd33f77fa755be513a93e3cdeb
| 31,941 |
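A hedged sketch of sending such a payload with the requests library; the field names, file name and URL are illustrative assumptions, not part of the original record.

import requests

json_payload = {"subject": "Attachment demo", "body": {"text": "See the attached file."}}
files_payload = {"attachment": ("report.csv", open("report.csv", "rb"), "text/csv")}
full_payload = _format_full_payload("message", json_payload, files_payload)
# requests encodes the combined dict as a multipart/form-data request body
response = requests.post("https://example.com/api/messages", files=full_payload)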
def convert_rows (rows):
"""Read a two-element tuple from a string.
rows should be a string containing two integers separated by a
comma, blank, or colon. The numbers may be enclosed in parentheses
or brackets, but this is not necessary. Note: the row numbers
are one indexed and inclusive, e.g. rows = "480, 544" means process
rows 479 through 543 (zero indexed), which is equivalent to the
slice 479:544.
"""
if rows.strip() == "":
rows = None
else:
bad = True
if rows.find (",") >= 0:
rownum = rows.split (",")
else:
rownum = rows.split (" ")
if len (rownum) == 2:
bad = False
try:
row0 = int (rownum[0]) - 1
row1 = int (rownum[1])
except:
bad = True
if bad:
raise ValueError("can't interpret rows = %s" % (rows,))
rows = (row0, row1)
return rows
|
e2d5a8e68459d6cb7a2fa044baa6d25dab46511c
| 31,942 |
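Two worked examples of the one-indexed, inclusive row specification described in the docstring above:

print(convert_rows("480, 544"))  # (479, 544), i.e. the slice 479:544
print(convert_rows(""))          # None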
import numpy
import theano
def prepare_data(hsi_img=None, gnd_img=None, window_size=7, n_principle=3,
batch_size=50, merge=False, ratio=[6, 2, 2]):
"""
Process the data from file path to splited train-valid-test sets; Binded in
dataset_spectral and dataset_spatial respectively.
Parameters
----------
hsi_img=None: 3-D numpy.ndarray, dtype=float, storing initial
hyperspectral image data.
gnd_img=None: 2-D numpy.ndarray, dtype=int, containing tags for pixeles.
The size is the same to the hsi_img size, but with only
1 band.
window_size: Size of spatial window. Pass an integer 1 if no spatial
infomation needed.
n_principle: This many principles you want to incorporate while
extracting spatial info.
merge: If merge==True, the returned dataset_spectral has
dataset_spatial stacked in the tail of it; else if
merge==False, the returned dataset_spectral and
dataset_spatial will have spectral and spatial information
only, respectively.
Return
------
dataset_spectral:
dataset_spatial:
extracted_pixel_ind:
split_mask:
"""
data_spectral, data_spatial, gndtruth, extracted_pixel_ind = \
T_pca_constructor(hsi_img=hsi_img, gnd_img=gnd_img, n_principle=n_principle,
window_size=window_size, flag='supervised')
################ separate train, valid and test spatial data ###############
[train_spatial_x, train_y], [valid_spatial_x, valid_y], [test_spatial_x, test_y], split_mask = \
train_valid_test(data=[data_spatial, gndtruth], ratio=ratio,
batch_size=batch_size, random_state=123)
# convert them to theano.shared values
train_set_x = theano.shared(value=train_spatial_x, name='train_set_x', borrow=True)
valid_set_x = theano.shared(value=valid_spatial_x, name='valid_set_x', borrow=True)
test_set_x = theano.shared(value=test_spatial_x, name='test_set_x', borrow=True)
train_set_y = theano.shared(value=train_y, name='train_set_y', borrow=True)
valid_set_y = theano.shared(value=valid_y, name='valid_set_y', borrow=True)
test_set_y = theano.shared(value=test_y, name='test_set_y', borrow=True)
dataset_spatial = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
############### separate train, valid and test spectral data ###############
[train_spectral_x, train_y], [valid_spectral_x, valid_y], [test_spectral_x, test_y], split_mask = \
train_valid_test(data=[data_spectral, gndtruth], ratio=ratio,
batch_size=batch_size, random_state=123)
# if we want to merge data, merge it
if merge:
train_spectral_x = numpy.hstack((train_spectral_x, train_spatial_x))
valid_spectral_x = numpy.hstack((valid_spectral_x, valid_spatial_x))
test_spectral_x = numpy.hstack((test_spectral_x, test_spatial_x))
# convert them to theano.shared values
train_set_x = theano.shared(value=train_spectral_x, name='train_set_x', borrow=True)
valid_set_x = theano.shared(value=valid_spectral_x, name='valid_set_x', borrow=True)
test_set_x = theano.shared(value=test_spectral_x, name='test_set_x', borrow=True)
train_set_y = theano.shared(value=train_y, name='train_set_y', borrow=True)
valid_set_y = theano.shared(value=valid_y, name='valid_set_y', borrow=True)
test_set_y = theano.shared(value=test_y, name='test_set_y', borrow=True)
dataset_spectral = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return dataset_spectral, dataset_spatial, extracted_pixel_ind, split_mask
|
b54d174cdfc99bc1ba74e7b8cc09661d6bf18f9c
| 31,943 |
def mvresnet152(**kwargs):
"""Constructs a MVResNet-101 model.
"""
model = MVResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
|
86b579e5f3a80b87677d0aa0c075d4e3eeb81d46
| 31,945 |
def _wer_compute(errors: Tensor, total: Tensor) -> Tensor:
"""Compute the word error rate.
Args:
errors: Number of edit operations to get from the reference to the prediction, summed over all samples
total: Number of words overall references
Returns:
Word error rate score
"""
return errors / total
|
c7a2ea912e27d1867f771b135cb0c8bd9fd7729e
| 31,946 |
def get_coordinator():
"""Creates a coordinator and returns it."""
workflow_queue = Queue.Queue()
complete_queue = Queue.Queue()
coordinator = WorkflowThread(workflow_queue, complete_queue)
coordinator.register(WorkflowItem, workflow_queue)
return coordinator
|
24fa3b52803f1cebae246be8b3988b9568965e6d
| 31,947 |
from typing import Any
from typing import Union
import torch
def tocuda(vars: Any) -> Union[str, torch.Tensor]:
"""Convert tensor to tensor on GPU"""
if isinstance(vars, torch.Tensor):
return vars.cuda()
elif isinstance(vars, str):
return vars
else:
raise NotImplementedError("invalid input type {} for tocuda".format(type(vars)))
|
b7be275fe7e909fa54fc62ed9e5fbe61d3ff4863
| 31,948 |
from typing import Union
from typing import NewType
from typing import Any
from typing import Tuple
from typing import Optional
def _maybe_node_for_newtype(
typ: Union[NewType, Any],
overrides: OverridesT,
memo: MemoType,
forward_refs: ForwardRefs
) -> Tuple[Optional[schema.nodes.SchemaNode], MemoType, ForwardRefs]:
""" newtypes do not change the underlying runtime data type that is used in
calls like isinstance(), therefore it's just enough for us to find
a schema node of the underlying type
"""
rv = None
if insp.is_new_type(typ):
return decide_node_type(typ.__supertype__, overrides, memo, forward_refs)
return rv, memo, forward_refs
|
5cce0197a44ebc2517c509211746a996d6e0235c
| 31,950 |
def read_slug(filename):
"""
Returns the test slug found in specified filename.
"""
with open(filename, "r") as f:
slug = f.read()
return slug
|
e1882d856e70efa8555dab9e422a1348594ffcaf
| 31,951 |
def boolean_value(value):
"""Given a Value, returns whether the object is statically known to be truthy.
Returns None if its truth value cannot be determined.
"""
if isinstance(value, KnownValue):
try:
return bool(value.val)
except Exception:
# Its __bool__ threw an exception. Just give up.
return None
return None
|
c4f971b474943f44c2d5b85def14d91901244e72
| 31,952 |
def _convert_to_dict(tracked_dict):
"""
Recursively convert a Pony ORM TrackedDict to a normal Python dict
"""
if not isinstance(tracked_dict, orm.ormtypes.TrackedDict):
return tracked_dict
return {k: _convert_to_dict(v) for k, v in tracked_dict.items()}
|
40a31ebc96f3010618c3b091e2618d2d6809b82d
| 31,956 |
def sky_coords(cluster):
"""Get the sky coordinates of every star in the cluster
Parameters
----------
cluster : class
StarCluster
Returns
-------
ra,dec,d0,pmra,pmdec,vr0 : float
on-sky positions and velocities of cluster stars
History
-------
2018 - Written - Webb (UofT)
"""
cluster.save_cluster()
    if cluster.origin != "galaxy":
cluster.to_galaxy(starsort=False)
x0, y0, z0 = coords.galcenrect_to_XYZ(
cluster.x, cluster.y, cluster.z, Xsun=8.0, Zsun=0.025
).T
vx0, vy0, vz0 = coords.galcenrect_to_vxvyvz(
cluster.vx,
cluster.vy,
cluster.vz,
Xsun=8.0,
Zsun=0.025,
vsun=[-11.1, 244.0, 7.25],
).T
l0, b0, d0 = coords.XYZ_to_lbd(x0, y0, z0, degree=True).T
ra, dec = coords.lb_to_radec(l0, b0, degree=True).T
vr0, pmll0, pmbb0 = coords.vxvyvz_to_vrpmllpmbb(
vx0, vy0, vz0, l0, b0, d0, degree=True
).T
pmra, pmdec = coords.pmllpmbb_to_pmrapmdec(pmll0, pmbb0, l0, b0, degree=True).T
cluster.return_cluster()
return ra, dec, d0, pmra, pmdec, vr0
|
26a17879a6c2cc84dbf250d9250014f8554fbb9e
| 31,957 |
import time
import requests
import json
def macro_cons_silver_volume():
"""
    Holdings report of the world's largest silver ETF, iShares Silver Trust; data range from 2006-04-29 to date
:return: pandas.Series
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总库存(吨)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 1]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_volume"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["总库存"]
temp_append_df.name = "silver_volume"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
|
1a806095e935ea5e6065dec4dc412775ebaa2b22
| 31,958 |
def preprocess_img(img):
"""Preprocessing function for images."""
return img/255
|
11651a809288d5c3aa776b318099b7eb750d28ec
| 31,961 |
def get_apitools_metadata_from_url(cloud_url):
"""Takes storage_url.CloudUrl and returns appropriate Apitools message."""
messages = apis.GetMessagesModule('storage', 'v1')
if cloud_url.is_bucket():
return messages.Bucket(name=cloud_url.bucket_name)
elif cloud_url.is_object():
generation = int(cloud_url.generation) if cloud_url.generation else None
return messages.Object(
name=cloud_url.object_name,
bucket=cloud_url.bucket_name,
generation=generation)
|
c8ec4dd6c6019467129c03d367c6dd58963d334f
| 31,963 |
def branch(ref=None):
"""Return the name of the current git branch."""
ref = ref or "HEAD"
return local("git symbolic-ref %s 2>/dev/null | awk -F/ {'print $NF'}"
% ref, capture=True)
|
8597a9b38f2a6372aa9dba911163bdb44d4db1f2
| 31,964 |
def fit_anis(celldmsx, Ex, ibrav=4, out=False, type="quadratic", ylabel="Etot"):
"""
An auxiliary function for handling fitting in the anisotropic case
"""
if out:
print (type+" fit")
if type=="quadratic":
a, chi = fit_quadratic(celldmsx, Ex, ibrav, out, ylabel)
elif type=="quartic":
a, chi = fit_quartic(celldmsx, Ex, ibrav, out, ylabel)
else:
print ("Fitting type not implemented")
return None, None
if chi!=None:
print_polynomial(a)
print ("Chi squared: ",chi,"\n")
return a, chi
return a, None
else:
if type=="quadratic":
a, chi = fit_quadratic(celldmsx, Ex, ibrav, False, ylabel)
elif type=="quartic":
a, chi = fit_quartic(celldmsx, Ex, ibrav, False, ylabel)
else:
return None, None
if chi!=None:
return a, chi
return a, None
|
25f88d62876696d1d9e95e1c11c5b687a59cea7b
| 31,965 |
def solicitacao_incluir(discente, sugestao_turma):
"""
    Registers a request of interest from the Discente (student) in the SugestaoTurma (suggested class).
    :param discente: An object of the @Discente class
    :param sugestao_turma: An object of the @SugestaoTurma class
    :return: An object of the @SolicitacaoTurma class and a boolean indicating whether the request was created.
"""
usuario = discente.usuario
solicitacao, created = SolicitacaoTurma.objects.get_or_create(
usuario=usuario, solicitador=discente, turma=sugestao_turma)
return solicitacao, created
|
f346058717a62d6007ea99347043512f4770950d
| 31,966 |
def _evolve_trotter_gates(psi,
layers,
step_size,
num_steps,
euclidean=False,
callback=None):
"""Evolve an initial wavefunction psi via gates specified in `layers`.
If the evolution is euclidean, the wavefunction will be normalized after
each step.
"""
t = 0.0
for i in range(num_steps):
psi = apply_circuit(psi, layers)
if euclidean:
psi = tf.divide(psi, tf.norm(psi))
t += step_size
if callback is not None:
callback(psi, t, i)
return psi, t
|
e1319a01434b0de0c4d90db3881a5a1e3b22d491
| 31,967 |
from tvm.tir.analysis import _ffi_api as _analysis_ffi_api
import logging
def test_tuning_gpu_inherits_pass_context(target, dev):
"""Autotvm tuner inherits PassContexts but also adds a gpu verification pass by default.
Test that using PassContext inherits passes properly but also runs gpu verification pass.
"""
@pass_instrument
class PassInstrumentChecker:
"""Pass Instrument that simply sees if it's been run."""
def __init__(self):
self.has_been_run = False
def run_after_pass(self, mod, info):
self.has_been_run = True
class GPUVerifyPassMocked:
"""Context manager that mocks tir.analysis.verify_gpu_code meant
to verify the pass has been run. This is done by patching the ffi func handles."""
FFI_FUNC_HANDLE = "tir.analysis.verify_gpu_code"
FUNC_NAME = "verify_gpu_code"
def __init__(self) -> None:
self.old_impl = tvm._ffi.get_global_func(self.FFI_FUNC_HANDLE)
self.has_been_run = False
def gpu_verify_pass_mocked(self):
"""Get the replacement for the gpu verification pass."""
def _gpu_verify_pass_mocked(*args, **kwargs):
self.has_been_run = True
return self.old_impl(*args, **kwargs)
return _gpu_verify_pass_mocked
def __enter__(self):
tvm._ffi.register_func(
self.FFI_FUNC_HANDLE, self.gpu_verify_pass_mocked(), override=True
)
# Also overwrite the python bindings
setattr(
_analysis_ffi_api, self.FUNC_NAME, tvm._ffi.get_global_func(self.FFI_FUNC_HANDLE)
)
def __exit__(self, *args, **kwargs):
# Restore FFI status back to normal
tvm._ffi.register_func(self.FFI_FUNC_HANDLE, self.old_impl, override=True)
setattr(_analysis_ffi_api, self.FUNC_NAME, self.old_impl)
class OverwrittenBuildFunc(measure_methods._WrappedBuildFunc):
"""BuildFunc that mocks and patches as necessary to test proper passes are run."""
def __call__(self, measure_input, tmp_dir, **kwargs):
instrument = PassInstrumentChecker()
mocked_pass_checker = GPUVerifyPassMocked()
with mocked_pass_checker:
with PassContext(instruments=[instrument]):
regular_result = super().__call__(measure_input, tmp_dir, **kwargs)
# Check instrument has been run, meaning context was inherited by builder
assert instrument.has_been_run
# But also check the gpu verification pass has been run
# (which was not in the inherited ctx)
assert mocked_pass_checker.has_been_run
return regular_result
class MockedLocalBuilder(measure_methods.LocalBuilder):
"""As measure_methods.LocalBuilder but overwrites the PassContext for testing."""
def __init__(
self,
timeout=10,
n_parallel=None,
build_kwargs=None,
build_func="default",
do_fork=False,
runtime=None,
):
super().__init__(timeout, n_parallel, build_kwargs, build_func, do_fork, runtime)
self.build_func = OverwrittenBuildFunc(tar.tar, runtime)
def runner(target, dev):
task, target = get_sample_task(target, None)
logging.info("task config space: %s", task.config_space)
# Note: we use the MockedLocalBuilder here instead of autotvm.LocalBuilder()
measure_option = autotvm.measure_option(MockedLocalBuilder(), autotvm.LocalRunner())
results = []
tuner = RandomTuner(task)
tuner.tune(
n_trial=1,
measure_option=measure_option,
callbacks=(lambda _tuner, _inputs, rs: results.extend(rs),),
)
assert len(results) == 1
run_test_with_all_multiprocessing(runner, target, dev)
|
8c62f18293b24601a8ee5296f2e4984bed7c20e4
| 31,971 |
def make_scae(config):
"""Builds the SCAE."""
  # return: model, presumably the full model to be trained
  # this seems to transform the training set
  # what are canvas & template size used for here?
  img_size = [config.canvas_size] * 2
  template_size = [config.template_size] * 2
  # this is only part of the part encoder; it uses convolutions, just 4 conv layers, padding handled automatically, snt is handy!!
'''
Sequential(
(0): Conv2d(1, 128, kernel_size=(3, 3), stride=(2, 2))
(1): ReLU()
(2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2))
(3): ReLU()
(4): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))
(5): ReLU()
(6): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))
(7): ReLU() )
// input: [1,40,40]
// output: [128,5,5]
'''
cnn_encoder = snt.nets.ConvNet2D(
output_channels=[128] * 4,
kernel_shapes=[3],
strides=[2, 2, 1, 1],
      paddings=[snt.VALID], # is this VALID padding what you think it is?
activate_final=True)
  # !!! fairly tricky: construct the part encoder, which includes:
  #
part_encoder = primary.CapsuleImageEncoder(
cnn_encoder,
config.n_part_caps,
config.n_part_caps_dims,
n_features=config.n_part_special_features,
similarity_transform=False,
encoder_type='conv_att')
  # keep reading below...
part_decoder = primary.TemplateBasedImageDecoder(
output_size=img_size,
template_size=template_size,
n_channels=config.n_channels,
learn_output_scale=False,
colorize_templates=config.colorize_templates,
use_alpha_channel=config.use_alpha_channel,
template_nonlin=config.template_nonlin,
color_nonlin=config.color_nonlin,
)
# stacked auto encoder
obj_encoder = SetTransformer(
n_layers=3,
n_heads=1,
n_dims=16,
n_output_dims=256,
n_outputs=config.n_obj_caps,
layer_norm=True,
dropout_rate=0.)
# decoder
obj_decoder = ImageCapsule(
config.n_obj_caps,
2,
config.n_part_caps,
n_caps_params=config.n_obj_caps_params,
n_hiddens=128,
learn_vote_scale=True,
deformations=True,
noise_type='uniform',
noise_scale=4.,
similarity_transform=False)
  # is simply stacking these together here enough?
model = ImageAutoencoder(
primary_encoder=part_encoder,
primary_decoder=part_decoder,
encoder=obj_encoder,
decoder=obj_decoder,
input_key='image',
label_key='label',
n_classes=10,
dynamic_l2_weight=10,
caps_ll_weight=1.,
vote_type='enc',
pres_type='enc',
stop_grad_caps_inpt=True,
stop_grad_caps_target=True,
prior_sparsity_loss_type='l2',
prior_within_example_sparsity_weight=config.prior_within_example_sparsity_weight, # pylint:disable=line-too-long
prior_between_example_sparsity_weight=config.prior_between_example_sparsity_weight, # pylint:disable=line-too-long
posterior_sparsity_loss_type='entropy',
posterior_within_example_sparsity_weight=config.posterior_within_example_sparsity_weight, # pylint:disable=line-too-long
posterior_between_example_sparsity_weight=config.posterior_between_example_sparsity_weight, # pylint:disable=line-too-long
)
return model
|
415ab9d12806f250fa786872b0a3a2d6ecd82f72
| 31,972 |
import requests
def get_materialization_versions(dataset_name, materialization_endpoint=None):
""" Gets materialization versions with timestamps """
if materialization_endpoint is None:
materialization_endpoint = analysisdatalink.materialization_endpoint
url = '{}/api/dataset/{}'.format(materialization_endpoint, dataset_name)
r = requests.get(url)
assert r.status_code == 200
versions = {d['version']:d['time_stamp'] for d in r.json() if d['valid']}
return versions
|
32d6976e864a9926f6b2e51ec7a65c33e2b86832
| 31,973 |
import scipy
def WilcoxonRankSum(tpms):
"""May be very slow for large datasets."""
wrs = tpms[['SMTSD', 'ENSG']].copy().drop_duplicates().sort_values(by=['SMTSD', 'ENSG'])
wrs.reset_index(drop=True, inplace=True)
wrs['stat'] = pd.Series(dtype=float)
wrs['pval'] = pd.Series(dtype=float)
for smtsd in wrs.SMTSD.unique():
tpms_this = tpms[tpms.SMTSD==smtsd]
for ensg in tpms_this.ENSG.unique():
vals_f = tpms_this.TPM[(tpms_this.ENSG==ensg) & (tpms_this.SEX=="F")]
vals_m = tpms_this.TPM[(tpms_this.ENSG==ensg) &(tpms_this.SEX=="M")]
stat, pval = scipy.stats.ranksums(x=vals_f, y=vals_m)
#stat, pval = scipy.stats.mannwhitneyu(x=vals_f.rank(), y=vals_m.rank(), use_continuity=True, alternative="two-sided")
wrs.stat.loc[(wrs.SMTSD==smtsd) & (wrs.ENSG==ensg)] = stat
wrs.pval.loc[(wrs.SMTSD==smtsd) & (wrs.ENSG==ensg)] = pval
return wrs
|
89733a49bc8e0e0a1d492b21ad6310746e4d23fe
| 31,974 |
def filter_image(image, filter_DFT):
"""
Just takes the DFT of a filter and applies the filter to an image
This may optionally pad the image so as to match the number of samples in the
filter DFT. We should make sure this is greater than or equal to the size of
the image.
"""
assert image.dtype == 'float32'
assert filter_DFT.shape[0] >= image.shape[0], "don't undersample DFT"
assert filter_DFT.shape[1] >= image.shape[1], "don't undersample DFT"
filtered_with_padding = np.real(
np.fft.ifft2(np.fft.ifftshift(
filter_DFT * np.fft.fftshift(np.fft.fft2(image, filter_DFT.shape))),
filter_DFT.shape)).astype('float32')
return filtered_with_padding[0:image.shape[0], 0:image.shape[1]]
|
0c6d2e46640fbfd7757231eb398c90fea0ca0a65
| 31,975 |
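A usage sketch for filter_image applying an ideal low-pass mask; it assumes the function and its numpy import are available, and the image size and cut-off radius are arbitrary choices for illustration.

import numpy as np

rng = np.random.default_rng(0)
image = rng.random((128, 128)).astype('float32')

# Centered ideal low-pass mask with the same number of samples as the image
rows, cols = np.ogrid[:128, :128]
low_pass_dft = (np.hypot(rows - 64, cols - 64) < 20).astype('float32')

smoothed = filter_image(image, low_pass_dft)
print(smoothed.shape, smoothed.dtype)  # (128, 128) float32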
def _create_diff_matrix(n, order=1):
"""Creates n x n matrix subtracting adjacent vector elements
Example:
>>> print(_create_diff_matrix(4, order=1))
[[ 1 -1 0 0]
[ 0 1 -1 0]
[ 0 0 1 -1]]
>>> print(_create_diff_matrix(4, order=2))
[[ 1 -1 0 0]
[-1 2 -1 0]
[ 0 -1 2 -1]
[ 0 0 -1 1]]
"""
if order == 1:
diff_matrix = -1 * np.diag(np.ones(n - 1), k=1).astype("int")
np.fill_diagonal(diff_matrix, 1)
diff_matrix = diff_matrix[:-1, :]
elif order == 2:
diff_matrix = -1 * np.diag(np.ones(n - 1), k=1).astype("int")
diff_matrix = diff_matrix + -1 * np.diag(np.ones(n - 1), k=-1).astype("int")
np.fill_diagonal(diff_matrix, 2)
diff_matrix[-1, -1] = 1
diff_matrix[0, 0] = 1
return diff_matrix
|
0d241d37075e37d342e2601fc307277dd92b180f
| 31,977 |
def fourier_frequencies_from_times(times):
"""
Calculates the Fourier frequencies from a set of times. These frequencies are in 1/units, where
`units` is the units of time in `times`. Note that if the times are not exactly equally spaced,
then the Fourier frequencies are ill-defined, and this returns the frequencies based on assuming
that the time-step is the mean time-step. This is reasonable for small deviations from equally
spaced times, but not otherwise.
Parameters
----------
times : list
The times from which to calculate the frequencies
Returns
-------
array
The frequencies associated with Fourier analysis on data with these timestamps.
"""
timestep = _np.mean(_np.diff(times)) # The average time step.
numtimes = len(times) # The number of times steps
return frequencies_from_timestep(timestep, numtimes)
|
4976483355af4652eafe28c955b2ca847bc140af
| 31,978 |
import copy
def fission(candidate_seed, pop, n, max_seed_area):
"""
In fusion, we use the convention of putting one seed on
the left and the other seed on the right, before we fuse
the two seeds. In fission, we assume that fission will
split the left part from the right part. Find the most
sparse column in the candidate seed and split the seed along
this column. If both parts are at least the minimum allowed
seed size, randomly choose one of them. If only one part
is at least the minimum allowed seed size, choose that
one part. If neither part is at least the minimum allowed
seed size, then default to sexual reproduction.
"""
# The most fit member of the tournament.
s0 = candidate_seed
# Minimum xspan. Only xspan is relevant, since we are splitting
# left and right parts.
min_s_xspan = mparam.min_s_xspan
# See whether the seed is big enough to split. If it is too
# small, then default to sexual reproduction.
if (s0.xspan <= min_s_xspan):
return sexual(candidate_seed, pop, n, max_seed_area)
# In the seed matrix, x = row index, y = column index.
# In Golly, g.setcell(g_x, g_y, s_state) refers to the cell
# in horizontal position g_x and vertical position g_y, where
# g_x increases from left to right and g_y increases from top
# to bottom. Unfortunately, x in the seed matrix ranges
# vertically over matrix rows and y in the seed matrix ranges
# horizontally over matrix columns, whereas x in Golly ranges
# horizontally and y in Golly ranges vertically.
#
# Speaking in Golly terms, we want to split the seed along
# any purple border (cells in state 5) such that the border
# spans the entire seed in a straight line. Due to the design
# of fusion(), the border will be a vertical purple stripe in
# Golly.
#
# There may be several vertical purple strips (that is, borders,
# buffer zones, lines of cells in state 5) in the seed.
# We will take the first one that we find.
border_line = -1 # no border found yet
border_colour = 5 # purple, state 5
  for x in range(s0.xspan):
    for y in range(s0.yspan):
      if (s0.cells[x][y] != border_colour):
        break # not a border -- try the next x
    else:
      # the inner loop finished without breaking, so every cell in this
      # column is purple -- we have found a border
      border_line = x
      break # stop looking
# If no border was found, then use sexual reproduction
if (border_line == -1):
return sexual(candidate_seed, pop, n, max_seed_area)
# Left and right parts.
left_cells = s0.cells[0:border_line, :]
right_cells = s0.cells[(border_line + 1):, :]
# Initialize a seed for the left or right part.
s1 = copy.deepcopy(s0)
# If both parts are big enough, randomly choose one of them.
if ((left_cells.shape[0] >= min_s_xspan) \
and (right_cells.shape[0] >= min_s_xspan)):
if (rand.uniform(0, 1) < 0.5):
s1.cells = left_cells
else:
s1.cells = right_cells
# If only the left part is big enough, use the left part.
elif (left_cells.shape[0] >= min_s_xspan):
s1.cells = left_cells
# If only the right part is big enough, use the right part.
elif (right_cells.shape[0] >= min_s_xspan):
s1.cells = right_cells
# If neither part is big enough, use sexual reproduction
else:
return sexual(candidate_seed, pop, n, max_seed_area)
# Set the correct dimensions for the new seed
s1.xspan = s1.cells.shape[0]
s1.yspan = s1.cells.shape[1]
# Mutate s1
prob_grow = mparam.prob_grow
prob_flip = mparam.prob_flip
prob_shrink = mparam.prob_shrink
seed_density = mparam.seed_density
mutation_rate = mparam.mutation_rate
s1 = s1.mutate(prob_grow, prob_flip, prob_shrink, seed_density, mutation_rate)
# Update count of living cells
s1.num_living = s1.count_ones()
# Find the least fit old seed in the population. It's not a problem
# if there are ties.
s2 = find_worst_seed(pop)
# Now we have:
#
# s0 = seed 0
# s1 = left or right side of seed 0
# s2 = the least fit old seed, which will be replaced by s1
#
# Replace the least fit old seed in the population (s2) with the
# chosen part (s1).
i = s2.address # find the position of the old seed (s2)
s1.address = i # copy the old position of the old seed into s1
pop[i] = s1 # replace s2 (old seed) in population (pop) with s1
# Build a history for the new seed, by matching it against all seeds
# in the population.
width_factor = mparam.width_factor
height_factor = mparam.height_factor
time_factor = mparam.time_factor
num_trials = mparam.num_trials
pop_size = len(pop)
for j in range(pop_size):
update_history(g, pop, i, j, width_factor, height_factor, \
time_factor, num_trials)
update_similarity(pop, i, j)
# Report on the new history of the new seed
message = "Run: {}".format(n) + \
" Whole fitness (s0): {:.3f}".format(s0.fitness()) + \
" Fragment fitness (s1): {:.3f}".format(s1.fitness()) + \
" Replaced seed fitness (s2): {:.3f}\n".format(s2.fitness())
# Return with the updated population and a message.
return [pop, message]
|
f056c90f68f91ba1b3f81f76c3599f8f7a3aee51
| 31,979 |
import numpy as np
def iou(box, clusters):
"""
Calculates the Intersection over Union (IoU) between a box and k clusters.
param:
box: tuple or array, shifted to the origin (i. e. width and height)
clusters: numpy array of shape (k, 2) where k is the number of clusters
return:
    numpy array of shape (k,) where k is the number of clusters
"""
x = np.minimum(clusters[:, 0], box[0])
y = np.minimum(clusters[:, 1], box[1])
if np.count_nonzero(x == 0) > 10 or np.count_nonzero(y == 0) > 10:
raise ValueError("Box has no area")
intersection = x * y
box_area = box[0] * box[1]
cluster_area = clusters[:, 0] * clusters[:, 1]
iou_ = np.true_divide(intersection, box_area + cluster_area - intersection + 1e-10)
# iou_ = intersection / (box_area + cluster_area - intersection + 1e-10)
return iou_
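# Usage sketch (hypothetical box and clusters, not part of the original source):
example_clusters = np.array([[10, 10], [20, 40], [80, 60]])
example_iou = iou((15, 30), example_clusters)
# each entry is the IoU between the 15x30 box and one cluster box,
# with both boxes anchored at the origin.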
|
d181cf7234602f4b3f7d5b6c0a25cd9e15c2b5ed
| 31,980 |
def munge_av_status(av_statuses):
"""Truncate and lowercase availability_status"""
return [a[20:].lower() for a in av_statuses]
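# Usage sketch (hypothetical status string; the assumption here is that the
# first 20 characters are the prefix being stripped):
munge_av_status(["availability_status.AVAILABLE"])  # -> ['available']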
|
52a00fc6733015c3618a2a394371ea9387d92fc0
| 31,981 |
def cdf(vals, reverse=False):
"""Computes the CDF of a list of values"""
vals = sorted(vals, reverse=reverse)
tot = float(len(vals))
x = []
y = []
for i, x2 in enumerate(vals):
x.append(x2)
y.append((i+1) / tot)
return x, y
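# Usage sketch (not part of the original source): empirical CDF of a small sample.
example_x, example_y = cdf([3, 1, 2, 4])
# example_x -> [1, 2, 3, 4], example_y -> [0.25, 0.5, 0.75, 1.0]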
|
3cc64dcb8876f7620f02da873e29569e77477823
| 31,982 |
def user_document_verification(request, document_id):
"""Display user's document information request.
Args:
request: URL request
        document_id: document ID in firebase
Returns:
Render user document verification view.
"""
# Document Data
document_ref = db.collection("document_request")
user_document_data = document_ref.document(document_id).get().to_dict()
if user_document_data["user_verified"]:
return HttpResponseRedirect(
reverse(
"appointment:user_issuing_list",
kwargs={"document_id": user_document_data["document_id"]},
)
)
else:
# User Collection
user_ref = db.collection("users")
user_data = user_ref.document(user_document_data["user_id"]).get().to_dict()
document_userdata_list = []
for document in user_document_data["document"]:
document_userdata_list.append(document["document_name"])
user_document_data["document_list"] = document_list
return render(
request,
"appointment/user_document_request.html",
{"document_data": user_document_data, "user_data": user_data},
)
|
6610d21e1a0e35aaf3203814c20687a5b48e5d96
| 31,983 |
def bulk_records(
name: str,
bulk: TextClassificationBulkData,
common_params: CommonTaskQueryParams = Depends(),
service: TextClassificationService = Depends(
TextClassificationService.get_instance
),
datasets: DatasetsService = Depends(DatasetsService.get_instance),
current_user: User = Security(auth.get_user, scopes=[]),
) -> BulkResponse:
"""
Includes a chunk of record data with provided dataset bulk information
Parameters
----------
name:
The dataset name
bulk:
The bulk data
common_params:
Common query params
service:
the Service
datasets:
The dataset service
current_user:
Current request user
Returns
-------
Bulk response data
"""
task = TASK_TYPE
owner = current_user.check_workspace(common_params.workspace)
try:
dataset = datasets.find_by_name(
current_user, name=name, task=task, workspace=owner
)
datasets.update(
user=current_user,
dataset=dataset,
tags=bulk.tags,
metadata=bulk.metadata,
)
except EntityNotFoundError:
dataset_class = TaskFactory.get_task_dataset(task)
dataset = dataset_class.parse_obj({**bulk.dict(), "name": name})
dataset.owner = owner
datasets.create_dataset(user=current_user, dataset=dataset)
result = service.add_records(
dataset=dataset,
records=bulk.records,
)
return BulkResponse(
dataset=name,
processed=result.processed,
failed=result.failed,
)
|
aa82e779b5bd9286bd1b456f151f3b0679b3582b
| 31,986 |
def environment_list(p_engine, p_username, format, envname):
"""
Print list of environments
    param1: p_engine: engine name from configuration
    param2: p_username: username used to connect to the engine
    param3: format: output format
    param4: envname: environment name to list, all if None
return 0 if environment found
"""
ret = 0
enginelist = get_list_of_engines(p_engine, p_username)
if enginelist is None:
return 1
data = DataFormatter()
data_header = [
("Engine name", 30),
("Environment name", 30),
("Application name", 30)
]
data.create_header(data_header)
data.format_type = format
for engine_tuple in enginelist:
engine_obj = DxMaskingEngine(engine_tuple)
if engine_obj.get_session():
continue
envlist = DxEnvironmentList()
# load all objects
# envlist.LoadEnvironments()
if envname is None:
environments = envlist.get_allref()
else:
environment = envlist.get_environmentId_by_name(envname)
if environment is None:
ret = ret + 1
continue
environments = [environment]
for envref in environments:
envobj = envlist.get_by_ref(envref)
data.data_insert(
engine_tuple[0],
envobj.environment_name,
envobj.application_name
)
print("")
print (data.data_output(False))
print("")
return ret
|
bf1ca059f0fd919445df4ee931450c7a07619707
| 31,987 |
def loss(logits, labels, weight_decay_factor, class_weights = None):
"""
Total loss:
----------
Args:
logits: Tensor, predicted [batch_size * height * width, num_classes]
labels: Tensor, ground truth [batch_size, height, width, 1]
weight_decay_factor: float, factor with which weights are decayed
class_weights: Tensor, weighting of class for loss [num_classes, 1] or None
Returns:
total_loss: Segmentation + Classification losses + WeightDecayFactor * L2 loss
"""
    segment_loss = segmentation_loss(logits, labels, class_weights)
total_loss = segment_loss + weight_decay_factor * l2_loss()
tf.summary.scalar("loss/total", total_loss)
return total_loss
|
91fae015aeb5bed4c73cf2aa4d6e62a5c1d4580c
| 31,988 |
def negative(num):
"""assumes num is a numeric
returns a boolean, True if num is negative, else False"""
return num < 0
|
dc8b789b6dbd4d158482de6d4af26f48f9e8cc5b
| 31,989 |
import numpy as np
def readSudokus(filename):
    """
    Returns the first sudoku of the file with given name as a 9x9 numpy array
    """
f = open(filename)
res = None
txt = f.readline().strip()
if txt != "":
res = [[int(txt[i + j * 9]) for i in range(9)] for j in range(9)]
f.close()
return np.array(res)
|
564da1dd1fc035ec692986eaa8955985acd5b1ce
| 31,990 |
from flask.cli import ScriptInfo
def script_info(base_app):
"""Get ScriptInfo object for testing a CLI command.
Scope: module
.. code-block:: python
def test_cmd(script_info):
runner = CliRunner()
result = runner.invoke(mycmd, obj=script_info)
assert result.exit_code == 0
"""
return ScriptInfo(create_app=lambda info: base_app)
|
5c5298fcd816538e4890a1d46d51aea6d73702e6
| 31,991 |
def measured_points(idf, return_periods, interim_results=None, max_duration=None):
"""
    get the rainfall heights computed directly from the interim results u and w, without fitting the parametric formulation
Args:
idf (IntensityDurationFrequencyAnalyse): idf class
return_periods (float | np.array | list | pd.Series): return period in [a]
interim_results (pd.DataFrame): data with duration as index and u & w as data
max_duration (float): max duration in [min]
Returns:
pd.Series: series with duration as index and the height of the rainfall as data
"""
if interim_results is None:
interim_results = get_interim_results_from_parameters(idf.parameters)
if max_duration is not None:
interim_results = interim_results.loc[:max_duration].copy()
return pd.Series(index=interim_results.index,
data=interim_results['u'] + interim_results['w'] * np.log(return_periods))
|
ba0e1cbddf6fd7e23abbfba4954683ada69cea2f
| 31,992 |
def get_charpixel():
""" Render a single charpixel """
if options.table == 'input':
c = getch()
if c in ['\n','\t']:
print(c)
else:
c = choice( CHARTABLES[ options.table ] )
return c.encode('utf-8')
|
deb0475dac66c10c1d9edea45cce1cedffbafd1d
| 31,993 |
def test_custom_fixer():
""" Test custom ParseFixer
Verify that read_csv uses custom ParseFixer
"""
class fix_pi(ParseFixer):
def __init__(self):
super().__init__()
# augment existing method, simple fix float
def fix_illegal_cell_value(self, vtype, value):
if vtype == "float":
return 22.0 / 7.0
else:
fix_value = ParseFixer.fix_illegal_cell_value(self, vtype, value)
return fix_value
fix = fix_pi()
fix.stop_on_errors = False
fix._called_from_test = True
with open(input_dir() / "types3.csv", "r") as fh:
g = read_csv(fh, to="jsondata", fixer=fix)
for tp, tt in g:
if tp == BlockType.TABLE:
assert tt["columns"]["num"]["values"][2] == 22.0 / 7.0
assert tt["columns"]["flt"]["values"][0] == 22.0 / 7.0
assert tt["columns"]["flt"]["values"][0] == 22.0 / 7.0
assert tt["columns"]["flt2"]["values"][2] == 22.0 / 7.0
with pytest.raises(InputError):
# test read_csv w. class (not instance) of fixer
# class has default stop_on_errors = True
with open(input_dir() / "types3.csv", "r") as fh:
g = read_csv(fh, to="jsondata", fixer=fix_pi)
for tp, tt in g:
pass
|
c137e190dffef686b04b2f515c6c65ecd2d50879
| 31,994 |
def context_factory(policy, name):
"""Factory function for creating context objects."""
if not isinstance(name, qpol.qpol_context_t):
raise TypeError("Contexts cannot be looked-up.")
return Context(policy, name)
|
a528978876bdbaa0d2e249f70a056d49fc894349
| 31,995 |
import email
from email.header import decode_header
def decode_mail_header(value, default_charset='us-ascii'):
"""Decode a header value into a unicode string."""
try:
headers = decode_header(value)
except email.errors.HeaderParseError:
return value.encode(default_charset, 'replace').decode(default_charset)
else:
for index, (text, charset) in enumerate(headers):
if isinstance(text, bytes):
try:
headers[index] = text.decode(
charset or default_charset, 'replace')
except LookupError:
# if the charset is unknown, force default
headers[index] = text.decode(default_charset, 'replace')
else:
headers[index] = text
return "".join(headers)
|
657a45da883bd35d99642af7cfa1ff5ed9200fbe
| 31,996 |
def xyz2xyzr(xyz: np.ndarray, *,
axis: int=None,
illuminant: Illuminant=get_default_illuminant(),
observer: Observer=get_default_observer()) -> np.ndarray:
"""
Convert XYZ to normalized XYZ reflectance
:param xyz: the raw xyz values
:param axis: the axis that the XYZ values lie along
:param illuminant: the illuminant
:param observer: the observer
    :return: the normalized XYZ reflectance
"""
if axis is None:
axis = get_matching_axis(xyz.shape, 3)
new_shape = [1] * len(xyz.shape)
new_shape[axis] = -1
white_point = illuminant.get_white_point(observer).reshape(new_shape)
return xyz / white_point
|
76e0b3c095ac48a44b31349efe259b70a427cfc0
| 31,997 |
import time
import os.path as op
def last_updated(a):
"""
Check the time since file was last updated.
"""
return time.time() - op.getmtime(a)
|
653eb5b68e00c57165b413d1de1ed0d8afee41f0
| 31,998 |
import functools
def lstm_acd_decomposition(inp, model):
"""
inp: tf.Tensor(dtype=np.int32, shape=(1, -1)) tokenized input
model: tf.keras.Model or equivalent
"""
l = inp.numpy().size
e, k, rk, b, dw, db = model.weights
embed_inp = tf.nn.embedding_lookup(params=e, ids=inp)
return acd_1d_decomposition(
functools.partial(lstm_score,
embed_inp=embed_inp,
k=k,
rk=rk,
b=b,
dw=dw,
db=db), l)
|
51d55fe155290b3ec1bc7f3b67b562e4b6ae4c23
| 31,999 |
import pyarrow as pa
def ST_IsValid(geos):
"""
Check if geometry is of valid geometry format.
:type geos: Series(dtype: object)
:param geos: Geometries in WKB form.
:rtype: Series(dtype: bool)
:return: True if geometry is valid.
:example:
>>> import pandas
>>> import arctern
>>> data = pandas.Series(["POINT (1.3 2.6)", "POINT (2.6 4.7)"])
>>> rst = arctern.ST_IsValid(arctern.ST_GeomFromText(data))
>>> print(rst)
0 true
1 true
dtype: bool
"""
arr_geos = pa.array(geos, type='binary')
return arctern_caller(arctern_core_.ST_IsValid, arr_geos)
|
466f29367dbdc7c09581f7bedda72fe729bdd73d
| 32,000 |
import time
def get_framerate(has_already_started,
start_time,
frame_counter,
frame_rate,
frame_num=5,
decimal_round_num=2):
""" Returns current framerate of video based on
time elapsed in frame_num frames.
Works in a while loop for each frame"""
if has_already_started:
        if frame_counter % frame_num == 0:
curr_time = time.time()
frame_rate = frame_counter/(curr_time - start_time)
frame_rate = round(frame_rate, decimal_round_num)
frame_counter += 1
return has_already_started, start_time, frame_counter, frame_rate
else:
has_already_started = True
start_time = time.time()
frame_counter = 0
frame_rate = 0
return has_already_started, start_time, frame_counter, frame_rate
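# Usage sketch (not part of the original source): the state is threaded through
# each iteration of a capture loop.
# started, t0, frames, fps = False, 0.0, 0, 0.0
# while capturing:
#     started, t0, frames, fps = get_framerate(started, t0, frames, fps)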
|
61db421be9e8d5a0e810a79875eac2b776be99ca
| 32,002 |
def check_for_solve(grid):
""" checks if grid is full / filled"""
for x in range(9):
for y in range(9):
if grid[x][y] == 0:
return False
return True
|
5fc4a8e7a2efaa016065fc0736aa5bdb7d4c92f8
| 32,003 |
def Sn(i, length):
"""Convert an int to a binary string."""
s = ''
while i != 0:
digit = i & 0xff
i >>= 8
s += chr(digit)
if len(s) > length:
raise Exception("Integer too big to fit")
while len(s) < length:
s += chr(0)
return s
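# Usage sketch (not part of the original source): 258 = 0x0102 packed
# little-endian and zero-padded to four characters.
Sn(258, 4)  # -> '\x02\x01\x00\x00'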
|
607c2b8e82379db091505d7422edc17ea121bc3f
| 32,004 |
def parse_args():
"""Parse command line arguments"""
parser = ArgumentParser(
description='Print contents of a parsed config file',
formatter_class=ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'pipeline', metavar='CONFIGFILE',
nargs='?',
default='settings/pipeline/example.cfg',
help='Pipeline config file to parse',
)
parser.add_argument(
'-v',
action='count',
default=Levels.WARN,
help='Set verbosity level',
)
kwargs = vars(parser.parse_args())
set_verbosity(kwargs.pop('v'))
return kwargs
|
67462f2078ede7dfb9d9e0956b32f1eb09b2a747
| 32,006 |
import copy
def _normalize_annotation(annotation, tag_index):
"""
Normalize the annotation anchorStart and anchorEnd,
in the sense that we start to count the position
from the beginning of the sentence
and not from the beginning of the disambiguated page.
:param annotation: Annotation object
:param tag_index: start index (int)
:return: a new Annotation object
"""
# norm_annotation = copy.deepcopy(annotation)
norm_annotation = annotation
norm_annotation.anchorStart = int(annotation.anchorStart) - tag_index
norm_annotation.anchorEnd = int(annotation.anchorEnd) - tag_index
return copy.copy(norm_annotation)
|
a7da5810711ada97a2ddcc308be244233fe813be
| 32,007 |
import pandas as pd
def read_lc(csvfile, comment='|'):
"""
Read a light curve csv file from gAperture.
:param csvfile: The name of the csv file to read.
:type csvfile: str
:param comment: The character used to denote a comment row.
:type comment: str
:returns: pandas DataFrame -- The contents of the csv file.
"""
return pd.io.parsers.read_csv(csvfile, comment=comment)
|
74e53cbfe902e9d23567569ec0f4c5ef5ef75baa
| 32,008 |
import numpy as np
import pandas as pd
def check_cutoffs(cutoffs):
"""Validates the cutoff
Parameters
----------
cutoffs : np.ndarray or pd.Index
Returns
----------
cutoffs (Sorted array)
Raises
----------
ValueError
        If cutoffs is not an instance of np.ndarray or pd.Index
If cutoffs array is empty.
"""
if not isinstance(cutoffs, (np.ndarray, pd.Index)):
raise ValueError(
f"`cutoffs` must be a np.array or pd.Index, " f"but found: {type(cutoffs)}"
)
assert np.issubdtype(cutoffs.dtype, np.integer)
if len(cutoffs) == 0:
raise ValueError("Found empty `cutoff` array")
return np.sort(cutoffs)
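# Usage sketch (not part of the original source): integer cutoffs are validated
# and returned sorted.
check_cutoffs(np.array([10, 3, 7]))  # -> array([ 3,  7, 10])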
|
db0a3a477b27883aa1d29486083cf3e6c993e021
| 32,009 |
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
|
0e1e6589bc37d90b0ac8249e11dee54140efd86a
| 32,010 |
def _get_videos(course, pagination_conf=None):
"""
Retrieves the list of videos from VAL corresponding to this course.
"""
videos, pagination_context = get_videos_for_course(
str(course.id),
VideoSortField.created,
SortDirection.desc,
pagination_conf
)
videos = list(videos)
# This is required to see if edx video pipeline is enabled while converting the video status.
course_video_upload_token = course.video_upload_pipeline.get('course_video_upload_token')
transcription_statuses = ['transcription_in_progress', 'transcript_ready', 'partial_failure', 'transcript_failed']
# convert VAL's status to studio's Video Upload feature status.
for video in videos:
# If we are using "new video workflow" and status is in `transcription_statuses` then video encodes are ready.
# This is because Transcription starts once all the encodes are complete except for YT, but according to
# "new video workflow" YT is disabled as well as deprecated. So, Its precise to say that the Transcription
# starts once all the encodings are complete *for the new video workflow*.
is_video_encodes_ready = not course_video_upload_token and (video['status'] in transcription_statuses)
# Update with transcript languages
video['transcripts'] = get_available_transcript_languages(video_id=video['edx_video_id'])
video['transcription_status'] = (
StatusDisplayStrings.get(video['status']) if is_video_encodes_ready else ''
)
# Convert the video status.
video['status'] = convert_video_status(video, is_video_encodes_ready)
return videos, pagination_context
|
21774a476424b8f68f67c368fb72343fb9cfd552
| 32,011 |
import torch
import pyro
def sample_stacking_program(num_primitives, device, address_suffix="", fixed_num_blocks=False):
"""Samples blocks to stack from a set [0, ..., num_primitives - 1]
*without* replacement. The number of blocks is stochastic and
can be < num_primitives.
Args
num_primitives (int)
device
address_suffix
Returns [num_blocks] (where num_blocks is stochastic and between 1 and num_primitives
(inclusive))
"""
# Init
stacking_program = []
available_primitive_ids = list(range(num_primitives))
if fixed_num_blocks:
num_blocks = num_primitives
else:
# Sample num_blocks uniformly from [1, ..., num_primitives] (inclusive)
raw_num_blocks_logits = torch.ones((num_primitives,), device=device)
raw_num_blocks = pyro.sample(
f"raw_num_blocks{address_suffix}",
pyro.distributions.Categorical(logits=raw_num_blocks_logits),
)
num_blocks = raw_num_blocks + 1
# Sample primitive ids
for block_id in range(num_blocks):
# Sample primitive
raw_primitive_id_logits = torch.ones((len(available_primitive_ids),), device=device)
raw_primitive_id = pyro.sample(
f"raw_primitive_id_{block_id}{address_suffix}",
pyro.distributions.Categorical(logits=raw_primitive_id_logits),
)
primitive_id = available_primitive_ids.pop(raw_primitive_id)
# Add to the stacking program based on previous action
stacking_program.append(primitive_id)
return torch.tensor(stacking_program, device=device)
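# Usage sketch (not part of the original source; assumes torch and pyro are
# installed): sample a program over 4 primitives on the CPU.
# sample_stacking_program(4, torch.device("cpu"))  ->  e.g. tensor([2, 0, 3])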
|
f5927edc11b2e20fcfb4b19b6ecddd92ba911841
| 32,012 |
def get_mmr_address(rn, m0m1):
"""Return address of an memory-mapped register and its size in bits.
"""
mmr_map = { 0b00 : 0xf0400,
0b01 : 0xf0500,
0b10 : 0xf0600,
0b11 : 0xf0700 }
address = mmr_map[m0m1] + (rn * 0x4)
size = get_register_size_by_address(address)
return address, size
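# Worked example (not part of the original source; the size lookup is delegated
# to get_register_size_by_address, defined elsewhere in this module):
# rn = 3, m0m1 = 0b01  ->  address = 0xf0500 + 3 * 0x4 = 0xf050c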
|
53b92051d7ac64f5e288a121dae7e764166c9d2e
| 32,013 |
import re
from lxml.html import HtmlElement as Element  # assumption: Element is an lxml HTML element class
def number_of_a_char(element: Element):
"""
    get the number of characters inside <a> tags; for example, `<a href="#">hello</a>world` yields 5
:param element:
:return: length
"""
if element is None:
return 0
text = ''.join(element.xpath('.//a//text()'))
text = re.sub(r'\s*', '', text, flags=re.S)
return len(text)
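# Usage sketch (not part of the original source; uses lxml to build an element
# from hypothetical markup):
from lxml.html import fromstring
number_of_a_char(fromstring('<p><a href="#">hello</a>world</p>'))  # -> 5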
|
9d1394552b740844aadacc3fe3b2f802b698c18a
| 32,016 |
def preprocessing_fn(batch):
"""
    Normalize sound clips and extract a fixed-length pseudorandom window from each
"""
processed_batch = []
for clip in batch:
signal = clip.astype(np.float64)
# Signal normalization
signal = signal / np.max(np.abs(signal))
# get pseudorandom chunk of fixed length (from SincNet's create_batches_rnd)
signal_length = len(signal)
np.random.seed(signal_length)
signal_start = (
np.random.randint(signal_length / WINDOW_LENGTH - 1)
* WINDOW_LENGTH
% signal_length
)
signal_stop = signal_start + WINDOW_LENGTH
signal = signal[signal_start:signal_stop]
processed_batch.append(signal)
return np.array(processed_batch)
|
25ce4a077027239126d02b62e93ca0a3bcb15b5e
| 32,017 |
def elast_quad9(coord, params):
"""
Quadrilateral element with 9 nodes for classic elasticity
under plane-strain
Parameters
----------
coord : coord
Coordinates of the element.
params : list
List with material parameters in the following order:
[Young modulus, Poisson coefficient, density].
Returns
-------
stiff_mat : ndarray (float)
        Local stiffness matrix.
mass_mat : ndarray (float)
Local mass matrix.
"""
stiff_mat = np.zeros((18, 18))
mass_mat = np.zeros((18, 18))
C = fem.umat(params[:2])
if len(params) == 2:
dens = 1
else:
dens = params[-1]
gpts, gwts = gau.gauss_nd(3, ndim=2)
for cont in range(gpts.shape[0]): # pylint: disable=E1136 # pylint/issues/3139
r, s = gpts[cont, :]
H, B, det = fem.elast_diff_2d(r, s, coord, fem.shape_quad9)
factor = det * gwts[cont]
stiff_mat += factor * (B.T @ C @ B)
mass_mat += dens * factor * (H.T @ H)
return stiff_mat, mass_mat
|
c27bae77ca54a3a370ccdac5b5550f73cb121d9a
| 32,018 |
from typing import Union
from pathlib import Path
import soundfile as sf
def peek(audio_file_path: Union[str, Path], output: str = "np"):
"""
Returns a tuple of audio data and its sampling rate
The audio data can be a numpy array or list
"""
data, sr = sf.read(audio_file_path, dtype="float32")
data = data.transpose()
if output == "list":
return data.tolist(), sr
if output == "np":
return data, sr
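# Usage sketch (hypothetical file path, not part of the original source):
# samples, sample_rate = peek("example.wav", output="np")
# samples is a float32 numpy array, with channels along the first axis for
# multi-channel files.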
|
30b47c77ab92cf0a544d84605204261c899f9e9a
| 32,019 |