content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
---|---|---|
def fetch_synthetic_2d(lags=0):
"""
Build synthetic 2d data.
Parameters
----------
lags : int, optional
        If greater than 0, time dependence is added.
The default is 0.
Returns
-------
    data : numpy.ndarray, shape=(3000, 4)
        Synthetic data: sample index, the two signals and the labels.
"""
seed = 98
np.random.seed(seed)
mu_1, mu_2 = 1, 30
sigma_1, sigma_2 = 3, 1
num_samples = 3000
changes = {'incip': [
{'add': 50, 'where': (1000, 1300)},
{'add': 0, 'where': (1300, 1600)},
{'add': -50, 'where': (1600, 1650)}
],
'sudden': [
{'add': -50, 'where': (2000, 2200)}
]
}
labels = np.ones(num_samples, dtype=np.uint8)
labels[1070:1250] = 2
labels[1250:1602] = 3
labels[1602:1640] = 2
labels[2000:2200] = 4
x_1, x_2 = build_2d_gauss_data(mu_1, mu_2, sigma_1, sigma_2,
samples=num_samples, changes=changes,
alpha=0.15, w=10, lags=lags)
return np.c_[np.arange(1, 3001), x_1, x_2, labels] | 006f1dc900d88c45d16eb518403bb576167bcae1 | 22,610 |
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
def prepare_data() -> tuple:
"""Do the whole data preparation - incl. data conversion and test/train split.
Returns:
tuple: (X_train, y_train, X_test, y_test)
"""
(data, labels) = get_data()
mlb = MultiLabelBinarizer()
mlb.fit(labels)
y = mlb.transform(labels)
# read and decode images
images = [tf.io.read_file(path).numpy() for path in data.image]
print("images read")
# create an empty array to allocate storage --> much faster than converting a list into a numpy-array using np.array(...)
X = np.zeros(shape=(len(images), 60, 80, 3))
for i in range(len(images)):
# pass encoded images into X
X[i] = (decode_img(images[i], 60, 80))
print("images decoded")
# split data into training and test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
print("Data prep finished")
return (X_train, y_train, X_test, y_test) | a2a31f09858393d5fbcd5be80fd61b2e69df3cdf | 22,611 |
def current_url_name(request):
"""
Adds the name for the matched url pattern for the current request to the
request context.
"""
try:
match = resolve(request.path)
url_name = match.url_name
except Http404:
url_name = None
return {
'current_url_name': url_name
} | 9ec6ea26a503e0969400a85a0df7491f6aefc064 | 22,612 |
import click
def get_crypto_key():
"""Shows your crypto key"""
key = load_key()
if key:
click.secho("your actual crypto key:", fg = "blue")
return click.echo(load_key()) | 7078f5fd0fe099ef49a3d702111aae9ee5e23387 | 22,613 |
def get_entropies(data: pd.DataFrame):
"""
Compute entropies for words in wide-format df
"""
counts = pd.DataFrame(apply_count_values(
data.to_numpy(copy=True)), columns=data.columns)
probs = (counts / counts.sum(axis=0)).replace(0, np.nan)
nplogp = -probs * np.log2(probs)
return nplogp.sum() | 49a0a52194472a4285e60a05dab8a83dfaa7eec0 | 22,614 |
def _numeric_adjust_widgets(caption: str, min: float, max: float) -> HBox:
"""Return a HBox with a label, a text box and a linked slider."""
label = Label(value=caption)
text = BoundedFloatText(min=min, max=max, layout=wealth.plot.text_layout)
slider = FloatSlider(readout=False, min=min, max=max)
widgets.jslink((text, "value"), (slider, "value"))
box = HBox([label, text, slider])
return box | b45aaa069f425a888bf54281c6324509eac783c6 | 22,616 |
def where_handle(tokens):
"""Process where statements."""
internal_assert(len(tokens) == 2, "invalid where statement tokens", tokens)
final_stmt, init_stmts = tokens
return "".join(init_stmts) + final_stmt + "\n" | 8377a1b62ffbca31a6b6fa42a125e3b16387a665 | 22,618 |
def nltk_ngram_pos_tagger(input_dict):
"""
A tagger that chooses a token's tag based on its word string and
on the preceding n word's tags. In particular, a tuple
(tags[i-n:i-1], words[i]) is looked up in a table, and the
corresponding tag is returned. N-gram taggers are typically
trained on a tagged corpus.
Train a new NgramTagger using the given training data or
the supplied model. In particular, construct a new tagger
whose table maps from each context (tag[i-n:i-1], word[i])
to the most frequent tag for that context. But exclude any
contexts that are already tagged perfectly by the backoff
tagger.
:param training_corpus: A tagged corpus included with NLTK, such as treebank, brown, cess_esp, floresta,
or an Annotated Document Corpus in the standard TextFlows' adc format
:param backoff_tagger: A backoff tagger, to be used by the new
tagger if it encounters an unknown context.
:param cutoff: If the most likely tag for a context occurs
fewer than *cutoff* times, then exclude it from the
context-to-tag table for the new tagger.
:param n: N-gram is a contiguous sequence of n items from a given sequence of text or speech.
:returns pos_tagger: A python dictionary containing the POS tagger object and its arguments.
"""
chunk = input_dict['training_corpus']['chunk']
corpus = input_dict['training_corpus']['corpus']
training_corpus=corpus_reader(corpus, chunk)
backoff_tagger=input_dict['backoff_tagger']['object'] if input_dict['backoff_tagger'] else DefaultTagger('-None-')
n=int(input_dict['n']) #default 2
cutoff=int(input_dict['cutoff']) #default 0
return {'pos_tagger': {
'function':'tag_sents',
'object': NgramTagger(n, train=training_corpus, model=None,
                              backoff=backoff_tagger, cutoff=cutoff)
}
} | ab6d1f48590042b1efe0a9c7e741e7a5ccbcdb34 | 22,619 |
def remove_app(INSTALLED_APPS, app):
""" remove app from installed_apps """
if app in INSTALLED_APPS:
apps = list(INSTALLED_APPS)
apps.remove(app)
return tuple(apps)
return INSTALLED_APPS | 7386b6f38b73abf25e94d9c8368aaac6255d2cee | 22,621 |
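# A minimal usage sketch for remove_app; the app names below are illustrative only.
EXAMPLE_INSTALLED_APPS = ('django.contrib.admin', 'django.contrib.auth', 'debug_toolbar')
print(remove_app(EXAMPLE_INSTALLED_APPS, 'debug_toolbar'))
# -> ('django.contrib.admin', 'django.contrib.auth')
print(remove_app(EXAMPLE_INSTALLED_APPS, 'not_installed'))
# -> the original tuple, unchanged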
from typing import OrderedDict
def get_feature(cluster, sample, i):
"""Turn a cluster into a biopython SeqFeature."""
qualifiers = OrderedDict(ID="%s_%d" % (sample, i))
for attr in cluster.exportable:
qualifiers[attr] = getattr(cluster, attr.lower())
feature = SeqFeature(FeatureLocation(cluster.start, cluster.end), type=cluster.type, strand=1, qualifiers=qualifiers)
if cluster.feature_args:
seqfeature_args = cluster.feature_args
base_args = {'location': FeatureLocation(cluster.start, cluster.end), 'strand': 1}
subfeatures = []
for feature_args in seqfeature_args:
feature_args = feature_args.to_feature_args()
args = base_args.copy()
args.update(feature_args)
subfeatures.append(SeqFeature(**args))
feature.sub_features = subfeatures
return cluster.tid, feature | 4ec3cb0757db9b884d3bce3d4406f1824a6c62dd | 22,622 |
def loss(Y, spectra, beta, Yval, val_spec):
"""
Description
-----------
    Calculate the loss for a specific set of beta values
Parameters
----------
Y: labels (0 or 1)
spectra: flux values
beta: beta values
Yval: validation set labels (0 or 1)
val_spec: validation flux values
Returns
-------
J_sum: total loss calculated from all spectra
    beta_gradients: gradients for each beta value
    J_sum_val: validation loss calculated from all validation spectra
"""
J_total = []
i = 0
while i < len(Y):
#do logistic regression
sum_p = param_sum(spectra[i],beta)
lr = log_reg(sum_p)
#deal with log(0) cases
if (lr == 1 and Y[i] == 0) or (lr == 0 and Y[i] == 1):
J_iter = 1e+30
else:
J_iter = (-Y[i]*np.log(lr)) - ((1-Y[i])*np.log(1-lr))
J_total.append(J_iter)
i += 1
J_sum = (1/len(Y))*np.sum(J_total)
J_total_val = []
#validation
i = 0
while i < len(Yval):
sum_p_val = param_sum(val_spec[i],beta)
lr_val = log_reg(sum_p_val)
if (lr_val == 1 and Yval[i] == 0) or (lr_val == 0 and Yval[i] == 1):
J_iter_val = 1e+30
else:
J_iter_val = (-Yval[i]*np.log(lr_val)) - ((1-Yval[i])*np.log(1-lr_val))
J_total_val.append(J_iter_val)
i += 1
J_sum_val = (1/len(Yval))*np.sum(J_total_val)
#shuffle the data for SGD
Y, spectra = unison_shuffled_copies(Y, spectra)
#select subset of data
batch = 100
Y_batch = Y[0:batch]
spectra_batch = spectra[0:batch]
beta_gradients = np.zeros(len(beta))
i = 0
#calculate gradients
while i < len(Y_batch):
sum_p = param_sum(spectra_batch[i],beta)
for j in range(len(beta)):
if j == 0:
beta_gradients[j] += gradient1(Y_batch[i], sum_p)
else:
beta_gradients[j] += gradient2(Y_batch[i], spectra_batch[i][j-1], sum_p)
i += 1
return J_sum, beta_gradients, J_sum_val | 5be2675d14062834bc0f8c9cd83551a60b528668 | 22,623 |
def video_detail_except():
"""取得channelPlayListItem/videoDetail兩表video_id的差集
Returns:
[list]: [目前尚未儲存詳細資料的影片ID]
"""
playlist_id = get_db_ChannelPlayListItem_video_id()
video_detail_id = get_db_VideoDetail_video_id()
if video_detail_id and playlist_id:
filter_video = list(set(playlist_id).difference(set(video_detail_id)))
return filter_video
return False | efe9f151c6689cf6a71584cf988d0af8bb1c0c2a | 22,624 |
def nodes(xmrs):
"""Return the list of Nodes for *xmrs*."""
nodes = []
_props = xmrs.properties
varsplit = sort_vid_split
for p in xmrs.eps():
sortinfo = None
iv = p.intrinsic_variable
if iv is not None:
sort, _ = varsplit(iv)
sortinfo = _props(iv)
sortinfo[CVARSORT] = sort
nodes.append(
Node(p.nodeid, p.pred, sortinfo, p.lnk, p.surface, p.base, p.carg)
)
return nodes | 0e081648e6b30ec6cc230218c346384624d2ade6 | 22,625 |
import torch
def get_data_loader(transformed_data, is_training_data=True):
"""
Creates and returns a data loader from transformed_data
"""
return torch.utils.data.DataLoader(transformed_data, batch_size=50, shuffle=True) if is_training_data else torch.utils.data.DataLoader(transformed_data, batch_size=50) | 7f7dfdc83abc0ab261fde7be2216b4e130761f7e | 22,627 |
def structured_rand_arr(size, sample_func=np.random.random,
ltfac=None, utfac=None, fill_diag=None):
"""Make a structured random 2-d array of shape (size,size).
If no optional arguments are given, a symmetric array is returned.
Parameters
----------
size : int
Determines the shape of the output array: (size,size).
sample_func : function, optional.
Must be a function which when called with a 2-tuple of ints, returns a
2-d array of that shape. By default, np.random.random is used, but any
other sampling function can be used as long as it matches this API.
utfac : float, optional
Multiplicative factor for the upper triangular part of the matrix.
ltfac : float, optional
Multiplicative factor for the lower triangular part of the matrix.
fill_diag : float, optional
If given, use this value to fill in the diagonal. Otherwise the diagonal
will contain random elements.
Examples
--------
>>> np.random.seed(0) # for doctesting
>>> np.set_printoptions(precision=4) # for doctesting
>>> structured_rand_arr(4)
array([[ 0.5488, 0.7152, 0.6028, 0.5449],
[ 0.7152, 0.6459, 0.4376, 0.8918],
[ 0.6028, 0.4376, 0.7917, 0.5289],
[ 0.5449, 0.8918, 0.5289, 0.0871]])
>>> structured_rand_arr(4,ltfac=-10,utfac=10,fill_diag=0.5)
array([[ 0.5 , 8.3262, 7.7816, 8.7001],
[-8.3262, 0.5 , 4.6148, 7.8053],
[-7.7816, -4.6148, 0.5 , 9.4467],
[-8.7001, -7.8053, -9.4467, 0.5 ]])
"""
# Make a random array from the given sampling function
rmat = sample_func((size,size))
# And the empty one we'll then fill in to return
out = np.empty_like(rmat)
# Extract indices for upper-triangle, lower-triangle and diagonal
uidx = triu_indices(size,1)
lidx = tril_indices(size,-1)
didx = diag_indices(size)
# Extract each part from the original and copy it to the output, possibly
# applying multiplicative factors. We check the factors instead of
# defaulting to 1.0 to avoid unnecessary floating point multiplications
# which could be noticeable for very large sizes.
if utfac:
out[uidx] = utfac * rmat[uidx]
else:
out[uidx] = rmat[uidx]
if ltfac:
out[lidx] = ltfac * rmat.T[lidx]
else:
out[lidx] = rmat.T[lidx]
# If fill_diag was provided, use it; otherwise take the values in the
# diagonal from the original random array.
if fill_diag is not None:
out[didx] = fill_diag
else:
out[didx] = rmat[didx]
return out | 4463f62bef1feff23019cc35439545b52461ee40 | 22,629 |
from functools import wraps
def append_empty_args(func):
    """Transform an ingress function that only returns kwargs into one that
    returns the normal form of ingress functions: ((), kwargs)."""
@wraps(func)
def _func(*args, **kwargs):
return (), func(*args, **kwargs)
return _func | ec2bf4c30eddb418ade57e50ce8a4a233a8f0f9d | 22,630 |
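# A minimal usage sketch for append_empty_args; `ingress` is a hypothetical
# kwargs-only ingress function used purely for illustration.
@append_empty_args
def ingress(x):
    return {"value": x}

print(ingress(3))  # -> ((), {'value': 3})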
def gateway(job, app, tool, user, user_email):
"""
Function to specify the destination for a job. At present this is exactly the same
as using dynamic_dtd with tool_destinations.yml but can be extended to more complex
mapping such as limiting resources based on user group or selecting destinations
based on queue size.
Arguments to this function can include app, job, job_id, job_wrapper, tool, tool_id,
user, user_email (see https://docs.galaxyproject.org/en/latest/admin/jobs.html)
"""
if user_email in user_destinations.keys():
if hasattr(tool, 'id') and isinstance(tool.id, str) and tool.id.startswith('toolshed'): # map shed tools only
return user_destinations[user_email]
if user:
user_roles = [role.name for role in user.all_roles() if not role.deleted]
# If any of these are prefixed with 'training-'
if any([role.startswith('training-') for role in user_roles]):
# Then they are a training user, we will send their jobs to pulsar,
# Or give them extra resources
if hasattr(tool, 'id') and isinstance(tool.id, str) and tool.id.startswith('toolshed') and tool.id.split('/')[-2] in pulsar_list:
return app.job_config.get_destination('pulsar_destination')
else:
return app.job_config.get_destination('slurm_dest')
destination = map_tool_to_destination(job, app, tool, user_email, path=TOOL_DESTINATION_PATH)
return destination | b51cd288b638469d054191be0c1423d0c637ce9a | 22,631 |
def pprint(matrix: list) -> str:
"""
    Pretty-print a matrix string.
Parameters
----------
matrix : list
Square matrix.
Returns
-------
str
        Pretty string form of the matrix.
"""
matrix_string = str(matrix)
matrix_string = matrix_string.replace('],', '],\n')
return matrix_string | 5c0ffa2b0a9c237b65b5ad7c4e17c2456195c088 | 22,633 |
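# A small worked example: pprint only stringifies the nested list and inserts a
# line break after each inner row.
print(pprint([[1, 2], [3, 4]]))
# [[1, 2],
#  [3, 4]]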
import numpy as np
def sigmoid(x : np.ndarray, a : float, b : float, c : float) -> np.ndarray :
"""
A parameterized sigmoid curve
Args:
x (np.ndarray or float): x values to evaluate the sigmoid
a (float): vertical stretch parameter
b (float): horizontal shift parameter
c (float): horizontal stretch parameter
Returns:
        evaluated sigmoid curve at x values for the given parameterization
"""
return a / (b + np.exp(-1.0 * c * x)) | dacb80ca958bf9f0a007fe6d50970f444fd9b4e7 | 22,634 |
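# A quick sanity check (assumes NumPy): with a=1, b=1, c=1 the curve reduces to
# the standard logistic function, so it evaluates to 0.5 at x=0.
import numpy as np
print(sigmoid(np.array([-2.0, 0.0, 2.0]), a=1.0, b=1.0, c=1.0))
# -> approximately [0.1192 0.5 0.8808]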
def merge_testcase_data(leaf, statsname, x_axis):
"""
statsname might be a function. It will be given the folder path
of the test case and should return one line.
"""
res = get_leaf_tests_stats(leaf, statsname)
return merge_testcase_data_set_x(res, x_axis) | 5b8829377d9249630a9261e7ee27533cce72542c | 22,635 |
def dashed_word(answer):
"""
:param answer: str, from random_word
    :return: str, a string of '-' characters with the same length as answer
"""
ans = ""
for i in answer:
ans += '-'
return ans | 358be047bfad956afef27c0665b02a2a233fefbf | 22,637 |
def merge_overpass_jsons(jsons):
"""Merge a list of overpass JSONs into a single JSON.
Parameters
----------
jsons : :obj:`list`
List of dictionaries representing Overpass JSONs.
Returns
-------
:obj:`dict`
Dictionary containing all elements from input JSONS.
"""
elements = []
for osm_json in jsons:
elements.extend(osm_json['elements'])
return {'elements': elements} | c68fde0ddbdf22a34377e1e865be36aaabaa47be | 22,638 |
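# A minimal usage sketch with two made-up Overpass-style payloads.
json_a = {'elements': [{'type': 'node', 'id': 1}]}
json_b = {'elements': [{'type': 'way', 'id': 2}]}
print(merge_overpass_jsons([json_a, json_b]))
# -> {'elements': [{'type': 'node', 'id': 1}, {'type': 'way', 'id': 2}]}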
from admin.get_session_info import run as _get_session_info
from admin.login import run as _login
from admin.logout import run as _logout
from admin.recover_otp import run as _recover_otp
from admin.register import run as _register
from admin.request_login import run as _request_login
from admin.handler import MissingFunctionError
def identity_functions(function, args):
"""This function routes calls to sub-functions, thereby allowing
a single identity function to stay hot for longer
Args:
function (str): for selection of function to call
args: arguments to be passed to the selected function
Returns:
function: If valid function selected, function with args passed
else None
"""
if function == "get_session_info":
return _get_session_info(args)
elif function == "login":
return _login(args)
elif function == "logout":
return _logout(args)
elif function == "recover_otp":
return _recover_otp(args)
elif function == "register":
return _register(args)
elif function == "request_login":
return _request_login(args)
else:
raise MissingFunctionError() | 1885705099ce67e806a6cc9a461f92c659bbf0bb | 22,639 |
def load_template(tmpl):
"""
Loads the default template file.
"""
with open(tmpl, "r") as stream:
return Template(stream.read()) | a37a74cf37a05142bcddac870a81ee44531004f3 | 22,640 |
def get_total_cases():
"""全国の現在の感染者数"""
return col_ref.document(n).get().to_dict()["total"]["total_cases"] | 1e933fd86cde49edbb5d78591fb513cb9633c080 | 22,642 |
from typing import Optional
def _find_db_team(team_id: OrgaTeamID) -> Optional[DbOrgaTeam]:
"""Return the team with that id, or `None` if not found."""
return db.session.query(DbOrgaTeam).get(team_id) | 9c0709c8b601a1910ed1e6b08f16970c976b7310 | 22,643 |
from typing import Optional
from contextlib import suppress
def make_number(
num: Optional[str],
repr: str = None,
speak: str = None,
literal: bool = False,
special: dict = None,
) -> Optional[Number]:
"""Returns a Number or Fraction dataclass for a number string
If literal, spoken string will not convert to hundreds/thousands
    NOTE: Numerators are assumed to have a single digit; any additional digits are treated as whole numbers
"""
# pylint: disable=too-many-branches
if not num or is_unknown(num):
return None
# Check special
with suppress(KeyError):
item = (special or {}).get(num) or SPECIAL_NUMBERS[num]
if isinstance(item, tuple):
value, spoken = item
else:
value = item
spoken = spoken_number(str(value), literal=literal)
return Number(repr or num, value, spoken)
# Check cardinal direction
if num in CARDINALS:
if not repr:
repr = num
num = str(CARDINALS[num])
# Remove spurious characters from the end
num = num.rstrip("M.")
num = num.replace("O", "0")
num = num.replace("+", "")
# Create Fraction
if "/" in num:
return make_fraction(num, repr, literal)
# Handle Minus values with errors like 0M04
if "M" in num:
val_str = num.replace("MM", "-").replace("M", "-")
while val_str[0] != "-":
val_str = val_str[1:]
else:
val_str = num
# Check value prefixes
speak_prefix = ""
if val_str.startswith("ABV "):
speak_prefix += "above "
val_str = val_str[4:]
if val_str.startswith("BLW "):
speak_prefix += "below "
val_str = val_str[4:]
if val_str.startswith("FL"):
speak_prefix += "flight level "
val_str, literal = val_str[2:], True
# Create Number
if not val_str:
return None
if "." in num:
value = float(val_str)
# Overwrite float 0 due to "0.0" literal
if not value:
value = 0
else:
value = int(val_str)
spoken = speak_prefix + spoken_number(speak or str(value), literal)
return Number(repr or num, value, spoken) | 0fdbd9610355cfceb2ee5ab0fd04b694ca8da9af | 22,644 |
import plotly.graph_objs as go
def gif_jtfs_3d(Scx, jtfs=None, preset='spinned', savedir='',
base_name='jtfs3d', images_ext='.png', cmap='turbo', cmap_norm=.5,
axes_labels=('xi2', 'xi1_fr', 'xi1'), overwrite=False,
save_images=False, width=800, height=800, surface_count=30,
opacity=.2, zoom=1, angles=None, verbose=True, gif_kw=None):
"""Generate and save GIF of 3D JTFS slices.
Parameters
----------
Scx : dict / tensor, 4D
Output of `jtfs(x)` with `out_type='dict:array'` or `'dict:list'`,
or output of `wavespin.toolkit.pack_coeffs_jtfs`.
jtfs : TimeFrequencyScattering1D
Required if `preset` is not `None`.
preset : str['spinned', 'all'] / None
If `Scx = jtfs(x)`, then
- 'spinned': show only `psi_t * psi_f_up` and `psi_t * psi_f_dn` pairs
- 'all': show all pairs
`None` is for when `Scx` is already packed via `pack_coeffs_jtfs`.
savedir, base_name, images_ext, overwrite :
See `help(wavespin.visuals.gif_jtfs)`.
cmap : str
Colormap to use.
cmap_norm : float
Colormap norm to use, as fraction of maximum value of `packed`
(i.e. `norm=(0, cmap_norm * packed.max())`).
axes_labels : tuple[str]
Names of last three dimensions of `packed`. E.g. `structure==2`
(in `pack_coeffs_jtfs`) will output `(n2, n1_fr, n1, t)`, so
`('xi2', 'xi1_fr', 'xi1')` (default).
width : int
2D width of each image (GIF frame).
height : int
2D height of each image (GIF frame).
surface_count : int
Greater improves 3D detail of each frame, but takes longer to render.
opacity : float
Lesser makes 3D surfaces more transparent, exposing more detail.
zoom : float (default=1) / None
Zoom factor on each 3D frame. If None, won't modify `angles`.
If not None, will first divide by L2 norm of `angles`, then by `zoom`.
angles : None / np.ndarray / list/tuple[np.ndarray] / str['rotate']
Controls display angle of the GIF.
- None: default angle that faces the line extending from min to max
of `xi1`, `xi2`, and `xi1_fr` (assuming default `axes_labels`).
- Single 1D array: will reuse for each frame.
- 'rotate': will use a preset that rotates the display about the
default angle.
Resulting array is passed to `go.Figure.update_layout()` as
`'layout_kw': {'scene_camera': 'center': dict(x=e[0], y=e[1], z=e[2])}`,
where `e = angles[0]` up to `e = angles[len(packed) - 1]`.
verbose : bool (default True)
Whether to print GIF generation progress.
gif_kw : dict / None
Passed as kwargs to `wavespin.visuals.make_gif`.
Example
-------
Also see `examples/visuals_tour.py`.
::
N, J, Q = 2049, 7, 16
x = toolkit.echirp(N)
jtfs = TimeFrequencyScattering1D(J, N, Q, J_fr=4, Q_fr=2,
out_type='dict:list')
Scx = jtfs(x)
gif_jtfs_3d(Scx, jtfs, savedir='', preset='spinned')
"""
    try:
        import plotly.graph_objs as go
    except ImportError as e:
print("\n`plotly.graph_objs` is needed for `gif_jtfs_3d`.")
raise e
# handle args & check if already exists (if so, delete if `overwrite`)
savedir, savepath_gif, images_ext, save_images, *_ = _handle_gif_args(
savedir, base_name, images_ext, save_images, overwrite, show=False)
if preset not in ('spinned', 'all', None):
raise ValueError("`preset` must be 'spinned', 'all', or None (got %s)" % (
preset))
# handle input tensor
if not isinstance(Scx, (dict, np.ndarray)):
raise ValueError("`Scx` must be dict or numpy array (need `out_type` "
"'dict:array' or 'dict:list'). Got %s" % type(Scx))
elif isinstance(Scx, dict):
ckw = dict(Scx=Scx, meta=jtfs.meta(), reverse_n1=False,
out_3D=jtfs.out_3D,
sampling_psi_fr=jtfs.sampling_psi_fr)
if preset == 'spinned':
_packed = pack_coeffs_jtfs(structure=2, separate_lowpass=True, **ckw)
_packed = _packed[0] # spinned only
elif preset == 'all':
_packed = pack_coeffs_jtfs(structure=2, separate_lowpass=False, **ckw)
else:
raise ValueError("dict `Scx` requires string `preset` (got %s)" % (
preset))
packed = _packed.transpose(-1, 0, 1, 2) # time first
elif isinstance(Scx, np.ndarray):
packed = Scx
# handle labels
supported = ('t', 'xi2', 'xi1_fr', 'xi1')
for label in axes_labels:
if label not in supported:
raise ValueError(("unsupported `axes_labels` element: {} -- must "
"be one of: {}").format(
label, ', '.join(supported)))
frame_label = [label for label in supported if label not in axes_labels][0]
# 3D meshgrid
def slc(i, g):
label = axes_labels[i]
start = {'xi1': .5, 'xi2': .5, 't': 0, 'xi1_fr': .5}[label]
end = {'xi1': 0., 'xi2': 0., 't': 1, 'xi1_fr': -.5}[label]
return slice(start, end, g*1j)
a, b, c = packed.shape[1:]
X, Y, Z = np.mgrid[slc(0, a), slc(1, b), slc(2, c)]
# handle `angles`; camera focus
if angles is None:
eye = np.array([2.5, .3, 2])
eye /= np.linalg.norm(eye)
eyes = [eye] * len(packed)
elif (isinstance(angles, (list, tuple)) or
(isinstance(angles, np.ndarray) and angles.ndim == 2)):
eyes = angles
elif isinstance(angles, str):
assert angles == 'rotate', angles
n_pts = len(packed)
def gauss(n_pts, mn, mx, width=20):
t = np.linspace(0, 1, n_pts)
g = np.exp(-(t - .5)**2 * width)
g *= (mx - mn)
g += mn
return g
x = np.logspace(np.log10(2.5), np.log10(8.5), n_pts, endpoint=1)
y = np.logspace(np.log10(0.3), np.log10(6.3), n_pts, endpoint=1)
z = np.logspace(np.log10(2.0), np.log10(2.0), n_pts, endpoint=1)
x, y, z = [gauss(n_pts, mn, mx) for (mn, mx)
in [(2.5, 8.5), (0.3, 6.3), (2, 2)]]
eyes = np.vstack([x, y, z]).T
else:
eyes = [angles] * len(packed)
assert len(eyes) == len(packed), (len(eyes), len(packed))
# camera zoom
if zoom is not None:
for i in range(len(eyes)):
eyes[i] /= (np.linalg.norm(eyes[i]) * .5 * zoom)
# colormap norm
mx = cmap_norm * packed.max()
# gif configs
volume_kw = dict(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
opacity=opacity,
surface_count=surface_count,
colorscale=cmap,
showscale=False,
cmin=0,
cmax=mx,
)
layout_kw = dict(
margin_pad=0,
margin_l=0,
margin_r=0,
margin_t=0,
title_pad_t=0,
title_pad_b=0,
margin_autoexpand=False,
scene_aspectmode='cube',
width=width,
height=height,
scene=dict(
xaxis_title=axes_labels[0],
yaxis_title=axes_labels[1],
zaxis_title=axes_labels[2],
),
scene_camera=dict(
up=dict(x=0, y=1, z=0),
center=dict(x=0, y=0, z=0),
),
)
# generate gif frames ####################################################
img_paths = []
for k, vol4 in enumerate(packed):
fig = go.Figure(go.Volume(value=vol4.flatten(), **volume_kw))
eye = dict(x=eyes[k][0], y=eyes[k][1], z=eyes[k][2])
layout_kw['scene_camera']['eye'] = eye
fig.update_layout(
**layout_kw,
title={'text': f"{frame_label}={k}",
'x': .5, 'y': .09,
'xanchor': 'center', 'yanchor': 'top'}
)
savepath = os.path.join(savedir, f'{base_name}{k}{images_ext}')
if os.path.isfile(savepath) and overwrite:
os.unlink(savepath)
fig.write_image(savepath)
img_paths.append(savepath)
if verbose:
print("{}/{} frames done".format(k + 1, len(packed)), flush=True)
# make gif ###############################################################
try:
if gif_kw is None:
gif_kw = {}
make_gif(loaddir=savedir, savepath=savepath_gif, ext=images_ext,
delimiter=base_name, overwrite=overwrite, verbose=verbose,
**gif_kw)
finally:
if not save_images:
# guarantee cleanup
for path in img_paths:
if os.path.isfile(path):
os.unlink(path) | 0d34878378e463a55655be29bd6b3d665adc05c6 | 22,646 |
def _serialize_account(project):
"""Generate several useful fields related to a project's account"""
account = project.account
return {'goal': account.goal,
'community_contribution': account.community_contribution,
'total_donated': account.total_donated(),
'total_raised': account.total_raised(),
'total_cost': account.total_cost(),
'percent_raised': account.percent_raised(),
'percent_community': account.percent_community(),
'funded': account.funded(),
'remaining': account.remaining()} | dea20df5db1ae37f61d6c661f957432b7cb72158 | 22,648 |
def neighbor_smoothing_binary(data_3d, neighbors):
"""
takes a 3d binary (0/1) input, returns a "neighbor" smoothed 3d matrix
Input:
------
data_3d: a 3d np.array (with 0s and 1s) -> 1s are "on", 0s are "off"
neighbors: the value that indicates the number of neighbors around voxel to check
Returns:
--------
smoothed_neighbors: 3d np.array same shape as data_3d
"""
smoothed_neighbors = data_3d.copy()
shape = data_3d.shape
for i in 1 + np.arange(shape[0] - 2):
for j in 1 + np.arange(shape[1] - 2):
for k in 1 + np.arange(shape[2] - 2):
                # number of neighbors that need to be positive
if np.sum(data_3d[(i - 1):(i + 2),(j - 1):(j + 2),(k - 1):(k + 2)] == 1) < neighbors and data_3d[i, j, k] == 1:
smoothed_neighbors[i, j, k] = 0
return smoothed_neighbors | 22f503e9843a1a0864e3a66c4ed05070712defa3 | 22,650 |
def registrymixin_models():
"""Fixtures for RegistryMixin tests."""
# We have two sample models and two registered items to test that
# the registry is unique to each model and is not a global registry
# in the base RegistryMixin class.
# Sample model 1
class RegistryTest1(BaseMixin, db.Model):
"""Registry test model 1."""
__tablename__ = 'registry_test1'
# Sample model 2
class RegistryTest2(BaseMixin, db.Model):
"""Registry test model 2."""
__tablename__ = 'registry_test2'
# Sample registered item (form or view) 1
class RegisteredItem1:
"""Registered item 1."""
def __init__(self, obj=None):
"""Init class."""
self.obj = obj
# Sample registered item 2
@RegistryTest2.views('test')
class RegisteredItem2:
"""Registered item 2."""
def __init__(self, obj=None):
"""Init class."""
self.obj = obj
# Sample registered item 3
@RegistryTest1.features('is1')
@RegistryTest2.features()
def is1(obj):
"""Assert object is instance of RegistryTest1."""
return isinstance(obj, RegistryTest1)
RegistryTest1.views.test = RegisteredItem1
return SimpleNamespace(**locals()) | 420ecaea78780524ac0a889af1d584d0bbced8f3 | 22,653 |
def is_test_input_output_file(file_name: str) -> bool:
"""
Return whether a file is used as input or output in a unit test.
"""
ret = is_under_test_dir(file_name)
ret &= file_name.endswith(".txt")
return ret | 961c2dcda2cb848a1880b36ca06c01a8bf091704 | 22,654 |
import csv
def generate_csv_from_queryset(queryset, csv_name = "query_csv"):
"""
    Generate a CSV file from a QuerySet object.
    :param queryset: object of type QuerySet
    :param csv_name: optional field indicating the output name of the CSV
    :return: response object
"""
try:
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=' + csv_name + '.csv'
model_field_names = []
writer = csv.writer(response, delimiter=';')
if isinstance(queryset.first(), dict):
fields = queryset.first().keys()
else:
fields = [field.attname.split('_id')[0] if field.attname.endswith('_id') else field.attname for field in queryset.first()._meta.local_fields]
for field in fields:
model_field_names.append(field)
writer.writerow(model_field_names)
for query in queryset:
csv_row = []
for field in fields:
csv_row.append(query[field] if isinstance(query, dict) else model_to_dict(query)[field])
writer.writerow(csv_row)
return response
except Exception as e:
return e | ba4fab63e40cf791d7ac148ca804f0f60173d395 | 22,655 |
def get(isamAppliance, id, check_mode=False, force=False, ignore_error=False):
"""
Retrieving the current runtime template files directory contents
"""
return isamAppliance.invoke_get("Retrieving the current runtime template files directory contents",
"/mga/template_files/{0}".format(id), ignore_error=ignore_error) | 252bf635445af134772e8e762508ec9eb32d974e | 22,656 |
def update(contxt, vsmapp_id, attach_status=None, is_terminate=False):
"""update storage pool usage"""
if contxt is None:
contxt = context.get_admin_context()
if not vsmapp_id:
raise exception.StoragePoolUsageInvalid()
is_terminate = utils.bool_from_str(is_terminate)
kargs = {
'attach_status': attach_status,
'terminate_at': timeutils.utcnow() if is_terminate else None
}
try:
return db.storage_pool_usage_update(contxt, vsmapp_id, kargs)
except db_exc.DBError as e:
LOG.exception(_("DB Error on updating new storage pool usage %s" % e))
raise exception.StoragePoolUsageFailure() | 1015a7387cb264e9f8ce611d1b2de531aabb249e | 22,657 |
from tqdm import tqdm
def adsgan(orig_data, params):
"""Generate synthetic data for ADSGAN framework.
Args:
orig_data: original data
params: Network parameters
mb_size: mini-batch size
z_dim: random state dimension
h_dim: hidden state dimension
lamda: identifiability parameter
iterations: training iterations
Returns:
synth_data: synthetically generated data
"""
# Reset the tensorflow graph
tf.reset_default_graph()
## Parameters
# Feature no
x_dim = len(orig_data.columns)
# Sample no
no = len(orig_data)
# Batch size
mb_size = params['mb_size']
# Random variable dimension
z_dim = params['z_dim']
# Hidden unit dimensions
h_dim = params['h_dim']
# Identifiability parameter
lamda = params['lamda']
# Training iterations
iterations = params['iterations']
# WGAN-GP parameters
lam = 10
lr = 1e-4
#%% Data Preprocessing
orig_data = np.asarray(orig_data)
def data_normalization(orig_data, epsilon = 1e-8):
min_val = np.min(orig_data, axis=0)
normalized_data = orig_data - min_val
max_val = np.max(normalized_data, axis=0)
normalized_data = normalized_data / (max_val + epsilon)
normalization_params = {"min_val": min_val, "max_val": max_val}
return normalized_data, normalization_params
def data_renormalization(normalized_data, normalization_params, epsilon = 1e-8):
renormalized_data = normalized_data * (normalization_params['max_val'] + epsilon)
renormalized_data = renormalized_data + normalization_params['min_val']
return renormalized_data
orig_data, normalization_params = data_normalization(orig_data)
#%% Necessary Functions
# Xavier Initialization Definition
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape = size, stddev = xavier_stddev)
# Sample from uniform distribution
def sample_Z(m, n):
return np.random.uniform(-1., 1., size = [m, n])
# Sample from the real data
def sample_X(m, n):
return np.random.permutation(m)[:n]
#%% Placeholder
# Feature
X = tf.placeholder(tf.float32, shape = [None, x_dim])
# Random Variable
Z = tf.placeholder(tf.float32, shape = [None, z_dim])
#%% Discriminator
# Discriminator
D_W1 = tf.Variable(xavier_init([x_dim, h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim,h_dim]))
D_b2 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W3 = tf.Variable(xavier_init([h_dim,1]))
D_b3 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]
#%% Generator
G_W1 = tf.Variable(xavier_init([z_dim + x_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W2 = tf.Variable(xavier_init([h_dim,h_dim]))
G_b2 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W3 = tf.Variable(xavier_init([h_dim,h_dim]))
G_b3 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W4 = tf.Variable(xavier_init([h_dim, x_dim]))
G_b4 = tf.Variable(tf.zeros(shape=[x_dim]))
theta_G = [G_W1, G_W2, G_W3, G_W4, G_b1, G_b2, G_b3, G_b4]
#%% Generator and discriminator functions
def generator(z, x):
inputs = tf.concat([z, x], axis = 1)
G_h1 = tf.nn.tanh(tf.matmul(inputs, G_W1) + G_b1)
G_h2 = tf.nn.tanh(tf.matmul(G_h1, G_W2) + G_b2)
G_h3 = tf.nn.tanh(tf.matmul(G_h2, G_W3) + G_b3)
G_log_prob = tf.nn.sigmoid(tf.matmul(G_h3, G_W4) + G_b4)
return G_log_prob
def discriminator(x):
D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)
D_h2 = tf.nn.relu(tf.matmul(D_h1, D_W2) + D_b2)
out = (tf.matmul(D_h2, D_W3) + D_b3)
return out
#%% Structure
G_sample = generator(Z,X)
D_real = discriminator(X)
D_fake = discriminator(G_sample)
# Replacement of Clipping algorithm to Penalty term
# 1. Line 6 in Algorithm 1
eps = tf.random_uniform([mb_size, 1], minval = 0., maxval = 1.)
X_inter = eps*X + (1. - eps) * G_sample
# 2. Line 7 in Algorithm 1
grad = tf.gradients(discriminator(X_inter), [X_inter])[0]
grad_norm = tf.sqrt(tf.reduce_sum((grad)**2 + 1e-8, axis = 1))
grad_pen = lam * tf.reduce_mean((grad_norm - 1)**2)
# Loss function
D_loss = tf.reduce_mean(D_fake) - tf.reduce_mean(D_real) + grad_pen
G_loss1 = -tf.sqrt(tf.reduce_mean(tf.square(X - G_sample)))
G_loss2 = -tf.reduce_mean(D_fake)
G_loss = G_loss2 + lamda * G_loss1
# Solver
D_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(D_loss, var_list = theta_D))
G_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(G_loss, var_list = theta_G))
#%% Iterations
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Iterations
for it in tqdm(range(iterations)):
# Discriminator training
for _ in range(5):
Z_mb = sample_Z(mb_size, z_dim)
X_idx = sample_X(no,mb_size)
X_mb = orig_data[X_idx,:]
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict = {X: X_mb, Z: Z_mb})
# Generator Training
Z_mb = sample_Z(mb_size, z_dim)
X_idx = sample_X(no,mb_size)
X_mb = orig_data[X_idx,:]
_, G_loss1_curr, G_loss2_curr = sess.run([G_solver, G_loss1, G_loss2], feed_dict = {X: X_mb, Z: Z_mb})
#%% Output Generation
synth_data = sess.run([G_sample], feed_dict = {Z: sample_Z(no, z_dim), X: orig_data})
synth_data = synth_data[0]
# Renormalization
synth_data = data_renormalization(synth_data, normalization_params)
# Binary features
for i in range(x_dim):
if len(np.unique(orig_data[:, i])) == 2:
synth_data[:, i] = np.round(synth_data[:, i])
return synth_data | f0e4f85f93d116c75c82a1aba524bed89beb4f14 | 22,658 |
import requests
def unemployed(year, manu, key, state='*'):  # e.g. manu='54' is professional, scientific, and technical service industries; year=2017
"""Yearly data on self-employed manufacturing sectors for all counties. Returns all receipts in thousands of dollars for all counties for the specified state for certain industries.
Parameters
----------
year: int
Only full 4-integer values for years where the Community Survey is available, 2009-2019
manu: str
string for a manufacturing sector code
key: str
API key requested from US census.gov website, string format
state: str
string argument for state code
Returns
-------
dataframe
Pandas dataframe extracted with the inserted parameters from the census manufacturing survey
Examples
--------
>>> from us_census import us_census
>>> unemployed(2002, '54', MYKEY, '02')"""
assert isinstance(year, int), "Please only ask for available years and ensure the year entry is an integer"
assert isinstance(manu, str), "Ensure the manufacturing sector is viable and a string."
assert isinstance(state, str)
r= requests.get(f'http://api.census.gov/data/{year}/nonemp?get=NRCPTOT,NAME&for=county:*&in=state:{state}&NAICS{year}={manu}&key={key}')
if r.status_code== 200:
try:
df= pd.DataFrame(r.json())
return df
except (NameError):
print("This state, year, or manufacturing sector code was not found. Please try valid inputs for the American Manufacturing survey .") | 1ee03a5a0ee1faf2101b8785aadda688b722a96e | 22,659 |
def stack_bricks(top_brick, bottom_brick):
"""Stacks two Duplo bricks, returns the attachment frame of the top brick."""
arena = composer.Arena()
# Bottom brick is fixed in place, top brick has a freejoint.
arena.attach(bottom_brick)
attachment_frame = arena.add_free_entity(top_brick)
# Attachment frame is positioned such that the top brick is on top of the
# bottom brick.
attachment_frame.pos = (0, 0, 0.0192)
return arena, attachment_frame | 64b096bc77dfcd62c39cccdfad10241a1574a1ec | 22,660 |
def trigger(name):
"""
@trigger decorator allow to register a function as a trigger with a given name
Parameters
----------
name : str
Name of the trigger
"""
_app = get_app_instance()
return _app.trigger(name) | 3af727c206565346b69ea5eb6735d9237cefeaf3 | 22,661 |
from typing import Dict
from typing import List
def get_components_to_remove(component_dict: Dict[str, ComponentImport]) -> List[str]:
"""Gets a list of components to remove from the dictionary using console input.
Args:
component_dict (Dict[str, ComponentImport]): The custom component dictionary.
Returns:
List[str]: The keys to remove from the dictionary.
"""
components = []
if component_dict:
name = get_str("Enter a component name to remove(blank to skip): ")
else:
name = ""
while name != "":
if name.startswith("custom/"):
name = name.removeprefix("custom/")
if f"custom/{name}" not in component_dict:
error(f"No component import with name: {name}")
elif f"custom/{name}" in components:
error(f"Already removing component import with name: {name}")
else:
components.append(f"custom/{name}")
if len(component_dict) <= len(components):
break
name = get_str("Enter a component name to remove(blank to skip): ")
return components | fd423b0de58ef2e8d5db1a63d334f034a729f2f4 | 22,662 |
import requests
def list_data_objects():
"""
This endpoint translates DOS List requests into requests against indexd
and converts the responses into GA4GH messages.
:return:
"""
req_body = app.current_request.json_body
if req_body:
page_token = req_body.get('page_token', None)
page_size = req_body.get('page_size', None)
else:
page_token = "0"
page_size = 100
if req_body and (page_token or page_size):
gdc_req = dos_list_request_to_indexd(req_body)
else:
gdc_req = {}
response = requests.get("{}/index/".format(INDEXD_URL), params=gdc_req)
if response.status_code != 200:
return Response(
{'msg': 'The request was malformed {}'.format(
response.json().get('message', ""))})
list_response = response.json()
return gdc_to_dos_list_response(list_response) | 0eae49f4e559a4d640ce304cfaa17ef3806b2937 | 22,663 |
from typing import Union
def get_include_file_start_line(block: Block) -> Union[int, None]:
"""
>>> block = lib_test.get_test_block_ok()
>>> # test start-line set to 10
>>> get_include_file_start_line(block)
10
>>> assert block.include_file_start_line == 10
>>> # test start-line not set
>>> block = lib_test.get_test_block_start_line_not_set()
>>> get_include_file_start_line(block)
>>> assert block.include_file_start_line is None
>>> # test start-line invalid
>>> block = lib_test.get_test_block_start_line_invalid()
>>> get_include_file_start_line(block) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Error in File ".../README.template.rst", Line 47103: option "start-line" has no value
>>> # test start-line not integer
>>> block = lib_test.get_test_block_start_line_not_integer()
>>> get_include_file_start_line(block) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TypeError: Error in File ".../README.template.rst", Line 47103: option "start-line" has to be integer
"""
include_file_start_line = block.include_file_start_line
if lib_block_options.is_option_in_block('start-line', block):
include_file_start_line = int(lib_block_options.get_option_value_from_block_or_raise_if_empty_or_invalid('start-line', block, value_must_be_int=True))
block.include_file_start_line = include_file_start_line
return include_file_start_line | 5e9ddeed83192f8a53acaca1a6ac4493a7dcbe36 | 22,664 |
def get_wcets(utils, periods):
""" Returns WCET """
return [ui * ti for ui, ti in zip(utils, periods)]
# return [math.ceil(ui * ti) for ui, ti in zip(utils, periods)] | f853459b2463fc75b91f5effe3357b9c4ec5c4f9 | 22,665 |
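# A worked example: each WCET is the task utilization multiplied by its period.
print(get_wcets([0.5, 0.25], [20, 40]))  # -> [10.0, 10.0]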
def aws_get_dynamodb_table_names(profile_name: str) -> list:
"""
get all DynamoDB tables
:param profile_name: AWS IAM profile name
:return: a list of DynamoDB table names
"""
dynamodb_client = aws_get_client("dynamodb", profile_name)
table_names = []
more_to_evaluate = True
last_evaluated_table_name = None
while more_to_evaluate:
if last_evaluated_table_name is None:
response = dynamodb_client.list_tables()
else:
response = dynamodb_client.list_tables(ExclusiveStartTableName=last_evaluated_table_name)
partial_table_names = response.get("TableNames")
last_evaluated_table_name = response.get("LastEvaluatedTableName")
if partial_table_names is not None and len(partial_table_names) > 0:
table_names.extend(partial_table_names)
if last_evaluated_table_name is None:
more_to_evaluate = False
table_names.sort()
return table_names | fddc574e8f4f6a798e5fe0b339c898b09f18b527 | 22,666 |
from typing import Any
def stack_images_vertical(
image_0: NDArray[(Any, ...), Any], image_1: NDArray[(Any, ...), Any]
) -> NDArray[(Any, ...), Any]:
"""
Stack two images vertically.
Args:
image_0: The image to place on the top.
image_1: The image to place on the bottom.
Returns:
        An image with the original two images on top of each other.
Note:
The images must have the same width.
Example::
color_image = rc.camera.get_color_image()
depth_image = rc.camera.get_depth_image()
depth_image_colormap = rc_utils.colormap_depth_image(depth_image)
# Create a new image with the color on the top and depth on the bottom
        new_image = rc_utils.stack_images_vertical(color_image, depth_image_colormap)
"""
assert (
image_0.shape[1] == image_1.shape[1]
), f"image_0 width ({image_0.shape[1]}) must be the same as image_1 width ({image_1.shape[1]})."
return np.vstack((image_0, image_1)) | bb83cc6246bef5df370b45ac0ce7ed5b58a974f7 | 22,667 |
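# A shape-only usage sketch with plain NumPy arrays standing in for images
# (both must have the same width, as the assert above enforces).
import numpy as np
top = np.zeros((2, 4))
bottom = np.ones((3, 4))
print(stack_images_vertical(top, bottom).shape)  # -> (5, 4)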
def simulate_ids(num):
"""
    Simulate generating a given number of ID card numbers.
"""
ids = []
if num > 0:
for i in range(1, num+1):
id_raw = digit_1to6() + digit_7to10() + digit_11to14() + digit_15to17()
id = id_raw + digit_18(id_raw)
ids.append(id)
else:
return False
return ids | 2e74fb150f01b8d43eefffdab2d95790cad6f334 | 22,668 |
from math import exp, log
def geomean(x):
"""computes geometric mean
"""
return exp(sum(log(i) for i in x) / len(x)) | 7c5ad27938a6d6da7f304d7ac66fcc1a179162c2 | 22,669 |
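# A small worked example (assuming exp and log come from the math module, as added above):
print(geomean([2, 8]))        # -> 4.0, the square root of 2 * 8 (up to float error)
print(geomean([1, 10, 100]))  # -> ~10.0, the cube root of 1000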
from typing import Tuple
def _get_mean_traces_for_iteration_line_plot(
scenario_df: pd.DataFrame,
) -> Tuple[go.Scatter, go.Scatter, go.Scatter]:
"""Returns the traces for the mean of the success rate.
Parameters
----------
scenario_df : pd.DataFrame
DataFrame containing the columns "n_iterations", "mean_success_rate", "std_success_rate"
Returns
-------
Tuple[go.Scatter, go.Scatter, go.Scatter]
tuple of traces. Contains:
- main_trace: mean
- upper_trace: upper bound using the standard deviation
- lower_trace: lower bound using the standard deviation
"""
# standard colors used by Plotly
colors = px.colors.qualitative.Plotly
# mean of success rate
mean_trace = go.Scatter(
name="Mean",
x=scenario_df["n_iterations"],
y=scenario_df["mean_success_rate"],
# line=dict(color="rgb(0,100,80)"),
mode="lines+markers",
marker=dict(size=15),
line=dict(width=4),
# legendgroup="group",
visible=False,
)
# upper std bound of success rate
y = scenario_df["mean_success_rate"] + 2 * scenario_df["std_success_rate"]
y = np.minimum(y, 1)
upper_trace = go.Scatter(
name="Upper bound",
x=scenario_df["n_iterations"],
y=y,
mode="lines",
# make the line invisible
line=dict(color="rgba(255,255,255,0)"),
showlegend=False,
# legendgroup="group",
visible=False,
)
# lower std bound of success rate
y = scenario_df["mean_success_rate"] - 2 * scenario_df["std_success_rate"]
y = np.maximum(y, 0)
lower_trace = go.Scatter(
name="Lower bound",
x=scenario_df["n_iterations"],
y=y,
mode="lines",
# make the line invisible
line=dict(color="rgba(255,255,255,0)"),
fill="tonexty",
fillcolor=f"rgba{(*_hex_to_rgb(colors[0]), 0.3)}",
showlegend=False,
# legendgroup="group",
visible=False,
)
return mean_trace, upper_trace, lower_trace | 04542d1b1f76173e8fa2431a5a6bd3b8e8627026 | 22,670 |
def create_graph_of_words(words, database, filename, window_size = 4):
"""
Function that creates a Graph of Words that contains all nodes from each document for easy comparison,
inside the neo4j database, using the appropriate cypher queries.
"""
# Files that have word length < window size, are skipped.
# Window size ranges from 2 to 6.
length = len(words)
if (length < window_size):
# Early exit, we return the skipped filename
return filename
    # We are using a global set of edges to avoid creating duplicate edges between different graphs of words.
    # Basically the co-occurrences will be merged.
    global edges
    # We are using a global list of nodes to avoid creating duplicate nodes between different graphs of words.
# A list is being used to respect the order of appearance.
global nodes
# We are getting the unique terms for the current graph of words.
terms = []
for word in words:
if word not in terms:
terms.append(word)
# Remove end-of-sentence token, so it doesn't get created.
if 'e5c' in terms:
terms.remove('e5c')
# If the word doesn't exist as a node, then create it.
for word in terms:
if word not in nodes:
database.execute(f'CREATE (w:Word {{key: "{word}"}})', 'w')
# Append word to the global node graph, to avoid duplicate creation.
nodes.append(word)
# Create unique connections between existing nodes of the graph.
for i, current in enumerate(words):
# If there are leftover items smaller than the window size, reduce it.
if i + window_size > length:
window_size = window_size - 1
# If the current word is the end of sentence string,
# we need to skip it, in order to go to the words of the next sentence,
# without connecting words of different sentences, in the database.
if current == 'e5c':
continue
# Connect the current element with the next elements of the window size.
for j in range(1, window_size):
next = words[i + j]
# Reached the end of sentence string.
# We can't connect words of different sentences,
# therefore we need to pick a new current word,
# by going back out to the outer loop.
if next == 'e5c':
break
edge = (current, next)
if edge in edges:
# If the edge, exists just update its weight.
edges[edge] = edges[edge] + 1
query = (f'MATCH (w1:Word {{key: "{current}"}})-[r:connects]-(w2:Word {{key: "{next}"}}) '
f'SET r.weight = {edges[edge]}')
else:
                # Else, create it, with a starting weight of 1 meaning first co-occurrence.
edges[edge] = 1
query = (f'MATCH (w1:Word {{key: "{current}"}}) '
f'MATCH (w2:Word {{key: "{next}"}}) '
f'MERGE (w1)-[r:connects {{weight: {edges[edge]}}}]-(w2)')
# This line of code, is meant to be executed, in both cases of the if...else statement.
database.execute(query, 'w')
# Create a parent node that represents the document itself.
# This node is connected to all words of its own graph,
# and will be used for similarity/comparison queries.
database.execute(f'CREATE (d:Document {{filename: "{filename}"}})', 'w')
# Create a word list with comma separated, quoted strings for use in the Cypher query below.
#word_list = ', '.join(f'"{word}"' for word in terms)
query = (f'MATCH (w:Word) WHERE w.key IN {terms} '
'WITH collect(w) as words '
f'MATCH (d:Document {{filename: "{filename}"}}) '
'UNWIND words as word '
'CREATE (d)-[:includes]->(word)')
database.execute(query, 'w')
return | 89560a26d2ab624a28b07bf2ff826a8eb34564e8 | 22,671 |
from pathlib import Path
def get_dir(path):
"""
    Return the file's directory if the path points to a file; otherwise
    return a Path object for the given path.
"""
if not isinstance(path, Path):
path = Path(path)
return path.parent if path.is_file() else path | b7ca7f60d88c06bc3181bd93039c13a13e2684a4 | 22,672 |
import hashlib
def get_file_sha(fname):
"""
Calculates the SHA1 of a given file.
`fname`: the file path
return: the calculated SHA1 as hex
"""
result = ''
if isfile(fname):
sha1 = hashlib.sha1()
with open(fname, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha1.update(data)
result = sha1.hexdigest()
return result | c328f8c5b65a018016a02d39a54b753c33cd3217 | 22,673 |
def graphtool_to_gjgf(graph):
"""Convert a graph-tool graph object to gJGF.
Parameters
----------
graph : graph object from graph-tool
Returns
-------
gjgf : dict
Dictionary adhering to
:doc:`gravis JSON Graph Format (gJGF) <../../format_specification>`
Caution
-------
0 and 0.0 values are ignored because they represent missing values in graph-tool.
This can cause problems when such values have the usual meaning of a quantity being zero.
"""
data, data_graph, data_nodes, data_edges = _internal.prepare_gjgf_dict()
# 1) Graph properties
graph_directed = graph.is_directed()
graph_metadata_dict = {key: graph.graph_properties[key] # key syntax is necessary
for key in graph.graph_properties.keys()}
_internal.insert_graph_data(data_graph, graph_directed, graph_metadata_dict)
# 2) Nodes and their properties
for node_object in graph.vertices():
node_id = str(node_object)
node_metadata_dict = {}
for key, value_array in graph.vertex_properties.items():
val = value_array[node_object]
if isinstance(val, (str, int, float)) and val not in ('', 0, 0.0):
node_metadata_dict[key] = val
_internal.insert_node_data(data_nodes, node_id, node_metadata_dict)
# 3) Edges and their properties
for edge_object in graph.edges():
edge_source_id = str(edge_object.source())
edge_target_id = str(edge_object.target())
edge_metadata_dict = {}
for key, value_array in graph.edge_properties.items():
val = value_array[edge_object]
if val not in ('', 0, 0.0):
edge_metadata_dict[key] = val
_internal.insert_edge_data(data_edges, edge_source_id, edge_target_id, edge_metadata_dict)
return data | 73ceca04270aaa1f754001d446dba9c796ef6822 | 22,674 |
def forbidden_error(error):
""" 418 I'm a teapot """
return engine.get_template('errors/417.html').render({}), 418 | 43319b9aa95c970e75e392e584dd8af78199ceb0 | 22,675 |
import colorama
def green(s: str) -> str:
"""green(s) color s with green.
This function exists to encapsulate the coloring methods only in utils.py.
"""
return colorama.Fore.GREEN + s + colorama.Fore.RESET | 56f12d56257d0728d7d71cba61256bede5c3a064 | 22,676 |
import pickle
def cache_fun(fname_cache, fun):
"""Check whether cached data exists, otherwise call fun and return
Parameters
----------
fname_cache: string
name of cache to look for
fun: function
function to call in case cache doesn't exist
probably a lambda function
"""
try:
print("checking cache for", fname_cache)
with open(fname_cache, 'rb') as fhandle:
print("found cache")
ret = pickle.load(fhandle)
except (FileNotFoundError, EOFError):
print("cache not found, running function")
ret = fun()
with open(fname_cache, 'wb') as fhandle:
pickle.dump(ret, fhandle)
return ret | 0387f611ab12aeb8a2d7dfca664e08b0438b1905 | 22,677 |
def find_server_storage_UUIDs(serveruuid):
"""
@rtype : list
@return:
"""
storageuuids = []
db = dbconnect()
cursor = db.cursor()
cursor.execute("SELECT UUID, ServerUUID FROM Storage WHERE ServerUUID = '%s'" % serveruuid)
results = cursor.fetchall()
for row in results:
storageuuids.append(row[0])
db.close()
return storageuuids | 48abd3e3a9dc49cef5f9eb52d7327e79a61602a4 | 22,678 |
import torch
def unpack_bidirectional_lstm_state(state, num_directions=2):
"""
    Unpack the packed hidden state of a BiLSTM so that the first dimension equals the number of layers multiplied by
    the number of directions.
"""
batch_size = state.size(1)
new_hidden_dim = int(state.size(2) / num_directions)
return torch.stack(torch.split(state, new_hidden_dim, dim=2), dim=1).view(-1, batch_size, new_hidden_dim) | fa58ed9bcf2e9e95aa62b3d18110abe6abce6b1b | 22,679 |
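# A shape check: a state packed as (num_layers, batch, 2 * hidden) is unpacked
# into (num_layers * num_directions, batch, hidden).
packed = torch.randn(3, 5, 2 * 7)  # 3 layers, batch of 5, both directions along the last dim
print(unpack_bidirectional_lstm_state(packed).shape)  # -> torch.Size([6, 5, 7])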
def macd_diff(close, window_slow=26, window_fast=12, window_sign=9, fillna=False):
"""Moving Average Convergence Divergence (MACD Diff)
Shows the relationship between MACD and MACD Signal.
https://en.wikipedia.org/wiki/MACD
Args:
close(pandas.Series): dataset 'Close' column.
window_fast(int): n period short-term.
window_slow(int): n period long-term.
window_sign(int): n period to signal.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
return MACD(
close=close,
window_slow=window_slow,
window_fast=window_fast,
window_sign=window_sign,
fillna=fillna,
).macd_diff() | ef5b222c688026a25121b35d4354967e4f5c93fc | 22,680 |
def spike_histogram(series, merge_spikes=True, window_duration=60, n_bins=8):
"""
Args:
* series (pd.Series): watts
* merge_spikes (bool): Default = True
* window_duration (float): Width of each window in seconds
* n_bins (int): number of bins per window.
Returns:
spike_hist, bin_edges:
spike_hist (pd.DataFrame):
index is pd.DateTimeIndex of start of each time window
columns are 2-tuples of the bin edges in watts (int)
bin_edges (list of ints):
"""
fdiff = series.diff()
if merge_spikes:
fdiff = get_merged_spikes_pandas(fdiff)
abs_fdiff = np.fabs(fdiff)
freq = (window_duration, 'S')
date_range, boundaries = _indicies_of_periods(fdiff.index,
freq=freq)
bin_edges = np.concatenate(([0], np.exp(np.arange(1,n_bins+1))))
bin_edges = np.round(bin_edges).astype(int)
cols = zip(bin_edges[:-1], bin_edges[1:])
spike_hist = pd.DataFrame(index=date_range, columns=cols)
for date_i, date in enumerate(date_range):
start_i, end_i = boundaries[date_i]
chunk = abs_fdiff[start_i:end_i]
spike_hist.loc[date] = np.histogram(chunk, bins=bin_edges)[0]
return spike_hist, bin_edges | fc401d8c2b4aabde646a3570397f8fdb8087bf6b | 22,681 |
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_post(uuid, local_id, tapi_common_capacity_value=None): # noqa: E501
"""data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_information_rate_post
creates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of connectivity-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added to list
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!' | 92cefba50ff813e2fd1797d383ae2df7e02a99da | 22,682 |
def any(*args, span=None):
"""Create a new experssion of the union of all conditions in the arguments
Parameters
----------
args : list
List of symbolic boolean expressions
span : Optional[Span]
The location of this operator in the source code.
Returns
-------
expr: Expr
Expression
"""
if not args:
raise ValueError("Any must take at least 1 argument")
if len(args) == 1:
return args[0]
val = _ffi_api._OpOr(args[0], args[1], span) # type: ignore
for i in range(2, len(args)):
val = _ffi_api._OpOr(val, args[i], span) # type: ignore
return val | 697eb7e44d0fb9a0b9b947eea9fe4e8f68b78210 | 22,683 |
from typing import List
from typing import Any
def firstOrNone(list: List[Any]) -> Any:
"""
Return the first element of a list or None if it is not set
"""
return nthOrNone(list, 0) | 9c51a2f72fe5f516258f2fd20210bd83a3cfbf2d | 22,684 |
import numpy as np
def ellipse_points( xy=[0,-5.], ex=254., ez=190., n=1000 ):
"""
    :param xy: center of ellipse
:param ex: xy radius of ellipse
:param ez: z radius of ellipse
:param n: number of points
:return e: array of shape (n,2) of points on the ellipse
"""
t = np.linspace( 0, 2*np.pi, n )
e = np.zeros([len(t), 2])
e[:,0] = ex*np.cos(t) + xy[0]
e[:,1] = ez*np.sin(t) + xy[1]
return e | 93b3aeccf6ab04ad8b0e96cdad80a52d9a6c46c4 | 22,685 |
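# A minimal usage sketch (assumes NumPy, as imported above): sample 100 points on
# the default ellipse and inspect the first one, which lies at (xy[0] + ex, xy[1]).
pts = ellipse_points(n=100)
print(pts.shape)  # -> (100, 2)
print(pts[0])     # -> [254.  -5.]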
import numpy as np
def ion_list():
"""List of ions with pre-computed CLOUDY ionization fraction"""
ions = np.array(['al2','c2','c3','c4','fe2','h1','mg2',
'n1','n2','n3','n4','n5','ne8','o1','o6',
'o7','o8','si2','si3','si4'])
return ions | 4fae0b3cf7956349a30807d4975b1d72cf10c9ec | 22,687 |
def is_valid_filename(filename):
"""Determines if a filename is valid (with extension)."""
valid_extensions = ['mp4', 'webm', 'ogg']
extension = get_extension(filename)
return bool(extension and extension in valid_extensions) | 6a493fac59fca900e5d3bf55075e897bf36528f7 | 22,688 |
import re
def read(line_str, line_pos, pattern='[0-9a-zA-Z_:?!><=&]'):
"""
Read all tokens from a code line matching specific characters,
starting at a specified position.
Args:
line_str (str): The code line.
line_pos (int): The code line position to start reading.
pattern (str): Regular expression for a single character. All matching
characters will be read.
Returns:
literal (str): The literal that was read, including only characters
that were defined in the pattern argument.
line_pos (int): The updated line position.
"""
length = len(line_str)
literal = ''
while line_pos < length and re.match(pattern, line_str[line_pos]):
literal += line_str[line_pos]
line_pos += 1
return literal, line_pos | 95ece37e927ff3f8ea9579a7d78251b10b1ed0e6 | 22,689 |
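# --- Hypothetical usage sketch (not part of the original snippet): reads one token from a
# code line starting at position 0; the space does not match the default pattern and stops
# the scan.
literal, pos = read("count_1 = 42", 0)
print(literal, pos)   # 'count_1' 7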
from typing import Dict
import random
def demographic(population: int, highest_lvl_ratio: int = ONE_MILLION, num_levels: int = NUM_LEVELS) -> Dict[int, int]:
"""
Calculate the number of levelled NPCs in a given population.
Args:
population:
The population to consider these levelled NPCs in.
highest_lvl_ratio:
The fraction of the population that should be of the highest level.
num_levels:
The number of levels to consider.
Returns:
A dict mapping the levels (0-highest) to the number of NPCs at each level.
"""
# Generate the proportions of each level and scale to the desired population
fractions = generate_per_level_fractions(highest_lvl_ratio, num_levels)
rough_numbers = {(k + 1): (v * population) for k, v in enumerate(fractions)}
# Take the rough numbers use the whole number part and probabilistically add the remainder
final_numbers = dict()
for level, rough_num in rough_numbers.items():
num, extra_prob = divmod(rough_num, 1)
if random.random() < extra_prob:
num += 1
final_numbers[level] = int(num)
final_numbers[0] = population - sum(final_numbers.values())
return final_numbers | b45b26c7add41c85cf526789ba26f7c877db685a | 22,690 |
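# --- Hypothetical usage sketch (not part of the original snippet): assumes the module-level
# constants ONE_MILLION and NUM_LEVELS and the generate_per_level_fractions helper are
# defined as in the original project. Splits a town of 20,000 into per-level NPC counts.
counts = demographic(20_000)
print(sum(counts.values()))   # 20000 -- level 0 absorbs whatever the levelled NPCs leave over
print(counts[1])              # number of level-1 NPCs (varies slightly between runs)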
def gencpppxd(env, exceptions=True, ts=None):
"""Generates all cpp_*.pxd Cython header files for an environment of modules.
Parameters
----------
env : dict
        Environment dictionary mapping target module names to module description
dictionaries.
exceptions : bool or str, optional
Cython exception annotation. Set to True to automatically detect exception
types, False for when exceptions should not be included, and a str (such as
'+' or '-1') to apply to everywhere.
ts : TypeSystem, optional
A type system instance.
Returns
-------
cpppxds : dict
Maps environment target names to Cython cpp_*.pxd header files strings.
"""
ts = ts or TypeSystem()
cpppxds = {}
for name, mod in env.items():
if mod['srcpxd_filename'] is None:
continue
cpppxds[name] = modcpppxd(mod, exceptions, ts=ts)
return cpppxds | 627828bfc01c8282b0bf53f5e3cef234d0bdc816 | 22,691 |
import requests
def configure_mongo_connection(
key: str, host: str, port: int, dbname: str, username: str, password: str
):
"""
    Configure the connection with the given `key` in fidesops with your MongoDB database credentials.
Returns the response JSON if successful, or throws an error otherwise.
See http://localhost:8000/docs#/Connections/put_connection_config_secrets_api_v1_connection__connection_key__secret_put
"""
connection_secrets_data = {
"host": host,
"port": port,
"defaultauthdb": dbname,
"username": username,
"password": password,
}
response = requests.put(
f"{FIDESOPS_URL}/api/v1/connection/{key}/secret",
headers=oauth_header,
json=connection_secrets_data,
)
if response.ok:
if (response.json())["test_status"] != "failed":
logger.info(
f"Configured fidesops mongo connection secrets via /api/v1/connection/{key}/secret"
)
return response.json()
raise RuntimeError(
f"fidesops connection configuration failed! response.status_code={response.status_code}, response.json()={response.json()}"
) | 72746eb7bcebb747b4821f453e8f0f4543abc060 | 22,692 |
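# --- Hypothetical usage sketch (not part of the original snippet): the host, port and
# credentials below are placeholders, and FIDESOPS_URL, oauth_header and logger must
# already be configured at module level as in the original script.
configure_mongo_connection(
    key="my_mongo_connection",
    host="mongodb.example.internal",
    port=27017,
    dbname="mongo_test",
    username="mongo_user",
    password="mongo_pass",
)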
import random
def fully_random(entries, count):
"""Choose completely at random from all entries"""
return random.sample(entries, count) | a1f494f6b3cc635bc109378305bf547d48f29019 | 22,693 |
def _get_sets_grp(grpName="controllers_grp"):
"""Get set group
Args:
grpName (str, optional): group name
Returns:
PyNode: Set
"""
rig = _get_simple_rig_root()
sets = rig.listConnections(type="objectSet")
controllersGrp = None
for oSet in sets:
if grpName in oSet.name():
controllersGrp = oSet
return controllersGrp | ec65ab91b69cb1c509412258b517f78f9e124f24 | 22,694 |
import re
def clean_text(text, cvt_to_lowercase=True, norm_whitespaces=True):
"""
Cleans a text for language detection by transforming it to lowercase, removing unwanted
characters and replacing whitespace characters for a simple space.
:rtype : string
:param text: Text to clean
:param cvt_to_lowercase: Convert text to lowercase
:param norm_whitespaces: Normalize whitespaces
"""
# converting text to lowercase (if required)
cleaned_text = text.lower() if cvt_to_lowercase else text
# removing unwanted characters
cleaned_text = ''.join([
c for c in cleaned_text
if c not in unwanted_chars
])
# normalizing whitespaces
cleaned_text = re.sub(r'\s+', ' ', cleaned_text) if norm_whitespaces else cleaned_text
# returning the cleaned text
return cleaned_text | 7586112429f529d21f5d7a992bf40d3604dfe52a | 22,695 |
def print(*args, **kwargs) -> None:
"""Proxy for Console print."""
console = get_console()
return console.print(*args, **kwargs) | 49b96ae3df30bf09e742f8355f0867341396bc44 | 22,696 |
import yaml
def _yaml_to_dict(yaml_string):
"""
Converts a yaml string to dictionary
Args:
yaml_string: String containing YAML
Returns:
Dictionary containing the same object
"""
return yaml.safe_load(yaml_string) | c7de0c860028d17302cd4d07e20c3215503b977b | 22,698 |
import urllib
from bs4 import BeautifulSoup
def room_urls_for_search_url(url):
"""
    the urls of all rooms that are yielded by a search url
"""
with urllib.request.urlopen(url) as response:
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
room_urls = {erg_list_entry.find('a').find('strong').get_text():
room_url_for_room_id(erg_list_entry.find('a').get('href').split('.rgid=')[1].split('&')[0])
for erg_list_entry in soup.find_all('div', {'class': 'erg_list_entry'})
if erg_list_entry.find('div', {'class': 'erg_list_label'}).get_text() == 'Raum:'}
return room_urls | 8945b55e7379860defa0f4229690e53196acbe4d | 22,699 |
import time
import hmac
import hashlib
import requests
import json
import pandas as pd
def bittrex_get_balance(api_key, api_secret):
"""Get your total balances for your bittrex account
args:
required:
api_key (str)
api_secret (str)
return:
results (DataFrame) of balance information for each crypto
"""
nonce = int(time.time()*1000)
url = "https://bittrex.com/api/v1.1/account/getbalances?apikey={}&nonce={}".format(api_key, nonce)
# url = 'https://bittrex.com/api/v1.1/account/getbalances'
sign = hmac.new(api_secret.encode('utf-8'), url.encode('utf-8'), hashlib.sha512).hexdigest()
headers = {'apisign': sign}
r = requests.get(url, headers=headers)
j = json.loads(r.text)
results = j['result']
df = pd.DataFrame.from_dict(results)
return df | a90979a495fe9410d76996006063ca01fdcfe04c | 22,700 |
def evaluate_functions(payload, context, get_node_instances_method, get_node_instance_method, get_node_method):
"""
Evaluate functions in payload.
:param payload: The payload to evaluate.
:param context: Context used during evaluation.
:param get_node_instances_method: A method for getting node instances.
:param get_node_instance_method: A method for getting a node instance.
:param get_node_method: A method for getting a node.
:return: payload.
"""
#print '!!! evaluate_function', payload, context
context = PostProcessingContext(None, context, get_node_instances_method, get_node_instance_method, get_node_method)
return context.evaluate(payload) | 2cac04f35ac6032ec0d06ff5c25da9b64c700f7c | 22,702 |
from os.path import join

import numpy as np
import torch
from torch.distributions import Categorical
from torchvision.utils import save_image
def rollout(render=False):
""" Execute a rollout and returns minus cumulative reward.
Load :params: into the controller and execute a single rollout. This
is the main API of this class.
:args params: parameters as a single 1D np array
:returns: minus cumulative reward
# Why is this the minus cumulative reward?!?!!?
"""
print('a rollout dims', len(a_rollout))
    #env.seed(int(rand_env_seed)) # ensuring that each rollout has a different random seed.
obs = env.reset()
# This first render is required !
env.render()
next_hidden = [
torch.zeros(1, LATENT_RECURRENT_SIZE).to(device)
for _ in range(2)]
cumulative = 0
i = 0
rollout_dict = {k:[] for k in ['obs', 'rew', 'act', 'term']}
obs = transform(obs).unsqueeze(0).to(device)
mu, logsigma = vae.encoder(obs)
next_z = mu + logsigma.exp() * torch.randn_like(mu)
while True:
#print(i)
action = torch.Tensor(a_rollout[i]).to(device).unsqueeze(0)
#print('into mdrnn',action.shape, next_z.shape, next_hidden[0].shape)
# commented out reward and done.
mus, sigmas, logpi, _, _, next_hidden = mdrnn(action, next_z, next_hidden)
# decode current z to see what it looks like.
recon_obs = vae.decoder(next_z)
if i>dream_point:
if type(obs) != torch.Tensor:
obs = transform(obs).unsqueeze(0)
to_save = torch.cat([obs, recon_obs.cpu()], dim=0)
#print(to_save.shape)
# .view(args.batch_size*2, 3, IMAGE_RESIZE_DIM, IMAGE_RESIZE_DIM)
save_image(to_save,
join(mdir, 'dream/sample_' + str(i) + '.png'))
obs, reward, done, _ = env.step(a_rollout[i])
if i < dream_point or np.random.random()>0.95:
print('using real obs at point:', i)
obs = transform(obs).unsqueeze(0).to(device)
mu, logsigma = vae.encoder(obs)
next_z = mu + logsigma.exp() * torch.randn_like(mu)
else:
# sample the next z.
g_probs = Categorical(probs=torch.exp(logpi).permute(0,2,1))
which_g = g_probs.sample()
#print(logpi.shape, mus.permute(0,2,1)[:,which_g].shape ,mus[:,:,which_g].shape, which_g, mus.shape )
#print(mus.squeeze().permute(1,0).shape, which_g.permute(1,0))
mus_g, sigs_g = torch.gather(mus.squeeze(), 0, which_g), torch.gather(sigmas.squeeze(), 0, which_g)
#print(mus_g.shape)
next_z = mus_g + sigs_g * torch.randn_like(mus_g)
#print(next_z.shape)
#for key, var in zip(['obs', 'rew', 'act', 'term'], [obs,reward, action, done]):
# rollout_dict[key].append(var)
if render:
env.render()
cumulative += reward
if done or i >= time_limit:
return - cumulative
i += 1 | 9165642f37ef1ea6882c6be0a6fe493d2aff342c | 22,703 |
import requests
import json
def get_request(url, access_token, origin_address: str = None):
"""
Create a HTTP get request.
"""
api_headers = {
'Authorization': 'Bearer {0}'.format(access_token),
'X-Forwarded-For': origin_address
}
response = requests.get(
url,
headers=api_headers
)
if response.status_code == 200:
return json.loads(response.text)
else:
raise Exception(response.text) | 0c7f577132b1fb92a8ea9073cb68e9b7bf3cd2a5 | 22,704 |
import pandas as pd
def join_metadata(df: pd.DataFrame) -> pd.DataFrame:
"""Joins data including 'agent_id' to work out agent settings."""
assert 'agent_id' in df.columns
sweep = make_agent_sweep()
data = []
for agent_id, agent_ctor_config in enumerate(sweep):
agent_params = {'agent_id': agent_id}
agent_params.update(agent_ctor_config.settings)
data.append(agent_params)
agent_df = pd.DataFrame(data)
# Suffixes should not be needed... but added to be safe in case of clash.
return pd.merge(df, agent_df, on='agent_id', suffixes=('', '_agent')) | c09a524484424fb0fdf329fd73e3ea489b8ae523 | 22,705 |
def mocked_get_release_by_id(id_, includes=[], release_status=[],
release_type=[]):
"""Mimic musicbrainzngs.get_release_by_id, accepting only a restricted list
of MB ids (ID_RELEASE_0, ID_RELEASE_1). The returned dict differs only in
the release title and artist name, so that ID_RELEASE_0 is a closer match
to the items created by ImportHelper._create_import_dir()."""
# Map IDs to (release title, artist), so the distances are different.
releases = {ImportMusicBrainzIdTest.ID_RELEASE_0: ('VALID_RELEASE_0',
'TAG ARTIST'),
ImportMusicBrainzIdTest.ID_RELEASE_1: ('VALID_RELEASE_1',
'DISTANT_MATCH')}
return {
'release': {
'title': releases[id_][0],
'id': id_,
'medium-list': [{
'track-list': [{
'id': 'baz',
'recording': {
'title': 'foo',
'id': 'bar',
'length': 59,
},
'position': 9,
'number': 'A2'
}],
'position': 5,
}],
'artist-credit': [{
'artist': {
'name': releases[id_][1],
'id': 'some-id',
},
}],
'release-group': {
'id': 'another-id',
}
}
} | 647b9bf54f27353834a30ec907ecc5114a782b93 | 22,706 |
def get_name_with_template_specialization(node):
"""
node is a class
    returns the name, possibly with the <..> of the template specialization appended
"""
if not node.kind in (
CursorKind.CLASS_DECL, CursorKind.STRUCT_DECL, CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION): return None
tokens = get_tokens(node)
name = node.spelling
if tokens and tokens[0] == 'template':
t = tokens[len(extract_bracketed(tokens[1:])) + 3:]
if t and t[0] == '<': name = name + ''.join(extract_bracketed(t))
return name | 4cf19f9f383174789c7ff3003bc1144167d3e84d | 22,710 |
from typing import Optional
from typing import Union

import numpy as np
import pandas as pd
def linear_timeseries(
start_value: float = 0,
end_value: float = 1,
start: Optional[Union[pd.Timestamp, int]] = pd.Timestamp("2000-01-01"),
end: Optional[Union[pd.Timestamp, int]] = None,
length: Optional[int] = None,
freq: str = "D",
column_name: Optional[str] = "linear",
dtype: np.dtype = np.float64,
) -> TimeSeries:
"""
Creates a univariate TimeSeries with a starting value of `start_value` that increases linearly such that
it takes on the value `end_value` at the last entry of the TimeSeries. This means that
the difference between two adjacent entries will be equal to
(`end_value` - `start_value`) / (`length` - 1).
Parameters
----------
start_value
The value of the first entry in the TimeSeries.
end_value
The value of the last entry in the TimeSeries.
start
The start of the returned TimeSeries' index. If a pandas Timestamp is passed, the TimeSeries will have a pandas
DatetimeIndex. If an integer is passed, the TimeSeries will have a pandas Int64Index index. Works only with
either `length` or `end`.
end
Optionally, the end of the returned index. Works only with either `start` or `length`. If `start` is
set, `end` must be of same type as `start`. Else, it can be either a pandas Timestamp or an integer.
length
Optionally, the length of the returned index. Works only with either `start` or `end`.
freq
The time difference between two adjacent entries in the returned TimeSeries. Only effective if `start` is a
pandas Timestamp. A DateOffset alias is expected; see
`docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
column_name
Optionally, the name of the value column for the returned TimeSeries
dtype
The desired NumPy dtype (np.float32 or np.float64) for the resulting series
Returns
-------
TimeSeries
A linear TimeSeries created as indicated above.
"""
index = _generate_index(start=start, end=end, freq=freq, length=length)
values = np.linspace(start_value, end_value, len(index), dtype=dtype)
return TimeSeries.from_times_and_values(
index, values, freq=freq, columns=pd.Index([column_name])
) | ae8ef8252beee1e799182d0aaa499167c1abb78d | 22,711 |
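# --- Hypothetical usage sketch (not part of the original snippet): a 5-point series from
# 0 to 1, so the step between adjacent entries is (1 - 0) / (5 - 1) = 0.25. Assumes the
# darts-style TimeSeries and _generate_index helpers used above are importable.
ts = linear_timeseries(start_value=0.0, end_value=1.0, length=5)
print(ts.values().flatten())   # [0.   0.25 0.5  0.75 1.  ]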
from typing import Dict
def outlierBySd(X: Matrix,
max_iterations: int,
**kwargs: Dict[str, VALID_INPUT_TYPES]):
"""
Builtin function for detecting and repairing outliers using standard deviation
:param X: Matrix X
:param k: threshold values 1, 2, 3 for 68%, 95%, 99.7% respectively (3-sigma rule)
:param repairMethod: values: 0 = delete rows having outliers, 1 = replace outliers as zeros
2 = replace outliers as missing values
:param max_iterations: values: 0 = arbitrary number of iteration until all outliers are removed,
n = any constant defined by user
:return: Matrix X with no outliers
"""
params_dict = {'X': X, 'max_iterations': max_iterations}
params_dict.update(kwargs)
vX_0 = Matrix(X.sds_context, '')
vX_1 = Matrix(X.sds_context, '')
vX_2 = Matrix(X.sds_context, '')
vX_3 = Scalar(X.sds_context, '')
vX_4 = Scalar(X.sds_context, '')
output_nodes = [vX_0, vX_1, vX_2, vX_3, vX_4, ]
op = MultiReturn(X.sds_context, 'outlierBySd', output_nodes, named_input_nodes=params_dict)
vX_0._unnamed_input_nodes = [op]
vX_1._unnamed_input_nodes = [op]
vX_2._unnamed_input_nodes = [op]
vX_3._unnamed_input_nodes = [op]
vX_4._unnamed_input_nodes = [op]
return op | eda872a6dd6f8de22620ecf599381d186641a772 | 22,712 |
from typing import Dict
def encode_address(address: Dict) -> bytes:
"""
Creates bytes representation of address data.
args:
address: Dictionary containing the address data.
returns:
Bytes to be saved as address value in DB.
"""
address_str = ''
address_str += address['balance'] + '\0'
address_str += address['code'] + '\0'
address_str += str(address['inputTxIndex']) + '\0'
address_str += str(address['outputTxIndex']) + '\0'
address_str += str(address['minedIndex']) + '\0'
address_str += address['tokenContract'] + '\0'
address_str += str(address['inputTokenTxIndex']) + '\0'
address_str += str(address['outputTokenTxIndex']) + '\0'
address_str += str(address['inputIntTxIndex']) + '\0'
address_str += str(address['outputIntTxIndex']) + '\0'
return address_str.encode() | fcf05da104551561e44b7ab9c2bf54a9bfcf801e | 22,713 |
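# --- Hypothetical usage sketch (not part of the original snippet): every field is joined
# with a NUL separator in the order defined above, then UTF-8 encoded.
sample = {
    'balance': '1000', 'code': '0x', 'inputTxIndex': 3, 'outputTxIndex': 1,
    'minedIndex': 0, 'tokenContract': '', 'inputTokenTxIndex': 0,
    'outputTokenTxIndex': 0, 'inputIntTxIndex': 0, 'outputIntTxIndex': 0,
}
print(encode_address(sample))   # b'1000\x000x\x003\x001\x000\x00\x000\x000\x000\x000\x00'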
def analyze_image(image_url, tag_limit=10):
"""
Given an image_url and a tag_limit, make requests to both the Clarifai API
and the Microsoft Congnitive Services API to return two things:
(1) A list of tags, limited by tag_limit,
(2) A description of the image
"""
clarifai_tags = clarifai_analysis(image_url)
ms_tags, ms_caption = oxford_project_analysis(image_url)
    clarifai_tags = [s.lower() for s in clarifai_tags]
    ms_tags = [s.lower() for s in ms_tags]
    # Get tags that occur in both, preserving Clarifai's ordering
    ms_tags_set = set(ms_tags)
    merged_tags = [tag for tag in clarifai_tags if tag in ms_tags_set]
    # Then append the remaining tags from each service, skipping duplicates
    merged_tags += [tag for tag in clarifai_tags if tag not in merged_tags]
    merged_tags += [tag for tag in ms_tags if tag not in merged_tags]
# Limit the tags
merged_tags = merged_tags[:tag_limit]
return merged_tags, ms_caption | 8d3337c34369d69c9ae48f43100ecb2b930f8a15 | 22,714 |
def _decision_function(scope, operator, container, model, proto_type):
"""Predict for linear model.
score = X * coefficient + intercept
"""
coef_name = scope.get_unique_variable_name('coef')
intercept_name = scope.get_unique_variable_name('intercept')
matmul_result_name = scope.get_unique_variable_name(
'matmul_result')
score_name = scope.get_unique_variable_name('score')
coef = model.coef_.T
container.add_initializer(coef_name, proto_type,
coef.shape, coef.ravel())
container.add_initializer(intercept_name, proto_type,
model.intercept_.shape, model.intercept_)
input_name = operator.inputs[0].full_name
if type(operator.inputs[0].type) in (BooleanTensorType, Int64TensorType):
cast_input_name = scope.get_unique_variable_name('cast_input')
apply_cast(scope, operator.input_full_names, cast_input_name,
container, to=proto_type)
input_name = cast_input_name
container.add_node(
'MatMul', [input_name, coef_name],
matmul_result_name,
name=scope.get_unique_operator_name('MatMul'))
apply_add(scope, [matmul_result_name, intercept_name],
score_name, container, broadcast=0)
return score_name | e5f105bfb09ac0b5aba0c7adcfd6cb6538911040 | 22,715 |
def create_mapping(dico):
"""
Create a mapping (item to ID / ID to item) from a dictionary.
Items are ordered by decreasing frequency.
"""
sorted_items = sorted(list(dico.items()), key=lambda x: (-x[1], x[0]))
id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}
item_to_id = {v: k for k, v in list(id_to_item.items())}
return item_to_id, id_to_item | cdfb0bd9ffa047e0214486a1b2e63b45e437cf22 | 22,716 |
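# --- Hypothetical usage sketch (not part of the original snippet): the most frequent item
# gets ID 0; ties are broken alphabetically by the (-count, key) sort.
item_to_id, id_to_item = create_mapping({'the': 5, 'cat': 2, 'sat': 2})
print(item_to_id)   # {'the': 0, 'cat': 1, 'sat': 2}
print(id_to_item)   # {0: 'the', 1: 'cat', 2: 'sat'}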
def read_library(args):
"""Read in a haplotype library. Returns a HaplotypeLibrary() and allele coding array"""
assert args.library or args.libphase
filename = args.library if args.library else args.libphase
print(f'Reading haplotype library from: {filename}')
library = Pedigree.Pedigree()
if args.library:
library.readInPed(args.library, args.startsnp, args.stopsnp, haps=True, update_coding=True)
elif args.libphase:
library.readInPhase(args.libphase, args.startsnp, args.stopsnp)
else:
# This shouldn't happen
raise ValueError('No library specified')
print(f'Haplotype library contains {len(library)} individuals with {library.nLoci} markers')
haplotype_library = HaplotypeLibrary.HaplotypeLibrary(library.nLoci)
for individual in library:
for haplotype in individual.haplotypes:
haplotype_library.append(haplotype, individual.idx)
haplotype_library.freeze()
return haplotype_library, library.allele_coding | e96b156db9cdcf0b70dfcdb2ba155f26a59f8d44 | 22,718 |
import numpy as np
import sympy
def get_symbolic_quaternion_from_axis_angle(axis, angle, convention='xyzw'):
"""Get the symbolic quaternion associated from the axis/angle representation.
Args:
axis (np.array[float[3]], np.array[sympy.Symbol[3]]): 3d axis vector.
angle (float, sympy.Symbol): angle.
convention (str): convention to be adopted when representing the quaternion. You can choose between 'xyzw' or
'wxyz'.
Returns:
np.array[float[4]]: symbolic quaternion.
"""
w = sympy.cos(angle / 2.)
x, y, z = sympy.sin(angle / 2.) * axis
if convention == 'xyzw':
return np.array([x, y, z, w])
elif convention == 'wxyz':
return np.array([w, x, y, z])
else:
raise NotImplementedError("Asking for a convention that has not been implemented") | 8d753c72fc775de38b349e2bf77e3a61a84b07e9 | 22,720 |
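# --- Hypothetical usage sketch (not part of the original snippet): a symbolic rotation of
# angle theta about the z axis, in the default xyzw convention.
import numpy as np
import sympy
theta = sympy.Symbol('theta')
q = get_symbolic_quaternion_from_axis_angle(np.array([0, 0, 1]), theta)
print(q)   # [0 0 sin(theta/2) cos(theta/2)]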
import boto3

# The original module is assumed to define a shared STS client at import time, e.g.:
# sts_client = boto3.client("sts")
def get_session(role_arn, session_name, duration_seconds=900):
"""
Returns a boto3 session for the specified role.
"""
response = sts_client.assume_role(
RoleArn=role_arn,
RoleSessionName=session_name,
DurationSeconds=duration_seconds,
)
creds = response["Credentials"]
return boto3.Session(
aws_access_key_id=creds["AccessKeyId"],
aws_secret_access_key=creds["SecretAccessKey"],
aws_session_token=creds["SessionToken"],
) | d60b0b1c6288a8a594e0a1fe4175c69da80ffe29 | 22,721 |
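# --- Hypothetical usage sketch (not part of the original snippet): the role ARN below is a
# placeholder; any client created from the returned session uses the assumed role.
session = get_session(
    role_arn="arn:aws:iam::123456789012:role/ExampleReadOnly",
    session_name="audit-script",
    duration_seconds=900,
)
s3 = session.client("s3")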
import traceback
def base_kinesis_role(construct, resource_name: str, principal_resource: str, **kwargs):
"""
Function that generates an IAM Role with a Policy for SQS Send Message.
:param construct: Custom construct that will use this function. From the external construct is usually 'self'.
:param resource_name: Name of the resource. Used for naming purposes.
:param principal_resource: Resource used to define a Service Principal. Has to match an AWS Resource. For example, 'iot' -> 'iot.amazonaws.com'.
:param kwargs: Other parameters that could be used by the construct.
:return: IAM Role with an IAM Policy attached.
"""
try:
actions = ["kinesis:PutRecord"]
resources = [construct._kinesis_stream.stream_arn]
role = base_service_role(construct, resource_name, principal_resource, actions=actions, resources=resources)
except Exception:
print(traceback.format_exc())
else:
return role | 82c2d7b7b32857baa619ed7891956930e206bf78 | 22,722 |
def set_age_distribution_default(dic, value=None, drop=False):
"""
Set the ages_distribution key of dictionary to the given value or to the
World's age distribution.
"""
ages = dic.pop("age_distribution", None)
if ages is None:
ages = world_age_distribution() if value is None else value
if isinstance(ages, str):
ages = mdm.age_distribution(value)
elif not isinstance(ages, (pd.Series, pd.DataFrame)):
ages = get_param("age_distribution", value)
if not drop:
dic["age_distribution"] = ages
return ages | 98ca6e784b240ee76ddb4a9d77b691ef08fa7057 | 22,723 |
def home():
"""Home page"""
return render_template('home.html') | dc63ced89e5176de1f77ea995678c2f5c37c2593 | 22,724 |
import copy

import pandas as pd
def csv2dict(file_csv, delimiter=','):
"""
This function is used to load the csv file and return a dict which contains
the information of the csv file. The first row of the csv file contains the
column names.
Parameters
----------
file_csv : str
        The input filename including path of the csv file.
    delimiter : str, optional
        Column delimiter used in the csv file, ',' by default.
Returns
-------
outdic : dict
The return dict which contains all information in the csv file.
"""
# load station infomation: SED COSEISMIQ CSV format, temporary format
df = pd.read_csv(file_csv, delimiter=delimiter, header="infer", skipinitialspace=True, encoding='utf-8')
outdic = {}
for column in df:
outdic[column] = copy.deepcopy(df[column].values)
return outdic | d971941b7c5f0bbf021c64cf7c30d1dcee710b9d | 22,725 |
def make_bb_coord_l(contour_l, img, IMG_HEIGHT):
"""
Take in a list of contour arrays and return a list of four coordinates
of a bounding box for each contour array.
"""
assert isinstance(contour_l, list)
coord_l = []
for i in range(len(contour_l)):
c = contour_l[i]
bb = get_bb_coord(contour=c, img=img, IMG_HEIGHT=IMG_HEIGHT)
# extend if bb is a list (i.e. a split bounding box)
if isinstance(bb, list):
coord_l.extend(bb)
else:
coord_l.append(bb)
return coord_l | 4f30c95db8a7d2ef81376aa0fa77d7cedc0a913c | 22,726 |
def calc_pair_scale(seqs, obs1, obs2, weights1, weights2):
"""Return entropies and weights for comparable alignment.
A comparable alignment is one in which, for each paired state ij, all
alternate observable paired symbols are created. For instance, let the
symbols {A,C} be observed at position i and {A,C} at position j. If we
observe the paired types {AC, AA}. A comparable alignment would involve
replacing an AC pair with a CC pair."""
# scale is calculated as the product of mi from col1 with alternate
# characters. This means the number of states is changed by swapping
# between the original and selected alternate, calculating the new mi
pair_freqs = CategoryCounter(seqs)
weights1 = dict(weights1)
weights2 = dict(weights2)
scales = []
for a, b in list(pair_freqs.keys()):
weights = weights1[a]
pr = a + b
pair_freqs -= pr
obs1 -= a
# make comparable alignments by mods to col 1
for c, w in list(weights.items()):
new_pr = c + b
pair_freqs += new_pr
obs1 += c
entropy = mi(obs1.entropy, obs2.entropy, pair_freqs.entropy)
scales += [(pr, entropy, w)]
pair_freqs -= new_pr
obs1 -= c
obs1 += a
# make comparable alignments by mods to col 2
weights = weights2[b]
obs2 -= b
for c, w in list(weights.items()):
new_pr = a + c
pair_freqs += new_pr
obs2 += c
entropy = mi(obs1.entropy, obs2.entropy, pair_freqs.entropy)
scales += [(pr, entropy, w)]
obs2 -= c
pair_freqs -= new_pr
obs2 += b
pair_freqs += pr
return scales | 6781b86719d669970d67753eabc91c60bc258dcc | 22,727 |
from math import sqrt
def distance_to_line(pt, line_pt_pair):
"""
Returns perpendicular distance of point 'pt' to a line given by
the pair of points in second argument
"""
x = pt[0]
y = pt[1]
p, q = line_pt_pair
q0_m_p0 = q[0]-p[0]
q1_m_p1 = q[1]-p[1]
denom = sqrt(q0_m_p0*q0_m_p0 + q1_m_p1*q1_m_p1)
return (q0_m_p0*p[1]-q1_m_p1*p[0] - q0_m_p0*y + q1_m_p1*x)/denom | 93500c0f8a4d8d11435647e1868fb929128d1273 | 22,728 |
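# --- Hypothetical usage sketch (not part of the original snippet): the point (0, 1) is one
# unit from the x-axis; note the returned value is signed, so points on the opposite side
# of the line flip the sign.
print(distance_to_line((0.0, 1.0), ((0.0, 0.0), (1.0, 0.0))))   # -1.0
print(distance_to_line((0.0, -1.0), ((0.0, 0.0), (1.0, 0.0))))  # 1.0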
def define_wfr(ekev):
"""
defines the wavefront in the plane prior to the mirror ie., after d1
:param ekev: energy of the source
"""
spb = Instrument()
spb.build_elements(focus = 'nano')
spb.build_beamline(focus = 'nano')
spb.crop_beamline(element1 = "d1")
bl = spb.get_beamline()
wfr = construct_SA1_wavefront(512, 512, ekev, 0.25)
bl.propagate(wfr)
return wfr | 8ccf7880ff22dc13f979b45c288a58bbb4e3c5c9 | 22,729 |
import chemparse
def ratlab(top="K+", bottom="H+", molality=False):
"""
Python wrapper for the ratlab() function in CHNOSZ.
Produces a expression for the activity ratio between the ions in the top and
bottom arguments. The default is a ratio with H+, i.e.
(activity of the ion) / [(activity of H+) ^ (charge of the ion)]
Parameters
----------
top : str, default "K+"
The ion in the numerator of the ratio.
bottom : str, default "H+"
The ion in the denominator of the ratio.
molality : bool, default False
Use molality (m) instead of activity (a) for aqueous species?
Returns
-------
A formatted string representing the activity ratio.
"""
top_formula = chemparse.parse_formula(top)
if "+" in top_formula.keys():
top_charge = top_formula["+"]
elif "-" in top_formula.keys():
top_charge = top_formula["-"]
else:
raise Exception("Cannot create an ion ratio involving one or more neutral species.")
bottom_formula = chemparse.parse_formula(bottom)
if "+" in bottom_formula.keys():
bottom_charge = bottom_formula["+"]
elif "-" in bottom_formula.keys():
        bottom_charge = bottom_formula["-"]
else:
raise Exception("Cannot create an ion ratio involving one or more neutral species.")
if top_charge.is_integer():
top_charge = int(top_charge)
if bottom_charge.is_integer():
bottom_charge = int(bottom_charge)
if top_charge != 1:
top_charge = "<sup>"+str(top_charge)+"</sup>"
else:
top_charge = ""
if bottom_charge != 1:
bottom_charge = "<sup>"+str(bottom_charge)+"</sup>"
else:
bottom_charge = ""
if molality:
sym = "m"
else:
sym = "a"
return "log("+sym+bottom_charge+"<sub>"+html_chemname_format(top)+"</sub>/"+sym+top_charge+"<sub>"+html_chemname_format(bottom)+"</sub>)" | 69c6a5fbbb344e5b0e063ea438994c3ce7e6cafb | 22,730 |