content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def _act_drop(grid_world, agent, env_obj, drop_loc):
""" Private MATRX method.
Drops the carried object.
Parameters
----------
grid_world : GridWorld
The :class:`matrx.grid_world.GridWorld` instance in which the
object is dropped.
agent : AgentBody
The :class:`matrx.objects.agent_body.AgentBody` of the agent who
drops the object.
env_obj : EnvObject
The :class:`matrx.objects.env_object.EnvObject` to be dropped.
drop_loc : [x, y]
The drop location.
Returns
-------
DropObjectResult
The :class:`matrx.actions.action.ActionResult` depicting the
action's expected success or failure and reason for that result.
Returns the following results:
* RESULT_SUCCESS: When the object is successfully dropped.
"""
# Updating properties
agent.is_carrying.remove(env_obj)
env_obj.carried_by.remove(agent.obj_id)
# We return the object to the grid location we are standing at without registering a new ID
env_obj.location = drop_loc
grid_world._register_env_object(env_obj, ensure_unique_id=False)
return DropObjectResult(DropObjectResult.RESULT_SUCCESS, True)
|
93511395fda0060d479284a4b97ccd181346292f
| 28,321 |
import plotly.graph_objects as go
def get_emoticon_radar_chart(scores_list, colors, names):
    """ Build a radar (Scatterpolar) chart of the eight emotion scores.
    Each entry of `scores_list` is drawn as one filled trace, styled with the
    matching entries of `colors` and `names`.
    """
data_radars = []
emotions = ['anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust']
for score, color, name in zip(scores_list, colors, names):
data = go.Scatterpolar(r=score, theta=emotions, fill='toself', line=dict(color=color), name=name)
data_radars.append(data)
layout = go.Layout(polar=dict(radialaxis=dict(visible=True)), showlegend=True, margin=dict(t=30),
paper_bgcolor='rgba(0,0,0,0)')
fig = go.Figure(data=data_radars, layout=layout)
return fig
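# Minimal usage sketch with made-up scores for the eight emotions (plotly assumed):
scores = [[0.2, 0.1, 0.05, 0.3, 0.8, 0.1, 0.4, 0.7]]
fig = get_emoticon_radar_chart(scores, colors=['#1f77b4'], names=['sample text'])
fig.show()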
|
9f147a9bdd5713a915b96a309bcd1086c9e17ba6
| 28,322 |
def get_polling_method(meth_name=None):
""" Grab a polling-method by string-key
Eventually these could be auto-registered somehow;
for now we just keep a look-up dict of them. """
methods = dict(
poll_game_unknowns=poll_game_unknowns,
poll_dan=poll_dan,
)
default_method = poll_game_unknowns
if meth_name is None:
return default_method
# Note missing-entries, other than `None`, will generate KeyErrors
# This is on the caller to handle.
return methods[meth_name]
|
2faf19b3b6cf6decd230c5678591478eaf7839d6
| 28,323 |
import pkg_resources
import numpy as np
import scipy.io.wavfile
def generate_wav(pattern, tempo=120, loops=1, saveName='audiofile.wav', fs=44100,
dynamics=False, customSound=None):
"""
Generate a .wav file from a pattern.
Specify a tempo (in BPM), loops, name of the file, sampling rate,
and decide if you want "dynamics". Dynamics adds offsets to the amplitude
of the onsets, thus generating more naturally sounding rhythm pattern.
Parameters
----------
pattern : A rhythm pattern.
tempo : Tempo in BPM, default is 120.
loops : Number of times to repeat the pattern.
saveName : Name of the output file.
fs : Integer, optional
Samplerate. The default is 44100.
dynamics : Boolean, optional
Setting this to true adds dynamics to the audio file. The default is False.
Returns
-------
saveName : Path and name of saved audio file.
"""
# input check
assert pattern.shape[0] == 3, 'Wrong shape of pattern, should be 3xn!'
#this experimentally just adds some dynamics
if dynamics:
dynamicsHihat = np.tile([0.7, 0.5, 1, 0.5], 8)
dynamicsSnare = np.tile([0.8, 0.7, 0.8, 0.5, 1, 0.5, 0.8, 0.5], 4)
dynamicsKick = np.tile([1, 0.5, 0.7, 0.5, 0.8, 0.5, 0.7, 0.5], 4)
else:
dynamicsHihat = np.ones(32)
dynamicsSnare = np.ones(32)
dynamicsKick = np.ones(32)
if saveName[-4:] != '.wav':
saveName = saveName + '.wav'
# read samples
if customSound:
if customSound == 'amen':
hihatLoc = pkg_resources.resource_stream(__name__, 'samples/amenRideLong.wav')
kickLoc = pkg_resources.resource_stream(__name__, 'samples/amenKickLong.wav')
snareLoc = pkg_resources.resource_stream(__name__, 'samples/amenSnareLong.wav')
elif customSound == '909':
hihatLoc = pkg_resources.resource_stream(__name__, 'samples/909hihatStereo.wav')
kickLoc = pkg_resources.resource_stream(__name__, 'samples/909kickStereo.wav')
snareLoc = pkg_resources.resource_stream(__name__, 'samples/909snareStereo.wav')
else:
hihatLoc = pkg_resources.resource_stream(__name__, 'samples/hihat.wav')
kickLoc = pkg_resources.resource_stream(__name__, 'samples/kick.wav')
snareLoc = pkg_resources.resource_stream(__name__, 'samples/snare.wav')
rate, hihatSample = scipy.io.wavfile.read(hihatLoc)
rate, kickSample = scipy.io.wavfile.read(kickLoc)
rate, snareSample = scipy.io.wavfile.read(snareLoc)
# just pushing down the amplitude a bit
if not customSound:
hihatSample = hihatSample * 0.25
kickSample = kickSample * 0.25
snareSample = snareSample * 0.25
maxLengthSample = max([len(hihatSample), len(snareSample), len(kickSample)])
if rate != fs:
print('Error: Sample rate mismatch between samples and specified sample rate')
return
# create three np arrays for each instrument, fill them, then merge them
quarter = 60/tempo
bar = 4 * quarter
length = 2 * bar * fs
# figure out a way to set dtype as same as the wav-files
hihats = np.zeros((int(length + maxLengthSample),2), dtype='int16')
snare = np.zeros((int(length + maxLengthSample),2), dtype='int16')
kick = np.zeros((int(length + maxLengthSample),2), dtype='int16')
# three separate loops
hihatEvents = pattern[0]
snareEvents = pattern[1]
kickEvents = pattern[2]
# for fast tempi, need to consider that the length won't be enough.
#hihats
for n in range(0, len(hihatEvents)):
if hihatEvents[n] == 1:
thisPosition = int(round((length/32) * n))
hihats[thisPosition:thisPosition+len(hihatSample),] = hihats[thisPosition:thisPosition+len(hihatSample),] + (hihatSample * dynamicsHihat[n])
#snare
for n in range(0, len(snareEvents)):
if snareEvents[n] == 1:
thisPosition = int(round((length/32) * n))
snare[thisPosition:thisPosition+len(snareSample),] = snare[thisPosition:thisPosition+len(snareSample),] + (snareSample * dynamicsSnare[n])
#kick
for n in range(0, len(kickEvents)):
if kickEvents[n] == 1:
thisPosition = int(round((length/32) * n))
kick[thisPosition:thisPosition+len(kickSample),] = kick[thisPosition:thisPosition+len(kickSample),] + (kickSample * dynamicsKick[n])
# mix together
jointSample = (hihats * 0.1) + (snare * 0.3) + (kick * 0.3)
# ensure length
#jointSample = jointSample[0:int(round(length)),].astype('int16')
# add loops
looped = np.zeros((int(((length) * loops + (2 * length))), 2), dtype='int16')
#if loops > 1:
for n in range(0, loops):
thisPosition = n*int(length)
looped[thisPosition:thisPosition+len(jointSample)] = looped[thisPosition:thisPosition+len(jointSample)] + jointSample
# now trim it
looped = looped[0:(int(round(length*loops))+maxLengthSample)]
#else:
# looped = jointSample[0:(int(round(length*loops))+maxLengthSample)]
normalized = np.array((looped / np.max(np.abs(looped.flatten()))) * 32767, dtype='int16')
# write wav
scipy.io.wavfile.write(saveName, fs, normalized)
return saveName
|
aa11722a40aca967d168f38ea1ae239eccfa3361
| 28,326 |
def svn_fs_upgrade(*args):
"""svn_fs_upgrade(char path, apr_pool_t pool) -> svn_error_t"""
return _fs.svn_fs_upgrade(*args)
|
4f466df2d6f41cbe277370e3ec158e7737d271f0
| 28,327 |
def api_url(service: str = "IPublishedFileService",
function: str = "QueryFiles",
version: str = "v1") -> str:
"""
Builds a steam web API url.
:param service: The steam service to attach to.
:param function: The function to call.
:param version: The API version.
:return: The built URL.
"""
return "https://api.steampowered.com/%s/%s/%s/" % (
service, function, version
)
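# Illustrative call; the default arguments build the QueryFiles endpoint:
# api_url() -> 'https://api.steampowered.com/IPublishedFileService/QueryFiles/v1/'
url = api_url(service="IPublishedFileService", function="QueryFiles", version="v1")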
|
2538ab8c8035c491611585089ddd3a1625e423cc
| 28,328 |
import re
def reg_all_keywords(data):
"""
    Extract all keywords from a meta file. Each keyword is written as
    ***[:###]*** and the ### part of every match is returned.
    :param data: raw text of the meta file
    :return: list of extracted keywords, or None if nothing matches
"""
patt = re.compile(r"\[:([^\[\]]+)\]")
ret = patt.findall(data)
return ret if ret else None
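# Illustrative call on a made-up meta-file string:
# reg_all_keywords("intro [:alpha] body [:beta]") -> ['alpha', 'beta']
keywords = reg_all_keywords("intro [:alpha] body [:beta]")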
|
d81f8dd5f04d9e65f61247a8c9857969cf7e514d
| 28,329 |
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
_focus = windowing.FocusManager()
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Tk.Tk()
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
|
c5c589c214a70f07ace913b5b8fb13cd52c30240
| 28,330 |
def get_platform():
"""Gets the platform (example: azure)."""
return get_config_value("platform")
|
693540442f23b21b9d983c9e7728d5397415544b
| 28,331 |
def ascii_from_object(space, w_obj):
"""Implements builtins.ascii()"""
# repr is guaranteed to be unicode
w_repr = space.repr(w_obj)
w_encoded = encode_object(space, w_repr, 'ascii', 'backslashreplace')
return decode_object(space, w_encoded, 'ascii', 'strict')
|
14ff3217b42743c5e202db107914e5ee0df4a10d
| 28,333 |
def capture(p):
"""Return a peg that acts like p, except it adds to the values
tuple the text that p matched."""
return _Peg(('capture(%r)', p),
lambda s, far, (i, vals):
[(i2, vals2 + (s[i:i2],))
for i2, vals2 in p.run(s, far, (i, vals))])
|
710e1cf4015b057e6898affa70ef380be0648ea3
| 28,335 |
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def html_chart(df, height=1200):
    """
    Make an interactive chart with one stacked subplot per DataFrame column.
    :param df: input dataframe
    :param height: optional plot height in pixels
    :returns: plotly figure
    """
fig = make_subplots(rows=(len(df.columns)),
cols=1,
subplot_titles=df.columns,
shared_xaxes=True,
vertical_spacing=0.007
)
j = 1
for i in df.columns:
fig.add_trace(
go.Scatter(
{'x': df.index,
'y': df[i]}),
row=j, col=1)
j += 1
fig.update_layout(height=height, font_size=9)
return fig
|
821f6ae8c10a80c32a932cd77beb2b0a3969d0af
| 28,336 |
def build_0565_color_lookup():
"""Build the lookup table for the ARGB_0565 color format"""
bdG = 6
bdB = 5
redColorOffset = bdG + bdB
greenColorOffset = bdB
val_lookup_5 = BITDEPTH_VALUE_LOOKUPS[5]
val_lookup_6 = BITDEPTH_VALUE_LOOKUPS[6]
conversion_table = [None] * 65536
for r_short, r_value in enumerate(val_lookup_5):
r_offset = r_short << redColorOffset
for g_short, g_value in enumerate(val_lookup_6):
g_offset = g_short << greenColorOffset
rg_offset = r_offset | g_offset
for b_short, b_value in enumerate(val_lookup_5):
final_color_code = rg_offset | b_short
final_color_tuple = (r_value, g_value, b_value, 255)
conversion_table[final_color_code] = final_color_tuple
return conversion_table
|
c3a33e0355fb795e93ee722012faab6b83195bb4
| 28,337 |
from itertools import chain
def build_que_input_from_segments(context, answer, question, tokenizer,
max_input_length=1000, with_eos=True,
with_labels=True):
""" Build a sequence of input from 3 segments:
context, answer, question """
bos, eos, ctx, ans, que, pad, gen = \
tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
padded = []
context = [bos, ctx] + context
answer = [ans] + answer
question = [que] + question + ([eos] if with_eos else [])
combined = list(chain(context, answer, question))
len_combined = len(combined)
if len_combined > max_input_length:
len_context = max_input_length - len(answer) - len(question)
context = context[:len_context]
elif len_combined < max_input_length:
        len_remaining = max_input_length - len_combined
        padded = [pad] * len_remaining
instance = {}
instance["input_ids"] = list(chain(padded, context, answer, question))
instance["token_type_ids"] = [pad] * len(padded) + [ctx] * len(context)\
+ [ans] * len(answer) + [que] * len(question)
if with_labels:
instance["labels"] = [-1] * (len(padded) + len(context) + len(answer)
+ 1) + question[1:]
return instance
|
400abaac1744bab2f665c8ffad50ba2b7030569b
| 28,338 |
def transition(field, source='*', target=None, conditions=[], custom={}):
"""
Method decorator for mark allowed transitions
Set target to None if current state needs to be validated and
has not changed after the function call
"""
def inner_transition(func):
fsm_meta = getattr(func, '_django_fsm', None)
if not fsm_meta:
fsm_meta = FSMMeta(field=field, method=func)
setattr(func, '_django_fsm', fsm_meta)
@wraps(func)
def _change_state(instance, *args, **kwargs):
return fsm_meta.field.change_state(instance, func, *args, **kwargs)
if isinstance(source, (list, tuple)):
for state in source:
func._django_fsm.add_transition(state, target, conditions, custom)
else:
func._django_fsm.add_transition(source, target, conditions, custom)
return _change_state
return inner_transition
|
cf4066c8a21c89a793e526cf4a4171ac17cf7c42
| 28,341 |
def get_inspexp_frames(slice, inspexp_data, images_path):
"""
Loads inspiration and expiration frames for the specified cine-MRI slice
Parameters
----------
slice: CineMRISlice
A cine-MRI slice for which to extract inspiration and expiration frames
inspexp_data : dict
A dictionary with inspiration / expiration frames data
images_path : Path
A path to the image folder in cine-MRI archive
Returns
-------
insp_frame, exp_frame : ndarray
The inspiration and expiration frames
"""
insp_ind, exp_ind = get_insp_exp_indices(slice, inspexp_data)
    # Load the inspiration and expiration frames (visceral slide is computed for the expiration frame)
slice_path = slice.build_path(images_path)
slice_array = sitk.GetArrayFromImage(sitk.ReadImage(str(slice_path)))
insp_frame = slice_array[insp_ind]
exp_frame = slice_array[exp_ind]
return insp_frame, exp_frame
|
d0dda284af281ebca08ee494a1e5fdc8f97789e4
| 28,342 |
def cleaned_reviews_dataframe(reviews_df):
"""
Remove newline "\n" from titles and descriptions,
as well as the "Unnamed: 0" column generated when
loading DataFrame from CSV. This is the only cleaning
required prior to NLP preprocessing.
INPUT: Pandas DataFrame with 'title' and 'desc' column names
OUTPUT: Cleaned DataFrame with combined 'title_desc' column
"""
reviews_df['title'] = reviews_df['title'].str.replace('\n', '')
reviews_df['desc'] = reviews_df['desc'].str.replace('\n','')
reviews_df['title_desc'] = reviews_df['title'] + reviews_df['desc']
if 'Unnamed: 0' in set(reviews_df.columns):
reviews_df = reviews_df.drop('Unnamed: 0', axis=1)
return reviews_df
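# Minimal usage sketch with a hypothetical two-row DataFrame (pandas assumed):
import pandas as pd
reviews = pd.DataFrame({'title': ['Great read\n', 'Too long\n'],
                        'desc': ['Loved it.\n', 'Skimmed it.\n']})
cleaned = cleaned_reviews_dataframe(reviews)
# cleaned['title_desc'][0] -> 'Great readLoved it.'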
|
8f805f556667f5d734d4d272a2194784d37ce99c
| 28,343 |
def not_list(l):
"""Return the element wise negation of a list of booleans"""
assert all([isinstance(it, bool) for it in l])
return [not it for it in l]
|
6d30f5dd587cdc69dc3db94abae92a7a8a7c610d
| 28,344 |
import numpy as np
def first_order_forward(n, zero=True):
    """
    Build an (n+1) x n matrix whose first row is all ones and whose remaining
    rows form the first-order forward-difference operator (-1 on the diagonal,
    +1 on the superdiagonal). The `zero` argument is currently unused.
    """
    m1 = -np.eye(n) + np.eye(n, k=1)
    return np.vstack([np.ones(n), m1])
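# Illustrative result for n = 3:
# first_order_forward(3) ->
#   [[ 1.,  1.,  1.],
#    [-1.,  1.,  0.],
#    [ 0., -1.,  1.],
#    [ 0.,  0., -1.]]
D = first_order_forward(3)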
|
d79149614e15c8cce9f13a402f6321b43498862b
| 28,345 |
def boil(config, recipe_config):
""" Boil wort. """
up = config['unit_parser']
if 'Hops' in recipe_config:
hops = recipe_config['Hops']
for hop in hops:
if 'addition type' in hop and hop['addition type'] == 'fwh':
if 'mass' in hop and 'name' in hop and 'type' in hop:
mass = up.convert(hop['mass'], 'ounces')
variety = hop['name']
pellets = hop['type']
print('Add {0:.2f}oz {1:s} {2:s} during lautering process (first wort hopping).'.format(mass, variety, pellets))
time_additions = []
for hop in hops:
if 'boil_time' in hop and 'mass' in hop and 'name' in hop and 'type' in hop:
boil_time = up.convert(hop['boil_time'], 'minutes')
mass = up.convert(hop['mass'], 'ounces')
variety = hop['name']
pellets = hop['type']
time_additions.append({'boil_time': boil_time, 'mass': mass, 'variety': variety, 'pellets': pellets})
time_additions = sorted(time_additions, key=lambda k: k['boil_time'], reverse=True)
for hop in time_additions:
if hop['boil_time'] == 1:
plural = ''
else:
plural = 's'
print('Add {0:.2f}oz {2:s} {3:s} at {1:.0f} minute{4:s}.'.format(hop['mass'], hop['boil_time'], hop['variety'], hop['pellets'], plural))
for hop in hops:
if 'addition type' in hop and hop['addition type'] == 'flameout':
if 'mass' in hop and 'name' in hop and 'type' in hop:
mass = up.convert(hop['mass'], 'ounces')
variety = hop['name']
pellets = hop['type']
print('Add {0:.2f}oz {1:s} {2:s} at flameout.'.format(mass, variety, pellets))
if ('Pre-Boil Volume' not in recipe_config or
'Pre-Boil Gravity' not in recipe_config):
return config, recipe_config
pre_bv = up.convert(recipe_config['Pre-Boil Volume'], 'gallons')
pre_bg = recipe_config['Pre-Boil Gravity']
if ('Brew Day' in recipe_config and
'Post-Boil Volume' in recipe_config['Brew Day'] and
'Original Gravity' in recipe_config['Brew Day']):
if 'Pre-Boil Volume' in recipe_config['Brew Day']:
actual_pre_bv = up.convert(recipe_config['Brew Day']['Pre-Boil Volume'], 'gallons')
if 'Boil Time' in recipe_config:
boil_time = up.convert(recipe_config['Boil Time'], 'hours')
elif 'Boil Time' in config:
boil_time = up.convert(config['Boil Time'], 'hours')
else:
boil_time = 1.0
post_bv = up.convert(recipe_config['Brew Day']['Post-Boil Volume'], 'gallons')
og = recipe_config['Brew Day']['Original Gravity']
post_gp = specific_gravity_to_gravity_points(og, post_bv)
pre_gp = specific_gravity_to_gravity_points(pre_bg, pre_bv)
if 'Brewhouse Efficiency' in recipe_config:
planned_efficiency = recipe_config['Brewhouse Efficiency']
elif 'Brewhouse Efficiency' in config:
planned_efficiency = config['Brewhouse Efficiency']
else:
planned_efficiency = 0.7
efficiency = planned_efficiency * post_gp / pre_gp
recipe_config['Brew Day']['Brewhouse Efficiency'] = efficiency
evaporation_rate = (actual_pre_bv - post_bv) / boil_time
recipe_config['Brew Day']['Evaporation Rate'] = '{0:.06f} gallons_per_hour'.format(evaporation_rate)
print('Actual post-boil volume: {0:.02f} gallons'.format(post_bv))
print('Evaporation rate: {0:.02f} gallons per hour'.format(evaporation_rate))
print('Original gravity: {0:.03f}'.format(og))
print('Efficiency: {0:.02f}'.format(efficiency))
elif ('Brew Day' in recipe_config
and 'Pre-Boil Volume' in recipe_config['Brew Day']
and 'Pre-Boil Gravity' in recipe_config['Brew Day']):
pre_boil_volume = up.convert(recipe_config['Brew Day']['Pre-Boil Volume'], 'gallons')
pre_boil_gravity = recipe_config['Brew Day']['Pre-Boil Gravity']
pre_gp = pre_boil_gravity - 1
if 'Boil Time' in recipe_config:
boil_time = up.convert(recipe_config['Boil Time'], 'hours')
elif 'Boil Time' in config:
boil_time = up.convert(config['Boil Time'], 'hours')
else:
boil_time = 1.0
if 'Evaporation Rate' in recipe_config:
evaporation_rate = up.convert(recipe_config['Evaporation Rate'], 'gallons_per_hour')
elif 'Evaporation Rate' in config:
evaporation_rate = up.convert(config['Evaporation Rate'], 'gallons_per_hour')
else:
evaporation_rate = 1.75
post_boil_volume = pre_boil_volume - evaporation_rate * boil_time
og = 1 + pre_gp * pre_boil_volume / post_boil_volume
print('Predicted original gravity: {0:.03f}'.format(og))
recipe_config['Brew Day']['Original Gravity'] = og
else:
if 'Original Gravity' in recipe_config:
print('Predicted original gravity: {0:.03f}'.format(recipe_config['Original Gravity']))
return config, recipe_config
|
035de7c388e2c82962987c63c13679e6bd16222f
| 28,346 |
def _ecdf(
data=None,
p=None,
x_axis_label=None,
y_axis_label="ECDF",
title=None,
plot_height=300,
plot_width=450,
staircase=False,
complementary=False,
x_axis_type="linear",
y_axis_type="linear",
**kwargs,
):
"""
Create a plot of an ECDF.
Parameters
----------
data : array_like
One-dimensional array of data. Nan's are ignored.
conf_int : bool, default False
If True, display a confidence interval on the ECDF.
ptiles : list, default [2.5, 97.5]
        The percentiles to use for the confidence interval. Ignored if
`conf_int` is False.
n_bs_reps : int, default 1000
Number of bootstrap replicates to do to compute confidence
interval. Ignored if `conf_int` is False.
fill_color : str, default 'lightgray'
        Color of the confidence interval. Ignored if `conf_int` is
False.
fill_alpha : float, default 1
Opacity of confidence interval. Ignored if `conf_int` is False.
p : bokeh.plotting.Figure instance, or None (default)
If None, create a new figure. Otherwise, populate the existing
figure `p`.
x_axis_label : str, default None
Label for the x-axis. Ignored if `p` is not None.
y_axis_label : str, default 'ECDF' or 'ECCDF'
Label for the y-axis. Ignored if `p` is not None.
title : str, default None
Title of the plot. Ignored if `p` is not None.
plot_height : int, default 300
Height of plot, in pixels. Ignored if `p` is not None.
plot_width : int, default 450
Width of plot, in pixels. Ignored if `p` is not None.
staircase : bool, default False
If True, make a plot of a staircase ECDF (staircase). If False,
plot the ECDF as dots.
complementary : bool, default False
If True, plot the empirical complementary cumulative
        distribution function.
x_axis_type : str, default 'linear'
Either 'linear' or 'log'.
y_axis_type : str, default 'linear'
Either 'linear' or 'log'.
kwargs
Any kwargs to be passed to either p.circle or p.line, for
`staircase` being False or True, respectively.
Returns
-------
output : bokeh.plotting.Figure instance
Plot populated with ECDF.
"""
# Check data to make sure legit
data = utils._convert_data(data)
# Data points on ECDF
x, y = _ecdf_vals(data, staircase, complementary)
# Instantiate Bokeh plot if not already passed in
if p is None:
        if complementary and y_axis_label == "ECDF":
            y_axis_label = "ECCDF"
p = bokeh.plotting.figure(
plot_height=plot_height,
plot_width=plot_width,
x_axis_label=x_axis_label,
y_axis_label=y_axis_label,
x_axis_type=x_axis_type,
y_axis_type=y_axis_type,
title=title,
)
if staircase:
# Line of steps
p.line(x, y, **kwargs)
# Rays for ends
if complementary:
p.ray(x=x[0], y=1, length=0, angle=np.pi, **kwargs)
p.ray(x=x[-1], y=0, length=0, angle=0, **kwargs)
else:
p.ray(x=x[0], y=0, length=0, angle=np.pi, **kwargs)
p.ray(x=x[-1], y=1, length=0, angle=0, **kwargs)
else:
p.circle(x, y, **kwargs)
return p
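# Minimal usage sketch (assumes this module's helpers `utils._convert_data` and
# `_ecdf_vals` plus bokeh are importable; the data values are made up):
import numpy as np
import bokeh.io
p = _ecdf(data=np.random.normal(size=200), staircase=True, title="demo ECDF")
bokeh.io.show(p)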
|
e3ae7e76eaa285506692ef48031cdb309fa732f1
| 28,347 |
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
def _get_cipher(key: bytes) -> Cipher:
"""获取 DES3 Cipher 对象"""
algorithm = algorithms.TripleDES(key)
cipher = Cipher(algorithm, modes.CBC(key[:8]), backend=default_backend())
return cipher
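# Minimal usage sketch; the 24-byte key below is a made-up illustration value and
# the plaintext length must already be a multiple of the 8-byte DES block size:
key = b"0123456789abcdef01234567"
encryptor = _get_cipher(key).encryptor()
ciphertext = encryptor.update(b"8bytes!!" * 2) + encryptor.finalize()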
|
13c046884ccd51ff19ed9eb80a1747f6888863a0
| 28,348 |
def get_real_dist2rim(x_dist, radius_cut, radius_sphere):
"""
Get the real distance to rim
:param x_dist:
:param radius_cut:
:param radius_sphere:
:return:
"""
x_transf = x_dist * ((radius_sphere) - np.sqrt((radius_sphere) ** 2 - (radius_cut) ** 2)) / radius_cut
return x_transf
|
89f1a6ef3e020636537a8f229e082f7765410129
| 28,349 |
def get_workers_stats(worker_class=None):
"""Get the RQ workers stats.
Args:
worker_class (type): RQ Worker class
Returns:
list: List of worker stats as a dict {name, queues, state}
Raises:
redis.exceptions.RedisError: On Redis connection errors
"""
worker_class = worker_class if worker_class is not None else Worker
workers = worker_class.all()
return [
{"name": w.name, "queues": w.queue_names(), "state": w.get_state()}
for w in workers
]
|
33e86511051b15de07eceaa1ab0d8d609eecbafc
| 28,351 |
def _get_trafo(cmatrix: cairo.Matrix) -> qc3const.TrafoType:
"""Converts cairo matrix to trafo list
:param cmatrix: (cairo.Matrix) cairo transformation matrix
:return: (qc3const.TrafoType) transformation matrix
"""
return [i for i in cmatrix]
|
ea9c3c3b8466a7025fce5f48ceb02baa5ae9d319
| 28,352 |
import bokeh
from bokeh.plotting import output_file, ColumnDataSource, show, figure
from bokeh.models import HoverTool, CategoricalColorMapper, LinearColorMapper, Legend, LegendItem, ColorBar
from bokeh.palettes import Category20
def mousover_plot(datadict, attr_x, attr_y, attr_color=None, attr_size=None, save_file=None, plot_title="",
point_transparency = 0.5, point_size=20, default_color="#2222aa", hidden_keys = []):
""" Produces dynamic scatter plot that can be interacted with by mousing over each point to see its label
Args:
datadict (dict): keys contain attributes, values of lists of data from each attribute to plot (each list index corresponds to datapoint).
The values of all extra keys in this dict are considered (string) labels to assign to datapoints when they are moused over.
Apply _formatDict() to any entries in datadict which are themselves dicts.
attr_x (str): name of column in dataframe whose values are shown on x-axis (eg. 'latency'). Can be categorical or numeric values
attr_y (str): name of column in dataframe whose values are shown on y-axis (eg. 'validation performance'). Must be numeric values.
attr_size (str): name of column in dataframe whose values determine size of dots (eg. 'memory consumption'). Must be numeric values.
attr_color (str): name of column in dataframe whose values determine color of dots (eg. one of the hyperparameters). Can be categorical or numeric values
point_labels (list): list of strings describing the label for each dot (must be in same order as rows of dataframe)
save_file (str): where to save plot to (html) file (if None, plot is not saved)
plot_title (str): Title of plot and html file
point_transparency (float): alpha value of points, lower = more transparent
point_size (int): size of points, higher = larger
        hidden_keys (list[str]): which keys of datadict NOT to show labels for.
"""
try:
with warning_filter():
bokeh_imported = True
except ImportError:
bokeh_imported = False
if not bokeh_imported:
warnings.warn('AutoGluon summary plots cannot be created because bokeh is not installed. To see plots, please do: "pip install bokeh==2.0.1"')
return None
n = len(datadict[attr_x])
for key in datadict.keys(): # Check lengths are all the same
if len(datadict[key]) != n:
raise ValueError("Key %s in datadict has different length than %s" % (key, attr_x))
attr_x_is_string = any([type(val)==str for val in datadict[attr_x]])
if attr_x_is_string:
attr_x_levels = list(set(datadict[attr_x])) # use this to translate between int-indices and x-values
og_x_vals = datadict[attr_x][:]
attr_x2 = attr_x + "___" # this key must not already be in datadict.
hidden_keys.append(attr_x2)
datadict[attr_x2] = [attr_x_levels.index(category) for category in og_x_vals] # convert to ints
legend = None
if attr_color is not None:
attr_color_is_string = any([type(val)==str for val in datadict[attr_color]])
color_datavals = datadict[attr_color]
if attr_color_is_string:
attr_color_levels = list(set(color_datavals))
colorpalette = Category20[20]
color_mapper = CategoricalColorMapper(factors=attr_color_levels, palette=[colorpalette[2*i % len(colorpalette)] for i in range(len(attr_color_levels))])
legend = attr_color
else:
color_mapper = LinearColorMapper(palette='Magma256', low=min(datadict[attr_color]), high=max(datadict[attr_color])*1.25)
default_color = {'field': attr_color, 'transform': color_mapper}
if attr_size is not None: # different size for each point, ensure mean-size == point_size
attr_size2 = attr_size + "____"
hidden_keys.append(attr_size2)
og_sizevals = np.array(datadict[attr_size])
sizevals = point_size + (og_sizevals - np.mean(og_sizevals))/np.std(og_sizevals) * (point_size/2)
if np.min(sizevals) < 0:
sizevals = -np.min(sizevals) + sizevals + 1.0
datadict[attr_size2] = list(sizevals)
point_size = attr_size2
if save_file is not None:
output_file(save_file, title=plot_title)
print("Plot summary of models saved to file: %s" % save_file)
source = ColumnDataSource(datadict)
TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,hover,save"
p = figure(title=plot_title, tools=TOOLS)
if attr_x_is_string:
circ = p.circle(attr_x2, attr_y, line_color=default_color, line_alpha = point_transparency,
fill_color = default_color, fill_alpha=point_transparency, size=point_size, source=source)
else:
circ = p.circle(attr_x, attr_y, line_color=default_color, line_alpha = point_transparency,
fill_color = default_color, fill_alpha=point_transparency, size=point_size, source=source)
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([(key,'@'+key+'{safe}') for key in datadict.keys() if key not in hidden_keys])
# Format axes:
p.xaxis.axis_label = attr_x
p.yaxis.axis_label = attr_y
if attr_x_is_string: # add x-ticks:
p.xaxis.ticker = list(range(len(attr_x_levels)))
p.xaxis.major_label_overrides = {i: attr_x_levels[i] for i in range(len(attr_x_levels))}
# Legend additions:
if attr_color is not None and attr_color_is_string:
legend_it = []
for i in range(len(attr_color_levels)):
legend_it.append(LegendItem(label=attr_color_levels[i], renderers = [circ], index=datadict[attr_color].index(attr_color_levels[i])))
legend = Legend(items=legend_it, location=(0, 0))
p.add_layout(legend, 'right')
if attr_color is not None and not attr_color_is_string:
color_bar = ColorBar(color_mapper=color_mapper, title = attr_color,
label_standoff=12, border_line_color=None, location=(0,0))
p.add_layout(color_bar, 'right')
if attr_size is not None:
p.add_layout(Legend(items=[LegendItem(label='Size of points based on "'+attr_size + '"')]), 'below')
show(p)
|
0cee54239d13e7c3ebd36972e7c0f259ff7de69a
| 28,353 |
import numpy
def globalInequalityChanges(Y, fieldNames, outFile, permutations=9999):
"""Global inequality change test
This function tests whether global inequality has significantly changed
for the Theil statistic over the period t to t+k. For more information on
this function see [Rey_Sastre2010] (this function recreates Table 2 in
that paper).
Layer.inequality('globalInequalityChanges', var, outFile, <permutations>)
:keyword var: List with variables to be analyzed; e.g: ['Y1978', 'Y1979', 'Y1980', 'Y1981']
:type var: list
:keyword outFile: Name for the output file; e.g.: "regionsDifferenceTest.csv"
:type outFile: string
:keyword permutations: Number of random spatial permutations. Default value permutations = 9999.
:type permutations: integer
**Example 1** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
result = china.inequality('globalInequalityChanges',['Y1978', 'Y1979', 'Y1980', 'Y1981'], "interregional_inequality_differences.csv")
"""
def getVar(Y, possition):
result = {}
for k in Y:
result[k] = [Y[k][possition]]
return result
def shufflePeriods(Y,pos1,pos2):
result = {}
for k in Y:
possibilities = [Y[k][pos1],Y[k][pos2]]
result[k] = [possibilities[numpy.random.randint(0,2)]]
return result
print("Creating global Inequality Changes [Rey_Sastre2010 - Table 2]")
results = {}
r2a = list(range(len(Y)))
for nv1, var1 in enumerate(fieldNames):
var = getVar(Y,nv1)
t1,tb1,tw1 = theil(var,r2a)
results[(var1,var1)] = t1
for nv2, var2 in enumerate(fieldNames[nv1+1:]):
var = getVar(Y,nv1+nv2+1)
t2,tb2,tw2 = theil(var,r2a)
results[(var1,var2)] = t2 - t1
numerator = 1
for iter in range(permutations):
var = shufflePeriods(Y,nv1,nv1 + nv2 + 1)
t3,tb3,tw3 = theil(var,r2a)
if abs(t2-t1) < abs(t3-t1):
numerator += 1
results[(var2,var1)] = numerator/float(permutations+1)
if outFile:
fout = open(outFile,"w")
aux = str(fieldNames).replace("[","")
aux = aux.replace("]","")
aux = aux.replace("'","")
line = "".join([",",aux])
fout.write("".join([line,"\n"]))
for var1 in fieldNames:
line = [var1]
for var2 in fieldNames:
line += [results[(var1,var2)]]
line = str(line).replace("[","")
line = line.replace("]","")
line = line.replace("'","")
fout.write("".join([line,"\n"]))
fout.close()
print("global Inequality Changes created!")
return results
|
6a9d0579c52083419d5f4cbedbe4b568d6b7e0a0
| 28,354 |
import re
import pandas as pd
def generate_gisaid_fasta_df(fname, rtype="nuc", ambiguous_tol=0.01, len_tol=0.9):
"""
Generate pandas dataframe for sequences downloaded from GISAID
"""
fdat_df = []
standardise_gene_name = {"PB2":1, "PB1":2, "PA":3, "HA":4, "NP":5, "NA":6, "MP":7, "NS":8}
subtype_to_influenza_gene_len = {"A":{'1-PB2': 2280, '2-PB1': 2274, '3-PA': 2151, '4-HA': 1701, '5-NP': 1497, '6-NA': 1410, '7-M': 982, '8-NS': 838}, "B":{'1-PB2': 2259, '2-PB1': 2313, '3-PA': 2178, '4-HA': 1749, '5-NP': 1683, '6-NA': 1398, '7-M': 1076, '8-NS': 1024}}
fasta_dat = parsefasta(fname, rtype=rtype)
print ("Number of input sequences: %i"%(len(fasta_dat)))
amb_count = 0
len_count = 0
for header, sequence in fasta_dat.items():
sname, gene, iid, date, passage, subtype = header.split("|")
gene = "%i-%s"%(standardise_gene_name[gene], gene)
flu_type = re.search("^(A|B)", subtype).group()
# uncount sequences with > amb_tol of amb res
amb_res = "n" if rtype == "nuc" else "X"
if sequence.count(amb_res)/len(sequence) > ambiguous_tol:
amb_count += 1
continue
# min sequence length
if len(sequence) < len_tol*subtype_to_influenza_gene_len[flu_type][gene]:
len_count += 1
continue
date = toYearFraction(date)
sname = re.sub("(\(h\dn\d\)|[^a-z0-9\-\.\/_])", "", sname.lower())
fdat_df.append({"sname":sname, "gene":gene, "iid":iid, "subtype":subtype, "date":date, "passage":passage, "seq":sequence})
fdat_df = pd.DataFrame.from_dict(fdat_df).set_index("iid")
print ("Number of output sequences: %i"%(len(fdat_df)))
print ("Removed because AMM(<%.2f)/LEN(>%.2f) = %i/%i"%(ambiguous_tol, len_tol, amb_count, len_count))
print ("Number of unique iid: %i"%(len(set(fdat_df.index))))
return fdat_df
|
bf7c09f1f2cfa935d93bbe46c25d55539f2c8bf8
| 28,355 |
def radial_trajectory(base_resolution,
views=1,
phases=None,
ordering='linear',
angle_range='full',
tiny_number=7,
readout_os=2.0):
"""Calculate a radial trajectory.
This function supports the following 2D ordering methods:
* **linear**: Uniformly spaced radial views. Views are interleaved if there
are multiple phases.
* **golden**: Consecutive views are spaced by the golden angle (222.49
degrees if `angle_range` is `'full'` and 111.25 degrees if `angle_range` is
`'half'`) [1]_.
* **golden_half**: Variant of `'golden'` in which views are spaced by 111.25
degrees even if `angle_range` is `'full'` [1]_.
* **tiny**: Consecutive views are spaced by the n-th tiny golden angle, where
`n` is given by `tiny_number` [2]_. The default tiny number is 7 (47.26
degrees if `angle_range` is `'full'` and 23.63 degrees if `angle_range` is
`'half'`).
* **tiny_half**: Variant of `'tiny'` in which views are spaced by a half angle
even if `angle_range` is `'full'` [2]_ (23.63 degrees for `tiny_number`
equal to 7).
* **sorted**: Like `golden`, but views within each phase are sorted by their
angle in ascending order. Can be an alternative to `'tiny'` ordering in
applications where small angle increments are required.
This function also supports the following 3D ordering methods:
* **sphere_archimedean**: 3D radial trajectory ("koosh-ball"). The starting
points of consecutive views trace an Archimedean spiral trajectory along
the surface of a sphere, if `angle_range` is `'full'`, or a hemisphere, if
`angle_range` is `'half'` [3]_. Views are interleaved if there are multiple
phases.
Args:
base_resolution: An `int`. The base resolution, or number of pixels in the
readout dimension.
views: An `int`. The number of radial views per phase.
phases: An `int`. The number of phases for cine acquisitions. If `None`,
this is assumed to be a non-cine acquisition with no time dimension.
ordering: A `string`. The ordering type. Must be one of: `{'linear',
'golden', 'tiny', 'sorted', 'sphere_archimedean'}`.
angle_range: A `string`. The range of the rotation angle. Must be one of:
`{'full', 'half'}`. If `angle_range` is `'full'`, the full circle/sphere
is included in the range. If `angle_range` is `'half'`, only a
semicircle/hemisphere is included.
tiny_number: An `int`. The tiny golden angle number. Only used if `ordering`
is `'tiny'` or `'tiny_half'`. Must be >= 2. Defaults to 7.
readout_os: A `float`. The readout oversampling factor. Defaults to 2.0.
Returns:
A `Tensor` of type `float32` and shape `[views, samples, 2]` if `phases` is
`None`, or of shape `[phases, views, samples, 2]` if `phases` is not `None`.
`samples` is equal to `base_resolution * readout_os`. The units are
radians/voxel, ie, values are in the range `[-pi, pi]`.
References:
.. [1] Winkelmann, S., Schaeffter, T., Koehler, T., Eggers, H. and
Doessel, O. (2007), An optimal radial profile order based on the golden
ratio for time-resolved MRI. IEEE Transactions on Medical Imaging,
26(1): 68-76, https://doi.org/10.1109/TMI.2006.885337
.. [2] Wundrak, S., Paul, J., Ulrici, J., Hell, E., Geibel, M.-A.,
Bernhardt, P., Rottbauer, W. and Rasche, V. (2016), Golden ratio sparse
MRI using tiny golden angles. Magn. Reson. Med., 75: 2372-2378.
https://doi.org/10.1002/mrm.25831
.. [3] Wong, S.T.S. and Roos, M.S. (1994), A strategy for sampling on a
sphere applied to 3D selective RF pulse design. Magn. Reson. Med.,
32: 778-784. https://doi.org/10.1002/mrm.1910320614
"""
return _kspace_trajectory('radial',
{'base_resolution': base_resolution,
'readout_os': readout_os},
views=views,
phases=phases,
ordering=ordering,
angle_range=angle_range,
tiny_number=tiny_number)
|
3554fc0b833be552af31153c80c07863ab8f683d
| 28,356 |
def highlight_threshold(image, img_data, threshold, color=(255, 0, 0)):
"""
Given an array of values for an image, highlights pixels whose value is greater than the given threshold.
:param image: The image to highlight
:param img_data: The values to use
:param threshold: The threshold above which pixels should the highlighted
:param color: The color to highlight pixels with
:return: The image, with high-value pixels highlighted
"""
out_pixels = list(image)
for i in range(len(image)):
p, e = image[i], img_data[i]
if e > threshold:
out_pixels[i] = color
return out_pixels
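# Illustrative call with a small list of RGB pixels and made-up per-pixel scores;
# pixels whose score exceeds the threshold are replaced by the highlight color.
pixels = [(10, 10, 10), (200, 200, 200), (90, 90, 90)]
scores = [0.2, 0.9, 0.4]
highlighted = highlight_threshold(pixels, scores, threshold=0.5)
# -> [(10, 10, 10), (255, 0, 0), (90, 90, 90)]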
|
bc4b0c9f44f7d45b947c9913f6b6f43b73ea542b
| 28,357 |
import numpy
def error_norm(q_numerical, q_exact, dx, p=2):
"""
Compute the discrete error in q in the p norm
Parameters
----------
q_numerical : numpy vector
The numerical solution, an array size (N,) or (N,1)
q_exact : numpy vector
The exact solution, whose size matches q_numerical
dx : float
The relevant grid spacing
p : int or 'inf', optional
The norm. The default is 2.
Returns
-------
error_value : float
(dx * sum((q_n - q_e)**p))**(1/p)
"""
if p == 'inf':
error_value = numpy.max(numpy.abs(q_numerical - q_exact))
else:
error_value = (dx * numpy.sum(numpy.abs(q_numerical - q_exact)**p))**(1/p)
return error_value
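# Illustrative check in the 2-norm: the only mismatch is 0.5 at one grid point,
# so the error is sqrt(dx * 0.5**2) = sqrt(0.025), about 0.158.
q_numerical = numpy.array([1.0, 2.0, 3.0])
q_exact = numpy.array([1.0, 2.0, 2.5])
err = error_norm(q_numerical, q_exact, dx=0.1, p=2)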
|
e4d33583ee2c5308a2eda9755c44961acba2603d
| 28,358 |
def flip(xyz_img):
"""
Take an xyz_img and flip its world from LPS / RAS to
RAS / LPS.
>>> data = np.random.standard_normal((30,40,50,5))
>>> metadata = {'name':'John Doe'}
>>> lps_im = XYZImage(data, np.diag([3,4,5,1]), 'ijkt', metadata)
>>> lps_im.xyz_transform
XYZTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxel', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('x+LR', 'y+PA', 'z+SI'), name='world', coord_dtype=float64),
affine=array([[ 3., 0., 0., 0.],
[ 0., 4., 0., 0.],
[ 0., 0., 5., 0.],
[ 0., 0., 0., 1.]])
)
>>> ras_im = flip(lps_im)
>>> ras_im.xyz_transform
XYZTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k'), name='voxel', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('x+RL', 'y+AP', 'z+SI'), name='world', coord_dtype=float64),
affine=array([[-3., 0., 0., 0.],
[ 0., -4., 0., 0.],
[ 0., 0., 5., 0.],
[ 0., 0., 0., 1.]])
)
    >>> print(np.allclose(ras_im.get_data(), lps_im.get_data()))
    True
    >>> print(ras_im.metadata == lps_im.metadata)
    True
    >>>
    >>> print(flip(ras_im) == lps_im)
    True
>>>
"""
if xyz_img.reference.coord_names == lps_output_coordnames:
flipped_lps = False
else:
flipped_lps = True
xyz_transform = xyz_img.xyz_transform
new_xyz_transform_matrix = np.dot(np.diag([-1,-1,1,1]),
xyz_transform.affine)
return XYZImage(xyz_img._data, new_xyz_transform_matrix,
xyz_img.axes.coord_names,
metadata=xyz_img.metadata,
lps=flipped_lps)
|
e3f345c8c61043e8a46e8c9b603ed5de93125d63
| 28,360 |
import numpy as np
def pivoting_remove(z, rule):
"""Choose which active constraint will be replaced
"""
if rule is None:
k = np.argmin(z)
elif rule.lower() == 'bland':
k = np.min(np.nonzero(z < 0)[0])
else:
        raise ValueError('Undefined pivoting rule')
return k
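# Illustrative calls: with the default rule the most negative entry is chosen
# (index 2); with Bland's rule the first negative entry is chosen (index 1).
z = np.array([0.5, -0.2, -0.7])
k_default = pivoting_remove(z, None)     # -> 2
k_bland = pivoting_remove(z, 'bland')    # -> 1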
|
54186ddc15db3abca6853b928c6d51f145cbe248
| 28,361 |
from tqdm import tqdm
def train_network(model, optimizer, train_loader, lss_fc):
    """Train the network for one epoch and return the average training loss."""
train_losses = []
for batch in tqdm(train_loader, total=len(train_loader)):
optimizer.zero_grad()
input_tensor, original = batch
input_tensor = input_tensor.to('cuda')
out = model(input_tensor)
loss = crop_mse(original=original,
out=out,
mask=input_tensor[:, 1],
mse=lss_fc)
loss.backward()
optimizer.step()
train_losses.append(loss.detach())
return sum(train_losses)/len(train_losses)
|
1b90706348ceefe7840b16d29bfb0cc37229ec46
| 28,362 |
def get_class(cls):
"""Return TestModuleVisitor report from a class instance."""
ast = get_ast(cls.__module__)
nv = TestmoduleVisitor()
nv.visit(ast)
return nv._classes[cls.__name__]
|
4d3bb56f9582edb1576db67a3094f6b3efa3e106
| 28,363 |
def show_system_timezone(
enode,
_shell='vtysh',
_shell_args={
'matches': None,
'newline': True,
'timeout': None,
'connection': None
}
):
"""
Display system timezone information
This function runs the following vtysh command:
::
# show system timezone
:param dict kwargs: arguments to pass to the send_command of the
vtysh shell.
:param str _shell: shell to be selected
:param dict _shell_args: low-level shell API arguments
:return: A dictionary as returned by
:func:`topology_lib_vtysh.parser.parse_show_system_timezone`
"""
cmd = [
'show system timezone'
]
shell = enode.get_shell(_shell)
shell.send_command(
(' '.join(cmd)).format(**locals()), **_shell_args
)
result = shell.get_response(
connection=_shell_args.get('connection', None)
)
return parse_show_system_timezone(result)
|
67f08762a31c54fdaeb7c93c48c8c5d97ecf2f3e
| 28,364 |
import numpy
def summarize_list(values):
"""
Takes a list of integers such as [1,2,3,4,6,7,8] and summarises it as a string "1-4,6-8"
:param values:
:return: string
"""
sorted_values = numpy.array(sorted(values))
summaries = [
(f'{chunk[0]}-{chunk[-1]}' if len(chunk) > 1 else f'{chunk[0]}')
for chunk in numpy.split(sorted_values, numpy.where(numpy.diff(sorted_values) > 1)[0] + 1)
if len(chunk)
]
return ','.join(summaries)
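# Illustrative call matching the docstring; input order does not matter:
# summarize_list([8, 6, 7, 1, 2, 3, 4]) -> '1-4,6-8'
summary = summarize_list([8, 6, 7, 1, 2, 3, 4])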
|
ea6e3501fb3340e0a78a71096129df5b3400fac9
| 28,365 |
import torch
import torch.nn.functional as F
def enforce_size(img, depth, instances, new_w, new_h):
""" Ensures that the image is the given size without distorting aspect ratio. """
with torch.no_grad():
_, h, w = img.size()
if h == new_h and w == new_w:
return img, depth, instances
# Resize the image so that it fits within new_w, new_h
w_prime = new_w
h_prime = h * new_w / w
if h_prime > new_h:
w_prime *= new_h / h_prime
h_prime = new_h
w_prime = int(w_prime)
h_prime = int(h_prime)
# Do all the resizing
img = F.interpolate(img.unsqueeze(0), (h_prime, w_prime), mode='bilinear', align_corners=False)
img.squeeze_(0)
depth = F.interpolate(depth.unsqueeze(0), (h_prime, w_prime), mode='bilinear', align_corners=False)
depth.squeeze_(0)
# Act like each object is a color channel
instances['masks'] = F.interpolate(instances['masks'].unsqueeze(0), (h_prime, w_prime), mode='bilinear', align_corners=False)
instances['masks'].squeeze_(0)
# Scale bounding boxes (this will put them in the top left corner in the case of padding)
instances['boxes'][:, [0, 2]] *= (w_prime / new_w)
instances['boxes'][:, [1, 3]] *= (h_prime / new_h)
# Finally, pad everything to be the new_w, new_h
pad_dims = (0, new_w - w_prime, 0, new_h - h_prime)
img = F.pad( img, pad_dims, mode='constant', value=0)
depth = F.pad(depth, pad_dims, mode='constant', value=0)
instances['masks'] = F.pad(instances['masks'], pad_dims, mode='constant', value=0)
return img, depth, instances
|
5252b9c62af4ce909fb85856a78b7e4a697aaf74
| 28,366 |
import numpy as np
import scipy.stats as stat
from scipy.special import logsumexp
def update_V_softmax(V,B,T,O,R,gamma,eps=None,PBVI_temps=None,
max_iter=100,verbose=False,n_samps=100,seed=False):
"""
inputs:
V (list):
V[0]: n_B x n_S array of alpha-vector values for each belief
V[1]: n_B array, denoting which action generated each alpha-vector
B: n_B x n_S array of belief states to be updated
optional inputs:
outputs:
V (same as input), updated
"""
if PBVI_temps is None:
temp1=.01
temp2=.01
temp3=.01
else:
temp1 = PBVI_temps[0]
temp2 = PBVI_temps[1]
temp3 = PBVI_temps[2]
if seed: #testing
np.random.seed(711)
n_B = np.shape(B)[0]
n_V = np.shape(B)[0]
n_A = np.shape(R)[1]
n_S = np.shape(R)[0]
O_dims = np.shape(O)[1]
O_means = O[0]; O_sds = O[1] #O_dims,n_S,n_A
if eps is None:
eps = 0.01*n_S
#### no reason to resample O each iteration; so sample obs beforehand and cache
O_samps = np.random.normal(0,1,(n_samps,O_dims,n_S,n_A)) #K x D x S x A
O_samps = O_means + O_sds*O_samps
#precompute and cache b^ao for sampled observations...
O_logprob = np.sum(stat.norm.logpdf(O_samps[:,:,:,:,None], #K x D x S x A x 1
np.transpose(O_means,[0,2,1])[:,None,:,:],
np.transpose(O_sds,[0,2,1])[:,None,:,:],),1)
#K: # samples drawn
log_B = np.log(B+1e-16) # B x S
log_T = np.log(T+1e-16) #S' x S x A
log_TB = logsumexp(log_B[None,:,:,None] + log_T[:,None,:,:],2)# S' x S
log_bao = np.transpose(O_logprob[:,:,None,:,:] + log_TB[None,:,:,:,None],[2,0,3,1,4])
b_ao = np.exp(log_bao - logsumexp(log_bao,4)[:,:,:,:,None]) #B x K x A x S' x S
for ct in range(max_iter):
old_V = np.array(V[0],copy=True)
alpha_bao = np.einsum('ab,cdefb->acdef',V[0],b_ao)/temp1 #V x B x K x A x S'
#softmax
exp_alpha_bao = np.exp(alpha_bao - np.max(alpha_bao,0)) #V x B x K x A x S'
alpha_bao_probs = exp_alpha_bao/np.sum(exp_alpha_bao,0) #V x B x K x A x S'
#soft mean
prob_meta_obs = np.mean(alpha_bao_probs,axis=2) #V x B x A x S'
alpha_aO_alpha2 = np.einsum('ab,bcd,efdb->efdac',V[0],T,prob_meta_obs) #V' x B x A x V x S
B_alpha_aO_alpha2 = np.einsum('ab,cadeb->cade',B,alpha_aO_alpha2)/temp2 #V' x B x A x V
#softmax
exp_aB = np.exp(B_alpha_aO_alpha2 - np.max(B_alpha_aO_alpha2,3)[:,:,:,None]) #V' x B x A x V
aB_probs = exp_aB/np.sum(exp_aB,3)[:,:,:,None] #V' x B x A x V
#soft mean
avg_B_alpha_aO_alpha2 = np.sum(alpha_aO_alpha2 * aB_probs[:,:,:,:,None], axis=3) #V' x B x A x S
alpha_ab = R.T + gamma*np.einsum('abcd->bcd',avg_B_alpha_aO_alpha2) #B x A x S
alpha_ab_B = np.einsum('ab,acb->ac',B,alpha_ab)/temp3 #B x A
#softmax
exp_alpha_ab_B = np.exp(alpha_ab_B - np.max(alpha_ab_B,1)[:,None]) #B x A
alpha_ab_B_probs = exp_alpha_ab_B/np.sum(exp_alpha_ab_B,1)[:,None] #B x A
#soft mean
avg_alpha_abB = np.sum(alpha_ab * alpha_ab_B_probs[:,:,None], 1) #B x S
V[0] = avg_alpha_abB #B x S; alpha-vecs
V[1] = alpha_ab_B_probs #B x A; action probs for each alpha-vec
diff = np.sum(np.abs(V[0]-old_V))
#check for convergence
if diff < eps:
return V
if verbose:
print("didn't converge during update :(" %np.sum(np.abs(V[0]-old_V)))
return V
|
62910d068a59902d6a9f5f0c2b873cad551f9c17
| 28,367 |
def scaled_location_plot(yname, yopt, scaled_res):
"""
Plot the scaled location, given the dependant values and scaled residuals.
:param str yname: Name of the Y axis
:param ndarray yopt: Estimated values
:param ndarray scaled_res: Scaled residuals
:returns: the handles for the data and the smoothed curve
"""
scr = sqrt(abs(scaled_res))
p_scaled = plot(yopt, scr, '+')[0]
av = NonParamRegression(yopt, scr)
av.fit()
xr = arange(yopt.min(), yopt.max(), (yopt.max() - yopt.min()) / 1024)
rr = av(xr)
p_smooth = plot(xr, rr, 'g')[0]
expected_mean = 2 ** (1 / 4) * gamma(3 / 4) / sqrt(pi)
plot([yopt.min(), yopt.max()], [expected_mean, expected_mean], 'r--')
title('Scale-location')
xlabel(yname)
ylabel('$|$Normalized residuals$|^{1/2}$')
gca().set_yticks([0, 1, 2])
return [p_scaled, p_smooth]
|
24e126f3bb60e5f46713d3a0c7da383684081afd
| 28,368 |
import numpy as np
def importNoiseTerms(filename):
""" Imports noise data from an FWH file; the returned data is a list of length
nProbes filled with (nTime,3) arrays """
f = open(filename,'r')
deltaT = []
while True:
line = f.readline(); # read line by line
if line == '': # check for EoF
break;
if (line != '\n'): # skip empty lines
if line.split()[1] == 'x':
# get the number of probes and allocate memory
noProbes = len(line.split())-2;
time = np.array([])
field = -1
# process the probe x-locations
x = np.array([float(i) for i in line.split()[2:noProbes+2]])
elif line.split()[1] == 'y':
y = np.array([float(i) for i in line.split()[2:noProbes+2]])
elif line.split()[1] == 'z':
z = np.array([float(i) for i in line.split()[2:noProbes+2]])
elif line.split()[1] == 'deltaT':
deltaT = np.array([float(i) for i in line.split()[2:noProbes+2]])
elif line.split()[0] != '#':
line = (line.replace('(',' ').replace(')',' ').replace(',',' ')).split()
# check if saving 3 or 5 terms
if field == -1 and ((len(line)-1)/noProbes)%3 == 0:
field = [np.zeros((0,3)) for i in range(noProbes)]
elif field == -1 and ((len(line)-1)/noProbes)%5 == 0:
field = [np.zeros((0,5)) for i in range(noProbes)]
# convert all to floats
vals = [float(s) for s in line]
time = np.append(time, vals[0])
for i in range(0,noProbes):
if ((len(line)-1)/noProbes)%3 == 0:
field[i] = np.vstack([field[i],np.array([vals[(i*3+1) : (i*3+1+3)]])])
elif ((len(line)-1)/noProbes)%5 == 0:
field[i] = np.vstack([field[i],np.array([vals[(i*5+1) : (i*5+1+5)]])])
f.close()
return time, field, np.transpose(np.vstack([x,y,z])), deltaT
|
b2066f37f7a030d1e330f9bcc0b13b7527caa1e1
| 28,369 |
def line_edit_style_factory(txt_color='white', tgt_layer_color='white',
bg_color='#232323'):
"""Generates a string of a qss style sheet for a line edit. Colors can be
supplied as strings of color name or hex value. If a color arg receives
a tuple we assume it is either an rgb or rgba tuple.
:param txt_color: Color the text of the line edit should be.
:param tgt_layer_color: The color of the current target layer.
    :param bg_color: The color that will fill the background of the line edit.
:return: string of qss
"""
def handle_rgb(color_tuple):
"""Assumes the tuple is rgba or rgb (len 4 or 3)"""
val = ','.join([str(i) for i in color_tuple])
if len(color_tuple) == 4:
rgb = 'rgba({})'.format(val)
else:
rgb = 'rgb({})'.format(val)
return rgb
if isinstance(bg_color, tuple):
bg_color = handle_rgb(bg_color)
style = '''
QTextEdit,
QLineEdit {
border-radius: 11px;
border: 1px solid transparent;
background-color: %s;
color: %s
}
QTextEdit:hover,
QLineEdit:hover {
border: 1px solid %s
}
QTextEdit:focus,
QLineEdit:focus {
border: 2px solid %s
}
''' % (bg_color, txt_color, tgt_layer_color,
tgt_layer_color)
return style
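# Illustrative call; an RGBA tuple for bg_color is converted to an rgba() string:
qss = line_edit_style_factory(txt_color='white',
                              tgt_layer_color='#57b9f2',
                              bg_color=(35, 35, 35, 200))
# some_line_edit.setStyleSheet(qss)  # hypothetical Qt widget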
|
10670afc32ec1c19d09dd72fc0e23bb1583ba3af
| 28,370 |
import struct
import random
def create_key(key_len):
""" Generates key using random device if present
- key_len -- length of key
"""
try:
#generates truly random numbers
frand = open("/dev/random", "r")
data = frand.read(key_len/2)
frand.close()
return data.encode('hex')
except IOError:
buf =''
length = key_len/4
#generates truly pusedo random numbers
for i in range(length):
#read one byte at a time
buf = buf + struct.pack("!L", random.getrandbits(32)).encode('hex')
return buf[:key_len]
|
84a9952a896855f04ddf6fedf8a81c1be6bdaa08
| 28,371 |
import re
def ipv6_from_string(string: str) -> netaddr.IPSet:
"""
Takes a string and extracts all valid IPv6 Addresses as a SET of Strings
Uses the validate_ip helper function to achieve.
"""
ipv6_regex = re.compile(
'(?<![a-zA-Z\d\.])((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?(\/[\d][\d]?[\d]?|1([01][0-9]|2[0-8]))?|(\.(\d{1,3}))(?<![a-zA-Z\d])')
potential_ipv6s = re.findall(ipv6_regex, string)
valid_ipv6s = []
for ipv6 in potential_ipv6s:
ipv6 = ipv6[0] + ipv6[75]
if validate_ip(ipv6) is True:
valid_ipv6s.append(ipv6)
return netaddr.IPSet(valid_ipv6s)
|
2aa529b8561498384dea2ae6c18f44164710848a
| 28,373 |
from typing import Tuple
from typing import List
def get_feature_location(
feature_text: str) -> Tuple[int, int, str, List, bool, bool]:
"""
Args:
feature_text: endswith '\n'
For example:
' CDS complement(join(<360626..360849,360919..360948,
' 361067..361220,361292..361470,361523..>361555))
' /gene="rIIA"
' /locus_tag="T4p001"
' /db_xref="GeneID:1258593"
Returns:
        start: int
            The example would be 360626
        end: int
            The example would be 361555
        strand: str, '+' or '-'
            The example would be '-'
        regions: list of tuple (int, int, str)
            Indicating start, end, strand of each region (intron)
            The example would be [(360626, 360849, '-'), (360919, 360948, '-'),
            (361067, 361220, '-'), (361292, 361470, '-'), (361523, 361555, '-')]
        partial_start: bool
            The example would be True (the first start carries '<')
        partial_end: bool
            The example would be True (the last end carries '>')
"""
locstr = get_location_string(feature_text)
if locstr.startswith('complement('):
# Remove 'complement(' and ')'
locstr = locstr[len('complement('):-1]
all_complement = True
else:
all_complement = False
if locstr.startswith('join('):
# Remove 'join(' and ')'
locstr = locstr[len('join('):-1]
if locstr.startswith('order('):
# Remove 'join(' and ')'
locstr = locstr[len('order('):-1]
# loclist = list of strings
# e.g. ["complement(2853..2990)", "complement(2458..2802))"]
loclist = locstr.split(',')
partial_start, partial_end = False, False
regions = [] # e.g. [(100, 200, '-'), (<300, 400, '+'), (500, >600, '+')]
for i, s in enumerate(loclist):
# Tell the strand
if s.startswith('complement('):
# Remove 'complement(' and ')'
s = s[len('complement('):-1]
c = '-' # c = strand
elif all_complement:
c = '-'
else:
c = '+'
a, b = s.split('..') if ('..' in s) else (s, s) # a is start, b is end
# First start has '<' --> partial start
if i == 0 and a.startswith('<'):
partial_start = True
# Last end has '>' --> partial end
if i == len(loclist) - 1 and b.startswith('>'):
partial_end = True
if a.startswith('<') or a.startswith('>'):
a = a[1:]
if b.startswith('<') or b.startswith('>'):
b = b[1:]
a, b = int(a), int(b)
if a > b:
a, b = b, a # a must be < b
regions.append((a, b, c))
start, end, strand = regions[0][0], regions[-1][1], regions[0][2]
return start, end, strand, regions, partial_start, partial_end
|
665649a7ea7c618a8830b0bf11c5d26a6e6d21fd
| 28,374 |
import json
import logging
def _update_port_rate_limits_v1(port_name, broadcast_limit=None, broadcast_units=None,
multicast_limit=None, multicast_units=None, unknown_unicast_limit=None,
unknown_unicast_units=None, **kwargs):
"""
Perform GET and PUT calls to update a Port's rate limits
:param port_name: Alphanumeric name of the Port
:param broadcast_limit: Rate limit for broadcast ingress traffic
:param broadcast_units: Units for broadcast rate limit; should be either "kbps" (kilobits/second) or
"pps" (packets/second)
:param multicast_limit: Rate limit in pps for multicast ingress traffic
:param multicast_units: Units for multicast rate limit; should be either "kbps" (kilobits/second) or
"pps" (packets/second)
:param unknown_unicast_limit: Rate limit in pps for unknown_unicast ingress traffic
:param unknown_unicast_units: Units for unknown unicast rate limit; should be either "kbps" (kilobits/second) or
"pps" (packets/second)
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: True if successful, False otherwise
"""
port_name_percents = common_ops._replace_special_characters(port_name)
port_data = port.get_port(port_name, depth=0, selector="configuration", **kwargs)
port_data['rate_limits'] = {}
if broadcast_limit is not None and broadcast_units is not None:
port_data['rate_limits']['broadcast'] = broadcast_limit
port_data['rate_limits']['broadcast_units'] = broadcast_units
if multicast_limit is not None and multicast_units is not None:
port_data['rate_limits']['multicast'] = multicast_limit
port_data['rate_limits']['multicast_units'] = multicast_units
if unknown_unicast_limit is not None and unknown_unicast_units is not None:
port_data['rate_limits']['unknown-unicast'] = unknown_unicast_limit
port_data['rate_limits']['unknown-unicast_units'] = unknown_unicast_units
# must remove these fields from the data since they can't be modified
port_data.pop('name', None)
port_data.pop('origin', None)
target_url = kwargs["url"] + "system/ports/%s" % port_name_percents
put_data = json.dumps(port_data, sort_keys=True, indent=4)
response = kwargs["s"].put(target_url, data=put_data, verify=False)
if not common_ops._response_ok(response, "PUT"):
logging.warning("FAIL: Updating rate limits for Port '%s' failed with status code %d: %s"
% (port_name, response.status_code, response.text))
return False
else:
logging.info("SUCCESS: Updating rate limits for Port '%s' succeeded"
% port_name)
return True
|
28f21634af949b2e023db64ac6a4b850e2bf96cc
| 28,376 |
def random_geom_sum(pmf, p, low_mem=False):
"""Calculates the distribution of Z = X_1 + X_2 + ... + X_N.
Parameters
----------
pmf : array
Probability distribution of X such that pmf[x] = Pr(X = x).
p : float
Probability such that N ~ geom(p), i.e. Pr(N = n) = p(1-p)^{n-1}.
low_mem : boolean
If set to True this function doesn't store or output the intermediate
        results `pmfs_given_N`, saving a lot of memory. Note that when the
        Werner parameters are calculated in addition to the waiting time, this
        value must be set to False, because the results of
        `get_pmfs_after_fixed_lengths(pmf)` are required for the Werner
        parameter calculation.
        NOTE: refactoring would also allow a lower-memory implementation of
        the Werner parameter calculation.
Returns
-------
Tuple (pmf_out, pmfs_given_N)
pmf_out[z] = Pr(sum^N X = z) = Pr(Z = z).
pmf_given_N[n,z] = Pr(sum^n X = z) = Pr(Z = z | N = n).
"""
    if low_mem:
pmf_final = get_pmf_after_prob_length_low_memory(pmf, p)
pmfs_given_N = None
else:
pmfs_given_N = get_pmfs_after_fixed_lengths(pmf)
pmf_final = get_pmf_after_prob_length(pmfs_given_N, p)
return pmf_final, pmfs_given_N
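# Hedged usage sketch (illustrative; assumes numpy as np and the helper
# functions referenced above are available):
#   pmf_X = np.array([0.0, 0.5, 0.3, 0.2])     # Pr(X = x) for x = 0..3
#   pmf_Z, pmf_Z_given_N = random_geom_sum(pmf_X, p=0.25)
#   # pmf_Z[z] then gives Pr(X_1 + ... + X_N = z) with N ~ geom(0.25)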
|
3b7f75a248975dd78e3cf986bff02a6160c1a026
| 28,377 |
def noto_tools(default=""):
"""Local path to nototools git repo. If this is called, we require config
to be set up."""
result = _values.get("noto_tools", default)
if result:
return result
raise Exception(_ERR_MSG)
|
42738c374bcd6d89baf4eabbe7c0f0a75fb1fd1d
| 28,378 |
def fdc_windtur_west(timestamp, sonicU, sonicV, sonicW, heading,
rateX, rateY, rateZ, accX, accY, accZ, lat):
"""
Description:
Calculates the L1 windspeed data product WINDTUR-VLW_L1 from the FDCHP
instrument, which collects 20 minutes of data every hour. The L1 data
consists of these values less 30 seconds from both the beginning and
end of each 12000 point dataset.
Implemented by:
2014-11-17: Russell Desiderio. Initial Code
2015-01-29: Russell Desiderio. Removed temperature from calling arguments.
Usage:
wind_west = fdc_windtur_west(timestamp, sonicU, sonicV, sonicW, heading,
rateX, rateY, rateZ, accX, accY, accZ, lat)
where
wind_west = windspeed West WINDTUR-VLW_L1 [m/s], UNcorrected for magnetic variation
timestamp = data date and time values [seconds since 1900-01-01]
sonicU = WINDTUR-U_L0 [cm/s]; u-component of windspeed measured in the buoy
frame of reference
sonicV = WINDTUR-V_L0 [cm/s]; v-component of windspeed measured in the buoy
frame of reference
sonicW = WINDTUR-W_L0 [cm/s]; w-component of windspeed measured in the buoy
frame of reference
heading = MOTFLUX-YAW_L0 [radians] measured by the magnetometer (NOT msrd by the gyro).
***NOT USED*** roll: MOTFLUX-ROLL_L0 [radians] ***NOT USED***
***NOT USED*** pitch: MOTFLUX-PITCH_L0 [radians] ***NOT USED***
rateX = MOTFLUX-ROLL_RATE_L0 [radians/s] measured by the gyro
rateY = MOTFLUX-PITCH_RATE_L0 [radians/s] measured by the gyro
rateZ = MOTFLUX-YAW_RATE_L0 [radians/s] measured by the gyro
accX = MOTFLUX-ACX_L0 [9.80665 m^2/s^2] x-component of platform linear acceleration
accY = MOTFLUX-ACY_L0 [9.80665 m^2/s^2] y-component of platform linear acceleration
accZ = MOTFLUX-ACZ_L0 [9.80665 m^2/s^2] z-component of platform linear acceleration
lat = latitude of instrument in decimal degrees
References:
OOI (2014). Data Product Specification for FDCHP Data Products. Document
Control Number 1341-00280. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-00280_Data_Product_Spec_FDCHP_OOI.pdf)
"""
# this data product is temperature independent
sonicT = sonicW * np.nan
_, windspeeds = fdc_flux_and_wind(timestamp, sonicU, sonicV, sonicW, sonicT,
heading, rateX, rateY, rateZ, accX, accY,
accZ, lat)
wind_west = np.asarray(windspeeds[1]).flatten()
return wind_west
|
e0136ff7ddaf676b99f28700e3613523bb57b12e
| 28,379 |
def reports():
"""View reports"""
return render_template("reports.html")
|
b69119a97998595757d52e5641246bbeea007d18
| 28,380 |
def otr_statusbar_cb(data, item, window):
"""Update the statusbar."""
if window:
buf = weechat.window_get_pointer(window, 'buffer')
else:
# If the bar item is in a root bar that is not in a window, window
# will be empty.
buf = weechat.current_buffer()
result = ''
if buffer_is_private(buf):
local_user = irc_user(
buffer_get_string(buf, 'localvar_nick'),
buffer_get_string(buf, 'localvar_server'))
remote_user = irc_user(
buffer_get_string(buf, 'localvar_channel'),
buffer_get_string(buf, 'localvar_server'))
context = ACCOUNTS[local_user].getContext(remote_user)
encrypted_str = config_string('look.bar.state.encrypted')
unencrypted_str = config_string('look.bar.state.unencrypted')
authenticated_str = config_string('look.bar.state.authenticated')
unauthenticated_str = config_string('look.bar.state.unauthenticated')
logged_str = config_string('look.bar.state.logged')
notlogged_str = config_string('look.bar.state.notlogged')
bar_parts = []
if context.is_encrypted():
if encrypted_str:
bar_parts.append(''.join([
config_color('status.encrypted'),
encrypted_str,
config_color('status.default')]))
if context.is_verified():
if authenticated_str:
bar_parts.append(''.join([
config_color('status.authenticated'),
authenticated_str,
config_color('status.default')]))
elif unauthenticated_str:
bar_parts.append(''.join([
config_color('status.unauthenticated'),
unauthenticated_str,
config_color('status.default')]))
if context.is_logged():
if logged_str:
bar_parts.append(''.join([
config_color('status.logged'),
logged_str,
config_color('status.default')]))
elif notlogged_str:
bar_parts.append(''.join([
config_color('status.notlogged'),
notlogged_str,
config_color('status.default')]))
elif unencrypted_str:
bar_parts.append(''.join([
config_color('status.unencrypted'),
unencrypted_str,
config_color('status.default')]))
result = config_string('look.bar.state.separator').join(bar_parts)
if result:
result = '{color}{prefix}{result}'.format(
color=config_color('status.default'),
prefix=config_string('look.bar.prefix'),
result=result)
return result
|
9fb58921b901e542ad6f8bed7207337db56de872
| 28,381 |
def rotate_ellipse_NS(time_deg, datastruc, const):
"""Rotate ellipse major/minor axis to north/south orientation."""
# Construct major and minor
major, minor, pha, inc = get_constituent(const, datastruc)
# construct current at this time
try:
major_current = major*np.cos(np.deg2rad(time_deg - pha))
minor_current = minor*np.cos(np.deg2rad(time_deg - pha) - np.pi/2)
except ValueError:
time_deg = np.expand_dims(time_deg, 2)
major_current = major*np.cos(np.deg2rad(time_deg - pha))
minor_current = minor*np.cos(np.deg2rad(time_deg - pha) - np.pi/2)
# Rotate to u and v
rotated_current = ((major_current + 1j*minor_current)
* np.exp(1j*np.deg2rad(inc)))
u = np.real(rotated_current)
v = np.imag(rotated_current)
return u, v
|
48c4d4270e8a521969608369974c59cff6700a03
| 28,382 |
import bs4
import requests
def img_lookup(pid):
"""Query for object type and return correct JPG location"""
r = requests.get("https://fsu.digital.flvc.org/islandora/object/{0}/datastream/JPG/view".format(pid))
if r.status_code == 200:
return r.url
elif r.status_code == 404:
r2 = requests.get("https://fsu.digital.flvc.org/islandora/object/{0}/pages".format(pid))
soup = bs4.BeautifulSoup(r2.text, 'lxml')
div = soup.find_all('div', class_="islandora-objects-grid-item")[0]
dd = div.find('dd')
a = dd.find('a')
if a is not None:
return "https://fsu.digital.flvc.org{0}/datastream/JPG/view".format(a['href'])
else:
return None
else:
return None
|
ac9ccfc64e4bf38b0f22e90649368cae1ad89b18
| 28,383 |
import datetime
def _get_midnight_date(date):
"""Return midnight date for the specified date.
Effectively, this function returns the start of the day for the
specified date.
Arguments:
date -- An arbitrary date (type: datetime.datetime)
Return: Midnight date (type: datetime.datetime)
"""
return datetime.datetime(date.year, date.month, date.day)
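# Example usage (illustrative sketch, not part of the original module):
#   _get_midnight_date(datetime.datetime(2021, 5, 4, 13, 45))
#   # -> datetime.datetime(2021, 5, 4, 0, 0)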
|
165a884fd12e79f167c9818126e1e31a3b2dc8b3
| 28,384 |
def get_workflow_entrypoint(definition_class, workflow_name, workflow_version):
"""Get the entry point information from *workflow_class*.
This function provides a convenient way to extract the parameters
    that need to be returned by the *get_workflow* argument to
:py:class:`~.GenericWorkflowWorker`
:param definition_class: Class which defines the workflow
:type definition_class: child class of botoflow.workflow_definition.WorkflowDefinition
:param str workflow_name: The name of the workflow
:param str workflow_version: The version of the workflow
:return: Return a tuple of (*definition_class*, *workflow_type*, *entrypoint_func_name*)
"""
return extract_workflows_dict([definition_class])[workflow_name, workflow_version]
|
87a7cbc1ad810e08033f19d1d3c7551ff8b4eb46
| 28,385 |
def expect_types(*_pos, **named):
"""
Preprocessing decorator that verifies inputs have expected types.
Usage
-----
>>> @expect_types(x=int, y=str)
... def foo(x, y):
... return x, y
...
>>> foo(2, '3')
(2, '3')
>>> foo(2.0, '3')
Traceback (most recent call last):
...
TypeError: foo() expected an argument of type 'int' for argument 'x', but got float instead. # noqa
"""
if _pos:
raise TypeError("expect_types() only takes keyword arguments.")
for name, type_ in iteritems(named):
if not isinstance(type_, (type, tuple)):
raise TypeError(
"expect_types() expected a type or tuple of types for "
"argument '{name}', but got {type_} instead.".format(
name=name, type_=type_,
)
)
return preprocess(**valmap(_expect_type, named))
|
92b7682bda54f02c095d10534b71fb02fea1a763
| 28,386 |
def squash_by(child_parent_ids, *attributes):
"""Squash a child-parent relationship
Arguments
---------
child_parent_ids - array of ids (unique values that identify the parent)
*attributes - other arrays that need to follow the sorting of ids
Returns
-------
child_parents_idx - an array of len(child) which points to the index of
parent
parent_ids - len(parent) of the ids
*parent_attrs - len(parent) of the other attributes
"""
unique_resids, sort_mask, atom_idx = np.unique(
child_parent_ids, return_index=True, return_inverse=True)
return atom_idx, unique_resids, [attr[sort_mask] for attr in attributes]
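# Illustrative example (assumes numpy is imported as np):
#   resids = np.array([10, 10, 11, 11, 11, 12])
#   names = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
#   idx, parents, attrs = squash_by(resids, names)
#   # idx -> [0, 0, 1, 1, 1, 2]; parents -> [10, 11, 12]; attrs[0] -> ['a', 'c', 'f']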
|
1c68bb38ee10044803021f4d74b37ea4b161eef5
| 28,387 |
import random
import math
def _generate_quantsets(num_vars, num_qsets, ratio):
"""
_generate_quantsets(num_vars : int,
num_qsets : int,
ratio : float)
return (quantsets : list)
Generates a list of random quantifier sets according to given arguments
returns it.
Returns the list of generated quantifier sets.
"""
global _qcache, _vcache, _options
quantsets = []
quantifiers = [UNIVERSAL, EXISTENTIAL]
num_sets = {EXISTENTIAL : 0, UNIVERSAL: 0}
_num_vars = {EXISTENTIAL : 0, UNIVERSAL : 0}
rem_vars = {EXISTENTIAL : 0, UNIVERSAL : 0}
# prevent universal quantset at innermost scope, would be removed anyway
# by applying forall reduction
if _options.reduce:
# number of quantifier sets is even -> start with UNIVERSAL
# otherwise with EXISTENTIAL
qindex = num_qsets % 2
else:
qindex = random.randint(0, 1)
# special case
    if ratio is not None:
# if all variables have to be universal -> only one universal
# quantifier set exists
if ratio == 0.0:
qindex = 0
# if all variables have to be existential -> only one existential
# quantifier set exists
elif ratio == 1.0:
qindex = 1
# if only one quantifier set is given, change ratio in order to have only
# existential or universal variables
if num_qsets == 1:
if qindex == 1:
ratio = 1.0
else:
ratio = 0.0
# calculate number of existential and universal quantifier sets
if num_qsets % 2 == 0: # even number of quantifier sets
        num_sets[EXISTENTIAL] = num_sets[UNIVERSAL] = num_qsets // 2
else:
if quantifiers[qindex] == EXISTENTIAL:
num_sets[EXISTENTIAL] = math.floor(num_qsets / 2) + 1
num_sets[UNIVERSAL] = num_sets[EXISTENTIAL] - 1
else:
num_sets[UNIVERSAL] = math.floor(num_qsets / 2) + 1
num_sets[EXISTENTIAL] = num_sets[UNIVERSAL] - 1
assert(num_sets[EXISTENTIAL] > 0 or num_sets[UNIVERSAL] > 0)
assert(num_sets[EXISTENTIAL] + num_sets[UNIVERSAL] == num_qsets)
# calculate number of existential and universal variables
    if ratio is not None:
if ratio > 0.0 and ratio < 1.0:
# there has to be at least 1 existential variable if given ratio is
# greater 0.0 and less than 1.0
_num_vars[EXISTENTIAL] = max(1, math.floor(num_vars * ratio))
else:
# special case: ratio is 0.0 or 1.0 -> all variables are either
# existential or universal
_num_vars[EXISTENTIAL] = math.floor(num_vars * ratio)
# just use a random number of existential variables
else:
# we need at least num_sets[EXISTENTIAL] and at most num_sets[UNIVERSAL]
# existential variables in order to be sure that we always have enough
# variables for the specified amount of quantifier sets
_num_vars[EXISTENTIAL] = random.randint(num_sets[EXISTENTIAL],
num_vars - num_sets[UNIVERSAL])
# remaining number of variables are universal
_num_vars[UNIVERSAL] = num_vars - _num_vars[EXISTENTIAL]
rem_vars = _num_vars.copy()
assert(_num_vars[EXISTENTIAL] + _num_vars[UNIVERSAL] == num_vars)
assert(num_sets[EXISTENTIAL] + num_sets[UNIVERSAL] == num_qsets)
# variables not yet used in quantifier sets
vars = [v for v in range(1, num_vars + 1)]
while num_sets[EXISTENTIAL] > 0 or num_sets[UNIVERSAL] > 0:
qset = []
quantifier = quantifiers[qindex]
# add quantifier to set
qset.append(quantifier)
# determine number of variables of new quantifier set
if num_sets[quantifier] == 1: # last quantifier set
vars_per_qset = rem_vars[quantifier]
else:
vars_per_qset = random.randint(1, int(rem_vars[quantifier] /
num_sets[quantifier]))
rem_vars[quantifier] -= vars_per_qset
num_sets[quantifier] -= 1
assert(rem_vars[quantifier] >= 0)
# add random variables to quantifier set
for i in range(vars_per_qset):
assert(len(vars) > 0)
rand_index = random.randint(0, len(vars) - 1) % len(vars)
assert(rand_index >= 0)
assert(rand_index < len(vars))
var = vars.pop(rand_index)
# cache variable information (quantifier, scope level, occurrences)
_vcache[var] = [quantifier, len(quantsets), 0]
# mark variable as not used yet
_qcache[quantifier][UNUSED].append(var)
# add variable to quantifier set
qset.append(var)
quantsets.append(qset)
# set next quantifier
qindex = (qindex + 1) & 1
assert(rem_vars[EXISTENTIAL] == 0)
assert(rem_vars[UNIVERSAL] == 0)
assert(num_sets[EXISTENTIAL] == 0)
assert(num_sets[UNIVERSAL] == 0)
assert(len(vars) == 0)
assert(len(quantsets) == num_qsets)
assert(len(_vcache) == num_vars)
assert(len(_qcache[EXISTENTIAL][UNUSED]) + \
len(_qcache[UNIVERSAL][UNUSED]) == num_vars)
return quantsets
|
e01e053e64384f0304fd21200bd31485f0e6eb06
| 28,388 |
def encoder(src_embedding, src_sequence_length):
"""Encoder: Bidirectional GRU"""
encoder_fwd_cell = layers.GRUCell(hidden_size=hidden_dim)
encoder_fwd_output, fwd_state = layers.rnn(
cell=encoder_fwd_cell,
inputs=src_embedding,
sequence_length=src_sequence_length,
time_major=False,
is_reverse=False)
encoder_bwd_cell = layers.GRUCell(hidden_size=hidden_dim)
encoder_bwd_output, bwd_state = layers.rnn(
cell=encoder_bwd_cell,
inputs=src_embedding,
sequence_length=src_sequence_length,
time_major=False,
is_reverse=True)
encoder_output = layers.concat(
input=[encoder_fwd_output, encoder_bwd_output], axis=2)
encoder_state = layers.concat(input=[fwd_state, bwd_state], axis=1)
return encoder_output, encoder_state
|
f23fb197838952017d3706db221ab55c9807bbb0
| 28,390 |
def _class_search_post_url_from(absolute_url, form):
"""Determines absolute URL to submit HTTP POST query request to"""
method = form.get(HTTP_METHOD)
if method != POST:
raise ValueError("Expected POST form submission method; Got "+repr(method))
action = form.get(ACTION)
dest_url = urljoin(absolute_url, action)
return dest_url
|
9547643b49cec1a4ea272c31b6a5399c076fa487
| 28,391 |
def pcmh_2_2d__3_5_6_7_8():
"""Huddles, Meetings & Trainings"""
huddle_sheet_url = URL('init', 'word', 'huddle_sheet.doc', vars=dict(**request.get_vars), hmac_key=MY_KEY,
salt=session.MY_SALT, hash_vars=["app_id"])
# referral tracking chart
huddle_sheet = MultiQNA(
5, float('inf'), True,
'huddle_sheet',
"Please upload a minimum of 5 days' worth of <a href='{url}'>daily huddle sheets</a>. The huddles must filled "
"out every morning discussing tasks / reminders regarding a particular patient or a population of "
"patients.".format(url=huddle_sheet_url)
)
huddle_sheet.set_template("{choose_file}")
temp = "Please have all staff sign <a href='{url}'>this %s sign-in sheet</a> the next time " + \
"{practice} conducts a %s.".format(practice=APP.practice_name)
# meeting_sheet
meeting_sheet_url = URL('init', 'word', 'signin_sheet.doc', args=["meeting_signin_sheet"],
vars=dict(type="meeting", **request.get_vars),
hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["app_id", "type"])
meeting_sheet = MultiQNA(
1, 1, True,
'meeting_sheet',
(temp % ("meeting", "meeting to discuss practice functioning")).format(
url=meeting_sheet_url)
)
meeting_sheet.set_template("{choose_file}")
# meeting_sheet
training_sheet_url = URL('init', 'word', 'signin_sheet.doc', args=["training_signin_sheet"],
vars=dict(type="training", **request.get_vars),
hmac_key=MY_KEY, salt=session.MY_SALT, hash_vars=["app_id", "type"])
training_sheet = MultiQNA(
1, float('inf'), True,
'training_sheet',
(temp % ("training", "training / re-training regarding patient and population management")).format(
url=training_sheet_url)
)
training_sheet.set_template("{choose_file}")
return dict(documents=[
dict(
description="Daily Huddle Sheet",
url=huddle_sheet_url,
permissions=["IS_TEAM"]
),
dict(
description="Training Sign-in Sheet",
url=training_sheet_url,
permissions=["IS_TEAM"]
),
dict(
description="Meeting Sign-in Sheet",
url=meeting_sheet_url,
permissions=["IS_TEAM"]
),
])
|
73946627100342c07083e656d92d2364d166cc16
| 28,392 |
def CallCountsToMockFunctions(mock_function):
"""A decorator that passes a call count to the function it decorates.
Examples:
@CallCountsToMockFunctions
def foo(call_count):
return call_count
...
...
[foo(), foo(), foo()]
[0, 1, 2]
"""
counter = [0]
def Result(*args, **kwargs):
        # For some values of `counter`, the mock function may simulate raising
        # an exception, which the test case can catch via
        # `unittest.TestCase.assertRaises()`. Incrementing the counter before
        # the call also keeps recursive functions working.
prev_counter = counter[0]
counter[0] += 1
ret_value = mock_function(prev_counter, *args, **kwargs)
return ret_value
return Result
|
cc621cabdf87ff554bb02c25282e99fadcaaa833
| 28,393 |
from typing import Callable
from typing import Tuple
import scipy
def multi_start_maximise(objective_function: Callable,
initial_points: ndarray, **kwargs) -> Tuple[ndarray, float]:
"""Run multi-start maximisation of the given objective function.
Warnings
--------
This is a hack to take advantage of fast vectorised computation and avoid expensive python loops. There may be some
issues with this method!
The objective function provided here must be a vectorised function. We take advantage of the fast computation of
vectorised functions to view a multi-start optimisation as a single pass of a higher-dimensional optimisation,
rather than several passes of a low-dimensional optimisation (which would require an expensive python loop). We
simply concatenate all the points where the function is to be evaluated into a single high-dimensional vector, give
the function value as the sum of all the individual function values, and give the Jacobian as the concatenation of
all the individual Jacobians. In this way we can essentially perform many optimisations in parallel. Note that
there is an issue here with the stopping condition: we can only consider all optimisations together, so even if most
have come very close to an optimum, the process will continue as long as one is far away. However, this does seem to
perform well in practice.
Parameters
----------
objective_function
Function to be maximised. Must return both the function value and the Jacobian. Must also accept a 2D array of
points, returning a 1D array and a 2D array for the function values and Jacobians respectively.
initial_points
Points at which to begin the optimisation, as a 2D array of shape (num_points, num_dimensions).
**kwargs
Keyword arguments will be included in the 'options' dict passed to the underlying scipy optimiser.
Returns
-------
ndarray
The location of the found maximum.
float
The value of the objective function at the found maximum.
"""
minimizer_kwargs = DEFAULT_MINIMIZER_KWARGS.copy()
minimizer_kwargs['options'] = {**minimizer_kwargs['options'], **kwargs} # This merges the two dicts.
num_points, num_dims = np.shape(initial_points)
def function_to_minimise(x, *inner_args, **inner_kwargs):
x = np.reshape(x, (num_points, num_dims))
value, jacobian = objective_function(x, *inner_args, **inner_kwargs)
combined_value, combined_jacobian = -value.sum(), -jacobian.ravel()
if not np.isfinite(combined_value) or not np.all(np.isfinite(combined_jacobian)):
raise FloatingPointError("Objective function for multi-start optimisation returned NaN or infinity.")
return combined_value, combined_jacobian
maximum = scipy.optimize.minimize(function_to_minimise, initial_points, **minimizer_kwargs)
#print(maximum)
maxima = maximum.x.reshape(num_points, num_dims)
values, _ = objective_function(maxima)
max_index = np.argmax(values)
optimal_x = maxima[max_index, :]
optimal_y = values[max_index]
return optimal_x, optimal_y
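# Hedged usage sketch (assumes numpy as np and the module-level
# DEFAULT_MINIMIZER_KWARGS referenced above): maximise f(x) = -||x||^2
# from several random starting points.
#   def neg_sq(points):
#       return -np.sum(points ** 2, axis=1), -2.0 * points
#   best_x, best_y = multi_start_maximise(neg_sq, np.random.randn(8, 3))
#   # best_x should end up close to the origin, best_y close to 0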
|
6eabacc0d84389c45ddbd75fd84a27cf312e65be
| 28,394 |
async def ping():
"""
.ping: respond with pong
"""
return "pong"
|
988165efb5087fd838a2930dbe4ed540b2d70037
| 28,395 |
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
def stationarity_check(TS,plot=True,col=None):
"""From: https://learn.co/tracks/data-science-career-v2/module-4-a-complete-data-science-project-using-multiple-regression/working-with-time-series-data/time-series-decomposition
"""
# Import adfuller
if col is not None:
# Perform the Dickey Fuller Test
dftest = adfuller(TS[col]) # change the passengers column as required
else:
dftest=adfuller(TS)
if plot:
# Calculate rolling statistics
rolmean = TS.rolling(window = 8, center = False).mean()
rolstd = TS.rolling(window = 8, center = False).std()
#Plot rolling statistics:
fig = plt.figure(figsize=(12,6))
orig = plt.plot(TS, color='blue',label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label = 'Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation')
# plt.show(block=False)
# Print Dickey-Fuller test results
print ('Results of Dickey-Fuller Test:')
dfoutput = pd.Series(dftest[0:4],
index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key,value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
sig = dfoutput['p-value']<.05
print (dfoutput)
print()
    if sig:
        print(f"[i] p-val {dfoutput['p-value'].round(4)} is <.05, so we reject the null hypothesis of non-stationarity.")
        print("\tThe time series IS stationary.")
    else:
        print(f"[i] p-val {dfoutput['p-value'].round(4)} is >.05, so we fail to reject the null hypothesis of non-stationarity.")
        print('\tThe time series is NOT stationary.')
return dfoutput
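# Hedged usage sketch (assumes pandas as pd and matplotlib.pyplot as plt are
# available, and `air` is a DataFrame with a 'passengers' column indexed by date):
#   results = stationarity_check(air, plot=True, col='passengers')
#   results['p-value']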
|
4b2120b4da74a08e13f61220bd212ae6016f3a73
| 28,396 |
def file_exists(session, ds_browser, ds_path, file_name):
"""Check if the file exists on the datastore."""
client_factory = session._get_vim().client.factory
search_spec = vm_util.search_datastore_spec(client_factory, file_name)
search_task = session._call_method(session._get_vim(),
"SearchDatastore_Task",
ds_browser,
datastorePath=ds_path,
searchSpec=search_spec)
try:
task_info = session._wait_for_task(search_task)
except error_util.FileNotFoundException:
return False
file_exists = (getattr(task_info.result, 'file', False) and
task_info.result.file[0].path == file_name)
return file_exists
|
00b856d529f16ea05123f2b4447d94698d986902
| 28,398 |
def convert_binary_to_unicode(binary_input):
"""
    Converts a binary string (processed in 18-bit chunks) to a unicode string.
:param binary_input: String
:return: String
"""
unicode_output = ''
for starting_position in range(0, len(binary_input), 18):
unicode_output += chr(int(binary_input[starting_position:starting_position + 18], 2))
return unicode_output
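# Example (illustrative): a single 18-bit chunk equal to 65 decodes to 'A'.
assert convert_binary_to_unicode('000000000001000001') == 'A'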
|
ae00c8b31779420662dca09e1ca6c23590b45e38
| 28,399 |
def get_rating(comment):
"""
"""
return comment.xpath(
".//div[@itemprop=\"reviewRating\"]/meta[@itemprop=\"ratingValue\"]"
)[0].attrib.get("content")
|
c80d7f3443a20facdf5a99c3db42d8aa49e95010
| 28,400 |
def creat_netmiko_connection(username, password, host, port) -> object:
"""Logs into device and returns a connection object to the caller. """
credentials = {
'device_type': 'cisco_ios',
'host': host,
'username': username,
'password': password,
'port': port,
'session_log': 'my_file.out'}
try:
device_connect = ConnectHandler(**credentials)
except ssh_exception.AuthenticationException:
device_connect = "ssh_exception"
except EOFError:
device_connect = "Authenitcation Error"
except ssh_exception.NetmikoTimeoutException:
device_connect = 'Connection Timeout'
except ValueError:
device_connect = 'Connection Issue'
except:
device_connect = 'An Error Occured'
return device_connect
|
94c7463235051f87ad106b0c960ad155101fce56
| 28,401 |
def get_requirements():
"""Read the requirements file."""
requirements = read("requirements.txt")
return [r for r in requirements.strip().splitlines()]
|
a178d5148b137b4a6f46112cd73bbf6d2e9fb211
| 28,403 |
def handler(fmt, station, issued):
"""Handle the request, return dict"""
pgconn = get_dbconn("asos")
if issued is None:
issued = utc()
if issued.tzinfo is None:
issued = issued.replace(tzinfo=timezone.utc)
df = read_sql(
f"""
WITH forecast as (
select id from taf where station = %s and
valid > %s - '24 hours'::interval and valid <= %s
ORDER by valid DESC LIMIT 1)
select
to_char(t.valid at time zone 'UTC', '{ISO}') as utc_valid,
raw,
is_tempo,
to_char(t.end_valid at time zone 'UTC', '{ISO}') as utc_end_valid,
sknt,
drct,
gust,
visibility,
presentwx,
skyc,
skyl,
ws_level,
ws_drct,
ws_sknt
from taf{issued.year} t JOIN forecast f on
(t.taf_id = f.id) ORDER by valid ASC
""",
pgconn,
params=(station, issued, issued),
index_col=None,
)
if fmt == "txt":
for col in ["presentwx", "skyc", "skyl"]:
df[col] = [" ".join(map(str, item)) for item in df[col]]
return df.to_csv(index=False)
if fmt == "json":
return df.to_json(orient="table", index=False)
|
d75940ab5accc36258473d9794fd0449f707b593
| 28,404 |
def index_count(index_file=config.vdb_bin_index):
"""
Method to return the number of indexed items
:param index_file: Index DB file
:return: Count of the index
"""
return len(storage.stream_read(index_file))
|
2abe3a9a3b1e04f67175cabc9099f490556caabd
| 28,405 |
def asc_to_dict(filename: str) -> dict:
"""
Load an asc file into a dict object.
:param filename: The file to load.
:return dict: A dict object containing data.
"""
return list_to_dict(asc_to_list(filename))
|
3509321bc38e53ae1e86fa8b9cea113bec55700a
| 28,407 |
def merge_sort(array):
"""
Merge Sort
Complexity: O(NlogN)
"""
if len(array) > 1:
mid = len(array) // 2
left = array[:mid]
right = array[mid:]
left = merge_sort(left)
right = merge_sort(right)
array = []
# This is a queue implementation. We can also use
# a deque but slicing it needs the itertools slice
# function which I didn't want to use. More on that
# in the stacks and queues chapter.
l1 = l2 = 0
while len(left) > l1 and len(right) > l2:
if left[l1] < right[l2]:
array.append(left[l1])
l1 += 1
else:
array.append(right[l2])
l2 += 1
while len(left) > l1:
array.append(left[l1])
l1 += 1
while len(right) > l2:
array.append(right[l2])
l2 += 1
return array
|
73b3ac5b950f5788cbc3e7c98d2a4d5aac427929
| 28,408 |
def semi_major_axis(P, Mtotal):
"""Semi-major axis
Kepler's third law
Args:
P (float): Orbital period [days]
Mtotal (float): Mass [Msun]
Returns:
float or array: semi-major axis in AU
"""
# convert inputs to array so they work with units
P = np.array(P)
Mtotal = np.array(Mtotal)
Mtotal = Mtotal*c.M_sun.value
P = (P * u.d).to(u.second).value
G = c.G.value
a = ((P**2)*G*Mtotal/(4*(np.pi)**2))**(1/3.)
a = a/c.au.value
return a
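# Quick sanity check (illustrative; assumes numpy as np, astropy.constants as c
# and astropy.units as u are imported):
#   semi_major_axis(365.25, 1.0)   # ~1.0 AU for an Earth-like orbit around 1 Msun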
|
338ce7857544d59dca1d78026f2559ce698faae8
| 28,409 |
def _check_df_load(df):
"""Check if `df` is already loaded in, if not, load from file."""
if isinstance(df, str):
if df.lower().endswith('json'):
return _check_gdf_load(df)
else:
return pd.read_csv(df)
elif isinstance(df, pd.DataFrame):
return df
else:
raise ValueError("{} is not an accepted DataFrame format.".format(df))
|
7245341c8fa58e2aea20761d6832be09e948b0e3
| 28,411 |
def ping(host, timeout=False, return_boolean=False):
"""
Performs an ICMP ping to a host
.. versionchanged:: 2015.8.0
Added support for SunOS
CLI Example:
.. code-block:: bash
salt '*' network.ping archlinux.org
.. versionadded:: 2015.5.0
Return a True or False instead of ping output.
.. code-block:: bash
salt '*' network.ping archlinux.org return_boolean=True
Set the time to wait for a response in seconds.
.. code-block:: bash
salt '*' network.ping archlinux.org timeout=3
"""
if timeout:
if __grains__["kernel"] == "SunOS":
cmd = "ping -c 4 {} {}".format(
__utils__["network.sanitize_host"](host), timeout
)
else:
cmd = "ping -W {} -c 4 {}".format(
timeout, __utils__["network.sanitize_host"](host)
)
else:
cmd = "ping -c 4 {}".format(__utils__["network.sanitize_host"](host))
if return_boolean:
ret = __salt__["cmd.run_all"](cmd)
if ret["retcode"] != 0:
return False
else:
return True
else:
return __salt__["cmd.run"](cmd)
|
f5707427eaef1e436618065bea78faa15a5cce7e
| 28,413 |
def comp_wind_sym(wind_mat):
"""Computes the winding pattern periodicity and symmetries
Parameters
----------
wind_mat : numpy.ndarray
Matrix of the Winding
Returns
-------
Nperw: int
        Number of electrical periods of the winding
    is_asym_wind: bool
        True if an anti-symmetry is detected in the elementary winding pattern
    """
assert len(wind_mat.shape) == 4, "dim 4 expected for wind_mat"
# Summing on all the layers (Nlay_r and Nlay_theta)
wind_mat2 = squeeze(np_sum(np_sum(wind_mat, axis=1), axis=0))
qs = wind_mat.shape[3] # Number of phase
Zs = wind_mat.shape[2] # Number of Slot
Nperw = 1 # Number of electrical period of the winding
Nperslot = 1 # Periodicity of the winding in number of slots
# Looking for the periodicity of each phase
for q in range(0, qs):
k = 1
is_sym = False
while k <= Zs and not is_sym:
            # We shift the array around the slot and check if it's the same
if array_equal(wind_mat2[:, q], roll(wind_mat2[:, q], shift=k)):
is_sym = True
else:
k += 1
# least common multiple to find common periodicity between different phase
Nperslot = lcm(Nperslot, k)
# If Nperslot > Zs no symmetry
if Nperslot > 0 and Nperslot < Zs:
# nb of periods of the winding (2 means 180°)
Nperw = Zs / float(Nperslot)
# if Zs cannot be divided by Nperslot (non integer)
if Nperw % 1 != 0:
Nperw = 1
# Check for anti symmetries in the elementary winding pattern
if (
Nperslot % 2 == 0
and norm(
wind_mat2[0 : Nperslot // 2, :] + wind_mat2[Nperslot // 2 : Nperslot, :]
)
== 0
):
is_asym_wind = True
Nperw = Nperw * 2
else:
is_asym_wind = False
return int(Nperw), is_asym_wind
|
7984eb6f3b1d7d11694ecac1237ce27b11bbd9fe
| 28,414 |
def row(data, widths="auto", spacing=3, aligns=None):
"""Format data as a table row.
data (iterable): The individual columns to format.
widths (iterable or 'auto'): Column widths in order. If "auto", widths
will be calculated automatically based on the largest value.
spacing (int): Spacing between columns, in spaces.
aligns (iterable / unicode): Column alignments in order. 'l' (left,
default), 'r' (right) or 'c' (center). If a string, value is used
for all columns.
RETURNS (unicode): The formatted row.
"""
cols = []
if hasattr(aligns, '__hash__') and aligns in ALIGN_MAP:
aligns = [aligns for _ in data]
for i, col in enumerate(data):
align = ALIGN_MAP.get(aligns[i] if aligns and i < len(aligns) else "l")
col_width = len(col) if widths == "auto" else widths[i]
tpl = "{:%s%d}" % (align, col_width)
cols.append(tpl.format(to_string(col)))
return (" " * spacing).join(cols)
|
3adef2268ba1720e7480a3b4d0f873927d14f0b6
| 28,416 |
import calendar
import datetime
def _increment_date(date, grain):
"""
Creates a range of dates where the starting date is the given date and the
ending date is the given date incremented for 1 unit of the given grain
(year, month or day).
:param date: the starting date in string format 'YYYY-MM-DD'
:param grain: the grain of increment 'year', 'month' or 'day'
:return: a dictionary with starting and ending date
"""
result = {'from': date}
date_from = datetime.datetime.strptime(date, '%Y-%m-%d')
if grain == 'year':
date_to = datetime.date(date_from.year + 1, date_from.month, date_from.day)
elif grain == 'month':
days_in_month = calendar.monthrange(date_from.year, date_from.month)[1]
date_to = date_from + datetime.timedelta(days=days_in_month)
else:
date_to = date_from + datetime.timedelta(days=1)
result['to'] = str(date_to)[:10] # format 'YYYY-MM-DD'
return result
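# Example usage (illustrative sketch):
#   _increment_date('2020-01-15', 'month')
#   # -> {'from': '2020-01-15', 'to': '2020-02-15'}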
|
53626ad40cdf5a2352a6129fb15ed91ede60838e
| 28,417 |
def ErrorCorrect(val,fEC):
"""
Calculates the error correction parameter \lambda_{EC}. Typical val is 1.16.
Defined in Sec. IV of [1].
Parameters
----------
val : float
Error correction factor.
fEC : float
Error correction efficiency.
Returns
-------
float
Error correction parameter.
"""
return val * fEC
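# Example (illustrative): typical val = 1.16 with 95% efficient error correction.
#   ErrorCorrect(1.16, 0.95)   # -> 1.102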
|
83c4483c56c7c3b79060dd070ec68f6dfd5ee749
| 28,418 |
def BytesGt(left: Expr, right: Expr) -> BinaryExpr:
"""Greater than expression with bytes as arguments.
Checks if left > right, where left and right are interpreted as big-endian unsigned integers.
Arguments must not exceed 64 bytes.
Requires TEAL version 4 or higher.
Args:
left: Must evaluate to bytes.
right: Must evaluate to bytes.
"""
return BinaryExpr(Op.b_gt, TealType.bytes, TealType.uint64, left, right)
|
9c509eab36ef0b174248741b656add275d8654b3
| 28,419 |
def balanceOf(account):
"""
    Can be invoked at every shard. If invoked at a non-root shard, that shard must have received a cross-shard (xshard) transfer beforehand; otherwise the function will throw an exception.
:param account: user address
:return: the token balance of account
"""
if len(account) != 20:
raise Exception("address length error")
return Invoke(SHARD_VERSION, XSHARD_ASSET_ADDR, 'oep4BalanceOf', account)
|
36d56a2536f33053dc5ed2020d0124380e9ceb28
| 28,420 |
def archive_entry(title):
"""
"""
if not session.get('logged_in'):
abort(401)
db = get_db()
# Archive it
stmt = '''
insert into archived_entries select * from entries
where pretty_title like ?
'''
db.execute(stmt,
('%' + title + '%',))
db.execute('delete from entries where pretty_title like ?',
('%' + title + '%',))
db.commit()
flash('Archived page: ' + title)
return redirect(url_for('show_entries'))
|
69384fbfda4090352640890105c02304782e541c
| 28,421 |
def build_census_df(projection_admits: pd.DataFrame, parameters) -> pd.DataFrame:
"""ALOS for each category of COVID-19 case (total guesses)"""
n_days = np.shape(projection_admits)[0]
hosp_los, icu_los, vent_los = parameters.lengths_of_stay
los_dict = {
"Hospitalized": hosp_los,
"ICU": icu_los,
"Ventilated": vent_los,
}
census_dict = dict()
for k, los in los_dict.items():
census = (
projection_admits.cumsum().iloc[:-los, :]
- projection_admits.cumsum().shift(los).fillna(0)
).apply(np.ceil)
census_dict[k] = census[k]
census_df = pd.DataFrame(census_dict)
census_df["day"] = census_df.index
census_df = census_df[["day", "Hospitalized", "ICU", "Ventilated"]]
census_df = census_df.head(n_days)
census_df = census_df.rename(
columns={
disposition: f"{disposition}"
for disposition in ("Hospitalized", "ICU", "Ventilated")
}
)
return census_df
|
0b1471f6e522a15027e2797484e573c65971e0d4
| 28,422 |
def indented_kv(key: str, value: str, indent=1, separator="=", suffix=""):
"""Print something as a key-value pair whilst properly indenting. This is useful
    for implementations of `str` and `repr`.
Args:
key (str): Key.
value (str): Value.
indent (int, optional): Number of spaces to indent. Defaults to 1.
separator (str, optional): Separator between the key and value. Defaults to "=".
suffix (str, optional): Extra to print at the end. You can set this, e.g., to
",\n" or ">". Defaults to no suffix.
Returns
str: Key-value representation with proper indentation.
"""
key_string = f"{indent * ' '}{key}{separator}"
value_string = value.strip().replace("\n", "\n" + " " * len(key_string))
return key_string + value_string + suffix
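# Example (illustrative): indent by two spaces and join with '='.
assert indented_kv("mean", "0.0", indent=2) == "  mean=0.0"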
|
b27a7ed7a0db4219332fda1e1131c888216141b2
| 28,423 |
def are_in_file(file_path, strs_to_find):
"""Returns true if every string in the given strs_to_find array is found in
at least one line in the given file. In particular, returns true if
strs_to_find is empty. Note that the strs_to_find parameter is mutated."""
infile = open(file_path)
for line in infile:
if len(strs_to_find) == 0:
return True
index = 0
while index < len(strs_to_find):
if strs_to_find[index] in line:
del strs_to_find[index]
else:
index = index + 1
return len(strs_to_find) == 0
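# Illustrative usage (note that the strs_to_find list is consumed/mutated):
#   are_in_file('setup.py', ['name=', 'version='])
#   # -> True if both strings occur somewhere in the file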
|
474234a35bf885c5f659f32a25c23580f2014cc2
| 28,424 |
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename)
df.drop_duplicates(inplace=True)
df.dropna(inplace=True)
df = df.loc[filters(df)]
df.index = np.arange(len(df))
df = df.loc[df.zipcode.notnull()]
df = split_zipcode(df)
df = split_date(df)
df.drop(["id", "date", "zipcode", "sqft_living", "lat", "long"], inplace=True, axis=1)
# df = df.loc[filters(df)]
df = df.loc[df.sqft_above / df.floors <= df.sqft_lot] # Another filter to apply, we need floors > 0 first.
df["last_renovated"] = np.maximum(df.yr_built, df.yr_renovated)
# df.dropna(inplace=True)
return df.drop(["price"], axis=1), df.price
|
a8aed077d63c9e2df0f150b2ef7e3c06c30fcb29
| 28,425 |
import unicodedata
import re
from django.utils.safestring import mark_safe
def slugify(value):
"""
    Unicode version of the standard slugify.
Converts spaces to hyphens. Removes characters that
aren't unicode letters, underscores, or hyphens. Converts to lowercase.
Also replaces whitespace with hyphens and
strips leading and trailing hyphens.
:param value: String to slugify.
:type value: str
:returns: Slugified value.
:rtype: str
"""
    value = unicodedata.normalize('NFKC', value)
    value = re.sub(r'(?u)[^\w\s-]+', '', value).strip().lower()
    return mark_safe(re.sub(r'[-\s]+', '-', value).strip('-'))
|
e81020b76f4e29f89e44c420e8e95b89f7eb1363
| 28,427 |
from math import factorial as f
def binomial_coefficient(n: int, m: int) -> int:
""" Binomial Coefficient
Returns n!/(m!(n-m)!). This is used in combinatronics and binomial theorem."""
    return f(n) // (f(m) * f(n - m))
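# Example (illustrative): "5 choose 2".
assert binomial_coefficient(5, 2) == 10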
|
e0ad7a4cd3cb85bb4c0a48890209a8f71086a853
| 28,429 |
import osqp
from numpy import zeros, ones, hstack, vstack, inf, size
from scipy.sparse import csc_matrix
def joint(waypoints):
"""
    Calculate a trajectory through the given waypoints by solving one joint QP
    (minimum snap for X, Y, Z and minimum yaw acceleration) with OSQP.
"""
# total number of segments
numSegments = len(waypoints) - 1
# every segment has its own polynomial of 4th degree for X,Y and Z and a polynomial of 2nd degree for Yaw
numCoefficients = numSegments * (3*5+3)
# list of calculated trajectory coefficients
trajectory = []
# start + end X,Y,Z,Yaw position for every segment: 8
# rendezvous X,Y,Z,Yaw velocity: 4
# absolute start + end X,Y,Z (+ start Yaw) velocity: 7
numConstraints = numSegments * 8 + (numSegments - 1) * 4 + 7
P_numpy = zeros((numCoefficients, numCoefficients))
for i in range(numSegments):
P_numpy[0 + i * 18, 0 + i * 18] = 1 # minimize snap for X
# P_numpy[2 + i * 18, 2 + i * 18] = 100 # minimize acceleration for X
P_numpy[5 + i * 18, 5 + i * 18] = 1 # minimize snap for Y
# P_numpy[7 + i * 18, 7 + i * 18] = 100 # minimize acceleration for Y
P_numpy[10 + i * 18, 10 + i * 18] = 1 # minimize snap for Z
# P_numpy[12 + i * 18, 12 + i * 18] = 100 # minimize acceleration for Z
P_numpy[15 + i * 18, 15 + i * 18] = 1 # minimize acceleration for Yaw
P = csc_matrix(P_numpy) # convert to CSC for performance
# =============================
# Gradient vector (linear terms), we have none
# =============================
q = zeros((numCoefficients, 1))
q = hstack(q) # convert to hstack for performance
# =============================
# Inequality matrix (left side), we have none
# =============================
G = zeros((numConstraints, numCoefficients))
# =============================
# Inequality vector (right side), we have none
# =============================
h = zeros((numConstraints, 1))
h = hstack(h) # convert to hstack for performance
# =============================
# Equality matrix (left side)
# =============================
A = zeros((numConstraints, numCoefficients))
# =============================
# Equality vector (right side)
# =============================
b = zeros((numConstraints, 1))
# =============================
# Set up of Equality Constraints
# =============================
cc = -1 # Current Constraint
for i in range(numSegments):
# "start of segment" position constraints
cc += 1 # X Position
A[cc, 0 + i * 18] = waypoints[i].time ** 4
A[cc, 1 + i * 18] = waypoints[i].time ** 3
A[cc, 2 + i * 18] = waypoints[i].time ** 2
A[cc, 3 + i * 18] = waypoints[i].time
A[cc, 4 + i * 18] = 1
b[cc, 0] = waypoints[i].x
cc += 1 # Y Position
A[cc, 5 + i * 18] = waypoints[i].time ** 4
A[cc, 6 + i * 18] = waypoints[i].time ** 3
A[cc, 7 + i * 18] = waypoints[i].time ** 2
A[cc, 8 + i * 18] = waypoints[i].time
A[cc, 9 + i * 18] = 1
b[cc, 0] = waypoints[i].y
cc += 1 # Z Position
A[cc, 10 + i * 18] = waypoints[i].time ** 4
A[cc, 11 + i * 18] = waypoints[i].time ** 3
A[cc, 12 + i * 18] = waypoints[i].time ** 2
A[cc, 13 + i * 18] = waypoints[i].time
A[cc, 14 + i * 18] = 1
b[cc, 0] = waypoints[i].z
cc += 1 # Yaw Angle
A[cc, 15 + i * 18] = waypoints[i].time ** 2
A[cc, 16 + i * 18] = waypoints[i].time
A[cc, 17 + i * 18] = 1
b[cc, 0] = waypoints[i].yaw
# "end of segment" position constraints
cc += 1 # X Position
A[cc, 0 + i * 18] = waypoints[i + 1].time ** 4
A[cc, 1 + i * 18] = waypoints[i + 1].time ** 3
A[cc, 2 + i * 18] = waypoints[i + 1].time ** 2
A[cc, 3 + i * 18] = waypoints[i + 1].time
A[cc, 4 + i * 18] = 1
b[cc, 0] = waypoints[i + 1].x
cc += 1 # Y Position
A[cc, 5 + i * 18] = waypoints[i + 1].time ** 4
A[cc, 6 + i * 18] = waypoints[i + 1].time ** 3
A[cc, 7 + i * 18] = waypoints[i + 1].time ** 2
A[cc, 8 + i * 18] = waypoints[i + 1].time
A[cc, 9 + i * 18] = 1
b[cc, 0] = waypoints[i + 1].y
cc += 1 # Z Position
A[cc, 10 + i * 18] = waypoints[i + 1].time ** 4
A[cc, 11 + i * 18] = waypoints[i + 1].time ** 3
A[cc, 12 + i * 18] = waypoints[i + 1].time ** 2
A[cc, 13 + i * 18] = waypoints[i + 1].time
A[cc, 14 + i * 18] = 1
b[cc, 0] = waypoints[i + 1].z
cc += 1 # Yaw Angle
A[cc, 15 + i * 18] = waypoints[i + 1].time ** 2
A[cc, 16 + i * 18] = waypoints[i + 1].time
A[cc, 17 + i * 18] = 1
b[cc, 0] = waypoints[i + 1].yaw
# segment rendezvous constraints
if i == 0:
continue
cc += 1 # X Velocity Rendezvous
A[cc, 0 + i * 18] = 4 * waypoints[i].time ** 3
A[cc, 1 + i * 18] = 3 * waypoints[i].time ** 2
A[cc, 2 + i * 18] = 2 * waypoints[i].time
A[cc, 3 + i * 18] = 1
A[cc, 0 + i * 18 - 18] = -1 * A[cc, 0 + i * 18]
A[cc, 1 + i * 18 - 18] = -1 * A[cc, 1 + i * 18]
A[cc, 2 + i * 18 - 18] = -1 * A[cc, 2 + i * 18]
A[cc, 3 + i * 18 - 18] = -1 * A[cc, 3 + i * 18]
cc += 1 # Y Velocity Rendezvous
A[cc, 5 + i * 18] = 4 * waypoints[i].time ** 3
A[cc, 6 + i * 18] = 3 * waypoints[i].time ** 2
A[cc, 7 + i * 18] = 2 * waypoints[i].time
A[cc, 8 + i * 18] = 1
A[cc, 5 + i * 18 - 18] = -1 * A[cc, 5 + i * 18]
A[cc, 6 + i * 18 - 18] = -1 * A[cc, 6 + i * 18]
A[cc, 7 + i * 18 - 18] = -1 * A[cc, 7 + i * 18]
A[cc, 8 + i * 18 - 18] = -1 * A[cc, 8 + i * 18]
cc += 1 # Z Velocity Rendezvous
A[cc, 10 + i * 18] = 4 * waypoints[i].time ** 3
A[cc, 11 + i * 18] = 3 * waypoints[i].time ** 2
A[cc, 12 + i * 18] = 2 * waypoints[i].time
A[cc, 13 + i * 18] = 1
A[cc, 10 + i * 18 - 18] = -1 * A[cc, 10 + i * 18]
A[cc, 11 + i * 18 - 18] = -1 * A[cc, 11 + i * 18]
A[cc, 12 + i * 18 - 18] = -1 * A[cc, 12 + i * 18]
A[cc, 13 + i * 18 - 18] = -1 * A[cc, 13 + i * 18]
cc += 1 # Yaw Velocity Rendezvous
A[cc, 15 + i * 18] = 2 * waypoints[i].time
A[cc, 16 + i * 18] = 1
A[cc, 15 + i * 18 - 18] = -1 * A[cc, 15 + i * 18]
A[cc, 16 + i * 18 - 18] = -1 * A[cc, 16 + i * 18]
# cc += 1 # X Acceleration Rendezvous
# A[cc, 0 + i * 18] = 12 * waypoints[0].time ** 2
# A[cc, 1 + i * 18] = 6 * waypoints[0].time
# A[cc, 2 + i * 18] = 2
# A[cc, 0 + i * 18 - 18] = -1 * A[cc, 0 + i * 18]
# A[cc, 1 + i * 18 - 18] = -1 * A[cc, 1 + i * 18]
# A[cc, 2 + i * 18 - 18] = -1 * A[cc, 2 + i * 18]
# cc += 1 # Y Acceleration Rendezvous
# A[cc, 5 + i * 18] = 12 * waypoints[0].time ** 2
# A[cc, 6 + i * 18] = 6 * waypoints[0].time
# A[cc, 7 + i * 18] = 2
# A[cc, 5 + i * 18 - 18] = -1 * A[cc, 5 + i * 18]
# A[cc, 6 + i * 18 - 18] = -1 * A[cc, 6 + i * 18]
# A[cc, 7 + i * 18 - 18] = -1 * A[cc, 7 + i * 18]
# cc += 1 # Z Acceleration Rendezvous
# A[cc, 10 + i * 18] = 12 * waypoints[0].time ** 2
# A[cc, 11 + i * 18] = 6 * waypoints[0].time
# A[cc, 12 + i * 18] = 2
# A[cc, 10 + i * 18 - 18] = -1 * A[cc, 10 + i * 18]
# A[cc, 11 + i * 18 - 18] = -1 * A[cc, 11 + i * 18]
# A[cc, 12 + i * 18 - 18] = -1 * A[cc, 12 + i * 18]
# cc += 1 # Yaw Acceleration Rendezvous
# A[cc, 15 + i * 18] = 2
# A[cc, 15 + i * 18 - 18] = -1 * A[cc, 15 + i * 18]
# cc += 1 # X Jerk Rendezvous
# A[cc, 0] = 24 * waypoints[0].time
# A[cc, 1] = 6
# A[cc, 0 + i * 18 - 18] = -1 * A[cc, 0 + i * 18]
# A[cc, 1 + i * 18 - 18] = -1 * A[cc, 1 + i * 18]
# cc += 1 # Y Jerk Rendezvous
# A[cc, 5] = 24 * waypoints[0].time
# A[cc, 6] = 6
# A[cc, 5 + i * 18 - 18] = -1 * A[cc, 5 + i * 18]
# A[cc, 6 + i * 18 - 18] = -1 * A[cc, 6 + i * 18]
# cc += 1 # Z Jerk Rendezvous
# A[cc, 10] = 24 * waypoints[0].time
# A[cc, 11] = 6
# A[cc, 10 + i * 18 - 18] = -1 * A[cc, 10 + i * 18]
# A[cc, 11 + i * 18 - 18] = -1 * A[cc, 11 + i * 18]
#
# cc += 1 # X Snap Rendezvous
# A[cc, 0] = 24
# A[cc, 0 + i * 18 - 18] = -1 * A[cc, 0 + i * 18]
# cc += 1 # Y Snap Rendezvous
# A[cc, 5] = 24
# A[cc, 5 + i * 18 - 18] = -1 * A[cc, 5 + i * 18]
# cc += 1 # Z Snap Rendezvous
# A[cc, 10] = 24
# A[cc, 10 + i * 18 - 18] = -1 * A[cc, 10 + i * 18]
cc += 1 # absolute start X velocity
A[cc, 0] = 4 * waypoints[0].time ** 3
A[cc, 1] = 3 * waypoints[0].time ** 2
A[cc, 2] = 2 * waypoints[0].time
A[cc, 3] = 1
cc += 1 # absolute start Y velocity
A[cc, 5] = 4 * waypoints[0].time ** 3
A[cc, 6] = 3 * waypoints[0].time ** 2
A[cc, 7] = 2 * waypoints[0].time
A[cc, 8] = 1
cc += 1 # absolute start Z velocity
A[cc, 10] = 4 * waypoints[0].time ** 3
A[cc, 11] = 3 * waypoints[0].time ** 2
A[cc, 12] = 2 * waypoints[0].time
A[cc, 13] = 1
cc += 1 # absolute start Yaw velocity
A[cc, 15] = 2 * waypoints[0].time
A[cc, 16] = 1
cc += 1 # absolute end X velocity
A[cc, numCoefficients - 18 + 0] = 4 * waypoints[-1].time ** 3
A[cc, numCoefficients - 18 + 1] = 3 * waypoints[-1].time ** 2
A[cc, numCoefficients - 18 + 2] = 2 * waypoints[-1].time
A[cc, numCoefficients - 18 + 3] = 1
cc += 1 # absolute end Y velocity
A[cc, numCoefficients - 18 + 5] = 4 * waypoints[-1].time ** 3
A[cc, numCoefficients - 18 + 6] = 3 * waypoints[-1].time ** 2
A[cc, numCoefficients - 18 + 7] = 2 * waypoints[-1].time
A[cc, numCoefficients - 18 + 8] = 1
cc += 1 # absolute end Z velocity
A[cc, numCoefficients - 18 + 10] = 4 * waypoints[-1].time ** 3
A[cc, numCoefficients - 18 + 11] = 3 * waypoints[-1].time ** 2
A[cc, numCoefficients - 18 + 12] = 2 * waypoints[-1].time
A[cc, numCoefficients - 18 + 13] = 1
#cc += 1 # absolute end Yaw velocity
#A[cc, numCoefficients - 18 + 15] = 2 * waypoints[-1].time
#A[cc, numCoefficients - 18 + 16] = 1
#cc += 1 # absolute start X acceleration
# A[c, 0] = 12 * waypoints[0].time ** 2
# A[c, 1] = 6 * waypoints[0].time
# A[c, 2] = 2
#cc += 1 # absolute start Y acceleration
# A[c, 5] = 12 * waypoints[0].time ** 2
# A[c, 6] = 6 * waypoints[0].time
# A[c, 7] = 2
#cc += 1 # absolute start Z acceleration
# A[cc, 10] = 12 * waypoints[0].time ** 2
# A[cc, 11] = 6 * waypoints[0].time
# A[cc, 12] = 2
#cc += 1 # absolute start Yaw acceleration
# A[cc, 15] = 2
#cc += 1 # absolute end X acceleration
# A[cc, numCoefficients - 18 + 0] = 12 * waypoints[-1].time ** 2
# A[cc, numCoefficients - 18 + 1] = 6 * waypoints[-1].time
# A[cc, numCoefficients - 18 + 2] = 2
#cc += 1 # absolute end Y acceleration
# A[cc, numCoefficients - 18 + 5] = 12 * waypoints[-1].time ** 2
# A[cc, numCoefficients - 18 + 6] = 6 * waypoints[-1].time
# A[cc, numCoefficients - 18 + 7] = 2
#cc += 1 # absolute end Z acceleration
# A[cc, numCoefficients - 18 + 10] = 12 * waypoints[-1].time ** 2
# A[cc, numCoefficients - 18 + 11] = 6 * waypoints[-1].time
# A[cc, numCoefficients - 18 + 12] = 2
#cc += 1 # absolute end Yaw acceleration
# A[cc, numCoefficients - 18 + 15] = 2
#cc += 1 # absolute start X jerk
# A[cc, 0] = 24 * waypoints[0].time
# A[cc, 1] = 6
#cc += 1 # absolute start Y jerk
# A[cc, 5] = 24 * waypoints[0].time
# A[cc, 6] = 6
#cc += 1 # absolute start Z jerk
# A[cc, 10] = 24 * waypoints[0].time
# A[cc, 11] = 6
#cc += 1 # absolute end X jerk
# A[cc, numCoefficients - 18 + 0] = 24 * waypoints[-1].time
# A[cc, numCoefficients - 18 + 1] = 6
#cc += 1 # absolute end Y jerk
# A[cc, numCoefficients - 18 + 5] = 24 * waypoints[-1].time
# A[cc, numCoefficients - 18 + 6] = 6
#cc += 1 # absolute end Z jerk
# A[cc, numCoefficients - 18 + 10] = 24 * waypoints[-1].time
# A[cc, numCoefficients - 18 + 11] = 6
#cc += 1 # absolute start X snap
# A[cc, 0] = 24
#cc += 1 # absolute start Y snap
# A[cc, 5] = 24
#cc += 1 # absolute start Z snap
# A[cc, 10] = 24
#cc += 1 # absolute end X snap
# A[cc, numCoefficients - 18 + 0] = 24
#cc += 1 # absolute end Y snap
# A[cc, numCoefficients - 18 + 5] = 24
#cc += 1 # absolute end Z snap
# A[cc, numCoefficients - 18 + 10] = 24
# =============================
# Solver Setup
# =============================
# OSQP needs:
# P = quadratic terms
# q = linear terms
# A = constraint matrix of ALL constraints (inequality & equality)
# l = lower constraints
# u = upper constraints
P = csc_matrix(P)
q = hstack(q)
h = hstack(h)
b = hstack(b)
A = vstack([G, A])
A = csc_matrix(A)
l = -inf * ones(len(h))
l = hstack([l, b])
u = hstack([h, b])
# setup solver and solve
m = osqp.OSQP()
m.setup(P=P, q=q, A=A, l=l, u=u) # extra solver variables can be set here
res = m.solve()
# save to trajectory variable
for i in range(0, size(res.x), 18):
segment = res.x[i:i + 18]
trajectory.append(segment)
print("QP solution Number following: ", res.x)
return trajectory
|
06b3b2f183c749405ecacd4ce639c3c2d5826e55
| 28,430 |
import math
def logistic(x: float):
"""Logistic function."""
return 1 / (1 + math.exp(-x))
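# Example (illustrative): the logistic function equals 0.5 at x = 0.
assert logistic(0.0) == 0.5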
|
98b4f7aebd562609789ed5f53f6a79d63eaf6ea0
| 28,431 |
def highlight_deleted(obj):
"""
Display in red lines when object is deleted.
"""
obj_str = conditional_escape(text_type(obj))
if not getattr(obj, 'deleted', False):
return obj_str
else:
return '<span class="deleted">{0}</span>'.format(obj_str)
|
daad6a35bab989a2ca9df63292fecf36b05ff715
| 28,432 |
def range_(stop):
""":yaql:range
Returns an iterator over values from 0 up to stop, not including
stop, i.e. [0, stop).
:signature: range(stop)
:arg stop: right bound for generated list numbers
:argType stop: integer
:returnType: iterator
.. code::
yaql> range(3)
[0, 1, 2]
"""
return iter(range(stop))
|
28717348bcdcd432388b8a4809c897c70a2fce3f
| 28,433 |
def post(filename: str, files: dict, output_type: str):
"""Constructs the http call to the deliver service endpoint and posts the request"""
url = f"http://{CONFIG.DELIVER_SERVICE_URL}/deliver/{output_type}"
logger.info(f"Calling {url}")
try:
response = session.post(url, params={"filename": filename}, files=files)
except MaxRetryError:
logger.error("Max retries exceeded", request_url=url)
raise RetryableError("Max retries exceeded")
except ConnectionError:
logger.error("Connection error", request_url=url)
raise RetryableError("Connection error")
return response
|
358b408ace8750d1c48ca6bef0855aac4db625ca
| 28,435 |
def is_off(*args):
"""
is_off(F, n) -> bool
is offset?
@param F (C++: flags_t)
@param n (C++: int)
"""
return _ida_bytes.is_off(*args)
|
43dc5298bad5daf95f76e8426e819e1feb89f8d4
| 28,437 |
def get_libdcgm_path():
"""
Returns relative path to libdcgm.so.2
"""
return "../../lib/libdcgm.so.2"
|
a1067449bdc9012e07c5707ece68c3aae2799694
| 28,438 |
def method(modelclass, **kwargs):
"""Decorate a ProtoRPC method for use by the endpoints model passed in.
Requires exactly one positional argument and passes the rest of the keyword
arguments to the classmethod "method" on the given class.
Args:
modelclass: An Endpoints model class that can create a method.
Returns:
A decorator that will use the endpoint metadata to decorate an endpoints
method.
"""
return _GetEndpointsMethodDecorator('method', modelclass, **kwargs)
|
801fad462414b94f6ee72e507c17813afc043f81
| 28,439 |
def detect_on(window, index=3, threshold=5): # threshold value is important: power(watts)
"""input: np array
listens for a change in active power that exceeds threshold
(can use Active/real(P), Apparent(S), and Reactive (Q)(worst..high SNR))
index = index of feature to detect. Used P_real @ index 3
returns: boolean for event detection"""
prev_avg = np.average([window[-2][index], window[-3][index], window[-4][index]])
if window[-1][index] - prev_avg > threshold: # if power change > average of last two readings
return True
else:
return False
|
dfea9b4ea95c22b199a63c47cb5f7f16f10df742
| 28,440 |
def calculate_target_as_one_column(df:pd.DataFrame, feature_cols:list, target_cols:list):
"""create a row for every new porduct and give the product name as target column, this is done for the train set"""
x = df[target_cols]
x = x[x==1].stack().reset_index().drop(0,1)
df = pd.merge(df, x, left_on=df.index, right_on='level_0')
df.rename(columns={'level_1': "y"}, inplace=True)
keep_cols = feature_cols.copy()
keep_cols += [ col for col in df if col[-2:] == '_s'] # keep also shifted columns
keep_cols.append('month_int')
keep_cols.append('id') #keep id
keep_cols.append('y') #keep target var
return df[keep_cols]
|
eee8e27a60999c95e2354877526ae27d9679a3ca
| 28,441 |