content | sha1 | id |
---|---|---|
import numpy as np
def embed_tiles_in_json_sprite(tile_list, as_bytes=True, out_file=None):
"""Make a big rectangle containing the images for a brainsprite.
Parameters:
-----------
tile_list : list
List of 2d square numpy arrays to stick in a mosaic
Returns:
--------
mosaic : np.ndarray
Mosaic of tile images
"""
# Tiles are squares
tile_size = tile_list[0].shape[0]
num_tiles = len(tile_list)
num_tile_rows = nearest_square(num_tiles)
num_tile_cols = int(np.ceil(num_tiles/num_tile_rows))
mosaic = np.zeros((num_tile_rows * tile_size,
num_tile_cols * tile_size))
i_indices, j_indices = np.unravel_index(np.arange(num_tiles),
(num_tile_rows, num_tile_cols))
i_tile_offsets = tile_size * i_indices
j_tile_offsets = tile_size * j_indices
for tile, i_offset, j_offset in zip(tile_list, i_tile_offsets,
j_tile_offsets):
mosaic[i_offset:(i_offset + tile_size),
j_offset:(j_offset + tile_size)] = tile
if as_bytes:
img = mplfig(mosaic, out_file, as_bytes=as_bytes)
return dict(img=img, N=num_tile_rows, M=num_tile_cols,
pix=tile_size, num_slices=num_tiles)
return dict(mosaic=mosaic, N=num_tile_rows, M=num_tile_cols,
pix=tile_size, num_slices=num_tiles) | 2eca9eb999d69537fac60dfefd0d482576994868 | 7,915 |
import re
def removeUnicode(text):
""" Removes unicode strings like "\u002c" and "x96" """
text = re.sub(r'(\\u[0-9A-Fa-f]+)',r'', text)
text = re.sub(r'[^\x00-\x7f]',r'',text)
return text | f5c8090329ede82ce51601efa463537bba68b63a | 7,916 |
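A minimal usage sketch for the function above (the sample strings are illustrative, not from the original source):

print(removeUnicode("hello\\u002c world"))  # -> 'hello world' (escaped unicode sequence stripped)
print(removeUnicode("café naïve"))          # -> 'caf nave' (non-ASCII characters stripped)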
import numpy as np
import scipy.fft
def mfcc(tf, n_mfcc, fs, fmin=0.0, fmax=None):
"""
Extract MFCC vectors
Args:
tf : single-channel time-frequency domain signal,
indexed by 'tf'
n_mfcc : number of coefficients
fs : sample rate
fmin : (default 0) minimal frequency in Hz
fmax : (default fs/2) maximal frequency in Hz
Returns:
mfcc : MFCC
"""
if fmax is None:
fmax = fs / 2.0
n_frame, n_fbin = tf.shape
# get filter weights
freq = np.fft.fftfreq(n_fbin)
fbw = mel_freq_fbank_weight(n_mfcc, freq, fs, fmin=fmin, fmax=fmax)
# get log power
sigpow = np.real(tf * tf.conj())
logfpow = np.log(np.einsum('bf,tf->tb', fbw, sigpow) + 1e-20)
# DCT
mfcc = scipy.fft.dct(logfpow)
return mfcc | 58769db11aa0633dd846b9d427e6d35a0fff435e | 7,917 |
import logging
import numpy as np
from numpy import linalg
from spiceypy import furnsh, et2utc  # assumption: the SPICE helpers come from the spiceypy bindings
def transform(fname, metadata=False):
"""
This function reads a Mission Analysis Orbit file and performs a matrix
transformation on it. Currently only from the Mercury Equatorial frame to
the Earth Equatorial frame.
:param fname: The path to the orbit file.
:type fname: str.
:param metadata: Flag to return the metadata dictionary
    :type metadata: bool.
    :returns: dict -- the data dictionary (and the metadata dictionary when requested).
"""
furnsh("/Users/jmcaulif/Code/naif/generic/lsk/naif0010.tls")
logging.basicConfig(level=logging.INFO)
mdata = {}
data = {}
with open(fname, 'r') as fh:
for line in fh:
t, x, y, z, vx, vy, vz = [float(x) for x in line.split()]
T = np.array([[0.98159386604468, 0.19098031873327, 0.0],
[-0.16775718426422, 0.86223242348167,
0.47792549108063],
[0.09127436261733, -0.46912873047114,
0.87840037851502]])
Tinv = linalg.inv(T)
r = np.array([[x, y, z]])
v = np.array([[vx, vy, vz]])
r_new = Tinv.dot(r.T).T
v_new = Tinv.dot(v.T).T
x, y, z = r_new[0]
vx, vy, vz = v_new[0]
t = et2utc(t * 86400, 'isoc', 2)
print("{} {:9.2f} {:9.2f} {:9.2f} {:9.6f} {:9.6f} {:9.6f}".
format(t, x, y, z, vx, vy, vz))
fh.close()
if metadata:
return data, mdata
else:
return data | ce389f74ba35d3c92a94031563b187a7285ccb5b | 7,918 |
import requests
import json
def query_repository(repo_name, index_uuid, token, display_results=False):
"""
Display the ids ('subjects') of all items indexed in a repository.
:param repo_name: Textual name of repository to query, corresponds to 'name' field in conf file.
    :param index_uuid: UUID of the index to query (mapped from the index name).
    :param token: Bearer token used for the Authorization header.
:param display_results: Print ids to standard output
:return: List of result ids
"""
LOGGER.info("Querying index %s for repository %s" % (index_uuid, repo_name))
querylimit = 20
headers = {'Authorization' : ('Bearer ' + token), 'Content-Type' : 'application/json'}
queryobj = {"@datatype": "GSearchRequest", "@version": "2016-11-09", "advanced": True, "offset": 0,
"limit": querylimit, "q": "*", "filters": [
{"@datatype": "GFilter", "@version": "2016-11-09", "type": "match_any",
"field_name": "https://frdr\\.ca/schema/1\\.0#origin\\.id", "values": [""]}]}
result_ids = []
queryobj["filters"][0]["values"][0] = repo_name
offset = 0
while True:
r = requests.post('https://' + _api_host + '/v1/index/' + index_uuid + '/search', headers=headers, json=queryobj)
search_results = json.loads(r.text)
results_count = search_results['count']
LOGGER.info("Got %i results" % (results_count))
if results_count == 0:
break
for result in search_results['gmeta']:
result_ids.append(result['subject'])
offset = offset + querylimit
queryobj["offset"] = offset
if display_results:
print('\n'.join(result_ids))
return result_ids | 8a6b4f90374b90504a375c5d44a6dce6f32fb936 | 7,919 |
def power(x1, x2, out=None, where=True, dtype=None):
"""
First array elements raised to powers from second array, element-wise.
Raises each base in `x1` to the positionally-corresponding power in `x2`.
Note:
Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
not supported.
When `where` is provided, `out` must have a tensor value. `out` is not supported
for storing the result, however it can be used in combination with `where` to set
the value at indices for which `where` is set to False.
On GPU, the supported dtypes are np.float16, and np.float32.
Args:
x1 (Tensor): the bases.
x2 (Tensor): the exponents.
out (Tensor or None, optional): defaults to None.
where (Tensor or None, optional): For any non-default value of type other
than :class:`Tensor` or :class:`None`, the output retains its original value.
This condition is broadcasted over the input. At locations where the
condition is `True`, the out array will be set to the ufunc result.
Elsewhere, the out array will retain its original value. Note that
if an uninitialized out array is created via the default ``out=None``,
locations within it where the condition is `False` will remain
uninitialized.
dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
output Tensor.
Returns:
Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This
is a scalar if both `x1` and `x2` are scalars.
Raises:
TypeError: if the input is not a tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> x1 = np.full((3, 2), [1, 2]).astype('float32')
>>> x2 = np.full((3, 2), [3, 4]).astype('float32')
>>> output = np.power(x1, x2)
>>> print(output)
        [[ 1. 16.]
         [ 1. 16.]
         [ 1. 16.]]
"""
return _apply_tensor_op(F.tensor_pow, x1, x2, out=out, where=where, dtype=dtype) | ecc265e96c36c47177aa96797d355a55f35114d6 | 7,920 |
def _str2bool(s):
"""文字列からboolへの変換"""
return s.lower() in ["true", "t", "yes", "1"] | 877340b7926541a5b8f56cb7f1acd5a54e08a987 | 7,921 |
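A quick check of the accepted truthy spellings (illustrative values):

assert _str2bool("True") is True
assert _str2bool("YES") is True
assert _str2bool("0") is False  # anything outside the list is False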
import random
def _generate_nonce(length=42):
"""
Generate an alpha numeric string that is unique for each request.
Twitter used a 42 character alpha-numeric (case-sensitive) string in the
API documentation. However, they note "any approach which produces a
relatively random alphanumeric string should be OK here." I opted not to
use a cryptographically secure source of entropy. `SystemRandom` is
convenient, but it uses file IO to connect to `/dev/urandom`. Adding
`async` machinery here seems like expensive complexity.
"""
return "".join(random.choice(ALPHANUMERIC) for _ in range(length)) | 034c4fee363d47e9af98c0135159f53179d27fdd | 7,922 |
def int_(value):
"""Validate that the config option is an integer.
Automatically also converts strings to ints.
"""
check_not_templatable(value)
if isinstance(value, int):
return value
if isinstance(value, float):
if int(value) == value:
return int(value)
raise Invalid(
f"This option only accepts integers with no fractional part. Please remove the fractional part from {value}"
)
value = string_strict(value).lower()
base = 10
if value.startswith("0x"):
base = 16
try:
return int(value, base)
except ValueError:
# pylint: disable=raise-missing-from
raise Invalid(f"Expected integer, but cannot parse {value} as an integer") | f5d5f729a1bbec418f3b270b4e28be2cf9cc7cd6 | 7,924 |
def explode(screen):
"""Convert a string representing a screen
display into a list of lists."""
return [list(row) for row in screen.split('\n')] | a43a9d8c830c4a784bb9c3505c62aaf2077bb732 | 7,925 |
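A small usage sketch (illustrative screen string):

screen = "##.\n.#."
assert explode(screen) == [['#', '#', '.'], ['.', '#', '.']]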
def basic_info(user, keys):
"""Prints a table of basic user information"""
table = formatting.KeyValueTable(['Title', 'Basic Information'])
table.align['Title'] = 'r'
table.align['Basic Information'] = 'l'
table.add_row(['Id', user.get('id', '-')])
table.add_row(['Username', user.get('username', '-')])
if keys:
for key in user.get('apiAuthenticationKeys'):
table.add_row(['APIKEY', key.get('authenticationKey')])
table.add_row(['Name', "%s %s" % (user.get('firstName', '-'), user.get('lastName', '-'))])
table.add_row(['Email', user.get('email')])
table.add_row(['OpenID', user.get('openIdConnectUserName')])
address = "%s %s %s %s %s %s" % (
user.get('address1'), user.get('address2'), user.get('city'), user.get('state'),
user.get('country'), user.get('postalCode'))
table.add_row(['Address', address])
table.add_row(['Company', user.get('companyName')])
table.add_row(['Created', user.get('createDate')])
table.add_row(['Phone Number', user.get('officePhone')])
if user.get('parentId', False):
table.add_row(['Parent User', utils.lookup(user, 'parent', 'username')])
table.add_row(['Status', utils.lookup(user, 'userStatus', 'name')])
table.add_row(['PPTP VPN', user.get('pptpVpnAllowedFlag', 'No')])
table.add_row(['SSL VPN', user.get('sslVpnAllowedFlag', 'No')])
for login in user.get('unsuccessfulLogins', {}):
login_string = "%s From: %s" % (login.get('createDate'), login.get('ipAddress'))
table.add_row(['Last Failed Login', login_string])
break
for login in user.get('successfulLogins', {}):
login_string = "%s From: %s" % (login.get('createDate'), login.get('ipAddress'))
table.add_row(['Last Login', login_string])
break
return table | 805b8e12f531b1c6b31aeca847568cbc1b2e929c | 7,927 |
def iobes_iob(tags):
"""
IOBES -> IOB
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'rel':
new_tags.append(tag)
elif tag.split('-')[0] == 'B':
new_tags.append(tag)
elif tag.split('-')[0] == 'I':
new_tags.append(tag)
elif tag.split('-')[0] == 'S':
new_tags.append(tag.replace('S-', 'B-'))
elif tag.split('-')[0] == 'E':
new_tags.append(tag.replace('E-', 'I-'))
elif tag.split('-')[0] == 'O':
new_tags.append(tag)
else:
raise Exception('Invalid format!')
return new_tags | 1a2f715edfc37b387944f84c4f149f21a0e86e74 | 7,928 |
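A short usage sketch showing the tag conversion (illustrative tags):

tags = ['B-PER', 'E-PER', 'S-LOC', 'O', 'rel']
print(iobes_iob(tags))  # -> ['B-PER', 'I-PER', 'B-LOC', 'O', 'rel']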
def draw_handler(canvas):
"""
Event handler that is responsible for all drawing. It
receives "canvas" object and draws the "Pong" table,
the "moving" ball and the scores of each "Player".
It is also responsible for testing whether the ball
touches/collides with the "gutters" or the "paddles".
"""
# These are (global) numbers; vertical "position" of
# each "paddle".
global paddle1_pos, paddle2_pos
# These are (global) numbers; "score" of each "Player".
global score1, score2
# These are vectors stored as (global) "[x,y]" lists;
# ball "position" and "velocity".
global ball_pos, ball_vel
# This is (global) number; keeps track of the time in
# "seconds".
global seconds
# Draw middle line and "gutters" of "Pong" table.
canvas.draw_line([WIDTH / 2, 0], [WIDTH / 2, HEIGHT], LINE_WIDTH, COLOR)
canvas.draw_line([PAD_WIDTH, 0], [PAD_WIDTH, HEIGHT], LINE_WIDTH, COLOR)
canvas.draw_line([WIDTH - PAD_WIDTH, 0], [WIDTH - PAD_WIDTH, HEIGHT], LINE_WIDTH, COLOR)
# "Postpone" the beginning of new game if "Timer" is
    # already running by "resetting" ball "position" at the
# middle of the table.
if timer.is_running():
ball_pos = [WIDTH / 2, HEIGHT / 2]
# Print message about the remaining time until the
# beginning of the new game by referencing the
# global "seconds" counter.
canvas.draw_text("new game will start in " +
str(NEW_GAME_DELAY - seconds) +
" seconds" +
("." * (NEW_GAME_DELAY - seconds)),
[WIDTH // 12, 3 * HEIGHT // 4], 3 * FONT_SIZE // 10, COLOR, FONT_FACE)
else:
# "Timer" has expired; update ball "position" for
# the new game.
ball_pos[0] += ball_vel[0]
ball_pos[1] += ball_vel[1]
# Test whether the ball touches/collides with the left
# "gutter" (offset from the left edge of the "canvas"
# by the width of the "paddle").
if ball_pos[0] <= (BALL_RADIUS + PAD_WIDTH):
# Check whether the ball is actually striking left
# "paddle" when it touches left "gutter". If so,
# reflect the ball back into play; ball's "velocity"
# increased by the "acceleration" configured.
if ((paddle1_pos - HALF_PAD_HEIGHT) <= ball_pos[1] <= (paddle1_pos + HALF_PAD_HEIGHT)):
ball_vel[0] = -ball_vel[0] * BALL_VELOCITY_ACCELERATION
else:
# Ball touched "gutter". Respawn the ball in
# the center of the table headed towards the
# opposite "gutter" and of course update score
# of "Player" 2 (right) by the "points"
# configured.
score2 += POINTS
# Start a game of "Pong". Start also a "Timer"
# to "postpone" the beginning of the new game.
if not timer.is_running():
timer.start()
spawn_ball(RIGHT)
# Test whether the ball touches/collides with the right
# "gutter" (offset from the right edge of the "canvas"
# by the width of the "paddle").
elif ball_pos[0] >= ((WIDTH - 1) - BALL_RADIUS - PAD_WIDTH):
# Check whether the ball is actually striking right
# "paddle" when it touches right "gutter". If so,
# reflect the ball back into play; ball's "velocity"
# increased by the "acceleration" configured.
if ((paddle2_pos - HALF_PAD_HEIGHT) <= ball_pos[1] <= (paddle2_pos + HALF_PAD_HEIGHT)):
ball_vel[0] = -ball_vel[0] * BALL_VELOCITY_ACCELERATION
else:
# Ball touched "gutter". Respawn the ball in
# the center of the table headed towards the
# opposite "gutter" and of course update score
# of "Player" 1 (left) by the "points"
# configured.
score1 += POINTS
# Start a game of "Pong". Start also a "Timer"
# to "postpone" the beginning of the new game.
if not timer.is_running():
timer.start()
spawn_ball(LEFT)
# Collide and reflect off of top side of the "canvas".
elif ball_pos[1] <= BALL_RADIUS:
ball_vel[1] = -ball_vel[1]
# Collide and reflect off of bottom side of the "canvas".
elif ball_pos[1] >= ((HEIGHT - 1) - BALL_RADIUS):
ball_vel[1] = -ball_vel[1]
# Draw a ball moving across the "Pong" table.
canvas.draw_circle(ball_pos, BALL_RADIUS, 2 * LINE_WIDTH, COLOR, COLOR)
# Update paddle's vertical "position", by
# referencing the two global variables that contain the
# vertical "velocities" of the "paddle". Keep "paddle"
# on the screen by calling the proper "helper" function.
if keep_paddle_on_screen(paddle1_pos, paddle1_vel):
paddle1_pos += paddle1_vel
if keep_paddle_on_screen(paddle2_pos, paddle2_vel):
paddle2_pos += paddle2_vel
# Draw left and right "paddles" in their respective
# "gutters".
canvas.draw_polygon([[0, paddle1_pos - HALF_PAD_HEIGHT],
[PAD_WIDTH, paddle1_pos - HALF_PAD_HEIGHT],
[PAD_WIDTH, paddle1_pos + HALF_PAD_HEIGHT],
[0, paddle1_pos + HALF_PAD_HEIGHT]],
LINE_WIDTH, COLOR, COLOR)
canvas.draw_polygon([[WIDTH - PAD_WIDTH, paddle2_pos - HALF_PAD_HEIGHT],
[WIDTH , paddle2_pos - HALF_PAD_HEIGHT],
[WIDTH, paddle2_pos + HALF_PAD_HEIGHT],
[WIDTH - PAD_WIDTH, paddle2_pos + HALF_PAD_HEIGHT]],
LINE_WIDTH, COLOR, COLOR)
# Draw scores;
# but first get the width of the "score" text in pixels
# for each "Player"; useful in (later) computing the
# position to draw the "score" text - centered justified
# on the "canvas field" of each player.
score_textwidth_in_px = frame.get_canvas_textwidth(str(score1), FONT_SIZE, FONT_FACE)
score_point_x = (WIDTH // 4) - (score_textwidth_in_px // 2)
score_point_y = (HEIGHT // 4)
canvas.draw_text(str(score1), [score_point_x, score_point_y], FONT_SIZE, COLOR, FONT_FACE)
score_textwidth_in_px = frame.get_canvas_textwidth(str(score2), FONT_SIZE, FONT_FACE)
score_point_x = (3 * WIDTH // 4) - (score_textwidth_in_px // 2)
score_point_y = (HEIGHT // 4)
canvas.draw_text(str(score2), [score_point_x, score_point_y], FONT_SIZE, COLOR, FONT_FACE)
return None | ad9d5be8bbc1eb1b1612c6bfd35cc774bcacbd7a | 7,929 |
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
def tf_idf(df, vocab):
    """Build a TF-IDF matrix from the 'text' column of a DataFrame.
    https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76
    Args:
        df: DataFrame with a 'text' column of documents
        vocab: vocabulary passed to the vectorizer
    """
docs = []
for text in df['text'].tolist():
docs += [text]
vectorizer = TfidfVectorizer(tokenizer=token_list, lowercase=False, vocabulary=vocab)
vectors = vectorizer.fit_transform(docs)
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
tfidf_matrix = pd.DataFrame(denselist, columns=feature_names)
return tfidf_matrix | ef75e51f1d4b69dcf6a5ab908f56eff8a25852f7 | 7,930 |
def get_activation_func(activation):
"""Turns a string activation function name into a function.
"""
if isinstance(activation, string_types):
# Get the activation function.
activation = activation.lower()
if activation == "tanh":
activation_func = tanh
elif activation == "abstanh":
activation_func = abs_tanh
elif activation in ["sig", "sigmoid"]:
activation_func = sigmoid
elif activation in ["rec", "relu"]:
activation_func = rectify
elif activation in ["prelu_shelf"]:
activation_func = parametric_flat_relu
elif activation == "relu_max":
activation_func = rectify_max # For performance comparisons with abs version of rectify
elif activation in ["rec_para", "prelu"]:
activation_func = parametric_rectifier
elif activation == "maxout":
activation_func = maxout
elif activation == "linear":
activation_func = linear
else:
raise ValueError("Unrecognized activation: {}".format(activation))
else:
activation_func = activation
return activation_func | 6cb6fccdacf44c3fc3fac242d69a2494459c1318 | 7,932 |
def create_schema(hostname='localhost', username=None, password=None,
dbname=None, port=None, schema_name=None):
"""Create test schema."""
    cn = create_cn(hostname, password, username, dbname, port)
    schema = schema_name or dbname
    with cn.cursor() as cr:
        cr.execute('DROP SCHEMA IF EXISTS %s CASCADE' % schema)
        cr.execute('CREATE SCHEMA %s' % schema)
cn.close()
cn = create_cn(hostname, password, username, dbname, port)
return cn | ef1b8b77ca1cd88804f0afec9efc24caad3a601d | 7,934 |
def set_color_in_session(intent, session):
""" Sets the color in the session and prepares the speech to reply to the
user.
"""
card_title = intent['name']
session_attributes = {}
should_end_session = False
if 'Color' in intent['slots']:
favorite_color = intent['slots']['Color']['value']
session_attributes = create_favorite_color_attributes(favorite_color)
speech_output = "I now know the bus stop you are in is " + \
favorite_color + \
". You can ask me where your bus stop is by asking, " \
"what bus stop am I on?"
reprompt_text = "You can ask me where your bus stop is by asking, " \
"what bus stop am I on?"
else:
speech_output = "I'm not sure what bus stop you are in. " \
"Please try again."
reprompt_text = "I'm not sure what bus stop you are in " \
"You can ask me where your bus stop is by asking, " \
"what bus stop am I on?"
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session)) | 4783aa36b18fb723441d99a8ffcdfe4b587d2a45 | 7,935 |
def fpath_to_pgn(fpath):
"""Slices the pgn string from file path.
"""
return fpath.split('/')[-1].split('.jpeg')[0] | 1cc6cad60c5356b6c731947a59998117bf15035a | 7,936 |
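A one-line usage sketch (hypothetical path):

assert fpath_to_pgn('/data/games/abc123.jpeg') == 'abc123'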
def convert_to_constant(num):
"""
Convert one float argument to Constant, returning the converted object.
:param float num:
Float number to be converted to Constant
:return:
Float number converted to a Constant object
:rtype: object
"""
    return Constant(name=str(num), units=null_dimension, value=float(num))
def data_zip(data):
"""
输入数据,返回一个拼接了子项的列表,如([1,2,3], [4,5,6]) -> [[1,4], [2,5], [3,6]]
{"a":[1,2],"b":[3,4]} -> [{"a":1,"b":3}, {"a":2,"b":4}]
:param data: 数组 data
元组 (x, y,...)
字典 {"a":data1, "b":data2,...}
:return: 列表或数组
"""
if isinstance(data, tuple):
return [list(d) for d in zip(*data)]
if isinstance(data, dict):
data_list = []
keys = data.keys()
        for i in range(len(data[list(keys)[0]])):  # iterate over the items in the dict values
data_dict = {}
for key in keys:
data_dict[key] = data[key][i]
data_list.append(data_dict)
return data_list
return data | 31dcaa3905a7d062cfe994543df31f293fdc962a | 7,938 |
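A usage sketch matching the examples in the docstring:

print(data_zip(([1, 2, 3], [4, 5, 6])))      # -> [[1, 4], [2, 5], [3, 6]]
print(data_zip({"a": [1, 2], "b": [3, 4]}))  # -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]
print(data_zip([7, 8, 9]))                   # plain lists are returned unchanged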
def _days_in_leap_and_common_years(i_date, f_date):
"""Return the a tuple with number of days in common and leap years (respectively) between initial and final dates.
"""
iy = i_date.year
fy = f_date.year
days_in_leap = 0
days_in_common = 0
if iy == fy:
# same year
delta = f_date - i_date
if _isleap(iy):
days_in_leap += delta.days
else:
days_in_common += delta.days
elif fy - iy >= 1:
# different year
delta1 = i_date.replace(year = iy+1, month=1, day=1) - i_date # days in initial year
delta2 = f_date - f_date.replace(month=1, day=1) # days in final year
if _isleap(iy):
days_in_leap += delta1.days
else:
days_in_common += delta1.days
if _isleap(fy):
days_in_leap += delta2.days
else:
days_in_common += delta2.days
leaps_in_between = [y for y in range(iy+1, fy) if _isleap(y)]
commons_in_between = [y for y in range(iy+1, fy) if not(_isleap(y))]
days_in_leap += len(leaps_in_between) * 366
days_in_common += len(commons_in_between) * 365
#else:
#raise InputError(expr = "Error in days_in_years(), f_date.year must be greater than i_date.year")
return (days_in_leap, days_in_common) | f96b6c26fd8e87a543ffa6b6e7ed66144248d752 | 7,939 |
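A minimal check of the function above, assuming the undefined ``_isleap`` helper behaves like ``calendar.isleap``:

from calendar import isleap as _isleap  # assumption: _isleap matches calendar.isleap
from datetime import date

print(_days_in_leap_and_common_years(date(2019, 12, 31), date(2020, 12, 31)))  # -> (365, 1)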
import numpy as _np
def make_space_kernel(data, background_kernel, trigger_kernel, time,
time_cutoff=None, space_cutoff=None):
"""Produce a kernel object which evaluates the background kernel, and
the trigger kernel based on the space locations in the data, always using
the fixed time as passed in.
:param data: An array of shape `(3,N)` giving the space-time locations
      of events. Used when computing the triggered / aftershock events.
:param background_kernel: The kernel object giving the background risk
intensity. We assume this has a method `space_kernel` which gives just
      the two dimensional spatial kernel.
:param trigger_kernel: The kernel object giving the trigger / aftershock
risk intensity.
:param time: The fixed time coordinate to evaluate at.
:param time_cutoff: Optional; if set, then we assume the trigger_kernel is
zero for times greater than this value (to speed up evaluation).
:param space_cutoff: Optional; if set, then we assume the trigger_kernel is
zero for space distances greater than this value (to speed up evaluation).
:return: A kernel object which can be called on arrays of (2 dimensional
space) points.
"""
mask = data[0] < time
if time_cutoff is not None:
mask = mask & (data[0] > time - time_cutoff)
data_copy = _np.array(data[:, mask])
return SpaceKernel(time, background_kernel, trigger_kernel, data_copy, space_cutoff) | 5c84dfb89340e52e57fb0b28464b18b0487601ea | 7,940 |
import torch
from torch import nn
from torch.nn import functional as F
def get_dim_act_curv(args):
"""
Helper function to get dimension and activation at every layer.
:param args:
:return:
"""
if not args.act:
act = lambda x: x
else:
act = getattr(F, args.act)
acts = [act] * (args.num_layers - 1)
dims = [args.feat_dim]
    # Check that num_layers and hidden_dim match
if args.num_layers > 1:
hidden_dim = [int(h) for h in args.hidden_dim.split(',')]
if args.num_layers != len(hidden_dim) + 1:
raise RuntimeError('Check dimension hidden:{}, num_layers:{}'.format(args.hidden_dim, args.num_layers) )
dims = dims + hidden_dim
dims += [args.dim]
acts += [act]
n_curvatures = args.num_layers
if args.c_trainable == 1: # NOTE : changed from # if args.c is None:
# create list of trainable curvature parameters
curvatures = [nn.Parameter(torch.Tensor([args.c]).to(args.device)) for _ in range(n_curvatures)]
else:
# fixed curvature
curvatures = [torch.tensor([args.c]) for _ in range(n_curvatures)]
if not args.cuda == -1:
curvatures = [curv.to(args.device) for curv in curvatures]
return dims, acts, curvatures | 072a54a3a2060598cbbe0fc89efe0be4b7cdc63f | 7,941 |
import os
def write_charset_executable(mysql_charset_script_name, here):
"""Write to disk as an executable the file that will be used to issue the MySQL
statements that change the character set to UTF-8 -- return the absolute path.
"""
mysql_charset_script = os.path.join(here, mysql_charset_script_name)
if not os.path.exists(mysql_charset_script):
with open(mysql_charset_script, 'w') as f:
pass
    os.chmod(mysql_charset_script, 0o744)
return mysql_charset_script | b92ea0e48ccebd267856ababcb714e736fea812c | 7,943 |
import time
def readTemperature(file):
"""
Returns the temperature of the one wire sensor.
Pass in the file containing the one wire data (ds18b20+)
"""
lines = read_temp_raw(file)
while lines[0].strip()[-3:] != "YES":
time.sleep(0.2)
lines = read_temp_raw(file)
equals_pos = lines[1].find("t=")
if equals_pos != -1:
temp_string = lines[1][equals_pos + 2:]
# convert temperature to C
temp_c = float(temp_string) / 1000.0
return temp_c
return -273.15 | 3398f287ae98df4b72ff212cdcad1764a9bbe31b | 7,944 |
from math import exp
from random import randint
def energyAct(
grid, deltaE, xA, yA, zA, xB, yB, zB, temp, eList, i, dimensions):
"""Perform swap or not, based on deltaE value"""
kB = 8.617332e-5 # boltzmann constant, w/ ~eV units
kTemp = kB * temp
if deltaE <= 0: # Swap lowers energy, therefore is favourable,
# so perform swap in grid
grid = performSwap(grid, xA, yA, zA, xB, yB, zB, dimensions)
eList[i + 1] = eList[i] + deltaE
else: # i.e. deltaE > 0:
if temp == 0:
thermalEnergy = 0
else:
thermalEnergy = exp((-1 * deltaE) / (kTemp))
R = randint(0, 1000) / 1000
if thermalEnergy > R:
grid = performSwap(grid, xA, yA, zA, xB, yB, zB, dimensions)
eList[i + 1] = eList[i] + deltaE
else:
eList[i + 1] = eList[i]
return grid, eList | 2380991200c2b3c196b1fea4c4108e82f20a979b | 7,946 |
def lambda_handler(event, context):
"""
Lambda function that transforms input data and stores inital DB entry
Parameters
----------
event: dict, required
context: object, required Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
Lambda Output Format: dict
"""
log.log_request_and_context(event, context)
labeling_jobs = event["labelingJobs"]
batch_id = event["batchId"]
error_message = ""
"""
Example database entry input for batch
{
"BatchCurrentStep": "INPUT",
"BatchId": "notebook-test-08f874a7",
"BatchMetadataType": "INPUT",
"BatchStatus": "INTERNAL_ERROR",
"LabelingJobs": [
{
"inputConfig": {
"inputManifestS3Uri": "s3://smgt-qa-batch-input-468814823616-us-east-1/two-frame-manifest.manifest"
},
"jobLevel": 1,
"jobModality": "PointCloudObjectDetectionAudit",
"jobName": "notebook-test-08f874a7-first-level",
"jobType": "BATCH",
"labelCategoryConfigS3Uri": "s3://smgt-qa-batch-input-468814823616-us-east-1/first-level-label-category-file.json",
"maxConcurrentTaskCount": 1,
"taskAvailabilityLifetimeInSeconds": 864000,
"taskTimeLimitInSeconds": 604800,
"workteamArn": "arn:aws:sagemaker:us-east-1:468814823616:workteam/private-crowd/first-level"
},
{
"inputConfig": {
"chainFromJobName": "notebook-test-08f874a7-first-level"
},
"jobLevel": 2,
"jobModality": "PointCloudObjectDetectionAudit",
"jobName": "notebook-test-08f874a7-second-level",
"jobType": "BATCH",
"maxConcurrentTaskCount": 1,
"taskAvailabilityLifetimeInSeconds": 864000,
"taskTimeLimitInSeconds": 604800,
"workteamArn": "arn:aws:sagemaker:us-east-1:468814823616:workteam/private-crowd/first-level"
}
]
}
"""
db.insert_transformed_input_batch_metadata(
batch_id=batch_id,
batch_current_step=BatchCurrentStep.INPUT,
batch_status=BatchStatus.IN_PROGRESS,
batch_metadata_type=BatchMetadataType.INPUT,
error_message=error_message,
labeling_jobs=labeling_jobs,
)
return {
"batch_id": batch_id,
} | 83bba41d28a7b37da6579dfc2641380839f9d785 | 7,947 |
from pygame.locals import KMOD_RSHIFT, KMOD_LSHIFT, KMOD_CAPS
def getModCase(s, mod):
"""Checks the state of the shift and caps lock keys, and switches the case of the s string if needed."""
if bool(mod & KMOD_RSHIFT or mod & KMOD_LSHIFT) ^ bool(mod & KMOD_CAPS):
return s.swapcase()
else:
return s | 4faa2963e96786c5495443382585188b3a6ff119 | 7,948 |
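A small usage sketch, assuming pygame is installed so the modifier-key constants are available:

from pygame.locals import KMOD_LSHIFT, KMOD_CAPS

print(getModCase("Hello", KMOD_LSHIFT))              # shift only -> 'hELLO'
print(getModCase("Hello", KMOD_LSHIFT | KMOD_CAPS))  # shift cancels caps lock -> 'Hello'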
from pathlib import Path
from tempfile import gettempdir
def try_provider(package, provider, domain):
"""Try using a provider."""
downloaded_file = None
data = None
apk_name = f'{package}.apk'
temp_file = Path(gettempdir()) / apk_name
link = find_apk_link(provider, domain)
if link:
downloaded_file = download_file(link, temp_file)
if downloaded_file:
data = add_apk(downloaded_file, apk_name)
if data:
return data
return None | e43a6b18a5783722c148a537f21e501a1f4e5928 | 7,950 |
def _get_list(sline):
"""Takes a list of strings and converts them to floats."""
try:
sline2 = convert_to_float(sline)
except ValueError:
print("sline = %s" % sline)
raise SyntaxError('cannot parse %s' % sline)
return sline2 | 2b315b12508638fe603378fdd3127bd814f4313d | 7,951 |
def add_number(partitions, number):
"""
Adds to the partition provided `number` in all its combinations
"""
# Add to each list in partitions add 1
prods = partitions.values()
nKeys = [(1,) + x for x in partitions.keys()]
# apply sum_ones on each partition, and add results to partitions
    # Don't use reduce; the continual list creation is just too slow
#partitions = reduce(lambda acc, x: acc + sum_ones(x), partitions, [])
newParts = []
newProds = []
for part, prod in zip(nKeys, prods):
npart, nprod = sum_ones(part, prod)
newParts.extend(npart)
newProds.extend(nprod)
# Remove duplicates
return dict(zip(newParts, newProds)) | cade1ddbd4002b76fc2652c59fe4f0a9bcdcc9b9 | 7,952 |
import colorsys
import numpy as np
def _color_to_rgb(color, input):
"""Add some more flexibility to color choices."""
if input == "hls":
color = colorsys.hls_to_rgb(*color)
elif input == "husl":
color = husl.husl_to_rgb(*color)
color = tuple(np.clip(color, 0, 1))
elif input == "xkcd":
color = xkcd_rgb[color]
return color | 7b3a502eba4a48dbd9b1e44d278aee58eb1ea22c | 7,953 |
from datetime import timedelta
def in_days(range_in_days):
"""
Generate time range strings between start and end date where each range is range_in_days days long
:param range_in_days: number of days
:return: list of strings with time ranges in the required format
"""
delta = observation_period_end - observation_period_start # timedelta
period_starts = []
for d in range(0, delta.days + 1, range_in_days):
# print(observation_period_start + timedelta(days=d))
period_starts.append(observation_period_start + timedelta(days=d))
start_end = []
for i, start in enumerate(period_starts[:-1]):
start_end.append((start, period_starts[i+1] - timedelta(days=1)))
time_periods = [start.strftime("%Y%m%d") + ":" + end.strftime("%Y%m%d") for start, end in start_end]
return time_periods | f9e77eac3fd151923cb14d05d291a0e362bd11b5 | 7,954 |
from pathlib import Path
from typing import List
import re
def tags_in_file(path: Path) -> List[str]:
"""Return all tags in a file."""
matches = re.findall(r'@([a-zA-Z1-9\-]+)', path.read_text())
return matches | 1071c22ac79f51697b2ed18896aa1d17568ecb2c | 7,955 |
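A self-contained usage sketch that writes a temporary feature file (hypothetical file name and tags):

import tempfile
from pathlib import Path

tmp = Path(tempfile.gettempdir()) / "example.feature"
tmp.write_text("@smoke @slow-test\nFeature: demo\n")
print(tags_in_file(tmp))  # -> ['smoke', 'slow-test']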
import torch
from torch.nn.utils.rnn import pad_sequence
def pad_to_longest_in_one_batch(batch):
"""According to the longest item to pad dataset in one batch.
Notes:
usage of pad_sequence:
seq_list = [(L_1, dims), (L_2, dims), ...]
item.size() must be (L, dims)
return (longest_len, len(seq_list), dims)
Args:
batch: [
(noisy_mag_1, noise_mag_1, clean_mag_1, n_frames_1),
(noisy_mag_2, noise_mag_2, clean_mag_2, n_frames_2),
...
]
"""
noisy_mag_list = []
mask_mag_list = []
clean_mag_list = []
n_frames_list = []
for noisy_mag, mask, clean_mag, n_frames in batch:
noisy_mag_list.append(torch.t(torch.tensor(noisy_mag))) # the shape of tensor is (T, F).
mask_mag_list.append(torch.t(torch.tensor(mask)))
clean_mag_list.append(torch.t(torch.tensor(clean_mag)))
n_frames_list.append(n_frames)
noisy_mag_one_batch = pad_sequence(noisy_mag_list) # the shape is (longest T, len(seq_list), F)
mask_one_batch = pad_sequence(mask_mag_list)
clean_mag_one_batch = pad_sequence(clean_mag_list)
noisy_mag_one_batch = noisy_mag_one_batch.permute(1, 0, 2) # the shape is (len(seq_list), longest T, F)
mask_one_batch = mask_one_batch.permute(1, 0, 2)
clean_mag_one_batch = clean_mag_one_batch.permute(1, 0, 2)
# (batch_size, longest T, F)
return noisy_mag_one_batch, mask_one_batch, clean_mag_one_batch, n_frames_list | 3892b6002ae0f0cb0b6ce4c29128120a8f63237a | 7,956 |
def create_instance(c_instance):
""" Creates and returns the Twister script """
return Twister(c_instance) | acceb844eae8a3c9a8c734e55c4c618f990f1ae0 | 7,957 |
from typing import Type
from typing import Callable
def command_handler(command_type: Type[CommandAPI],
*,
name: str = None) -> Callable[[CommandHandlerFn], Type[CommandHandler]]:
"""
Decorator that can be used to construct a CommandHandler from a simple
function.
.. code-block:: python
@command_handler(Ping)
def handle_ping(connection, msg):
connection.get_base_protocol().send_pong()
"""
if name is None:
name = f'handle_{command_type.__name__}'
def decorator(fn: CommandHandlerFn) -> Type[CommandHandler]:
return type(
name,
(CommandHandler,),
{
'cmd_type': command_type,
'handle': staticmethod(fn),
},
)
return decorator | 1c6634691e366b61043b2c745d43cc2371090200 | 7,958 |
def check_ip_in_lists(ip, db_connection, penalties):
"""
Does an optimized ip lookup with the db_connection. Applies only the maximum penalty.
Args:
ip (str): ip string
db_connection (DBconnector obj)
penalties (dict): Contains tor_penalty, vpn_penalty, blacklist_penalty keys with integer values
Returns:
:int: penalty_added
"""
penalties = {'tor': int(penalties['tor_penalty']), 'vpn': int(penalties['vpn_penalty']), 'blacklist': int(penalties['ip_blacklist_penalty'])}
penalties = sorted(penalties.items(), key=lambda x: x[1])
# sort by penalty value to check in that order and perform early stopping
penalty_added = 0
for penalty_type, penalty_value in penalties:
if penalty_value == 0:
continue
if penalty_type == 'tor':
if db_connection.set_exists('tor_ips', ip):
penalty_added = penalty_value
elif penalty_type == 'blacklist':
if db_connection.set_exists('blacklist_ips', ip):
penalty_added = penalty_value
elif db_connection.set_exists('blacklist_ips', '.'.join(ip.split('.')[:3])):
penalty_added = penalty_value
elif db_connection.set_exists('blacklist_ips', '.'.join(ip.split('.')[:2])):
penalty_added = penalty_value
elif penalty_type == 'vpn':
if db_connection.set_exists('vpn_ips', ip):
penalty_added = penalty_value
elif db_connection.set_exists('vpn_ips', '.'.join(ip.split('.')[:3])):
penalty_added = penalty_value
elif db_connection.set_exists('vpn_ips', '.'.join(ip.split('.')[:2])):
penalty_added = penalty_value
if penalty_added > 0:
break
return penalty_added | 2d6e3615d4b0d9b0fb05e7a0d03708856ffcbfef | 7,959 |
import numpy
def scan(fn,
sequences=None,
outputs_info=None,
non_sequences=None,
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=None,
name=None,
options=None,
profile=False):
"""
This function constructs and applies a Scan op to the provided
arguments.
:param fn:
``fn`` is a function that describes the operations involved in one
step of ``scan``. ``fn`` should construct variables describing the
output of one iteration step. It should expect as input theano
variables representing all the slices of the input sequences
and previous values of the outputs, as well as all other arguments
given to scan as ``non_sequences``. The order in which scan passes
these variables to ``fn`` is the following :
* all time slices of the first sequence
* all time slices of the second sequence
* ...
* all time slices of the last sequence
* all past slices of the first output
            * all past slices of the second output
* ...
* all past slices of the last output
* all other arguments (the list given as `non_sequences` to
scan)
The order of the sequences is the same as the one in the list
`sequences` given to scan. The order of the outputs is the same
as the order of ``outputs_info``. For any sequence or output the
order of the time slices is the same as the one in which they have
been given as taps. For example if one writes the following :
.. code-block:: python
scan(fn, sequences = [ dict(input= Sequence1, taps = [-3,2,-1])
, Sequence2
, dict(input = Sequence3, taps = 3) ]
, outputs_info = [ dict(initial = Output1, taps = [-3,-5])
, dict(initial = Output2, taps = None)
, Output3 ]
, non_sequences = [ Argument1, Argument 2])
``fn`` should expect the following arguments in this given order:
#. ``Sequence1[t-3]``
#. ``Sequence1[t+2]``
#. ``Sequence1[t-1]``
#. ``Sequence2[t]``
#. ``Sequence3[t+3]``
#. ``Output1[t-3]``
#. ``Output1[t-5]``
#. ``Output3[t-1]``
#. ``Argument1``
#. ``Argument2``
The list of ``non_sequences`` can also contain shared variables
used in the function, though ``scan`` is able to figure those
out on its own so they can be skipped. For the clarity of the
        code we recommend though to provide them to scan. To some extent
``scan`` can also figure out other ``non sequences`` (not shared)
even if not passed to scan (but used by `fn`). A simple example of
this would be :
.. code-block:: python
import theano.tensor as TT
W = TT.matrix()
W_2 = W**2
def f(x):
return TT.dot(x,W_2)
The function is expected to return two things. One is a list of
outputs ordered in the same order as ``outputs_info``, with the
difference that there should be only one output variable per
output initial state (even if no tap value is used). Secondly
`fn` should return an update dictionary (that tells how to
update any shared variable after each iteration step). The
dictionary can optionally be given as a list of tuples. There is
        no constraint on the order of these two lists, ``fn`` can return
either ``(outputs_list, update_dictionary)`` or
``(update_dictionary, outputs_list)`` or just one of the two (in
case the other is empty).
To use ``scan`` as a while loop, the user needs to change the
function ``fn`` such that also a stopping condition is returned.
To do so, he/she needs to wrap the condition in an ``until`` class.
The condition should be returned as a third element, for example:
.. code-block:: python
...
return [y1_t, y2_t], {x:x+1}, theano.scan_module.until(x < 50)
        Note that a number of steps (considered in here as the maximum
        number of steps) is still required even though a condition is
        passed (and it is used to allocate memory if needed).
:param sequences:
``sequences`` is the list of Theano variables or dictionaries
describing the sequences ``scan`` has to iterate over. If a
sequence is given as wrapped in a dictionary, then a set of optional
information can be provided about the sequence. The dictionary
should have the following keys:
* ``input`` (*mandatory*) -- Theano variable representing the
sequence.
* ``taps`` -- Temporal taps of the sequence required by ``fn``.
They are provided as a list of integers, where a value ``k``
          implies that at iteration step ``t`` scan will pass to ``fn``
the slice ``t+k``. Default value is ``[0]``
Any Theano variable in the list ``sequences`` is automatically
wrapped into a dictionary where ``taps`` is set to ``[0]``
:param outputs_info:
``outputs_info`` is the list of Theano variables or dictionaries
describing the initial state of the outputs computed
recurrently. When this initial states are given as dictionary
optional information can be provided about the output corresponding
to these initial states. The dictionary should have the following
keys:
* ``initial`` -- Theano variable that represents the initial
state of a given output. In case the output is not computed
          recursively (think of a map) and does not require an initial
          state, this field can be skipped. Given that only the previous
time step of the output is used by ``fn`` the initial state
should have the same shape as the output. If multiple time
taps are used, the initial state should have one extra
dimension that should cover all the possible taps. For example
if we use ``-5``, ``-2`` and ``-1`` as past taps, at step 0,
``fn`` will require (by an abuse of notation) ``output[-5]``,
``output[-2]`` and ``output[-1]``. This will be given by
the initial state, which in this case should have the shape
(5,)+output.shape. If this variable containing the initial
state is called ``init_y`` then ``init_y[0]`` *corresponds to*
          ``output[-5]``. ``init_y[1]`` *corresponds to* ``output[-4]``,
``init_y[2]`` corresponds to ``output[-3]``, ``init_y[3]``
          corresponds to ``output[-2]``, ``init_y[4]`` corresponds to
``output[-1]``. While this order might seem strange, it comes
natural from splitting an array at a given point. Assume that
we have a array ``x``, and we choose ``k`` to be time step
``0``. Then our initial state would be ``x[:k]``, while the
output will be ``x[k:]``. Looking at this split, elements in
``x[:k]`` are ordered exactly like those in ``init_y``.
* ``taps`` -- Temporal taps of the output that will be pass to
``fn``. They are provided as a list of *negative* integers,
where a value ``k`` implies that at iteration step ``t`` scan
will pass to ``fn`` the slice ``t+k``.
``scan`` will follow this logic if partial information is given:
* If an output is not wrapped in a dictionary, ``scan`` will wrap
it in one assuming that you use only the last step of the output
(i.e. it makes your tap value list equal to [-1]).
* If you wrap an output in a dictionary and you do not provide any
taps but you provide an initial state it will assume that you are
using only a tap value of -1.
* If you wrap an output in a dictionary but you do not provide any
initial state, it assumes that you are not using any form of
taps.
        * If you provide a ``None`` instead of a variable or an empty
dictionary ``scan`` assumes that you will not use any taps for
this output (like for example in case of a map)
If ``outputs_info`` is an empty list or None, ``scan`` assumes
that no tap is used for any of the outputs. If information is
provided just for a subset of the outputs an exception is
raised (because there is no convention on how scan should map
the provided information to the outputs of ``fn``)
:param non_sequences:
``non_sequences`` is the list of arguments that are passed to
``fn`` at each steps. One can opt to exclude variable
used in ``fn`` from this list as long as they are part of the
computational graph, though for clarity we encourage not to do so.
:param n_steps:
``n_steps`` is the number of steps to iterate given as an int
or Theano scalar. If any of the input sequences do not have
enough elements, scan will raise an error. If the *value is 0* the
outputs will have *0 rows*. If the value is negative, ``scan``
will run backwards in time. If the ``go_backwards`` flag is already
set and also ``n_steps`` is negative, ``scan`` will run forward
        in time. If n_steps is not provided, ``scan`` will figure
out the amount of steps it should run given its input sequences.
:param truncate_gradient:
``truncate_gradient`` is the number of steps to use in truncated
BPTT. If you compute gradients through a scan op, they are
computed using backpropagation through time. By providing a
different value then -1, you choose to use truncated BPTT instead
of classical BPTT, where you go for only ``truncate_gradient``
number of steps back in time.
:param go_backwards:
``go_backwards`` is a flag indicating if ``scan`` should go
backwards through the sequences. If you think of each sequence
as indexed by time, making this flag True would mean that
``scan`` goes back in time, namely that for any sequence it
starts from the end and goes towards 0.
:param name:
When profiling ``scan``, it is crucial to provide a name for any
instance of ``scan``. The profiler will produce an overall
profile of your code as well as profiles for the computation of
one step of each instance of ``scan``. The ``name`` of the instance
appears in those profiles and can greatly help to disambiguate
information.
:param mode:
It is recommended to leave this argument to None, especially
when profiling ``scan`` (otherwise the results are not going to
be accurate). If you prefer the computations of one step of
        ``scan`` to be done differently than the entire function, you
can use this parameter to describe how the computations in this
loop are done (see ``theano.function`` for details about
possible values and their meaning).
:param profile:
Flag or string. If true, or different from the empty string, a
profile object will be created and attached to the inner graph of
scan. In case ``profile`` is True, the profile object will have the
name of the scan instance, otherwise it will have the passed string.
Profile object collect (and print) information only when running the
inner graph with the new cvm linker ( with default modes,
other linkers this argument is useless)
:rtype: tuple
:return: tuple of the form (outputs, updates); ``outputs`` is either a
Theano variable or a list of Theano variables representing the
outputs of ``scan`` (in the same order as in
``outputs_info``). ``updates`` is a subclass of dictionary
specifying the
update rules for all shared variables used in scan
This dictionary should be passed to ``theano.function`` when
you compile your function. The change compared to a normal
dictionary is that we validate that keys are SharedVariable
and addition of those dictionary are validated to be consistent.
"""
# Note : see the internal documentation of the scan op for naming
# conventions and all other details
if options is None:
options = {}
rvals = scan_utils.canonical_arguments(sequences,
outputs_info,
non_sequences,
go_backwards,
n_steps)
inputs, states_and_outputs_info, parameters, T = rvals
# If we provided a known number of steps ( before compilation)
# and if that number is 1 or -1, then we can skip the Scan Op,
# and just apply the inner function once
# To do that we check here to see the nature of n_steps
T_value = None
if isinstance(n_steps, (float, int)):
T_value = int(n_steps)
else:
try:
T_value = opt.get_scalar_constant_value(n_steps)
except (TypeError, AttributeError):
T_value = None
if T_value in (1, -1):
return one_step_scan(fn,
inputs,
states_and_outputs_info,
parameters,
truncate_gradient)
# 1. Variable representing the current time step
t = scalar_shared(numpy.int64(0), name='t')
# 2. Allocate memory for the states of scan.
mintaps = []
lengths = []
for pos, arg_info in enumerate(states_and_outputs_info):
if arg_info.get('taps', None) == [-1]:
mintaps.append(1)
lengths.append(scalar_shared(numpy.int64(0),
name='l%d' % pos))
arg_info['initial'] = scan_utils.expand(tensor.unbroadcast(
tensor.shape_padleft(arg_info['initial']), 0), T)
elif arg_info.get('taps', None):
if numpy.any(numpy.array(arg_info.get('taps', [])) > 0):
# Make sure we do not have requests for future values of a
# sequence we can not provide such values
raise ValueError('Can not use future taps of outputs',
arg_info)
mintap = abs(numpy.min(arg_info['taps']))
lengths.append(scalar_shared(numpy.int64(0),
name='l%d' % pos))
mintaps.append(mintap)
arg_info['initial'] = scan_utils.expand(
arg_info['initial'][:mintap], T)
else:
mintaps.append(0)
lengths.append(scalar_shared(numpy.int64(0),
name='l%d' % pos))
    # 3. Generate arguments for the function passed to scan. This
    # function will return the outputs that need to be computed at every
    # timestep
inputs_slices = [input[t] for input in inputs]
states_slices = []
for n, state in enumerate(states_and_outputs_info):
# Check if it is actually a state and not an output
if mintaps[n] != 0:
for k in state['taps']:
states_slices.append(
state['initial'][(t + mintaps[n] + k) % lengths[n]])
# 4. Construct outputs that are to be computed by the inner
# function of scan
args = inputs_slices + states_slices + parameters
cond, states_and_outputs, updates = \
scan_utils.get_updates_and_outputs(fn(*args))
# User is allowed to provide no information if it only behaves like a
# map
if (len(states_and_outputs) != len(states_and_outputs_info) and
len(states_and_outputs_info) == 0):
mintaps = [0] * len(states_and_outputs)
# 5. Construct the scan op
# 5.1 Construct list of shared variables with updates (those that
# can be treated as states (i.e. of TensorType) and those that can not
# (like Random States)
if cond is not None:
_cond = [cond]
else:
_cond = []
rvals = rebuild_collect_shared(
states_and_outputs + _cond,
updates=updates,
rebuild_strict=True,
copy_inputs_over=True,
no_default_updates=False)
# extracting the arguments
input_variables, cloned_outputs, other_rval = rvals
clone_d, update_d, update_expr, shared_inputs = other_rval
additional_input_states = []
additional_output_states = []
additional_lengths = []
additional_mintaps = []
original_numeric_shared_variables = []
non_numeric_input_states = []
non_numeric_output_states = []
original_non_numeric_shared_variables = []
pos = len(lengths)
for sv in shared_inputs:
if sv in update_d:
if isinstance(sv, (TensorVariable, TensorSharedVariable)):
# We can treat it as a sit sot
nw_state = scan_utils.expand(
tensor.unbroadcast(tensor.shape_padleft(sv), 0), T)
additional_lengths.append(scalar_shared(numpy.int64(0),
name='l%d' % pos))
pos = pos + 1
additional_mintaps.append(1)
additional_input_states.append(nw_state)
additional_output_states.append(
scan_utils.clone(tensor.set_subtensor(
nw_state[(t + 1) % additional_lengths[-1]],
update_d[sv])))
original_numeric_shared_variables.append(sv)
else:
non_numeric_input_states.append(sv)
non_numeric_output_states.append(update_d[sv])
original_non_numeric_shared_variables.append(sv)
# Replace shared variables in the update
_additional_output_states = []
replace = {}
for sv, buf in zip(original_numeric_shared_variables,
additional_input_states):
replace[sv] = buf[t]
for out in additional_output_states:
_additional_output_states.append(
scan_utils.clone(out, replace=replace))
additional_output_states = _additional_output_states
# 5.2 Collect inputs/outputs of the inner function
inputs = []
outputs = []
for n, mintap in enumerate(mintaps):
if mintap != 0:
input_state = states_and_outputs_info[n]['initial']
inputs.append(input_state)
outputs.append(
tensor.set_subtensor(
input_state[(t + mintap) % lengths[n]],
states_and_outputs[n]))
else:
            mem_buffer = scan_utils.allocate_memory(
                T, states_and_outputs_info[n], states_and_outputs[n])
            inputs.append(mem_buffer)
            outputs.append(
                tensor.set_subtensor(mem_buffer[t % lengths[n]],
                                     states_and_outputs[n]))
inputs.extend(additional_input_states)
outputs.extend(additional_output_states)
lengths.extend(additional_lengths)
mintaps.extend(additional_mintaps)
inputs.extend(non_numeric_input_states)
outputs.extend(non_numeric_output_states)
all_other_inputs = gof.graph.inputs(outputs)
parameters = [x for x in all_other_inputs
if (x not in inputs and x not in lengths and x is not t
and isinstance(x, gof.Variable) and
not isinstance(x, gof.Constant))]
inputs.extend(parameters)
    # 5.3 Construct the options dictionary
options['name'] = name
options['profile'] = profile
options['mode'] = mode
options['inplace'] = False
options['gpu'] = False
options['truncate_gradient'] = truncate_gradient
options['hash_inner_graph'] = 0
# 5.4 Construct the ScanOp instance
local_op = scan_op.ScanOp(inputs=inputs,
outputs=outputs,
lengths=lengths,
switches=[],
mintaps=mintaps,
index=t,
options=options,
as_repeatUntil=cond)
# Note that we get here all the outputs followed by the update rules to
# the shared variables we had in our scan
# we know that we have (in this given order):
# * len(states_and_outputs) real outputs
# * len(additional_input_states) updates for numeric shared variable
# * len(non_numeric_input_states) updates for non numeric shared
# variables
scan_inputs = [T] + inputs
scan_outputs_update_rules = scan_utils.to_list(local_op(*scan_inputs))
# 5.5 Collect outputs and add permutation object
scan_outputs = []
for pos in xrange(len(states_and_outputs)):
out = scan_utils.ScanPermutation(mintaps[pos])(
scan_outputs_update_rules[pos], t)
scan_outputs.append(out[mintaps[pos]:])
# 5.6 Construct updates dictionary
update_rules = scan_outputs_update_rules[len(states_and_outputs):]
updates = {}
for v, u in izip(original_numeric_shared_variables,
update_rules[:len(additional_input_states)]):
updates[v] = u[-1]
for v, u in izip(original_non_numeric_shared_variables,
update_rules[len(additional_input_states):]):
updates[v] = u
# Step 5.7 We are done and can return everything back to the user
return scan_outputs, updates | 7ac0b7d106bc2e1642827a2e9f79552eb418a918 | 7,960 |
def stock_analyst(stock_list):
"""This function accepts a list of data P and outputs the best day to
buy(B) and sell(S) stock.
Args:
stock_list: expects a list of stocks as a parameter
Returns:
a string promting to buy stock if one has not bought stock i.e the
value of stock is less than 1
If the value of stock is > 0 it returns the best days to stock at
value and sell stock at maximum value
"""
B = stock_list.index(min(stock_list))
buy_value = min(stock_list)
sell_value = -1
if buy_value > 1:
for sell_indx in range(B, len(stock_list)):
if sell_value < stock_list[sell_indx]:
sell_value = stock_list[sell_indx]
S = sell_indx
else:
return 'Buy stock first'
return [B, S] | bbb3cd664ba0ea366e8ad6fa369ae3259cf52a02 | 7,961 |
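A worked example with illustrative prices:

prices = [7, 3, 5, 9, 4, 8]
print(stock_analyst(prices))  # -> [1, 3]: buy on day 1 (price 3), sell on day 3 (price 9)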
def is_sync_available(admin_id):
"""Method to check the synchronization's availability about networks connection.
Args:
admin_id (str): Admin privileges flag.
"""
return r_synchronizer.is_sync_available() | 712becdc6c9903d41e3d29602bb7dc07987c6867 | 7,962 |
import collections
def on_second_thought(divider):
"""sort the characters according to number of times they appears in given text,
returns the remaining word as a string
"""
unsorted_list = list(unsorted_string)
# characters occurence determines the order
occurence = collections.Counter(unsorted_list)
# sort by characters frequency in descending order
occurences_list = sorted(unsorted_list, key=occurence.get, reverse=True)
# already sorted, duplicates would provide no value
reduced_list = list(collections.OrderedDict.fromkeys(occurences_list))
divider_position = reduced_list.index(divider)
# everything behind (and including) the divider is irrelevant
return ''.join(reduced_list[:divider_position]) | 1295a67cf1ebce0f79e49566306eba6add1f2e35 | 7,963 |
def aiohttp_unused_port(loop, aiohttp_unused_port, socket_enabled):
"""Return aiohttp_unused_port and allow opening sockets."""
return aiohttp_unused_port | 9c5d0c1125a7758be2e07a8f8aca6676429a841a | 7,964 |
def font_variant(name, tokens):
"""Expand the ``font-variant`` shorthand property.
https://www.w3.org/TR/css-fonts-3/#font-variant-prop
"""
return expand_font_variant(tokens) | 8bac3f0610c7951686504fd843c845d124f34ed6 | 7,966 |
def with_part_names(*part_names):
"""Add part names for garage.parts.assemble.
Call this when you want to assemble these parts but do not want
them to be passed to main.
"""
return lambda main: ensure_app(main).with_part_names(*part_names) | c62d495c2259139b1a9079697bcf784d12e6f9c2 | 7,967 |
def library_view(request):
"""View for image library."""
if request.user.is_authenticated:
the_user = request.user
albums = Album.objects.filter(user=the_user)
context = {'the_user': the_user,
'albums': albums}
return render(request, 'imager_profile/library.html', context) | 948d239decfacabf5b3bba05e10739c7856db609 | 7,968 |
import pandas as pd
def combine_html_json_pbp(json_df, html_df, game_id, date):
"""
Join both data sources. First try merging on event id (which is the DataFrame index) if both DataFrames have the
same number of rows. If they don't have the same number of rows, merge on: Period', Event, Seconds_Elapsed, p1_ID.
:param json_df: json pbp DataFrame
:param html_df: html pbp DataFrame
:param game_id: id of game
:param date: date of game
:return: finished pbp
"""
# Don't need those columns to merge in
json_df = json_df.drop(['p1_name', 'p2_name', 'p2_ID', 'p3_name', 'p3_ID'], axis=1)
try:
html_df.Period = html_df.Period.astype(int)
# If they aren't equal it's usually due to the HTML containing a challenge event
if html_df.shape[0] == json_df.shape[0]:
json_df = json_df[['period', 'event', 'seconds_elapsed', 'xC', 'yC']]
game_df = pd.merge(html_df, json_df, left_index=True, right_index=True, how='left')
else:
# We always merge if they aren't equal but we check if it's due to a challenge so we can print out a better
# warning message for the user.
# NOTE: May be slightly incorrect. It's possible for there to be a challenge and another issue for one game.
            if 'CHL' in list(html_df.Event):
shared.print_warning("The number of columns in the Html and Json pbp are different because the"
" Json pbp, for some reason, does not include challenges. Will instead merge on "
"Period, Event, Time, and p1_id.")
else:
shared.print_warning("The number of columns in the Html and json pbp are different because "
"someone fucked up. Will instead merge on Period, Event, Time, and p1_id.")
# Actual Merging
game_df = pd.merge(html_df, json_df, left_on=['Period', 'Event', 'Seconds_Elapsed', 'p1_ID'],
right_on=['period', 'event', 'seconds_elapsed', 'p1_ID'], how='left')
# This is always done - because merge doesn't work well with shootouts
game_df = game_df.drop_duplicates(subset=['Period', 'Event', 'Description', 'Seconds_Elapsed'])
except Exception as e:
shared.print_warning('Problem combining Html Json pbp for game {}: {}'.format(game_id, e))
return
game_df['Game_Id'] = game_id[-5:]
game_df['Date'] = date
return pd.DataFrame(game_df, columns=pbp_columns) | 4f2aa3948fea4f64ac996f4052101daa556d1038 | 7,969 |
import json
def read_json_file(path):
"""
Given a line-by-line JSON file, this function converts it to
a Python dict and returns all such lines as a list.
:param path: the path to the JSON file
:returns items: a list of dictionaries read from a JSON file
"""
items = list()
with open(path, 'r') as raw_data:
for line in raw_data:
line = json.loads(line)
items.append(line)
return items | 15f898faca0dff0ca4b6c73ff31e037d822cf273 | 7,970 |
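# Usage sketch for read_json_file (the file name 'events.jsonl' below is hypothetical);
# it writes a small newline-delimited JSON file with the json import above, then reads it back.
demo_lines = [json.dumps({'id': 1, 'kind': 'click'}), json.dumps({'id': 2, 'kind': 'view'})]
open('events.jsonl', 'w').write('\n'.join(demo_lines) + '\n')
records = read_json_file('events.jsonl')
print(records[0]['kind'])  # -> 'click'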
def get_boss_wage2(employee):
""" Monadic version. """
return bind3(bind3(unit3(employee), Employee.get_boss), Employee.get_wage) | 60524cc219a1c4438d310382aff519ee8ef5a66b | 7,971 |
def keypoint_angle(kp1, kp2):
"""求两个keypoint的夹角 """
k = [
(kp1.angle - 180) if kp1.angle >= 180 else kp1.angle,
(kp2.angle - 180) if kp2.angle >= 180 else kp2.angle
]
if k[0] == k[1]:
return 0
else:
return abs(k[0] - k[1]) | 3feee667bcf767656da6334727b8d502be41d909 | 7,972 |
def get_args_static_distribute_cells():
"""
Distribute ranges of cells across workers.
:return: list of lists
"""
pop_names_list = []
gid_lists = []
for pop_name in context.pop_names:
count = 0
gids = list(context.spike_trains[pop_name].keys())  # list() so the gid range can be sliced below
while count < len(gids):
pop_names_list.append(pop_name)
gid_lists.append(gids[count:count+context.gid_block_size])
count += context.gid_block_size
return [pop_names_list, gid_lists] | 42862d47533a8662b26c9875e5f62ceebb91ccec | 7,973 |
import torch
def le(input, other, *args, **kwargs):
"""
In ``treetensor``, you can get less-than-or-equal situation of the two tree tensors with :func:`le`.
Examples::
>>> import torch
>>> import treetensor.torch as ttorch
>>> ttorch.le(
... torch.tensor([[1, 2], [3, 4]]),
... torch.tensor([[1, 1], [4, 4]]),
... )
tensor([[ True, False],
[ True, True]])
>>> ttorch.le(
... ttorch.tensor({
... 'a': [[1, 2], [3, 4]],
... 'b': [1.0, 1.5, 2.0],
... }),
... ttorch.tensor({
... 'a': [[1, 1], [4, 4]],
... 'b': [1.3, 1.2, 2.0],
... }),
... )
<Tensor 0x7ff363bc6198>
├── a --> tensor([[ True, False],
│ [ True, True]])
└── b --> tensor([ True, False, True])
"""
return torch.le(input, other, *args, **kwargs) | fa96a544f7f449daf008c6cf9b68f3760de67487 | 7,974 |
import numpy
def linear_to_srgb(data):
"""Convert linear color data to sRGB.
Accessed from https://entropymine.com/imageworsener/srgbformula
Parameters
----------
data: :class:`numpy.ndarray`, required
Array of any shape containing linear data to be converted to sRGB.
Returns
-------
converted: :class:`numpy.ndarray`
Array with the same shape as `data` containing values in sRGB space.
"""
return numpy.where(data <= 0.0031308, data * 12.92, 1.055 * numpy.power(data, 1 / 2.4) - 0.055) | 01eae0f9a34204498aad86e3a0a38337c7ced919 | 7,975 |
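# Brief check of linear_to_srgb (numpy is imported above): values at or below the
# 0.0031308 threshold take the linear branch, larger values take the power branch.
linear_demo = numpy.array([0.0, 0.001, 0.5, 1.0])
print(linear_to_srgb(linear_demo))  # approximately [0.0, 0.01292, 0.73536, 1.0]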
def circuit_to_dagdependency(circuit):
"""Build a ``DAGDependency`` object from a ``QuantumCircuit``.
Args:
circuit (QuantumCircuit): the input circuits.
Return:
DAGDependency: the DAG representing the input circuit as a dag dependency.
"""
dagdependency = DAGDependency()
dagdependency.name = circuit.name
for register in circuit.qregs:
dagdependency.add_qreg(register)
for register in circuit.cregs:
dagdependency.add_creg(register)
for operation, qargs, cargs in circuit.data:
dagdependency.add_op_node(operation, qargs, cargs)
dagdependency._add_successors()
return dagdependency | 7356da47f3af1088226af765f83cd43413de0a1f | 7,976 |
def tweets_factory(fixtures_factory):
"""Factory for tweets from YAML file"""
def _tweets_factory(yaml_file):
all_fixtures = fixtures_factory(yaml_file)
return [t for t in all_fixtures if isinstance(t, TweetBase)]
return _tweets_factory | 19d6e7ffe57ec071d324d535458c2263496c109d | 7,977 |
def monte_carlo(ds,duration,n,pval,timevar):
"""
pval: two-tailed pval
"""
x=0
mc = np.empty([ds.shape[1],ds.shape[2],n])
while x<n:
dummy = np.random.randint(0, len(ds[timevar])-duration, size=1) # have to adjust size so total number of points is always the same
mc[:,:,x] = ds[int(dummy):int(dummy+duration),::].mean(timevar)
x=x+1
# derive percentile
perc_upper = np.nanpercentile(mc,100-pval,axis=2)
perc_lower = np.nanpercentile(mc,pval,axis=2)
return perc_lower,perc_upper | 040aaf1a6a6813095079262446a2226fec8948ee | 7,978 |
def num_crl(wf_n):
"""Function computes the autocorrelation function from given vectors\
and the Discrete Fourier transform
Args:
wf_n(numpy array, complex): Wave function over time
Returns:
numpy array, complex: The wave function complex over time.
numpy array, complex: The autocorrelation function over time.
numpy array, complex: The Discrete Fourier Transformation function\
over frequency
"""
# setting up the time vector and deleting it from array
time_vc = np.zeros([len(wf_n[0])])
time_vc = wf_n[0]
wf_n = np.delete(wf_n, [0], axis=0)
# the lenth of the vector
t_wf = len(wf_n[0])
p_wf = len(wf_n[:, 0])
# turning array into complex
comp_vc = np.zeros([p_wf, t_wf], dtype=np.complex_)
for n in range(p_wf):
comp_vc[:, n] = wf_n[n * 2] + wf_n[1 + n * 2] * 1j
return comp_vc, time_vc | fc84cd7184fb04f2725b50439d9b5cfe223e2020 | 7,979 |
def resample_time_series(series, period="MS"):
"""
Resample and interpolate a time series dataframe so we have one row
per time period (useful for FFT)
Parameters
----------
series: Series
pandas Series with date as index
period: string
Period for resampling
Returns
-------
Series:
pandas Series with datetime index, one row per resampling period
"""
# give the series a date index if the DataFrame is not index by date already
# if df.index.name != 'date':
# series.index = df.date
# just in case the index isn't already datetime type
series.index = pd.to_datetime(series.index)
# resample to get one row per time period
rseries = series.resample(period).mean()
new_series = rseries.interpolate()
return new_series | 2e3d2b1cbe4a8a0cc13c33c19fe217364819e31d | 7,980 |
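# Usage sketch for resample_time_series, assuming pandas is imported as pd as the
# function body expects: two irregular observations become one row per month start,
# with linear interpolation filling the gap.
import pandas as pd
demo_series = pd.Series([1.0, 4.0], index=pd.to_datetime(['2020-01-01', '2020-04-01']))
print(resample_time_series(demo_series, period='MS'))
# 2020-01-01 -> 1.0, 2020-02-01 -> 2.0, 2020-03-01 -> 3.0, 2020-04-01 -> 4.0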
def check_user(user, pw, DB):
"""
Check if user exists and if password is valid.
Return the user's data as a dict or a string with an error message.
"""
userdata = DB.get(user)
if not userdata:
log.error("Unknown user: %s", user)
return "Unknown user: %s" % user
elif userdata.get(C.Password) != pw:
log.error("Invalid password!")
return "Invalid password!"
return userdata | 21e8c56c0f747bd105cec31e1cb5aea348b4af44 | 7,983 |
def get_single_response_value(dom_response_list: list, agg_function):
"""
Get value of a single scenario's response.
:param dom_response_list: Single response provided as a list of one term.
:param agg_function: Function to aggregate multiple responses.
:return: Value of such observation.
"""
response_list = extract_list_from_dom(dom_object=dom_response_list[0],
tag_name='Observation',
attribute_name='Value')
if len(response_list) == 0:
response_value = np.NaN
else:
try:
response_value = agg_function([float(item) for item in response_list])
except TypeError:
response_value = np.NaN
return response_value | 7a7ef4f24a720a4611c48123061886b6bdb9f2f5 | 7,984 |
def sharpe_ratio(returns, periods=252):
"""
Create the Sharpe ratio for the strategy, based on a
benchmark of zero (i.e. no risk-free rate information).
Args:
returns (list, Series) - A pandas Series representing
period percentage returns.
periods (int.) Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc.
Returns:
float. The result
"""
return np.sqrt(periods) * (np.mean(returns)) / np.std(returns) | b06ef19c5512370ff98217f7fb565c25846b697e | 7,985 |
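# Worked example for sharpe_ratio, assuming numpy is imported as np as the function
# body expects: simulated daily returns, annualised with the default 252 periods.
import numpy as np
rng = np.random.default_rng(0)
daily_returns = rng.normal(loc=0.001, scale=0.01, size=252)
print(sharpe_ratio(daily_returns))                      # annualised Sharpe ratio
print(sharpe_ratio(daily_returns, periods=252 * 6.5))   # same returns treated as hourly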
def is_validated(user):
"""Is this user record validated?"""
# An account is "validated" if it has the `validated` field set to True, or
# no `validated` field at all (for accounts created before the "account
# validation option" was enabled).
return user.get("validated", True) | c1ddfc52a62e71a68798dc07e7576a4ae42aa17f | 7,986 |
def current_chart_provider_monthly():
""" API for monthly provider chart """
mytime = dubwebdb.CTimes("%Y-%m", request.args.get('time_start'),
request.args.get('time_end'))
myids = dubwebdb.Ids(prv_id=sanitize_list(request.args.get('prvid')),
team_id=sanitize_list(request.args.get('teamid')),
project_id=request.args.get('prjid'),
div_id=None)
csv_only = request.args.get('dl_csv')
if csv_only:
myrows = dubwebdb.get_data_budget_provider(mytime, myids)
return convert_to_download_csv(myrows)
else:
return dubwebdb.get_data_provider(mytime, myids, add_budget=True) | 36f67b0323be8fc136175fa4f9fb4819b40ebb94 | 7,987 |
def create_round_meander(radius, theta=0, offset=Point()):
"""
Returns a single period of a meandering path based on radius
and angle theta
"""
deg_to_rad = 2 * pi / 360
r = radius
t = theta * deg_to_rad
# The calculation to obtain the 'k' coefficient can be found here:
# http://itc.ktu.lt/itc354/Riskus354.pdf
# "APPROXIMATION OF A CUBIC BEZIER CURVE BY CIRCULAR ARCS AND VICE VERSA"
# by Aleksas Riskus
k = 0.5522847498
# the control points need to be shortened relative to the angle by this factor
j = 2*t/pi
path = "m %s,%s " % (-2*r*cos(t)-offset.x, -offset.y)
path += "c %s,%s %s,%s %s,%s " % (-k*r*j*sin(t),-k*r*j*cos(t), -(r-r*cos(t)),-r*sin(t)+r*k*j, -(r-r*cos(t)),-r*sin(t))
path += "c %s,%s %s,%s %s,%s " % (0,-k*r, r-k*r,-r, r,-r)
path += "c %s,%s %s,%s %s,%s " % (k*r,0, r,r-k*r, r,r)
path += "c %s,%s %s,%s %s,%s " % (0,k*r*j, -(r-r*cos(t)-k*r*j*sin(t)),r*sin(t)-r*k*j*cos(t), -r+r*cos(t),r*sin(t))
path += "c %s,%s %s,%s %s,%s " % (-k*r*j*sin(t),k*r*j*cos(t), -(r-r*cos(t)),r*sin(t)-r*k*j, -(r-r*cos(t)),r*sin(t))
path += "c %s,%s %s,%s %s,%s " % (0,k*r, r-k*r,r, r,r)
path += "c %s,%s %s,%s %s,%s " % (k*r,0, r,-r+k*r, r,-r)
path += "c %s,%s %s,%s %s,%s " % (0,-k*r*j, -(r-r*cos(t)-k*r*j*sin(t)),-r*sin(t)+r*k*j*cos(t), -r+r*cos(t),-r*sin(t))
return path | c4379ef8f16486e9cdbd3353c5458a6c9523bb2d | 7,988 |
import pickle
def load_config(path):
"""Loads the config dict from a file at path; returns dict."""
with open(path, "rb") as f:
config = pickle.load(f)
return config | eb12aed2ebdeebacf3041f3e4880c714f99c052c | 7,990 |
import datetime
def _check_year(clinicaldf: pd.DataFrame, year_col: int, filename: str,
allowed_string_values: list = []) -> str:
"""Check year columns
Args:
clinicaldf: Clinical dataframe
year_col: YEAR column
filename: Name of file
allowed_string_values: list of other allowed string values
Returns:
Error message
"""
error = ''
if process_functions.checkColExist(clinicaldf, year_col):
# Deal with pre-redacted values and other allowed strings
# first because can't int(text) because there are
# instances that have <YYYY
year_series = clinicaldf[year_col][
~clinicaldf[year_col].isin(allowed_string_values)
]
year_now = datetime.datetime.utcnow().year
try:
years = year_series.apply(
lambda x: datetime.datetime.strptime(
str(int(x)), '%Y').year > year_now
)
# Make sure that none of the years are greater than the current
# year. It can be the same, but can't future years.
assert not years.any()
except Exception:
error = (f"{filename}: Please double check your {year_col} "
"column, it must be an integer in YYYY format "
f"<= {year_now}")
# Tack on allowed string values
if allowed_string_values:
error += " or '{}'.\n".format(
"', '".join(allowed_string_values)
)
else:
error += ".\n"
else:
error = f"{filename}: Must have {year_col} column.\n"
return error | c1630b4196733baa6ef12db2990243b1052d01d5 | 7,991 |
def lower_strings(string_list):
"""
Helper function to return lowercase version of a list of strings.
"""
return [str(x).lower() for x in string_list] | 58dcaccbc0f4ce8f22d80922a3ac5da26d7f42b1 | 7,992 |
def AAprime():
"""
>> AAprime()
aaprime and aprimea
"""
aprimeA = dot(transpose(ATable), ATable)
# Aaprime = dot(ATable1, ATable)
return aprimeA | f47d4df43ebcb8348e4a6fd4234b38bd18e92199 | 7,993 |
import _pickle
import _io
def _load_dataset(data_filename_or_set, comm, verbosity):
"""Loads a DataSet from the data_filename_or_set argument of functions in this module."""
printer = _baseobjs.VerbosityPrinter.create_printer(verbosity, comm)
if isinstance(data_filename_or_set, str):
if comm is None or comm.Get_rank() == 0:
if _os.path.splitext(data_filename_or_set)[1] == ".pkl":
with open(data_filename_or_set, 'rb') as pklfile:
ds = _pickle.load(pklfile)
else:
ds = _io.read_dataset(data_filename_or_set, True, "aggregate", printer)
if comm is not None: comm.bcast(ds, root=0)
else:
ds = comm.bcast(None, root=0)
else:
ds = data_filename_or_set # assume a Dataset object
return ds | 9c3a26a36202b4e8f35c795b7817d3fde8900a0b | 7,995 |
import re
def parse_contact_name(row, name_cols, strict=False, type='person'):
"""Parses a person's name with probablepeople library
Concatenates all the contact name columns into a single string and then attempts to parse it
into standardized name components and return a subset of the name parts that are useful for
comparing contacts. This process eliminates notes and other non-name text from dirty data.
Args:
row (pd.Series): A record
name_cols (list): A list of column names in the record, in order, that when concatenated
comprise a person's name
strict (boolean, optional): Whether or not to raise a RepeatedLabelError when parsing, if
False, the last value of the repeated labels will be used for the parse
type (str): Which probableparser to use: 'generic', 'person' or 'company'
Returns:
A subset (tuple of str, or np.nan) of the standardized name components, namely:
(title, first, last, full_name)
"""
row = row.fillna('')
concat = []
for col in name_cols:
concat.append(row.get(col, ''))
concat = ' '.join(concat)
cleaned = re.sub(r'(not\s*available|not\s*provided|n/a)', '', concat, flags=re.IGNORECASE)
try:
parsed = probablepeople.tag(cleaned, type)
except probablepeople.RepeatedLabelError as e:
if strict:
raise e
problem_key, problem_vals, parsed = find_repeated_label(cleaned)
parsed = (parsed, '')
title = parsed[0].get('PrefixOther', np.nan)
first = parsed[0].get('GivenName', np.nan)
last = parsed[0].get('Surname', np.nan)
try:
full_name = first + ' ' + last
except TypeError as e:
full_name = np.nan
return title, first, last, full_name | 315b971344df60d2cbb4f0c4e1d820b37b07ddaf | 7,996 |
def get_peer_addr(ifname):
"""Return the peer address of given peer interface.
None if address not exist or not a peer-to-peer interface.
"""
for addr in IP.get_addr(label=ifname):
attrs = dict(addr.get('attrs', []))
if 'IFA_ADDRESS' in attrs:
return attrs['IFA_ADDRESS'] | e92906c0c705eb42ec3bedae2959dabcad72f0d2 | 7,999 |
def eiffel_artifact_created_event():
"""Eiffel artifact created event."""
return {
"meta": {
"id": "7c2b6c13-8dea-4c99-a337-0490269c374d",
"time": 1575981274307,
"type": "EiffelArtifactCreatedEvent",
"version": "3.0.0",
},
"links": [],
"data": {"identity": "pkg:artifact/created/[email protected]"},
} | 0ef2e5adadb58b92c94bac42c9880728573b159e | 8,000 |
def _hash(input_data, initVal=0):
"""
hash() -- hash a variable-length key into a 32-bit value
k : the key (the unaligned variable-length array of bytes)
len : the length of the key, counting by bytes
level : can be any 4-byte value
Returns a 32-bit value. Every bit of the key affects every bit of
the return value. Every 1-bit and 2-bit delta achieves avalanche.
About 36+6len instructions.
The best hash table sizes are powers of 2. There is no need to do
mod a prime (mod is so slow!). If you need less than 32 bits,
use a bitmask. For example, if you need only 10 bits, do
h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.
If you are hashing n strings (ub1 **)k, do it like this:
for (i=0, h=0; i<n; ++i) h = hash( k[i], len[i], h);
By Bob Jenkins, 1996. [email protected]. You may use this
code any way you wish, private, educational, or commercial. It's free.
See http://burtleburtle.net/bob/hash/evahash.html
Use for hash table lookup, or anything where one collision in 2^32 is
acceptable. Do NOT use for cryptographic purposes.
"""
data = bytes(input_data, encoding='ascii')
len_pos = len(data)
length = len(data)
if length == 0:
return 0
a = 0x9e3779b9
b = 0x9e3779b9
c = initVal
p = 0
while len_pos >= 12:
a += ((data[p + 0]) + ((data[p + 1]) << 8) + ((data[p + 2]) << 16) + ((data[p + 3]) << 24))
b += ((data[p + 4]) + ((data[p + 5]) << 8) + ((data[p + 6]) << 16) + ((data[p + 7]) << 24))
c += ((data[p + 8]) + ((data[p + 9]) << 8) + ((data[p + 10]) << 16) + ((data[p + 11]) << 24))
q = _mix(a, b, c)
a = q[0]
b = q[1]
c = q[2]
p += 12
len_pos -= 12
c += length
if len_pos >= 11:
c += (data[p + 10]) << 24
if len_pos >= 10:
c += (data[p + 9]) << 16
if len_pos >= 9:
c += (data[p + 8]) << 8
# the first byte of c is reserved for the length
if len_pos >= 8:
b += (data[p + 7]) << 24
if len_pos >= 7:
b += (data[p + 6]) << 16
if len_pos >= 6:
b += (data[p + 5]) << 8
if len_pos >= 5:
b += (data[p + 4])
if len_pos >= 4:
a += (data[p + 3]) << 24
if len_pos >= 3:
a += (data[p + 2]) << 16
if len_pos >= 2:
a += (data[p + 1]) << 8
if len_pos >= 1:
a += (data[p + 0])
q = _mix(a, b, c)
a = q[0]
b = q[1]
c = q[2]
return rshift_zero_padded(c, 0) | c4f1b0ee22ca940d360090b965125e71c272ad4c | 8,001 |
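# Usage sketch for _hash; it relies on the _mix and rshift_zero_padded helpers from
# the same module, which are assumed to be available here.
h1 = _hash("alpha")
h2 = _hash("beta", initVal=h1)        # chain hashes, as when hashing a list of strings
table_index = h1 & ((1 << 10) - 1)    # mask down to a 2^10-entry hash table
print(h1, h2, table_index)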
import logging
def read_config_option(key, expected_type=None, default_value=None):
"""Read the specified value from the configuration file.
Args:
key: the name of the key to read from the config file.
expected_type: read the config option as the specified type (if specified)
default_value: if the key doesn't exist, just return the default value.
If the default value is not specified, the function will throw whatever
error was raised by the configuration parser
"""
logging.info("Reading config option {} with expected type {}".format(key, expected_type))
try:
if not expected_type:
value = conf_parser.get("Settings", key)
if key == "password":
logging.info("Got configuration for key {}: ****".format(key))
else:
logging.info("Got configuration for key {}: {}".format(key, value))
return conf_parser.get("Settings", key)
elif expected_type is bool:
return conf_parser.getboolean("Settings", key)
except (ValueError, NoOptionError) as e:
if default_value:
return default_value
else:
raise | effc94b89dd8b1e0765c71bd4c0c03760715db1d | 8,002 |
from typing import Any
import logging
def removeKeys(array: dict = None, remove: Any = None) -> dict:
"""
Removes keys from array by given remove value.
:param array: dict[Any: Any]
:param remove: Any
:return:
- sorted_dict - dict[Any: Any]
"""
if remove is None:
remove = []
try:
sorted_dict = {}
for item_key in array:
if array[item_key] != remove:
sorted_dict[item_key] = array[item_key]
return sorted_dict
except Exception as e:
logging.exception(e) | 1b98821000642c79fbb71a9c0dc7163c4a95fa26 | 8,004 |
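# Usage sketch for removeKeys: drop every key whose value equals the sentinel
# (an empty list by default).
demo_settings = {'name': 'svc', 'tags': [], 'port': 8080, 'aliases': []}
print(removeKeys(demo_settings))                # {'name': 'svc', 'port': 8080}
print(removeKeys(demo_settings, remove='svc'))  # {'tags': [], 'port': 8080, 'aliases': []}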
from trimesh.path.creation import box_outline
from trimesh.path.util import concatenate
def affine2boxmesh(affines):
"""
:param affines: (n_parts, 6), range (0, 1)
:return:
"""
n_parts = len(affines)
colors = [[0, 0, 255, 255], # blue
[0, 255, 0, 255], # green
[255, 0, 0, 255], # red
[255, 255, 0, 255], # yellow
[0, 255, 255, 255], # cyan
[255, 0, 255, 255], # Magenta
[160, 32, 240, 255], # purple
[255, 255, 240, 255]] # ivory
shape_box = []
for idx in range(n_parts):
part_trans = affines[idx, :3]
part_size = affines[idx, 3:]
trans_mat = np.eye(4)
# translate to center of axis aligned bounds
trans_mat[:3, 3] = part_trans
part_box = box_outline(transform=trans_mat,
extents=part_size
)
shape_box.append(part_box)
shape_box = concatenate(shape_box)
return shape_box | 3d6568e6e533bdb31cdceb244666f376d73dad1e | 8,005 |
def _select_index_code(code):
"""
1 - sh
0 - sz
"""
code = str(code)
if code[0] == '3':
return 0
return 1 | 697d8e5ca1744c897b7eebbb7b9b0a3b45faec3d | 8,006 |
def get_install_agent_cmd():
"""Get OS specific command to install Telegraf agent."""
agent_pkg_deb = "https://packagecloud.io/install/repositories/" \
"wavefront/telegraf/script.deb.sh"
agent_pkg_rpm = "https://packagecloud.io/install/repositories/" \
"wavefront/telegraf/script.rpm.sh"
dist = system.check_os()
cmd = None
if not dist:
print("Error: Unsupported OS version. Please contact"
" [email protected].")
return cmd
if dist.strip().startswith(("Oracle Linux Server", "Fedora",
"Amazon Linux", "CentOS",
"Red Hat Enterprise Linux")):
cmd = "curl -s %s | bash" % (agent_pkg_rpm)
cmd += " && yum -y -q install telegraf"
elif dist.strip().startswith("Ubuntu"):
cmd = "curl -s %s | bash" % (agent_pkg_deb)
cmd += ' && apt-get -y -qq -o Dpkg::Options::="--force-confold"' \
' install telegraf'
elif dist.strip().lower().startswith("debian"):
cmd = "curl -s %s | bash" % (agent_pkg_deb)
cmd += ' && apt-get -o Dpkg::Options::="--force-confnew"' \
' -y install telegraf'
elif dist.strip().startswith(("openSUSE", "SUSE Linux Enterprise Server",
"SLES")):
cmd = "curl -s %s | bash" % (agent_pkg_rpm)
cmd += ' && zypper install telegraf'
else:
message.print_warn("Error: Unsupported OS version: %s." % (dist))
return cmd | 14373024b3b6046badcedf686a38423f126f02a2 | 8,007 |
from typing import Tuple
from typing import DefaultDict
import collections
def unpack(manifests: LocalManifestLists) -> Tuple[ServerManifests, bool]:
"""Convert `manifests` to `ServerManifests` for internal processing.
The returned error flag is `True` if any resource in `manifests` is defined
more than once, for instance when two files define the same namespace or the
same deployment; otherwise it is `False`.
The primary use case is to convert the manifests we read from local files
into the format Square uses internally for the server manifests as well.
Inputs:
manifests: LocalManifestLists
Returns:
Tuple[ServerManifests, bool]: flattened version of `manifests` and an error flag.
"""
# Compile a dict that shows which meta manifest was defined in which file.
# We will shortly use this information to determine if all resources were
# defined exactly once across all files.
all_meta: DefaultDict[MetaManifest, list] = collections.defaultdict(list)
for fname in manifests:
for meta, _ in manifests[fname]:
all_meta[meta].append(fname)
# Find out if all meta manifests were unique. If not, log the culprits and
# return with an error.
unique = True
for meta, fnames in all_meta.items():
if len(fnames) > 1:
unique = False
tmp = [str(_) for _ in fnames]
logit.error(
f"Duplicate ({len(tmp)}x) manifest {meta}. "
f"Defined in {str.join(', ', tmp)}"
)
if not unique:
return ({}, True)
# Compile the input manifests into a new dict with the meta manifest as key.
out = {k: v for fname in manifests for k, v in manifests[fname]}
return (out, False) | b7f3c3f1388b9d3791a18f2da97dd40cf131ecaa | 8,008 |
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the switch from config."""
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config[CONF_HOST]
token = config[CONF_TOKEN]
name = config[CONF_NAME]
model = config.get(CONF_MODEL)
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
devices = []
unique_id = None
if model is None:
try:
miio_device = Device(host, token)
device_info = await hass.async_add_executor_job(miio_device.info)
model = device_info.model
unique_id = f"{model}-{device_info.mac_address}"
_LOGGER.info(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
except DeviceException:
raise PlatformNotReady
if model in ["090615.switch.switch01"]:
plug = PtxSwitch(host, token, model=model)
device = XiaomiPTXSwitch(name, plug, model, unique_id, 1)
devices.append(device)
hass.data[DATA_KEY][host] = device
elif model in ["090615.switch.switch02"]:
plug = PtxSwitch(host, token, model=model)
device = XiaomiPTXSwitch(name, plug, model, unique_id, 1)
devices.append(device)
hass.data[DATA_KEY][host] = device
plug2 = PtxSwitch(host, token, model=model)
device2 = XiaomiPTXSwitch(name, plug2, model, unique_id, 2)
devices.append(device2)
hass.data[DATA_KEY][host] = device2
elif model in ["090615.switch.switch03"]:
plug = PtxSwitch(host, token, model=model)
device = XiaomiPTXSwitch(name, plug, model, unique_id, 1)
devices.append(device)
hass.data[DATA_KEY][host] = device
plug2 = PtxSwitch(host, token, model=model)
device2 = XiaomiPTXSwitch(name, plug2, model, unique_id, 2)
devices.append(device2)
hass.data[DATA_KEY][host] = device2
plug3 = PtxSwitch(host, token, model=model)
device3 = XiaomiPTXSwitch(name, plug3, model, unique_id, 3)
devices.append(device3)
hass.data[DATA_KEY][host] = device3
else:
_LOGGER.error(
"Unsupported device found! Please create an issue at "
"https://github.com/volshebniks/python-miio-ptx/issues "
"and provide the following data: %s",
model,
)
return False
async_add_entities(devices, update_before_add=True) | 0ef4f94c2b69bb2674ffcace56a85927c2c6d1d1 | 8,009 |
def process_generate_metric_alarms_event(event):
"""Handles a new event request
Placeholder copied from alert_controller implementation
"""
LOG.info(str(event))
return create_response(200, body="Response to HealthCheck") | 45f3ac5fa73048ec1f76bb5188a4e1de0eaca62d | 8,010 |
def get_query_segment_info(collection_name, timeout=None, using="default"):
"""
Notifies Proxy to return segments information from query nodes.
:param collection_name: A string representing the collection to get segments info.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: QuerySegmentInfo:
QuerySegmentInfo is the growing segments's information in query cluster.
:rtype: QuerySegmentInfo
:example:
>>> from pymilvus import Collection, FieldSchema, CollectionSchema, DataType, connections, utility
>>> connections.connect(alias="default")
>>> _DIM = 128
>>> field_int64 = FieldSchema("int64", DataType.INT64, description="int64", is_primary=True)
>>> field_float_vector = FieldSchema("float_vector", DataType.FLOAT_VECTOR, description="float_vector", is_primary=False, dim=_DIM)
>>> schema = CollectionSchema(fields=[field_int64, field_float_vector], description="get collection entities num")
>>> collection = Collection(name="test_get_segment_info", schema=schema)
>>> import pandas as pd
>>> int64_series = pd.Series(data=list(range(10, 20)), index=list(range(10)))
>>> float_vector_series = [[random.random() for _ in range(_DIM)] for _ in range(10)]
>>> data = pd.DataFrame({"int64" : int64_series, "float_vector": float_vector_series})
>>> collection.insert(data)
>>> collection.load() # load collection to memory
>>> res = utility.get_query_segment_info("test_get_segment_info")
"""
return _get_connection(using).get_query_segment_info(collection_name, timeout=timeout) | 521119f98a43d1abc303028f9bfa150dbfba098b | 8,011 |
def IoU(pred, gt, n_classes, all_iou=False):
"""Computes the IoU by class and returns mean-IoU"""
# print("IoU")
iou = []
for i in range(n_classes):
if np.sum(gt == i) == 0:
iou.append(np.NaN)
continue
TP = np.sum(np.logical_and(pred == i, gt == i))
FP = np.sum(np.logical_and(pred == i, gt != i))
FN = np.sum(np.logical_and(pred != i, gt == i))
iou.append(TP / (TP + FP + FN))
# nanmean: if a class is not present in the image, it's a NaN
result = [np.nanmean(iou), iou] if all_iou else np.nanmean(iou)
return result | 9635472121b13c9ce04e38fdfaee8bf29774a17a | 8,012 |
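# Small check of IoU with two 2x2 label maps and 3 classes, assuming numpy is
# imported as np as the function body expects; class 2 is absent from gt, so it
# contributes NaN and is skipped by nanmean.
import numpy as np
demo_pred = np.array([[0, 1], [1, 1]])
demo_gt = np.array([[0, 0], [1, 1]])
print(IoU(demo_pred, demo_gt, n_classes=3))                # mean of 0.5 and 0.667
print(IoU(demo_pred, demo_gt, n_classes=3, all_iou=True))  # [mean, [0.5, 0.667, nan]]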
def from_torchvision(vision_transform, p=1):
"""Takes in an arbitary torchvision tranform and wrap it such that it can be
applied to a list of images of shape HxWxC
Returns a callable class that takes in list of images and target as input
NOTE:
Due to implementation difficulties, in order to apply the same
randomized transform to EACH image, it is best to pass in
a deterministic transform like the functional transforms
in torchvision and then pass in a p value for the wrapper
to roll a number and apply the transform with that probability
Additionally, it's also possible to wrap a torchvision functional transform
as long as it's a function that takes in an image as it's only argument
i.e can write something like:
lambda x: some_functional_transform(x,...)
"""
return TorchvisionWrapper(vision_transform, p=p) | b2af1a672d24171d9b80cd4eeb6d53fc80d09f53 | 8,013 |
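# Usage sketch for from_torchvision, assuming torchvision is installed and the
# TorchvisionWrapper class referenced above handles the per-image application.
import torchvision.transforms.functional as F
hflip = from_torchvision(lambda img: F.hflip(img), p=0.5)
# images: list of HxWxC images, target: whatever annotation structure is in use
# images_out, target_out = hflip(images, target)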
def get_ports(context, project_id=None):
"""Returns all ports of VMs in EOS-compatible format.
:param project_id: globally unique neutron tenant identifier
"""
session = context.session
model = db_models.AristaProvisionedVms
if project_id:
all_ports = (session.query(model).
filter(model.project_id == project_id,
model.host_id.isnot(None),
model.vm_id.isnot(None),
model.network_id.isnot(None),
model.port_id.isnot(None)))
else:
all_ports = (session.query(model).
filter(model.project_id.isnot(None),
model.host_id.isnot(None),
model.vm_id.isnot(None),
model.network_id.isnot(None),
model.port_id.isnot(None)))
ports = {}
for port in all_ports:
if port.port_id not in ports:
ports[port.port_id] = port.eos_port_representation()
ports[port.port_id]['hosts'].append(port.host_id)
return ports | 17d8dadde3dde78286f746435454b454d5589bd2 | 8,015 |
import hashlib
import hmac
def _HMAC(K, C, Mode=hashlib.sha1):
"""
Generate an HMAC value.
The default mode is to generate an HMAC-SHA-1 value w/ the SHA-1 algorithm.
:param K: shared secret between client and server.
Each HOTP generator has a different and unique secret K.
:type K: bytes
:param C: 8-byte counter value, the moving factor.
This counter MUST be synchronized between the HOTP generator
(client) and the HOTP validator (server).
:type C: bytes
:param Mode: The algorithm to use when generating the HMAC value
:type Mode: hashlib.sha1, hashlib.sha256, hashlib.sha512, or hashlib.md5
:return: HMAC result. If HMAC-SHA-1, result is 160-bits (20-bytes) long.
:rtype: bytes
"""
return hmac.new(K, C, Mode).digest() | db9bf26c52427acc259f3cb1590c7c13b0d0dd9e | 8,016 |
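# HOTP-style usage of _HMAC: the moving factor C is the counter packed as an
# 8-byte big-endian value (as in RFC 4226) and K is the shared secret.
import struct
demo_K = b"12345678901234567890"
demo_C = struct.pack(">Q", 1)                      # counter value 1
print(_HMAC(demo_K, demo_C).hex())                 # 20-byte HMAC-SHA-1 digest
print(len(_HMAC(demo_K, demo_C, hashlib.sha256)))  # 32 bytes with SHA-256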
def get_reference(planet_name):
"""
Return reference for a given planet's orbit fit
Args:
planet_name (str): name of planet. no space
Returns:
reference (str): Reference of orbit fit
"""
planet_name = planet_name.lower()
if planet_name not in post_dict:
raise ValueError("Invalid planet name '{0}'".format(planet_name))
filename, reference = post_dict[planet_name]
return reference | b700509300bdb2f6595e2a7c44a0d84e04d795f8 | 8,017 |
def DefaultPortIfAvailable():
"""Returns default port if available.
Raises:
EmulatorArgumentsError: if port is not available.
Returns:
int, default port
"""
if portpicker.is_port_free(_DEFAULT_PORT):
return _DEFAULT_PORT
else:
raise EmulatorArgumentsError(
'Default emulator port [{}] is already in use'.format(_DEFAULT_PORT)) | 40c065946d8f9ee6c50f7df40f2be6644a472414 | 8,018 |
def mock_hub(hass):
"""Mock hub."""
mock_integration(hass, MockModule(DOMAIN))
hub = mock.MagicMock()
hub.name = "hub"
hass.data[DOMAIN] = {DEFAULT_HUB: hub}
return hub | b4495ca6fbb7638aedf86406f94c00566e376b1b | 8,019 |
def deferred_setting(name, default):
"""
Returns a function that calls settings with (name, default)
"""
return lambda: setting(name, default) | 286acec75f7a5a1e0217dc4cee7b7b5d1ba8e742 | 8,020 |
def extends_dict(target, source):
""" Will copy every key and value of source in target if key is not present in target """
for key, value in source.items():
if key not in target:
target[key] = value
elif type(target[key]) is dict:
extends_dict(target[key], value)
elif type(target[key]) is list:
target[key] += value
return target | 5a68dde5e3bb7dbb81ad61c3698614f56dd5efd7 | 8,021 |
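# Usage sketch for extends_dict: missing keys are copied, nested dicts are merged
# recursively, lists are extended, and existing scalar values in target win.
demo_target = {'name': 'svc', 'opts': {'retries': 3}, 'tags': ['a']}
demo_source = {'name': 'other', 'opts': {'timeout': 10}, 'tags': ['b'], 'debug': True}
print(extends_dict(demo_target, demo_source))
# {'name': 'svc', 'opts': {'retries': 3, 'timeout': 10}, 'tags': ['a', 'b'], 'debug': True}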
def get_maps_interface_class(zep_inp):
"""
Takes the input of zephyrus and return the maps of
interfaces to classes and viceversa
"""
interface_to_classes = {}
class_to_interfaces = {}
for i in zep_inp["components"].keys():
class_name = i.split(settings.SEPARATOR)[-1]
interfaces = zep_inp["components"][i]["provides"][0]["ports"]
class_to_interfaces[class_name] = interfaces
for k in interfaces:
if k in interface_to_classes:
interface_to_classes[k].append(class_name)
else:
interface_to_classes[k] = [class_name]
return (interface_to_classes,class_to_interfaces) | 9a14de30677abe0fa8cf0bb3907cd1f32a8f33de | 8,022 |
def plextv_resources_base_fixture():
"""Load base payload for plex.tv resources and return it."""
return load_fixture("plex/plextv_resources_base.xml") | f252099bd6457af208c4d96b8024d3ce28d84cd9 | 8,023 |
from netrc import netrc
from requests.auth import HTTPDigestAuth
def _auth(machine='desi.lbl.gov'):
"""Get authentication credentials.
"""
n = netrc()
try:
u,foo,p = n.authenticators(machine)
except:
raise ValueError('Unable to get user/pass from $HOME/.netrc for {}'.format(machine))
return HTTPDigestAuth(u,p) | 4ef27e589416f54dd76522b3312a2aa24441e200 | 8,024 |
import re
def relative_date_add(date_rule: str, strict: bool = False) -> float:
"""Change the string in date rule format to the number of days. E.g 1d to 1, 1y to 365, 1m to 30, -1w to -7"""
days = ''
if re.search(DateRuleReg, date_rule) is not None:
res = re.search(DateRuleReg, date_rule)
date_str = res.group(1)
if date_str[0] == '-':
num = float(date_str[1:-1])
days = '-'
else:
num = float(date_str[:-1])
rule = date_str[-1:]
if rule in DictDayRule:
scale = DictDayRule[rule]
days = days + str(num * scale)
d = float(days)
return d
else:
raise MqValueError('There are no valid day rule for the point provided.')
if strict:
raise MqValueError(f'invalid date rule {date_rule}')
return 0 | 9180ed2ec99302679f7d0e2ee9ca57c4e2e6c48c | 8,025 |
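# Usage sketch for relative_date_add; it assumes the DateRuleReg pattern,
# DictDayRule mapping (d->1, w->7, m->30, y->365 per the docstring) and
# MqValueError from the surrounding module.
print(relative_date_add('1d'))    # 1.0
print(relative_date_add('-1w'))   # -7.0
print(relative_date_add('2y'))    # 730.0
print(relative_date_add('oops'))  # 0, or MqValueError when strict=True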
def to_frames_using_nptricks(src: np.ndarray, window_size: int, stride: int) -> np.ndarray:
"""
A primitive implementation that splits an np.ndarray into frames; it uses the
`np.lib.stride_tricks.as_strided` function for the split, which makes it faster than
`to_frames_using_index`, which relies on indexing.
Parameters
----------
src: np.ndarray
source array to be split.
window_size: int
sliding window size.
stride: int
positive integer step between consecutive frames.
Returns
-------
frames: np.ndarray
a shape of frames is `(num_frames, window_size, *src.shape[1:])`, where num_frames is `(src.shape[0] - window_size) // stride + 1`.
"""
assert stride > 0, 'stride must be a positive integer. stride={}'.format(stride)
num_frames = (src.shape[0] - window_size) // stride + 1
ret_shape = (num_frames, window_size, *src.shape[1:])
strides = (stride * src.strides[0], *src.strides)
return np.lib.stride_tricks.as_strided(src, shape=ret_shape, strides=strides) | b10b077cf0fbf2b0e491e7f1cba9033cfadf10c5 | 8,026 |
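# Quick check of to_frames_using_nptricks, assuming numpy is imported as np:
# ten samples, window of 4, stride of 2 -> (10 - 4) // 2 + 1 = 4 overlapping frames.
import numpy as np
demo_signal = np.arange(10)
demo_frames = to_frames_using_nptricks(demo_signal, window_size=4, stride=2)
print(demo_frames.shape)               # (4, 4)
print(demo_frames[0], demo_frames[1])  # [0 1 2 3] [2 3 4 5]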
def global_fit(
model_constructor,
pdf_transform=False,
default_rtol=1e-10,
default_atol=1e-10,
default_max_iter=int(1e7),
learning_rate=1e-6,
):
"""
Wraps a series of functions that perform maximum likelihood fitting in the
`two_phase_solver` method found in the `fax` python module. This allows for
the calculation of gradients of the best-fit parameters with respect to upstream
parameters that control the underlying model, i.e. the event yields (which are
then parameterized by weights or similar).
Args:
model_constructor: Function that takes in the parameters of the observable,
and returns a model object (and background-only parameters)
Returns:
global_fitter: Callable function that performs global fits.
Differentiable :)
"""
adam_init, adam_update, adam_get_params = optimizers.adam(learning_rate)
def make_model(model_pars):
m, bonlypars = model_constructor(model_pars)
bounds = m.config.suggested_bounds()
exp_bonly_data = m.expected_data(bonlypars, include_auxdata=True)
def expected_logpdf(pars): # maps pars to bounded space if pdf_transform = True
return (
m.logpdf(to_bounded_vec(pars, bounds), exp_bonly_data)
if pdf_transform
else m.logpdf(pars, exp_bonly_data)
)
def global_fit_objective(pars): # NLL
return -expected_logpdf(pars)[0]
return global_fit_objective
def global_bestfit_minimized(hyper_param):
nll = make_model(hyper_param)
def bestfit_via_grad_descent(i, param): # gradient descent
g = jax.grad(nll)(param)
# param = param - g * learning_rate
param = adam_get_params(adam_update(i, g, adam_init(param)))
return param
return bestfit_via_grad_descent
global_solve = twophase.two_phase_solver(
param_func=global_bestfit_minimized,
default_rtol=default_rtol,
default_atol=default_atol,
default_max_iter=default_max_iter,
)
def global_fitter(init, hyper_pars):
solve = global_solve(init, hyper_pars)
return solve.value
return global_fitter | 46917c0a4e6469759184a4aaa8199c57573360b0 | 8,027 |
from typing import Optional
from typing import List
def reorder_task(
token: 'auth.JWT',
task_id: 'typevars.ObjectID',
before_id: 'Optional[typevars.ObjectID]' = None,
after_id: 'Optional[typevars.ObjectID]' = None
) -> 'List[models.Task]':
"""Change the position of the task in the list."""
if before_id is None and after_id is None:
raise util_errors.APIError(
'One of before_id or after_id must be provided', 400)
if task_id == before_id or task_id == after_id:
raise util_errors.APIError(
'Task cannot be before or after itself', 400)
before = None
after = None
(task, before, after) = auth.load_owned_objects(
models.Task, token, 'get tasks', task_id, before_id, after_id)
if before is None:
before = after.before
if after is None:
after = before.after
if (
(before is not None and before.after is not after) or
(after is not None and after.before is not before)):
raise util_errors.APIError(
'Before and after tasks are not adjacent', 400)
mutated = [before, after, task, task.before, task.after]
if before is not None and task.parent is not before.parent:
mutated.extend([task.parent, before.parent])
check_reparent(task, before.parent)
elif after is not None and task.parent is not after.parent:
mutated.extend([task.parent, after.parent])
check_reparent(task, after.parent)
if task.before is not None:
task.before.after = task.after
elif task.after is not None:
task.after.before = None
task.before = before
task.after = after
db.DB.session.commit()
return [m for m in set(mutated) if m is not None] | b9e30b6d5929614d8385bd478e0e0a1f2663723f | 8,029 |
def get_ldpc_code_params(ldpc_design_filename):
"""
Extract parameters from LDPC code design file.
Parameters
----------
ldpc_design_filename : string
Filename of the LDPC code design file.
Returns
-------
ldpc_code_params : dictionary
Parameters of the LDPC code.
"""
ldpc_design_file = open(ldpc_design_filename)
ldpc_code_params = {}
[n_vnodes, n_cnodes] = [int(x) for x in ldpc_design_file.readline().split(' ')]
[max_vnode_deg, max_cnode_deg] = [int(x) for x in ldpc_design_file.readline().split(' ')]
vnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
cnode_deg_list = np.array([int(x) for x in ldpc_design_file.readline().split(' ')[:-1]], np.int32)
cnode_adj_list = -np.ones([n_cnodes, max_cnode_deg], int)
vnode_adj_list = -np.ones([n_vnodes, max_vnode_deg], int)
for vnode_idx in range(n_vnodes):
vnode_adj_list[vnode_idx, 0:vnode_deg_list[vnode_idx]] = \
np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])
for cnode_idx in range(n_cnodes):
cnode_adj_list[cnode_idx, 0:cnode_deg_list[cnode_idx]] = \
np.array([int(x)-1 for x in ldpc_design_file.readline().split('\t')])
cnode_vnode_map = -np.ones([n_cnodes, max_cnode_deg], int)
vnode_cnode_map = -np.ones([n_vnodes, max_vnode_deg], int)
cnode_list = np.arange(n_cnodes)
vnode_list = np.arange(n_vnodes)
for cnode in range(n_cnodes):
for i, vnode in enumerate(cnode_adj_list[cnode, 0:cnode_deg_list[cnode]]):
cnode_vnode_map[cnode, i] = cnode_list[np.where(vnode_adj_list[vnode, :] == cnode)]
for vnode in range(n_vnodes):
for i, cnode in enumerate(vnode_adj_list[vnode, 0:vnode_deg_list[vnode]]):
vnode_cnode_map[vnode, i] = vnode_list[np.where(cnode_adj_list[cnode, :] == vnode)]
cnode_adj_list_1d = cnode_adj_list.flatten().astype(np.int32)
vnode_adj_list_1d = vnode_adj_list.flatten().astype(np.int32)
cnode_vnode_map_1d = cnode_vnode_map.flatten().astype(np.int32)
vnode_cnode_map_1d = vnode_cnode_map.flatten().astype(np.int32)
pmat = np.zeros([n_cnodes, n_vnodes], int)
for cnode_idx in range(n_cnodes):
pmat[cnode_idx, cnode_adj_list[cnode_idx, :]] = 1
ldpc_code_params['n_vnodes'] = n_vnodes
ldpc_code_params['n_cnodes'] = n_cnodes
ldpc_code_params['max_cnode_deg'] = max_cnode_deg
ldpc_code_params['max_vnode_deg'] = max_vnode_deg
ldpc_code_params['cnode_adj_list'] = cnode_adj_list_1d
ldpc_code_params['cnode_vnode_map'] = cnode_vnode_map_1d
ldpc_code_params['vnode_adj_list'] = vnode_adj_list_1d
ldpc_code_params['vnode_cnode_map'] = vnode_cnode_map_1d
ldpc_code_params['cnode_deg_list'] = cnode_deg_list
ldpc_code_params['vnode_deg_list'] = vnode_deg_list
ldpc_design_file.close()
return ldpc_code_params | a2702a3fb5faf67d56fa08ae7ab627e3142fb006 | 8,030 |
def find_collection(*, collection, name):
"""
Looks through the pages of a collection for a resource with the specified name.
Returns it, or if not found, returns None
"""
if isinstance(collection, ProjectCollection):
try:
# try to use search if it is available
# call list() to collapse the iterator, otherwise the NotFound
# won't show up until collection_list is used
collection_list = list(collection.search(search_params={
"name": {
"value": name,
"search_method": "EXACT"
}
}))
except NotFound:
# Search must not be available yet
collection_list = collection.list()
else:
collection_list = collection.list()
matching_resources = [resource for resource in collection_list if resource.name == name]
if len(matching_resources) > 1:
raise ValueError("Found multiple collections with name '{}'".format(name))
if len(matching_resources) == 1:
result = matching_resources.pop()
print('Found existing: {}'.format(result))
return result
else:
return None | a6532f2f63b682822f96e51d7ab86e7bc240d922 | 8,031 |