content | sha1 | id
---|---|---|
stringlengths 35-762k | stringlengths 40-40 | int64 0-3.66M
import glob
import os
import fnmatch
def parse_directory(path, rgb_prefix='img_', flow_x_prefix='flow_x_', flow_y_prefix='flow_y_'):
"""
Parse directories holding extracted frames from standard benchmarks
"""
print('parse frames under folder {}'.format(path))
frame_folders = glob.glob(os.path.join(path, '*'))
def count_files(directory, prefix_list):
lst = os.listdir(directory)
cnt_list = [len(fnmatch.filter(lst, x+'*')) for x in prefix_list]
return cnt_list
# check RGB
rgb_counts = {}
flow_counts = {}
dir_dict = {}
for i,f in enumerate(frame_folders):
all_cnt = count_files(f, (rgb_prefix, flow_x_prefix, flow_y_prefix))
k = f.split('/')[-1]
rgb_counts[k] = all_cnt[0]
dir_dict[k] = f
x_cnt = all_cnt[1]
y_cnt = all_cnt[2]
if x_cnt != y_cnt:
raise ValueError('x and y direction have different number of flow images. video: '+f)
flow_counts[k] = x_cnt
if i % 200 == 0:
print('{} videos parsed'.format(i))
print('frame folder analysis done')
return dir_dict, rgb_counts, flow_counts
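# Hypothetical usage sketch (not part of the original snippet); the frame folder
# path below is invented and assumes a <path>/<video>/img_00001.jpg style layout.
if __name__ == '__main__':
    dirs, rgb_counts, flow_counts = parse_directory('/data/ucf101_frames')
    for video in list(rgb_counts)[:3]:
        print(video, 'rgb:', rgb_counts[video], 'flow:', flow_counts[video])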
|
5459cbb64686718d6275c1349e2062d5ed277023
| 20,200 |
def create_effect(
effect_id: CardEffect.EffectId = CardEffect.EffectId.DMG,
target: CardLevelEffects.Target = CardLevelEffects.Target.OPPONENT,
power: int = 10,
range_: float = 5
) -> Effect:
"""
    Creates an effect with the given data, or a default effect dealing damage to the opponent if no data was provided.
:param effect_id:
:param target:
:param power:
:param range_:
:return: Created effect.
"""
effect_factory = EffectFactory.get_instance()
card = CardFactory()
effect_model = CardLevelEffectsFactory(
card=card,
card_effect=CardEffect.objects.get(pk=effect_id),
target=target,
power=power,
range=range_
)
return effect_factory.create(effect_model)
|
9055250c4ab7db3700b2393c16f54cfef3566747
| 20,201 |
def part_two(stream: Stream, violation: int) -> int:
"""Find the sum of min & max in the sequence that sums to `violation`."""
for start in range(len(stream) - 1):
for end in range(start + 2, len(stream) + 1):
seq = stream[start:end]
seq_sum = sum(seq)
if seq_sum == violation:
return min(seq) + max(seq)
if seq_sum > violation:
break # No point in going further, since the sum can only grow
raise Exception("Solution not found!")
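# Hypothetical usage sketch (not part of the original snippet); the list resembles the
# Advent of Code 2020 day 9 example input, assuming `Stream` is a sequence of ints.
if __name__ == '__main__':
    example = [35, 20, 15, 25, 47, 40, 62, 55, 65, 95,
               102, 117, 150, 182, 127, 219, 299, 277, 309, 576]
    print(part_two(example, 127))  # 62 (15 + 47)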
|
a1481af0183f1e42642a9137242d57fe75738770
| 20,202 |
def getDoubleArray(plug):
"""
Gets the float array from the supplied plug.
:type plug: om.MPlug
:rtype: om.MDoubleArray
"""
return om.MFnDoubleArrayData(plug.asMObject()).array()
|
6e93113d1968a56cb3b12c500be04c554f79c165
| 20,203 |
import numpy
def get_fb(file_name):
"""#{{{
load feature file and transform to dict
return:
dict
key_list_feat
"""
ff = open(file_name, 'r')
fb = []
delta = []
fb_matrix = numpy.zeros([1, 24])
delta_matrix = numpy.zeros([1, 24])
fbanks = {}
deltas = {}
fb_keylist = []
while(1):
line = ff.readline()
if not line:
# print 'end of file'
break
end_line = line.strip().split()[-1]
if end_line == '[':
key = line.strip().split()[0]
elif end_line == ']':
for i in range(24):
fb.append(float(line.strip().split()[i]))
for i in range(24, 48):
delta.append(float(line.strip().split()[i]))
fb_keylist.append(key)
fb_matrix = numpy.vstack((fb_matrix, fb))
fbanks[key] = fb_matrix[1:, :]
delta_matrix = numpy.vstack((delta_matrix, delta))
deltas[key] = delta_matrix[1:, :]
fb = []
delta = []
fb_matrix = numpy.zeros([1, 24])
delta_matrix = numpy.zeros([1, 24])
else:
for i in range(24):
# value.append(line.strip().split()[i])
fb.append(float(line.strip().split()[i]))
for i in range(24, 48):
delta.append(float(line.strip().split()[i]))
fb_matrix = numpy.vstack((fb_matrix, fb))
delta_matrix = numpy.vstack((delta_matrix, delta))
fb = []
delta = []
print('number of utterances in fbank: %d' % len(fbanks))
ff.close()
return fbanks, deltas, fb_keylist
|
85589f74f47a58f0ba36a438b3340ff0858737e4
| 20,204 |
def make_train_input_fn(
feature_spec, labels, file_pattern, batch_size, shuffle=True):
"""Makes an input_fn for training."""
return _make_train_or_eval_input_fn(
feature_spec,
labels,
file_pattern,
batch_size,
tf.estimator.ModeKeys.TRAIN,
shuffle)
|
1f8d481d5fff1913f392a4286c445af757f849bd
| 20,205 |
def _find(xs, predicate):
"""Locate an item in a list based on a predicate function.
Args:
xs (list) : List of data
predicate (function) : Function taking a data item and returning bool
Returns:
(object|None) : The first list item that predicate returns True for or None
"""
for x in xs:
if predicate(x):
return x
return None
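# Hypothetical usage sketch (not part of the original snippet).
if __name__ == '__main__':
    print(_find([1, 4, 7, 10], lambda x: x > 5))  # 7
    print(_find([1, 4], lambda x: x > 5))         # None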
|
94d8dd47e54e1887f67c5f5354d05dc0c294ae52
| 20,206 |
from typing import OrderedDict
def remove_dataparallel_prefix(state_dict):
"""Removes dataparallel prefix of layer names in a checkpoint state dictionary."""
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] if k[:7] == "module." else k
new_state_dict[name] = v
return new_state_dict
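# Hypothetical usage sketch (not part of the original snippet); the layer names are invented.
if __name__ == '__main__':
    sd = OrderedDict([("module.encoder.weight", 0.1), ("decoder.bias", 0.2)])
    print(list(remove_dataparallel_prefix(sd)))  # ['encoder.weight', 'decoder.bias']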
|
28fe85b262f8d4bdefa40d34839787ba1a8ef094
| 20,207 |
def user_upload_widget(node, on_complete=''):
"""Returns a Valum Uploader widget that uploads files based on the user's
home directory.
:param node: storage type (public or private) and path indicator, e.g.
"public:foo/bar" to have the uploaded file go in
MEDIA_ROOT/$USERNAME/foo/bar.
:param on_complete: name of Javascript function to call when an upload has
        completed; it will be called with the signature:
function(String id, String fileName, Object responseJSON)
"""
return _valum_widget('/yacon/browser/user_upload_file/', node,
on_complete=on_complete)
|
77a797aaabaf7d3d0f9923e3931e938e568952e0
| 20,208 |
def run_lsa(model, lsa_options):
"""Implements local sensitivity analysis using LSI, RSI, and parameter subset reduction.
Parameters
----------
model : Model
Object of class Model holding run information.
options : Options
Object of class Options holding run settings.
Returns
-------
LsaResults
Object of class LsaResults holding all run results.
"""
# LSA implements the following local sensitivity analysis methods on system specified by "model" object
# 1) Jacobian
# 2) Scaled Jacobian for Relative Sensitivity Index (RSI)
# 3) Fisher Information matrix
# Required Inputs: object of class "model" and object of class "options"
# Outputs: Object of class lsa with Jacobian, RSI, and Fisher information matrix
# Calculate Jacobian
jac_raw=get_jacobian(model.eval_fcn, model.base_poi, lsa_options.x_delta,\
lsa_options.method, scale=False, y_base=model.base_qoi)
# Calculate relative sensitivity index (RSI)
jac_rsi=get_jacobian(model.eval_fcn, model.base_poi, lsa_options.x_delta,\
lsa_options.method, scale=True, y_base=model.base_qoi)
# Calculate Fisher Information Matrix from jacobian
fisher_mat=np.dot(np.transpose(jac_raw), jac_raw)
#Active Subspace Analysis
if lsa_options.run_param_subset:
reduced_model, active_set, inactive_set = get_active_subset(model, lsa_options)
#Collect Outputs and return as an lsa object
return LsaResults(jacobian=jac_raw, rsi=jac_rsi, fisher=fisher_mat,\
reduced_model=reduced_model, active_set=active_set,\
inactive_set=inactive_set)
else:
return LsaResults(jacobian=jac_raw, rsi=jac_rsi, fisher=fisher_mat)
|
2a8376f3e287dbeefa8f13321804ef6052357bf5
| 20,209 |
def condense_simple_conv3x3(in_channels,
out_channels,
groups):
"""
3x3 version of the CondenseNet specific simple convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
groups : int
Number of groups.
"""
return CondenseSimpleConv(
in_channels=in_channels,
out_channels=out_channels,
ksize=3,
stride=1,
pad=1,
groups=groups)
|
061d0df67fdcf5f3c56f227ca7e53d0fef3e2db2
| 20,210 |
def read_data_from(file_: str) -> list:
"""Read bitmasks and values from file."""
return open(file_, "r").read().splitlines()
|
ec1bd526d46ee94452df23f92448e60af4d6865c
| 20,211 |
def version_for(plugin):
# (Plugin) -> Optional[str]
"""Determine the version of a plugin by its module.
:param plugin:
The loaded plugin
:type plugin:
Plugin
:returns:
version string for the module
:rtype:
str
"""
module_name = plugin.plugin.__module__
try:
module = __import__(module_name)
except ImportError:
return None
return getattr(module, "__version__", None)
|
6d7c4ccc868d11d28c92d1264d52488e22d7f5e5
| 20,212 |
import json
def choose_organization():
"""Allow user to input organization id.
Returns:
str: Access target id
"""
target_id = None
while not target_id:
orgs = None
return_code, out, err = utils.run_command([
'gcloud', 'organizations', 'list', '--format=json'])
if return_code:
print(err)
else:
try:
orgs = json.loads(out)
except ValueError as verr:
print(verr)
if not orgs:
print('\nYou don\'t have access to any organizations. '
'Choose another option to enable Forseti access.')
return None
print('\nHere are the organizations you have access to:')
valid_org_ids = set()
for org in orgs:
org_id = utils.id_from_name(org['name'])
valid_org_ids.add(org_id)
print('ID=%s (description="%s")' %
(org_id, org['displayName']))
choice = raw_input('Enter the organization id where '
'you want Forseti to crawl for data: ').strip()
try:
# make sure that the choice is a valid organization id
if choice not in valid_org_ids:
print('Invalid organization id %s, try again' % choice)
return None
target_id = str(int(choice))
except ValueError:
print('Unable to parse organization id %s' % choice)
return target_id
|
fa46edc07e45eaa53bb6a52a6f0e7992a836fad7
| 20,213 |
def start_server(function):
"""
Decorator.
Tries to call function, if it fails, try to (re)start inotify server.
Raise QueryFailed if something went wrong
"""
def decorated_function(self, *args):
result = None
try:
return function(self, *args)
except (OSError, socket.error), err:
autostart = self.ui.configbool('inotify', 'autostart', True)
if err[0] == errno.ECONNREFUSED:
self.ui.warn(_('inotify-client: found dead inotify server '
'socket; removing it\n'))
os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
if err[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
self.ui.debug('(starting inotify server)\n')
try:
try:
server.start(self.ui, self.dirstate, self.root,
dict(daemon=True, daemon_pipefds=''))
except server.AlreadyStartedException, inst:
# another process may have started its own
# inotify server while this one was starting.
self.ui.debug(str(inst))
except Exception, inst:
self.ui.warn(_('inotify-client: could not start inotify '
'server: %s\n') % inst)
else:
try:
return function(self, *args)
except socket.error, err:
self.ui.warn(_('inotify-client: could not talk to new '
'inotify server: %s\n') % err[-1])
elif err[0] in (errno.ECONNREFUSED, errno.ENOENT):
# silently ignore normal errors if autostart is False
self.ui.debug('(inotify server not running)\n')
else:
self.ui.warn(_('inotify-client: failed to contact inotify '
'server: %s\n') % err[-1])
self.ui.traceback()
raise QueryFailed('inotify query failed')
return decorated_function
|
e6de74cacb703172ff25c30c8a1bd54937f47d7b
| 20,214 |
def get_subscriber_groups(publication_id, subscription_id='', full_uri=False):
"""This function identifies the subscriber groups for one or more subscriptions within a publication.
.. versionchanged:: 3.1.0
Refactored the function to be more efficient.
:param publication_id: The ID of the publication
:type publication_id: int, str
:param subscription_id: The specific subscription ID for which to return subscriber groups (Optional)
:type subscription_id: int, str
:param full_uri: Determines whether or not to return the full URI or just the Group ID (``False`` by default)
:type full_uri: bool
:returns: A dictionary mapping the subscription IDs to the respective subscriber groups
:raises: :py:exc:`khorosjx.errors.exceptions.SubscriptionNotFoundError`
"""
# Verify that the core connection has been established
verify_core_connection()
# Capture the subscriber groups for each subscription
subscriptions = get_subscription_data(publication_id)
# Filter for a specific subscription if an ID is provided
if subscription_id:
subscriptions = filter_subscriptions_by_id(subscription_id, subscriptions)
# Capture the subscriber groups
subscriber_groups = {}
for subscription in subscriptions:
if full_uri:
subscriber_groups[subscription['id']] = subscription.get('subscribers')
else:
subscribers = []
for subscriber in subscription.get('subscribers'):
subscribers.append(subscriber.split('securityGroups/')[1])
subscriber_groups[subscription['id']] = subscribers
return subscriber_groups
|
e7dd2a052992109a2673dcdbbac388ee0babf7ec
| 20,215 |
def get_salutation_from_title(title):
"""
Described here: https://github.com/VNG-Realisatie/Haal-Centraal-BRP-bevragen/blob/v1.0.0/features/aanhef.feature#L4-L38
"""
if title in [BARON, HERTOG, JONKHEER, MARKIES, RIDDER]:
return HOOGWELGEBOREN_HEER
if title in [BARONES, HERTOGIN, JONKVROUW, MARKIEZIN]:
return HOOGWELGEBOREN_VROUWE
if title in [PRINS, PRINSES]:
return HOOGHEID
if title == GRAAF:
return HOOGGEBOREN_HEER
if title == GRAVIN:
return HOOGGEBOREN_VROUWE
|
afbafcf7c2ec2a77b44d2e9aad5930f83d5cc10c
| 20,216 |
def hourOfDayNy(dateTime):
"""
Returns an int value of the hour of the day for a DBDateTime in the New York time zone.
The hour is on a 24 hour clock (0 - 23).
:param dateTime: (io.deephaven.db.tables.utils.DBDateTime) - The DBDateTime for which to find the hour of the day.
:return: (int) A QueryConstants.NULL_INT if the input is null, otherwise, an int value
of the hour of the day represented by the DBDateTime when interpreted in the New York
time zone.
"""
return _java_type_.hourOfDayNy(dateTime)
|
eac5db0723bf44162d50a787c56244d5bcb094d9
| 20,217 |
def _extract_action_num_and_node_id(m):
"""Helper method: Extract *action_num* and *node_id* from the given regex
match. Convert *action_num* to a 0-indexed integer."""
return dict(
action_num=(int(m.group('action_num')) - 1),
node_id=m.group('node_id'),
)
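# Hypothetical usage sketch (not part of the original snippet); the regex is an
# invented pattern that provides the expected `action_num` and `node_id` groups.
if __name__ == '__main__':
    import re
    m = re.match(r'action_(?P<action_num>\d+)@(?P<node_id>\w+)', 'action_3@node7')
    print(_extract_action_num_and_node_id(m))  # {'action_num': 2, 'node_id': 'node7'}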
|
f1e5f0b81d6d82856b7c00d67270048e0e4caf38
| 20,218 |
import re
def get_uid_cidx(img_name):
"""
:param img_name: format output_path / f'{uid} cam{cidx} rgb.png'
"""
img_name = img_name.split("/")[-1]
assert img_name[-8:] == " rgb.png"
img_name = img_name[:-8]
m = re.search(r'\d+$', img_name)
assert not m is None
cidx = int(m.group())
img_name = img_name[:-len(str(cidx))]
assert img_name[-4:] == " cam"
uid = img_name[0:-4]
return uid, cidx
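# Hypothetical usage sketch (not part of the original snippet); the file name follows
# the documented "<uid> cam<cidx> rgb.png" convention.
if __name__ == '__main__':
    print(get_uid_cidx('output/scene_042 cam3 rgb.png'))  # ('scene_042', 3)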
|
29363f4fc686fa972c2249e5e1db1a333625be36
| 20,219 |
def parse_color(hex_color):
"""Parse color values"""
cval = int(hex_color, 16)
x = lambda b: ((cval >> b) & 0xff) / 255.0
return {k: x(v) for k, v in dict(r=16, g=8, b=0).iteritems()}
|
70ca92f7696dd5193730326de141ad30c039f7c6
| 20,220 |
def apply_4x4(RT, XYZ):
"""
RT: B x 4 x 4
XYZ: B x N x 3
"""
#RT = RT.to(XYZ.device)
B, N, _ = list(XYZ.shape)
ones = np.ones([B, N, 1])
XYZ1 = np.concatenate([XYZ, ones], 2)
    XYZ1_t = np.transpose(XYZ1, (0, 2, 1))
# this is B x 4 x N
XYZ2_t = np.matmul(RT, XYZ1_t)
    XYZ2 = np.transpose(XYZ2_t, (0, 2, 1))
XYZ2 = XYZ2[:,:,:3]
return XYZ2
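# Hypothetical usage sketch (not part of the original snippet); assumes `numpy as np`
# is imported at module level, as the function body already requires.
if __name__ == '__main__':
    RT = np.eye(4).reshape(1, 4, 4)      # B=1 identity transform
    RT[0, :3, 3] = [1.0, 2.0, 3.0]       # add a translation
    XYZ = np.zeros((1, 2, 3))            # B=1, N=2 points at the origin
    print(apply_4x4(RT, XYZ))            # each point shifted by (1, 2, 3)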
|
b2b2e76a79dbbdf2bc0039fb073a0a6209c9f82d
| 20,221 |
def smoothmax(value1, value2, hardness):
"""
A smooth maximum between two functions. Also referred to as the logsumexp() function.
Useful because it's differentiable and preserves convexity!
Great writeup by John D Cook here:
https://www.johndcook.com/soft_maximum.pdf
:param value1: Value of function 1.
:param value2: Value of function 2.
:param hardness: Hardness parameter. Higher values make this closer to max(x1, x2).
:return: Soft maximum of the two supplied values.
"""
value1 = value1 * hardness
value2 = value2 * hardness
max = np.fmax(value1, value2)
min = np.fmin(value1, value2)
out = max + np.log(1 + np.exp(min - max))
out /= hardness
return out
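# Hypothetical usage sketch (not part of the original snippet); assumes `numpy as np`
# is imported at module level, as the function body already requires.
if __name__ == '__main__':
    print(smoothmax(1.0, 2.0, hardness=1))   # ~2.31, a soft blend
    print(smoothmax(1.0, 2.0, hardness=50))  # ~2.00, close to the hard max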
|
60ef9d14b6867aaa205c186309d0c9f53e4edb21
| 20,222 |
import os
def base_app(instance_path):
"""Flask application fixture."""
app_ = Flask('testapp', instance_path=instance_path)
app_.config.update(
SECRET_KEY='SECRET_KEY',
SQLALCHEMY_DATABASE_URI=os.environ.get(
'SQLALCHEMY_DATABASE_URI', 'sqlite:///test.db'),
SQLALCHEMY_TRACK_MODIFICATIONS=True,
TESTING=True,
)
InvenioPIDStore(app_)
InvenioDB(app_)
InvenioPIDRelations(app_)
InvenioRecords(app_)
InvenioIndexer(app_)
InvenioSearch(app_)
Babel(app_)
return app_
|
e4391a35fc92b228b3a1ae425478a9a777f85594
| 20,223 |
def udf_con(udf_backend):
"""
Instance of Client, already connected to the db (if applies).
"""
return udf_backend.connection
|
7e95460b4e6808cc148406dfbdb8e952ebb4739a
| 20,224 |
from typing import Union
from pathlib import Path
from typing import Optional
def resolve_config(*, config: Union[Path, str]) -> Optional[Path]:
"""Resolves a config to an absolute Path."""
path = config if isinstance(config, Path) else Path(config)
# Is it absolute, or relative to the CWD?
if path.exists():
return path
# Is it relative to a configuration directory?
for config_dir in get_config_dirs():
lpath = config_dir.joinpath(path)
if lpath.exists():
return lpath
for extension in EXTENSIONS:
lpath = config_dir.joinpath(f"{str(path)}.{extension}")
if lpath.exists():
return lpath
return None
|
4290e46cda385fd60c164af712d28e5e7ad22c83
| 20,225 |
def get_default_config() -> DefaultConfig:
"""
Get the default config.
Returns:
A dict with the default config.
"""
images = assets.get_images()
return {
"static_url": "/static",
"favicon_ico": images.favicon_ico.name,
"favicon_png": images.favicon_png.name,
"favicon_svg": images.favicon_svg.name,
"preview_png": images.preview_png.name,
"google_tag_manager": "GTM-*******",
"language": "en",
"territory": "US",
"domain": "sample.com",
"text_dir": "ltr",
"title": "Sample",
"description": "We do things",
"subject": "Home Page",
"main_color": "#ff0000",
"background_color": "#ffffff",
"author_name": info.AUTHOR,
"author_email": info.EMAIL,
"facebook_app_id": "123456",
"twitter_username": "sample",
"twitter_user_id": "123456",
"itunes_app_id": "123456",
"itunes_affiliate_data": "123456",
}
|
72685f6bb2a45e03f42d96c82cd0436a826fed68
| 20,226 |
import string
import re
def normalize_elt(elt, alphanum=True):
"""
Normalize string by removing newlines, punctuation, spaces,
and optionally filtering for alphanumeric chars
Args:
elt (string):
string to normalize
alphanum (bool, optional, default True):
if True, only return elt if it contains at least
one alphanumeric char, return None otherwise
Returns:
norm_elt (string):
normalized string or None
"""
norm_elt = elt.replace('\n', '') # remove new lines
translator = str.maketrans('', '', string.punctuation)
norm_elt = norm_elt.lower().translate(translator) # lowercase then remove punctuation
norm_elt = norm_elt.strip().replace(' ', '_') # replace spaces with underscores
if alphanum:
alphanum_check = re.search('[a-zA-Z0-9]', norm_elt)
if alphanum_check:
return norm_elt
else:
return None
else:
return norm_elt
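# Hypothetical usage sketch (not part of the original snippet).
if __name__ == '__main__':
    print(normalize_elt('Hello, World!\n'))  # hello_world
    print(normalize_elt('---'))              # None (no alphanumeric characters)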
|
79aad0a7425270b8708598fe5429ecd7c46bffde
| 20,227 |
from typing import Tuple
from typing import Optional
def check_importability(code: str, func_name: str) -> Tuple[bool, Optional[Exception]]:
"""Very simple check just to see whether the code is at least importable"""
try:
import_func_from_code(
code,
func_name,
raise_if_not_found=False,
register_module=False,
)
return True, None
except Exception as e: # pylint: disable=broad-except
return False, e
|
b7823344bf2ab7055882fb366b0903fcab1a5366
| 20,228 |
import torch
def compute_q(u, v, omega, k_hat, m_hat, N=100, map_est=False):
"""
Inputs:
u, v - (B,L*2)
omega - (L,n)
k_hat, m_hat - (B,J)
"""
B, L = u.size()[0], int(u.size()[1]/2)
unique_omega, inverse_idx = torch.unique(omega, dim=0, return_inverse=True) # (J,n), (L)
c, s = utils.circular_moment_numint_multi(k_hat, m_hat, unique_omega, unique_omega, N=N, map_est=map_est) # (B,J), (B,J) (0.0013s)
c, s = c[:,inverse_idx], s[:,inverse_idx] # (B,L), (B,L)
qc, qs = torch.empty(B,L*2,device=device), torch.empty(B,L*2,device=device)
qc[:,::2], qc[:,1::2] = c.clone(), c.clone()
qs[:,::2], qs[:,1::2] = s.clone(), s.clone()
return qc, qs
|
d8ab2b71a9c149ed3c8e25daa86954c8d24ce79e
| 20,229 |
def stationarity(sequence):
"""
Compute the stationarity of a sequence.
A stationary transition is one whose source and destination symbols
are the same. The stationarity measures the percentage of transitions
to the same location.
Parameters
----------
sequence : list
A list of symbols.
Returns
-------
float
Percentage of the sequence that is stationary.
"""
if len(sequence) <= 1:
return 100.0
if len(sequence) == len(set(sequence)):
return .0
stationary_transitions = 0
for i in range(1, len(sequence)):
if sequence[i - 1] == sequence[i]:
stationary_transitions += 1
return round(stationary_transitions * 100 / (len(sequence) - 1), 2)
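# Hypothetical usage sketch (not part of the original snippet).
if __name__ == '__main__':
    print(stationarity(['a', 'a', 'b', 'b', 'c']))  # 50.0 (2 of 4 transitions repeat)
    print(stationarity(['a', 'b', 'c']))            # 0.0 (all symbols unique)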
|
39f96d4a07a83ef2c46033dcac9cfaa343747b2f
| 20,230 |
def build_nmt_model(Vs, Vt, demb=128, h=128, drop_p=0.5, tied=True, mask=True, attn=True, l2_ratio=1e-4,
training=None, rnn_fn='lstm'):
"""
Builds the target machine translation model.
:param demb: Embedding dimension.
:param h: Number of hidden units.
:param drop_p: Dropout percentage.
:param attn: Flag to include attention units.
:param rnn_fn: Can be 'lstm' or 'gru'.
"""
if rnn_fn == 'lstm':
rnn = LSTM
elif rnn_fn == 'gru':
        rnn = GRU
else:
raise ValueError(rnn_fn)
# Build encoder
encoder_input = Input((None,), dtype='float32', name='encoder_input')
if mask:
encoder_emb_layer = Embedding(Vs + 1, demb, mask_zero=True, embeddings_regularizer=l2(l2_ratio),
name='encoder_emb')
else:
encoder_emb_layer = Embedding(Vs, demb, mask_zero=False, embeddings_regularizer=l2(l2_ratio),
name='encoder_emb')
encoder_emb = encoder_emb_layer(encoder_input)
# Dropout for encoder
if drop_p > 0.:
encoder_emb = Dropout(drop_p)(encoder_emb, training=training)
encoder_rnn = rnn(h, return_sequences=True, return_state=True, kernel_regularizer=l2(l2_ratio), name='encoder_rnn')
encoder_rtn = encoder_rnn(encoder_emb)
encoder_outputs = encoder_rtn[0]
encoder_states = encoder_rtn[1:]
# Build decoder
decoder_input = Input((None,), dtype='float32', name='decoder_input')
if mask:
decoder_emb_layer = Embedding(Vt + 1, demb, mask_zero=True, embeddings_regularizer=l2(l2_ratio),
name='decoder_emb')
else:
decoder_emb_layer = Embedding(Vt, demb, mask_zero=False, embeddings_regularizer=l2(l2_ratio),
name='decoder_emb')
decoder_emb = decoder_emb_layer(decoder_input)
# Dropout for decoder
if drop_p > 0.:
decoder_emb = Dropout(drop_p)(decoder_emb, training=training)
decoder_rnn = rnn(h, return_sequences=True, kernel_regularizer=l2(l2_ratio), name='decoder_rnn')
decoder_outputs = decoder_rnn(decoder_emb, initial_state=encoder_states)
if drop_p > 0.:
decoder_outputs = Dropout(drop_p)(decoder_outputs, training=training)
# Taken from https://arxiv.org/pdf/1805.01817.pdf for training with user annotations
if tied:
final_outputs = DenseTransposeTied(Vt, kernel_regularizer=l2(l2_ratio), name='outputs',
tied_to=decoder_emb_layer, activation='linear')(decoder_outputs)
else:
final_outputs = Dense(Vt, activation='linear', kernel_regularizer=l2(l2_ratio), name='outputs')(decoder_outputs)
# Add attention units
if attn:
contexts = Attention(units=h, kernel_regularizer=l2(l2_ratio), name='attention',
use_bias=False)([encoder_outputs, decoder_outputs])
if drop_p > 0.:
contexts = Dropout(drop_p)(contexts, training=training)
contexts_outputs = Dense(Vt, activation='linear', use_bias=False, name='context_outputs',
kernel_regularizer=l2(l2_ratio))(contexts)
final_outputs = Add(name='final_outputs')([final_outputs, contexts_outputs])
model = Model(inputs=[encoder_input, decoder_input], outputs=[final_outputs])
return model
|
ff3991dab1b4d6e8e5556f064356e1cce1320e78
| 20,231 |
from typing import List
import json
import requests
def get_available_tf_versions(include_prerelease: bool = False) -> List[str]:
"""Return available Terraform versions."""
tf_releases = json.loads(
requests.get("https://releases.hashicorp.com/index.json").text
)["terraform"]
tf_versions = sorted(
[k for k, _v in tf_releases["versions"].items()], # descending
key=LooseVersion,
reverse=True,
)
if include_prerelease:
return [i for i in tf_versions if i]
return [i for i in tf_versions if i and "-" not in i]
|
4be8820bb7cc2b5e5a649b8690fdef3d376a80ed
| 20,232 |
def relay_tagged(c, x, tag):
"""Implementation of tagged for Relay."""
assert tag.is_constant(int)
rtag = get_union_ctr(tag.value, None)
return rtag(c.ref(x))
|
b9d97051b3bfe13194a4123ec044e79d53c7587f
| 20,233 |
def get_vf(Xf, Nf):
"""
compute the 1-spectrogram of the projection of a frequency band of the mix at 1 frequency on some directions
:param Xf: T x I complex STFT of mix at a given f
:param Nf: Mp x Md x I projection matrix
:return: Vf: Mp x Ml x Nt magnitude spectrogram of projection
"""
Vf = np.tensordot(Nf, Xf, axes=(-1, 1))
Vf = np.abs(Vf)
return Vf
|
921bebfb4129f9ae7b0b2d5878c20eb957328c6c
| 20,234 |
def gen_data_code(stream, bits=ic.core_opts.data_bits):
# type: (ic.Stream, int) -> dict
"""
Create a similarity preserving ISCC Data-Code with the latest standard algorithm.
:param Stream stream: Input data stream.
:param int bits: Bit-length of ISCC Data-Code (default 64).
:return: ISCC Data-Code
:rtype: dict
"""
return gen_data_code_v0(stream, bits)
|
ede11d67f305b57a2734cc5898e22102c0db07bb
| 20,235 |
import os
def file_get_size_in_bytes(path: str) -> int:
"""Return the size of the file in bytes."""
return int(os.stat(path).st_size)
|
e9692259d2f5cb8f536cb8c6a0ae53e0b7c6efd1
| 20,236 |
def model_fn_builder(
bert_config,
init_checkpoint,
layer_indexes,
use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
model = modeling.BertModel(
config= bert_config,
is_training= False,
input_ids= features['input_ids'],
input_mask= features['input_mask'],
token_type_ids= features['input_type_ids'],
use_one_hot_embeddings= use_one_hot_embeddings)
if mode != tf.estimator.ModeKeys.PREDICT: raise ValueError("Only PREDICT modes are supported: %s" % (mode))
tvars = tf.trainable_variables()
scaffold_fn = None
(assignment_map,
initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
all_layers = model.get_all_encoder_layers()
predictions = {"unique_id": features["unique_ids"]}
for (i, layer_index) in enumerate(layer_indexes):
predictions["layer_output_%d" % i] = all_layers[layer_index]
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode= mode,
predictions= predictions,
scaffold_fn= scaffold_fn)
return output_spec
return model_fn
|
7a57c1557f4643c738a22b0baf4dbdce0fa06a3a
| 20,237 |
def git2pep440(ver_str):
"""
Converts a git description to a PEP440 conforming string
:param ver_str: git version description
:return: PEP440 version description
"""
dash_count = ver_str.count('-')
if dash_count == 0:
return ver_str
elif dash_count == 1:
return ver_str.split('-')[0] + "+dirty"
elif dash_count == 2:
tag, commits, sha1 = ver_str.split('-')
return "{}.post0.dev{}+{}".format(tag, commits, sha1)
elif dash_count == 3:
tag, commits, sha1, _ = ver_str.split('-')
return "{}.post0.dev{}+{}.dirty".format(tag, commits, sha1)
else:
raise RuntimeError("Invalid version string")
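# Hypothetical usage sketch (not part of the original snippet); the inputs mimic
# `git describe --dirty` output.
if __name__ == '__main__':
    print(git2pep440('1.2.3'))                    # 1.2.3
    print(git2pep440('1.2.3-14-g1a2b3c4'))        # 1.2.3.post0.dev14+g1a2b3c4
    print(git2pep440('1.2.3-14-g1a2b3c4-dirty'))  # 1.2.3.post0.dev14+g1a2b3c4.dirty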
|
7c4a5185305627c22118722b73b2facfa830875a
| 20,238 |
def rejoin(hyphenated, line):
"""Add hyphenated word part to line start, dehyphenating when required."""
first_part, hyphen = split_hyphen(hyphenated)
second_part, rest = split_first_token(line)
if is_same_vowel(first_part[-1], second_part[0]):
# same vowel before and after hyphen
keep_hyphen = True
elif not (first_part[-1].isalpha() and second_part[0].isalpha()):
# only join alphabetic with alphabetic char
keep_hyphen = True
elif not (is_regular_word(first_part) and
is_regular_word(second_part)):
# one or both are not "regular" words
keep_hyphen = True
elif is_proper_noun(second_part):
# Keep hyphen for proper noun compounds. Don't check first
# part as start-of-sentence capitalization may confound the
# capitalization pattern.
keep_hyphen = True
else:
keep_hyphen = False
if keep_hyphen:
rejoined = first_part + hyphen + second_part
else:
rejoined = first_part + second_part
return rejoined + rest
|
ced2bd2b791660e45741997e17ba3bdc0adfad6f
| 20,239 |
from typing import Union
import zmq
def msg_bytes(msg: Union[bytes, bytearray, zmq.Frame]) -> Union[bytes, bytearray]:
"""Return message frame as bytes.
"""
return msg.bytes if isinstance(msg, zmq.Frame) else msg
|
166865f5d51526cf70c767fc90e3d7b474501fb0
| 20,240 |
import math
def iucr_string(values):
"""Convert a central value (average) and its s.u. into an IUCr compliant number representation.
:param values: pair of central value (average) and s.u.
:type values: tuple((float, float))
:return: IUCr compliant representation
:rtype: str
"""
if values[1] == 0 or values[1] is None: # No or zero s.u. given
return str(values[0])
sig_pos = math.floor(math.log10(abs(values[1]))) # position of first significant digit
sig_3 = math.trunc(abs(values[1]) * 10 ** (2 - sig_pos)) / 10 ** (2 - sig_pos) # 1st three significant s.u. digits
sig_3 *= 10 ** -(sig_pos + 1) # s.u. moved directly behind decimal separator (final range: 0.100-0.999)
if sig_3 < 0.195: # round to two digits (final s.u. range: 0.10-0.19)
su = round(abs(values[1]), 1 - sig_pos)
avg = round(values[0], 1 - sig_pos)
sig_len = 2
elif sig_3 < 0.950: # round to one digit (final s.u. range: 0.2-0.9)
su = round(abs(values[1]), -sig_pos)
avg = round(values[0], -sig_pos)
sig_len = 1
else: # round to two digits and move forward (final s.u.: 0.10)
sig_pos += 1
su = round(abs(values[1]), 1 - sig_pos)
avg = round(values[0], 1 - sig_pos)
sig_len = 2
if sig_pos > 0: # only integral part for s.u. >= 1.95
sign_shift = -1 if values[0] < 0 else 0
avg_str = ('{:' + str(sig_pos + sign_shift) + '.0f}').format(avg).strip()
su_str = ('{:' + str(sig_pos) + '.0f}').format(su)
else: # fractional and possibly integral part for s.u. < 1.95
avg_str = ('{:.' + str(-sig_pos + sig_len - 1) + 'f}').format(avg)
su_str = '{:.0f}'.format(abs(su / 10 ** (sig_pos - sig_len + 1)))
return '{:s}({:s})'.format(avg_str, su_str)
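# Hypothetical usage sketch (not part of the original snippet).
if __name__ == '__main__':
    print(iucr_string((1.23456, 0.0123)))  # 1.235(12)
    print(iucr_string((1.23456, 0.034)))   # 1.23(3)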
|
c6c6602aa0ba481ed5467ed62df59a16f26a8091
| 20,241 |
def augment_signals(ds, augment_configs):
"""
    Apply all augmentation methods specified in `augment_configs` and return a dataset
    whose elements are drawn randomly from the augmented and unaugmented datasets.
"""
augmented_datasets = []
for conf in augment_configs:
aug_kwargs = {k: v for k, v in conf.items() if k not in {"type", "split"}}
if conf["type"] == "random_resampling":
augmented_datasets.append(augment_by_random_resampling(ds, **aug_kwargs))
elif conf["type"] == "additive_noise":
augmented_datasets.append(augment_by_additive_noise(ds, **aug_kwargs))
else:
logger.warning("Unknown signal augmentation type '%s', skipping", conf["type"])
# Sample randomly from the unaugmented dataset and all augmented datasets
return tf.data.experimental.sample_from_datasets([ds] + augmented_datasets)
|
5c07fcaefc277a4190336995440d1380f4263409
| 20,242 |
def sse_pack(d):
"""For sending sse to client. Formats a dictionary into correct form for SSE"""
buf = ''
for k in ['retry','id','event','data']:
if k in d.keys():
buf += '{}: {}\n'.format(k, d[k])
return buf + '\n'
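# Hypothetical usage sketch (not part of the original snippet).
if __name__ == '__main__':
    print(sse_pack({'id': 1, 'event': 'update', 'data': 'hello'}))
    # id: 1
    # event: update
    # data: hello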
|
a497c7ab919115d59d49f25abfdb9d88b0963af3
| 20,243 |
def proj_beta_model(r2d_kpc, n0, r_c, beta):
"""
Compute a projected beta model:
P(R) = \int n_e dl at given R
Parameters
----------
- r2d_kpc: array of projected radius at which to compute integration
- n0 : normalization
- r_c : core radius parameter
- beta : slope of the profile
Outputs
--------
- The projected profile in units of kpc times original profile
"""
return np.sqrt(np.pi) * n0 * r_c * gamma(1.5*beta - 0.5) / gamma(1.5*beta) * (1 + (r2d_kpc/r_c)**2)**(0.5-1.5*beta)
|
1d581d73da98833ca0df33df9de19b7cd71d7164
| 20,244 |
def read_raw_datafile(filename):
"""
Read and format the weather data from one csv file downloaded from the
climate.weather.gc.ca website.
"""
dataset = pd.read_csv(filename, dtype='str')
valid_columns = [
'Date/Time', 'Year', 'Month', 'Day', 'Max Temp (°C)', 'Min Temp (°C)',
'Mean Temp (°C)', 'Total Precip (mm)']
dataset['Date/Time'] = pd.to_datetime(
dataset['Date/Time'], format="%Y-%m-%d")
dataset = (
dataset
.drop(labels=[c for c in dataset.columns if c not in valid_columns],
axis=1)
.set_index('Date/Time', drop=True)
)
return dataset
|
08d56daa375e3a05d0bca9041c8e973ec80ebe11
| 20,245 |
def zeros(shape, backend=TensorFunctions):
"""
Produce a zero tensor of size `shape`.
Args:
shape (tuple): shape of tensor
backend (:class:`Backend`): tensor backend
Returns:
:class:`Tensor` : new tensor
"""
return Tensor.make([0] * int(operators.prod(shape)), shape, backend=backend)
|
1c3115613850819ece6f5a86142e7feb532930e7
| 20,246 |
def formatLabels(labels, total_time, time):
"""Format labels into vector where each value represents a window of
time seconds"""
time_threshold = 1
num_windows = total_time // time
Y = np.zeros(num_windows)
for label in labels:
start = label['start']
duration = label['duration']
end = start + duration
start_window = int(round(start / time))
end_window = int(round(end / time))
if end_window > start_window:
window_limit = (start_window + 1) * 30
if window_limit - start <= time_threshold:
start_window += 1
if end - window_limit <= time_threshold:
end_window -= 1
Y[start_window:end_window + 1] = 1
print("{} arousals".format(len(labels)))
return Y
|
714bfda3c8a997abb9389f74261bcc9aa2fb768b
| 20,247 |
import re
def line(line_def, **kwargs):
"""Highlights a character in the line"""
def replace(s):
return "(%s)" % ansi.aformat(s.group()[1:], attrs=["bold", ])
return ansi.aformat(
re.sub('@.?', replace, line_def),
**kwargs)
|
755dee2147f606e57825314642dd35cc713cb9ff
| 20,248 |
from datetime import datetime, timedelta
def ceil_datetime_at_minute_interval(timestamp, minute):
"""
From http://stackoverflow.com/questions/13071384/python-ceil-a-datetime-to-next-quarter-of-an-hour
:param timestamp:
:type timestamp: datetime.datetime
:param minute:
:type minute: int
:return:
:rtype: datetime.datetime
"""
# how many secs have passed this hour
nsecs = timestamp.minute * 60 + timestamp.second + timestamp.microsecond * 1e-6
    # length of the rounding interval in seconds
seconds = minute * 60
delta = (nsecs // seconds) * seconds + seconds - nsecs
if delta < seconds:
        return timestamp + timedelta(seconds=delta)
else:
return timestamp
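# Hypothetical usage sketch (not part of the original snippet).
if __name__ == '__main__':
    print(ceil_datetime_at_minute_interval(datetime(2020, 1, 1, 10, 7, 30), 15))
    # 2020-01-01 10:15:00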
|
7e9522ac8eebdab531d13d842a5018f3813ab86c
| 20,249 |
import requests
import json
def make_response(
activated=True,
expires_in=0,
auto_activation_supported=True,
oauth_server=None,
DATA=None,
):
"""
Helper for making ActivationRequirementsResponses with known fields
"""
DATA = DATA or []
data = {
"activated": activated,
"expires_in": expires_in,
"oauth_server": oauth_server,
"DATA": DATA,
"auto_activation_supported": auto_activation_supported,
}
response = requests.Response()
response.headers["Content-Type"] = "application/json"
response._content = json.dumps(data).encode("utf-8")
return ActivationRequirementsResponse(
GlobusHTTPResponse(response, client=mock.Mock())
)
|
eaf971695752dfd3d0e4b414d3085c3d60349534
| 20,250 |
def post_create_manager_config(
api_client,
id,
log_files=None,
configuration_files=None,
ports=None,
process_manager=None,
executables=None,
**kwargs
): # noqa: E501
"""post_create_manager_config # noqa: E501
Create a new plugin manager configuration. If no params are provided,
a vanilla example configuration will be created. See docs for param specs:
https://docs.cohesive.net/docs/network-edge-plugins/plugin-manager/ # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> response = await api.post_create_manager_config(client, id, async_req=True)
:param int id: ID for Plugin instance (running container) (required)
:param log_files list: List of Log File objects
:param configuration_files list: List of Configuration File objects
:param ports list: List of Port objects
:param process_manager Dict: Process Manager object
:param executables list: List of Executable objects
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: APIResponse or awaitable if async
"""
local_var_params = locals()
request_params = [
"log_files",
"configuration_files",
"ports",
"process_manager",
"executables",
]
collection_formats = {}
path_params = {"id": id}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = {}
for param in [p for p in request_params if local_var_params.get(p) is not None]:
body_params[param] = local_var_params[param]
# HTTP header `Accept`
header_params["Accept"] = api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params["Content-Type"] = api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501
return api_client.call_api(
"/plugin-instances/{id}/manager",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
|
c3f27767a6b94604390b683b4169d7ecc54e945f
| 20,251 |
import re
import os
def get_long_desc() -> str:
""" read long description and adjust master with version for badges or links
only for release versions (x.y.z)
"""
get_version()
release = re.compile(r'(\d+\.){0,2}\d+$')
with open(os.path.join(wd, 'README.md')) as fd:
if _version == '0.0.0' or not release.match(_version):
_long_description = fd.read()
else:
lines = fd.readlines()
for i, line in enumerate(lines):
if not line.startswith('['):
break
if 'travis' in line:
lines[i] = line.replace('master', _version)
elif 'codecov' in line:
commit = get_commit()
if commit != '':
lines[i] = line.replace('branch/master',
'commit/' + commit)
_long_description = ''.join(lines)
return _long_description
|
6a2366a7555d654897271a43cf680e7555cc03a1
| 20,252 |
def get_current_user():
"""Load current user or use anon user."""
return auth.User(
uuid=None,
login='anon',
password='',
name='anon',
visiblity=None,
language=None,
last_seen=None,
)
|
2074b73d970e0549726b887e67b9c7b36db6e463
| 20,253 |
def validate(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ*'):
"""Check whether the check digit is valid."""
try:
valid = checksum(number, alphabet) == 1
except Exception: # noqa: B902
raise InvalidFormat()
if not valid:
raise InvalidChecksum()
return number
|
1d521eaa369c6436d8c8cfc93889d769ca15c193
| 20,254 |
from typing import Union
def pct_chg(x: Union[np.ndarray, pd.Series]) -> np.ndarray:
"""Percentage change between the current and a prior element.
Args:
x: A numpy.ndarray or pandas.Series object
Returns:
A numpy.ndarray with the results
"""
x = x.astype("float64")
if isinstance(x, pd.DataFrame):
pc = x.pct_change().values.reshape(-1, 1)
else:
x = np.reshape(x, (-1,))
x_df = pd.Series(x, name="x")
pc = x_df.pct_change().values.reshape(-1, 1)
return pc
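# Hypothetical usage sketch (not part of the original snippet); assumes `numpy as np`
# and `pandas as pd` are imported at module level, as the function body already requires.
if __name__ == '__main__':
    print(pct_chg(np.array([100.0, 110.0, 99.0])).ravel())  # [ nan  0.1 -0.1]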
|
253a8223c58fa9d89d50c554f54051724961c3bb
| 20,255 |
def calc_gram_matrix(input_mat):
"""
    The paper defines the Gram matrix as:
        G_{ij}^l = \sum_k F_{ik}^l F_{jk}^l
    where i and j index filters and k indexes positions within each filter.
    If matrix A is composed of column vectors a1, a2, a3, ...,
    e.g. A = [a1, a2, a3, ...],
    then the Gram matrix G can be calculated as $G = A^T \cdot A$.
    inputs:
        It takes input shape of [1, height, width, channel]
    returns:
        [channel, channel], normalized by the feature map size (height * width)
"""
channel_size = input_mat.shape[-1]
# From [1, height, width, channel] to [1, height * width, channel]
vectorized_input = tf.reshape(input_mat, [1, -1, channel_size])
# Transform it to shape of [channel, height * width]
mat_2d = vectorized_input[0, :, :]
F = tf.transpose(mat_2d)
# Calculate gram matrix
gram_mat = tf.linalg.matmul(F, mat_2d) # this produce the shape of [channel, channel]
feature_map_size = input_mat.shape[1] * input_mat.shape[2]
return gram_mat / feature_map_size
|
386db7deec87127f8c2872e091283fb555611f04
| 20,256 |
def parse_annotations_with_food_part_template(annotations, premise):
""" """
annotations_aggregated = []
annotations_reported = []
rows_grouped_by_premises = annotations[annotations["premise"] == premise]
for hypothesis in rows_grouped_by_premises["hypothesis"].unique():
rows_grouped_by_hypotheses = rows_grouped_by_premises[
rows_grouped_by_premises["hypothesis"] == hypothesis
]
ners = rows_grouped_by_hypotheses[f"correct_ner"]
n_entail = len(ners[ners == "Entails"])
n_non_entail = len(ners[ners == "Not Entails/Error"])
        # Report to the annotator if the annotations are evenly split
        # (no consensus).
if n_entail == n_non_entail:
annotations_reported.append(
[rows_grouped_by_hypotheses.iloc[0]["id"], premise, hypothesis]
)
continue
correct_ner = "Entails" if n_entail > n_non_entail else "Not Entails"
if rows_grouped_by_hypotheses["premise"].values[0][:3] == "<s>":
premise_filtered = rows_grouped_by_hypotheses["premise"].values[0][
3:
]
else:
premise_filtered = rows_grouped_by_hypotheses["premise"].values[0]
id_ = rows_grouped_by_hypotheses.iloc[0]["id"]
food = rows_grouped_by_hypotheses.iloc[0]["hypothesis_food"]
food_id = rows_grouped_by_hypotheses.iloc[0]["hypothesis_food_id"]
food_part = rows_grouped_by_hypotheses.iloc[0]["hypothesis_food_part"]
food_part_id = None
chemical = rows_grouped_by_hypotheses.iloc[0]["hypothesis_chemical"]
chemical_id = rows_grouped_by_hypotheses.iloc[0][
"hypothesis_chemical_id"
]
conc_value = None
conc_value_id = None
conc_unit = None
conc_unit_id = None
annotations_aggregated.append(
[
id_,
premise_filtered,
hypothesis.replace("(whole plant)", ""),
correct_ner,
n_entail,
n_non_entail,
food,
food_id,
food_part,
food_part_id,
chemical,
chemical_id,
conc_value,
conc_value_id,
conc_unit,
conc_unit_id,
]
)
return annotations_aggregated, annotations_reported
|
1bf17aa605b9ec581ec1379ed0ae7f7340461a19
| 20,257 |
def filter_dictionary(dictionary, filter_func):
"""
    Returns the key of the first item in `dictionary` whose value passes filter_func.
    filter_func can be either a callable or a value:
    - if callable, filtering is checked with `filter_func(element_value)`
    - if a value, filtering is checked with `element_value == filter_func`
    :param dictionary:
    :param filter_func:
:return:
>>> filter_dictionary({'arg': 'test'}, 'test')
'arg'
>>> filter_dictionary({}, 'test')
>>> def is_test(value):
... return value == 'test'
>>> filter_dictionary({'arg': 'test'}, is_test)
'arg'
"""
if not callable(filter_func):
test_func = lambda x: x == filter_func
else:
test_func = filter_func
for key, value in dictionary.iteritems():
if test_func(value):
return key
|
f5fa77a51241323845eb9a59adc9df7f662f287b
| 20,258 |
def restart_workflow(workflow_id, clear_data=False, delete_files=False):
"""Restart a workflow with the latest spec.
Clear data allows user to restart the workflow without previous data."""
workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first()
WorkflowProcessor.reset(workflow_model, clear_data=clear_data, delete_files=delete_files)
return get_workflow(workflow_model.id)
|
70c5df4b31559830c94d01cfb86e9a68070f63b1
| 20,259 |
def spike_lmax(S, Q):
"""Maximum spike given a perturbation"""
S2 = S * S
return ((1.0 / Q) + S2) * (1 + (1.0 / S2))
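# Hypothetical usage sketch (not part of the original snippet): with S=2 and Q=0.5,
# ((1/Q) + S^2) * (1 + 1/S^2) = (2 + 4) * 1.25 = 7.5.
if __name__ == '__main__':
    print(spike_lmax(2.0, 0.5))  # 7.5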
|
ba845e3255e4d3eb5116d279a209f4424062603b
| 20,260 |
def get_engine():
"""Returns the db engine."""
if not hasattr(g, 'sqlite_engine'):
g.sqlite_engine = create_engine('sqlite:///' + app.config['DATABASE'], echo=True)
return g.sqlite_engine
|
579d7110cd27787095c6ede2f25841cbac5e8ca0
| 20,261 |
def is_on_curve(point):
"""Returns True if the given point lies on the elliptic curve."""
if point is None:
# None represents the point at infinity.
return True
x, y = point
return (y * y - x * x * x - curve.a * x - curve.b) % curve.p == 0
|
563d567c7dadc9d23bf6467c4fe30c697b0fd9fa
| 20,262 |
from typing import Dict
from typing import List
from typing import Tuple
def find_closest_point(
odlc: Dict[str, float],
boundary_points: List[Dict[str, float]],
obstacles: List[Dict[str, float]],
) -> Tuple[Dict[str, float], List[float]]:
"""Finds the closest safe point to the ODLC while staying within the flight boundary
Parameters
----------
odlc : Dict[str, float]
Point data for the ODLC object
boundary_points : List[Dict[str, float]]
Point data which makes up the flight boundary
obstacles : List[Dict[str, float]]
Point data for the obstacles
Returns
-------
Tuple[Dict[str, float], List[float]]
Closest safe point, and the shrunken boundary (for plotting)
"""
poly_points = [(point["utm_x"], point["utm_y"]) for point in boundary_points]
boundary_shape = Polygon(poly_points)
odlc_shape = Point(odlc["utm_x"], odlc["utm_y"])
for obstacle in obstacles:
# create obstacle as shapely shape
circle = Point(obstacle["utm_x"], obstacle["utm_y"]).buffer(obstacle["radius"]).boundary
obstacle_shape = Polygon(circle)
# remove obstacle area from boundary polygon
boundary_shape = boundary_shape.difference(obstacle_shape)
# scale down boundary by 1% to add a safety margin
boundary_shape = scale_polygon(boundary_shape, 0.01)
p_1, _ = nearest_points(
boundary_shape, odlc_shape
) # point returned in same order as input shapes
closest_point = p_1
zone_number = odlc["utm_zone_number"]
zone_letter = odlc["utm_zone_letter"]
return (
{
"utm_x": closest_point.x,
"utm_y": closest_point.y,
"utm_zone_number": zone_number,
"utm_zone_letter": zone_letter,
"latitude": utm.to_latlon(closest_point.x, closest_point.y, zone_number, zone_letter)[
0
],
"longitude": utm.to_latlon(closest_point.x, closest_point.y, zone_number, zone_letter)[
1
],
},
list(zip(*boundary_shape.exterior.coords.xy)), # pylint: disable=maybe-no-member
)
|
c07f2a1922f5083d9155da12a7273208ae440cf5
| 20,263 |
from typing import Tuple
from typing import Dict
from typing import List
def _get_band_edge_indices(
band_structure: BandStructure,
tol: float = 0.005,
) -> Tuple[Dict[Spin, List[int]], Dict[Spin, List[int]]]:
"""
Get indices of degenerate band edge states, within a tolerance.
Parameters
----------
band_structure : BandStructure
A band structure.
tol : float
Degeneracy tolerance in meV.
"""
vbm_energy = band_structure.get_vbm()["energy"]
cbm_energy = band_structure.get_cbm()["energy"]
vbm_band_indices = {}
cbm_band_indices = {}
for spin, spin_energies in band_structure.bands.items():
vb_idxs = np.where(
np.any(
(spin_energies > vbm_energy - tol)
& (spin_energies < band_structure.efermi),
axis=1,
)
)[0]
cb_idxs = np.where(
np.any(
(spin_energies < cbm_energy + tol)
& (spin_energies > band_structure.efermi),
axis=1,
)
)[0]
vbm_band_indices[spin] = vb_idxs.tolist()
cbm_band_indices[spin] = cb_idxs.tolist()
return vbm_band_indices, cbm_band_indices
|
bd0ed67b879c627c77e35c519dbfcf86890523e1
| 20,264 |
import logging
import os
def make_val_dataloader(data_config, data_path, task=None, data_strct=None):
""" Return a data loader for a validation set """
if not "val_data" in data_config or data_config["val_data"] is None:
print_rank("Validation data list is not set", loglevel=logging.DEBUG)
return None
loader_type = detect_loader_type(data_config["val_data"], data_config["loader_type"])
if loader_type == 'text':
TextDataLoader = get_exp_dataloader(task)
val_dataloader = TextDataLoader(
data = data_strct if data_strct is not None else os.path.join(data_path, data_config["val_data"]),
user_idx = 0,
mode = 'val',
args=data_config
)
else:
raise NotImplementedError("Not supported loader_type={} audio_format={}".format(loader_type, data_config["audio_format"]))
return val_dataloader
|
cbcb53fc3a9f8a27b2c371df693b5abe059d3f37
| 20,265 |
def _get_urls():
"""Stores the URLs for histology file downloads.
Returns
-------
dict
Dictionary with template names as keys and urls to the files as values.
"""
return {
"fsaverage": "https://box.bic.mni.mcgill.ca/s/znBp7Emls0mMW1a/download",
"fsaverage5": "https://box.bic.mni.mcgill.ca/s/N8zstvuRb4sNcSe/download",
"fs_LR_64k": "https://box.bic.mni.mcgill.ca/s/6zKHcg9xXu5inPR/download",
}
|
697cf29b3caaeda079014fd342fbe7ad4c650d30
| 20,266 |
import struct
def guid_bytes_to_string(stream):
"""
Read a byte stream to parse as GUID
:ivar bytes stream: GUID in raw mode
:returns: GUID as a string
:rtype: str
"""
Data1 = struct.unpack("<I", stream[0:4])[0]
Data2 = struct.unpack("<H", stream[4:6])[0]
Data3 = struct.unpack("<H", stream[6:8])[0]
Data4 = stream[8:16]
return "%08x-%04x-%04x-%s-%s" % (Data1, Data2, Data3, "".join("%02x" % x for x in Data4[0:2]), "".join("%02x" % x for x in Data4[2:]))
|
23f013b48806d1d2d4b4bec4ab4a5fcf6fc2e6b0
| 20,267 |
def thaiword_to_time(text: str, padding: bool = True) -> str:
"""
Convert Thai time in words into time (H:M).
:param str text: Thai time in words
:param bool padding: Zero padding the hour if True
:return: time string
:rtype: str
:Example:
thaiword_to_time"บ่ายโมงครึ่ง")
# output:
# 13:30
"""
keys_dict = list(_DICT_THAI_TIME.keys())
text = text.replace("กว่า", "").replace("ๆ", "").replace(" ", "")
_i = ["ตีหนึ่ง", "ตีสอง", "ตีสาม", "ตีสี่", "ตีห้า"]
_time = ""
for affix in _THAI_TIME_AFFIX:
if affix in text and affix != "ตี":
_time = text.replace(affix, affix + "|")
break
elif affix in text and affix == "ตี":
for j in _i:
if j in text:
_time = text.replace(j, j + "|")
break
else:
pass
if "|" not in _time:
raise ValueError("Cannot find any Thai word for time affix.")
_LIST_THAI_TIME = _time.split("|")
del _time
hour = _THAI_TIME_CUT.word_tokenize(_LIST_THAI_TIME[0])
minute = _LIST_THAI_TIME[1]
if len(minute) > 1:
minute = _THAI_TIME_CUT.word_tokenize(minute)
else:
minute = 0
text = ""
# determine hour
if hour[-1] == "นาฬิกา" and hour[0] in keys_dict and hour[:-1]:
text += str(thaiword_to_num("".join(hour[:-1])))
elif hour[0] == "ตี" and hour[1] in keys_dict:
text += str(_DICT_THAI_TIME[hour[1]])
elif hour[-1] == "โมงเช้า" and hour[0] in keys_dict:
if _DICT_THAI_TIME[hour[0]] < 6:
text += str(_DICT_THAI_TIME[hour[0]] + 6)
else:
text += str(_DICT_THAI_TIME[hour[0]])
elif (hour[-1] == "โมงเย็น" or hour[-1] == "โมง") and hour[0] == "บ่าย":
text += str(_DICT_THAI_TIME[hour[1]] + 12)
elif (hour[-1] == "โมงเย็น" or hour[-1] == "โมง") and hour[0] in keys_dict:
text += str(_DICT_THAI_TIME[hour[0]] + 12)
elif hour[-1] == "เที่ยงคืน":
text += "0"
elif hour[-1] == "เที่ยงวัน" or hour[-1] == "เที่ยง":
text += "12"
elif hour[0] == "บ่ายโมง":
text += "13"
elif hour[-1] == "ทุ่ม":
if len(hour) == 1:
text += "19"
else:
text += str(_DICT_THAI_TIME[hour[0]] + 18)
if not text:
raise ValueError("Cannot find any Thai word for hour.")
if padding and len(text) == 1:
text = "0" + text
text += ":"
# determine minute
if minute:
n = 0
for affix in minute:
if affix in keys_dict:
if affix != "สิบ":
n += _DICT_THAI_TIME[affix]
elif affix == "สิบ" and n != 0:
n *= 10
elif affix == "สิบ" and n == 0:
n += 10
if n != 0 and n > 9:
text += str(n)
else:
text += "0" + str(n)
else:
text += "00"
return text
|
235874bf252908eeba96f17b2ccc4d7ab32b90ce
| 20,268 |
import torch
import os
import time
def _build_index_mappings(name, data_prefix, documents, sizes,
num_samples, seq_length, seed):
"""Build doc-idx, sample-idx, and shuffle-idx.
doc-idx: is an array (ordered) of documents to be used in training.
sample-idx: is the start document index and document offset for each
training sample.
shuffle-idx: maps the sample index into a random index into sample-idx.
"""
# Number of tokens in each epoch and number of required epochs.
tokens_per_epoch = _num_tokens(documents, sizes, seq_length)
num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples)
# rng state
np_rng = np.random.RandomState(seed=seed)
# Filename of the index mappings.
_filename = data_prefix
_filename += '_{}_indexmap'.format(name)
_filename += '_{}ns'.format(num_samples)
_filename += '_{}sl'.format(seq_length)
_filename += '_{}s'.format(seed)
doc_idx_filename = _filename + '_doc_idx.npy'
sample_idx_filename = _filename + '_sample_idx.npy'
shuffle_idx_filename = _filename + '_shuffle_idx.npy'
# Build the indexed mapping if not exist.
if torch.distributed.get_rank() == 0:
if (not os.path.isfile(doc_idx_filename)) or \
(not os.path.isfile(sample_idx_filename)) or \
(not os.path.isfile(shuffle_idx_filename)):
print_rank_0(' > WARNING: could not find index map files, building '
'the indices on rank 0 ...')
# For the last epoch, decide whether include the entire epoch
# in the global shuffle or not.
# If we need only one epoch, then separating last epoch does
# not mean anything.
if num_epochs == 1:
separate_last_epoch = False
print(' > only one epoch required, setting '
'separate_last_epoch to False', flush=True)
else:
# Get the number of samples for the last epoch
num_samples_from_epochs_minus_one = (
(num_epochs - 1) * tokens_per_epoch - 1) // seq_length
last_epoch_num_samples = num_samples - \
num_samples_from_epochs_minus_one
assert last_epoch_num_samples >= 0, \
'last epoch number of samples should be non-negative.'
num_samples_per_epoch = (tokens_per_epoch - 1) // seq_length
assert last_epoch_num_samples < (num_samples_per_epoch + 1), \
'last epoch number of samples exceeded max value.'
# If we have less than 80% of the samples for the last epoch,
                # separate out the epoch and treat it differently.
# Note: the 80% number is just based on common sense and can
# be adjusted if needed.
separate_last_epoch = (last_epoch_num_samples <
int(0.80 * num_samples_per_epoch))
if separate_last_epoch:
string = ' > last epoch number of samples ({}) is smaller '\
'than 80% of number of samples per epoch ({}), '\
'setting separate_last_epoch to True'
else:
string = ' > last epoch number of samples ({}) is larger '\
'than 80% of number of samples per epoch ({}), '\
'setting separate_last_epoch to False'
print(string.format(last_epoch_num_samples,
num_samples_per_epoch), flush=True)
# doc-idx.
start_time = time.time()
doc_idx = _build_doc_idx(documents, num_epochs, np_rng,
separate_last_epoch)
np.save(doc_idx_filename, doc_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save doc-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time))
# sample-idx.
start_time = time.time()
# Use C++ implementation for speed.
# First compile and then import.
# from megatron.data import helpers
assert doc_idx.dtype == np.int32
assert sizes.dtype == np.int32
# sample_idx = helpers.build_sample_idx(sizes, doc_idx, seq_length,
# num_epochs, tokens_per_epoch)
sample_idx = _build_sample_idx(sizes, doc_idx, seq_length,
num_epochs, tokens_per_epoch)
np.save(sample_idx_filename, sample_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save sample-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time))
# shuffle-idx.
start_time = time.time()
            # -1 is due to data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
if separate_last_epoch:
num_samples_ = num_samples_from_epochs_minus_one
else:
num_samples_ = sample_idx.shape[0] - 1
shuffle_idx = _build_shuffle_idx(num_samples_,
sample_idx.shape[0] - 1, np_rng)
np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save shuffle-idx mapping'
' (seconds): {:4f}'.format(time.time() - start_time))
# This should be a barrier but nccl barrier assumes
# device_index=rank which is not the case for model
# parallel case
counts = torch.cuda.LongTensor([1])
torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
assert counts[0].item() == (
torch.distributed.get_world_size() //
torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group()))
# Load mappings.
start_time = time.time()
print_rank_0(' > loading doc-idx mapping from {}'.format(
doc_idx_filename))
doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' > loading sample-idx mapping from {}'.format(
sample_idx_filename))
sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' > loading shuffle-idx mapping from {}'.format(
shuffle_idx_filename))
shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
time.time() - start_time))
print_rank_0(' total number of samples: {}'.format(
sample_idx.shape[0]))
print_rank_0(' total number of epochs: {}'.format(num_epochs))
return doc_idx, sample_idx, shuffle_idx
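# A minimal sketch of how these three mappings are typically consumed when
# fetching training sample `idx`; `indexed_dataset` and the helper name below
# are assumptions for illustration, not part of the code above.
def _get_sample(idx, doc_idx, sample_idx, shuffle_idx, indexed_dataset):
    # Undo the shuffle to find the position in sample_idx.
    idx = shuffle_idx[idx]
    # Sample i spans [sample_idx[i], sample_idx[i+1]); each entry holds
    # (index into doc_idx, token offset within that document).
    doc_index_f, offset_f = sample_idx[idx]
    doc_index_l, offset_l = sample_idx[idx + 1]
    if doc_index_f == doc_index_l:
        # The sample lies entirely within one document.
        return indexed_dataset.get(doc_idx[doc_index_f], offset=offset_f,
                                   length=offset_l - offset_f + 1)
    # Otherwise stitch pieces from consecutive documents together.
    pieces = [indexed_dataset.get(doc_idx[doc_index_f], offset=offset_f)]
    for i in range(doc_index_f + 1, doc_index_l):
        pieces.append(indexed_dataset.get(doc_idx[i]))
    pieces.append(indexed_dataset.get(doc_idx[doc_index_l], length=offset_l + 1))
    return np.concatenate(pieces)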
|
396299fffff56b6edcb1be075561dd4a20f1f326
| 20,269 |
import numpy as np
from random import random
def get_grains(ng,gdmin,angrange0,angrange1,two_dim):
"""
Get specified number of grains with conditions of minimum distance and angle range.
"""
dang = (angrange1-angrange0) /180.0 *np.pi /ng
grains= []
ig = 0
dmin = 1e+30
while True:
if ig >= ng: break
pi= np.zeros((3,))
ai= np.zeros((3,))
pi[0]= random()
pi[1]= random()
if two_dim:
too_close = False
dminl = 1e+30
for i,gi in enumerate(grains):
dlt = pi - gi.point
dlt = dlt - np.round(dlt)
d = np.sqrt( dlt[0]**2+dlt[1]**2 )
dminl = min(d,dminl)
if d < gdmin:
too_close = True
break
if too_close:
continue
dmin = min(dminl,dmin)
pi[2]= 0.0
ai[0]= 0.0
ai[1]= 0.0
ai[2]= angrange0 +dang*ig +random()*dang
else:
pi[2]= random()
too_close = False
for gi in grains:
dlt = pi - gi.point
dlt = dlt - np.round(dlt)
d = np.sqrt( dlt[0]**2+dlt[1]**2+dlt[2]**2 )
if d < gdmin:
                    too_close = True
break
if too_close:
continue
ai[0]= random()*np.pi*2 -np.pi
ai[1]= random()*np.pi/2 -np.pi/2
ai[2]= random()*np.pi*2 -np.pi
print(' point,angle =',pi,ai)
gi= Grain(pi,ai)
grains.append(gi)
ig += 1
print(' Minimum distance between grains and limit = ',dmin,gdmin)
return grains
|
591858e16353d5363767c7816d59f64b890929f8
| 20,270 |
from typing import List
def two_loops(N: List[int]) -> List[int]:
"""Semi-dynamic programming approach using O(2n):
- Calculate the product of all items before item i
- Calculate the product of all items after item i
- For each item i, multiply the products for before and after i
L[i] = N[i-1] * L[i-1] if i != 0 else 1
R[j] = N[j+1] * R[j+1] if j != (len(N) - 1) else 1
A[i] = L[i] * R[i]
N[0] = 3
N[1] = 7
N[2] = 1
N[3] = 4
N[4] = 8
N[5] = 9
L[0] = 1 = 1
L[1] = (1) * 3 = 3
L[2] = (3) * 7 = 21
L[3] = (21) * 1 = 21
L[4] = (21) * 4 = 84
L[5] = (84) * 8 = 672
R[5] = 1 = 1
R[4] = (1) * 9 = 9
R[3] = (9) * 8 = 72
R[2] = (72) * 4 = 288
R[1] = (288) * 1 = 288
R[0] = (288) * 7 = 2016
A = [L[0]*R[0], L[1]*R[1], L[2]*R[2], L[3]*R[3], L[4]*R[4], L[5]*R[5]]
A = [2016, 864, 6048, 1512, 756, 672]
"""
items_len = len(N)
of_left = [1 for _ in range(items_len)]
of_right = [1 for _ in range(items_len)]
for i in range(items_len):
j = (items_len - 1) - i # Invert i; start counting from len(N) to 0.
of_left[i] = N[i-1] * of_left[i-1] if i != 0 else 1
        of_right[j] = N[j+1] * of_right[j+1] if j != (items_len - 1) else 1
return list(map(lambda p: p[0] * p[1], zip(of_left, of_right)))
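# Usage check reproducing the worked example from the docstring above:
assert two_loops([3, 7, 1, 4, 8, 9]) == [2016, 864, 6048, 1512, 756, 672]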
|
3620aa19833b2e967b2c295fa53ba39bf3b6b70d
| 20,271 |
import os
import subprocess
def make_slurm_queue(dirmain, print_level=0):
"""get queue list from slurm """
# Check slurm
list_ids = []
list_scripts = []
usr = os.environ.get('USER')
proc = subprocess.run(['squeue', "-u", usr, "-O", "jobid:.50,name:.150,stdout:.200"], capture_output=True)
all_info_user = proc.stdout.decode('utf-8').split('\n')
all_info_user = [x for x in all_info_user if x != '']
if print_level == 2:
print("Number of Slurm tasks running:", len(all_info_user) - 1)
for i in range(1, len(all_info_user)):
line_id = all_info_user[i][:50].strip()
line_bashname = all_info_user[i][50:200].strip()
line_jobdir = all_info_user[i][200:].strip()
line_jobdir = os.path.realpath(line_jobdir)
# Check bash name plus directory via slurm output (requires slurm submit as above)
if (os.path.exists(os.path.join(dirmain, line_bashname)) and os.path.basename(dirmain) == os.path.basename(
os.path.dirname(line_jobdir))):
list_ids.append(line_id)
list_scripts.append(line_bashname)
if print_level >= 3:
print("ID: ", line_id, ", Script: ", line_bashname)
if print_level == 2:
print("Number of Slurms tasks running for this directory:", len(list_scripts))
return list_ids, list_scripts
|
5ce9da0a90720175d0730a6c8ca099e3f54e3667
| 20,272 |
from pathlib import Path
from typing import Optional, Union
import re

from packaging.version import Version  # assumed source of `Version`
def read_renku_version_from_dockerfile(path: Union[Path, str]) -> Optional[str]:
"""Read RENKU_VERSION from the content of path if a valid version is available."""
path = Path(path)
if not path.exists():
return
docker_content = path.read_text()
m = re.search(r"^\s*ARG RENKU_VERSION=(.+)$", docker_content, flags=re.MULTILINE)
if not m:
return
try:
return str(Version(m.group(1)))
except ValueError:
return
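# Usage sketch: write a throwaway Dockerfile and read the version back
# (the image name and path here are illustrative only).
import tempfile

with tempfile.TemporaryDirectory() as _tmp:
    _dockerfile = Path(_tmp) / "Dockerfile"
    _dockerfile.write_text("FROM example/base\nARG RENKU_VERSION=1.2.3\n")
    assert read_renku_version_from_dockerfile(_dockerfile) == "1.2.3"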
|
9b9c343db6ca0604e04c90cb6a51be9bba3e0b1c
| 20,273 |
import numpy as np
def zero_mean(framed):
"""Calculate zero-mean of frames"""
mean = np.mean(framed, axis=1)
framed = framed - mean[np.newaxis, :].T
return framed
|
f970522327b019cfddc42a5764f9854eaf681378
| 20,274 |
from django.utils import timezone  # assumed; `timezone` is not defined in this snippet
def now():
    """
    Return the current time.
    """
return timezone.now()
|
86dee41549eeef546f5447ed657eb849c36239cc
| 20,275 |
import os
import numpy as np
from scipy.interpolate import interp1d
def calc_thickness_of_wing(XFOILdirectory, chordArray2):
"""
calculation wing thickness list
"""
# open airfoil data
data = io_fpa.open2read(os.path.join("data", XFOILdirectory, "foil.dat"))
# make airfoil list
xlist = [float(i.split()[0]) for i in data[1:]]
ylist = [float(i.split()[1]) for i in data[1:]]
# divide upper and lower
zeropoint = None
for i in range(len(xlist)):
if xlist[i] == ylist[i]:
zeropoint = i
upperx = np.array(xlist[:zeropoint+1])[::-1]
uppery = np.array(ylist[:zeropoint+1])[::-1]
lowerx = np.array(xlist[zeropoint:])
lowery = np.array(ylist[zeropoint:])
    # interpolate the upper and lower surfaces so their y positions can be evaluated at the same x stations
linear_interp_upper = interp1d(upperx, uppery)
linear_interp_lower = interp1d(lowerx, lowery)
xx = np.linspace(0., 1., 100)
newylower = linear_interp_lower(xx)
newyupper = linear_interp_upper(xx)
thickness = newyupper - newylower
maxthickness = max(thickness)
# make thickness list of span direction
thickness = [i * maxthickness for i in chordArray2]
return thickness
# plt.plot(self.yy, self.thickness)
# plt.savefig(self.dirname + "/" + "thickness")
|
67d9b5fa03b9dec00739a3e4e4fffa82a539848c
| 20,276 |
def rgb_to_hex(r, g, b):
"""Turn an RGB float tuple into a hex code.
Args:
r (float): R value
g (float): G value
b (float): B value
Returns:
str: A hex code (no #)
"""
r_int = round((r + 1.0) / 2 * 255)
g_int = round((g + 1.0) / 2 * 255)
b_int = round((b + 1.0) / 2 * 255)
r_txt = "%02x" % r_int
b_txt = "%02x" % b_int
g_txt = "%02x" % g_int
return r_txt + g_txt + b_txt
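# Quick usage check for the [-1.0, 1.0] float convention documented above:
assert rgb_to_hex(-1.0, -1.0, -1.0) == "000000"
assert rgb_to_hex(1.0, 1.0, 1.0) == "ffffff"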
|
a5181c475c798bbd03020d81da10d8fbf86cc396
| 20,277 |
import numpy as np
def calc_R(x,y, xc, yc):
"""
calculate the distance of each 2D points from the center (xc, yc)
"""
return np.sqrt((x-xc)**2 + (y-yc)**2)
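# Example: distances of points (1, 1) and (4, 5) from the center (1, 1).
assert np.allclose(calc_R(np.array([1.0, 4.0]), np.array([1.0, 5.0]), 1.0, 1.0),
                   [0.0, 5.0])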
|
7a10251f3048a3d7c07f6fd886225b841e19a1a2
| 20,278 |
def get_followers_list(user_url, driver, followers=True):
"""
Returns a list of users who follow or are followed by a user.
Parameters
----------
user_url: string
driver: selenium.webdriver
followers: bool
If True, gets users who are followers of this user.
If False, gets users who this user follows.
"""
if followers:
url = user_url + '/followers/'
else:
url = user_url + '/following/'
process = lambda soup: [
str(item.find_all('a',
{'class': 'userWrapper'})[0].get('href'))
for item in soup.select('div.item')
]
    user_urls = process_whole_page(driver, url, process)
    return user_urls
|
fbb592c29b66b41b51d67a938659177ead9a13c6
| 20,279 |
from typing import Optional

import numpy as np
from open_spiel.python.algorithms import get_all_states  # assumed source of `get_all_states`
def policy_to_dict(player_policy,
game,
all_states=None,
state_to_information_state=None,
                   player_id: Optional[int] = None):
"""Converts a Policy instance into a tabular policy represented as a dict.
This is compatible with the C++ TabularExploitability code (i.e.
pyspiel.exploitability, pyspiel.TabularBestResponse, etc.).
While you do not have to pass the all_states and state_to_information_state
  arguments, creating them outside of this function will speed your code up
dramatically.
Args:
player_policy: The policy you want to convert to a dict.
game: The game the policy is for.
all_states: The result of calling get_all_states.get_all_states. Can be
cached for improved performance.
state_to_information_state: A dict mapping str(state) to
state.information_state for every state in the game. Can be cached for
improved performance.
Returns:
A dictionary version of player_policy that can be passed to the C++
TabularBestResponse, Exploitability, and BestResponse functions/classes.
"""
if all_states is None:
all_states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False)
    if state_to_information_state is None:
        state_to_information_state = {
            state: str(
                np.asarray(all_states[state].information_state_tensor(),
                           dtype=np.float32).tolist())
            for state in all_states
        }
tabular_policy = dict()
for state in all_states:
if player_id is not None and all_states[state].current_player() != player_id:
continue
information_state = state_to_information_state[state]
tabular_policy[information_state] = list(
player_policy.action_probabilities(all_states[state]).items())
return tabular_policy
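# A hedged usage sketch with OpenSpiel (the game and policy class are just
# examples; any pyspiel game whose states expose information_state_tensor()
# should work):
import pyspiel
from open_spiel.python import policy as policy_lib

game = pyspiel.load_game("kuhn_poker")
uniform_policy = policy_lib.UniformRandomPolicy(game)
tabular = policy_to_dict(uniform_policy, game)
print(len(tabular), "information states converted")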
|
c5e048d7886dac6b36197c0ee1593f8602972fa5
| 20,280 |
import time
import numpy as np
from scipy.sparse import coo_matrix
def compressed_gw(Dist1,Dist2,p1,p2,node_subset1,node_subset2, verbose = False, return_dense = True):
"""
In:
Dist1, Dist2 --- distance matrices of size nxn and mxm
p1,p2 --- probability vectors of length n and m
node_subset1, node_subset2 --- subsets of point indices. This version of the qGW code
specifically uses Voronoi partitions from fixed subsets
                               (usually these are chosen randomly). Other partitioning schemes
are possible, but not currently implemented here.
verbose --- print status and compute times
return_dense --- some parts of the algorithm use sparse matrices. If 'False' a sparse matrix is returned.
Out:
full_coup --- coupling matrix of size nxm giving a probabilistic correspondence between metric spaces.
"""
# Compress Graphs
start = time.time()
if verbose:
print('Compressing Graphs...')
coup1, p_compressed1 = compress_graph_from_subset(Dist1,p1,node_subset1)
coup2, p_compressed2 = compress_graph_from_subset(Dist2,p2,node_subset2)
Dist_new1, p_new1 = compress_graph(Dist1,p_compressed1)
Dist_new2, p_new2 = compress_graph(Dist2,p_compressed2)
if verbose:
print('Time for Compressing:', time.time() - start)
# Match compressed graphs
start = time.time()
if verbose:
print('Matching Compressed Graphs...')
coup_compressed, log = gwa.gromov_wasserstein(Dist_new1, Dist_new2, p_new1, p_new2)
if verbose:
print('Time for Matching Compressed:', time.time() - start)
# Find submatchings and create full coupling
if verbose:
print('Matching Subgraphs and Constructing Coupling...')
supp1 = find_support(p_compressed1)
supp2 = find_support(p_compressed2)
full_coup = coo_matrix((Dist1.shape[0], Dist2.shape[0]))
matching_time = 0
matching_and_expanding_time = 0
num_local_matches = 0
for (i_enum, i) in enumerate(supp1):
subgraph_i = find_support(coup1[:,i])
for (j_enum, j) in enumerate(supp2):
start = time.time()
w_ij = coup_compressed[i_enum,j_enum]
if w_ij > 1e-10:
num_local_matches += 1
subgraph_j = find_support(coup2[:,j])
# Compute submatching
coup_sub_ij = find_submatching_locally_linear(Dist1,Dist2,coup1,coup2,i,j)
matching_time += time.time()-start
# Expand to correct size
idx = np.argwhere(coup_sub_ij > 1e-10)
idx_i = idx.T[0]
idx_j = idx.T[1]
row = np.array(subgraph_i)[idx_i]
col = np.array(subgraph_j)[idx_j]
data = w_ij*np.array([coup_sub_ij[p[0],p[1]] for p in list(idx)])
expanded_coup_sub_ij = coo_matrix((data, (row,col)), shape=(full_coup.shape[0], full_coup.shape[1]))
# Update full coupling
full_coup += expanded_coup_sub_ij
matching_and_expanding_time += time.time()-start
if verbose:
print('Total Time for',num_local_matches,'local matches:')
print('Local matching:', matching_time)
print('Local Matching Plus Expansion:', matching_and_expanding_time)
if return_dense:
return full_coup.toarray()
else:
return full_coup
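# A minimal usage sketch on synthetic data. It assumes the helper functions
# referenced above (compress_graph_from_subset, compress_graph, gwa, etc.)
# are available in the same module; the data below is illustrative only.
rng = np.random.default_rng(0)
X1, X2 = rng.random((100, 2)), rng.random((120, 2))
D1 = np.sqrt(((X1[:, None] - X1[None]) ** 2).sum(-1))
D2 = np.sqrt(((X2[:, None] - X2[None]) ** 2).sum(-1))
p1, p2 = np.ones(100) / 100, np.ones(120) / 120
sub1 = rng.choice(100, size=10, replace=False)
sub2 = rng.choice(120, size=10, replace=False)
coup = compressed_gw(D1, D2, p1, p2, sub1, sub2, verbose=True)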
|
7008e62138e2f98238fdba6bf698559690c83972
| 20,281 |
import sys
import googleapiclient.discovery
import googleapiclient.errors
import isodate
def get_stats(service: googleapiclient.discovery.Resource, videos_list: list):
"""Get duration, views and live status of YouTube video with their ID
:param service: a YouTube service build with 'googleapiclient.discovery'
:param videos_list: list of YouTube video IDs
:return items: playlist items (videos) as a list.
"""
items = []
try:
videos_ids = [video['video_id'] for video in videos_list]
except TypeError:
videos_ids = videos_list
# Split task in chunks of size 50 to request on a maximum of 50 videos at each iteration.
videos_chunks = [videos_ids[i:i + min(50, len(videos_ids))] for i in range(0, len(videos_ids), 50)]
for chunk in videos_chunks:
try:
request = get_videos(service=service, videos_list=chunk)
# Keep necessary data
items += [{'video_id': item['id'],
'views': item['statistics'].get('viewCount', 0),
'likes': item['statistics'].get('likeCount', 0),
'comments': item['statistics'].get('commentCount', 0),
'duration': isodate.parse_duration(item['contentDetails'].get('duration', 0)).seconds,
'live_status': item['snippet'].get('liveBroadcastContent')} for item in request['items']]
except googleapiclient.errors.HttpError as http_error:
history.error(http_error.error_details)
sys.exit()
return items
|
52ec8ccfe10609f175c3ffdd480c9292ad306e90
| 20,282 |
import sys
def NotecardExceptionInfo(exception):
"""Construct a formatted Exception string.
Args:
exception (Exception): An exception object.
Returns:
string: a summary of the exception with line number and details.
"""
name = exception.__class__.__name__
return sys.platform + ": " + name \
+ ": " + ' '.join(map(str, exception.args))
|
bf45535776298a8d0afd326539f9492bfb710a9c
| 20,283 |
from typing import Dict
from typing import Tuple
from typing import List
def load_test_dataset(cfg: Dict) -> Tuple[Tuple[List]]:
"""Read config and load test dataset
Args:
cfg (Dict): config from config.json
Returns:
Tuple[Tuple[List]]: Test dataset
"""
X_test, y_test, test_prompt_ids = read_dataset(
cfg.preprocess_data_args["test_path"],
cfg.preprocess_data_args["prompt_id"],
cfg.preprocess_data_args["maxlen"],
cfg.preprocess_data_args["to_lower"],
cfg.preprocess_data_args["score_index"],
)
return (X_test, y_test, test_prompt_ids)
|
b4306f48a927ab0036b5815c5b119480ea4452ba
| 20,284 |
import shutil
def clear_caches() -> None:
""" Clear all Caches created by instagramy in current dir """
return shutil.rmtree(cache_dir, ignore_errors=True)
|
262cb8117d5987b2b2c7bef6c1f9444061f527a2
| 20,285 |
import numpy
import warnings
import xml.etree.ElementTree as ET  # assumed binding for `ET`
def SingleCameraCalibration_from_xml(elem, helper=None):
""" loads a camera calibration from an Elementree XML node """
assert ET.iselement(elem)
assert elem.tag == "single_camera_calibration"
cam_id = elem.find("cam_id").text
pmat = numpy.array(numpy.mat(elem.find("calibration_matrix").text))
res = numpy.array(numpy.mat(elem.find("resolution").text))[0,:]
scale_elem = elem.find("scale_factor")
if NO_BACKWARDS_COMPAT:
assert scale_elem is None, 'XML file has outdated <scale_factor>'
else:
if scale_elem is not None:
# backwards compatibility
scale = float( scale_elem.text )
if scale != 1.0:
warnings.warn('converting old scaled calibration')
scale_array = numpy.ones((3,4))
                scale_array[:,3] = scale # multiply last column by scale
pmat = scale_array*pmat # element-wise multiplication
if not helper:
helper_elem = elem.find("non_linear_parameters")
if helper_elem is not None:
helper = reconstruct_utils.ReconstructHelper_from_xml(helper_elem)
else:
# make with no non-linear stuff (i.e. make linear)
helper = reconstruct_utils.ReconstructHelper(1,1, # focal length
0,0, # image center
0,0, # radial distortion
0,0) # tangential distortion
return SingleCameraCalibration(cam_id=cam_id,
Pmat=pmat,
res=res,
helper=helper)
|
ad9f8d36b82dc3dbf4b3189166e48c0b5304a389
| 20,286 |
import cv2
import numpy as np

rand_state = np.random.RandomState()  # assumed module-level RNG used below
def contrast_augm_cv2(images,fmin,fmax):
"""
this function is equivalent to the numpy version, but 2.8x faster
"""
images = np.copy(images)
contr_rnd = rand_state.uniform(low=fmin,high=fmax,size=images.shape[0])
for i in range(images.shape[0]):
fac = contr_rnd[i]
images[i] = np.atleast_3d(cv2.addWeighted(images[i], fac , 0, 0, 128-fac*128))
return images
|
7bd56e2b053e0ede33bdd62c269e66858745c642
| 20,287 |
def load_preprocess():
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
return pickle_load('preprocess.p')
|
80bf8a509c972291767ade03a64e74aed11b8298
| 20,288 |
def detection_layer(config, rois, mrcnn_class, mrcnn_bbox, image_meta):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels
"""
# Currently only supports batchsize 1
rois = rois.squeeze(0)
_, _, window, _ = parse_image_meta(image_meta)
window = window[0]
detections = refine_detections(rois, mrcnn_class, mrcnn_bbox, window, config)
return detections
|
7770241c44dc050c0fb20f18b729eba388a47721
| 20,289 |
from twisted.internet import reactor
from twisted.internet.protocol import DatagramProtocol
def connectedSocketDiscover():
"""
Try to discover the internal address by using a connected UDP
socket.
@return: a L{Deferred} called with the internal address.
"""
def cb(address):
protocol = DatagramProtocol()
listeningPort = reactor.listenUDP(0, protocol)
protocol.transport.connect(address, 7)
internal = protocol.transport.getHost().host
listeningPort.stopListening()
return internal
return reactor.resolve('A.ROOT-SERVERS.NET').addCallback(cb)
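# Usage sketch: print the discovered address (or failure), then stop the reactor.
def _report(result):
    print("discovered internal address:", result)
    reactor.stop()

if __name__ == "__main__":
    connectedSocketDiscover().addBoth(_report)
    reactor.run()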
|
a443ef9774774fe4a31cb3f76462d35255380caa
| 20,290 |
from typing import Dict, List, Optional, Tuple
from uuid import uuid4
def construct_source_plate_not_recognised_message(
params: Dict[str, str]
) -> Tuple[List[str], Optional[Message]]:
"""Constructs a message representing a source plate not recognised event;
otherwise returns appropriate errors.
Arguments:
params {Dict[str, str]} -- All parameters of the plate event message request.
Returns:
{[str]} -- Any errors attempting to construct the message, otherwise an empty array.
{Message} -- The constructed message; otherwise None if there are any errors.
"""
try:
user_id = params.get("user_id", "")
robot_serial_number = params.get("robot", "")
if len(user_id) == 0 or len(robot_serial_number) == 0:
return [
"'user_id' and 'robot' are required to construct a "
f"{PLATE_EVENT_SOURCE_NOT_RECOGNISED} event message"
], None
robot_uuid = __get_robot_uuid(robot_serial_number)
if robot_uuid is None:
return [f"Unable to determine a uuid for robot '{robot_serial_number}'"], None
message_content = {
"event": {
"uuid": str(uuid4()),
"event_type": PLATE_EVENT_SOURCE_NOT_RECOGNISED,
"occured_at": __get_current_datetime(),
"user_identifier": user_id,
"subjects": [__construct_robot_message_subject(robot_serial_number, robot_uuid)],
"metadata": {},
},
"lims": app.config["RMQ_LIMS_ID"],
}
return [], Message(message_content)
except Exception as e:
logger.error(f"Failed to construct a {PLATE_EVENT_SOURCE_NOT_RECOGNISED} message")
logger.exception(e)
return [
"An unexpected error occurred attempting to construct the "
f"{PLATE_EVENT_SOURCE_NOT_RECOGNISED} event message"
], None
|
73c34f090dcdd802e0161d0f808deb03aef2c660
| 20,291 |
def odd(x):
"""True if x is odd."""
return (x & 1)
|
9cd383ea01e0fed56f6df42648306cf2415f89e9
| 20,292 |
def sparse_transformer_local():
"""Set of hyperparameters for a sparse model using only local."""
hparams = common_hparams.basic_params1()
hparams.max_length = 4096
hparams.batch_size = 4096
hparams.add_hparam("max_target_length", 4096)
hparams.add_hparam("add_timing_signal", False)
hparams.add_hparam("local_num_heads", 8)
hparams.add_hparam("sparsity_cluster_num_heads", 0)
hparams.add_hparam("sparsity_strided_num_heads", 0)
hparams.add_hparam("sparsity_cluster_strided_num_heads", 0)
hparams.add_hparam("sparsity_skip_first", 0)
hparams.add_hparam("ema", True)
hparams.add_hparam("query_shape", (512,))
hparams.add_hparam("memory_query_shape", (512,))
hparams.add_hparam("memory_flange", (512,))
hparams.add_hparam("sparsity_cluster_size", 0)
hparams.add_hparam("sparsity_cluster_attention_window", 0)
hparams.add_hparam("num_encoder_layers", 0)
hparams.add_hparam("num_decoder_layers", 24)
hparams.add_hparam("attention_key_channels", 0) # Uses hidden_size
hparams.add_hparam("attention_value_channels", 0) # Uses hidden_size
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("ffn_layer", "conv_hidden_relu")
hparams.add_hparam("filter_size", 2048) # Used in ffn_layer
hparams.add_hparam("relu_dropout", 0.0) # Used in ffn_layer
hparams.add_hparam("input_dropout", 0.0) # dropout on input sequences
hparams.add_hparam("target_dropout", 0.0) # dropout on target sequences
hparams.add_hparam("use_tpu", True)
hparams.tpu_enable_host_call = True # Enable summaries on TPU
hparams.pad_batch = True
hparams.bottom = {
"targets": target_bottom,
}
# Optimizer
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer = "adafactor"
hparams.optimizer_adafactor_beta1 = 0.0
hparams.optimizer_adafactor_beta2 = 0.999
hparams.optimizer_adafactor_clipping_threshold = 1.0
hparams.optimizer_adafactor_decay_type = "pow"
hparams.optimizer_adafactor_memory_exponent = 0.8
hparams.optimizer_adafactor_multiply_by_parameter_scale = True
hparams.learning_rate_schedule = "constant*rsqrt_normalized_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.learning_rate_constant = 0.01
hparams.initializer_gain = 0.2
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.label_smoothing = 0.0
hparams.summarize_vars = True
hparams.hidden_size = 512
# Memory saving measures
hparams.add_hparam("cache_padding_bias", False)
hparams.add_hparam("embedding_dims", 512)
hparams.add_hparam("share_qk", True)
hparams.shared_embedding = True
hparams.shared_embedding_and_softmax_weights = True
# relative attention
hparams.max_relative_position = 1024
hparams.add_hparam("local_relative", True)
hparams.add_hparam("sparsity_cluster_relative", True)
hparams.add_hparam("sparsity_cluster_strided_relative", True)
hparams.add_hparam("sparsity_strided_relative", False)
# Decoding
hparams.add_hparam("nucleus_sampling", 0.9)
hparams.add_hparam("num_decode_cores", 8)
hparams.add_hparam("fast_decode", False)
# Clustering hparams
hparams.add_hparam("beta", 1e-4)
hparams.add_hparam("decay", 0.999)
# LSH attention as in Reformer
hparams.add_hparam("hash_items", False)
hparams.add_hparam("token_bias_wt_trainable", False)
return hparams
|
29c4ce03eabc311cc6526d381ff1b09e33cd9667
| 20,293 |
import scipy.ndimage
import numpy as np
from astropy.stats import median_absolute_deviation  # assumed source of the MAD helper
def make_aperture_mask(self, snr_threshold=5, margin=4):
"""Returns an aperture photometry mask.
Parameters
----------
snr_threshold : float
Background detection threshold.
"""
# Find the pixels that are above the threshold in the median flux image
median = np.nanmedian(self.flux, axis=0)
mad = median_absolute_deviation(median[np.isfinite(median)])
# 1.4826 turns MAD into STDEV for a Gaussian
mad_cut = 1.4826 * mad * snr_threshold
region = np.where(median > mad_cut, 1, 0)
# Label all contiguous regions above the threshold
labels = scipy.ndimage.label(region)[0]
# Central pixel coordinate
centralpix = [1 + median.shape[0] // 2, 1 + median.shape[1] // 2]
# find brightest pix within margin of central pix
central_img = median[centralpix[0] - margin: centralpix[0] + margin,
centralpix[1] - margin: centralpix[1] + margin]
# unravel_index converts indices into a tuple of coordinate arrays
brightestpix = np.unravel_index(central_img.argmax(), central_img.shape)
bpixy, bpixx = brightestpix
# Which label corresponds to the brightest pixel?
regnum = labels[centralpix[0] - margin + bpixy, centralpix[1] - margin + bpixx]
return labels == regnum
|
26e176f68155f2d7c8756eb794083e71cd1a920c
| 20,294 |
from werkzeug.security import check_password_hash  # assumed source of check_password_hash
def verify_password(username, password):
"""Verify the password."""
if username in users:
return check_password_hash(users.get(username), password)
return False
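# Usage sketch; the module-level `users` dict (username -> werkzeug hash) is
# populated here purely for illustration.
from werkzeug.security import generate_password_hash

users = {"alice": generate_password_hash("s3cret")}
assert verify_password("alice", "s3cret")
assert not verify_password("alice", "wrong")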
|
a41664c6121f9f88522d69d5f2720da94fddc299
| 20,295 |
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils  # older Keras API assumed for np_utils
def data_mnist(one_hot=True):
"""
Preprocess MNIST dataset
"""
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0],
FLAGS.IMAGE_ROWS,
FLAGS.IMAGE_COLS,
FLAGS.NUM_CHANNELS)
X_test = X_test.reshape(X_test.shape[0],
FLAGS.IMAGE_ROWS,
FLAGS.IMAGE_COLS,
FLAGS.NUM_CHANNELS)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
print "Loaded MNIST test data."
if one_hot:
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, FLAGS.NUM_CLASSES).astype(np.float32)
y_test = np_utils.to_categorical(y_test, FLAGS.NUM_CLASSES).astype(np.float32)
return X_train, y_train, X_test, y_test
|
d160d9033acc0e84255cf43faf094d7ecd83e99c
| 20,296 |
import glob
import os
def get_file_paths_by_pattern(pattern='*', folder=None):
"""Get a file path list matched given pattern.
Args:
pattern(str): a pattern to match files.
folder(str): searching folder.
Returns
(list of str): a list of matching paths.
Examples
>>> get_file_paths_by_pattern('*.png') # get all *.png files in folder
>>> get_file_paths_by_pattern('*rotate*') # get all files with 'rotate' in name
"""
if folder is None:
return glob.glob(pattern)
else:
return glob.glob(os.path.join(folder, pattern))
|
467485dd4b162654bd73cf412c93558fa43a5b8e
| 20,297 |
import numpy as np
def pval_two(n, m, N, Z_all, tau_obs):
"""
Calculate the p-value of a two sided test.
Given a tau_obs value use absolute value to
find values more extreme than the observed tau.
Parameters
----------
    n : int
        total number of subjects in the sample
    m : int
        number of subjects assigned to treatment (Z == 1)
    N : array
        counts of subjects in the four potential-outcome groups
        (1,1), (1,0), (0,1), (0,0); the counts sum to n
Z_all: matrix
the output from the function nchoosem
tau_obs: float
the observed value of tau
Returns
--------
pd : float
the pval of the test statistic
"""
assert m <= n, "# of subjects who are 1 must be <= to sum of all subjects"
n_Z_all = Z_all.shape[0]
dat = np.zeros((n, 2))
N = [int(x) for x in N]
if N[0] > 0:
dat[0:N[0], :] = 1
if N[1] > 0:
dat[(N[0]): (N[0] + N[1]), 0] = 1
dat[(N[0]): (N[0] + N[1]), 1] = 0
if N[2] > 0:
dat[(N[0]+N[1]):(N[0]+N[1]+N[2]), 0] = 0
dat[(N[0]+N[1]):(N[0]+N[1]+N[2]), 1] = 1
if N[3] > 0:
dat[(N[0]+N[1]+N[2]):(N[0]+N[1]+N[2]+N[3]), ] = 0
tau_hat = np.matmul(Z_all, dat[:, 0]) / (m) - \
np.matmul((1 - Z_all), dat[:, 1]) / (n-m)
tau_N = (N[1]-N[2]) / n
pd = sum(np.round(np.abs(tau_hat-tau_N), 15) >=
np.round(np.abs(tau_obs-tau_N), 15))/n_Z_all
return pd
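# Usage sketch. Z_all enumerates every assignment of m treated units among n
# subjects (the role played by `nchoosem` in the docstring); a small stand-in
# built with itertools is used here, and the inputs are illustrative only.
from itertools import combinations

def _all_assignments(n, m):
    rows = []
    for treated in combinations(range(n), m):
        row = np.zeros(n)
        row[list(treated)] = 1
        rows.append(row)
    return np.array(rows)

print(pval_two(n=4, m=2, N=[1, 1, 1, 1], Z_all=_all_assignments(4, 2), tau_obs=0.5))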
|
efc1f602d72d71a0270ef2f4c2c2ed8610832d62
| 20,298 |
import tensorflow.compat.v1 as tf  # TF1-style API (tf.placeholder, tf.where_v2) assumed
def make_where_tests(options):
"""Make a set of tests to do where."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32],
"input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 4]),],
"use_where_v2": [False, True],
},
{
"input_dtype": [tf.float32, tf.int32],
"input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 1]),],
"use_where_v2": [True],
},
]
def build_graph(parameters):
"""Build the where op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_set"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input3",
shape=parameters["input_shape_set"][1])
less = tf.less(input_value1, input_value2)
where = tf.where_v2 if parameters["use_where_v2"] else tf.where
out = where(less, input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_set"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_set"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
fa10458af3e9043ea5779ff05cb679ef4b798d1b
| 20,299 |