content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import requests
def make_new_paste(devkey, paste_text, user_key=None, paste_title=None, paste_format=None, paste_type=None, paste_expiry: int=None):
"""This function creates a new paste
on pastebin with the given arguments."""
    data = {'api_dev_key': devkey, 'api_option': 'paste', 'api_paste_code': paste_text,
            'api_paste_format': paste_format, 'api_user_key': user_key}
    if paste_expiry is not None:  # only send an expiry when one was given; Pastebin expects e.g. '10M' for 10 minutes
        data['api_paste_expire_date'] = f'{paste_expiry}M'
r = requests.post('https://pastebin.com/api/api_post.php', data=data)
return r.text
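A minimal usage sketch; the developer key below is a placeholder, and the call performs a real HTTP POST, so treat it as illustrative only.
if __name__ == '__main__':
    # 'YOUR_DEV_KEY' is a placeholder, not a real Pastebin developer key.
    response_text = make_new_paste('YOUR_DEV_KEY', 'hello from the API', paste_expiry=10)
    print(response_text)  # paste URL on success, an error message otherwise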
|
59b5be916c007e778ac0cf7a22b49b094f68dfaa
| 18,000 |
def not_none(value):
"""
This function ensures that passed value is not None:
>>> schema = Schema(not_none)
>>> assert 1 == schema(1)
>>> try:
... schema(None)
... assert False, "an exception should've been raised"
... except MultipleInvalid:
... pass
"""
if value is None:
raise NoneInvalid('value is None')
else:
return value
|
d3381c51d2d25edfbd56f6e1008d39056b0a0bda
| 18,001 |
def j1c_dblprime(amplitudes):
"""Calculate j''1c angular observable"""
[_, _, _, _, a_0_l, a_0_r, a_00_l, a_00_r] = amplitudes
return (2 / tf.sqrt(3.0)) * (
tf.math.real(a_00_l * tf.math.conj(a_0_l) * bw_k700_k892) +
tf.math.real(a_00_r * tf.math.conj(a_0_r) * bw_k700_k892)
)
|
eead7c64e7033262aa98ccb966fd83a51419a065
| 18,002 |
def preprocess(img):
"""Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted."""
mean_bgr = load_mean_bgr()
    print('mean blue', np.mean(mean_bgr[:, :, 0]))
    print('mean green', np.mean(mean_bgr[:, :, 1]))
    print('mean red', np.mean(mean_bgr[:, :, 2]))
out = np.copy(img) * 255.0
out = out[:, :, [2, 1, 0]] # swap channel from RGB to BGR
out -= mean_bgr
return out
|
759110f2004315ab45aed1b18dbe5a1132366dd5
| 18,003 |
def profile_detail(request, username, template_name='userena/profile_detail.html', extra_context=None, **kwargs):
"""
    Detailed view of a user.
:param username:
String of the username of which the profile should be viewed.
:param template_name:
String representing the template name that should be used to display
the profile.
:param extra_context:
Dictionary of variables which should be supplied to the template. The
``profile`` key is always the current profile.
**Context**
``profile``
Instance of the currently viewed ``Profile``.
"""
user = get_object_or_404(User,
username__iexact=username)
profile = user.get_profile()
if not profile.can_view_profile(request.user):
return HttpResponseForbidden(_("You don't have permission to view this profile."))
if not extra_context: extra_context = dict()
extra_context['profile'] = user.get_profile()
return direct_to_template(request,
template_name,
extra_context=extra_context,
**kwargs)
|
c81d3cb2910e6358760c742c7b4081df4ed95a45
| 18,004 |
def prepare_ternary(figsize, scale):
"""Help function to ternary plot"""
fig, ax = plt.subplots(figsize=figsize)
tax = ternary.TernaryAxesSubplot(ax=ax, scale=scale)
ax.axis('off')
gm = 0.1 * scale
blw = 1
tlw = 1
# Draw Boundary and Gridlines
tax.boundary(linewidth=blw)
tax.gridlines(color='grey', multiple=gm, alpha=0.8)
# Set Axis labels and Title
tax.bottom_axis_label(
r"Retweets $\rightarrow$", offset=-0.08, fontsize='small')
tax.right_axis_label(r"$\leftarrow$Replies", offset=0.2, fontsize='small')
tax.left_axis_label(r"$\leftarrow$Tweets", offset=0.2, fontsize='small')
# ticks
locations = range(0, scale + 1, 4)
ticks = ['{}'.format(x * 10) for x in range(0, 11, 2)]
tax.ticks(
axis='lbr',
ticks=ticks,
locations=locations,
linewidth=tlw,
offset=0.03,
fsize=9,
clockwise=False)
return tax
|
67b40d55d2296957cbe152bce69a5afcd22c2624
| 18,005 |
from typing import Dict
def parse_spreadsheet(hca_spreadsheet: Workbook, entity_dictionary: Dict):
"""
Parse the spreadsheet and fill the metadata with accessions.
:param hca_spreadsheet: Workbook object of the spreadsheet
:param entity_dictionary: Dictionary mapping by entity UUID to the proper archiveEntity
:return: Accessioned spreadsheet
"""
# Parse each sheet for the UUIDs
for sheet in hca_spreadsheet.sheetnames:
for row in hca_spreadsheet[sheet].rows:
if row[0].value in entity_dictionary:
# Get fqk, search for it, add accession based on the entity dictionary
fqk = (accession_mapping[entity_dictionary[row[0].value]['type']]['fqk']
.replace("{}", sheet.lower().replace(" ", "_")))
coordinate_column = search_fqk_in_sheet(hca_spreadsheet[sheet], fqk, 4)
coordinate_row = row[0].coordinate[1:]
cell_coordinate = f'{coordinate_column}{coordinate_row}'
hca_spreadsheet[sheet][cell_coordinate].value = entity_dictionary[row[0].value]['accession']
return hca_spreadsheet
|
83607766eda5b0f9d5a6fc09035a12d29fb8b44c
| 18,006 |
def decode(data):
"""Decode JSON serialized string, with possible embedded Python objects.
"""
return _decoder.decode(data)
|
b82d55eb7e704f9396aab3642314f172c2205a04
| 18,007 |
def p_correction(p_values):
"""
Corrects p_values for multiple testing.
:param p_values: Dictionary storing p_values with corresponding feature names as keys.
:return: DataFrame which shows the results of the analysis; p-value, corrected p-value and boolean indicating \
significance.
"""
p_trans = _transform_p_dict(p_values)
# get and drop features which are NaN to skip them in multitest correction
nan_features = p_trans[pd.isnull(p_trans[0])]
p_trans = p_trans.dropna(axis=0, subset=[0])
# extract p_value column to pass into multiple testing correction
p_val_col = p_trans[0].sort_values()
# add NaN features back to p_trans to include them into result table later on
p_trans = pd.concat([p_trans, nan_features])
    # raise an error if no p_values were calculated that can be passed into multiple test correction
if p_val_col.values.size == 0:
# unpack the p_values which are stored in 2 layer nested dicts.
nested_values = []
for value in p_values.values():
nested_values.append(*value.values())
# if all p_values are nan, return an all nan result table
if pd.isnull(nested_values).all():
result_table = _create_result_table(None, p_val_col, p_trans, conf_invs, counts)
return result_table.sort_index()
raise ValueError("No p_values have been submitted into multiple test correction.")
# correct p-values
result = multipletests(p_val_col.values)
return result, p_val_col, p_trans
|
f1e7faa35176cdf41aca273413d7eb9d784dfdb1
| 18,008 |
def GetFlippedPoints3(paths, array):
"""same as first version, but doesnt flip locations: just sets to -1
used for random walks with self intersections - err type 6"""
# this may not work for double ups?
for i in paths:
for j in i: # for the rest of the steps...
array[j[0]][j[1]][j[2]] = -1 # flip initial position
return(array)
|
ad0bc7a03e293beb2542ee555a341bdfc8706408
| 18,009 |
from typing import Callable
from inspect import isawaitable
async def _silent_except(f: Callable, *args, **kwargs):
"""
Helper Function that calls a function or coroutine and returns its result excepting all errors
"""
try:
called = f(*args, **kwargs)
except:
return
if isawaitable(called):
try:
result = await called
except:
return
else:
return result
else:
return called
|
adaaf3e4a35dfed86d8fa83f8254396e0fa5245b
| 18,010 |
def get_concat_h(im1, im2):
"""Concatenate two images horizontally."""
dst = Image.new("RGB", (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst
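A quick self-contained check of the helper; the two images are generated in memory, so no files are assumed.
from PIL import Image
left = Image.new("RGB", (100, 50), "red")
right = Image.new("RGB", (80, 50), "blue")
combined = get_concat_h(left, right)
print(combined.size)  # (180, 50): left image followed by the right one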
|
60c67011c25ace5e0491bc365256364a9b677798
| 18,011 |
def get_taxname(taxid):
"""Return scientific name for NCBI Taxonomy ID."""
    if getattr(get_taxname, 'id_name_map', None) is None:
get_taxname.id_name_map = load_taxid_name_map('data/taxnames.tsv')
if get_taxname.id_name_map is None: # assume fail, fallback
get_taxname.id_name_map = TAXID_NAME_MAP
return get_taxname.id_name_map.get(taxid, '<UNKNOWN>')
|
8a42b542fef9a003e7f40542513d8d4a9d5d8e88
| 18,012 |
import os
def _get_movies(dir):
"""Gets the movies from the specified directory"""
movieList = []
directories = os.listdir(dir)
for d in directories:
# We need to skip past directories without instruction sets
if '__' not in d:
continue
files = os.listdir("{root}/{subdir}".format(root=dir, subdir=d))
for f in files:
# Don't add .mkv's that are handbrake encodes.
if '--converted' not in f and '.mkv' in f:
movie = Movie(dir, d, f)
movieList.append(movie)
return movieList
|
b3624580b364b357999288f8d9ddffa43cfebb3a
| 18,013 |
def lrfn(epoch):
"""
lrfn(epoch)
This function creates a custom piecewise linear-exponential learning rate function for a custom learning rate scheduler. It is linear to a max, then exponentially decays
* INPUTS: current `epoch` number
* OPTIONAL INPUTS: None
* GLOBAL INPUTS:`START_LR`, `MIN_LR`, `MAX_LR`, `RAMPUP_EPOCHS`, `SUSTAIN_EPOCHS`, `EXP_DECAY`
* OUTPUTS: the function lr with all arguments passed
"""
def lr(epoch, START_LR, MIN_LR, MAX_LR, RAMPUP_EPOCHS, SUSTAIN_EPOCHS, EXP_DECAY):
if epoch < RAMPUP_EPOCHS:
lr = (MAX_LR - START_LR)/RAMPUP_EPOCHS * epoch + START_LR
elif epoch < RAMPUP_EPOCHS + SUSTAIN_EPOCHS:
lr = MAX_LR
else:
lr = (MAX_LR - MIN_LR) * EXP_DECAY**(epoch-RAMPUP_EPOCHS-SUSTAIN_EPOCHS) + MIN_LR
return lr
return lr(epoch, START_LR, MIN_LR, MAX_LR, RAMPUP_EPOCHS, SUSTAIN_EPOCHS, EXP_DECAY)
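A sketch of how this schedule could be wired into Keras; the constants are illustrative values only, since lrfn reads them as globals, and the model/fit call is assumed to exist elsewhere.
START_LR, MIN_LR, MAX_LR = 1e-5, 1e-5, 1e-3   # illustrative values, not from the original source
RAMPUP_EPOCHS, SUSTAIN_EPOCHS, EXP_DECAY = 5, 2, 0.8
import tensorflow as tf
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True)
# model.fit(x, y, epochs=30, callbacks=[lr_callback])  # 'model', 'x', 'y' assumed defined elsewhere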
|
f71e776e07ac9f4be5802127e8c9ca84e864de58
| 18,014 |
import numbers
def get_balances():
"""
Get the balances of the configured validator (if possible)
"""
balances = account.get_balance_on_all_shards(validator_config['validator-addr'], endpoint=node_config['endpoint'])
for bal in balances:
bal['balance'] = float(numbers.convert_atto_to_one(bal['balance']))
return balances
|
ed79137e7eb8482f86246174b1bf107229c59b90
| 18,015 |
import collections
def _make_ordered_node_map(
pipeline: pipeline_pb2.Pipeline
) -> 'collections.OrderedDict[str, pipeline_pb2.PipelineNode]':
"""Prepares the Pipeline proto for DAG traversal.
Args:
pipeline: The input Pipeline proto, which must already be topologically
sorted.
Returns:
An OrderedDict that maps node_ids to PipelineNodes.
"""
result = collections.OrderedDict()
for pipeline_or_node in pipeline.nodes:
node_id = pipeline_or_node.pipeline_node.node_info.id
result[node_id] = pipeline_or_node.pipeline_node
return result
|
04d766081bffe000509a70a43208b6998b764a49
| 18,016 |
def energy(_x, _params):
"""Kinetic and Potential Energy of point mass pendulum.
_x is an array/list in the following order:
q1: Angle of first pendulum link relative to vertical (0 downwards)
u1: A[1] measure number of the inertial angular velocity of the first link.
_params is an array/list in the following order:
m: Mass of first pendulum point mass.
l: Length of first pendulum link.
g: Gravitational constant.
Returns a list/array of kinetic energy and potential energy, respectively.
"""
# Unpack function arguments
q1, u1 = _x
# Unpack function parameters
m, g, l, b = _params
# Trigonometric functions
c1 = cos(q1)
# Calculate return values
ke = m*l**2*u1**2/2
pe = g*l*m*(1 - c1)
# Return calculated values
return [ke, pe]
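A worked call with made-up values; note that the code unpacks _params as (m, g, l, b) even though the docstring lists m, l, g, and that cos must be importable in the enclosing module.
from math import cos, pi   # supplies the cos() used inside energy
ke, pe = energy([pi / 3, 0.5], [1.0, 9.81, 1.0, 0.0])   # q1=60 deg, u1=0.5 rad/s; m=1, g=9.81, l=1, b unused
print(ke, pe)   # 0.125 and about 4.905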
|
0796149bab5a5a36717a67477661633eaf3a29c2
| 18,017 |
import math
def gelu(input_tensor):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
# math.sqrt needed for bfloat16 compatibility
cdf = 0.5 * (1.0 + tf.erf(input_tensor / math.sqrt(2.0)))
return input_tensor * cdf
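A quick numerical check of the activation; depending on the TensorFlow version, tf.erf may need to be spelled tf.math.erf.
import tensorflow as tf
x = tf.constant([-2.0, 0.0, 2.0])
print(gelu(x).numpy())   # approximately [-0.0455, 0.0, 1.9545]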
|
fd6de5888839521118c42d2e046b526f7025c70d
| 18,018 |
import torch
def KPConv_ops(query_points,
support_points,
neighbors_indices,
features,
K_points,
K_values,
KP_extent,
KP_influence,
aggregation_mode):
"""
This function creates a graph of operations to define Kernel Point Convolution in tensorflow. See KPConv function
above for a description of each parameter
:param query_points: [n_points, dim]
:param support_points: [n0_points, dim]
:param neighbors_indices: [n_points, n_neighbors]
:param features: [n_points, in_fdim]
:param K_points: [n_kpoints, dim]
:param K_values: [n_kpoints, in_fdim, out_fdim]
:param KP_extent: float32
:param KP_influence: string
:param aggregation_mode: string
:return: [n_points, out_fdim]
"""
# Get variables
n_kp = int(K_points.shape[0])
# print(support_points.shape)
# Add a fake point in the last row for shadow neighbors
shadow_point = torch.ones_like(support_points[:1, :]) * 1e6
support_points = torch.cat([support_points, shadow_point], axis=0)
# Get neighbor points [n_points, n_neighbors, dim]
# print(shadow_point.shape)
# print(support_points.shape)
# print(neighbors_indices.shape)
neighbors = support_points[neighbors_indices]
# Center every neighborhood
neighbors = neighbors - query_points.unsqueeze(1)
# Get all difference matrices [n_points, n_neighbors, n_kpoints, dim]
neighbors = neighbors.unsqueeze(2)
neighbors = neighbors.repeat([1, 1, n_kp, 1])
differences = neighbors - K_points
# Get the square distances [n_points, n_neighbors, n_kpoints]
sq_distances = torch.sum(differences ** 2, axis=3)
# Get Kernel point influences [n_points, n_kpoints, n_neighbors]
if KP_influence == 'constant':
# Every point get an influence of 1.
all_weights = torch.ones_like(sq_distances)
all_weights = all_weights.permute(0, 2, 1)
elif KP_influence == 'linear':
# Influence decrease linearly with the distance, and get to zero when d = KP_extent.
all_weights = torch.relu(1 - torch.sqrt(sq_distances) / KP_extent)
all_weights = all_weights.permute(0, 2, 1)
elif KP_influence == 'gaussian':
# Influence in gaussian of the distance.
sigma = KP_extent * 0.3
all_weights = radius_gaussian(sq_distances, sigma)
all_weights = all_weights.permute(0, 2, 1)
else:
raise ValueError('Unknown influence function type (config.KP_influence)')
# In case of closest mode, only the closest KP can influence each point
if aggregation_mode == 'closest':
        neighbors_1nn = torch.argmin(sq_distances, axis=2)  # torch.argmin has no output_type kwarg; it already returns int64
#
# all_weights *= tf.one_hot(neighbors_1nn, n_kp, axis=1,
# dtype=torch.float32)
all_weights *= torch.zeros_like(all_weights,
dtype=torch.float32).scatter_(
1, neighbors_1nn, 1)
elif aggregation_mode != 'sum':
raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'")
features = torch.cat([features, torch.zeros_like(features[:1, :])], axis=0)
# Get the features of each neighborhood [n_points, n_neighbors, in_fdim]
neighborhood_features = features[neighbors_indices]
# Apply distance weights [n_points, n_kpoints, in_fdim]
weighted_features = torch.matmul(all_weights, neighborhood_features)
# Apply network weights [n_kpoints, n_points, out_fdim]
weighted_features = weighted_features.permute(1, 0, 2)
kernel_outputs = torch.matmul(weighted_features, K_values)
# Convolution sum to get [n_points, out_fdim]
output_features = torch.sum(kernel_outputs, axis=0)
return output_features
|
29fbb193fef31cdd3bdfcf4df212c56eae32cb3e
| 18,019 |
def kerneleval(X_test, X_train, kernel):
"""
    This function computes the pairwise distances between
each row in X_test and X_train using the kernel
specified in 'kernel'
X_test, X_train: 2d np.arrays
kernel: kernel parameters
"""
if kernel is None:
return X_train
fn = kernel['fn']
if fn == 'rbf':
return rbf(X_train, X_test, gamma=kernel['gamma'])
elif fn == 'poly':
return poly(X_train, X_test, degree=kernel['degree'])
elif fn == 'linear':
return linear(X_train, X_test)
|
7cdba3af72dab288c9efac757905c51ed5f9a5f6
| 18,020 |
def aks_show_snapshot_table_format(result):
"""Format a snapshot as summary results for display with "-o table"."""
return [_aks_snapshot_table_format(result)]
|
174deb4bdbe1da27826c89b4bd187e5aa8a00216
| 18,021 |
from typing import List
from typing import Dict
from typing import Any
import subprocess
def get_all_monitors() -> List[Dict[str, Any]]:
"""
:return: all monitors array list sorted from left to right.
i.e: [
{'hr': 1366, 'vr': 768, 'ho': 0, 'vo': 914, 'name': 'eDP-1-1'},
{'hr': 2560, 'vr': 1440, 'ho': 1366, 'vo': 0, 'name': 'HDMI-1-1'},
]
hr: Horizontal resolution
vr: Vertical resolution
ho: Horizontal offset
vo: Vertical offset
name: The screen name
"""
# all_monitors_xrand_resp_ is string like this:
# Monitors: 2
# 0: +*HDMI-1-1 2560/621x1440/341+1366+0 HDMI-1-1
# 1: +eDP-1-1 1366/309x768/174+0+45 eDP-1-1
all_monitors_xrand_resp_ = subprocess.getoutput("xrandr --listmonitors")
monitors_ = []
for line_ in all_monitors_xrand_resp_.split(": ")[2:]:
monitor = {
# Horizontal resolution. i.e 2560
"hr": atoi(line_.split(" ")[1].split("/")[0]),
# Vertical resolution. i.e 1440
"vr": atoi(line_.split(" ")[1].split("/")[1].split("x")[1].split("/")[0]),
# Horizontal offset. i.e 1366
"ho": atoi(line_.split(" ")[1].split("+")[1]),
# Vertical offset. i.e 0
"vo": atoi(line_.split(" ")[1].split("+")[2]),
# Monitor name. i.e HDMI-1-1
"name": line_.replace(" ", " ").rsplit(" ")[0].replace("+", "").replace("*", ""),
}
monitors_.append(monitor)
return sorted(monitors_, key=lambda i: i['ho'])
|
29e6dc676bf52f8a4c15fd94a6bed8422fef594f
| 18,022 |
def poly_prem(f, g, *symbols):
"""Returns polynomial pseudo-remainder. """
return poly_pdiv(f, g, *symbols)[1]
|
4360e2bb4afc7d49f12b411aa18d2d5a1786306b
| 18,023 |
def gen_input_code(question, id):
"""
Returns the html code for rendering the appropriate input
field for the given question.
Each question is identified by name=id
"""
qtype = question['type']
if qtype == 'text':
return """<input type="text" class="ui text" name="{0}"
placeholder="your answer..." />""".format(id)
elif qtype == 'code':
return '<textarea class="ui text" name="{0}"></textarea>'.format(id)
else:
button_template = '<input type="radio" name="{0}" value="{1}"> {1}<br>'
code = ''
for choice in question['choices']:
code = code + button_template.format(id, choice)
return code
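A small illustration, using a question dict shaped like the ones this code expects; any type other than 'text' or 'code' falls through to radio buttons.
question = {'type': 'multiple_choice', 'choices': ['yes', 'no']}   # hypothetical question record
print(gen_input_code(question, 'q1'))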
|
b76bea45c0ce847d664a38694732ef0b75c2a53c
| 18,024 |
def orbit_position(data, body='sun'):
"""calculate orbit position of sun or moon for instrument position at each time in 'data' using :class:`ephem`
Args:
data: :class:`xarray.Dataset`, commonly Measurement.data
body (optional): name of astronomical body to calculate orbit from ('sun' or 'moon'). Defaults to 'sun'
Returns:
tuple containing:
ele: :class:`numpy.ndarray` of elevations of the body for each time step
azi: :class:`numpy.ndarray` of azimuths of the body for each time step
"""
obs = ephem.Observer()
if body == 'sun':
obj = ephem.Sun()
elif body == 'moon':
obj = ephem.Moon()
else:
raise NotImplementedError("function only implemented for 'body' in ['sun', 'moon']")
ele = np.full(data['time'].shape, np.nan)
azi = np.full(data['time'].shape, np.nan)
for ind, time in enumerate(data['time']):
# observer settings
obs.lat = str(data['lat'][ind].values) # needs to be string to be interpreted as degrees
obs.lon = str(data['lon'][ind].values) # needs to be string to be interpreted as degrees
obs.elevation = data['altitude'][ind].values
obs.date = str(time.dt.strftime('%Y/%m/%d %H:%M:%S').values)
# get object's position in degrees
obj.compute(obs)
ele[ind] = np.rad2deg(obj.alt)
azi[ind] = np.rad2deg(obj.az)
return ele, azi
|
98ab63c20026d83010b10db7ef141d6f1c9bf55f
| 18,025 |
def list_inventory (inventory):
"""
:param inventory: dict - an inventory dictionary.
:return: list of tuples - list of key, value pairs from the inventory dictionary.
"""
result = []
for element, quantity in inventory.items():
if quantity > 0:
result.append ((element, quantity))
return result
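For example, entries with a zero quantity are dropped:
print(list_inventory({'coal': 7, 'diamond': 2, 'silver': 0}))   # [('coal', 7), ('diamond', 2)]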
|
264f8cde11879be8ace938c777f546974383122c
| 18,026 |
def wsd_is_duplicated_msg(msg_id):
"""
Check for a duplicated message.
Implements SOAP-over-UDP Appendix II Item 2
"""
if msg_id in wsd_known_messages:
return True
wsd_known_messages.append(msg_id)
if len(wsd_known_messages) > WSD_MAX_KNOWN_MESSAGES:
wsd_known_messages.popleft()
return False
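The function relies on module-level state not shown in this snippet; given the append/popleft calls it is presumably something like the sketch below.
from collections import deque
WSD_MAX_KNOWN_MESSAGES = 10      # illustrative cap, not the original constant
wsd_known_messages = deque()
print(wsd_is_duplicated_msg('uuid:1'))   # False, first time this message id is seen
print(wsd_is_duplicated_msg('uuid:1'))   # True, duplicate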
|
acd0c1b7de00e6e5ef2a04ff15c1906a5c543089
| 18,027 |
import matplotlib.pyplot as plt
def match_diagnostic_plot(V1, V2, pair_ix, tf=None, new_figure=False):
"""
Show the results of the pair matching from `match_catalog_quads`.
"""
if new_figure:
fig = plt.figure(figsize=[4,4])
ax = fig.add_subplot(111)
else:
ax = plt.gca()
# Original catalog points
ax.scatter(V1[:,0], V1[:,1], marker='o', alpha=0.1, color='k',
label='V1, N={0}'.format(V1.shape[0]))
ax.scatter(V2[:,0], V2[:,1], marker='o', alpha=0.1, color='r',
label='V2, N={0}'.format(V2.shape[0]))
if tf is not None:
# First catalog matches
tf_mat = V1[pair_ix[:,0],:]
ax.plot(tf_mat[:,0], tf_mat[:,1], marker='o', alpha=0.1,
color='k', linewidth=2)
# Transformed first catalog
tf_mat = tf(V1[pair_ix[:,0],:])
ax.plot(tf_mat[:,0], tf_mat[:,1], marker='o', alpha=0.8, color='k',
linewidth=2, label='Transform:\n'+' shift=[{0:.2f}, {1:.2f}]\n rotation={2:.4f}'.format(tf.translation[0],
tf.translation[1], tf.rotation))
else:
# First catalog matches
tf_mat = V1[pair_ix[:,0],:]
ax.plot(tf_mat[:,0], tf_mat[:,1], marker='o', alpha=0.8, color='k',
linewidth=2)
# Second catalog matches
ax.plot(V2[pair_ix[:,1],0], V2[pair_ix[:,1],1], marker='.', alpha=0.8,
color='r', linewidth=0.8,
label='{0} pairs'.format(pair_ix.shape[0]))
ax.legend(fontsize=8)
if new_figure:
fig.tight_layout(pad=0.2)
return fig
|
7af49a4b7ced3c0d310a6e5b4f623ef93698fa12
| 18,028 |
import json
def sliding_tile_state():
"""
Return the current state of the puzzle
    :return: JSON object representing the state of the sliding tile puzzle
"""
json_state = {'sliding_tile': sliding_tile.array(), 'solver': sliding_tile_str_solver, 'steps': sliding_tile_steps,
'search_steps': sliding_tile_search_steps, 'size1': sliding_tile.size1, 'size2': sliding_tile.size2}
return json.dumps(json_state)
|
8438ab4066e4a70b33873fee39667251da0823fc
| 18,029 |
import json
def _json_to_numpy(string_like, dtype=None): # type: (str) -> np.array
"""Convert a JSON object to a numpy array.
Args:
string_like (str): JSON string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array
"""
data = json.loads(string_like)
return np.array(data, dtype=dtype)
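Round-trip sketch: a JSON-encoded 2x2 matrix becomes a typed array.
import numpy as np
arr = _json_to_numpy('[[1, 2], [3, 4]]', dtype=np.float32)
print(arr.shape, arr.dtype)   # (2, 2) float32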
|
accdb28572ed13e6e977d569a69e4dfe27e22e21
| 18,030 |
import json
import time
def connect(**kwargs):
"""
A strategy to connect a bot.
:param kwargs: strategy, listener, and orders_queue
:return: the input strategy with a report
"""
strategy = kwargs['strategy']
listener = kwargs['listener']
orders_queue = kwargs['orders_queue']
assets = kwargs['assets']
logger = log.get_logger(__name__, strategy['bot'])
if support_functions.get_profile(strategy['bot'])['banned']:
logger.warning('{} has been banned'.format(strategy['bot']))
strategy['report'] = {
'success': False,
'details': {'Execution time': 0, 'Reason': '{} has been banned'.format(strategy['bot'])}
}
log.close_logger(logger)
return strategy
if 'connected' in listener.game_state.keys():
if listener.game_state['connected']:
logger.info('Bot connected in {}s'.format(0))
strategy['report'] = {
'success': True,
'details': {'Execution time': 0}
}
log.close_logger(logger)
return strategy
bot_profile = strategies.support_functions.get_profile(strategy['bot'])
order = {
'command': 'connect',
'parameters': {
'name': bot_profile['name'],
'username': bot_profile['username'],
'password': bot_profile['password'],
'serverId': assets['server_2_id'][bot_profile['server']],
}
}
logger.info('Sending order to bot API: {}'.format(order))
orders_queue.put((json.dumps(order),))
start = time.time()
timeout = 40 if 'timeout' not in strategy.keys() else strategy['timeout']
waiting = True
while waiting and time.time() - start < timeout:
if 'connected' in listener.game_state.keys() and 'api_outdated' in listener.game_state.keys():
if 'pos' in listener.game_state.keys() or listener.game_state['api_outdated'] or listener.game_state['banned']:
# Actually wait for the map to load and not just a connection confirmation
waiting = False
time.sleep(0.05)
execution_time = time.time() - start
if waiting:
        logger.warning('Failed connecting in {}s'.format(execution_time))
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': 'Timeout'}
}
log.close_logger(logger)
return strategy
if listener.game_state['api_outdated']:
        logger.warning('Your BlackFalconAPI is outdated. Try to get the latest one or contact the BlackFalcon team if you already have the latest version')
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': 'Your BlackFalconAPI is outdated. Try to get the latest one or contact the BlackFalcon team if you already have the latest version'}
}
log.close_logger(logger)
return strategy
if listener.game_state['banned']:
        logger.warning('{} has been banned'.format(strategy['bot']))
strategy['report'] = {
'success': False,
'details': {'Execution time': execution_time, 'Reason': '{} has been banned'.format(strategy['bot'])}
}
log.close_logger(logger)
return strategy
logger.info('Connected {} in {}s'.format(strategy['bot'], execution_time))
strategy['report'] = {
'success': True,
'details': {'Execution time': execution_time}
}
log.close_logger(logger)
return strategy
|
fd2c2637e9eb02356e441994d214d86ec77f56f1
| 18,031 |
def create_env(env, render=False, shared=False, maddpg=False, evaluate=False):
"""Return, and potentially create, the environment.
Parameters
----------
env : str or gym.Env
the environment, or the name of a registered environment.
render : bool
whether to render the environment
shared : bool
specifies whether agents in an environment are meant to share policies.
This is solely used by multi-agent Flow environments.
maddpg : bool
whether to use an environment variant that is compatible with the
MADDPG algorithm
evaluate : bool
specifies whether this is a training or evaluation environment
Returns
-------
gym.Env or list of gym.Env or None
gym-compatible environment(s). Set to None if no environment is being
returned.
array_like or list of array_like or None
the observation(s) from the environment(s) upon reset. Set to None if
no environment is being returned.
"""
if env is None:
# No environment (for evaluation environments).
return None, None
elif isinstance(env, str):
if env in ENV_ATTRIBUTES.keys() or env.startswith("multiagent"):
# Handle multi-agent environments.
multiagent = env.startswith("multiagent")
if multiagent:
env = env[11:]
env = ENV_ATTRIBUTES[env]["env"](
evaluate, render, multiagent, shared, maddpg)
elif env.startswith("flow:"):
# environments in flow/examples
env = import_flow_env(env, render, shared, maddpg, evaluate)
else:
# This is assuming the environment is registered with OpenAI gym.
env = gym.make(env)
# Reset the environment.
if isinstance(env, list):
obs = [next_env.reset() for next_env in env]
else:
obs = env.reset()
return env, obs
|
8c43a177418b7b9317d2ebcd4155edf5a58b5afe
| 18,032 |
def measure_approximate_cost(structure):
""" Various bits estimate the size of the structures they return. This makes that consistent. """
if isinstance(structure, (list, tuple)): return 1 + sum(map(measure_approximate_cost, structure))
elif isinstance(structure, dict): return len(structure) + sum(map(measure_approximate_cost, structure.values()))
elif isinstance(structure, int) or structure is None: return 1
else: assert False, type(structure)
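For example, a dict contributes its length plus the cost of its values, a list adds one plus its elements, and ints and None count as one each:
print(measure_approximate_cost({'a': 1, 'b': [2, 3, None]}))   # 2 + 1 + (1 + 1 + 1 + 1) = 7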
|
8adbd962e789be6549745fbb71c71918d3cc8d0c
| 18,033 |
def make3DArray(dim1, dim2, dim3, initValue):
"""
Return a list of lists of lists representing a 3D array with dimensions
dim1, dim2, and dim3 filled with initialValue
"""
result = []
for i in range(dim1):
result = result + [make2DArray(dim2, dim3, initValue)]
return result
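make2DArray is not included in this snippet; a compatible helper would look roughly like this.
def make2DArray(dim1, dim2, initValue):
    """Hypothetical 2D counterpart assumed by make3DArray."""
    return [[initValue for _ in range(dim2)] for _ in range(dim1)]

grid = make3DArray(2, 3, 4, 0)   # a 2x3x4 nested list filled with zeros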
|
c4972ff72fe751d131e4d840b12905d2383299c2
| 18,034 |
import math
from itertools import islice
def generate_boxes(bounds=(-1, -1, 1, 1), method='size', size=math.inf):
"""
Generate a stream of random bounding boxes
Has two methods for generating random boxes:
- *size* - generates a random central point (x0, y0)
within the bounding box, and then draws widths and heights
from a logN(0, 0.25) distribution.
- *range* - generates random ranges in x and y by drawing
points from the bounding box and ordering them.
Parameters:
bounds - the bounding box to generate boxes in
method - the method to use to generate the boxes. One of
'range' or 'size'
        size - the number of boxes to generate. If `size=math.inf`
            then an unbounded generator is returned
Returns:
a generator
"""
methods = {
'size': size_box_stream,
'range': range_box_stream
}
if method not in methods.keys():
raise ValueError(f'Unknown method {method}, allowed values are {methods.keys()}')
# Make the thing to return
_generator = methods[method](bounds)
return _generator if math.isinf(size) else islice(_generator, size)
|
4cb4ae7fd179b466054c21d7512a1861652476c0
| 18,035 |
def create_assets(asset_ids, asset_type, mk_parents):
"""Creates the specified assets if they do not exist.
This is a fork of the original function in 'ee.data' module with the
difference that
- If the asset already exists but the type is different that the one we
want, raise an error
- Starts the creation of folders since 'user/username/'
Will be here until I can pull requests to the original repo
:param asset_ids: list of paths
:type asset_ids: list
:param asset_type: the type of the assets. Options: "ImageCollection" or
"Folder"
:type asset_type: str
:param mk_parents: make the parents?
:type mk_parents: bool
:return: A description of the saved asset, including a generated ID
"""
for asset_id in asset_ids:
already = ee.data.getInfo(asset_id)
if already:
ty = already['type']
if ty != asset_type:
raise ValueError("{} is a {}. Can't create asset".format(asset_id, ty))
print('Asset %s already exists' % asset_id)
continue
if mk_parents:
parts = asset_id.split('/')
root = "/".join(parts[:2])
root += "/"
for part in parts[2:-1]:
root += part
if ee.data.getInfo(root) is None:
ee.data.createAsset({'type': 'Folder'}, root)
root += '/'
return ee.data.createAsset({'type': asset_type}, asset_id)
|
7be92642b6863f19039ed92d6652027ccd43d4ba
| 18,036 |
def decoration(markdown: str, separate: int = 0) -> str:
"""見出しが使われているマークダウンをDiscordで有効なものに変換します。
ただたんに`# ...`を`**#** ...`に変換して渡された数だけ後ろに改行を付け足すだけです。
Parameters
----------
markdown : str
変換するマークダウンです。
separate : int, default 1
見出しを`**`で囲んだ際に後ろに何個改行を含めるかです。"""
new = ""
for line in markdown.splitlines():
if line.startswith(("# ", "## ", "### ", "#### ", "##### ")):
line = f"**#** {line[line.find(' ')+1:]}"
if line.startswith(("\n", "**#**")):
line = f"{repeate(separate)}{line}"
new += f"{line}\n"
return new
|
f76a21b093a00d04d1e95fc733a0956722737d51
| 18,037 |
import hashlib
def get_fingerprint(file_path: str) -> str:
"""
Calculate a fingerprint for a given file.
:param file_path: path to the file that should be fingerprinted
:return: the file fingerprint, or an empty string
"""
try:
block_size = 65536
hash_method = hashlib.md5()
with open(file_path, 'rb') as input_file:
buf = input_file.read(block_size)
while buf:
hash_method.update(buf)
buf = input_file.read(block_size)
return hash_method.hexdigest()
except Exception:
# if the file cannot be hashed for any reason, return an empty fingerprint
return ''
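For instance, an unreadable path quietly yields an empty fingerprint:
print(get_fingerprint(__file__))          # 32-character md5 hex digest of this file
print(get_fingerprint('/no/such/file'))   # '' because opening the file raises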
|
b0ee4d592b890194241aaafb43ccba927d13662a
| 18,038 |
def set_publish_cluster_args(args):
"""Set args to publish cluster
"""
public_cluster = {}
if args.public_cluster:
public_cluster = {"private": False}
if args.model_price:
public_cluster.update(price=args.model_price)
if args.cpp:
public_cluster.update(credits_per_prediction=args.cpp)
return public_cluster
|
a1a5842093daf4d6de9bc9cdfae0cf7f9f5a0f5c
| 18,039 |
import os
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
def create_session():
"""creates database session"""
db_engine = sa.create_engine(os.environ.get('DATABASE_URL'), echo=True)
return sessionmaker(bind=db_engine)
|
312486556abc977f1187ba8a2ada70f2b681f3d0
| 18,040 |
def _get_iforest_anomaly_score_per_node(children_left, children_right, n_node_samples):
"""
Get anomaly score per node in isolation forest, which is node depth + _average_path_length(n_node_samples). Will
be used to replace "value" in each tree.
Args:
children_left: left children
children_right: right children
n_node_samples: number of samples per node
"""
# Get depth per node.
node_depth = np.zeros(shape=n_node_samples.shape, dtype=np.int64)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
if children_left[node_id] != children_right[node_id]:
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
return _average_path_length(n_node_samples) + node_depth
|
d567d083d8e1914e4aee809092fd81e08e74f98d
| 18,041 |
def get_invalid_value_message(value_name: str, value: str, line_no: int, uid: str, expected_vals: "list[str]") -> str:
"""
Returns the formatted message template for invalid value while parsing students data!
"""
msg = f"Invalid {value_name} <span class=\"font-weight-bold\">{value}</span>\
on line <span class=\"text-primary\">{line_no}</span>\
of UID <span class=\"text-secondary\">{uid}</span>.\
Should be one of {expected_vals}"
return msg
|
cb7dc84b566bb117fe53ce5956919978558ccbbf
| 18,042 |
def compute_score_for_coagulation(platelets_count: int) -> int:
"""
Computes score based on platelets count (unit is number per microliter).
"""
if platelets_count < 20_000:
return 4
if platelets_count < 50_000:
return 3
if platelets_count < 100_000:
return 2
if platelets_count < 150_000:
return 1
return 0
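Boundary behaviour, since each threshold is exclusive:
print(compute_score_for_coagulation(49_999))   # 3
print(compute_score_for_coagulation(50_000))   # 2
print(compute_score_for_coagulation(150_000))  # 0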
|
dc6e9935555fbb0e34868ce58a8ad8bc77be8b0c
| 18,043 |
def check_horizontal_visibility(board: list):
"""
Check row-wise visibility (left-right and vice versa)
Return True if all horizontal hints are satisfiable,
i.e., for line 412453* , hint is 4, and 1245 are the four buildings
that could be observed from the hint looking to the right.
>>> check_horizontal_visibility(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_horizontal_visibility(['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
>>> check_horizontal_visibility(['***21**', '452413*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
"""
    def count_visible(buildings):
        # A building is visible from the hint if it is taller than every building before it.
        visible, tallest = 0, 0
        for height in buildings:
            if int(height) > tallest:
                tallest = int(height)
                visible += 1
        return visible

    for row in board:
        buildings = row[1:-1]
        if row[0] != '*' and count_visible(buildings) != int(row[0]):
            return False
        if row[-1] != '*' and count_visible(buildings[::-1]) != int(row[-1]):
            return False
    return True
|
b84ff29fde689069ba5e92b10d54c8f0528aa321
| 18,044 |
import requests
def _get_soup(header, url):
"""This functions simply gets the header and url, creates a session and
generates the "soup" to pass to the other functions.
Args:
header (dict): The header parameters to be used in the session.
url (string): The url address to create the session.
Returns:
bs4.BeautifulSoup: The BeautifoulSoup object.
"""
# Try to read data from URL, if it fails, return None
try:
session = requests.Session()
session.headers["User-Agent"] = header["User-Agent"]
session.headers["Accept-Language"] = header["Language"]
session.headers["Content-Language"] = header["Language"]
html = session.get(url)
return bs(html.text, "html.parser")
    except Exception:
print(f"ERROR: Unable to retrieve data from {url}")
return None
|
22ad8876bdd19d405398272cfe0d4429f4b6ac9a
| 18,045 |
import sys
def test_model(model, name_model, X_train, y_train, X_test, y_test, details=False,
normalize=False, weights=None, return_model=False, lib='scikit-learn', fit_params=None):
"""
Function that does a detailed investigation of a given model. Confusion matrices are generated
and various metrics are shown.
Currently supported libraries: 'scikit-learn' (including Pipeline), 'keras'.
For language classification additional features are implemented and recognized by
pipelines named steps, if name:
- 'vect': (CountVectorizer) word counts are displayed for most and least frequent words
- 'tfidf': (TfidfTransformer) words with highest and lowest TFIDF scores are displayed
- 'multNB': (MultinomialNB) words with highest and lowest weights are shown
Parameters
----------
model : object with attributes fit & predict (+ others...)
The model being tested
name_model : string
Name of the model being tested
X_train : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y_train : array-like, shape (n_samples) or (n_samples, n_features)
Target relative to x_train for classification
X_test : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y_test : array-like, shape (n_samples) or (n_samples, n_features)
Target relative to x_test for classification
details : bool
If True evaluation about every parameter configuration is shown
default False
normalize : bool
        Specifies whether or not the confusion matrix is normalized.
default False
weights : dict
weights used in fit method. For example for KerasClassifier
model.fit(x_train, y_train, class_weight=weights). weights requires
a named step 'nn' in which it is applied.
return_model : bool
model is returned if True
default False
lib : string
specifies which library the model belongs to
Possible choices are: 'scikit-learn' (default), 'keras'
fit_params : dict
fitting parameters for the classifier - only works for lib="keras",
        pass weights via a separate argument, as the class labels need to be encoded otherwise.
Returns
-------
model, if return_model True
"""
# In the case where the labels don't need to be further encoded one
# could simply pass the class weights with fit_params. In case of label encoding
    # one needs to pass the class weights via weights.
    if fit_params is None:  # guard against unpacking None when no fitting parameters are given
        fit_params = {}
    if weights is not None or lib == 'keras':
le = LabelEncoder()
y_test_dec = y_test
y_test = le.fit_transform(y_test)
y_train_dec = y_train
y_train = le.transform(y_train)
# Encode the class label for the weights
df = pd.DataFrame(weights, index=[0])
df.columns = le.transform(df.columns)
class_weights = df.iloc[0].to_dict()
fit_params['nn__class_weight'] = class_weights
if weights is not None:
# try:
model.fit(X_train, y_train, **fit_params)
# except Exception as e:
# print(e)
# print("You probably face the issue that scikit-learn's fit method does not have"
# " the fitting parameter class_weight.")
# sys.exit()
else:
model.fit(X_train, y_train, **fit_params)
else:
model.fit(X_train, y_train, **fit_params)
print('############################################# \n '
'model: {} \n'
'#############################################'.format(name_model))
if details and hasattr(model, 'named_steps'):
print('the list of steps and parameters in the pipeline\n')
for k, v in model.named_steps.items():
print('{}: {}\n'.format(k, v))
if lib == 'scikit-learn':
y_pred = model.predict(X_test)
y_pred_train = model.predict(X_train)
elif lib == 'keras':
y_pred = model.predict_classes(X_test)
y_pred_train = model.predict_classes(X_train)
else:
print("No library recognized.")
sys.exit()
# make sure we work with the correct encoding
if weights is not None or lib == 'keras':
y_pred_dec = le.inverse_transform(y_pred)
y_pred_train_dec = le.inverse_transform(y_pred_train)
model_classes = le.classes_
elif lib == 'scikit-learn':
y_pred_dec = y_pred
y_pred_train_dec = y_pred_train
y_train_dec = y_train
y_test_dec = y_test
model_classes = model.classes_
# print accuracy
    print('accuracy on test set: \n {} % \n'.format(100 * accuracy_score(y_test_dec, y_pred_dec)))
    print('accuracy on train set: \n {} % \n'.format(100 * accuracy_score(y_train_dec, y_pred_train_dec)))
# print report
rep = classification_report(y_test_dec, y_pred_dec)
print('classification report: \n {} \n '.format(rep))
cm = confusion_matrix(y_test_dec, y_pred_dec, labels=model_classes)
if details:
print('confusion matrix: \n {} \n'.format(cm))
print('Actual labels:')
for i, j in zip(np.sum(cm, axis=1), model_classes):
print(' ', j, i)
print('')
print('Predicted labels:')
for i, j in zip(np.sum(cm, axis=0), model_classes):
print(' ', j, i)
print('')
# Plot non-normalized confusion matrix
plt.figure()
plt.figure(figsize=(12, 12))
plot_confusion_matrix(cm, classes=model_classes,
title='Confusion matrix', normalize=normalize)
plt.show()
if details:
        # print the length of the vocabulary
has_index = False
if hasattr(model, 'named_steps'):
if 'vect' in model.named_steps.keys():
# '.vocabulary_': dictionary item (word) and index 'world': index
# '.get_feature_names()': list of word from (vocabulary)
voc = model.named_steps['vect'].vocabulary_
voc_list = sorted(voc.items(), key=lambda kv: kv[1], reverse=True)
print('length of the vocabulary vector : \n{} {} '
'\n'.format(len(voc), len(model.named_steps['vect'].get_feature_names())))
# looking at the word occurency after CountVectorizer
vect_fit = model.named_steps['vect'].transform(X_test)
counts = np.asarray(vect_fit.sum(axis=0)).ravel().tolist()
df_counts = pd.DataFrame({'term': model.named_steps['vect'].get_feature_names(), 'count': counts})
df_counts.sort_values(by='count', ascending=False, inplace=True)
print(df_counts.head(30))
print(df_counts.tail(10))
print('')
n = 0
for i in voc_list:
n += 1
print(' ', i)
if n > 20:
break
print('more frequent words: \n{} \n'.format(voc_list[0:20]))
print('less frequent words: \n{} \n'.format(voc_list[-20:-1]))
# print('longest word: \n{} \n'.format(max(voc, key=len)))
# print('shortest word: \n{} \n'.format(min(voc, key=len)))
index = model.named_steps['vect'].get_feature_names()
has_index = True
# print the tfidf values
if 'tfidf' in model.named_steps.keys():
tfidf_value = model.named_steps['tfidf'].idf_
# print('model\'s methods: {}\n'.format(dir(model.named_steps['tfidf'])))
if has_index:
# looking at the word occurency after CountVectorizer
tfidf_fit = model.named_steps['tfidf'].transform(vect_fit)
tfidf = np.asarray(tfidf_fit.mean(axis=0)).ravel().tolist()
df_tfidf = pd.DataFrame({'term': model.named_steps['vect'].get_feature_names(), 'tfidf': tfidf})
df_tfidf.sort_values(by='tfidf', ascending=False, inplace=True)
print(df_tfidf.head(20))
print(df_tfidf.tail(20))
print('')
tfidf_series = pd.Series(data=tfidf_value, index=index)
print('IDF:')
print('Smallest idf:\n{}'.format(tfidf_series.nsmallest(20).index.values.tolist()))
print('{} \n'.format(tfidf_series.nsmallest(20).values.tolist()))
print('Largest idf:\n{}'.format(tfidf_series.nlargest(20).index.values.tolist()))
print('{} \n'.format(tfidf_series.nlargest(20).values.tolist()))
# print the parameters from the model
if 'multNB' in model.named_steps.keys():
values = model.named_steps['multNB'].coef_[0]
if has_index:
features_series = pd.Series(data=values, index=index)
print('Model\'s parameters:')
print('Smallest coeff:\n{}'.format(features_series.nsmallest(20).index.values.tolist()))
print('{} \n'.format(features_series.nsmallest(20).values.tolist()))
print('Largest coeff:\n{}'.format(features_series.nlargest(20).index.values.tolist()))
print('{} \n'.format(features_series.nlargest(20).values.tolist()))
# to find the list of label
# model_classes
# to find the model and attributes
# print('model\'s attributes: {}\n'.format(model.__dict__))
# to find all methods
# print('model\'s methods: {}\n'.format(dir(model)))
# dir(model)
print('')
if return_model:
return model
|
d0fd0f528b14dcb0502c62b97c883d6469ef81ed
| 18,046 |
import json
def get_text_block(dunning_type, language, doc):
"""
This allows the rendering of parsed fields in the jinja template
"""
if isinstance(doc, string_types):
doc = json.loads(doc)
text_block = frappe.db.get_value('Dunning Type Text Block',
{'parent': dunning_type, 'language': language},
['top_text_block', 'bottom_text_block'], as_dict = 1)
if text_block:
return {
'top_text_block': frappe.render_template(text_block.top_text_block, doc),
'bottom_text_block': frappe.render_template(text_block.bottom_text_block, doc)
}
|
31775b402a943e0c735d65a3c388503a6e03b37e
| 18,047 |
import os
def load(test=False, cols=None):
"""Loads data from FTEST if *test* is True, otherwise from FTRAIN.
Pass a list of *cols* if you're only interested in a subset of the
target columns.
"""
fname = FTEST if test else FTRAIN
df = pd.read_csv(os.path.expanduser(fname)) # load pandas dataframe
# The Image column has pixel values separated by space; convert
# the values to numpy arrays:
df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))
if cols: # get a subset of columns
df = df[list(cols) + ['Image']]
print(df.count()) # prints the number of values for each column
df = df.dropna() # drop all rows that have missing values in them
X = np.vstack(df['Image'].values) / 255. # scale pixel values to [0, 1]
X = X.astype(np.float32)
if not test: # only FTRAIN has any target columns
y = df[df.columns[:-1]].values
y = (y - 48) / 48 # scale target coordinates to [-1, 1]
X, y = shuffle(X, y, random_state=42) # shuffle train data
y = y.astype(np.float32)
else:
y = None
return X, y
|
7de65b700ee077b496cd7ca4e678bc133d0532d7
| 18,048 |
import os
def flow_read(src_file):
"""Read optical flow stored in a .flo, .pfm, or .png file
Args:
src_file: Path to flow file
Returns:
flow: optical flow in [h, w, 2] format
Refs:
- Interpret bytes as packed binary data
Per https://docs.python.org/3/library/struct.html#format-characters:
format: f -> C Type: float, Python type: float, Standard size: 4
format: d -> C Type: double, Python type: float, Standard size: 8
Based on:
- To read optical flow data from 16-bit PNG file:
https://github.com/ClementPinard/FlowNetPytorch/blob/master/datasets/KITTI.py
Written by Clément Pinard, Copyright (c) 2017 Clément Pinard
MIT License
- To read optical flow data from PFM file:
https://github.com/liruoteng/OpticalFlowToolkit/blob/master/lib/pfm.py
Written by Ruoteng Li, Copyright (c) 2017 Ruoteng Li
License Unknown
- To read optical flow data from FLO file:
https://github.com/daigo0927/PWC-Net_tf/blob/master/flow_utils.py
Written by Daigo Hirooka, Copyright (c) 2018 Daigo Hirooka
MIT License
"""
# Read in the entire file, if it exists
assert(os.path.exists(src_file))
if src_file.lower().endswith('.flo'):
with open(src_file, 'rb') as f:
# Parse .flo file header
tag = float(np.fromfile(f, np.float32, count=1)[0])
assert(tag == TAG_FLOAT)
w = np.fromfile(f, np.int32, count=1)[0]
h = np.fromfile(f, np.int32, count=1)[0]
# Read in flow data and reshape it
flow = np.fromfile(f, np.float32, count=h * w * 2)
flow.resize((h, w, 2))
elif src_file.lower().endswith('.png'):
# Read in .png file
flow_raw = cv2.imread(src_file, -1)
# Convert from [H,W,1] 16bit to [H,W,2] float formet
flow = flow_raw[:, :, 2:0:-1].astype(np.float32)
flow = flow - 32768
flow = flow / 64
# Clip flow values
flow[np.abs(flow) < 1e-10] = 1e-10
# Remove invalid flow values
invalid = (flow_raw[:, :, 0] == 0)
flow[invalid, :] = 0
elif src_file.lower().endswith('.pfm'):
with open(src_file, 'rb') as f:
# Parse .pfm file header
tag = f.readline().rstrip().decode("utf-8")
assert(tag == 'PF')
dims = f.readline().rstrip().decode("utf-8")
w, h = map(int, dims.split(' '))
scale = float(f.readline().rstrip().decode("utf-8"))
# Read in flow data and reshape it
flow = np.fromfile(f, '<f') if scale < 0 else np.fromfile(f, '>f')
flow = np.reshape(flow, (h, w, 3))[:, :, 0:2]
flow = np.flipud(flow)
else:
raise IOError
return flow
|
855e0a6a65bbf6d3658843da4f4d9d0c4e3ea597
| 18,049 |
def group_create_factory(context, request):
"""Return a GroupCreateService instance for the passed context and request."""
user_service = request.find_service(name="user")
return GroupCreateService(
session=request.db,
user_fetcher=user_service.fetch,
publish=partial(_publish, request),
)
|
3928a35a74d1f62e4a6f5e38087fce72e7ebbc95
| 18,050 |
def verify(params, vk, m, sig):
""" verify a signature on a clear message """
(G, o, g1, hs, g2, e) = params
(g2, X, Y) = vk
sig1 , sig2 = sig
return not sig1.isinf() and e(sig1, X + m * Y) == e(sig2, g2)
|
7413d9172d383c3602cbc2b8348c4ace61c40302
| 18,051 |
import os
def prepend_items():
"""
    Return a function that prepends each item from the "paths" list with "prefix"
"""
def prepend_func(prefix, paths):
return [os.path.join(prefix, item) for item in paths]
return prepend_func
|
b7c4fd8e1c53c82ba7dd1e826feb084e6543691b
| 18,052 |
def generate_data(n):
"""
生成训练数据
"""
X, y = make_classification(n_samples=n, n_features=4)
data = pd.DataFrame(X, columns=["x1", "x2", "x3", "x4"])
data["y"] = y
return data
|
0bf9cac1cf94c6bf8c12cb605f3bfcd6cde10a0d
| 18,053 |
def blsimpv(p, s, k, rf, t, div=0, cp=1):
"""
Computes implied Black vol from given price, forward, strike and time.
"""
f = lambda x: blsprice(s, k, rf, t, x, div, cp) - p
result = brentq(f, 1e-9, 1e+9)
return result
|
30ad8274aa40f50460cc7f52095ead8ef5021c9a
| 18,054 |
def container_describe(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /container-xxxx/describe API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Containers-for-Execution#API-method%3A-%2Fcontainer-xxxx%2Fdescribe
"""
return DXHTTPRequest('/%s/describe' % object_id, input_params, always_retry=always_retry, **kwargs)
|
e56126b67880a316a84ab81cbcd208844282f0f5
| 18,055 |
from typing import Union
from typing import Dict
def nested(fields: Union[Dict[str, Dict], DataSpec], **config) -> dict:
"""
Constructs a nested Field Spec
Args:
fields: sub field specifications
config: in kwargs format
Returns:
the nested spec
"""
spec = {
"type": "nested",
"fields": utils.get_raw_spec(fields)
} # type: Dict[str, Any]
if len(config) > 0:
spec['config'] = config
return spec
|
3cba172e642b968aabbeb7ee1f2c21d217f443e2
| 18,056 |
from matplotlib.colors import ListedColormap
def make_cluster_cmap(labels, grey_pos='start'):
"""
Creates an appropriate colormap for a vector of cluster labels.
Parameters
----------
labels : array_like
The labels of multiple clustered points
grey_pos: str
Where to put the grey color for the noise
Returns
-------
cmap : matplotlib colormap object
A correct colormap
Examples
--------
>>> my_cmap = make_cluster_cmap(labels=np.array([-1,3,5,2,4,1,3,-1,4,2,5]))
"""
if labels.max() < 9:
cmap = list(plt.get_cmap('tab10').colors)
if grey_pos == 'end':
cmap.append(cmap.pop(-3))
elif grey_pos == 'start':
cmap = [cmap.pop(-3)] + cmap
elif grey_pos == 'del':
del cmap[-3]
else:
cmap = list(plt.get_cmap('tab20').colors)
if grey_pos == 'end':
cmap.append(cmap.pop(-6))
cmap.append(cmap.pop(-6))
elif grey_pos == 'start':
cmap = [cmap.pop(-5)] + cmap
cmap = [cmap.pop(-5)] + cmap
elif grey_pos == 'del':
del cmap[-5]
del cmap[-5]
cmap = ListedColormap(cmap)
return cmap
|
3766f2db561705660bb572f8757ae8b0cc225a10
| 18,057 |
from typing import List
def objects_from_array(
objects_arr: np.ndarray, default_keys=constants.DEFAULT_OBJECT_KEYS
) -> List[btypes.PyTrackObject]:
"""Construct PyTrackObjects from a numpy array."""
assert objects_arr.ndim == 2
n_features = objects_arr.shape[1]
assert n_features >= 3
n_objects = objects_arr.shape[0]
keys = default_keys[:n_features]
objects_dict = {keys[i]: objects_arr[:, i] for i in range(n_features)}
objects_dict["ID"] = np.arange(n_objects)
return objects_from_dict(objects_dict)
|
eeabc05132fa04f826c65d204f2d97ded625189a
| 18,058 |
def run_policy(env, policy, scaler, logger, episodes):
""" Run policy and collect data for a minimum of min_steps and min_episodes
Args:
env: ai gym environment
policy: policy object with sample() method
scaler: scaler object, used to scale/offset each observation dimension
to a similar range
logger: logger object, used to save stats from episodes
episodes: total episodes to run
Returns: list of trajectory dictionaries, list length = number of episodes
'observes' : NumPy array of states from episode
'actions' : NumPy array of actions from episode
'rewards' : NumPy array of (un-discounted) rewards from episode
'unscaled_obs' : NumPy array of (un-discounted) rewards from episode
"""
total_steps = 0
trajectories = []
for e in range(episodes):
observes, actions, rewards, unscaled_obs = run_episode(env, policy, scaler)
total_steps += observes.shape[0]
trajectory = {'observes': observes,
'actions': actions,
'rewards': rewards,
'unscaled_obs': unscaled_obs}
trajectories.append(trajectory)
unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])
scaler.update(unscaled) # update running statistics for scaling observations
logger.log({'_MeanReward': np.mean([t['rewards'].sum() for t in trajectories]),
'Steps': total_steps})
return trajectories
|
8d723d13d10b15fda3a2da3591b663ec3c1b81b8
| 18,059 |
def load_data():
"""Load database"""
db = TinyDB(DATABASE_PATH)
data = db.all()
return pd.DataFrame(data)
|
5fba31fb66f1ccb86125902e8a39fe2c0247f741
| 18,060 |
def plotcmaponaxis(ax, surf, title, point_sets=None):
"""Plot a Surface as 2D heatmap on a given matplotlib Axis"""
surface = ax.pcolormesh(surf.X, surf.Y, surf.Z, cmap=cm.viridis)
if point_sets:
for x_y, z, style in point_sets:
ax.scatter(x_y[:, 0], x_y[:, 1], **style)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title(title)
return surface
|
9d462c745cc3a5f2142c29d69ddba1b4f96f6cab
| 18,061 |
def get_log_storage() -> TaskLogStorage:
"""Get current TaskLogStorage instance associated with the current application."""
return current_app.config.get("LOG_STORAGE")
|
30e4e8d6c61196ee94d519cff020d54d47b2ddbf
| 18,062 |
def test_optional_posonly_args1(a, b=10, /, c=100):
"""
>>> test_optional_posonly_args1(1, 2, 3)
6
>>> test_optional_posonly_args1(1, 2, c=3)
6
>>> test_optional_posonly_args1(1, b=2, c=3) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_optional_posonly_args1() got ... keyword argument... 'b'
>>> test_optional_posonly_args1(1, 2)
103
>>> test_optional_posonly_args1(1, b=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_optional_posonly_args1() got ... keyword argument... 'b'
"""
return a + b + c
|
8986d0718e65988f109b31bf5f7ce8fdcd65c833
| 18,063 |
def _build_schema_resource(fields):
"""Generate a resource fragment for a schema.
Args:
        fields (Sequence[google.cloud.bigquery.schema.SchemaField]): schema to be dumped.
Returns:
Sequence[Dict]: Mappings describing the schema of the supplied fields.
"""
return [field.to_api_repr() for field in fields]
|
34a32c9b1707062d202a1fd9f98cdd4dc0cb11ae
| 18,064 |
def flatten_swtn(x):
""" Flatten list an array.
Parameters
----------
x: list of dict or ndarray
the input data
Returns
-------
y: ndarray 1D
the flatten input list of array.
shape: list of dict
the input list of array structure.
"""
# Check input
if not isinstance(x, list):
x = [x]
elif len(x) == 0:
return None, None
# Flatten the dataset
y = []
shape_dict = []
for i in range(len(x)):
dict_lvl = {}
for key in x[i].keys():
dict_lvl[key] = x[i][key].shape
y = np.concatenate((y, x[i][key].flatten()))
shape_dict.append(dict_lvl)
return y, shape_dict
|
2f8e0b17c462dd97eaa3cd69104164cdcf533cdc
| 18,065 |
from os import path
from functools import wraps
import pandas as pd
def checkpoint(cache, **csv_args):
"""
Return a decorator which automatically caches the result of a function
which returns a pandas.DataFrame.
Parameters
----------
cache : str or path object
The path to the file which contains the results of `func`.
**csv_args : Optional[Mapping]
Arguments to pass on to both `pandas.read_csv` to retrieve cached
results and `pandas.DataFrame.to_csv` to save results. Should only
contain parameters common to `read_csv` and `to_csv` (which is most of
them).
Returns
-------
_decorator : function
A decorator which caches the result of any function returning a
`pandas.DataFrame`.
"""
def _decorator(func):
@wraps(func)
def _wrapper(*args, **kwargs):
if path.exists(cache):
return pd.read_csv(cache, **csv_args)
else:
result = func(*args, **kwargs)
result.to_csv(cache, **csv_args)
return result
return _wrapper
return _decorator
|
5f43a48e6635b6bfa012daaf84780457f98037aa
| 18,066 |
def crossing(series, value, **options):
"""Find where a function crosses a value.
series: Series
value: number
options: passed to interp1d (default is linear interp)
returns: number
"""
interp = interp1d(series.values, series.index, **options)
return interp(value)
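A small worked example; the interp1d import is the presumed dependency of crossing() and is included here so the sketch runs on its own.
from scipy.interpolate import interp1d   # presumed import used by crossing()
import pandas as pd
s = pd.Series([0.0, 0.4, 0.8, 1.0], index=[0, 1, 2, 3])
print(crossing(s, 0.5))   # 1.25 with the default linear interpolation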
|
5318975ad28280e6aff4c0dbd944daf0dd2a24d1
| 18,067 |
import math
def equilSoundSpeeds(gas, rtol=1.0e-6, maxiter=5000):
"""
Returns a tuple containing the equilibrium and frozen sound speeds for a
gas with an equilibrium composition. The gas is first set to an
equilibrium state at the temperature and pressure of the gas, since
otherwise the equilibrium sound speed is not defined.
"""
# set the gas to equilibrium at its current T and P
gas.equilibrate('TP', rtol=rtol, maxiter=maxiter)
# save properties
s0 = gas.s
p0 = gas.P
r0 = gas.density
# perturb the pressure
p1 = p0*1.0001
# set the gas to a state with the same entropy and composition but
# the perturbed pressure
gas.SP = s0, p1
# frozen sound speed
afrozen = math.sqrt((p1 - p0)/(gas.density - r0))
# now equilibrate the gas holding S and P constant
gas.equilibrate('SP', rtol=rtol, maxiter=maxiter)
# equilibrium sound speed
aequil = math.sqrt((p1 - p0)/(gas.density - r0))
# compute the frozen sound speed using the ideal gas expression as a check
gamma = gas.cp/gas.cv
afrozen2 = math.sqrt(gamma * ct.gas_constant * gas.T /
gas.mean_molecular_weight)
return aequil, afrozen, afrozen2
|
c2b10fe05cc2f19e50b5ed8934c463768ec16c8e
| 18,068 |
import time
import click
def benchmark(partitioner_list: list, item_list: list, bucket_list: list, iterations: int = 1,
begin_range: int = 1, end_range: int = 10, specified_items_sizes: list = None, verbose: bool = False)\
-> pd.DataFrame:
"""
Args:
Returns:
Raises:
"""
r = pd.DataFrame(columns=('partitioner', 'num_items', 'buckets', 'iteration',
'variance', 'elapsed_seconds', 'dividers', 'items'))
for num_items in item_list:
for num_buckets in bucket_list:
results = []
for i in range(1, iterations + 1):
if specified_items_sizes is None:
items = np.random.randint(begin_range, end_range + 1, size=num_items)
else:
items = specified_items_sizes[:num_items]
for partitioner in partitioner_list:
start = time.time()
dividers, variance = partitioner.partition(items, num_buckets)
end = time.time()
results.append({
'partitioner': partitioner.name,
'num_items': num_items,
'buckets': num_buckets,
'iteration': i,
'variance': variance,
'elapsed_seconds': end - start,
'dividers': dividers,
'items': items
})
r = r.append(results)
mean = r[(r.num_items == num_items) & (r.buckets == num_buckets)].groupby('partitioner').mean()
if verbose:
click.echo(f'Items: {num_items} Buckets: {num_buckets} Mean values over {iterations} iterations:')
click.echo(f'Partitioner\t\tTime (ms)\t\tVariance')
for partitioner, record in mean.iterrows():
click.echo(f'{partitioner}\t\t\t{record.elapsed_seconds * 1000:.2f}\t\t\t{record.variance:.4f}')
return r
|
08e4d00fa57bada7297c509ead2a2e45f1fb5cc7
| 18,069 |
def adj_by_strand(genes):
"""
liste: list of hmm gene with homogenous strand
Check if the gene is in tandem with another and if so store the gene inside a set obj.TA_gene.linked
In parallel it clean up the list obj.TA_gene.genes
by removing the genes that forme a tandem. Then TA_gene.genes has only the lonely_gene
"""
linked_genes = set()
for gi, gene in enumerate(genes):
# print obj.TA_gene.genes_plus[gi].gene_number, obj.TA_gene.genes_plus[gi].len_val
for gpost in genes[gi + 1:]:
if gpost.end - gene.end + 1 > obj.Gene.length_max + obj.Gene.distanceMax:
"""
if the distance between gene.end and gpost.end is superior to lenmax + distmax
Then the two gene won't be in pair and the next postgene either because they are sorted by their start
So we can break the gpost for loop and check the next gene
"""
break
            # simple test that checks whether the two genes are adjacent
if gene.is_pre_adj_to(gpost):
# store the information of prev and post according the strand
if gene.strand == '+':
gene.post.append(gpost)
gpost.prev.append(gene)
else:
gpost.post.append(gene)
gene.prev.append(gpost)
# add the gene because it has a link in the set linked of class TA_gene
linked_genes.add(gene)
# add the gene because it has a link in the set linked of class TA_gene
linked_genes.add(gpost)
return linked_genes
|
2836375fadf46b445098ddecf3aaf1884dad8efc
| 18,070 |
def register_user():
""" register a user and take to profile page """
form = RegisterForm()
if form.validate_on_submit():
username = form.username.data
password = form.password.data
email = form.email.data
first_name = form.first_name.data
last_name = form.last_name.data
new_user = User.register(username, password, email, first_name, last_name)
db.session.add(new_user)
try:
db.session.commit()
except IntegrityError:
form.username.errors.append('Username taken. Please pick another username')
return render_template('register.html', form=form)
session['username'] = new_user.username
flash('Welcome! Successfully Created Your Account!', "success")
return redirect(f'/users/{new_user.username}')
return render_template('register.html', form=form)
|
2f1f875d3c35589d8efc1e069a8d050b931a5f51
| 18,071 |
import numpy as np
def stack_init_image(init_image, num_images):
"""Create a list from a single image.
Args:
init_image: a single image to be copied and stacked
num_images: number of copies to be included
Returns:
A list of copies of the original image (numpy ndarrays)
"""
init_images = []
for j in range(num_images):
init_images.append(np.asarray(init_image.copy()))
return init_images
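A minimal usage sketch (a blank PIL image stands in for a real initial image; any object with a .copy() method that numpy can convert works):
from PIL import Image
frames = stack_init_image(Image.new('RGB', (64, 64)), num_images=4)
print(len(frames), frames[0].shape)   # 4 (64, 64, 3)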
|
2cf26723bdbf53921ff053308e408bd84ec03edb
| 18,072 |
import os
import xml.etree.ElementTree as ET
def process_problem(_path, _lang=EN):
""" Обработка задачи """
path = os.path.join(_path, 'problem.xml')
with open(path, 'r', encoding='utf-8') as file:
root = ET.parse(file).getroot()
titles = root.find('names').findall('name')
title = titles[0].attrib['value']
for t in titles:
if t.attrib['language'] == _lang:
title = t.attrib['value']
statement = try_get_statement_resource(root, _lang)
if statement.found:
path = os.path.join(_path, statement.path)
with open(path, 'r', encoding=statement.encoding) as file:
statement_source = file.read()
else:
statement_source = ''
solution = try_get_solution_resource(root, _lang)
if solution.found:
path = os.path.join(_path, solution.path)
with open(path, 'r', encoding=solution.encoding) as file:
solution_source = file.read()
else:
solution_source = ''
checker_source = ''
checker_lang = None
source_node = root.find('assets/checker/source')
if source_node is not None:
path = os.path.join(_path, source_node.attrib['path'])
checker_lang = try_get_checker_lang(path)
with open(path, 'r') as checker_file:
checker_source = checker_file.read()
judging = root.find('judging')
input_file = judging.attrib['input-file']
output_file = judging.attrib['output-file']
time_limit = 0
memory_limit = 0
tl_node = root.find('judging/testset/time-limit')
if tl_node is not None:
time_limit = int(float(tl_node.text) * 0.001)
ml_node = root.find('judging/testset/memory-limit')
if ml_node is not None:
memory_limit = int(ml_node.text) // (1024 * 1024)
problem = Problem()
problem.codename = title
problem.input_file = input_file
problem.output_file = output_file
problem.time_limit = time_limit
problem.memory_limit = memory_limit
# problem.statement = statement_source
# problem.solutions = solution_source
problem.checker = checker_source
problem.checker_lang = checker_lang
result = ImportResult(problem, get_tags(root))
return result
|
f5a3a987212c6b0c7d688a0bc59783a7554b04a2
| 18,073 |
import sys
def input_to_text(s):
"""Convert the given byte string or text type to text using the
file system encoding of the current system.
:param basestring s: String or text type to convert
:return: The string as text
:rtype: unicode
"""
return avalon.compat.to_text(s, sys.getfilesystemencoding())
|
7147c91984ea4e7e3099b37297009ac37e7218fb
| 18,074 |
import numpy as np
def f5(x, eps=0.0):
"""The function f(x)=tanh(4x)+noise"""
return np.tanh(4*x) + eps * np.random.normal(size=x.shape)
|
02025ed30032b1e8de9ecbca4238170e5adff4b1
| 18,075 |
def get_next_event(game, players):
"""
return None if a player has to move before the next event
otherwise return the corresponding Event enum entry
"""
active_player = get_active_player(players, game.finish_time)
if active_player is None:
return None
planet_rotation_event = (
game.planet_rotation_event_time, game.planet_rotation_event_move, Event.PLANET_ROTATION)
offer_demand_event = (game.offer_demand_event_time,
game.offer_demand_event_move, Event.OFFER_DEMAND)
no_event = (active_player.time_spent, active_player.last_move, None)
events = [planet_rotation_event, offer_demand_event, no_event]
if game.midgame_scoring:
midgame_scoring_event = (game.midgame_scoring_event_time, game.midgame_scoring_event_move, Event.MIDGAME_SCORING)
events.append(midgame_scoring_event)
result = next_turn(events)
return result
|
955082da6a3c0ec8b0ee50e149e7251651584352
| 18,076 |
import six
import json
def _convert_requirements(requirements):
"""Convert the requirements to an array of strings.
["key op value", "key op value", ...]
"""
# TODO(frossigneux) Support the "or" operator
# Convert text to json
if isinstance(requirements, six.string_types):
try:
requirements = json.loads(requirements)
except ValueError:
raise manager_ex.MalformedRequirements(rqrms=requirements)
# Requirement list looks like ['<', '$ram', '1024']
if _requirements_with_three_elements(requirements):
result = []
if requirements[0] == '=':
requirements[0] = '=='
string = (requirements[1][1:] + " " + requirements[0] + " " +
requirements[2])
result.append(string)
return result
# Remove the 'and' element at the head of the requirement list
elif _requirements_with_and_keyword(requirements):
return [_convert_requirements(x)[0]
for x in requirements[1:]]
    # Empty requirement list
elif isinstance(requirements, list) and not requirements:
return requirements
else:
raise manager_ex.MalformedRequirements(rqrms=requirements)
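A sketch of the expected conversion (assuming the helper predicates and the manager_ex exceptions from the original module are available):
print(_convert_requirements(['=', '$ram', '1024']))
# ['ram == 1024']
print(_convert_requirements(['and', ['>=', '$vcpus', '2'], ['<', '$disk', '40']]))
# ['vcpus >= 2', 'disk < 40']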
|
6b221a303af41ea81dd172fc0a3bd2f4069ffd0c
| 18,077 |
def max_width(string, cols, separator='\n'):
"""Returns a freshly formatted
:param string: string to be formatted
:type string: basestring or clint.textui.colorred.ColoredString
:param cols: max width the text to be formatted
:type cols: int
:param separator: separator to break rows
:type separator: basestring
>>> formatters.max_width('123 5678', 8)
'123 5678'
>>> formatters.max_width('123 5678', 7)
'123 \n5678'
"""
is_color = isinstance(string, ColoredString)
if is_color:
string_copy = string._new('')
string = string.s
stack = tsplit(string, NEWLINES)
for i, substring in enumerate(stack):
stack[i] = substring.split()
_stack = []
for row in stack:
_row = ['',]
_row_i = 0
for word in row:
if (len(_row[_row_i]) + len(word)) <= cols:
_row[_row_i] += word
_row[_row_i] += ' '
elif len(word) > cols:
# ensure empty row
if len(_row[_row_i]):
_row[_row_i] = _row[_row_i].rstrip()
_row.append('')
_row_i += 1
chunks = schunk(word, cols)
for i, chunk in enumerate(chunks):
if not (i + 1) == len(chunks):
_row[_row_i] += chunk
_row[_row_i] = _row[_row_i].rstrip()
_row.append('')
_row_i += 1
else:
_row[_row_i] += chunk
_row[_row_i] += ' '
else:
_row[_row_i] = _row[_row_i].rstrip()
_row.append('')
_row_i += 1
_row[_row_i] += word
_row[_row_i] += ' '
else:
_row[_row_i] = _row[_row_i].rstrip()
_row = map(str, _row)
_stack.append(separator.join(_row))
_s = '\n'.join(_stack)
if is_color:
_s = string_copy._new(_s)
return _s
|
49521ec4521b639e71b3fc5212738cc6e4d93129
| 18,078 |
import base64
from Crypto.Cipher import AES
def aes_encrypt(text, sec_key):
"""
AES encrypt method.
:param text:
:param sec_key:
:return:
"""
pad = 16 - len(text) % 16
if isinstance(text, bytes):
text = text.decode('utf-8')
text += pad * chr(pad)
    encryptor = AES.new(sec_key, 2, '0102030405060708')  # mode 2 == AES.MODE_CBC, fixed IV
cipher_text = encryptor.encrypt(text)
cipher_text = base64.b64encode(cipher_text)
return cipher_text
|
55340a7f1fcf37c58daaf3a72db70344159fbf30
| 18,079 |
def get_value_at_coords(matrix, x, y):
"""Returns the value of the matrix at given integer coordinates.
Arguments:
matrix {ndarray} -- Square matrix.
x {int} -- x-coordinate.
y {int} -- y-coordinate.
Returns:
int -- Value of the matrix.
"""
offset = matrix_offset(matrix)
return matrix[x + offset, y + offset]
|
92e96f276025e21bc8643eb96ce03fd191285c93
| 18,080 |
import numpy as np
def rms(vector):
    """
    Parameters
    ----------
    vector : array_like
        Input values.
    Returns
    -------
    float
        The root-mean-square of the input values.
    """
return np.sqrt(np.mean(np.square(vector)))
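For example (plain Python sequences work because numpy coerces them):
print(rms([3.0, 4.0]))   # sqrt((9 + 16) / 2) = 3.5355...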
|
9d4888050e7f048a8d2ca5b92fa638d5c8d24eb7
| 18,081 |
def parse(text):
"""Parse a tag-expression as text and return the expression tree.
.. code-block:: python
tags = ["foo", "bar"]
tag_expression = parse("foo and bar or not baz")
assert tag_expression.evaluate(tags) == True
:param text: Tag expression as text to parse.
    :param parser_class: Optional parser class to use (not exposed by this simplified wrapper).
:return: Parsed expression
"""
return TagExpressionParser.parse(text)
|
202951a1023557e3405b8f5d4d06084e798ae12c
| 18,082 |
import itertools
def average_distance(points, distance_func):
"""
    Given a set of points and their pairwise distances, it calculates the average distance
    between a pair of points, averaged over all C(num_points, 2) pairs.
"""
for p0, p1 in itertools.combinations(points, 2): # assert symmetry
assert abs(distance_func(p0, p1) - distance_func(p1, p0)) < 1e-7, \
'{} {} {} {}'.format(p0, p1, distance_func(p0, p1), distance_func(p1, p0))
for p0, p1, p2 in itertools.combinations(points, 3): # assert triangle inequality
assert distance_func(p0, p1) + distance_func(p1, p2) >= distance_func(p0, p2)
assert distance_func(p0, p2) + distance_func(p1, p2) >= distance_func(p0, p1)
assert distance_func(p0, p1) + distance_func(p0, p2) >= distance_func(
p1, p2), '{p0}-{p1}={d01} {p0}-{p2}={d02} {p1}-{p2}={d12}'.format(
p0=p0, p1=p1, p2=p2, d01=distance_func(p0, p1), d02=distance_func(p0, p2),
d12=distance_func(p1, p2))
# actual calculation happens below
total_dist = 0.0
all_pairs = list(itertools.combinations(points, 2))
for p0, p1 in all_pairs:
total_dist += distance_func(p0, p1)
if all_pairs:
return float(total_dist) / len(all_pairs)
else:
return 0.0
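A minimal usage sketch with Euclidean distance on assumed 2-D points (a 3-4-5 triangle, so the mean pairwise distance is 4.0):
import math
points = [(0.0, 0.0), (3.0, 0.0), (0.0, 4.0)]
euclidean = lambda p, q: math.hypot(p[0] - q[0], p[1] - q[1])
print(average_distance(points, euclidean))   # (3 + 4 + 5) / 3 = 4.0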
|
236735da94e902dd7fbe062de8abb9a02208156f
| 18,083 |
def get_bin_alignment(begin, end, freq):
"""Generate a few values needed for checking and filling a series if
need be."""
start_bin = get_expected_first_bin(begin,freq)
end_bin = (end/freq)*freq
expected_bins = expected_bin_count(start_bin, end_bin, freq)
return start_bin, end_bin, expected_bins
|
0ff46d4d8df2d7fd177377621c69bac95f23eb9f
| 18,084 |
def TagAndFilterWrapper(target, dontRemoveTag=False):
"""\
Returns a component that wraps a target component, tagging all traffic
coming from its outbox; and filtering outany traffic coming into its inbox
with the same unique id.
"""
if dontRemoveTag:
Filter = FilterButKeepTag
else:
Filter = FilterTag
return Graphline( TAGGER = UidTagger(),
FILTER = Filter(),
TARGET = target,
linkages = {
("TARGET", "outbox") : ("TAGGER", "inbox"), # tag data coming from target
("TAGGER", "outbox") : ("self", "outbox"),
("TAGGER", "uid") : ("FILTER", "uid"), # ensure filter uses right uid
("self", "inbox") : ("FILTER", "inbox"), # filter data going to target
("FILTER", "outbox") : ("TARGET", "inbox"),
("self", "control") : ("TARGET", "control"), # shutdown signalling path
("TARGET", "signal") : ("TAGGER", "control"),
("TAGGER", "signal") : ("FILTER", "control"),
("FILTER", "signal") : ("self", "signal"),
},
)
|
83329d0c3f6bbf872ba65d31f7b2111c60a768e7
| 18,085 |
import requests
import json
def tweets(url):
"""tweets count"""
try:
twitter_url = 'http://urls.api.twitter.com/1/urls/count.json?url=' + url
r = requests.get(twitter_url, headers=headers)
json_data = json.loads(r.text)
return json_data['count']
except:
return 0
|
07e10d4b1ddad8cf74d79dc21fbc8dbfe1c38428
| 18,086 |
def CLJPc(S):
"""Compute a C/F splitting using the parallel CLJP-c algorithm.
CLJP-c, or CLJP in color, improves CLJP by perturbing the initial
random weights with weights determined by a vertex coloring.
Parameters
----------
S : csr_matrix
Strength of connection matrix indicating the strength between nodes i
and j (S_ij)
Returns
-------
splitting : array
Array of length of S of ones (coarse) and zeros (fine)
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical.split import CLJPc
>>> S = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> splitting = CLJPc(S)
See Also
--------
MIS, PMIS, CLJP
References
----------
.. [1] David M. Alber and Luke N. Olson
"Parallel coarse-grid selection"
Numerical Linear Algebra with Applications 2007; 14:611-643.
"""
S = remove_diagonal(S)
return CLJP(S, color=True)
|
3ef11327120a71123e51b702c5452e8036f581d5
| 18,087 |
def displayTwoDimMapPOST():
"""Run displayTwoDimMap"""
executionStartTime = int(time.time())
# status and message
success = True
message = "ok"
plotUrl = ''
dataUrl = ''
# get model, var, start time, end time, lon1, lon2, lat1, lat2, months, scale
jsonData = request.json
model = jsonData['model']
var = jsonData['var']
startT = jsonData['start_time']
endT = jsonData['end_time']
lon1 = jsonData['lon1']
lon2 = jsonData['lon2']
lat1 = jsonData['lat1']
lat2 = jsonData['lat2']
months = jsonData['months']
scale = jsonData['scale']
userId = request.args.get('userid', '')
print 'from url, userId: ', userId
if userId != None and userId != '':
userId = int(userId)
else:
userId = 0
#added by Chris
parameters_json = {'model':model, 'var':var, 'startT':startT,
'endT':endT, 'lon1':lon1, 'lon2':lon2,
'lat1':lat1, 'lat2':lat2, 'months':months,
'scale':scale}
print 'model: ', model
print 'var: ', var
print 'startT: ', startT
print 'endT: ', endT
print 'lon1: ', lon1
print 'lon2: ', lon2
print 'lat1: ', lat1
print 'lat2: ', lat2
print 'months: ', months
print 'scale: ', scale
# get where the input file and output file are
current_dir = os.getcwd()
print 'current_dir: ', current_dir
try:
seed_str = model+var+startT+endT+lon1+lon2+lat1+lat2+months+scale
tag = md5.new(seed_str).hexdigest()
output_dir = current_dir + '/svc/static/twoDimMap/' + tag
print 'output_dir: ', output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# chdir to where the app is
os.chdir(current_dir+'/svc/src/twoDimMap')
# instantiate the app. class
c1 = call_twoDimMap.call_twoDimMap(model, var, startT, endT, lon1, lon2, lat1, lat2, months, output_dir, scale)
# call the app. function
(message, imgFileName, dataFileName) = c1.displayTwoDimMap()
# chdir back
os.chdir(current_dir)
hostname, port = get_host_port2("host.cfg")
### userId = 2
if hostname == 'EC2':
try:
req = urllib2.Request('http://169.254.169.254/latest/meta-data/public-ipv4')
response = urllib2.urlopen(req)
hostname = response.read()
except Exception, e:
print 'e: ', e
"""
try:
req2 = urllib2.Request(' http://169.254.169.254/latest/user-data')
response2 = urllib2.urlopen(req2)
userId = json.loads(response2.read())['username']
except Exception, e:
print 'e: ', e
userId = 2
"""
"""
if userIdDict.has_key(userId):
userId = userIdDict[userId]
else :
userId = 'lei'
"""
print 'userId: ', userId
print 'hostname: ', hostname
print 'port: ', port
### url = 'http://cmacws.jpl.nasa.gov:8090/static/twoDimMap/' + tag + '/' + imgFileName
### url = 'http://' + hostname + ':' + port + '/static/twoDimMap/' + tag + '/' + imgFileName
### print 'url: ', url
plotUrl = 'http://' + hostname + ':' + port + '/static/twoDimMap/' + tag + '/' + imgFileName
print 'plotUrl: ', plotUrl
dataUrl = 'http://' + hostname + ':' + port + '/static/twoDimMap/' + tag + '/' + dataFileName
print 'dataUrl: ', dataUrl
failedImgUrl = 'http://' + hostname + ':' + port + '/static/plottingFailed.png'
print 'failedImgUrl: ', failedImgUrl
if imgFileName is '' or not os.path.exists(output_dir+'/'+imgFileName):
print '****** Error: %s not exist' % imgFileName
plotUrl = failedImgUrl
if dataFileName is '' or not os.path.exists(output_dir+'/'+dataFileName):
print '****** Error: %s not exist' % dataFileName
dataUrl = failedImgUrl
print 'message: ', message
if len(message) == 0 or message.find('Error') >= 0 or message.find('error:') >= 0 :
success = False
### url = ''
plotUrl = ''
dataUrl = ''
except ValueError, e:
# chdir to current_dir in case the dir is changed to where the app is in the try block
os.chdir(current_dir)
print 'change dir back to: ', current_dir
success = False
message = str(e)
except Exception, e:
# chdir to current_dir in case the dir is changed to where the app is in the try block
os.chdir(current_dir)
print 'change dir back to: ', current_dir
success = False
### message = str("Error caught in displayTwoDimMap()")
message = str(e)
purpose = request.args.get('purpose')#"Test .\'\"\\purpose"
executionEndTime = int(time.time())
### urlLink = 'model1=%s&var1=%s&lon1=%s&lon2=%s&lat1=%s&lat2=%s&startT=%s&endT=%s&months=%s&scale=%s&image=%s&data_url=%s' % (model,var,lon1,lon2,lat1,lat2,startT,endT,months,scale,plotUrl,dataUrl)
urlLink = request.query_string
print 'urlLink: ', urlLink
post_json = {'source': 'JPL', 'parameters':urlLink, 'frontend_url': frontend_url, 'backend_url': backend_url, 'userId': long(userId),
'executionStartTime':long(executionStartTime)*1000, 'executionEndTime':long(executionEndTime)*1000}
post_json = json.dumps(post_json)
if USE_CMU:
try:
print requests.post(CMU_PROVENANCE_URL, data=post_json, headers=HEADERS).text
print requests.post(CMU_PROVENANCE_URL_2, data=post_json, headers=HEADERS).text
### print requests.post(VIRTUAL_EINSTEIN_URL, data=post_json, headers=HEADERS).text
except:
print 'Something went wrong with Wei\'s stuff'
#/added by Chris
return jsonify({
'success': success,
'message': message,
'url': plotUrl,
'dataUrl': dataUrl
})
|
7b0402b66538b7b5d987c1385d9ac12df82fac66
| 18,088 |
def encrypt(msg, hexPubkey):
"""Encrypts message with hex public key"""
return pyelliptic.ECC(curve='secp256k1').encrypt(
msg, hexToPubkey(hexPubkey))
|
30befcf48d0417f13a93ad6d4e9f8ccf0fdbeae5
| 18,089 |
import numpy as np
def indexing(zDatagridLeft, zDatagridRight, zModelgridLeft, zModelgridRight):
"""
    Searches for the closest distances between actual and theoretical points.
    zDatagridLeft = float - tiled matrix (same values column-wise) of z coordinates
    of droplet on left side, size = [len(zModel),len(zActualLeft)]
    zDatagridRight = float - tiled matrix (same values column-wise) of z coordinates
    of droplet on right side, size = [len(zModel),len(zActualRight)]
    zModelgridLeft = float - tiled matrix (same values row-wise) of theoretical z coordinates
    of droplet (one side), size = [len(zModel),len(zActualLeft)]
    zModelgridRight = float - tiled matrix (same values row-wise) of theoretical z coordinates
    of droplet (one side), size = [len(zModel),len(zActualRight)]
"""
#indexing location of closest value
indexLeft=np.argmin(np.abs((zModelgridLeft-zDatagridLeft)),axis=0)
indexRight=np.argmin(np.abs((zModelgridRight-zDatagridRight)),axis=0)
return indexLeft,indexRight
|
d14b0037a4898fc12524aba0a29231a545dfed8a
| 18,090 |
def torch2np(tensor):
"""
Convert from torch tensor to numpy convention.
If 4D -> [b, c, h, w] to [b, h, w, c]
If 3D -> [c, h, w] to [h, w, c]
:param tensor: Torch tensor
:return: Numpy array
"""
array, d = tensor.detach().cpu().numpy(), tensor.dim()
perm = [0, 2, 3, 1] if d == 4 else [1, 2, 0] if d == 3 else None
return array.transpose(perm) if perm else array
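A minimal usage sketch (assuming PyTorch is installed):
import torch
x = torch.zeros(2, 3, 4, 5)          # [b, c, h, w]
print(torch2np(x).shape)             # (2, 4, 5, 3) -- channels moved last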
|
23acaa7b4e58d7891e77c22f29b7cbdc7a9a80d0
| 18,091 |
import glob
import os
import random
def get_filenames(feature_folder, glob_pattern, sample_size=None):
"""
Finds the all the files in the given feature folder which matches the glob pattern.
:param feature_folder: The folder to search for files.
:param glob_pattern: The glob pattern to use for finding files.
:param sample_size: If given, restrict the number of files loaded to a sample of this size.
:return: A list of files matching the glob pattern in the feature folder.
"""
files = glob.glob(os.path.join(feature_folder, glob_pattern))
if sample_size is not None and sample_size < len(files):
files = random.sample(files, sample_size)
return files
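A minimal usage sketch (the folder and glob pattern are placeholders):
feature_files = get_filenames('features/train', '*.npy', sample_size=100)
print(len(feature_files))   # at most 100 matching files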
|
19b97b9e981b8fe0b978d5af2240dc22c02d7e93
| 18,092 |
from typing import Any
import array
import numpy
def message_to_csv(msg: Any, truncate_length: int = None,
no_arr: bool = False, no_str: bool = False) -> str:
"""
Convert a ROS message to string of comma-separated values.
:param msg: The ROS message to convert.
:param truncate_length: Truncate values for all message fields to this length.
This does not truncate the list of message fields.
:param no_arr: Exclude array fields of the message.
:param no_str: Exclude string fields of the message.
:returns: A string of comma-separated values representing the input message.
"""
def to_string(val, field_type=None):
nonlocal truncate_length, no_arr, no_str
r = ''
if any(isinstance(val, t) for t in [list, tuple, array.array, numpy.ndarray]):
if no_arr is True and field_type is not None:
r = __abbreviate_array_info(val, field_type)
else:
for i, v in enumerate(val):
if r:
r += ','
if truncate_length is not None and i >= truncate_length:
r += '...'
break
r += to_string(v)
elif any(isinstance(val, t) for t in [bool, bytes, float, int, str, numpy.number]):
if no_str is True and isinstance(val, str):
val = '<string length: <{0}>>'.format(len(val))
elif any(isinstance(val, t) for t in [bytes, str]):
if truncate_length is not None and len(val) > truncate_length:
val = val[:truncate_length]
if isinstance(val, bytes):
val += b'...'
else:
val += '...'
r = str(val)
else:
r = message_to_csv(val, truncate_length, no_arr, no_str)
return r
result = ''
# We rely on __slots__ retaining the order of the fields in the .msg file.
for field_name, field_type in zip(msg.__slots__, msg.SLOT_TYPES):
value = getattr(msg, field_name)
if result:
result += ','
result += to_string(value, field_type)
return result
|
10ab4c7482c2fbf6e4335daaf0359390ed215152
| 18,093 |
def create_message(username, message):
""" Creates a standard message from a given user with the message
Replaces newline with html break """
message = message.replace('\n', '<br/>')
return '{{"service":1, "data":{{"message":"{mes}", "username":"{user}"}} }}'.format(mes=message, user=username)
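For example:
print(create_message('alice', 'hello\nworld'))
# {"service":1, "data":{"message":"hello<br/>world", "username":"alice"} }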
|
d12807789d5e30d1a4a39c0368ebe4cf8fbde99e
| 18,094 |
def overlaps(sdf, other):
"""
Indicates if the intersection of the two geometries has the same shape
type as one of the input geometries and is not equivalent to either of
the input geometries.
========================= =========================================================
**Argument** **Description**
------------------------- ---------------------------------------------------------
sdf Required Spatially Enabled DataFrame. The dataframe to have the operation performed on.
------------------------- ---------------------------------------------------------
other Required Spatially Enabled DataFrame or arcgis.Geometry. This is the selecting data.
========================= =========================================================
:returns: pd.DataFrame (Spatially enabled DataFrame)
"""
global _HASARCPY, _HASSHAPELY
if _HASARCPY == False and _HASSHAPELY == False:
return None
ud = pd.Series([False] * len(sdf))
if isinstance(other, (Point, Polygon, Polyline, MultiPoint)):
sindex = sdf.spatial.sindex()
q1 = sindex.intersect(bbox=other.extent)
sub = sdf.iloc[q1]
dj = sub[sdf.spatial.name].geom.overlaps(other)
dj.index = sub.index
ud = ud | dj
return sdf[ud]
elif _is_geoenabled(other):
sindex = sdf.spatial.sindex()
name = other.spatial.name
for index, seg in other.iterrows():
g = seg[name]
q1 = sindex.intersect(bbox=g.extent)
sub = sdf.iloc[q1]
if len(sub) > 0:
dj = sub[sdf.spatial.name].geom.overlaps(g)
dj.index = sub.index
ud = ud | dj
return sdf[ud]
else:
raise ValueError(("Invalid input, please verify that `other` "
"is a Point, Polygon, Polyline, MultiPoint, "
"or Spatially enabled DataFrame"))
return None
|
14cad072b3b11efe4c4f14d7fc14e053a262f904
| 18,095 |
from django.contrib.auth.views import redirect_to_login
def render_page(request, page):
"""Рендер страницы"""
if page.registration_required and not request.user.is_authenticated:
return redirect_to_login(request.path)
# if page.template:
# template = loader.get_template(page.template)
# print(template)
# # else:
# # template = loader.get_template(DEFAULT_TEMPLATE)
# # t = Template(template)
#
# p = Template(template).render(RequestContext(request, {'page': page}))
# print(p)
# # page.title = mark_safe(page.title)
# # page.text = mark_safe(page.text)
# return HttpResponse(p)
# # return HttpResponse(template.render({'page': page}, request))
return render(request, page.template, {"page": page})
|
6e11ea24ee9dc9cf7e1cf8df3bfc192715202044
| 18,096 |
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
# 16,16, 7.5, 7.5
return w, h, x_ctr, y_ctr
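For example, a 16x16 anchor spanning pixels 0..15:
print(_whctrs([0, 0, 15, 15]))   # (16, 16, 7.5, 7.5)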
|
ae3f3d7c486b1698f31ecce301f0e2c2f8af5e84
| 18,097 |
def obtener_atletas_pais(atletas: list, pais_interes: str) -> list:
"""
Función que genera una lista con la información de los atletas del país dado,
sin importar el año en que participaron los atletas.
Parámetros:
atletas: list de diccionarios con la información de cada atleta.
pais_interes: str.
Retorna:
atletas_pais: list con los diccionarios de los atletas del país.
diccionario de cada atleta: {'nombre': str, 'evento': str, 'anio': int}.
"""
    # Initialize the list of athletes from the country.
atletas_pais = list()
    # Start iterating over the list of athletes.
for cada_atleta in atletas:
        # Variables of the current athlete.
anio_actual = cada_atleta['anio']
nombre_actual = cada_atleta['nombre']
evento_actual = cada_atleta['evento']
pais_actual = cada_atleta['pais']
        # Check whether the athlete's country matches the country of interest.
if pais_actual == pais_interes:
            # Add the athlete's dictionary to the list of athletes.
atletas_pais.append({'nombre': nombre_actual, 'evento': evento_actual, 'anio': anio_actual})
return atletas_pais
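A minimal usage sketch (the athlete records are made-up illustrative data):
atletas = [
    {'nombre': 'Ana', 'evento': '100m', 'anio': 2016, 'pais': 'Colombia'},
    {'nombre': 'Luis', 'evento': 'Salto largo', 'anio': 2012, 'pais': 'Chile'},
]
print(obtener_atletas_pais(atletas, 'Colombia'))
# [{'nombre': 'Ana', 'evento': '100m', 'anio': 2016}]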
|
4b03364a76af4e7818f977731b259fdfee6817ee
| 18,098 |
def kl_div_mixture_app(m1, v1, m2, v2,
return_approximations=False,
return_upper_bound=False):
"""Approximate KL divergence between Gaussian and mixture of Gaussians
See Durrieu et al, 2012: "Lower and upper bounds for approximation of the
Kullback-Leibler divergence between Gaussian Mixture Models"
https://serval.unil.ch/resource/serval:BIB_513DF4E21898.P001/REF
Both the variational and the product approximation are simplified here
compared to the paper, as we assume to have a single Gaussian as the first
argument.
m1: ([batch_dims], data_dims)
v1: ([batch_dims], data_dims)
m2: ([batch_dims], mixtures, data_dims)
v2: ([batch_dims], mixtures, data_dims)
"""
assert m1.ndim + 1 == m2.ndim
if return_upper_bound:
res = _kl_div_mixture_app_with_upper_bound(m1, v1, m2, v2)
if return_approximations:
return res
else:
return res[0], res[3]
else:
kls_app, kls_var, kls_prod = _kl_div_mixture_app(m1, v1, m2, v2)
if return_approximations:
return kls_app, kls_var, kls_prod
else:
return kls_app
|
e90fbf8596a06513d68c1eca17a35857d75eea70
| 18,099 |