content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
import numpy as np
def _ComputeRelativeAlphaBeta(omega_b, position_b, apparent_wind_b):
"""Computes the relative alpha and beta values, in degrees, from kinematics.
Args:
omega_b: Array of size (n, 3). Body rates of the kite [rad/s].
position_b: Array of size (1, 3). Position of the surface to compute local
alpha/beta [m].
apparent_wind_b: Array of size (n,3). Apparent wind vector from the state
estimator [m/s].
Returns:
local_alpha_deg, local_beta_deg: The values of local alpha and beta.
The math for a relative angle of attack at a given section is as follows:
(1) Kinematically:
v_section_b = apparent_wind_b - omega_b X position_b
(2) By definition:
alpha_rad = atan2(-v_section_b_z, -v_section_b_x)
beta_rad = asin(-v_section_b_y / mag(v_section_b))
where _x, _y, _z denote the components along the body coordinate axes.
"""
assert np.shape(omega_b) == np.shape(apparent_wind_b)
# The cross product omega_b x position_b is the velocity of the section due
# to rigid-body rotation; the apparent wind seen by the section is reduced
# by that motion, hence the subtraction.
local_vel = apparent_wind_b - np.cross(omega_b, position_b, axisa=1,
axisb=1)
local_vel_mag = np.linalg.norm(local_vel, axis=1)
local_alpha_deg = np.rad2deg(np.arctan2(-1.0 * local_vel[:, 2],
-1.0 * local_vel[:, 0]))
local_beta_deg = np.rad2deg(np.arcsin(-1.0 * local_vel[:, 1]
/ local_vel_mag))
return local_alpha_deg, local_beta_deg | 6aa5f82e85b50abab0c72800b5e2b11ec613bcbd | 22,271 |
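A minimal usage sketch for the helper above; the kite rates, section position, and apparent wind values are made-up illustrative numbers.
import numpy as np

# Hypothetical sample data: two time samples of body rates and apparent wind.
omega_b = np.array([[0.1, 0.0, 0.05],
                    [0.2, 0.0, 0.02]])            # [rad/s]
position_b = np.array([[0.0, 5.0, 0.0]])          # section 5 m out on the body y axis [m]
apparent_wind_b = np.array([[-40.0, 1.0, -3.0],
                            [-42.0, 0.5, -2.5]])  # [m/s]

alpha_deg, beta_deg = _ComputeRelativeAlphaBeta(omega_b, position_b, apparent_wind_b)
print(alpha_deg, beta_deg)  # one local alpha/beta pair per time sample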
def make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025):
"""Make a chord with the given duration.
midi_nums: sequence of int MIDI note numbers
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freqs = [midi_to_freq(num) for num in midi_nums]
signal = sum(sig_cons(freq) for freq in freqs)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave | babc9d22b92b2e7085680178718959cd7ef15eca | 22,272 |
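make_chord relies on a midi_to_freq helper and on ThinkDSP-style Signal constructors (CosSignal) defined elsewhere; a minimal sketch of the MIDI-to-frequency conversion, using the standard equal-temperament formula with A4 (MIDI 69) at 440 Hz:
def midi_to_freq(midi_num):
    """Convert a MIDI note number to a frequency in Hz (A4 = 69 = 440 Hz)."""
    return 440.0 * 2 ** ((midi_num - 69) / 12)

# e.g. a C-major triad (C4, E4, G4), assuming ThinkDSP's CosSignal is available:
# wave = make_chord([60, 64, 67], duration=1.0)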
def calculate_percent(partial, total):
"""Calculate percent value."""
if total:
percent = round(partial / total * 100, 2)
else:
percent = 0
return f'{percent}%' | 4d3da544dd1252acec3351e7f67568be80afe020 | 22,273 |
import requests
def okgets(urls):
"""Multi-threaded requests.get, only returning valid response objects
:param urls: A container of str URLs
:returns: A tuple of requests.Response objects
"""
return nest(
ripper(requests.get),
filt(statusok),
tuple
)(urls) | 0933f4df68745a6c9d69d0b42d4bb005c1c69772 | 22,274 |
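The combinators nest, ripper, filt and statusok are not defined in this snippet. One plausible reading, sketched below purely as an assumption, is that nest composes functions left to right, ripper maps a function over the inputs with a thread pool (hence "multi-threaded"), filt turns a predicate into a filter, and statusok checks response.ok:
from concurrent.futures import ThreadPoolExecutor
from functools import reduce

def nest(*funcs):
    """Compose functions left to right: nest(f, g, h)(x) == h(g(f(x)))."""
    return lambda x: reduce(lambda acc, f: f(acc), funcs, x)

def ripper(func, max_workers=8):
    """Map func over an iterable using a thread pool."""
    def run(items):
        with ThreadPoolExecutor(max_workers=max_workers) as pool:
            return list(pool.map(func, items))
    return run

def filt(predicate):
    """Return a function that filters an iterable by predicate."""
    return lambda items: (item for item in items if predicate(item))

def statusok(response):
    """True for responses with a successful status code."""
    return response.ok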
from docusign_esign import ApiClient, ConsoleViewRequest, EnvelopesApi
def worker(args):
"""
This function does the work of returning a URL for the NDSE view
"""
# Step 1. Create the NDSE view request object
# Set the url where you want the recipient to go once they are done
# with the NDSE. It is usually the case that the
# user will never "finish" with the NDSE.
# Assume that control will not be passed back to your app.
view_request = ConsoleViewRequest(return_url=args["ds_return_url"])
if args["starting_view"] == "envelope" and args["envelope_id"]:
view_request.envelope_id = args["envelope_id"]
# Step 2. Get the console view url
# Exceptions will be caught by the calling function
api_client = ApiClient()
api_client.host = args["base_path"]
api_client.set_default_header("Authorization", "Bearer " + args["ds_access_token"])
envelope_api = EnvelopesApi(api_client)
results = envelope_api.create_console_view(args["account_id"], console_view_request=view_request)
url = results.url
return {"redirect_url": url} | abcb2a94e5d14519a708ae8d531e47f30bc3c0da | 22,275 |
import warnings
import math
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
def plot_horiz_xsection_quiver_map(Grids, ax=None,
background_field='reflectivity',
level=1, cmap='pyart_LangRainbow12',
vmin=None, vmax=None,
u_vel_contours=None,
v_vel_contours=None,
w_vel_contours=None,
wind_vel_contours=None,
u_field='u', v_field='v', w_field='w',
show_lobes=True, title_flag=True,
axes_labels_flag=True,
colorbar_flag=True,
colorbar_contour_flag=False,
bg_grid_no=0, contour_alpha=0.7,
coastlines=True,
quiver_spacing_x_km=10.0,
quiver_spacing_y_km=10.0,
gridlines=True,
quiverkey_len=5.0,
quiverkey_loc='best',
quiver_width=0.01):
"""
This procedure plots a horizontal cross section of winds from wind fields
generated by PyDDA using quivers onto a geographical map. The length of
the quivers varies with wind speed.
Parameters
----------
Grids: list
List of Py-ART Grids to visualize
ax: matplotlib axis handle (with cartopy ccrs)
The axis handle to place the plot on. Set to None to create a new map.
Note: the axis needs to be in a PlateCarree() projection. Support for
other projections is planned in the future.
background_field: str
The name of the background field to plot the quivers on.
level: int
The number of the vertical level to plot the cross section through.
cmap: str or matplotlib colormap
The name of the matplotlib colormap to use for the background field.
vmin: float
The minimum bound to use for plotting the background field. None will
automatically detect the background field minimum.
vmax: float
The maximum bound to use for plotting the background field. None will
automatically detect the background field maximum.
u_vel_contours: 1-D array
The contours to use for plotting contours of u. Set to None to not
display such contours.
v_vel_contours: 1-D array
The contours to use for plotting contours of v. Set to None to not
display such contours.
w_vel_contours: 1-D array
The contours to use for plotting contours of w. Set to None to not
display such contours.
wind_vel_contours: 1-D array
The contours to use for plotting contours of horizontal wind speed.
Set to None to not display such contours.
u_field: str
Name of zonal wind (u) field in Grids.
v_field: str
Name of meridional wind (v) field in Grids.
w_field: str
Name of vertical wind (w) field in Grids.
show_lobes: bool
If True, the dual doppler lobes from each pair of radars will be shown.
title_flag: bool
If True, PyDDA will generate a title for the plot.
axes_labels_flag: bool
If True, PyDDA will generate axes labels for the plot.
colorbar_flag: bool
If True, PyDDA will generate a colorbar for the plot background field.
colorbar_contour_flag: bool
If True, PyDDA will generate a colorbar for the contours.
bg_grid_no: int
Number of grid in Grids to take background field from.
Set to -1 to use maximum value from all grids.
contour_alpha: float
Alpha (transparency) of velocity contours. 0 = transparent, 1 = opaque
coastlines: bool
Set to true to display coastlines.
quiver_spacing_x_km: float
Spacing in km between quivers in x axis.
quiver_spacing_y_km: float
Spacing in km between quivers in y axis.
gridlines: bool
Set to true to show grid lines.
quiverkey_len: float
Length to use for the quiver key in m/s.
quiverkey_loc: str
Location of quiverkey. One of:
'best'
'top_left'
'top'
'top_right'
'bottom_left'
'bottom'
'bottom_right'
'left'
'right'
'top_left_outside'
'top_right_outside'
'bottom_left_outside'
'bottom_right_outside'
'best' will put the quiver key in the corner with the fewest
valid data points while keeping the quiver key inside the plot.
The rest of the options will put the quiver key in that
particular part of the plot.
quiver_width: float
The width of the lines for the quiver given as a fraction
relative to the plot width. Use this to specify the thickness
of the quiver lines.
Returns
-------
ax: matplotlib axis
Axis handle to output axis
"""
if(bg_grid_no > -1):
grid_bg = Grids[bg_grid_no].fields[background_field]['data']
else:
grid_array = np.ma.stack(
[x.fields[background_field]['data'] for x in Grids])
grid_bg = grid_array.max(axis=0)
if(vmin is None):
vmin = grid_bg.min()
if(vmax is None):
vmax = grid_bg.max()
grid_h = Grids[0].point_altitude['data']/1e3
grid_x = Grids[0].point_x['data']/1e3
grid_y = Grids[0].point_y['data']/1e3
grid_lat = Grids[0].point_latitude['data'][level]
grid_lon = Grids[0].point_longitude['data'][level]
qloc_x, qloc_y = _parse_quiverkey_string(
quiverkey_loc, grid_h[level], grid_x[level],
grid_y[level], grid_bg[level])
dx = np.diff(grid_x, axis=2)[0, 0, 0]
dy = np.diff(grid_y, axis=1)[0, 0, 0]
if(np.ma.isMaskedArray(Grids[0].fields[u_field]['data'])):
u = Grids[0].fields[u_field]['data'].filled(fill_value=np.nan)
else:
u = Grids[0].fields[u_field]['data']
if(np.ma.isMaskedArray(Grids[0].fields[v_field]['data'])):
v = Grids[0].fields[v_field]['data'].filled(fill_value=np.nan)
else:
v = Grids[0].fields[v_field]['data']
if(np.ma.isMaskedArray(Grids[0].fields[w_field]['data'])):
w = Grids[0].fields[w_field]['data'].filled(fill_value=np.nan)
else:
w = Grids[0].fields[w_field]['data']
transform = ccrs.PlateCarree()
if(ax is None):
ax = plt.axes(projection=transform)
the_mesh = ax.pcolormesh(grid_lon[:, :], grid_lat[:, :],
grid_bg[level, :, :],
cmap=cmap, transform=transform, zorder=0,
vmin=vmin, vmax=vmax)
horiz_wind_speed = np.ma.sqrt(u**2 + v**2)
quiver_density_x = int((1/dx)*quiver_spacing_x_km)
quiver_density_y = int((1/dy)*quiver_spacing_y_km)
q = ax.quiver(grid_lon[::quiver_density_y, ::quiver_density_x],
grid_lat[::quiver_density_y, ::quiver_density_x],
u[level, ::quiver_density_y, ::quiver_density_x],
v[level, ::quiver_density_y, ::quiver_density_x],
transform=transform, width=quiver_width,
scale=25.*quiverkey_len)
quiver_font = {'family': 'sans-serif',
'style': 'normal',
'variant': 'normal',
'weight': 'bold',
'size': 'medium'}
ax.quiverkey(q, qloc_x, qloc_y,
quiverkey_len, label=(str(quiverkey_len) +' m/s'),
fontproperties=quiver_font)
if(colorbar_flag is True):
cp = Grids[bg_grid_no].fields[background_field]['long_name']
cp = cp + ' [' + Grids[bg_grid_no].fields[background_field]['units']
cp = cp + ']'
plt.colorbar(the_mesh, ax=ax, label=(cp))
if(u_vel_contours is not None):
u_filled = np.ma.masked_where(u[level, :, :] < np.min(u_vel_contours),
u[level, :, :])
try:
cs = ax.contourf(grid_lon[:, :], grid_lat[:, :],
u_filled, levels=u_vel_contours, linewidths=2,
alpha=contour_alpha, zorder=2, extend='both')
cs.set_clim([np.min(u_vel_contours), np.max(u_vel_contours)])
cs.cmap.set_under(color='white', alpha=0)
cs.cmap.set_over(color='white', alpha=0)
cs.cmap.set_bad(color='white', alpha=0)
ax.clabel(cs)
if(colorbar_contour_flag is True):
ax2 = plt.colorbar(cs, ax=ax, label='U [m/s]', extend='both',
spacing='proportional')
except ValueError:
warnings.warn(("Cartopy does not support blank contour plots, " +
"contour color map not drawn!"), RuntimeWarning)
if(v_vel_contours is not None):
v_filled = np.ma.masked_where(v[level, :, :] < np.min(v_vel_contours),
v[level, :, :])
try:
cs = ax.contourf(grid_lon[:, :], grid_lat[:, :],
v_filled, levels=v_vel_contours, linewidths=2,
alpha=contour_alpha, zorder=2, extend='both')
cs.set_clim([np.min(v_vel_contours), np.max(v_vel_contours)])
cs.cmap.set_under(color='white', alpha=0)
cs.cmap.set_over(color='white', alpha=0)
cs.cmap.set_bad(color='white', alpha=0)
ax.clabel(cs)
if(colorbar_contour_flag is True):
ax2 = plt.colorbar(cs, ax=ax, label='V [m/s]', extend='both',
spacing='proportional')
except ValueError:
warnings.warn(("Cartopy does not support blank contour plots, " +
"contour color map not drawn!"), RuntimeWarning)
if(w_vel_contours is not None):
w_filled = np.ma.masked_where(w[level, :, :] < np.min(w_vel_contours),
w[level, :, :])
try:
cs = ax.contourf(grid_lon[::, ::], grid_lat[::, ::],
w_filled, levels=w_vel_contours, linewidths=2,
alpha=contour_alpha, zorder=2, extend='both')
cs.set_clim([np.min(w_vel_contours), np.max(w_vel_contours)])
cs.cmap.set_under(color='white', alpha=0)
cs.cmap.set_over(color='white', alpha=0)
cs.cmap.set_bad(color='white', alpha=0)
ax.clabel(cs)
if(colorbar_contour_flag is True):
ax2 = plt.colorbar(cs, ax=ax, label='W [m/s]', extend='both',
spacing='proportional',
ticks=w_vel_contours)
except ValueError:
warnings.warn(("Cartopy does not support color maps on blank " +
"contour plots, contour color map not drawn!"),
RuntimeWarning)
if(wind_vel_contours is not None):
vel = np.ma.sqrt(u[level, :, :]**2 + v[level, :, :]**2)
vel = vel.filled(fill_value=np.nan)
try:
cs = ax.contourf(grid_x[level, :, :], grid_y[level, :, :],
vel, levels=wind_vel_contours, linewidths=2,
alpha=contour_alpha)
cs.cmap.set_under(color='white', alpha=0)
cs.cmap.set_bad(color='white', alpha=0)
ax.clabel(cs)
if(colorbar_contour_flag is True):
ax2 = plt.colorbar(cs, ax=ax, label='|V| [m/s]', extend='both',
spacing='proportional',
ticks=wind_vel_contours)
except ValueError:
warnings.warn(("Cartopy does not support color maps on blank " +
"contour plots, contour color map not drawn!"),
RuntimeWarning)
bca_min = math.radians(Grids[0].fields[u_field]['min_bca'])
bca_max = math.radians(Grids[0].fields[u_field]['max_bca'])
if(show_lobes is True):
for i in range(len(Grids)):
for j in range(len(Grids)):
if (i != j):
bca = retrieval.get_bca(Grids[j].radar_longitude['data'],
Grids[j].radar_latitude['data'],
Grids[i].radar_longitude['data'],
Grids[i].radar_latitude['data'],
Grids[j].point_x['data'][0],
Grids[j].point_y['data'][0],
Grids[j].get_projparams())
ax.contour(
grid_lon[:, :], grid_lat[:, :], bca,
levels=[bca_min, bca_max], colors='k', zorder=1)
if(axes_labels_flag is True):
ax.set_xlabel(r'Longitude [$\degree$]')
ax.set_ylabel(r'Latitude [$\degree$]')
if(title_flag is True):
ax.set_title(
('PyDDA retrieved winds @' + str(grid_h[level, 0, 0]) + ' km'))
if(coastlines is True):
ax.coastlines(resolution='10m')
if(gridlines is True):
ax.gridlines()
ax.set_extent([grid_lon.min(), grid_lon.max(),
grid_lat.min(), grid_lat.max()])
num_tenths = round((grid_lon.max()-grid_lon.min())*10)+1
the_ticks_x = np.round(
np.linspace(grid_lon.min(), grid_lon.max(), num_tenths), 1)
num_tenths = round((grid_lat.max()-grid_lat.min())*10)+1
the_ticks_y = np.round(
np.linspace(grid_lat.min(), grid_lat.max(), num_tenths), 1)
ax.set_xticks(the_ticks_x)
ax.set_yticks(the_ticks_y)
return ax | 7f093435ad5488226232a6028d94e6f22b1a2688 | 22,276 |
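A hedged usage sketch for the plotting routine above; the file names are placeholders, and the grids are assumed to already contain retrieved u/v/w fields (e.g. from pydda.retrieval.get_dd_wind_field):
import pyart
import pydda
import matplotlib.pyplot as plt

# Hypothetical file names; any pair of Py-ART Grid files with retrieved
# wind fields will work.
grids = [pyart.io.read_grid("radar_a_winds.nc"),
         pyart.io.read_grid("radar_b_winds.nc")]

ax = pydda.vis.plot_horiz_xsection_quiver_map(
    grids, background_field="reflectivity", level=3,
    w_vel_contours=[1.0, 3.0, 5.0],
    quiver_spacing_x_km=15.0, quiver_spacing_y_km=15.0)
plt.show()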
def register(registered_collection, reg_key):
"""Register decorated function or class to collection.
Register decorated function or class into registered_collection, in a
hierarchical order. For example, when reg_key="my_model/my_exp/my_config_0"
the decorated function or class is stored under
registered_collection["my_model"]["my_exp"]["my_config_0"].
This decorator is supposed to be used together with the lookup() function in
this file.
Args:
registered_collection: a dictionary. The decorated function or class will be
put into this collection.
reg_key: The key for retrieving the registered function or class. If reg_key
is a string, it can be hierarchical like my_model/my_exp/my_config_0
Returns:
A decorator function
Raises:
KeyError: when function or class to register already exists.
"""
def decorator(fn_or_cls):
"""Put fn_or_cls in the dictionary."""
if isinstance(reg_key, str):
hierarchy = reg_key.split("/")
collection = registered_collection
for h_idx, entry_name in enumerate(hierarchy[:-1]):
if entry_name not in collection:
collection[entry_name] = {}
collection = collection[entry_name]
if not isinstance(collection, dict):
raise KeyError(
"Collection path {} at position {} already registered as "
"a function or class.".format(entry_name, h_idx))
leaf_reg_key = hierarchy[-1]
else:
collection = registered_collection
leaf_reg_key = reg_key
if leaf_reg_key in collection:
raise KeyError("Function or class {} registered multiple times.".format(
leaf_reg_key))
collection[leaf_reg_key] = fn_or_cls
return fn_or_cls
return decorator | affba6b7ee1294040633f488752623b3fa0462e4 | 22,277 |
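A self-contained usage sketch; the registry dict, the register_config wrapper and the registered key are illustrative only:
_REGISTRY = {}

def register_config(reg_key):
    return register(_REGISTRY, reg_key)

@register_config("my_model/my_exp/my_config_0")
class MyConfig:
    learning_rate = 0.1

# The class is stored under nested keys derived from the slash-separated path.
assert _REGISTRY["my_model"]["my_exp"]["my_config_0"] is MyConfig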
import pandas as pd
def form_hhaa_records(df,
team_locn='h',
records='h',
feature='ftGoals'):
"""
Accept a table of matches and, for each team (grouped on the
`team_locn` column), append lagged copies of `feature` so that every
row carries that team's history of the feature up to that match.
"""
team_records = []
for _, team_df in df.groupby(by=team_locn):
lags = range(0, len(team_df))
records_df = pd.DataFrame({f'{team_locn}_{records}_{feature}-{n}':
team_df[team_locn + '_' + feature].shift(n)
for n in lags})
team_record = pd.concat([team_df, records_df], sort=True, axis=1)
team_records.append(team_record)
full_df = pd.concat(team_records, axis=0, sort=True).sort_index()
return full_df | af618fa0fe3c1602018ba6830c381bde73c158c3 | 22,278 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import odr
from uncertainties.unumpy import uarray
def process_dataset(material: str, frequency: float, plot=False,
pr=False) -> float:
"""
Take a set of data, fit a curve and find the thermal diffusivity.
Parameters
----------
material : str
Gives material of this dataset. 'Cu' or 'Al'.
frequency : float
Frequency used, in mHz.
plot : bool
True if a plot of the curves should be shown.
pr : bool
True if the ODR output should be printed.
Returns
-------
diffusivity : float
The estimated thermal diffusivity of this material.
"""
# Check parameter validity
if material not in ['Cu', 'Al']:
raise ValueError('Invalid material name')
# Get file
filename = '{}_{}mHz.csv'.format(material, frequency)
raw = pd.read_csv(filename,
names=['Time',
'Ref',
'Source',
'S1',
'S2',
'S3',
'S4',
'S5',
'S6'])
# Set sensor position (in m) based on bar material
if material == 'Cu':
x = np.array([12, 35, 70, 150, 310, 610]) / 1000
dx = np.full(6, 0.015)
elif material == 'Al':
x = np.array([27.5, 70, 150, 310, 630]) / 1000
dx = np.array([0.25, 0.25, 0.25, 0.25, 0.5]) / 100
# Start processing data into a useful format
data = raw.to_numpy()
# delete first row of zeroes
data = np.delete(data, 0, 0)
# For every temperature measurement, associates it with time and position
# Also dumps data from the dodgy sensor
# Calculates error in Temperature based a C class Pt100
def add_independents(row):
if material == 'Cu':
t = np.full(6, row[0])
relative_temperature = row[3:] - row[1]
temp_err = (row[3:] + row[1]) * 0.01 + 1.2
elif material == 'Al':
t = np.full(5, row[0])
relative_temperature = row[4:] - row[1]
temp_err = (row[4:] + row[1]) * 0.01 + 1.2
return np.column_stack((t, x, dx, relative_temperature, temp_err))
# This produces an array for each time measurement,
# where each row is [t, x, dx, T(x,t), dT]
data = np.apply_along_axis(add_independents, 1, data)
# Extract the rows from each time measurement array into one big array
data = np.reshape(data, (-1, 5))
# Split columns into named vars for clarity
# Note how the array has been transposed
time, x, dx, Temperature, dT = data.T
# Estimate time error
dtime = np.full(len(time), 0.01)
dindep = [dx, dtime]
# Set angular frequency, given we know frequency
w = 2 * np.pi * (frequency / 1000)
# Equation to fit to
def model(params, independent):
A, B, C = params
t, x = independent
return A * np.exp(- B * x) * np.sin(w * t - (C * x))
# Fit curve
mod = odr.Model(model)
realData = odr.RealData([time, x], y=Temperature, sx=dindep, sy=dT)
myodr = odr.ODR(realData, mod, beta0=[11., 2., 9.])
output = myodr.run()
parameters = output.beta
if plot:
# Plot experimental data
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(time, x, Temperature, s=1, color='black')
# ax.scatter(time, x, Temperature, s=1, c=Temperature, cmap='plasma')
ax.set_title('{} at {}mHz'.format(material, frequency))
ax.set_xlabel('Time (s)')
ax.set_ylabel('Distance (m)')
ax.set_zlabel('Temperature (C)')
# Plot the fitted function
sampling_time = 5 * 1000 / frequency
sample_time = np.linspace(0, sampling_time, 750)
sample_x = np.linspace(0, 0.65, 750)
Time, X = np.meshgrid(sample_time, sample_x, sparse=True)
sample_Temperature = model(parameters, [Time, X])
ax.plot_surface(Time, X, sample_Temperature, cmap='plasma',
alpha=0.4)
# ax.plot_wireframe(Time, X, sample_Temperature, color='black',
# alpha=0.5)
# Include sd uncertainties with parameters
pu = uarray(parameters, output.sd_beta)
if pr:
output.pprint()
# print(pu)
# Calculate diffusitivity
return w / (2 * pu[1] * pu[2]) | 64e25b326ddf33adf568b395320e9dddcc9c637d | 22,279 |
import numpy as np
def make_ln_func(variable):
"""Return a function that takes a queryset and computes the natural log of `variable`"""
def safe_ln_queryset(qs):
"""Takes the natural log of a queryset's values and handles zeros"""
vals = qs.values_list(variable, flat=True)
ret = np.log(vals)
ret[ret == -np.inf] = 0
return ret
return safe_ln_queryset | 200c17c011788e53aa3f678ede22c02bad10613a | 22,282 |
import numpy as np
def calc_all_energies(n, k, states, params):
"""Calculate all the energies for the states given. Can be used for Potts.
Parameters
----------
n : int
Number of spins.
k : int
Ising or Potts3 model.
states : ndarray
Array of states for which to compute the energy; each row is one spin
configuration of length n.
params : ndarray
(h,J) vector
Returns
-------
E : ndarray
Energies of all given states.
"""
e = np.zeros(len(states))
s_ = np.zeros((1,n), dtype=np.int8)
if k==2:
for i in range(len(states)):
s = states[i]
e[i] -= fast_sum(params[n:], s)
e[i] -= np.sum(s*params[:n])
elif k==3:
for i in range(len(states)):
s = states[i]
for ix in range(n):
# fields
e[i] -= params[ix+s[ix]*n]
e[i] -= fast_sum_ternary(params[n*k:], s)
else: raise NotImplementedError
return e | 9de47da0f0dfa2047fdddc7796ada861d7be0f6b | 22,283 |
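fast_sum and fast_sum_ternary are external (typically compiled) helpers that are not shown here. As an assumption, fast_sum is sketched below as the pairwise Ising coupling term, summing J[ij] * s[i] * s[j] over pairs i < j with the couplings stored in flattened upper-triangular order:
def fast_sum(J, s):
    """Sum of J[ij] * s[i] * s[j] over pairs i < j (upper-triangular order).

    Assumes J is the flattened coupling vector params[n:] and s is one state.
    """
    n = len(s)
    total = 0.0
    ij = 0
    for i in range(n - 1):
        for j in range(i + 1, n):
            total += J[ij] * s[i] * s[j]
            ij += 1
    return total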
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS, connections
from psycopg2.extensions import AsIs
from heroku_connect.models import TriggerLog, TriggerLogArchive
def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):
"""
Create Heroku Connect schema.
Note:
This function is only meant to be used for local development.
In a production environment the schema will be created by
Heroku Connect.
Args:
using (str): Alias for database connection.
Returns:
bool: ``True`` if the schema was created, ``False`` if the
schema already exists.
"""
connection = connections[using]
with connection.cursor() as cursor:
cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])
schema_exists = cursor.fetchone()[0]
if schema_exists:
return False
cursor.execute("CREATE SCHEMA %s;", [AsIs(settings.HEROKU_CONNECT_SCHEMA)])
with connection.schema_editor() as editor:
for model in get_heroku_connect_models():
editor.create_model(model)
# Needs PostgreSQL and database superuser privileges (which is the case on Heroku):
editor.execute('CREATE EXTENSION IF NOT EXISTS "hstore";')
for cls in [TriggerLog, TriggerLogArchive]:
editor.create_model(cls)
return True | bb7eacbf4775bb08f723b69adc6a43c10ffe9287 | 22,284 |
import re
def extract_sentences(modifier, split_text):
"""
Extracts the sentences that contain the modifier references.
"""
extracted_text = []
for sentence in split_text:
if re.search(r"\b(?=\w)%s\b(?!\w)" % re.escape(modifier), sentence,
re.IGNORECASE):
extracted_text.append(sentence)
return extracted_text | 4e31a250520b765d998aa8bc88f2414fe206901c | 22,285 |
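A quick usage sketch with made-up sentences:
sentences = [
    "Aspirin was given to the patient.",
    "The aspirins were counted.",       # plural form: no whole-word match
    "The patient declined treatment.",
]
print(extract_sentences("aspirin", sentences))
# ['Aspirin was given to the patient.']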
import numpy as np
def get_1_neighbours(graph, i):
"""
This function gets all the 1-neighborhoods including i itself.
"""
nbhd_nodes = graph.get_out_neighbours(i)
nbhd_nodes = np.concatenate((nbhd_nodes,np.array([i])))
return nbhd_nodes | 4b19f6eb2cbd7044cf0da26e6770a2be85ae901d | 22,286 |
import numpy as np
def window_slice(frame, center, window):
"""
Get the index ranges for a window with size `window` at `center`, clipped to the boundaries of `frame`
Parameters
----------
frame : ArrayLike
image frame for bound-checking
center : Tuple
(y, x) coordinate of the window
window : float,Tuple
window length, or tuple for each axis
Returns
-------
(ys, xs)
tuple of ranges for the indices for the window
"""
half_width = np.asarray(window) / 2
Ny, Nx = frame.shape[-2:]
lower = np.maximum(0, np.round(center - half_width), dtype=int, casting="unsafe")
upper = np.minimum(
(Ny - 1, Nx - 1), np.round(center + half_width), dtype=int, casting="unsafe"
)
return range(lower[0], upper[0] + 1), range(lower[1], upper[1] + 1) | 111c53b7b2ead44e462cc3c5815e9d44b4c3d024 | 22,287 |
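A short usage sketch on a synthetic frame; note that `center` must be array-like so the arithmetic broadcasts:
import numpy as np

frame = np.zeros((100, 100))
ys, xs = window_slice(frame, center=np.array([50, 60]), window=11)
# Index the window with an outer product of the two ranges.
cutout = frame[np.ix_(ys, xs)]
print(cutout.shape)  # (13, 13): an ~11-pixel window, clipped to the frame bounds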
def revnum_to_revref(rev, old_marks):
"""Convert an hg revnum to a git-fast-import rev reference (an SHA1
or a mark)"""
return old_marks.get(rev) or b':%d' % (rev+1) | 13730de4c1debe0cecdd1a14652490b9416b22f5 | 22,288 |
def onset_precision_recall_f1(ref_intervals, est_intervals,
onset_tolerance=0.05, strict=False, beta=1.0):
"""Compute the Precision, Recall and F-measure of note onsets: an estimated
onset is considered correct if it is within +-50ms of a reference onset.
Note that this metric completely ignores note offset and note pitch. This
means an estimated onset will be considered correct if it matches a
reference onset, even if the onsets come from notes with completely
different pitches (i.e. notes that would not match with
:func:`match_notes`).
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_valued_intervals(
... 'reference.txt')
>>> est_intervals, _ = mir_eval.io.load_valued_intervals(
... 'estimated.txt')
>>> (onset_precision,
... onset_recall,
... onset_f_measure) = mir_eval.transcription.onset_precision_recall_f1(
... ref_intervals, est_intervals)
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
onset_tolerance : float > 0
The tolerance for an estimated note's onset deviating from the
reference note's onset, in seconds. Default is 0.05 (50 ms).
strict : bool
If ``strict=False`` (the default), threshold checks for onset matching
are performed using ``<=`` (less than or equal). If ``strict=True``,
the threshold checks are performed using ``<`` (less than).
beta : float > 0
Weighting factor for f-measure (default value = 1.0).
Returns
-------
precision : float
The computed precision score
recall : float
The computed recall score
f_measure : float
The computed F-measure score
"""
validate_intervals(ref_intervals, est_intervals)
# When reference notes are empty, metrics are undefined, return 0's
if len(ref_intervals) == 0 or len(est_intervals) == 0:
return 0., 0., 0.
matching = match_note_onsets(ref_intervals, est_intervals,
onset_tolerance=onset_tolerance,
strict=strict)
onset_precision = float(len(matching))/len(est_intervals)
onset_recall = float(len(matching))/len(ref_intervals)
onset_f_measure = util.f_measure(onset_precision, onset_recall, beta=beta)
return onset_precision, onset_recall, onset_f_measure | aa4747925a59116246ece29e4cec55a2f91a903d | 22,289 |
def parse_acs_metadata(acs_metadata, groups):
"""Returns a map of variable ids to metadata for that variable, filtered to
specified groups.
acs_metadata: The ACS metadata as json.
groups: The list of group ids to include."""
output_vars = {}
for variable_id, metadata in acs_metadata["variables"].items():
group = metadata.get("group")
if group in groups and metadata["label"].startswith("Estimate!!Total"):
output_vars[variable_id] = metadata
return output_vars | f0bfb0172b0b2d5fec92b613b5f2e2baf6e7c8f0 | 22,290 |
import numpy as np
from sunpy.time import parse_time
def split_series_using_lytaf(timearray, data, lytaf):
"""
Proba-2 analysis code for splitting up LYRA timeseries around locations
where LARs (and other data events) are observed.
Parameters
----------
timearray : `numpy.ndarray` of times understood by `sunpy.time.parse_time`
function.
data : `numpy.array` corresponding to the given time array
lytaf : `numpy.recarray`
Events obtained from querying LYTAF database using
lyra.get_lytaf_events().
Returns
-------
output : `list` of dictionaries
Each dictionary contains a sub-series corresponding to an interval of
'good data'.
"""
n = len(timearray)
mask = np.ones(n)
el = len(lytaf)
# make the input time array a list of datetime objects
datetime_array = []
for tim in timearray:
datetime_array.append(parse_time(tim))
# scan through each entry retrieved from the LYTAF database
for j in range(0, el):
# want to mark all times with events as bad in the mask, i.e. = 0
start_dt = lytaf['begin_time'][j]
end_dt = lytaf['end_time'][j]
# find the start and end indices for each event
start_ind = np.searchsorted(datetime_array, start_dt)
end_ind = np.searchsorted(datetime_array, end_dt)
# append the mask to mark event as 'bad'
mask[start_ind:end_ind] = 0
diffmask = np.diff(mask)
tmp_discontinuity = np.where(diffmask != 0.)
# disc contains the indices of mask where there are discontinuities
disc = tmp_discontinuity[0]
if len(disc) == 0:
print('No events found within time series interval. '
'Returning original series.')
return [{'subtimes': datetime_array, 'subdata': data}]
# -1 in diffmask means went from good data to bad
# +1 means went from bad data to good
# want to get the data between a +1 and the next -1
# if the first discontinuity is a -1 then the start of the series was good.
if diffmask[disc[0]] == -1.0:
# make sure we can always start from disc[0] below
disc = np.insert(disc, 0, 0)
split_series = []
limit = len(disc)
# now extract the good data regions and ignore the bad ones
for h in range(0, limit, 2):
if h == limit-1:
# can't index h+1 here. Go to end of series
subtimes = datetime_array[disc[h]:-1]
subdata = data[disc[h]:-1]
subseries = {'subtimes':subtimes, 'subdata':subdata}
split_series.append(subseries)
else:
subtimes = datetime_array[disc[h]:disc[h+1]]
subdata = data[disc[h]:disc[h+1]]
subseries = {'subtimes':subtimes, 'subdata':subdata}
split_series.append(subseries)
return split_series | 2cc509ede0f2f74f999fae180acb23049a87f165 | 22,291 |
from django.http import QueryDict
def getrqdata(request):
"""Return the request data.
Unlike the now defunct `REQUEST
<https://docs.djangoproject.com/en/1.11/ref/request-response/#django.http.HttpRequest.REQUEST>`_
attribute, this inspects the request's `method` in order to decide
what to return.
"""
if request.method in ('PUT', 'DELETE'):
return QueryDict(request.body)
# note that `body` was named `raw_post_data` before Django 1.4
# print 20130222, rqdata
# rqdata = request.REQUEST
if request.method == 'HEAD':
return request.GET
return getattr(request, request.method) | d385943c4c8c7fc7e0b5fc4b1d0f1ba0bc272a13 | 22,292 |
from typing import List
def generate_per_level_fractions(highest_level_ratio: int, num_levels: int = NUM_LEVELS) -> List[float]:
"""
Generates the per-level fractions to reach the target sum (i.e. the highest level ratio).
Args:
highest_level_ratio:
The 1:highest_level_ratio ratio for the highest level; i.e. the target sum for the geometric series.
num_levels:
The number of levels to calculate the sum over.
Returns:
A list of fractions of the population, per-level.
"""
ratio = calc_geometric_ratio(highest_level_ratio, num_levels)
per_level = [(ratio ** i) / highest_level_ratio for i in range(num_levels)]
# Change so that the highest level information is at the end
per_level.reverse()
return per_level | 6c7aee63a2b89671ae65bd28fb8616ffc72d014b | 22,293 |
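calc_geometric_ratio and NUM_LEVELS are defined elsewhere in the module. Purely as an assumption, the helper is sketched below as a bisection search for the ratio r whose geometric series 1 + r + ... + r^(n-1) equals the target sum:
NUM_LEVELS = 4  # assumed default; the real module defines its own value

def calc_geometric_ratio(target_sum, num_levels, tol=1e-12):
    """Find r > 0 such that sum(r**i for i in range(num_levels)) equals target_sum."""
    lo, hi = 0.0, float(target_sum)  # the series sum grows monotonically in r, so bisect
    while hi - lo > tol:
        mid = (lo + hi) / 2
        if sum(mid ** i for i in range(num_levels)) < target_sum:
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2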
from PyInquirer import prompt
def choose_transformations(name):
"""Prompts user with different data transformation options"""
transformations_prompt=[
{
'type':'confirm',
'message':'Would you like to apply some transformations to the file? (Default is no)',
'name':'confirm_transformations',
'default':False
},
{
'type':'checkbox',
'message':f'Ok {name}, let\'s select some transformations before we convert your file:',
'name':'transformations',
'choices':[
{'name':'Change Column Names'},
{'name':'Change File Name'}
],
'when': lambda answers: answers['confirm_transformations']
}
]
answers = prompt(questions=transformations_prompt)
return answers | f24c560cb23573daa57e4fece7a28b3a809ae478 | 22,294 |
from typing import Dict
from typing import Tuple
def update_list_item_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]:
"""Updates a list item. return outputs in Demisto's format
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs
"""
list_id = int(args.get('list_id')) # type: ignore
item_id = int(args.get('item_id')) # type: ignore
raw_response = client.update_list_item(
list_id=list_id,
item_id=item_id,
type=args.get('type'),
value=args.get('value'),
risk=args.get('risk'),
notes=args.get('notes')
)
if raw_response:
title = f'{INTEGRATION_NAME} - List item {item_id} from list {list_id} was updated successfully'
context_entry = create_context_result(raw_response, LIST_ITEM_TRANS)
context = {
f'{INTEGRATION_CONTEXT_NAME}List(val.ID && val.ID === {list_id}).Item(val.ID === obj.ID)': context_entry
}
human_readable = tableToMarkdown(title, context_entry)
# Return data to Demisto
return human_readable, context, raw_response
else:
return f'{INTEGRATION_NAME} - Could not update list item.', {}, raw_response | 6471170d72bec7dd19d102470e2b29dec2131e17 | 22,295 |
import torch
def fft(input, inverse=False):
"""Interface with torch FFT routines for 3D signals.
fft of a 3d signal
Example
-------
x = torch.randn(128, 32, 32, 32, 2)
x_fft = fft(x)
x_ifft = fft(x, inverse=True)
Parameters
----------
input : tensor
Complex input for the FFT.
inverse : bool
True for computing the inverse FFT.
Raises
------
TypeError
In the event that x does not have a final dimension 2 i.e. not
complex.
Returns
-------
output : tensor
Result of FFT or IFFT.
"""
if not _is_complex(input):
raise TypeError('The input should be complex (e.g. last dimension is 2)')
if inverse:
return torch.ifft(input, 3)
return torch.fft(input, 3) | 8b7bdfbaeaf712ee8734c7d035f404fd154d3838 | 22,296 |
def dbdescs(data, dbname):
"""
return the entire set of information for a specific server/database
"""
# pylint: disable=bad-continuation
return {
'admin': onedesc(data, dbname, 'admin', 'rw'),
'user': onedesc(data, dbname, 'user', 'rw'),
'viewer': onedesc(data, dbname, 'viewer', 'ro')
} | 895f87300192fbad1045665eef0a08c64c6ba294 | 22,297 |
from datetime import datetime
def format_date(date):
"""Format date to readable format."""
try:
if date != 'N/A':
date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S').strftime('%d %b %Y')
except ValueError:
logger.error("Unexpected ValueError while trying to format date -> {}".format(date))
pass
return date | 48d6d426925e45f0c3b92e492efa5d23e1550a2f | 22,298 |
import numpy as np
import matplotlib.pyplot as plt
def estimate_perfomance_plan(sims, ntra, stateinit, destination, plan=list(), plot=False, verbose=True):
"""
Estimates the performance of a plan by simulating it on each scenario in sims.
:param list() sims: List of :class:`simulatorTLKT.Simulator`
:param int ntra: Number of trajectories used to estimate the performances on each scenarios
:param list(int,float,float) stateinit: [t_index, lat, lon], starting point of the plans
:param list(int,float,float) destination: [t_index, lat, lon], destination point of the plans
:param list plan: list of actions to apply
:param bool plot: if True displays the mean trajectories per scenario
:param bool verbose: if True verbose results
:return: ([global_mean_time] + mean_arrival_times, [variance_globale] + var_arrival_times),
two lists of length len(sims) + 1 (global value first, then one entry per scenario)
:rtype: list(float), list(float)
"""
################### Arrival Time #############################
meantrajs = []
mean_arrival_times = []
var_arrival_times = []
all_arrival_times = []
nb_actions = len(plan)
for _, sim in enumerate(sims):
arrivaltimes = []
trajsofsim = np.zeros((ntra, len(sims[0].times), 3))
for ii in range(ntra):
traj = []
sim.reset(stateinit)
traj.append(list(sim.state))
compte_action = 0
while (compte_action < nb_actions):
action = plan[compte_action]
compte_action += 1
sim.doStep(action)
traj.append(list(sim.state))
if nb_actions == 0:
dist, action = sim.getDistAndBearing(sim.state[1:], destination)
sim.doStep(action)
traj.append(list(sim.state))
atDest, frac = Tree.is_state_at_dest(destination, sim.prevState, sim.state)
while (not atDest) \
and (not Tree.is_state_terminal(sim, sim.state)):
dist, action = sim.getDistAndBearing(sim.state[1:], destination)
sim.doStep(action)
traj.append(list(sim.state))
atDest, frac = Tree.is_state_at_dest(destination, sim.prevState, sim.state)
if atDest:
finalTime = sim.times[sim.state[0]] - \
(1 - frac) * (sim.times[sim.state[0]] - sim.times[sim.state[0] - 1])
arrivaltimes.append(finalTime)
all_arrival_times.append(finalTime)
else:
finalTime = sim.times[-1]
arrivaltimes.append(finalTime)
all_arrival_times.append(finalTime)
trajsofsim[ii, :, :] = traj[-1]
trajsofsim[ii, :, 0] = [i for i in range(len(sim.times))]
trajsofsim[ii, :len(traj), :] = traj
meantrajs.append(np.mean(trajsofsim, 0))
average_scenario = np.mean(arrivaltimes)
mean_arrival_times.append(average_scenario)
variance_scenario = 0
for value in arrivaltimes:
variance_scenario += (average_scenario - value) ** 2
variance_scenario = variance_scenario / ntra
var_arrival_times.append(variance_scenario)
global_mean_time = np.mean(all_arrival_times)
variance_globale = 0
for value in all_arrival_times:
variance_globale += (global_mean_time - value) ** 2
variance_globale = variance_globale / len(all_arrival_times)
if plot:
basemap_time = sims[0].prepareBaseMap(proj='aeqd', centerOfMap=stateinit[1:])
plt.title('Mean trajectory for minimal travel time estimation')
colors = plt.get_cmap("tab20")
colors = colors.colors[:len(sims)]
xd, yd = basemap_time(destination[1], destination[0])
xs, ys = basemap_time(stateinit[2], stateinit[1])
basemap_time.scatter(xd, yd, zorder=0, c="red", s=100)
plt.annotate("destination", (xd, yd))
basemap_time.scatter(xs, ys, zorder=0, c="green", s=100)
plt.annotate("start", (xs, ys))
for ii, sim in enumerate(sims):
sim.plotTraj(meantrajs[ii], basemap_time, color=colors[ii], label="Scen. num : " + str(ii))
plt.legend()
if verbose:
for nb in range(len(sims)):
print("temps scénario isochrones ", nb, " = ", mean_arrival_times[nb])
print("variance scénario isochrones = ", var_arrival_times[nb])
print()
print("moyenne des temps isochrones = ", global_mean_time)
print("variance globale des isochrones = ", variance_globale)
return [global_mean_time] + mean_arrival_times, [variance_globale] + var_arrival_times | 274ebadafa7f7637e27a0f25a013171a0955d4ce | 22,300 |
def xls_dslx_ir_impl(ctx, src, dep_src_list):
"""The implementation of the 'xls_dslx_ir' rule.
Converts a DSLX source file to an IR file.
Args:
ctx: The current rule's context object.
src: The source file.
dep_src_list: A list of source file dependencies.
Returns:
DslxModuleInfo provider
ConvIRInfo provider
DefaultInfo provider
"""
ir_file = _convert_to_ir(ctx, src, dep_src_list)
dslx_module_info = ctx.attr.dep[DslxModuleInfo]
return [
dslx_module_info,
ConvIRInfo(
dslx_source_file = src,
conv_ir_file = ir_file,
),
DefaultInfo(files = depset([ir_file])),
] | 119112184086ccb469157eae1b17e1a0f38b57ef | 22,301 |
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
def split_data(images, labels):
"""
Split data into training (80%), validation (10%), and testing (10%)
datasets
Returns (images_train, images_validate, images_test, labels_train,
labels_validate, labels_test)
Assumes that num_covid_points <= num_normal_points and num_virus_points
"""
images, labels = shuffle_data_pair(images, labels)
num_covid_points = sum(map(lambda label: label == 0, labels))
# Calculate split
num_test = int(num_covid_points * 0.1)
num_covid_train = num_covid_points - num_test * 2
num_other_train = int(num_covid_train * 1.1)
# (train, validate, test) points added
num_points_added = [
[0, 0, 0], # COVID-19
[0, 0, 0], # Viral pneumonia
[0, 0, 0] # Normal
]
# Datasets
images_train = []
labels_train = []
images_validate = []
labels_validate = []
images_test = []
labels_test = []
# Add images and labels to datasets
notifier.send(" Adding images and labels to dataset...")
completed_labels = [False, False, False]  # Enough of each label added
for i, label in enumerate(labels):
print(f"    Point: {i} / {len(labels)}")
if all(completed_labels):
break
for j in range(3): # 0: COVID-19, 1: Viral pneumonia, 2: Normal
if completed_labels[j]:
continue
if label == j:
# Add training data
can_add_training = False
if j == 0: # COVID-19
if num_points_added[j][0] < num_covid_train:
can_add_training = True
num_points_added[j][0] += 1
elif num_points_added[j][0] < num_other_train: # Not COVID-19
can_add_training = True
num_points_added[j][0] += 1
if can_add_training:
images_train.append(images[i])
labels_train.append(labels[i])
break
# Add validation data
if num_points_added[j][1] < num_test:
num_points_added[j][1] += 1
images_validate.append(images[i])
labels_validate.append(labels[i])
break
# Add testing data
if num_points_added[j][2] < num_test:
num_points_added[j][2] += 1
images_test.append(images[i])
labels_test.append(labels[i])
break
# Point couldn't be added anywhere: label is complete
completed_labels[j] = True
break
# Shuffle all data
notifier.send(" Shuffling data...")
images_train, labels_train = shuffle_data_pair(
images_train, labels_train
)
images_validate, labels_validate = shuffle_data_pair(
images_validate, labels_validate
)
images_test, labels_test = shuffle_data_pair(
images_test, labels_test
)
if PLOT_LABELS:
# Plot data frequencies
plt.hist(labels, bins=3)
plt.title("Labels")
plt.hist(labels_train, bins=3)
plt.title("Train Labels")
plt.hist(labels_validate, bins=3)
plt.title("Validate Labels")
plt.hist(labels_test, bins=3)
plt.title("Test Labels")
plt.show()
# Make labels categorical
notifier.send(" Making labels categorical: train...")
labels_train = tf.keras.utils.to_categorical(labels_train)
notifier.send(" Making labels categorical: validate...")
labels_validate = tf.keras.utils.to_categorical(labels_validate)
notifier.send(" Making labels categorical: test...")
labels_test = tf.keras.utils.to_categorical(labels_test)
notifier.send(" Converting data to NumPy arrays...")
return \
np.array(images_train), np.array(images_validate), np.array(images_test), \
np.array(labels_train), np.array(labels_validate), np.array(labels_test) | 87950ef842781abb8500961a11d997b254bde6af | 22,302 |
import random
def randomlyInfectRegions(network, regions, age_groups, infected):
"""Randomly infect regions to initialize the random simulation
:param network: object representing the network of populations
:type network: A NetworkOfPopulation object
:param regions: The number of regions to expose.
:type regions: int
:param age_groups: Age groups to infect
:type age_groups: list
:param infected: People to infect
:type infected: int
:return: Structure of initially infected regions with number
:rtype: dict
"""
infections = {}
for regionID in random.choices(list(network.graph.nodes()), k=regions):
infections[regionID] = {}
for age in age_groups:
infections[regionID][age] = infected
return infections | 213450bfbdba56a8671943905d6ac888a548c8aa | 22,303 |
def timestamp_to_uint64(timestamp):
"""Convert timestamp to milliseconds since epoch."""
return int(timestamp.timestamp() * 1e3) | 165df202cb5f8cee5792bfa5778114ea3e98fa65 | 22,304 |
from functools import wraps

# Module-level registries assumed by the decorator below.
extensible_functions = []
extensions = {}
def extensible(x):
"""
Enables a function to be extended by some other function.
The function will get an attribute (extensible) which will return True.
The function will also get a function (extendedby) which will return a
list of all the functions that extend it.
"""
extensible_functions.append(x.__name__)
@wraps(x)
def wrapper(*args, **kwargs):
if x.__name__ in extensions:
for f in extensions[x.__name__]:
if not f.after:
f.func(*args, **kwargs)
result = x(*args, **kwargs)
if x.__name__ in extensions:
for f in extensions[x.__name__]:
if f.after:
f.func(*args, **kwargs)
return result
wrapper.extensible = True
def extended_by():
return extensions[x.__name__]
wrapper.extendedby = extended_by
return wrapper | a810e90e386441e8b223824c77ee452b4f7ff6d5 | 22,305 |
def _validate_user_deploy_steps(task, user_steps, error_prefix=None):
"""Validate the user-specified deploy steps.
:param task: A TaskManager object
:param user_steps: a list of deploy steps. A deploy step is a dictionary
with required keys 'interface', 'step', 'args', and 'priority'::
{ 'interface': <driver_interface>,
'step': <name_of_deploy_step>,
'args': {<arg1>: <value1>, ..., <argn>: <valuen>},
'priority': <priority_of_deploy_step> }
For example::
{ 'interface': 'bios',
'step': 'apply_configuration',
'args': { 'settings': [ { 'foo': 'bar' } ] },
'priority': 150 }
:param error_prefix: String to use as a prefix for exception messages, or
None.
:raises: InvalidParameterValue if validation of deploy steps fails.
:raises: InstanceDeployFailure if there was a problem getting the deploy
steps from the driver.
:return: validated deploy steps update with information from the driver
"""
driver_steps = _get_deployment_steps(task, enabled=False, sort=False)
return _validate_user_steps(task, user_steps, driver_steps, 'deploy',
error_prefix=error_prefix) | 58cf55b444c533ec96a86ad09b76ca9bc275f7dd | 22,306 |
import numpy as np
import pandas as pd
from pandas._libs import lib
from pandas.core.dtypes import generic as gt  # gt.ABCSeries is used below
def is_period_arraylike(arr):
""" return if we are period arraylike / PeriodIndex """
if isinstance(arr, pd.PeriodIndex):
return True
elif isinstance(arr, (np.ndarray, gt.ABCSeries)):
return arr.dtype == object and lib.infer_dtype(arr) == 'period'
return getattr(arr, 'inferred_type', None) == 'period' | f675f56dbca7ef80dc75bbe454a4f6e11a419c50 | 22,307 |
from flask import abort, flash, redirect, render_template, url_for
def reset_password_step_2(token):
"""Processing the second step of changing the password (password change)"""
email = confirm_token_reset_password(token)
if not email:
return redirect(url_for('web_pages.reset_password_step_1'))
form = EditPassword()
if form.validate_on_submit():
password = form.password.data
session = create_session()
user = session.query(User).filter(User.email == email).first()
if not user:
abort(404)
user.set_password(password)
session.merge(user)
session.commit()
flash('Password changed successfully', 'success')
return redirect(url_for('web_pages.login_page'))
return render_template('reset_password_step_2.html', form=form) | dcec97ba112ff96af4510488f801926190cfe221 | 22,308 |
from docutils import nodes
def FStarTypeRole(typ, rawtext, text, lineno, inliner, options={}, content=[]):
"""An inline role to highlight F* types."""
#pylint: disable=dangerous-default-value, unused-argument
return nodes.literal(typ, rawtext, text, lineno, inliner, options=options, content=content) | 970ed43558e87a4319aed91c33d781fbe6a39d20 | 22,309 |
import numpy as np
import scipy.io as sio
def matobj2dict(matobj):
"""A recursive function which converts nested mat object
to a nested python dictionaries
Arguments:
matobj {sio.matlab.mio5_params.mat_struct} -- nested mat object
Returns:
dict -- a nested dictionary
"""
ndict = {}
for fieldname in matobj._fieldnames:
attr = matobj.__dict__[fieldname]
if isinstance(attr, sio.matlab.mio5_params.mat_struct):
ndict[fieldname] = matobj2dict(attr)
elif isinstance(attr, np.ndarray) and fieldname == "move":
for ind, val in np.ndenumerate(attr):
ndict[
fieldname
+ str(ind).replace(",", "").replace(")", "").replace("(", "_")
] = matobj2dict(val)
elif fieldname == "skel":
tree = []
for ind in range(len(attr)):
tree.append(matobj2dict(attr[ind]))
ndict[fieldname] = tree
else:
ndict[fieldname] = attr
return ndict | 6b8413fd0c4dc9bb4e778944e7a6d4c260b56fa1 | 22,310 |
import io
import vt
def download_from_vt(client: vt.Client, file_hash: str) -> bytes:
"""
Download file from VT.
:param vt.Client client: the VT client
:param str file_hash: the file hash
:rtype: bytes
:return: the downloaded data
:raises ValueError: in case of any error
"""
try:
buffer = io.BytesIO()
client.download_file(file_hash, buffer)
buffer.seek(0, 0)
return buffer.read()
except (IOError, vt.APIError) as e:
raise ValueError(str(e)) from e | 055cd636d853d81921034d197bac9ad7a9c206c2 | 22,311 |
import torch
def divide_and_conquer(x, k, mul):
"""
Divide and conquer method for polynomial expansion
x is a 2d tensor of size (n_classes, n_roots)
The objective is to obtain the k first coefficients of the expanded
polynomial
"""
to_merge = []
while x[0].dim() > 1 and x[0].size(0) > 1:
size = x[0].size(0)
half = size // 2
if 2 * half < size:
to_merge.append([t[-1] for t in x])
x = mul([t[:half] for t in x],
[t[half: 2 * half] for t in x])
for row in to_merge:
x = mul(x, row)
x = torch.cat(x)
return x | 64bdf2d50cf7cbf7da814b93521df5cee41623fe | 22,312 |
from pyomo.environ import ConcreteModel, value
def calculate_operating_pressure(feed_state_block=None, over_pressure=0.15,
water_recovery=0.5, NaCl_passage=0.01, solver=None):
"""
estimate operating pressure for RO unit model given the following arguments:
feed_state_block: the state block of the RO feed that has the non-pressure state
variables initialized to their values (default=None)
over_pressure: the amount of operating pressure above the brine osmotic pressure
represented as a fraction (default=0.15)
water_recovery: the mass-based fraction of inlet H2O that becomes permeate
(default=0.5)
NaCl_passage: the mass-based fraction of inlet NaCl that becomes permeate
(default=0.01)
solver: solver object to be used (default=None)
"""
t = ConcreteModel() # create temporary model
prop = feed_state_block.config.parameters
t.brine = prop.build_state_block([0], default={})
# specify state block
t.brine[0].flow_mass_phase_comp['Liq', 'H2O'].fix(
value(feed_state_block.flow_mass_phase_comp['Liq', 'H2O']) * (1 - water_recovery))
t.brine[0].flow_mass_phase_comp['Liq', 'NaCl'].fix(
value(feed_state_block.flow_mass_phase_comp['Liq', 'NaCl']) * (1 - NaCl_passage))
t.brine[0].pressure.fix(101325) # valid when osmotic pressure is independent of hydraulic pressure
t.brine[0].temperature.fix(value(feed_state_block.temperature))
# calculate osmotic pressure
# since properties are created on demand, we must touch the property to create it
t.brine[0].pressure_osm
# solve state block
results = solve_indexed_blocks(solver, [t.brine])
check_solve(results)
return value(t.brine[0].pressure_osm) * (1 + over_pressure) | 2252910515ad6b6188c06bbf3add2a36b37da1ea | 22,313 |
from bs4 import BeautifulSoup
def parse_pypi_index(text):
"""Parses the text and returns all the packages
Parameters
----------
text : str
the html of the website (https://pypi.org/simple/)
Returns
-------
List[str]
the list of packages
"""
soup = BeautifulSoup(text, "lxml")
return [i.get_text() for i in soup.find_all("a")] | 68d831aab69f3ffdd879ea1fa7ca5f28fc1b1e75 | 22,314 |
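A usage sketch (requires the requests library and an lxml parser installed for BeautifulSoup):
import requests

response = requests.get("https://pypi.org/simple/")
packages = parse_pypi_index(response.text)
print(len(packages), packages[:5])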
def _get_score_measure(func, alphabeta, color, board, alpha, beta, depth, pid):
"""_get_score_measure
"""
measure(pid)
return _get_score(func, alphabeta, color, board, alpha, beta, depth, pid) | e36723d03c2ee686177ea3f8ce34874b250c2058 | 22,316 |
def mousePressed():
"""
Return True if the mouse has been left-clicked since the
last time mousePressed was called, and False otherwise.
"""
global _mousePressed
if _mousePressed:
_mousePressed = False
return True
return False | 37fd34e71ee7e9c4a671a5ba5a4a946a7441c0da | 22,317 |
import tensorflow as tf
def variable_on_cpu(name, shape, initializer):
"""
Next we concern ourselves with graph creation.
However, before we do so we
must introduce a utility function ``variable_on_cpu()``
used to create a variable in CPU memory.
"""
# Use the /cpu:0 device for scoped operations
with tf.device('/cpu:0'):
# Create or get the requested variable
var = tf.get_variable(name=name, shape=shape, initializer=initializer)
return var | 10e724f900d7c7334e81f3380fc4764ca935b284 | 22,318 |
from collections import defaultdict
def adaptive_generate_association_rules(patterns, confidence_threshold):
"""
Given a set of frequent itemsets, return a dict of association rules
in the form {(left): {right}}
It has a check with 2048 thus will only retain multimodal rules.
"""
missed = 0
rules = defaultdict(set)
for setn, support in patterns.items():
if len(setn) > 1:
itemset = list(setn) # the itemset I with n element
for i in range(len(itemset)-1, -1, -1):
# the last pos is the inference item i for I->i
# every elem go to the last once, the itemset remains sorted
itemset[i], itemset[-1] = itemset[-1], itemset[i]
setn_1 = tuple(itemset[:-1])
if max(itemset[:-1]) < 2048 <= itemset[-1]:
if setn_1 in patterns:
confidence = patterns[setn] / patterns[setn_1]
if confidence >= confidence_threshold:
rules[setn_1].add(itemset[-1])
else:
missed += 1
print("missed", setn_1)
print('%d freq missed.' % missed)
return rules | 35589916f91aab789a8d31559bcdbaca37bfdcd1 | 22,319 |
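A small usage sketch with a toy pattern table; item ids below 2048 stand for one modality and ids of 2048 or above for the other, matching the multimodality check in the function:
# {itemset: support count}; ids >= 2048 represent the second modality.
patterns = {
    (1,): 10,
    (2049,): 8,
    (1, 2049): 6,
}
rules = adaptive_generate_association_rules(patterns, confidence_threshold=0.5)
print(dict(rules))  # {(1,): {2049}} with confidence 6/10 = 0.6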
from typing import Union
from typing import Collection
def scored_ngrams(
docs: Documents,
n: int = 2,
metric: str = "pmi",
tokenizer: Tokenizer = DEFAULT_TOKENIZER,
preprocessor: CallableOnStr = None,
stopwords: Union[str, Collection[str]] = None,
min_freq: int = 0,
fuse_tuples: bool = False,
sep: str = " ",
) -> Series:
"""Get Series of collocations and scores.
Parameters
----------
docs : str or iterable of str
Documents to scan for ngrams.
n : int, optional
Size of collocations, by default 2.
metric : str, optional
Scoring metric to use. Valid options include:
'raw_freq', 'pmi', 'mi_like', 'likelihood_ratio',
'jaccard', 'poisson_stirling', 'chi_sq', 'student_t'.
See nltk.BigramAssocMeasures, nltk.TrigramAssocMeasures,
and nltk.QuadgramAssocMeasures for additional size-specific
options.
tokenizer : callable, optional
Callable for tokenizing docs.
preprocessor : callable, optional
Callable for preprocessing docs before tokenization, by default None.
stopwords : str or collection of str, optional
Name of known stopwords set or collection of stopwords to remove from docs.
By default None.
min_freq : int, optional
Drop ngrams below this frequency, by default 0.
fuse_tuples : bool, optional
Join ngram tuples with `sep`, by default False.
sep : str, optional
Separator to use for joining ngram tuples, by default " ".
Only relevant if `fuse_tuples=True`.
Returns
-------
Series
Series {ngrams -> scores}.
"""
_validate_strings(docs)
# Get collocation finder and measures
if not isinstance(n, int):
raise TypeError(f"Expected `n` to be int, got {type(n)}.")
if 1 < n < 5:
n = int(n)
finder = NGRAM_FINDERS[n]
measures = NGRAM_METRICS[n]()
else:
raise ValueError(f"Valid `n` values are 2, 3, and 4. Got {n}.")
pre_pipe = []
if preprocessor is not None:
# Apply preprocessing
pre_pipe.append(preprocessor)
# Tokenize
pre_pipe.append(tokenizer)
if stopwords is not None:
# Fetch stopwords if passed str
if isinstance(stopwords, str):
stopwords = fetch_stopwords(stopwords)
# Remove stopwords
pre_pipe.append(partial(remove_stopwords, stopwords=stopwords))
docs = chain_processors(docs, pre_pipe)
# Find and score collocations
ngrams = finder.from_documents(docs)
ngrams.apply_freq_filter(min_freq)
ngram_score = ngrams.score_ngrams(getattr(measures, metric))
# Put the results in a DataFrame, squeeze into Series
kind = {2: "bigram", 3: "trigram", 4: "quadgram"}[n]
ngram_score = pd.DataFrame(ngram_score, columns=[kind, "score"])
if fuse_tuples:
# Join ngram tuples
ngram_score[kind] = ngram_score[kind].str.join(sep)
ngram_score.set_index(kind, inplace=True)
if ngram_score.shape[0] > 1:
ngram_score = ngram_score.squeeze()
return ngram_score | a77b42eb1361c55cb23a1b168e99d4abb1ef9af1 | 22,321 |
def imurl(image_url, return_as_array = False , **kwargs):
"""
Read image from url and convert to bytes or ndarray
Paramters
---------
image_url: http / https url of image
return_as_array: Convert image directly to numpy array
default: False
kwargs:
Keyword arguments of imread can be passed for image modification:
Example:
imurl(image_url,to_array=True,resize=(224,224),color_mode = 'rgb',dtype='float32')
Note: kwargs only works with return_as_array = True
Returns:
--------
PIL Image by default:
if return_as_array is True:
image will be returned as numpy array.
Additional params like resize, color_mode, dtype, return_original can also be passed in order to refine the image
Raises:
-------
ImportError if requests library is not installed
"""
if request_image is None:
raise ImportError('requests library is required for reading images from a url. '
'Install it using pip install requests')
if not image_url.startswith('http'):
raise ValueError(f'invalid url found. Required http or https url but got {image_url} instead')
image_response = request_image.get(image_url)
imbytes = BytesIO(image_response.content)
if return_as_array:
return imread(imbytes,**kwargs)
image = pilimage.open(imbytes)
return image | c6c93ab7a2b97b522bca2d6673bfd843fdc8bb72 | 22,323 |
def generate_command(config, work_dir, output_analysis_id_dir, errors, warnings):
"""Build the main command line command to run.
Args:
config (GearToolkitContext.config): run-time options from config.json
work_dir (path): scratch directory where non-saved files can be put
output_analysis_id_dir (path): directory where output will be saved
errors (list of str): error messages
warnings (list of str): warning messages
Returns:
cmd (list of str): command to execute
"""
# start with the command itself:
cmd = [
BIDS_APP,
str(work_dir / "bids"),
str(output_analysis_id_dir),
ANALYSIS_LEVEL,
]
# 3 positional args: bids path, output dir, 'participant'
# This should be done here in case there are nargs='*' arguments
# These follow the BIDS Apps definition (https://github.com/BIDS-Apps)
# editme: add any positional arguments that the command needs
# get parameters to pass to the command by skipping gear config parameters
# (which start with "gear-").
command_parameters = {}
for key, val in config.items():
# these arguments are passed directly to the command as is
if key == "bids_app_args":
bids_app_args = val.split(" ")
for baa in bids_app_args:
cmd.append(baa)
elif not key.startswith("gear-"):
command_parameters[key] = val
# editme: Validate the command parameter dictionary - make sure everything is
# ready to run so errors will appear before launching the actual gear
# code. Add descriptions of problems to errors & warnings lists.
# print("command_parameters:", json.dumps(command_parameters, indent=4))
if "bad_arg" in cmd:
errors.append("A bad argument was found in the config.")
num_things = command_parameters.get("num-things")
if num_things and num_things > 41:
warnings.append(
f"The num-things config value should not be > 41. It is {command_parameters['num-things']}."
)
cmd = build_command_list(cmd, command_parameters)
# editme: fix --verbose argparse argument
for ii, cc in enumerate(cmd):
if cc.startswith("--verbose"):
# handle a 'count' argparse argument where manifest gives
# enumerated possibilities like v, vv, or vvv
# e.g. replace "--verbose=vvv' with '-vvv'
cmd[ii] = "-" + cc.split("=")[1]
elif " " in cc: # then is is a space-separated list so take out "="
# this allows argparse "nargs" to work properly
cmd[ii] = cc.replace("=", " ")
log.info("command is: %s", str(cmd))
return cmd | bb24ff62f3c4fa579eedf721708e84bf4cf3920c | 22,324 |
def ip(
context,
api_client,
api_key,
input_file,
output_file,
output_format,
verbose,
ip_address,
):
"""Query GreyNoise for all information on a given IP."""
ip_addresses = get_ip_addresses(context, input_file, ip_address)
results = [api_client.ip(ip_address=ip_address) for ip_address in ip_addresses]
return results | b4c52e1bb1abb03679b977d4b15f5e0295c1e0c2 | 22,325 |
def get_layer(neurons, neuron_loc, depth=None, return_closest: bool=False):
"""Obtain the layer of neurons corresponding to layer number or specific depth."""
layers = np.unique(neuron_loc[2, :])
if depth is not None:
if depth in layers:
pass
elif return_closest:
depth = layers[np.argmin(np.abs(layers - depth))]
else:
raise Exception('Provided depth does not correspond to layer.')
neuron_mask = neuron_loc[2, :] == depth
return neurons[:, neuron_mask] | d221d294bbe974554b0180ea9d41394294de41dc | 22,326 |
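# Minimal usage sketch for get_layer() above (hypothetical data; assumes numpy is available as np):
_locs = np.array([[0.0, 1.0, 2.0, 3.0],           # x
                  [0.0, 0.0, 1.0, 1.0],           # y
                  [100.0, 100.0, 200.0, 200.0]])  # depth
_acts = np.arange(8).reshape(2, 4)                # 2 features x 4 neurons
_layer = get_layer(_acts, _locs, depth=100.0)                         # columns 0 and 1
_nearest = get_layer(_acts, _locs, depth=120.0, return_closest=True)  # snaps to depth 100.0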
def _format_unpack_code_level(message,
signal_names,
variable_lines,
helper_kinds):
"""Format one unpack level in a signal tree.
"""
body_lines = []
muxes_lines = []
for signal_name in signal_names:
if isinstance(signal_name, dict):
mux_lines = _format_unpack_code_mux(message,
signal_name,
body_lines,
variable_lines,
helper_kinds)
if muxes_lines:
muxes_lines.append('')
muxes_lines += mux_lines
else:
_format_unpack_code_signal(message,
signal_name,
body_lines,
variable_lines,
helper_kinds)
if body_lines:
if body_lines[-1] != '':
body_lines.append('')
if muxes_lines:
muxes_lines.append('')
body_lines = body_lines + muxes_lines
if body_lines:
body_lines = [''] + body_lines
return body_lines | b88362f6fd3cb5ccaf3a3f76472f2002ac9c1518 | 22,327 |
def fileGDB_schema() -> StructType:
"""Schema for dummy FileGDB."""
return StructType(
[
StructField("id", LongType()),
StructField("category", StringType()),
StructField("geometry", BinaryType()),
]
) | 0ef7ad136d64f19e392bb8a9ff471478094193fe | 22,329 |
def set_atom_stereo_parities(sgr, atm_par_dct):
""" set atom parities
"""
atm_dct = mdict.set_by_key_by_position(atoms(sgr), atm_par_dct,
ATM_STE_PAR_POS)
return _create.from_atoms_and_bonds(atm_dct, bonds(sgr)) | 1e733291ce12e614b538054c2c05fc3892ce3206 | 22,330 |
def clean(expr):
"""
cleans up an expression string
Arguments:
expr: string, expression
"""
expr = expr.replace("^", "**")
return expr | f7c990146094c43d256fe15f9543a0ba90877ee3 | 22,331 |
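# Usage sketch for clean() above: caret exponents become Python power operators.
assert clean("x^2 + y^2") == "x**2 + y**2"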
def atom_stereo_keys(sgr):
""" keys to atom stereo-centers
"""
atm_ste_keys = dict_.keys_by_value(_atom_stereo_parities(sgr),
lambda x: x in [True, False])
return atm_ste_keys | c084c30f4601d18941d98c313d3a74b93153cd80 | 22,332 |
def get_node_rd(graph, k=3):
"""
Get k nodes to defend based on Recalculated Degree (RD) Removal :cite:`holme2002attack`.
:param graph: an undirected NetworkX graph
:param k: number of nodes to defend
:return: a list of nodes to defend
"""
return get_node_rd_attack(graph, k) | dbbf501353133a1cb222f6d2d4f632faa07bad1c | 22,333 |
def get_frog():
"""Returns the interface object to frog NLP. (There should only be one
instance, because it spawns a frog process that consumes a lot of RAM.)
"""
global FROG
if FROG is None:
FROG = frog.Frog(frog.FrogOptions(
tok=True, lemma=True, morph=False, daringmorph=False, mwu=True,
chunking=False, ner=False, parser=False
), "/home/rahiel/hortiradar/venv/share/frog/nld/frog.cfg")
return FROG | 5701b2856532241d797eb77d9734fd67ee838312 | 22,334 |
import requests
def fetch(url: str, **kwargs) -> Selector:
"""
Send HTTP request and parse it as a DOM selector.
Args:
url (str): The url of the site.
Returns:
Selector: allows you to select parts of HTML text using CSS or XPath expressions.
"""
kwargs.setdefault('headers', DEFAULT_HEADERS)
try:
res = requests.get(url, **kwargs)
res.encoding = kwargs.get('encoding', DEFAULT_ENCODING)
res.raise_for_status()
except requests.RequestException as e:
print(e)
else:
html = res.text
tree = Selector(text=html)
return tree | f5bbe41f3b7bc83d0092d0b2165681df096413d1 | 22,335 |
import math
def growth(x, a, b):
""" Growth model. a is the value at t=0. b is the so-called R number.
    Doesn't work. FIX IT """
return np.power(a * 0.5, (x / (4 * (math.log(0.5) / math.log(b))))) | 6276fd00f270ef72f52ed7493f431dd0e3b34326 | 22,336 |
from datetime import datetime
import pytz
def __to_localdatetime(val):
"""Convert val into a local datetime for tz Europe/Amsterdam."""
try:
# "timestamp": "2019-02-03T19:20:00",
dt = datetime.strptime(val, __DATE_FORMAT)
dt = pytz.timezone(__TIMEZONE).localize(dt)
return dt
except (ValueError, TypeError):
return None | e2eea5da625a3514b6872e5604336d5dfb6f0ccb | 22,337 |
import warnings
def imgMinMaxScaler(img, scale_range):
"""
:param img: image to be rescaled
:param scale_range: (tuple) (min, max) of the desired rescaling
"""
warnings.filterwarnings("ignore")
img = img.astype("float64")
img_std = (img - np.min(img)) / (np.max(img) - np.min(img))
img_scaled = img_std * float(scale_range[1] - scale_range[0]) + float(
scale_range[0]
)
# round at closest integer and transform to integer
img_scaled = np.rint(img_scaled).astype("uint8")
return img_scaled | f55795167f6a284ea81609413edc73c1336a2a5e | 22,338 |
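# Usage sketch for imgMinMaxScaler() above (assumes numpy is available as np):
_img = np.array([[0.0, 0.5], [1.0, 2.0]])
_scaled = imgMinMaxScaler(_img, (0, 255))  # min maps to 0, max maps to 255
assert _scaled.dtype == np.uint8 and _scaled.min() == 0 and _scaled.max() == 255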
def xor(text, key):
"""Returns the given string XORed with given key."""
while len(key) < len(text): key += key
key = key[:len(text)]
return "".join(chr(ord(a) ^ ord(b)) for (a, b) in zip(text, key)) | 3cae903ef4751b2f39e0e5e28d448b8d079ce249 | 22,340 |
from pathlib import Path
def get_emojis_voc_counts(path):
"""
Generate a value count of words for every emoji present in the csv files
found in the child directories of "path"
Args:
path (str): parent path of the csv files
    Returns:
        em2vocab [dict of dict]: maps each emoji to a dict associating each word with its count
"""
path = Path(path)
em2vocab = {}
for path in path.glob("**/[0-9]*.csv"):
df = pd.read_csv(path)
emojis = [col for col in df.columns if col in EMOJIS]
for em in emojis:
vocab = em2vocab.get(em, {})
for word, count in df[em].value_counts().iteritems():
pre_count = vocab.get(word, 0)
pre_count += count
vocab[word] = pre_count
em2vocab[em] = vocab
return em2vocab | b4525be35e191c84a9ea0d781d510f348724ff42 | 22,341 |
import asyncio
from unittest.mock import MagicMock, Mock, patch
async def test_camera_snapshot_connection_closed(driver):
"""Test camera snapshot when the other side closes the connection."""
loop = MagicMock()
transport = MagicMock()
transport.is_closing = Mock(return_value=True)
connections = {}
async def _async_get_snapshot(*_):
return b"fakesnap"
acc = Accessory(driver, "TestAcc")
acc.async_get_snapshot = _async_get_snapshot
driver.add_accessory(acc)
hap_proto = hap_protocol.HAPServerProtocol(loop, connections, driver)
hap_proto.connection_made(transport)
hap_proto.hap_crypto = MockHAPCrypto()
hap_proto.handler.is_encrypted = True
with patch.object(hap_proto.transport, "write") as writer:
hap_proto.data_received(
b'POST /resource HTTP/1.1\r\nHost: HASS\\032Bridge\\032BROZ\\0323BF435._hap._tcp.local\r\nContent-Length: 79\r\nContent-Type: application/hap+json\r\n\r\n{"image-height":360,"resource-type":"image","image-width":640,"aid":1411620844}' # pylint: disable=line-too-long
)
hap_proto.close()
await hap_proto.response.task
await asyncio.sleep(0)
assert writer.call_args_list == []
hap_proto.close() | 636f22f167d07699d7e591f74ae92ecde8f460c4 | 22,342 |
import numpy
import theano
from theano import config
from theano import tensor as T
def _as_scalar(res, dtype=None):
"""Return None or a TensorVariable whose type is in T.float_scalar_types"""
if dtype is None:
dtype = config.floatX
if numpy.all(res.type.broadcastable):
while res.owner and isinstance(res.owner.op, T.DimShuffle):
res = res.owner.inputs[0]
# may still have some number of True's
if res.type.broadcastable:
rval = res.dimshuffle()
else:
rval = res
if rval.type.dtype[:3] in ('int', 'uin'):
# We check that the upcast of res and dtype won't change dtype.
# If dtype is float64, we will cast int64 to float64.
# This is valid when res is a scalar used as input to a dot22
# as the cast of the scalar can be done before or after the dot22
# and this will give the same result.
if theano.scalar.upcast(res.dtype, dtype) == dtype:
return T.cast(rval, dtype)
else:
return None
return rval | c5a8b6041a6eb160cec23f6957c9d9cc9147d4f7 | 22,343 |
import torch
import torch.distributed as dist
from torchvision import datasets, transforms
def partition_dataset():
""" Partitioning MNIST """
dataset = datasets.MNIST(
'./data',
train=True,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307, ), (0.3081, ))
]))
size = dist.get_world_size()
bsz = 128 / float(size)
partition_sizes = [1.0 / size for _ in range(size)]
partition = DataPartitioner(dataset, partition_sizes)
partition = partition.use(dist.get_rank())
train_set = torch.utils.data.DataLoader(
partition, batch_size=int(bsz), shuffle=True)
return train_set, bsz | 6922ae2cc80655d93eeee23d31f1224f172de1cc | 22,344 |
def add_size_to_nus(demo_graph, pop, time_left):
"""
adds either nu, or [nu0, growth_rate], where nu0 is the size at the beginning of the epoch
use time_left to set nu0 to the size at the beginning of the epoch
"""
if 'nu' in demo_graph.nodes[pop]:
return demo_graph.nodes[pop]['nu']
else:
tt = demo_graph.nodes[pop]['T'] - time_left
if 'nu0' in demo_graph.nodes[pop] and 'nuF' in demo_graph.nodes[pop]:
growth_rate = np.log(demo_graph.nodes[pop]['nuF']/demo_graph.nodes[pop]['nu0']) / demo_graph.nodes[pop]['T']
nu0 = demo_graph.nodes[pop]['nu0'] * np.exp(growth_rate * tt)
return [nu0, growth_rate]
elif 'growth_rate' in demo_graph.nodes[pop] and 'nuF' in demo_graph.nodes[pop]:
nu0_pop = demo_graph.nodes[pop]['nuF'] * np.exp(-demo_graph.nodes[pop]['growth_rate']*demo_graph.nodes[pop]['T'])
            nu0 = nu0_pop * np.exp(demo_graph.nodes[pop]['growth_rate'] * tt)
return [nu0, demo_graph.nodes[pop]['growth_rate']]
elif 'growth_rate' in demo_graph.nodes[pop] and 'nu0' in demo_graph.nodes[pop]:
nu0 = demo_graph.nodes[pop]['nu0'] * np.exp(demo_graph.nodes[pop]['growth_rate'] * tt)
return [nu0, demo_graph.nodes[pop]['growth_rate']] | 6e655b157389ca8672433b26baa1f2362f5dde34 | 22,345 |
def _rand_lognormals(logs, sigma):
"""Mock-point"""
return np.random.lognormal(mean=logs, sigma=sigma, size=logs.shape) | 8fbf51e548293ff6c4dee8f385af69ecaaf34cde | 22,346 |
def add_start_end_qualifiers(statement, startVal, endVal):
"""Add start/end qualifiers to a statement if non-None, or return None.
@param statement: The statement to decorate
@type statement: WD.Statement
@param startVal: An ISO date string for the starting point
@type startVal: str, unicode, or None
@param endVal: An ISO date string for the end point
@type endVal: str, unicode, or None
@return: A statement decorated with start/end qualifiers
@rtype: WD.Statement, or None
"""
if not isinstance(statement, WD.Statement):
        raise pywikibot.Error(u'Non-statement received: %s' % statement)
if statement.isNone():
return None
# add qualifiers
quals = []
if startVal:
quals.append(
WD.Qualifier(
P=START_P,
itis=iso_to_WbTime(startVal)))
if endVal:
quals.append(
WD.Qualifier(
P=END_P,
itis=iso_to_WbTime(endVal)))
for q in quals:
statement.addQualifier(q)
return statement | 9a87feff53aca00ce257a5d0b967621461a5d15a | 22,347 |
def _CheckFilter(text):
"""CHecks if a string could be a filter.
@rtype: bool
"""
return bool(frozenset(text) & FILTER_DETECTION_CHARS) | 0d0dfed55df78ea6f49e4f615e9f7fe5758f9bc1 | 22,348 |
def listProxyServers():
"""return a list of proxy servers as a list of lists.
E.g. [['nodename','proxyname'], ['nodename','proxyname']].
Typical usage:
for (nodename,proxyname) in listProxyServers():
callSomething(nodename,proxyname)
"""
return listServersOfType("PROXY_SERVER") | 0e2ae4a874fa0ca030a04e694c7eacefde4f45f6 | 22,349 |
def api_version(func):
"""
    API version verification decorator.
:param func:
:return:
"""
@wraps(func)
def wrapper(*args, **kwargs):
        # Verify the API version
verify_result = verify_version(kwargs.get('version'))
if not verify_result:
            raise ApiVersionException()  # Raise an exception: the response has status code 400 and message "api version is invalid"
return func(*args, **kwargs)
return wrapper | 2e73bc7899a4052004246c1e3392001507469c86 | 22,350 |
from typing import List
from typing import Union
def is_prefix(a: List[Union[int, str]], b: List[Union[int, str]]):
"""Check if `a` is a prefix of `b`."""
if len(a) >= len(b):
return False
for i in range(len(a)):
if a[i] != b[i]:
return False
return True | 4b0605af536aa5fa188cfca0cee62588fe41bf5d | 22,351 |
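# Usage sketch for is_prefix() above: a prefix must be strictly shorter than the target.
assert is_prefix([1, 2], [1, 2, 3])
assert not is_prefix([1, 2, 3], [1, 2, 3])  # equal lists are not proper prefixes
assert not is_prefix([1, 9], [1, 2, 3])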
import glob
def shm_data_find(ifo, ldr_type, start, stride, directory='.', verbose=False):
"""a routine to automate discovery of frames within /dev/shm
"""
end = start+stride
frames = []
for frame in sorted(glob.glob(shm_glob_tmp%(directory, ifo, ifo, ldr_type))):
s, d = utils.extract_start_dur(frame, suffix=".gwf")
if (s <= end) and (s+d > start): ### there is some overlap!
frames.append( (frame, s, d) )
return frames | f4aba39ba77edf5d22cdaa0da16f888c26999512 | 22,352 |
from itertools import chain
def backward_inference(protocol, subsys_x, t_x, subsys_y, t_y, silent=True):
"""
    Backward inference answers the question:
    Given a measurement result of 'subsys_y' at the end of the protocol,
    what can I say about the result an Agent would have received had she done
    a measurement of 'subsys_x' before running the protocol?
"""
forward_mapping = forward_inference(protocol, subsys_x, t_x, subsys_y, t_y, silent)['table']
output_vals = list(set(chain(*forward_mapping.values())))
backward_mapping = {v: [] for v in output_vals}
for inpt, possible_outputs in forward_mapping.items():
for output in possible_outputs:
backward_mapping[output] += [inpt]
return InferenceTable(subsys_y, t_y,
subsys_x, t_x,
backward_mapping) | 22e73ff5c4b90b535e9387cf71829bf88745a95d | 22,353 |
def rainfall_interception_hbv(Rainfall, PotEvaporation, Cmax, InterceptionStorage):
"""
Returns:
        TF, Interception, IntEvap, InterceptionStorage
"""
Interception = pcr.min(
Rainfall, Cmax - InterceptionStorage
) #: Interception in mm/timestep
InterceptionStorage = (
InterceptionStorage + Interception
) #: Current interception storage
TF = Rainfall - Interception
IntEvap = pcr.min(
InterceptionStorage, PotEvaporation
) #: Evaporation from interception storage
InterceptionStorage = InterceptionStorage - IntEvap
return TF, Interception, IntEvap, InterceptionStorage | 0e95a1088a36d25d0d1210384a56945d0b032fda | 22,354 |
from rspn.learning.structure_learning import get_next_operation, learn_structure
def learn_mspn(
data,
ds_context,
cols="rdc",
rows="kmeans",
min_instances_slice=200,
threshold=0.3,
max_sampling_threshold_cols=10000,
max_sampling_threshold_rows=100000,
ohe=False,
leaves=None,
memory=None,
rand_gen=None,
cpus=-1
):
"""
    Adapts normal learn_mspn to use custom identity leaves and sampling for structure learning.
:param max_sampling_threshold_rows:
:param max_sampling_threshold_cols:
:param data:
:param ds_context:
:param cols:
:param rows:
:param min_instances_slice:
:param threshold:
:param ohe:
:param leaves:
:param memory:
:param rand_gen:
:param cpus:
:return:
"""
if leaves is None:
leaves = create_custom_leaf
if rand_gen is None:
rand_gen = np.random.RandomState(17)
def l_mspn(data, ds_context, cols, rows, min_instances_slice, threshold, ohe):
split_cols, split_rows = get_splitting_functions(max_sampling_threshold_rows, max_sampling_threshold_cols, cols,
rows, ohe, threshold, rand_gen, cpus)
nextop = get_next_operation(min_instances_slice)
node = learn_structure(data, ds_context, split_rows, split_cols, leaves, next_operation=nextop)
return node
if memory:
l_mspn = memory.cache(l_mspn)
spn = l_mspn(data, ds_context, cols, rows, min_instances_slice, threshold, ohe)
return spn | 6ac8117b4d448c89fe148c4c97828da4a09dc471 | 22,355 |
def generate_image_anim(img, interval=200, save_path=None):
"""
Given CT img, return an animation across axial slice
img: [D,H,W] or [D,H,W,3]
interval: interval between each slice, default 200
save_path: path to save the animation if not None, default None
return: matplotlib.animation.Animation
"""
fig = plt.figure()
ims = []
for i in range(len(img)):
im = plt.imshow(img[i], animated=True)
ims.append([im])
anim = animation.ArtistAnimation(fig, ims, interval=interval, blit=True,
repeat_delay=1000)
if save_path:
Writer = animation.writers['ffmpeg']
writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)
anim.save(save_path)
return anim | 90ebd9d0e21b58f75a2eca8623ac7a9d12b4a820 | 22,357 |
def square_root(s):
""" Function to compute square roots using the Babylonian method
"""
x = s/2
while True:
temp = x
x = (1/2) * ( x + (s/x) )
if temp == x:
return x
    # Since convergence is reached quickly, at some point the error becomes
    # smaller than machine precision and the value no longer changes between steps. | 9af22ce073bcb8d131736efba6133a92d9d7dc74 | 22,359
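# Usage sketch for square_root() above: for perfect squares the iteration settles on the exact root.
assert square_root(81) == 9.0
assert square_root(2500) == 50.0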
def quisort(uslist, lo=None, hi=None):
"""Sort in-place an unsorted list or slice of a list
lo and hi correspond to the start and stop indices for the list slice"""
if hi is None:
hi = len(uslist) - 1
if lo is None:
lo = 0
def partition(uslist, lo, hi):
"""Compare and swap values over list slice"""
p = uslist[hi]
i = lo - 1
j = lo
while j < hi:
if uslist[j] <= p:
i = i + 1
uslist[i], uslist[j] = uslist[j], uslist[i]
j += 1
i += 1
uslist[i], uslist[hi] = uslist[hi], uslist[i]
return i
if lo < hi:
p = partition(uslist, lo, hi)
quisort(uslist, lo, p - 1)
quisort(uslist, p + 1, hi) | a33adbe819ec1c60149e6d9a50ab78555f6021d5 | 22,360 |
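# Usage sketch for quisort() above: sorts in place; lo/hi restrict sorting to a slice.
_vals = [5, 2, 9, 1, 7]
quisort(_vals)
assert _vals == [1, 2, 5, 7, 9]
_partial = [3, 1, 2, 9, 0]
quisort(_partial, lo=0, hi=2)  # only the first three items are sorted
assert _partial == [1, 2, 3, 9, 0]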
def is_generator(f):
"""Return True if a function is a generator."""
isgen = (f.__code__.co_flags & CO_GENERATOR) != 0
return isgen | 239d0854e27a16d9e99102ff9c698086119b8e35 | 22,361 |
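# Usage sketch for is_generator() above (assumes CO_GENERATOR is defined at module level,
# e.g. from inspect import CO_GENERATOR): it detects generator *functions* via code flags.
def _gen():
    yield 1
def _plain():
    return 1
assert is_generator(_gen)
assert not is_generator(_plain)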
import torch
from scipy.spatial import ConvexHull
from torch.autograd import Variable
def reward(sample_solution, use_cuda=True, name='reward'):
"""
Args:
sample_solution seq_len of [batch_size]
"""
'''
if 'TSP' in name:
batch_size = sample_solution[0].size(0)
n = len(sample_solution)
tour_len = Variable(torch.zeros([batch_size]))
if use_cuda:
tour_len = tour_len.cuda()
for i in range(n - 1):
distance = torch.norm(sample_solution[i] - sample_solution[i + 1], dim=1)
tour_len += distance
distance = torch.norm(sample_solution[n - 1] - sample_solution[0], dim=1)
tour_len += distance
reward = tour_len
'''
if 'CH' in name:
batch_size = sample_solution[0].size(0)
n = len(sample_solution)
#print "batch_size batch_size batch_size"
#print batch_size
#print "n n n"
#print n
#tour_area = Variable(torch.zeros([batch_size]))
vec_area = Variable(torch.zeros([batch_size]))
#if use_cuda:
#area = area.cuda()
for s in range(batch_size):
points = []
poly_area = 0
for t in range(n):
points.append(sample_solution[t][s].tolist())
if t >= 2:
hull = ConvexHull(points)
poly_area = max (hull.area,poly_area)
vec_area[s] = poly_area
#for i in range(n - 1):
#area = torch.norm(sample_solution[i] - sample_solution[i + 1], dim=1)
#tour_area += area
#area = torch.norm(sample_solution[n - 1] - sample_solution[0], dim=1)
#tour_area += area
#reward = tour_area
reward = vec_area
return reward | fed916437085d15b2c9c6a04486e43251c3b0422 | 22,362 |
def addRegionEntry(Id: int, parentId: int, name: str, RegionType: RegionType, alias=''):
"""
    Add custom address (region) information.
    :param Id: ID of the address
    :param parentId: parent ID of the address; must already exist
    :param name: name of the address
    :param RegionType: type of the address (RegionType)
    :param alias: alias of the address, default=''
:return:
"""
geocoding = jpype.JClass('io.patamon.geocoding.Geocoding')
try:
geocoding.addRegionEntry(Id, parentId, name, RegionType, alias)
return True
except:
return False | ba6c78842f847939f1a44b859156d15738adca58 | 22,363 |
def check_movement(pagination):
"""Check for ability to navigate backward or forward between pages."""
pagination_movements = pagination.find_element_by_xpath(
'.//div[@class="search_pagination_right"]'
).find_elements_by_class_name("pagebtn")
# Check for ability to move back
try:
move_back_a = pagination_movements[0]
assert move_back_a.text == "<"
can_move_back = True
print("Can move back, ", end="")
except Exception:
can_move_back = False
print("Can not move back, ", end="")
# Check for ability to move forward
try:
move_forward_a = pagination_movements[-1]
assert move_forward_a.text == ">"
can_move_forward = True
print("Can move forward")
except Exception:
can_move_forward = False
print("Can not move forward, ", end="")
return [can_move_back, can_move_forward] | 37bb55ae4509f8bdc98d3bf52bbef4a4a1e5d600 | 22,364 |
def glint_correct_image(imarr, glintarr, nir_band=7):
"""
Apply the sunglint removal algorithm from section III of Lyzenga et al.
2006 to a multispectral image array.
Parameters
----------
imarr : numpy array (RxCxBands shape)
The multispectral image array. See `OpticalRS.RasterDS` for more info.
glintarr : numpy array
A subset of `imarr` from an optically deep location with sun glint.
nir_band : int (Default value = 7)
The default `nir_band` value of 7 selects the NIR2 band in WorldView-2
imagery. If you're working with a different type of imagery, you will
need figure out the appropriate value to use instead. This is a zero
indexed number (the first band is 0, not 1).
Returns
-------
numpy array
A de-glinted copy of `imarr`.
Notes
-----
This deglinting method may not work well on WorldView-2 imagery because the
bands are not captured exactly concurrently. See section II B of Eugenio et
al. 2015 [1]_ for more information and a different sunglint correction
algorithm that may be more appropriate.
References
----------
.. [1] Eugenio, F., Marcello, J., Martin, J., 2015. High-Resolution Maps of
Bathymetry and Benthic Habitats in Shallow-Water Environments Using
Multispectral Remote Sensing Imagery. IEEE Transactions on Geoscience
and Remote Sensing 53, 3539–3549. doi:10.1109/TGRS.2014.2377300
"""
# calculate the covariance ratios
cov_rats = cov_ratios(glintarr,nir_band)
# get the NIR mean
nirm = nir_mean(glintarr,nir_band)
# we don't want to try to apply the correction
# to the NIR band
nbands = imarr.shape[-1]
    bands = list(range(nbands))
bands.remove(nir_band)
outarr = imarr.copy()
for i,band in enumerate(bands):
outarr[:,:,band] = imarr[:,:,band] - cov_rats[i] * ( imarr[:,:,nir_band] - nirm )
# this will leave the NIR band unchanged
return outarr | 2982883b37fa2452b12311c62f4d0c404f1718f9 | 22,365 |
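# Minimal usage sketch for glint_correct_image() above (hypothetical arrays; `imarr` is a
# rows x cols x bands image and `glintarr` is an N x bands sample over optically deep water):
# deglinted = glint_correct_image(imarr, glintarr, nir_band=7)  # the NIR band is left unchanged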
def get_named_game(id):
"""Get specific game from GB API."""
query_uri = f"{GB_GAME_URL}{id}?format=json&api_key={API_KEY}"
return query_for_goty(query_uri, expect_list=False, always_return_something=False) | 4b4c7efeecace2d07b5ce7052cfa550d233a61bb | 22,366 |
from datetime import datetime
import isoweek
import pytz
def isoweek_datetime(year, week, timezone='UTC', naive=False):
"""
Returns a datetime matching the starting point of a specified ISO week
in the specified timezone (default UTC). Returns a naive datetime in
UTC if requested (default False).
>>> isoweek_datetime(2017, 1)
datetime.datetime(2017, 1, 2, 0, 0, tzinfo=<UTC>)
>>> isoweek_datetime(2017, 1, 'Asia/Kolkata')
datetime.datetime(2017, 1, 1, 18, 30, tzinfo=<UTC>)
>>> isoweek_datetime(2017, 1, 'Asia/Kolkata', naive=True)
datetime.datetime(2017, 1, 1, 18, 30)
>>> isoweek_datetime(2008, 1, 'Asia/Kolkata')
datetime.datetime(2007, 12, 30, 18, 30, tzinfo=<UTC>)
"""
naivedt = datetime.combine(isoweek.Week(year, week).day(0), datetime.min.time())
if isinstance(timezone, str):
tz = pytz.timezone(timezone)
else:
tz = timezone
dt = tz.localize(naivedt).astimezone(pytz.UTC)
if naive:
return dt.replace(tzinfo=None)
else:
return dt | d109d8ca0443b6454c7ab58a9482d5c52ec90799 | 22,367 |
def returned(n):
"""Generate a random walk and return True if the walker has returned to
the origin after taking `n` steps.
"""
    ## `takei` yields lazily so we can short-circuit and avoid computing the rest of the walk
for pos in randwalk() >> drop(1) >> takei(xrange(n-1)):
if pos == Origin:
return True
return False | 6c501a58c6d2abe9d9fa76736fabf75f3f78dbd9 | 22,368 |
def get_ego_as_agent(frame: np.ndarray) -> np.ndarray:
"""Get a valid agent with information from the AV. Ford Fusion extent is used.
:param frame: The frame from which the Ego states are extracted
:return: An agent numpy array of the Ego states
"""
ego_agent = np.zeros(1, dtype=AGENT_DTYPE)
ego_agent[0]["centroid"] = frame["ego_translation"][:2]
ego_agent[0]["yaw"] = rotation33_as_yaw(frame["ego_rotation"])
ego_agent[0]["extent"] = np.asarray((EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH, EGO_EXTENT_HEIGHT))
return ego_agent | 249ca88c8aa01c7f06c6acf2d8427ca158926603 | 22,369 |
import json
def load_users(dir="private/users"):
"""load_users will load up all of the user json files in the dir."""
files = get_files_in_dir(dir)
dict = {}
for filename in files:
user = {}
filepath = join(dir, filename)
with open(filepath) as file:
try:
user = json.load(file)
except json.JSONDecodeError:
print("Could not decode file {0}".format(filepath))
except UnicodeDecodeError:
print("Could not decode unicode in {0}".format(filepath))
id = user.get("user_id")
dict[id] = user
return dict | e9181ff8f34a6c351f874649ec328d14b4ba2784 | 22,370 |
def _scale_annots_dict(annot, new_sz, ann_im_sz):
"""Scale annotations to the new_sz, provided the original ann_im_sz.
:param annot: bounding box in dict format
:param new_sz: new size of image (after linear transforms like resize)
:param ann_im_sz: original size of image for which the bounding boxes were given.
:return:
"""
d = {}
for k, v in annot.items():
if k.startswith('x'):
v_ = new_sz[0] * v / ann_im_sz[0]
elif k.startswith('y'):
v_ = new_sz[1] * v / ann_im_sz[1]
else:
# don't destroy other keys
v_ = v
d.update({k: v_})
return d | 44a0f9bf0b1a9befbaea95fd6b6fd5d9440178a4 | 22,371 |
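# Usage sketch for _scale_annots_dict() above: keys starting with 'x'/'y' are scaled, others pass through.
_box = {'xmin': 100, 'ymin': 50, 'xmax': 400, 'ymax': 300, 'label': 'car'}
assert _scale_annots_dict(_box, new_sz=(500, 400), ann_im_sz=(1000, 800)) == {
    'xmin': 50.0, 'ymin': 25.0, 'xmax': 200.0, 'ymax': 150.0, 'label': 'car'}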
from typing import Any
from typing import Tuple
from typing import List
import inspect
def get_handlers_in_instance(inst: Any) -> Tuple[List[Handler], List[Handler]]:
"""Get all handlers from the members of an instance.
Args:
inst: Instance to get handlers from.
Returns:
2-tuple containing the list of all registration and all subscription
handlers.
Raises:
TypeError: If inst isn't an instance.
"""
if inspect.isclass(inst):
raise TypeError("expected instance, not class. "
"Please create an instance of your template class first")
registrations = []
subscriptions = []
for _, value in inspect.getmembers(inst, callable):
if inspect.ismethod(value):
reg, sub = get_bound_handlers(value)
else:
reg, sub = get_handlers(value)
if reg is not None:
registrations.append(reg)
if sub is not None:
subscriptions.append(sub)
return registrations, subscriptions | c4f268d06fba208ce2a40bac3700b2c43d394051 | 22,372 |
def django_op_to_flag(op):
"""
Converts a django admin operation string to the matching
grainy permission flag
Arguments:
- op <str>
Returns:
- int
"""
return DJANGO_OP_TO_FLAG.get(op, 0) | 6d221271d69db3ed923395b920ee7aba30b50bab | 22,373 |
def rgb2gray(images):
"""将RGB图像转为灰度图"""
# Y' = 0.299 R + 0.587 G + 0.114 B
# https://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
return np.dot(images[..., :3], [0.299, 0.587, 0.114]) | f011345d43f49e1b7d625a4d379a72ec684cab00 | 22,374 |
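# Usage sketch for rgb2gray() above (assumes numpy is available as np): a pure-red pixel maps to 0.299.
_rgb = np.array([[[1.0, 0.0, 0.0]]])  # shape (1, 1, 3)
assert np.allclose(rgb2gray(_rgb), 0.299)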
def mIou(y_true, y_pred, n_classes):
"""
Mean Intersect over Union metric.
Computes the one versus all IoU for each class and returns the average.
Classes that do not appear in the provided set are not counted in the average.
Args:
y_true (1D-array): True labels
y_pred (1D-array): Predicted labels
n_classes (int): Total number of classes
Returns:
mean Iou (float)
"""
iou = 0
n_observed = n_classes
for i in range(n_classes):
y_t = (np.array(y_true) == i).astype(int)
y_p = (np.array(y_pred) == i).astype(int)
inter = np.sum(y_t * y_p)
union = np.sum((y_t + y_p > 0).astype(int))
if union == 0:
n_observed -= 1
else:
iou += inter / union
return iou / n_observed | aebb9a367f45172b999ddda8eb024371f3e0df3d | 22,376 |
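# Usage sketch for mIou() above (assumes numpy is available as np):
# class 0 IoU = 1/2, class 1 IoU = 2/3, mean = 7/12.
assert abs(mIou([0, 0, 1, 1], [0, 1, 1, 1], n_classes=2) - 7 / 12) < 1e-9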
import operator
from itertools import islice, tee
def drop_last(iterable, n=1):
"""Drops the last item of iterable"""
t1, t2 = tee(iterable)
return map(operator.itemgetter(0), zip(t1, islice(t2, n, None))) | edef599cc1697cd4d8f1e1df2d479e123945aa41 | 22,377 |
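# Usage sketch for drop_last() above: lazily drops the last n items of any iterable.
assert list(drop_last([1, 2, 3, 4])) == [1, 2, 3]
assert list(drop_last(range(5), n=2)) == [0, 1, 2]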
def density(height: float) -> float:
"""
Returns the air density in slug/ft^3 based on altitude
Equations from https://www.grc.nasa.gov/www/k-12/rocket/atmos.html
:param height: Altitude in feet
:return: Density in slugs/ft^3
"""
if height < 36152.0:
temp = 59 - 0.00356 * height
p = 2116 * ((temp + 459.7)/518.6)**5.256
elif 36152 <= height < 82345:
temp = -70
p = 473.1*np.exp(1.73 - 0.000048*height)
else:
temp = -205.05 + 0.00164 * height
p = 51.97*((temp + 459.7)/389.98)**-11.388
rho = p/(1718*(temp+459.7))
return rho | ec85f9384035808084a024eb5a374ecfe7a64a2f | 22,378 |
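# Usage sketch for density() above: at sea level (0 ft) this returns roughly the standard
# atmospheric density of about 0.00238 slug/ft^3.
assert abs(density(0.0) - 0.00238) < 1e-4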
def has_paired_before() -> bool:
"""Simple check for whether a device has previously been paired.
This does not verify that the pairing information is valid or up to date.
The assumption being - if it's previously paired, then it has previously
connected to the internet.
"""
identity = IdentityManager.get()
return identity.uuid != "" | f43ddf1290fcb101f0a0ae3d0fb6eabc368113c2 | 22,380 |
def caller_linkedin(user_input: dict) -> dict:
"""
Call LinkedIn scraping methods to get info about found and potential subjects.
Args:
`user_input`: user input represented as a dictionary.
Returns:
`dict`: the dictionary with information about found or potential subjects.
"""
results_to_filter = {}
linkedin_obj = LinkedinSearchSubjects(user_input)
linkedin_obj.linkedin_search()
linkedin_obj.linkedin_find_ids()
linkedin_obj.linkedin_search_for_info()
if linkedin_obj.found_subjects_info:
results_to_filter["linkedin"] = {"found_subjects": linkedin_obj.found_subjects_info}
else:
results_to_filter["linkedin"] = {
"potential_subjects_after_filtering":
linkedin_obj.potential_subjects_info_after_filtering
}
return results_to_filter | abb6277e699efa184949faf2b5c6585734be2f53 | 22,381 |
def service_request_eqf(stub_response):
"""
Return a function to be used as the value matching a ServiceRequest in
:class:`EQFDispatcher`.
"""
def resolve_service_request(service_request_intent):
eff = concretize_service_request(
authenticator=object(),
log=object(),
service_configs=make_service_configs(),
throttler=lambda stype, method, tid: None,
tenant_id='000000',
service_request=service_request_intent)
# "authenticate"
eff = resolve_authenticate(eff)
# make request
return resolve_effect(eff, stub_response)
return resolve_service_request | f2a052f975ad8c94a58de50c1eb8aaa563522ca1 | 22,382 |