content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M)
---|---|---|
def invert_color(color: str, *, black_or_white: bool = False) -> str:
"""Return a color with opposite red, green and blue values.
Example: ``invert_color('white')`` is ``'#000000'`` (black).
This function uses tkinter for converting the color to RGB. That's
why a tkinter root window must have been created, but *color* can be
any Tk-compatible color string, like a color name or a ``'#rrggbb'``
string. The return value is always a ``'#rrggbb'`` string (also compatible
with Tk).
If ``black_or_white=True`` is set, then the result is always ``"#000000"``
(black) or ``"#ffffff"`` (white), depending on whether the color is bright
or dark.
"""
if black_or_white:
return "#000000" if is_bright(color) else "#ffffff"
widget = porcupine.get_main_window() # any widget would do
# tkinter uses 16-bit colors, convert them to 8-bit
r, g, b = (value >> 8 for value in widget.winfo_rgb(color))
return "#%02x%02x%02x" % (0xFF - r, 0xFF - g, 0xFF - b) | cf6a84957489cba046aebc01457bfd6453bc90b6 | 2,070 |
import numpy as np
def pcaImageCube(ref, mask = None, pcNum = None, cube=True, ref3D=True, outputEval = False):
"""Principal Component Analysis,
Input:
ref: Cube of references, 3D;
if ref3D==False, 2D (flattened and normalized, with masked region excluded.)
mask: mask, 2D or 1D;
pcNum: how many principal components are needed;
cube: output as a cube? Otherwise a flattened 2D component array will be returned.
ref3D: True by default.
outputEval: whether to return the eigenvalues, False by default.
Output:
The principal components, either cube (3D) or flattened (2D)."""
if mask is None:
mask = np.ones(ref[0].shape)
if pcNum is None:
pcNum = ref.shape[0]
if ref3D:
mask_flat = mask.flatten()
ref_flat = np.zeros((ref.shape[0], np.where(mask_flat == 1)[0].shape[0]))
for i in range(ref_flat.shape[0]):
ref_flat[i], std = flattenAndNormalize(ref[i], mask)
else:
ref_flat = ref
if np.shape(mask.shape)[0] == 1: #1D mask, already flattened
mask_flat = mask
elif np.shape(mask.shape)[0] == 2: #2D mask, need flatten
mask_flat = mask.flatten()
covMatrix = np.dot(ref_flat, np.transpose(ref_flat))
eVal, eVec = np.linalg.eig(covMatrix)
index = (-eVal).argsort()[:pcNum]
eVec = eVec[:,index]
components_flatten = np.dot(np.transpose(eVec), ref_flat)
pc_flat = np.zeros((pcNum, mask_flat.shape[0]))
for i in range(pc_flat.shape[0]):
pc_flat[i][np.where(mask_flat==1)] = components_flatten[i]/np.sqrt(np.dot(components_flatten[i], np.transpose(components_flatten[i])))
if cube == False:
return pc_flat
pc_cube = np.zeros((pcNum, mask.shape[0], mask.shape[1]))
width = mask.shape[0]
for i in range(pc_flat.shape[0]):
pc_cube[i] = np.array(np.split(pc_flat[i], width))
if not outputEval:
return pc_cube
else:
return pc_cube, eVal[index] | 96a05ef8fd6a618af91903b9c0fc9fc49cfd8130 | 2,071 |
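`flattenAndNormalize` is not included in the snippet above; a plausible sketch consistent with how it is called (returning the flattened, mask-selected, normalized frame together with its standard deviation) is shown below — the name, signature, and normalization choice are assumptions.

```python
import numpy as np

def flattenAndNormalize(frame, mask):
    """Hypothetical helper: flatten `frame`, keep pixels where mask == 1,
    and scale to zero mean and unit standard deviation."""
    values = frame.flatten()[mask.flatten() == 1]
    std = np.std(values)
    return (values - np.mean(values)) / std, std
```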
def get_cross_kerr_table(epr, swp_variable, numeric):
"""
Function to re-organize the cross-Kerr results once the quantum analysis is finished
Parameters:
-------------------
epr : Object of QuantumAnalysis class
swp_variable : the variable swept in data according to which things will be sorted
numeric : Whether numerical diagonalization of the data was performed
Use notes:
-------------------
* It is assumed the epr.analyze_all_variations has already been called and analysis is finished.
"""
if numeric:
f1 = epr.results.get_frequencies_ND(vs=swp_variable)
chis = epr.get_chis(numeric=numeric,swp_variable=swp_variable)
else:
f1 = epr.results.get_frequencies_O1(vs=swp_variable)
chis = epr.get_chis(numeric=numeric,swp_variable=swp_variable)
#print(f1)
#print(chis)
swp_indices = chis.index.levels[0]
mode_indices = chis.index.levels[1]
#print(mode_indices)
mode_combinations = list(zip(mode_indices,mode_indices))
diff_mode_combinations = list(it.combinations_with_replacement(mode_indices,2))
mode_combinations.extend(diff_mode_combinations)
organized_data = pd.DataFrame({swp_variable:swp_indices})
organized_data.set_index(swp_variable,inplace=True)
for mode_indx in mode_indices:
organized_data['f_'+str(mode_indx)+'(GHz)']=np.round(f1.loc[mode_indx].values/1000,3)
for combo_indx in mode_combinations:
temp_chi_list = [chis.loc[swp_indx].loc[combo_indx] for swp_indx in swp_indices]
organized_data['chi_'+str(combo_indx[0])+str(combo_indx[1])+' (MHz)']=np.round(temp_chi_list,4)
return organized_data | 8dfa860f73c5453ee970f204d4e03d6cef93d010 | 2,072 |
def getSpectra(dataframe, indices):
""" Returns the files for training and testing
Inputs
-----------
dataframe: pd.DataFrame object from which we need to get spectra
indices: row values for which we need the spectra
Returns
-----------
spec_vals: pd.DataFrame object containing spectra values for given
indices
"""
colList = dataframe.columns
spec_inds = [index for index in range(len(colList))
if colList[index].startswith('Spectrum_')]
spec_cols = colList[spec_inds]
spec_vals = dataframe[spec_cols].iloc[indices]
return spec_vals | 606757ffdde39c0847dd0402342441931d66a081 | 2,073 |
def config2():
"""Configure for one of the restart tests."""
return Config.load(f"""
id: cbc_binary_toolkit
version: 0.0.1
database:
_provider: tests.component.persistor_fixtures.mock_persistor.MockPersistorFactory
engine:
_provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory
name: {ENGINE_NAME}
feed_id: {FEED_ID}
type: local
Test: TestPassed
""") | ded0b43392e7e0308cca0f773d2ed687fd0818de | 2,074 |
def diff_cases(couch_cases, log_cases=False):
"""Diff cases and return diff data
:param couch_cases: dict `{<case_id>: <case_json>, ...}`
:returns: `DiffData`
"""
assert isinstance(couch_cases, dict), repr(couch_cases)[:100]
assert "_diff_state" in globals()
data = DiffData()
dd_count = partial(metrics_counter, tags={"domain": get_domain()})
case_ids = list(couch_cases)
sql_case_ids = set()
for sql_case in CaseAccessorSQL.get_cases(case_ids):
case_id = sql_case.case_id
sql_case_ids.add(case_id)
couch_case, diffs, changes = diff_case(sql_case, couch_cases[case_id], dd_count)
if diffs:
dd_count("commcare.couchsqlmigration.case.has_diff")
if changes:
dd_count("commcare.couchsqlmigration.case.did_change")
data.doc_ids.append(case_id)
data.diffs.append((couch_case['doc_type'], case_id, diffs))
data.changes.append((couch_case['doc_type'], case_id, changes))
if log_cases:
log.info("case %s -> %s diffs", case_id, len(diffs))
diffs, changes = diff_ledgers(case_ids, dd_count)
data.diffs.extend(diffs)
data.changes.extend(changes)
add_missing_docs(data, couch_cases, sql_case_ids, dd_count)
return data | 545b35b7e37174f93df9e566bc0e1cd777948563 | 2,076 |
import numpy as np
def rk4(a, b, x0, y0, nu=0, F=0, xdot = x_dot, ydot = y_dot):
"""rk(a, b, x0, y0, nu=0, F=0, xdot = x_dot, ydot = y_dot)
Args:
a (float) : Lower bound, t = a*2*pi
b (float) : Upper bound, t = b*2*pi
x0 (float) : Initial position of ball
y0 (float) : Initial velocity of ball
nu (float) : Constant damping coefficient
F (float) : Constant force amplitude coefficient
xdot (function) : Part of the differential equation
ydot (function) : Part of the differential equation
Returns:
t (array) : Array over the time interval with equal dt = .001
x (array) : Array containing the position of the ball at each time in the time array
y (array) : Array containing the velocity of the ball at each time in the time array
"""
dt = 0.001
start = 2*a*np.pi
end = 2*b*np.pi
n = int(np.ceil((end-start)/dt))
t = np.linspace(start,end,n)
x = np.zeros(n)
y = np.zeros(n)
x_dot_vec = np.zeros(n)
y_dot_vec = np.zeros(n)
x[0] = x0
y[0] = y0
for k in range(n):
x_dot_vec[k] = xdot(y[k])
y_dot_vec[k] = ydot(t[k],y[k],x[k],nu,F)
if k == n-1:
break
else:
k1y = dt*ydot(t[k],y[k],x[k],nu,F)
k2y = dt*ydot((t[k]+dt/2),(y[k]+k1y/2),x[k],nu,F)
k3y = dt*ydot((t[k]+dt/2),(y[k]+k2y/2),x[k],nu,F)
k4y = dt*ydot((t[k]+dt),(y[k]+k3y),x[k],nu,F)
rky = (k1y+(2*k2y)+(2*k3y)+k4y)/6
y[k+1] = y[k]+rky
k1x = dt*xdot(y[k])
k2x = dt*xdot(y[k]+k1x/2)
k3x = dt*xdot(y[k]+k2x/2)
k4x = dt*xdot(y[k]+k3x)
rkx = (k1x+(2*k2x)+(2*k3x)+k4x)/6
x[k+1] = x[k]+rkx
return (t,x,y) | acd97edb74bc27d03908962e52431bc3fdb7a571 | 2,077 |
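A small usage sketch: the snippet itself does not define `x_dot`/`y_dot`, so hand-written right-hand sides for a damped, periodically forced oscillator are assumed here and passed in explicitly.

```python
import numpy as np

def x_dot(y):                      # position changes at the current velocity
    return y

def y_dot(t, y, x, nu, F):         # restoring force, damping, periodic drive
    return -x - nu * y + F * np.cos(t)

t, x, y = rk4(0, 2, x0=1.0, y0=0.0, xdot=x_dot, ydot=y_dot)  # two full periods
```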
from furious.async import Async
def decode_callbacks(encoded_callbacks):
"""Decode the callbacks to an executable form."""
callbacks = {}
for event, callback in encoded_callbacks.iteritems():
if isinstance(callback, dict):
async_type = Async
if '_type' in callback:
async_type = path_to_reference(callback['_type'])
callback = async_type.from_dict(callback)
else:
callback = path_to_reference(callback)
callbacks[event] = callback
return callbacks | 0ff066a21bb2f0c0e0979898d218add1e46da544 | 2,078 |
def create_conv_block(
use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams,
is_training, freeze_batchnorm, depth):
"""Create Keras layers for depthwise & non-depthwise convolutions.
Args:
use_depthwise: Whether to use depthwise separable conv instead of regular
conv.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
filters. Can be an int if both values are the same.
padding: One of 'VALID' or 'SAME'.
stride: A list of length 2: [stride_height, stride_width], specifying the
convolution stride. Can be an int if both strides are the same.
layer_name: String. The name of the layer.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
is_training: Indicates whether the feature generator is in training mode.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
depth: Depth of output feature maps.
Returns:
A list of conv layers.
"""
layers = []
if use_depthwise:
kwargs = conv_hyperparams.params()
# Both the regularizer and initializer apply to the depthwise layer,
# so we remap the kernel_* to depthwise_* here.
kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
layers.append(
tf.keras.layers.SeparableConv2D(
depth, [kernel_size, kernel_size],
depth_multiplier=1,
padding=padding,
strides=stride,
name=layer_name + '_depthwise_conv',
**kwargs))
else:
layers.append(tf.keras.layers.Conv2D(
depth,
[kernel_size, kernel_size],
padding=padding,
strides=stride,
name=layer_name + '_conv',
**conv_hyperparams.params()))
layers.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
layers.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
return layers | 08c45a1ca62ff290d5e34e1cb544618dababaad1 | 2,079 |
import numpy as np
def select_eps_for_division(dtype):
"""Selects default values for epsilon to make divisions safe based on dtype.
This function returns an epsilon slightly greater than the smallest positive
floating number that is representable for the given dtype. This is mainly used
to prevent division by zero, which produces Inf values. However, if the
numerator is orders of magnitude greater than `1.0`, eps should also be
increased accordingly. Only floating types are supported.
Args:
dtype: The `tf.DType` of the tensor to which eps will be added.
Raises:
ValueError: If `dtype` is not a floating type.
Returns:
A `float` to be used to make operations safe.
"""
return 10.0 * np.finfo(dtype.as_numpy_dtype).tiny | 7204b2b694c6df98af4608562616655b3c198178 | 2,080 |
def bpm_to_mspt(bpm, res=480):
"""
Converts a beats-per-minute value to milliseconds per tick, given ``res`` ticks per quarter note
"""
return 60000 / res / bpm | 6b962b8253eac29f52c48ca89a6dce0417adb11b | 2,082 |
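A quick check of the arithmetic: 60000 ms per minute divided by the tick resolution and the tempo.

```python
print(bpm_to_mspt(120))          # 60000 / 480 / 120 ≈ 1.042 ms per tick
print(bpm_to_mspt(120, res=1))   # 500.0 ms per quarter note at 120 BPM
```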
import numpy as np
def pseudorandom(n, p, key):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], key=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], key=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
x = np.random.RandomState(key).random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out | 5ec3dc8e66451a00d1f13f1df1df680879a16bc6 | 2,084 |
def next_hidden(s, A):
"""From a given state s, use the transition matrix A to generate the next
hidden state.
"""
return choose_idx(A[s]) | cc0b106ebeaa98ac2aeba947bd9ed0f653d233b5 | 2,085 |
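`choose_idx` is not shown in the snippet; one common way to implement it (sampling an index from a row of transition probabilities) is sketched below — the name and exact behaviour are assumptions.

```python
import numpy as np

def choose_idx(probabilities):
    """Hypothetical helper: draw an index according to the given probabilities."""
    return int(np.searchsorted(np.cumsum(probabilities), np.random.random()))
```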
import torch
def create_network_rcnn(cls, opt):
"""Separate function for rcnn, which always loads weights first, no init."""
net = cls(opt)
net.print_network()
util.load_network_path(net, opt.fastercnn_loc, strict=True, rcnn_load=True)
if len(opt.gpu_ids) > 0:
assert(torch.cuda.is_available())
net.cuda()
return net | d653aa9435435ace4f10b134d28ee474353805bb | 2,086 |
import tkinter
from tkinter import ttk
def get_board_frame(window, mqtt_sender):
"""Builds the chessboard GUI."""
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
frame_label = ttk.Label(frame, text="Board")
get_state = ttk.Button(frame, text="Get state")
get_state["command"] = lambda: handle_get_state(mqtt_sender)
mqtt_sender.state = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
box = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
frame_label.grid()
get_state.grid()
hint = {"0": "A", "1": "B", "2": "C", "3": "D", "4": "E", "5": "F", "6": "G", "7": "H"}
for k in range(8):
note = ttk.Label(frame, text=str(hint[str(k)]))
note.grid(row=0, column=k + 2)
for j in range(2):
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=1)
for k in range(8):
mqtt_sender.state[j][k] = tkinter.IntVar(value=1)
box[j][k] = ttk.Checkbutton(frame, variable=mqtt_sender.state[j][k])
box[j][k].grid(row=j + 1, column=k + 2)
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=10)
for j in range(2, 6):
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=1)
for k in range(8):
mqtt_sender.state[j][k] = tkinter.IntVar()
box[j][k] = ttk.Checkbutton(frame, variable=mqtt_sender.state[j][k])
box[j][k].grid(row=j + 1, column=k + 2)
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=10)
for j in range(6, 8):
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=1)
for k in range(8):
mqtt_sender.state[j][k] = tkinter.IntVar(value=1)
box[j][k] = ttk.Checkbutton(frame, variable=mqtt_sender.state[j][k])
box[j][k].grid(row=j + 1, column=k + 2)
note = ttk.Label(frame, text=str(j + 1))
note.grid(row=j + 1, column=10)
for k in range(8):
note = ttk.Label(frame, text=str(hint[str(k)]))
note.grid(row=10, column=k + 2)
return frame | d6f5a13312989613f8b945c8e73cedb9ee7e3851 | 2,087 |
import dash_bootstrap_components as dbc
from dash import dcc, html
def body():
"""Get map page body.
Returns:
html.Div: dash layout
"""
graph_map = get_graph_map()
if graph_map is None:
return html.Div(
dbc.Alert("Cannot retrieve data! Try again later!", color="danger")
)
# Put everything in a dcc container and return
body = dbc.Container(
[
dbc.Row(
dbc.Col(
dbc.Card(
dbc.CardBody(
[
html.P(
"A graph of the UK rail network generated from \
individual train movements captured from the Network Rail feeds and a subset of known fixed locations. \
Each node represents a train describer 'berth' which usually, but not always, represents a signal.\
Red nodes indicate the live locations of trains on the network, \
whilst the node size indicates the frequency of usage. Hovering over each node provides additional information.\
The graph is updated every 5 seconds. \
Only the west coast mainline central signal area (around Manchester) is considered for now."
),
]
),
color="secondary",
),
width={"size": 10, "offset": 1},
)
),
dbc.Row(dbc.Col(dcc.Graph(id="graph-map", figure=graph_map))),
dcc.Interval(
id="graph-page-interval",
interval=1 * 5000,  # in milliseconds
n_intervals=0,
),
],
fluid=True,
)
return body | 6474602d65f71dadce26e043c62f35ec0c489a0f | 2,089 |
from datetime import datetime
def custom_strftime(formatting: str, date: datetime) -> str:
"""Custom strftime formatting function, using fancy number suffixes (1st, 2nd, 3rd...)"""
return date.strftime(formatting).replace("{S}", str(date.day) + suffix(date.day)) | 3199f6e0590f4bb01c1792976c75c7a0d4208831 | 2,090 |
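`suffix` is not included in the snippet; a typical ordinal-suffix helper it may be relying on looks like this (the exact implementation is an assumption).

```python
def suffix(day: int) -> str:
    """Ordinal suffix for a day of the month: 1 -> 'st', 2 -> 'nd', 3 -> 'rd', else 'th'."""
    if 11 <= day % 100 <= 13:
        return "th"
    return {1: "st", 2: "nd", 3: "rd"}.get(day % 10, "th")

# custom_strftime("{S} of %B, %Y", datetime(2021, 3, 2)) -> "2nd of March, 2021"
```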
from twitter import Twitter, OAuth
def setup_twitter(config_file='config.py'):
"""Setup auth keys and session with Twitter client."""
config = {}
exec(open(config_file).read(), config)
twitter_obj = Twitter(auth=OAuth(config["access_key"],
config["access_secret"],
config["consumer_key"],
config["consumer_secret"]))
return twitter_obj | bb811f3b6cabbe5dbf8f77d8e5217078f9a57c22 | 2,092 |
import datetime
import numpy as np
import pandas as pd
def create_datediff_test_nulls_df():
"""Create DataFrame with nulls only for DateDifferenceTransformer tests."""
df = pd.DataFrame(
{
"a": [
datetime.datetime(1993, 9, 27, 11, 58, 58),
np.NaN,
],
"b": [
np.NaN,
datetime.datetime(2019, 12, 25, 11, 58, 58),
],
},
index=[0, 1],
)
return df | 542fd3fdf6fcd93a208e3f1f9cd2a76a0c34e46b | 2,093 |
from datetime import date
import holidays
def business_days_list(start_date: date, end_date: date) -> list[date]:
""" business days """
us_holidays = holidays.UnitedStates()
days: list[date] = []
for the_date in get_list_of_days(start_date, end_date):
if (the_date.weekday() < 5) and (the_date not in us_holidays):
days.append(the_date)
return days | daa36fe5fda5fc0857c1b29c75d7e784cafefe93 | 2,094 |
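`get_list_of_days` is not part of the snippet; a plausible sketch of it (every calendar day in the closed interval) is shown below — its name and signature are assumptions.

```python
from datetime import date, timedelta

def get_list_of_days(start_date: date, end_date: date) -> list[date]:
    """Hypothetical helper: all calendar days from start_date to end_date inclusive."""
    n_days = (end_date - start_date).days + 1
    return [start_date + timedelta(days=i) for i in range(n_days)]
```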
def test_3d():
"""Test FE in 3D"""
def setone(arr):
arr[0, :, (arr.shape[0] - 1) // 2] = 1.0
return arr
assert pipe(
5,
lambda x: np.zeros((1, x, x, x), dtype=int),
setone,
solve_fe(elastic_modulus=(1.0, 10.0), poissons_ratio=(0.0, 0.0)),
lambda x: np.allclose(
[np.mean(x["strain"][0, ..., i]) for i in range(6)],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
),
) | 32f3d5fc18a31f01b2e366a3540ec77dd0e6080f | 2,095 |
from typing import List
import xraydb
def get_xray_edges(elements: List[str], wmin: float, wmax: float):
"""
Using xraydb, return the absorption edges
Parameters
----------
elements: List[str]
A list of the element symbols from which to query absorption edges.
wmin: float
The smallest wavelength edge to return
wmax: float
The largest wavelength edge to return
Returns
-------
output_table: List[str]
A table containing absorption edges.
- Elem: the element
- Energy: the photoionisation energy
- Frequency: the frequency of the absorption edge
- Wavelength: the wavelength of the absorption edge
"""
element_absortion_edges_dicts = []
for element in elements:
edges = xraydb.xray_edges(element)
element_absortion_edges_dicts.append(edges)
output_table = []
output_table.append("Elem {:15s} {:15s} {:15s}\n".format("Energy eV", "Frequency Hz", "Wavelength AA"))
for i, edges in enumerate(element_absortion_edges_dicts):
print("-" * COL_LEN)
print("{}: \n".format(elements[i]))
print("{:15s} {:15s} {:15s}".format("Energy eV", "Frequency Hz", "Wavelength AA"))
keys = edges.keys()
prev_key = "K"
for key in keys:
# This bit will skip edges which have the same energy, I hope
if prev_key != key:
if edges[prev_key][0] == edges[key][0]:
continue
prev_key = key
energy = edges[key][0]
frequency = energy / HEV
wavelength = C / frequency / ANGSTROM
print("{:9.1f} {:1.12e} {:13.1f}".format(energy, frequency, wavelength))
if wmin < wavelength < wmax:
output_table_line = "{:4s} {:9.1f} {:1.12e} {:13.1f}\n".format(
elements[i], energy, frequency, wavelength
)
output_table.append(output_table_line)
print()
print("-" * COL_LEN)
with open("xray_edges.txt", "w") as f:
f.writelines(output_table)
return output_table | b78c0b999f4faf9e749b3b8388f0a581a5bff476 | 2,096 |
import urllib
import json
def get_mobility_link():
"""Get Apple Mobility data link
"""
# get link
with urllib.request.urlopen(index_url) as url:
json_link = json.loads(url.read().decode())
base_path = json_link['basePath']
csv_path = json_link['regions']['en-us']['csvPath']
link = site_url + \
base_path + csv_path
return link | a097e9c0b787a522283d31a8a13d4d13b824b77b | 2,097 |
from datetime import datetime, timedelta
def active_shift(app, token, gqlClient):
"""returns the currently active shift if it exists"""
with app.test_request_context():
request.headers = {'authorization': token}
query = '''mutation CreateShift($Active: Boolean!, $StartTime: String) {
createShift(active: $Active, startTime: $StartTime) {
shift { id startTime active }
}
}
'''
vars = {
'StartTime': (datetime.now() - timedelta(hours=5)).strftime('%Y-%m-%d %H:%M:%S'),
'Active': True
}
res = gqlClient.execute(query, context_value=request, variables=vars)
print("query result:", res)
assert res['data']['createShift']['shift']['active']
shift = res['data']['createShift']['shift']
return shift | 345ba7f30421e28b879bc5b14409c437b9038d89 | 2,098 |
def get_batch_size(input):
"""
Infer the mini-batch size according to `input`.
Args:
input (tf.Tensor): The input placeholder.
Returns:
int or tf.Tensor: The batch size.
"""
if input.get_shape() is None:
batch_size = tf.shape(input)[0]
else:
batch_size = int_shape(input)[0]
if batch_size is None:
batch_size = tf.shape(input)[0]
return batch_size | 66201a3a8223ad442f54ac9551060093ee828f9b | 2,099 |
import numpy as np
from scipy.spatial import distance as dist
def _order_points(pts: np.ndarray) -> np.ndarray:
"""Order the four points of a region as top-left, top-right, bottom-right, bottom-left.
Args:
pts (np.ndarray): The coordinates of the four points.
Returns:
np.ndarray: The points ordered as [tl, tr, br, bl].
"""
x_sorted = pts[np.argsort(pts[:, 0]), :]
left_most = x_sorted[:2, :]
right_most = x_sorted[2:, :]
left_most = left_most[np.argsort(left_most[:, 1]), :]
(tl, bl) = left_most
distance = dist.cdist(tl[np.newaxis], right_most, "euclidean")[0]
(br, tr) = right_most[np.argsort(distance)[::-1], :]
return np.array([tl, tr, br, bl], dtype="float32") | 46dfb8a8e042929b2475bda2b01b39e5d871e02d | 2,100 |
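A quick usage check with made-up corner coordinates:

```python
import numpy as np

pts = np.array([[10, 110], [110, 100], [100, 10], [5, 5]], dtype="float32")
print(_order_points(pts))
# rows come back ordered as top-left, top-right, bottom-right, bottom-left:
# [5, 5], [100, 10], [110, 100], [10, 110]
```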
import logging
import math
import numpy as np
import skimage.measure
import matplotlib.pyplot as plt
import tensorflow as tf
def to_image(obj):
""" general function for viewing objects of any type (work in progress)
returns image (numpy array), description
description says what was done in order to display the image
"""
descr = ""
if (tf.is_tensor(obj)):
obj = obj.numpy()
logger = logging.getLogger()
old_level = logger.level
logger.setLevel(100)
if obj.shape:
#print(f"Max {max(obj)}")
if len(obj.shape) == 2: # grayscale image
obj = norm(obj)
descr += f"Grayscale Image, mean:{obj.mean()}, var:{obj.var()} \n"
if (obj.var() < 0.01):
descr += f"Mean abgzogen {obj.mean()} \n"
obj = obj - obj.mean()
if (obj.mean() < 0.01):
i = 0
while (obj.mean() < 0.1 and obj.shape[0] > 10):
i += 1
obj = skimage.measure.block_reduce(obj, (2,2), np.max)
descr += f"Sehr dunkles Bild, maxpooling ({i} mal)"
# in "rgb" umwandeln
obj = np.stack((obj,)*3, axis=-1)
return obj,descr
elif len(obj.shape) == 3: # could be an image
if obj.shape[0] == 3:
obj = np.transpose(obj,(1,2,0))
descr += "channel first \n"
if obj.shape[2] == 3: # regular image
obj = norm(obj)
descr += f"Mean {obj.mean()}, Variance {obj.var()}\n"
if (obj.var() < 0.1):
obj = obj - obj.mean()
descr += f"Mean abgezogen \n"
if (obj.mean() < 0.1):
i= 0
while (obj.mean() < 0.1 and obj.shape[0] > 10):
i += 1
obj = skimage.measure.block_reduce(obj, (2,2,1), np.max)
descr += f"Bild zu dunkel, maxpooling ({i} mal)"
return obj,descr
else : ## feature map
## show a few of them
n = math.floor(math.sqrt(obj.shape[2]/3))
n = min(n,8)
f, axs = plt.subplots(n,n,figsize=(15,15))
descr += f"{obj.shape[2]} Feature Maps mit Shape {obj.shape[0:2]}"
print(f'Zeige {n*n*3} Feature Maps via RGB:')
for i in range(n*n):
r = norm(obj[:,:,i*3])
g = norm(obj[:,:,i*3+1])
b = norm(obj[:,:,i*3+2])
axs.flat[i].set_title(f'{i*3} - {i*3+3}')
axs.flat[i].imshow(np.moveaxis(np.array([r,g,b]), 0, 2)) # channels first -> channels last
#axs.flat[i].imshow(r,cmap='gray')
axs.flat[i].axis('off')
elif len(obj.shape) == 4 and obj.shape[0] == 3 and obj.shape[1] == 3: # convolution kernel
descr += f"Convolution Kernel {obj.shape}"
obj = np.transpose(obj,(2,3,0,1))
obj = np.reshape(obj,(obj.shape[0],-1,3))
#obj = obj[:,:,:3]
return to_image(obj)
else:
print("Tensor ",obj.shape)
print(obj)
logger.setLevel(old_level)
else:
return None, "Object of type "+str(type(obj)) | 4ae3be9758a647bbe2d0d2fedc080992840ab124 | 2,101 |
import re
def matchPP(a_string):
"""assumes a_string is a string
returns re match object if it finds two consecutive words that start with P,
else returns None"""
pattern = "[P|p]\w+\s[P|p]\w+"
result = re.search(pattern, a_string)
return result | c46eb4e0380a54cc36db0dc8969d17d65a546bf3 | 2,103 |
def setBoth(s1, s2):
"""
Sets both servo motors to specified number of degrees
Args:
s1, s2 (number): degrees for left and right servos respectively
must be between -90 and 90 and will be rounded
Raises:
Exception if s1 or s2 is not a number
Returns:
None
"""
s1 = restrictServoDegrees(s1)
s2 = restrictServoDegrees(s2)
return _setServos(s1, s2) | 16385e9a8ad23011e9c10f66677afb703f6d19ed | 2,104 |
import json
import pandas as pd
def transfer_shfe_future_hq(date, file_path, columns_map):
"""
Normalize each day's data to the unified standard.
:return: pd.DataFrame with the standardized data
"""
ret = pd.DataFrame()
data = json.loads(file_path.read_text())
hq_df = pd.DataFrame(data['o_curinstrument'])
total_df = pd.DataFrame(data['o_curproduct'])
bflag = hq_df.empty or len(hq_df.columns) < len(columns_map) or len(hq_df.columns) > 20
if bflag: # the raw data file is null and is not re-downloaded here, so the program needs to be run again
print('shfe future hq data: {} does not exist, please rerun program!'.format(file_path.name))
return ret
settle_name = columns_map['settle']
hq_df = hq_df[hq_df[settle_name] != '']
hq_df = data_type_conversion(hq_df, 0, list(columns_map.values()), list(columns_map.keys()), date, 'shfe')
hq_df.loc[:, 'code'] = hq_df['code'].str.strip()
# convert commodity names to their letter codes
hq_df['code'] = hq_df['code'].transform(lambda x: NAME2CODE_MAP['exchange'][x])
# build the symbol
hq_df['symbol'] = hq_df['code'] + hq_df['symbol'].transform(lambda x: convert_deliver(x, date))
# compute the turnover amount
total_df['PRODUCTNAME'] = total_df['PRODUCTNAME'].str.strip()
total_df['AVGPRICE'] = pd.to_numeric(total_df['AVGPRICE'], downcast='float')
total_df['VOLUME'] = pd.to_numeric(total_df['VOLUME'], downcast='integer')
total_df['TURNOVER'] = pd.to_numeric(total_df['TURNOVER'], downcast='float')
total_df = total_df[total_df['AVGPRICE'] > 0]
total_df['code'] = total_df['PRODUCTNAME'].transform(lambda x: NAME2CODE_MAP['exchange'][x.strip()])
total_df['multiplier'] = total_df['TURNOVER'] / total_df['AVGPRICE'] / total_df['VOLUME'] * 100000000
total_df['multiplier'] = total_df['multiplier'].transform(round)
hq_df = hq_df.join(total_df[['code', 'multiplier']].set_index('code'), on='code')
hq_df['amount'] = hq_df['volume'] * hq_df['settle'] * hq_df['multiplier']
del hq_df['multiplier']
return hq_df | 4e90164f96d4c5018774c0ad8d4deda7fa6dbeec | 2,105 |
import bpy
def comp_material_bsdf(arg_material_one:bpy.types.Material,
arg_material_two:bpy.types.Material) -> bool:
"""Compare the Principled BSDF nodes of the specified materials.
Compares the Principled BSDF node connected to each material's output node.
Returns True when the default values of the compared input sockets are enabled and all identical.
Args:
arg_material_one (bpy.types.Material): first material to compare
arg_material_two (bpy.types.Material): second material to compare
Returns:
bool: comparison result (True if they match)
"""
# check whether a Principled BSDF node is connected to the material's output node
if check_surface_bsdf(arg_material_one) == False:
# if it is not a Principled BSDF, stop here and return False
return False
# check whether a Principled BSDF node is connected to the material's output node
if check_surface_bsdf(arg_material_two) == False:
# if it is not a Principled BSDF, stop here and return False
return False
# get the Principled BSDF node
get_node_one = get_node_linkoutput(arg_material_one)
# get the Principled BSDF node
get_node_two = get_node_linkoutput(arg_material_two)
# comparison result flag (defaults to "match")
comp_result = True
# check every input socket that is subject to comparison
for bsdfnode_inputname in def_comp_bsdfnode_input_list:
# get the info of the socket whose default value is enabled
nodesocket_one = get_nodesocket_enabledefault(arg_node=get_node_one, arg_inputname=bsdfnode_inputname)
nodesocket_two = get_nodesocket_enabledefault(arg_node=get_node_two, arg_inputname=bsdfnode_inputname)
# confirm that socket info with an enabled default value could be obtained
if ((nodesocket_one == None) or (nodesocket_two == None)):
# if the socket info could not be obtained, treat it as a mismatch and stop checking
comp_result = False
break
# confirm that the socket types are identical
if (type(nodesocket_one) != type(nodesocket_two)):
# if they are not identical, treat it as a mismatch and stop checking
comp_result = False
break
# flag marking that a type-specific value comparison has been performed
checked_flg = False
# comparison for NodeSocketFloat sockets
if isinstance(nodesocket_one, bpy.types.NodeSocketFloat):
# compare whether the values match
if (nodesocket_one.default_value != nodesocket_two.default_value):
# if the values do not match, treat it as a mismatch and stop checking
comp_result = False
break
else:
# set the flag for the type-specific value comparison
checked_flg = True
# comparison for NodeSocketFloatFactor sockets
if isinstance(nodesocket_one, bpy.types.NodeSocketFloatFactor):
# compare whether the values match
if (nodesocket_one.default_value != nodesocket_two.default_value):
# if the values do not match, treat it as a mismatch and stop checking
comp_result = False
break
else:
# set the flag for the type-specific value comparison
checked_flg = True
# comparison for NodeSocketVector sockets
if isinstance(nodesocket_one, bpy.types.NodeSocketVector):
# compare whether the values match
if ((nodesocket_one.default_value[0] != nodesocket_two.default_value[0]) or
(nodesocket_one.default_value[1] != nodesocket_two.default_value[1]) or
(nodesocket_one.default_value[2] != nodesocket_two.default_value[2])):
# if the values do not match, treat it as a mismatch and stop checking
comp_result = False
break
else:
# set the flag for the type-specific value comparison
checked_flg = True
# comparison for NodeSocketColor sockets
if isinstance(nodesocket_one, bpy.types.NodeSocketColor):
# compare whether the values match
if ((nodesocket_one.default_value[0] != nodesocket_two.default_value[0]) or
(nodesocket_one.default_value[1] != nodesocket_two.default_value[1]) or
(nodesocket_one.default_value[2] != nodesocket_two.default_value[2]) or
(nodesocket_one.default_value[3] != nodesocket_two.default_value[3])):
# if the values do not match, treat it as a mismatch and stop checking
comp_result = False
break
else:
# set the flag for the type-specific value comparison
checked_flg = True
# confirm that a value comparison was actually performed
if checked_flg == False:
# if no type matched, assume it is not a BSDF socket, treat it as a mismatch, and stop checking
comp_result = False
break
return comp_result | 884c38c93ea4fd0c6907da0d2e5025a0980bed50 | 2,106 |
def run_filters():
"""Runs filters ('PAINS', 'ZINC', 'BRENK', 'NIH')for molecule selected.
Saves the information to the global molecule_info dict and returns the
information as its own dict.
Pass R Group IDs as queries: /filters?r1=A01&r2=B01
:returns: A json dictionary of the molecule, indexed
by the concatenated string of its R Group IDs, with the values for each
descriptor, with each key being its respective descriptor label.
:rtype: json dict
"""
filter_names = ['PAINS', 'ZINC', 'BRENK', 'NIH']
r_group_1_id = request.args.get('r1')
r_group_2_id = request.args.get('r2')
drug_mol = FinalMolecule(r_group_1_id, r_group_2_id)
drug_filters = drug_mol.filter_properties()
molecule_key = tuple2str((r_group_1_id, r_group_2_id))
filt_dict = {}
filt_dict[molecule_key] = {}
for label in filter_names:
if "filters" in molecule_info[molecule_key].keys():
pass
else:
molecule_info[molecule_key]["filters"] = {}
molecule_info[molecule_key]["filters"][label] = drug_filters[label]
filt_dict[molecule_key][label] = drug_filters[label]
return jsonify({"filter_dict": filt_dict}) | e1bc4719d412a73a7860f49978d47c459dc34d70 | 2,107 |
def read_template(engine, template_name):
"""Read template string from file and get path."""
template_file = get_template_file(engine, template_name)
template_string = template_file.read_text()
return template_string, template_file.parent | 3dc55309df1575d2af2e4794e03e2ba4ccd166a2 | 2,108 |
def get_qbert_v3_url(qbert_url, project_id):
"""Keystone only hands out a v1 url I need v3."""
qbert_v3_url = "{0}/v3/{1}".format(qbert_url[0:-3], project_id)
return qbert_v3_url | 423e1f7a601f4ecafbc7d52d1f95fd59195f193e | 2,109 |
def gen_all_holds(hand):
"""
Generate all possible choices of dice from hand to hold.
hand: sorted full yahtzee hand
Returns a set of tuples, where each tuple is sorted dice to hold
"""
# start off with the original hand in set
set_holds = set([(hand)])
# now iterate with all sub hands with one element removed
for item in hand:
list_hand = list(hand)
list_hand.remove(item)
# add to set_holds this sub hand
set_holds.add(tuple(list_hand))
# also add to set_holds the recursion of this sub hand
# set functionality also takes care of repeated sub hands
set_holds.update(gen_all_holds(tuple(list_hand)))
return set_holds | 5c8af5040f619fabef56918d399b5a1cab8893a4 | 2,110 |
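A quick usage check: for a three-dice hand the recursion enumerates every subset of dice that can be held.

```python
print(sorted(gen_all_holds((1, 2, 2))))
# [(), (1,), (1, 2), (1, 2, 2), (2,), (2, 2)]
```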
def sndrcv(*args, **kwargs):
# type: (*Any, **Any) -> Tuple[SndRcvList, PacketList]
"""Scapy raw function to send a packet and receive its answer.
WARNING: This is an internal function. Using sr/srp/sr1/srp is
more appropriate in many cases.
"""
sndrcver = SndRcvHandler(*args, **kwargs)
return sndrcver.results() | 6918dbf09bef672b95bab83126e6e4c0ec99e3bf | 2,111 |
from typing import Optional
def get_by_name(db_session: Session, *, name: str) -> Optional[Action]:
"""Return action object based on action name.
Arguments:
db_session {Session} -- SQLAlchemy Session object
name {str} -- action name
Returns:
Optional[Action] -- Returns a Action object or nothing if it doesn't exist
"""
return db_session.query(Action).filter(Action.name == name).first() | fb8c758d401fe09a36b3d2687a0e8e886edac594 | 2,112 |
def langstring(value: str, language: str = "x-none") -> dict:
"""Langstring."""
return {
"langstring": {
"lang": language,
"#text": value,
}
} | dca23a329cfc87d8cfa52cd2b009ce723b7d2270 | 2,113 |
def chinese_half2full():
"""Convert all halfwidth Chinese characters to fullwidth .
Returns:
"""
def string_op(input_str:str):
rstring = ""
for uchar in input_str:
u_code = ord(uchar)
if u_code == 32:
u_code = 12288
elif 33 <= u_code <= 126:
u_code += 65248
rstring += chr(u_code)
return rstring
return string_op | e89a6314a57192e62b32e1f7e044a09700b5bb73 | 2,114 |
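A short usage example: ASCII characters in the range 33–126 are shifted up by 65248 (0xFEE0) and the plain space becomes the ideographic space U+3000.

```python
convert = chinese_half2full()
print(convert("Hello, 世界!"))   # -> 'Ｈｅｌｌｏ，　世界！'
```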
from math import sqrt
def euclidean_distance(p1, p2):
"""
Returns the Euclidean distance between two points; the last element of each
point is excluded from the calculation (it commonly holds a class label rather than a coordinate).
"""
distance = 0
for i in range(len(p1)-1):
distance += (p1[i]-p2[i])**(2)
return sqrt(distance) | dd06e44659fdd06972bd6a660afeb313de81c6fe | 2,115 |
from PIL import Image
def img_histogram(file):
"""
Returns an image's histogram in a combined RGB channel and each individual
channel as an array of 256 values.
A 0 means that a tonal value is the max and 255 means there are 0 pixels at that value.
"""
with Image.open(file) as img:
histogram = img.histogram()
red_histogram = histogram[0:256]
red_max = max(red_histogram)
green_histogram = histogram[256:512]
green_max = max(green_histogram)
blue_histogram = histogram[512:768]
blue_max = max(blue_histogram)
rgb_histogram = []
for i in range(256):
rgb_histogram.append(red_histogram[i] + green_histogram[i] + blue_histogram[i])
rgb_max = max(rgb_histogram)
for i in range(256):
r = red_histogram[i]
g = green_histogram[i]
b = blue_histogram[i]
rgb = rgb_histogram[i]
rgb_histogram[i] = round(255 - (rgb * 255 / rgb_max), 2)
red_histogram[i] = round(255 - (r * 255 / red_max), 2)
green_histogram[i] = round(255 - (g * 255 / green_max), 2)
blue_histogram[i] = round(255 - (b * 255 / blue_max), 2)
return rgb_histogram, red_histogram, green_histogram, blue_histogram | 1f210316e752328190978f908143dd40c9ef6ba4 | 2,117 |
def absModuleToDist(magApp, magAbs):
"""
Convert apparent and absolute magnitude into distance.
Parameters
----------
magApp : float
Apparent magnitude of object.
magAbs : float
Absolute magnitude of object.
Returns
-------
Distance : float
The distance resulting from the difference in
apparent and absolute magnitude [pc].
"""
d = 10.0**(-(magAbs - magApp) / 5.0 + 1.0)
return d | a7d98ff479114f08e47afefc97a1119f5e8ff174 | 2,118 |
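This is the distance modulus m − M = 5 log10(d) − 5 solved for d (in parsecs); a couple of spot checks:

```python
print(absModuleToDist(magApp=10.0, magAbs=5.0))   # 100.0 pc
print(absModuleToDist(magApp=5.0, magAbs=5.0))    # 10.0 pc, by definition of absolute magnitude
```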
import base64
def decoded_anycli(**kwargs):
"""
Return the decoded return from AnyCLI request - Do not print anything
:param kwargs:
keyword value: value to display
:return: return the result of AnyCLI in UTF-8
:Example:
result = cli(url=base_url, auth=s, command="show vlan")
decoded_anycli(result)
"""
value = kwargs.get('value', None)
return base64.b64decode(value['result_base64_encoded']).decode('utf-8') | 223c4f9aabfef530896729205071e7fb8f9c8301 | 2,119 |
import pandas
def open_mcrae_nature_cohort():
""" get proband details for McRae et al., Nature 2017
McRae et al Nature 2017 542:433-438
doi: 10.1038/nature21062
Supplementary table S1.
"""
data = pandas.read_excel(url, sheet_name='Supplementary Table 1')
data['Individual ID'] += '|DDD'
phenotype = ['HP:0001249']
study = ['10.1038/nature21062']
persons = set()
for i, row in data.iterrows():
person = Person(row['Individual ID'], row.Sex, phenotype, study)
persons.add(person)
persons = add_mock_probands(persons, 4293, 'ddd', 'DDD', phenotype, study)
return persons | 8485fdc09c92bab20fc380a14f549f028be950b7 | 2,121 |
def copia_coords_alineadas(align1,align2,coords_molde,PDBname):
""" Returns:
1) a list with the coordinates from coords_molde that can be copied
according to the alignment align1, align2.
2) an RMSD estimate from the curve RMSD(A) = 0.40 e^{1.87(1-ID)}
of Chothia & Lesk (1986) """
aanames = { "A":"ALA","C":"CYS","D":"ASP","E":"GLU","F":"PHE","G":"GLY",
"H":"HIS","I":"ILE","K":"LYS","L":"LEU","M":"MET","N":"ASN","P":"PRO",
"Q":"GLN","R":"ARG","S":"SER","T":"THR","V":"VAL","W":"TRP","Y":"TYR" }
rmsd,identical = 0,0
total1,total2,total_model = -1,-1,0
length = len(align1)
if(length != len(align2)):
print "# copia_coords_alineadas: alineamientos tienen != longitud",
return []
pdbfile = open(PDBname, 'w')
print >> pdbfile, "HEADER comparative model\nREMARK alignment:\n",
print >> pdbfile, "REMARK query : %s\n" % (align1),
print >> pdbfile, "REMARK template: %s\n" % (align2),
for r in range(0, length):
conserved = False
res1 = align1[r:r+1]
res2 = align2[r:r+1]
if(res1 != '-'): total1+=1
if(res2 != '-'): total2+=1
if(res1 == '-' or res2 == '-'): continue # salta los gaps
total_model += 1.0;
if(res1 == res2):
conserved = True
identical += 1.0
for atomo in coords_molde[total2].split("\n"):
if(atomo == ''): break
if(atomo[12:16] == ' CA ' or atomo[12:16] == ' C ' or \
atomo[12:16] == ' N ' or atomo[12:16] == ' O ' \
or conserved):
print >> pdbfile, "%s%s%s%4d%s" % \
(atomo[0:17],aanames[res1],atomo[20:22],total1+1,atomo[26:])
print >> pdbfile, "TER\n",
pdbfile.close()
rmsd = 0.40 * exp(1.87*(1-(identical/total_model)))
identical = (identical/total_model)
return (total_model,identical,rmsd) | 48c730b43dd7059b6a6d7a068d884ecd27d3820e | 2,122 |
import numpy as np
def get_amati_relationship(value='o'):
"""
Return the Amati relationship and it's 1 sigma dispersion as given by Tsutsui et al. (2009).
:param value: a string that can be 'o', '+', or '-'. The default is set to 'o' for the actual Amati relationship.
'+' gives the upper bound of uncertainty and '-' gives the lower bound of uncertainty.
:return: returns arrays of the a and y values of the amati relation/ error in the relation
"""
#plot the amati relation given by:
#http://iopscience.iop.org/article/10.1088/1475-7516/2009/08/015/pdf
x=np.linspace(-3,3,100) #log(E_iso/10**52), for calculation of E_p, add 52 to x @ end to get back normal values
if value=='o':
y=(1/2.01)*(x+3.87) #y is log(E_p/1keV)
elif value=='+':
y=(1/(2.01))*(x+(3.87+0.33))
elif value=='-':
y=(1/(2.01))*(x+(3.87-0.33))
else:
print("This isn't a correct option for value\n")
return 1e52*10**x,10**y | f7618f812dca45640376177383af2443085b6246 | 2,123 |
def load(name, final=False, torch=False, prune_dist=None):
"""
Returns the requested dataset.
:param name: One of the available datasets
:param final: Loads the test/train split instead of the validation train split. In this case the training data
consists of both training and validation.
:return: A pair (triples, meta). `triples` is a numpy 2d array of datatype uint32 containing integer-encoded
triples. `meta` is an object of metadata containing the following fields:
* e: The number of entities
* r: The number of relations
* i2r:
"""
if name == 'micro':
return micro(final, torch)
# -- a miniature dataset for unit testing
if name in ['aifb', 'am1k', 'amplus', 'dblp', 'mdgenre', 'mdgender', 'dmgfull', 'dmg777k']:
tic()
data = Data(here(f'../datasets/{name}'), final=final, use_torch=torch)
print(f'loaded data {name} ({toc():.4}s).')
else:
raise Exception(f'Dataset {name} not recognized.')
if prune_dist is not None:
tic()
data = prune(data, n=prune_dist)
print(f'pruned ({toc():.4}s).')
return data | 38f379076ba6f5562ab818113b319276f84bd081 | 2,124 |
def is_paragraph_debian_packaging(paragraph):
"""
Return True if the `paragraph` is a CopyrightFilesParagraph that applies
only to the Debian packaging
"""
return isinstance(
paragraph, CopyrightFilesParagraph
) and paragraph.files.values == ['debian/*'] | 726cd3d8c7cdfd14a55dc8bc9764cc9d037b1b63 | 2,125 |
import numpy as np
def update_b(b, action_prob, yr_val, predict_mode):
"""Update new shape parameters b using the regression and classification output.
Args:
b: current shape parameters values. [num_examples, num_shape_params].
action_prob: classification output. [num_actions]=[num_examples, 2*num_shape_params]
yr_val: values of db to regress. yr=b-b_gt. [num_examples, num_shape_params]
predict_mode: 0: Hard classification. Move regressed distance only in the direction with maximum probability.
1: Soft classification. Multiply classification probabilities with regressed distances.
2: Regression only.
3: Classification only.
Returns:
b_new: new b after update. [num_examples, num_shape_params]
"""
if predict_mode == 0:
# Hard classification. Move regressed distance only in the direction with maximum probability.
ind = np.argmax(np.amax(np.reshape(action_prob, (b.shape[0], b.shape[1], 2)), axis=2), axis=1) # ind = [num_examples]
row_ind = np.arange(b.shape[0])
b[row_ind, ind] = b[row_ind, ind] - yr_val[row_ind, ind]
elif predict_mode == 1:
# Soft classification. Multiply classification probabilities with regressed distances.
b = b - yr_val * np.amax(np.reshape(action_prob, (b.shape[0], b.shape[1], 2)), axis=2)
elif predict_mode == 2:
# Regression only.
b = b - yr_val
elif predict_mode == 3:
# Classification only
step = 1
action_prob_reshape = np.reshape(action_prob, (b.shape[0], b.shape[1], 2))
ind = np.argmax(np.amax(action_prob_reshape, axis=2), axis=1) # ind=[num_examples]
row_ind = np.arange(b.shape[0])
is_negative = np.argmax(action_prob_reshape[row_ind, ind], axis=1) # is_negative=[num_examples]
# Move b in either positive or negative direction
b[row_ind[is_negative], ind[is_negative]] = b[row_ind[is_negative], ind[is_negative]] + step
b[row_ind[np.logical_not(is_negative)], ind[np.logical_not(is_negative)]] = b[row_ind[np.logical_not(is_negative)], ind[np.logical_not(is_negative)]] - step
return b | ba8535d538ae0e0ac44c452f2fbe94a686b8e5a1 | 2,126 |
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_config(args, cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.merge_from_list(['MODEL.BUA.EXTRACT_FEATS',True])
cfg.merge_from_list(switch_extract_mode(args.extract_mode))
cfg.merge_from_list(set_min_max_boxes(args.min_max_boxes, args.mode))
cfg.freeze()
default_setup(cfg, args)
return cfg | 9dd4495a13c64d4832b889abdf94ffd01133c92a | 2,127 |
def _earth_distance(time='now'):
"""
Return the distance between the Sun and the Earth at a specified time.
Parameters
----------
time : {parse_time_types}
Time to use in a parse_time-compatible format
Returns
-------
out : `~astropy.coordinates.Distance`
The Sun-Earth distance
"""
return get_earth(time).radius | c8646b7e2aa9b821a9740235d5cc263623bd0ec0 | 2,128 |
async def DELETE_Link(request):
"""HTTP method to delete a link"""
log.request(request)
app = request.app
group_id = request.match_info.get('id')
if not group_id:
msg = "Missing group id"
log.warn(msg)
raise HTTPBadRequest(reason=msg)
if not isValidUuid(group_id, obj_class="Group"):
msg = f"Invalid group id: {group_id}"
log.warn(msg)
raise HTTPBadRequest(reason=msg)
link_title = request.match_info.get('title')
validateLinkName(link_title)
username, pswd = getUserPasswordFromRequest(request)
await validateUserPassword(app, username, pswd)
domain = getDomainFromRequest(request)
if not isValidDomain(domain):
msg = f"domain: {domain}"
log.warn(msg)
raise HTTPBadRequest(reason=msg)
bucket = getBucketForDomain(domain)
await validateAction(app, domain, group_id, username, "delete")
req = getDataNodeUrl(app, group_id)
req += "/groups/" + group_id + "/links/" + link_title
params = {}
if bucket:
params["bucket"] = bucket
rsp_json = await http_delete(app, req, params=params)
resp = await jsonResponse(request, rsp_json)
log.response(request, resp=resp)
return resp | 193d6cb86a820a7492c768aad0a0e22fac76198f | 2,129 |
import cv2
def format_image(image):
"""
Function to format frame
"""
if len(image.shape) > 2 and image.shape[2] == 3:
# determine whether the image is color
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
# Image read from buffer
image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
cascade_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
faces = cascade_classifier.detectMultiScale(image,scaleFactor = 1.3 ,minNeighbors = 5)
if not len(faces) > 0:
return None
# initialize the first face as having maximum area, then find the one with max_area
max_area_face = faces[0]
for face in faces:
if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
max_area_face = face
face = max_area_face
# extract ROI of face: face is (x, y, w, h)
image = image[face[1]:(face[1] + face[3]), face[0]:(face[0] + face[2])]
try:
# resize the image so that it can be passed to the neural network
image = cv2.resize(image, (48,48), interpolation = cv2.INTER_CUBIC) / 255.
except Exception:
print("----->Problem during resize")
return None
return image | 1649814cddab0037f89936d1a39af44d8d5203d9 | 2,130 |
def cpu_stats():
"""Return various CPU stats as a named tuple."""
ctx_switches, interrupts, syscalls, traps = cext.cpu_stats()
soft_interrupts = 0
return _common.scpustats(ctx_switches, interrupts, soft_interrupts,
syscalls) | afdc9e95ba5d0b7760a1bbdf505b85f3fb0a0b7d | 2,131 |
def has_reacted(comment, user, reaction):
"""
Returns whether a user has reacted with a particular reaction on a comment or not.
"""
if user.is_authenticated:
reaction_type = getattr(ReactionInstance.ReactionType, reaction.upper(), None)
if not reaction_type:
raise template.TemplateSyntaxError(ReactionError.TYPE_INVALID.format(reaction_type=reaction))
return ReactionInstance.objects.filter(
user=user,
reaction_type=reaction_type.value,
reaction__comment=comment
).exists()
return False | 8cf537b204ae13c844e80a14b29f11e36d69097b | 2,133 |
import requests
def structure_query(compound, label='pyclassyfire'):
"""Submit a compound information to the ClassyFire service for evaluation
and receive a id which can be used to used to collect results
:param compound: The compound structures as line delimited inchikey or
smiles. Optionally a tab-separated id may be prepended for each
structure.
:type compound: str
:param label: A label for the query
:type label:
:return: A query ID number
:rtype: int
>>> structure_query('CCC', 'smiles_test')
>>> structure_query('InChI=1S/C3H4O3/c1-2(4)3(5)6/h1H3,(H,5,6)')
"""
r = requests.post(url + '/queries.json', data='{"label": "%s", '
'"query_input": "%s", "query_type": "STRUCTURE"}'
% (label, compound),
headers={"Content-Type": "application/json"})
r.raise_for_status()
return r.json()['id'] | cd7c0558dd61f493187169cea3562c96f63634d2 | 2,134 |
def create(*, db_session, ticket_in: TicketCreate) -> Ticket:
"""Creates a new ticket."""
ticket = Ticket(**ticket_in.dict())
db_session.add(ticket)
db_session.commit()
return ticket | 644bcccc56c8fd97ec3c888f6e38c1fc2afc3585 | 2,136 |
def blur(img):
"""
:param img: SimpleImage, an original image.
:return: img: SimpleImage, image with blurred effect.
"""
blank_img = SimpleImage.blank(img.width, img.height)
for y in range(img.height):
for x in range(img.width):
blurred = blank_img.get_pixel(x, y)
if x == 0 and y == 0:
"""
For 4 corners.
The new RGB values of the original pixel are the average RGB values
of the original pixel and the other pixels around it.
"""
avg_red1 = (img.get_pixel(x, y).red +
img.get_pixel(x + 1, y).red +
img.get_pixel(x, y + 1).red +
img.get_pixel(x + 1, y + 1).red) / 4
avg_green1 = (img.get_pixel(x, y).green +
img.get_pixel(x + 1, y).green +
img.get_pixel(x, y + 1).green +
img.get_pixel(x + 1, y + 1).green) / 4
avg_blue1 = (img.get_pixel(x, y).blue +
img.get_pixel(x + 1, y).blue +
img.get_pixel(x, y + 1).blue +
img.get_pixel(x + 1, y + 1).blue) / 4
blurred.red = avg_red1
blurred.green = avg_green1
blurred.blue = avg_blue1
elif x == 0 and y == blank_img.height - 1:
avg_red2 = (img.get_pixel(x, y).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x + 1, y - 1).red +
img.get_pixel(x + 1, y).red) / 4
avg_green2 = (img.get_pixel(x, y).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x + 1, y - 1).green +
img.get_pixel(x + 1, y).green) / 4
avg_blue2 = (img.get_pixel(x, y).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x + 1, y - 1).blue +
img.get_pixel(x + 1, y).blue) / 4
blurred.red = avg_red2
blurred.green = avg_green2
blurred.blue = avg_blue2
elif x == blank_img.width - 1 and y == 0:
avg_red3 = (img.get_pixel(x, y).red +
img.get_pixel(x - 1, y).red +
img.get_pixel(x - 1, y + 1).red +
img.get_pixel(x, y + 1).red) / 4
avg_green3 = (img.get_pixel(x, y).green +
img.get_pixel(x - 1, y).green +
img.get_pixel(x - 1, y + 1).green +
img.get_pixel(x, y + 1).green) / 4
avg_blue3 = (img.get_pixel(x, y).blue +
img.get_pixel(x - 1, y).blue +
img.get_pixel(x - 1, y + 1).blue +
img.get_pixel(x, y + 1).blue) / 4
blurred.red = avg_red3
blurred.green = avg_green3
blurred.blue = avg_blue3
elif x == blank_img.width - 1 and y == blank_img.height - 1:
avg_red4 = (img.get_pixel(x, y).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x - 1, y - 1).red +
img.get_pixel(x - 1, y).red) / 4
avg_green4 = (img.get_pixel(x, y).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x - 1, y - 1).green +
img.get_pixel(x - 1, y).green) / 4
avg_blue4 = (img.get_pixel(x, y).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x - 1, y - 1).blue +
img.get_pixel(x - 1, y).blue) / 4
blurred.red = avg_red4
blurred.green = avg_green4
blurred.blue = avg_blue4
elif x == 0 and 0 < y < blank_img.height - 1:
"""
For 4 edges.
The new RGB values of the original pixel are the average RGB values
of the original pixel and the other pixels around it.
"""
avg_red5 = (img.get_pixel(x, y).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x + 1, y - 1).red +
img.get_pixel(x + 1, y).red +
img.get_pixel(x + 1, y + 1).red +
img.get_pixel(x, y + 1).red) / 6
avg_green5 = (img.get_pixel(x, y).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x + 1, y - 1).green +
img.get_pixel(x + 1, y).green +
img.get_pixel(x + 1, y + 1).green +
img.get_pixel(x, y + 1).green) / 6
avg_blue5 = (img.get_pixel(x, y).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x + 1, y - 1).blue +
img.get_pixel(x + 1, y).blue +
img.get_pixel(x + 1, y + 1).blue +
img.get_pixel(x, y + 1).blue) / 6
blurred.red = avg_red5
blurred.green = avg_green5
blurred.blue = avg_blue5
elif x == blank_img.width - 1 and 0 < y < blank_img.height - 1:
avg_red6 = (img.get_pixel(x, y).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x - 1, y - 1).red +
img.get_pixel(x - 1, y).red +
img.get_pixel(x - 1, y + 1).red +
img.get_pixel(x, y + 1).red) / 6
avg_green6 = (img.get_pixel(x, y).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x - 1, y - 1).green +
img.get_pixel(x - 1, y).green +
img.get_pixel(x - 1, y + 1).green +
img.get_pixel(x, y + 1).green) / 6
avg_blue6 = (img.get_pixel(x, y).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x - 1, y - 1).blue +
img.get_pixel(x - 1, y).blue +
img.get_pixel(x - 1, y + 1).blue +
img.get_pixel(x, y + 1).blue) / 6
blurred.red = avg_red6
blurred.green = avg_green6
blurred.blue = avg_blue6
elif y == 0 and 0 < x < blank_img.width - 1:
avg_red7 = (img.get_pixel(x, y).red +
img.get_pixel(x - 1, y).red +
img.get_pixel(x - 1, y + 1).red +
img.get_pixel(x, y + 1).red +
img.get_pixel(x + 1, y + 1).red +
img.get_pixel(x + 1, y).red) / 6
avg_green7 = (img.get_pixel(x, y).green +
img.get_pixel(x - 1, y).green +
img.get_pixel(x - 1, y + 1).green +
img.get_pixel(x, y + 1).green +
img.get_pixel(x + 1, y + 1).green +
img.get_pixel(x + 1, y).green) / 6
avg_blue7 = (img.get_pixel(x, y).blue +
img.get_pixel(x - 1, y).blue +
img.get_pixel(x - 1, y + 1).blue +
img.get_pixel(x, y + 1).blue +
img.get_pixel(x + 1, y + 1).blue +
img.get_pixel(x + 1, y).blue) / 6
blurred.red = avg_red7
blurred.green = avg_green7
blurred.blue = avg_blue7
elif y == blank_img.height - 1 and 0 < x < blank_img.width - 1:
avg_red8 = (img.get_pixel(x, y).red +
img.get_pixel(x - 1, y).red +
img.get_pixel(x - 1, y - 1).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x + 1, y - 1).red +
img.get_pixel(x + 1, y).red) / 6
avg_green8 = (img.get_pixel(x, y).green +
img.get_pixel(x - 1, y).green +
img.get_pixel(x - 1, y - 1).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x + 1, y - 1).green +
img.get_pixel(x + 1, y).green) / 6
avg_blue8 = (img.get_pixel(x, y).blue +
img.get_pixel(x - 1, y).blue +
img.get_pixel(x - 1, y - 1).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x + 1, y - 1).blue +
img.get_pixel(x + 1, y).blue) / 6
blurred.red = avg_red8
blurred.green = avg_green8
blurred.blue = avg_blue8
else:
"""
For areas other than the corners and edges.
The new RGB values of the original pixel are the average RGB values
of the pixel itself and the 8 pixels around it.
"""
avg_red9 = (img.get_pixel(x, y).red +
img.get_pixel(x - 1, y).red +
img.get_pixel(x + 1, y).red +
img.get_pixel(x - 1, y - 1).red +
img.get_pixel(x, y - 1).red +
img.get_pixel(x + 1, y - 1).red +
img.get_pixel(x - 1, y + 1).red +
img.get_pixel(x, y + 1).red +
img.get_pixel(x + 1, y + 1).red) / 9
avg_green9 = (img.get_pixel(x, y).green +
img.get_pixel(x - 1, y).green +
img.get_pixel(x + 1, y).green +
img.get_pixel(x - 1, y - 1).green +
img.get_pixel(x, y - 1).green +
img.get_pixel(x + 1, y - 1).green +
img.get_pixel(x - 1, y + 1).green +
img.get_pixel(x, y + 1).green +
img.get_pixel(x + 1, y + 1).green) / 9
avg_blue9 = (img.get_pixel(x, y).blue +
img.get_pixel(x - 1, y).blue +
img.get_pixel(x + 1, y).blue +
img.get_pixel(x - 1, y - 1).blue +
img.get_pixel(x, y - 1).blue +
img.get_pixel(x + 1, y - 1).blue +
img.get_pixel(x - 1, y + 1).blue +
img.get_pixel(x, y + 1).blue +
img.get_pixel(x + 1, y + 1).blue) / 9
blurred.red = avg_red9
blurred.green = avg_green9
blurred.blue = avg_blue9
return blank_img | 9a7ac5085aea610a26a626e1d53bd243de19ad9e | 2,137 |
def trans_pressure(src, dest="bar"):
"""
>>>
"""
return trans_basic_unit(src, dest, "pressure") | 120888c024e6158a6e26ab699f7f4b5583cbf243 | 2,138 |
def test_accelerated_bypass_method_against_old(c_ctrl_rr):
"""Confirm that my changes to the bypass method maintain the same
result as the old method"""
OLD_HTCONSTS = dassh.region_rodded.calculate_ht_constants(c_ctrl_rr)
def _calc_coolant_byp_temp_old(self, dz):
"""Calculate the coolant temperatures in the assembly bypass
channels at the axial level j+1
Parameters
----------
self : DASSH RoddedRegion object
dz : float
Axial step size (m)
Notes
-----
The coolant in the bypass channels is assumed to get no
power from neutron/gamma heating (that contribution to
coolant in the assembly interior is already small enough).
"""
# Calculate the change in temperature in each subchannel
dT = np.zeros((self.n_bypass,
self.subchannel.n_sc['bypass']['total']))
# self._update_coolant_byp_params(self.avg_coolant_byp_temp)
for i in range(self.n_bypass):
# This factor is in many terms; technically, the mass flow
# rate is already accounted for in constants defined earlier
# mCp = self.coolant.heat_capacity
# starting index to lookup type is after all interior
# coolant channels and all preceding duct and bypass
# channels
start = (self.subchannel.n_sc['coolant']['total']
+ self.subchannel.n_sc['duct']['total']
+ i * self.subchannel.n_sc['bypass']['total']
+ i * self.subchannel.n_sc['duct']['total'])
# end = start + self.subchannel.n_sc['bypass']['total']
for sci in range(0, self.subchannel.n_sc['bypass']['total']):
# The value of sci is the PYTHON indexing
# type_i = self.subchannel.type[sci + start] - 1
type_i = self.subchannel.type[sci + start]
# Heat transfer to/from adjacent subchannels
for adj in self.subchannel.sc_adj[sci + start]:
# if adj == 0:
if adj == -1:
continue
# type_a = self.subchannel.type[adj - 1] - 1
type_a = self.subchannel.type[adj]
# Convection to/from duct wall
# if type_a in [3, 4]:
if 3 <= type_a <= 4:
if sci + start > adj: # INTERIOR adjacent duct wall
byp_conv_const = \
OLD_HTCONSTS[type_i][type_a][i][0]
byp_conv_dT = \
(self.temp['duct_surf'][i, 1, sci]
- self.temp['coolant_byp'][i, sci])
else: # EXTERIOR adjacent duct wall
byp_conv_const = \
OLD_HTCONSTS[type_i][type_a][i][1]
byp_conv_dT = \
(self.temp['duct_surf'][i + 1, 0, sci]
- self.temp['coolant_byp'][i, sci])
dT[i, sci] += \
(self.coolant_byp_params['htc'][i, type_i - 5]
* dz * byp_conv_const * byp_conv_dT
/ self.coolant.heat_capacity)
# Conduction to/from adjacent coolant subchannels
else:
# sc_adj = adj - start - 1
sc_adj = adj - start
dT[i, sci] += \
(self.coolant.thermal_conductivity
* dz
* OLD_HTCONSTS[type_i][type_a][i]
* (self.temp['coolant_byp'][i, sc_adj]
- self.temp['coolant_byp'][i, sci])
/ self.coolant.heat_capacity)
return dT
dT = np.zeros(c_ctrl_rr.temp['coolant_byp'].shape)
dT_old = dT.copy()
dz = 0.01
start_temp = 623.15
for i in range(50):
duct_surf_temp = \
(np.random.random(c_ctrl_rr.temp['duct_surf'].shape)
+ (start_temp + i * 1.0))
c_ctrl_rr.temp['duct_surf'] = duct_surf_temp
dT_old += _calc_coolant_byp_temp_old(c_ctrl_rr, dz)
dT += c_ctrl_rr._calc_coolant_byp_temp(dz)
print(np.average(dT))
print(np.average(dT_old))
print('max abs diff: ', np.max(np.abs(dT - dT_old)))
assert np.allclose(dT, dT_old) | db6660b8ddc2f7ea409f7b334e4e161fceb743b2 | 2,140 |
import logging
def vraec18(pretrained=False, **kwargs):
"""Constructs a _ResAE-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = _VRAEC(_VariationalBasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
try:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
except Exception as exp:
logging.warning(exp)
return model | 6b5e5a5d812b20c30bac3f81289a553bdc4884d4 | 2,142 |
import zlib
def encode_zip(data):
"""Zip-compress data. Implies base64 encoding of zip data."""
zipped = zlib.compress(data)
return encode_b64(zipped) | aa048125edd67a411715bf748bf832a6e6d7104f | 2,143 |
def create_class_mask(img, color_map, is_normalized_img=True, is_normalized_map=False, show_masks=False):
"""
Function to create C matrices from the segmented image, where each of the C matrices is for one class
with all ones at the pixel positions where that class is present
img = The segmented image
color_map = A list with tuples that contains all the RGB values for each color that represents
some class in that image
is_normalized_img = Boolean - Whether the image is normalized or not
If normalized, then the image is multiplied with 255
    is_normalized_map = Boolean - Represents whether the color map is normalized or not, if so
                        then the image is divided by 255 to match the normalized map
    show_masks = Whether to show the created masks or not
"""
if is_normalized_img and (not is_normalized_map):
img *= 255
if is_normalized_map and (not is_normalized_img):
img = img / 255
mask = []
hw_tuple = img.shape[:-1]
for color in color_map:
color_img = []
for idx in range(3):
color_img.append(np.ones(hw_tuple) * color[idx])
color_img = np.array(color_img, dtype=np.uint8).transpose(1, 2, 0)
mask.append(np.uint8((color_img == img).sum(axis = -1) == 3))
return np.array(mask) | 97452e568d0a29b438a61fc96d90231a318e919b | 2,144 |
import itertools
def reconstruct_grid(mask, ds_dl):
"""
Reconstruction of 2d grid.
Args:
mask (ndarray): land mask used.
ds_dl (ndarray): trained model prediction.
"""
landmask = np.argwhere(np.isnan(mask))
empty = np.zeros((ds_dl.shape[0], mask.shape[0], mask.shape[1]))
counter = 0
for i, j in itertools.product(list(range(mask.shape[0])),list(range(mask.shape[1]))):
if np.argwhere(np.logical_and(np.isin(landmask[:,0], i), np.isin(landmask[:,1], j))).shape[0] > 0:
empty[:, i, j] = np.nan
else:
empty[:, i, j] = ds_dl[:, counter]
counter += 1
return empty | 4d220e0d4ae96ee1ddc55e53f21f2a35d920b03e | 2,145 |
import numpy as np
import tensorflow as tf
def conv_kernel_initializer(shape, dtype=None):
"""卷积核初始化
和 tf.variance_scaling_initializer最大不同之处就是在于,tf.variance_scaling_initializer 使用的是 truncated norm,
但是却具有未校正的标准偏差,而这里使用正态分布。类似地,tf.initializers.variance_scaling使用带有校正后的标准偏差。
Args:
shape: 卷积核的shape
dtype: 卷积核的dtype
Returns:
经过初始化后的卷积核
"""
kernel_height, kernel_width, input_filters, out_filters = shape
fan_out = int(kernel_height * kernel_width * out_filters)
return tf.random.normal(shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype) | f7fd5665aeb8eb592a5f1f0f1785dfd84c9d8d98 | 2,146 |
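# Hedged usage sketch: Keras accepts any callable with a (shape, dtype) signature as
# a kernel initializer, so conv_kernel_initializer can be passed to a layer directly.
# The layer configuration below is illustrative only.
import tensorflow as tf

conv = tf.keras.layers.Conv2D(
    filters=32,
    kernel_size=3,
    padding="same",
    kernel_initializer=conv_kernel_initializer,
)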
def prediction_func(data, g_data, grid_search, param_list):
"""Function for using dataset to train a model and
predicting prices for a generated data.
Parameter search is done using RandomizedSearchCV since it is computationally
more efficientcompared to GridSearchCV.
In param_list, learning_rate, subsample and max_depth,
min_child_weight, gamma and colsample_bytree can be included.
Args:
| data (pd.Dataframe): the dataset including house features and prices
| g_data (pd.Dataframe): randomly generated house features for prediction purposes
| grid_search (bool): indicates whether model is trained with parameter
search(True) or use default values(False)
| param_list (list): the list of parameters to be included in parameter search
Returns:
the predicted prices for houses in g_data (np.array)
"""
# Base Model
xgb_reg = xgb.XGBRegressor(n_treads=-1)
if grid_search:
# Search for best parameters in model
params = {
"learning_rate": [i / 20 for i in range(1, 11)],
"min_child_weight": [i for i in range(3, 12)],
"gamma": [i / 10.0 for i in range(3, 8)],
"subsample": [i / 10.0 for i in range(7, 11)],
"colsample_bytree": [i / 10.0 for i in range(6, 11)],
"max_depth": [i for i in range(3, 8)],
}
# Only includes selected parameters
params = {key: params[key] for key in param_list}
xgb_reg = RandomizedSearchCV(
estimator=xgb_reg,
param_distributions=params,
n_iter=5,
cv=3,
random_state=23,
iid=False,
)
xgb_reg.fit(data.drop("price", axis=1), data.price)
return xgb_reg.predict(g_data) | b747578879054947e91e5285b82cf3e07fa313da | 2,147 |
def thv_to_zxy(theta, h):
"""Convert coordinates from (theta, h, v) to (z, x, y) space."""
cos_p = np.cos(theta)
sin_p = np.sin(theta)
srcx = +RADIUS * cos_p - h * sin_p
srcy = +RADIUS * sin_p + h * cos_p
detx = -RADIUS * cos_p - h * sin_p
dety = -RADIUS * sin_p + h * cos_p
return srcx, srcy, detx, dety | 64370dc6c4060a718506a243414afdd698881147 | 2,148 |
from datetime import datetime
def get_most_stale_file(logpath=DEFAULT_PATH):
"""
returns the filename of the file in the fileset that was least recently backed up
and the time of the last backup
"""
oldest_name = ""
oldest_date = datetime.max
for fstat in get_fileset_statlist():
last_backup = datetime.strptime(
get_last_upload_times(fstat[STAT_KEYS.SOURCE], n_times=1)[0],
TIME_FORMAT
)
if last_backup < oldest_date:
oldest_date = last_backup
oldest_name = fstat[STAT_KEYS.SOURCE]
return oldest_name, oldest_date | e0000847513ffeb97b8df0c26941ca4e3380f09d | 2,149 |
from typing import Mapping
from typing import Dict
import re
import logging
def get_instances(context: models.Context) -> Mapping[str, Instance]:
"""Get a list of Instance matching the given context, indexed by instance id."""
instances: Dict[str, Instance] = {}
if not apis.is_enabled(context.project_id, 'compute'):
return instances
gce_api = apis.get_api('compute', 'v1', context.project_id)
requests = [
gce_api.instances().list(project=context.project_id, zone=zone)
for zone in get_gce_zones(context.project_id)
]
items = apis_utils.batch_list_all(
api=gce_api,
requests=requests,
next_function=gce_api.instances().list_next,
log_text=f'listing gce instances of project {context.project_id}')
for i in items:
result = re.match(
r'https://www.googleapis.com/compute/v1/projects/[^/]+/zones/([^/]+)/',
i['selfLink'])
if not result:
logging.error('instance %s selfLink didn\'t match regexp: %s', i['id'],
i['selfLink'])
continue
zone = result.group(1)
labels = i.get('labels', {})
if not context.match_project_resource(location=zone, labels=labels):
continue
instances[i['id']] = Instance(project_id=context.project_id,
resource_data=i)
return instances | 10f4eae30b0a5c752c45378574ba4620bd859320 | 2,150 |
def svn_fs_delete_fs(*args):
"""svn_fs_delete_fs(char const * path, apr_pool_t pool) -> svn_error_t"""
return _fs.svn_fs_delete_fs(*args) | 6e1f34d82899fc257c723990c55853b35f0b06d3 | 2,152 |
import theano.tensor as T
def translate_output(_output, n_classes, is_binary_classification=False):
""" Gets matrix with one hot encoding where the 1 represent index of class.
Parameters
----------
_output : theano.tensor.matrix
Output sample.
n_classes : int
Number of classes (or size of one hot encoding rows)
is_binary_classification : bool
This flag means that model is for binary classification.
Returns
-------
theano.tensor.matrix
Returns one hot encoding.
"""
if is_binary_classification:
return T.sgn(_output)
else:
return to_one_hot(T.argmax(_output, axis=-1), n_classes) | 03137e6b0704477a69211d454ee5e05a5ab02636 | 2,153 |
from math import acos, atan2, pi
def _sphere_point_to_uv(point: Point) -> Vec2d:
"""Convert a 3D point on the surface of the unit sphere into a (u, v) 2D point"""
u = atan2(point.y, point.x) / (2.0 * pi)
return Vec2d(
u=u if u >= 0.0 else u + 1.0,
v=acos(point.z) / pi,
) | c0eb4abb1ebc55f74b908a85f0cb94f71a528c32 | 2,155 |
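# Minimal sketch (assumption: Point and Vec2d are simple named tuples, which would
# need to exist before the function definition above). It illustrates the mapping:
# the +x axis lands at (u, v) = (0, 0.5) and the north pole (z = 1) at v = 0.
from collections import namedtuple

Point = namedtuple("Point", "x y z")   # stand-in for the real Point type
Vec2d = namedtuple("Vec2d", "u v")     # stand-in for the real Vec2d type

print(_sphere_point_to_uv(Point(1.0, 0.0, 0.0)))  # Vec2d(u=0.0, v=0.5)
print(_sphere_point_to_uv(Point(0.0, 0.0, 1.0)))  # Vec2d(u=0.0, v=0.0)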
import tqdm
def generate_formula_dict(materials_store, query=None):
"""
Function that generates a nested dictionary of structures
keyed first by formula and then by task_id using
mongo aggregation pipelines
Args:
materials_store (Store): store of materials
Returns:
Nested dictionary keyed by formula-mp_id with structure values.
"""
props = ["pretty_formula", "structure", "task_id", "magnetic_type"]
results = list(materials_store.groupby("pretty_formula", properties=props,
criteria=query))
formula_dict = {}
for result in tqdm.tqdm(results):
formula = result['_id']['pretty_formula']
task_ids = [d['task_id'] for d in result['docs']]
structures = [d['structure'] for d in result['docs']]
formula_dict[formula] = dict(zip(task_ids, structures))
return formula_dict | ae232c806972262029966307e489df0b12d646f5 | 2,156 |
def truncate(wirevector_or_integer, bitwidth):
""" Returns a wirevector or integer truncated to the specified bitwidth
    :param wirevector_or_integer: Either a wirevector or an integer to be truncated
:param bitwidth: The length to which the first argument should be truncated.
    :return: Returns a truncated wirevector or integer as appropriate
This function truncates the most significant bits of the input, leaving a result
that is only "bitwidth" bits wide. For integers this is performed with a simple
bitmask of size "bitwidth". For wirevectors the function calls WireVector.truncate
and returns a wirevector of the specified bitwidth.
Examples: ::
        truncate(9,3)  # returns 1 (0b1001 truncates to 0b001)
        truncate(5,3)  # returns 5 (0b101 truncates to 0b101)
truncate(-1,3) # returns 7 (-0b1 truncates to 0b111)
        y = truncate(x+1, x.bitwidth) # y.bitwidth will equal x.bitwidth
"""
if bitwidth < 1:
raise PyrtlError('bitwidth must be a positive integer')
x = wirevector_or_integer
try:
return x.truncate(bitwidth)
except AttributeError:
return x & ((1 << bitwidth)-1) | 7ff6d22061944f4202bc69dfde109c1cead20972 | 2,158 |
import numpy as np
from math import sqrt, tan, pi
def pcoef(xte, yte, rle, x_cre, y_cre, d2ydx2_cre, th_cre, surface):
# Docstrings
"""evaluate the PARSEC coefficients"""
# Initialize coefficients
coef = np.zeros(6)
# 1st coefficient depends on surface (pressure or suction)
if surface.startswith('p'):
coef[0] = -sqrt(2*rle)
else:
coef[0] = sqrt(2*rle)
# Form system of equations
A = np.array([
[xte**1.5, xte**2.5, xte**3.5, xte**4.5, xte**5.5],
[x_cre**1.5, x_cre**2.5, x_cre**3.5, x_cre**4.5,
x_cre**5.5],
[1.5*sqrt(xte), 2.5*xte**1.5, 3.5*xte**2.5,
4.5*xte**3.5, 5.5*xte**4.5],
[1.5*sqrt(x_cre), 2.5*x_cre**1.5, 3.5*x_cre**2.5,
4.5*x_cre**3.5, 5.5*x_cre**4.5],
[0.75*(1/sqrt(x_cre)), 3.75*sqrt(x_cre), 8.75*x_cre**1.5,
15.75*x_cre**2.5, 24.75*x_cre**3.5]
])
B = np.array([
[yte - coef[0]*sqrt(xte)],
[y_cre - coef[0]*sqrt(x_cre)],
[tan(th_cre*pi/180) - 0.5*coef[0]*(1/sqrt(xte))],
[-0.5*coef[0]*(1/sqrt(x_cre))],
[d2ydx2_cre + 0.25*coef[0]*x_cre**(-1.5)]
])
# Solve system of linear equations
# X = np.linalg.solve(A,B)
X = np.linalg.lstsq(A,B)[0]
# Gather all coefficients
coef[1:6] = X[0:5,0]
# Return coefficients
return coef | 43cc56ec7f29267678ebbc3572633e5073cda117 | 2,159 |
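# Hedged sketch of how the returned coefficients are typically used: PARSEC describes
# each surface as the half-power polynomial y(x) = sum_n coef[n] * x**(n + 0.5), so
# evaluating that sum reconstructs the surface ordinates. The parameter values below
# are illustrative only, not from any particular airfoil.
import numpy as np

coef = pcoef(xte=1.0, yte=0.0, rle=0.01,
             x_cre=0.3, y_cre=0.06, d2ydx2_cre=-0.45, th_cre=-10.0,
             surface='suction')

x = np.linspace(0.0, 1.0, 101)
y = sum(coef[n] * x**(n + 0.5) for n in range(6))  # surface ordinates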
def iscircular(linked_list):
"""
Determine whether the Linked List is circular or not
Args:
linked_list(obj): Linked List to be checked
Returns:
bool: Return True if the linked list is circular, return False otherwise
"""
slow_runner = linked_list.head
fast_runner = linked_list.head
    while fast_runner is not None and fast_runner.next is not None:
slow_runner = slow_runner.next
fast_runner = fast_runner.next.next
if slow_runner == fast_runner:
return True
return False | 04f86497dae2a2ee77afd37f13bdba8e18ae52b9 | 2,162 |
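# Minimal sketch of the two-pointer (Floyd) check above, assuming a bare-bones
# node/list implementation exposing the `head` and `next` attributes the function uses.
class Node:
    def __init__(self, value):
        self.value = value
        self.next = None

class LinkedList:
    def __init__(self, head=None):
        self.head = head

a, b, c = Node(1), Node(2), Node(3)
a.next, b.next, c.next = b, c, a          # 1 -> 2 -> 3 -> back to 1
print(iscircular(LinkedList(a)))          # True

c.next = None                             # break the loop: 1 -> 2 -> 3 -> None
print(iscircular(LinkedList(a)))          # False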
def shape_extent_to_header(shape, extent, nan_value=-9999):
""" Create a header dict with shape and extent of an array
"""
ncols = shape[1]
nrows = shape[0]
xllcorner = extent[0]
yllcorner = extent[2]
cellsize_x = (extent[1]-extent[0])/ncols
cellsize_y = (extent[3]-extent[2])/nrows
if cellsize_x != cellsize_y:
raise ValueError('extent produces different cellsize in x and y')
cellsize = cellsize_x
header = {'ncols':ncols, 'nrows':nrows,
'xllcorner':xllcorner, 'yllcorner':yllcorner,
'cellsize':cellsize, 'NODATA_value':nan_value}
return header | 957b59e7f464901a5430fd20ab52f28507b55887 | 2,163 |
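# Quick illustration: for a 10 x 20 grid covering x in [0, 100] and y in [0, 50]
# (extent ordered as xmin, xmax, ymin, ymax), the cell size works out to 5.0 in
# both directions.
header = shape_extent_to_header(shape=(10, 20), extent=(0, 100, 0, 50))
# {'ncols': 20, 'nrows': 10, 'xllcorner': 0, 'yllcorner': 0,
#  'cellsize': 5.0, 'NODATA_value': -9999}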
def build_encoder(opt, embeddings):
"""
Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
if opt.encoder_type == "transformer":
return TransformerEncoder(opt.enc_layers, opt.rnn_size,
opt.heads, opt.transformer_ff,
opt.dropout, embeddings)
elif opt.encoder_type == "cnn":
return CNNEncoder(opt.enc_layers, opt.rnn_size,
opt.cnn_kernel_width,
opt.dropout, embeddings)
elif opt.encoder_type == "mean":
return MeanEncoder(opt.enc_layers, embeddings)
else:
# "rnn" or "brnn"
return RNNEncoder(opt.wals_model, opt.rnn_type, opt.brnn, opt.enc_layers,
opt.rnn_size, opt.wals_size, opt.dropout, embeddings,
opt.bridge) | 73b379545aeeb3226ea019cad0a692b00cd7630b | 2,164 |
def efficientnet_b3b(in_size=(300, 300), **kwargs):
"""
EfficientNet-B3-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (300, 300)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b3b",
**kwargs) | 1d7e0bffe67f9d2f340563b21e1f995201877165 | 2,166 |
import logging
def logged(class_):
"""Class-level decorator to insert logging.
This assures that a class has a ``.log`` member.
::
@logged
class Something:
def __init__(self, args):
self.log(f"init with {args}")
"""
    class_.log = logging.getLogger(class_.__qualname__)
return class_ | cd58e355151ab99aa1694cbd9fb6b710970dfa19 | 2,167 |
def TableInFirstNSStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder) | 5ea3cf66842eaf026a36bb241c277076cc8650b8 | 2,168 |
def dicom_to_nifti(dicom_input, output_file=None):
"""
    This is the main dicom to nifti conversion function for Siemens images.
    As input, Siemens images are required. It will then determine the type of images and do the correct conversion
:param output_file: filepath to the output nifti
:param dicom_input: directory with dicom files for 1 scan
"""
assert common.is_siemens(dicom_input)
# remove duplicate slices based on position and data
dicom_input = convert_generic.remove_duplicate_slices(dicom_input)
# remove localizers based on image type
dicom_input = convert_generic.remove_localizers_by_imagetype(dicom_input)
# remove_localizers based on image orientation (only valid if slicecount is validated)
dicom_input = convert_generic.remove_localizers_by_orientation(dicom_input)
if _is_4d(dicom_input):
logger.info('Found sequence type: MOSAIC 4D')
return _mosaic_4d_to_nifti(dicom_input, output_file)
grouped_dicoms = _classic_get_grouped_dicoms(dicom_input)
if _is_classic_4d(grouped_dicoms):
logger.info('Found sequence type: CLASSIC 4D')
return _classic_4d_to_nifti(grouped_dicoms, output_file)
logger.info('Assuming anatomical data')
return convert_generic.dicom_to_nifti(dicom_input, output_file) | 0b77d190c2379e9b2ad5fbf9217e1604a7df8bc9 | 2,169 |
def lot_vectors_dense_internal(
sample_vectors,
sample_distributions,
reference_vectors,
reference_distribution,
metric=cosine,
max_distribution_size=256,
chunk_size=256,
spherical_vectors=True,
):
"""Efficiently compute linear optimal transport vectors for
a block of data provided as a list of distributions and a
corresponding list of arrays of vectors.
Parameters
----------
sample_vectors: numba.typed.List of ndarrays
A set of vectors for each distribution.
sample_distributions: numba.typed.List of ndarrays
A set of distributions (1d arrays that sum to one). The ith element of a given
distribution is the probability mass on the ith row of the corresponding entry
in the ``sample_vectors`` list.
reference_vectors: ndarray
The reference vector set for LOT
reference_distribution: ndarray
The reference distribution over the set of reference vectors
metric: function(ndarray, ndarray) -> float
The distance function to use for distance computation
max_distribution_size: int (optional, default=256)
The maximum size of a distribution to consider; larger
distributions over more vectors will be truncated back
to this value for faster performance.
chunk_size: int (optional, default=256)
Operations will be parallelised over chunks of the input.
This specifies the chunk size.
spherical_vectors: bool (optional, default=True)
Whether the vectors live on an n-sphere instead of euclidean space
and thus require some degree of spherical correction.
Returns
-------
lot_vectors: ndarray
        The raw linear optimal transport vectors corresponding to the input.
"""
n_rows = len(sample_vectors)
result = np.zeros((n_rows, reference_vectors.size), dtype=np.float64)
n_chunks = (n_rows // chunk_size) + 1
for n in range(n_chunks):
chunk_start = n * chunk_size
chunk_end = min(chunk_start + chunk_size, n_rows)
for i in range(chunk_start, chunk_end):
row_vectors = sample_vectors[i].astype(np.float64)
row_distribution = sample_distributions[i]
if row_vectors.shape[0] > max_distribution_size:
best_indices = np.argsort(-row_distribution)[:max_distribution_size]
row_vectors = row_vectors[best_indices]
row_distribution = row_distribution[best_indices]
row_sum = row_distribution.sum()
if row_sum > 0.0:
row_distribution /= row_sum
if row_vectors.shape[0] > reference_vectors.shape[0]:
cost = chunked_pairwise_distance(
row_vectors, reference_vectors, dist=metric
)
else:
cost = chunked_pairwise_distance(
reference_vectors, row_vectors, dist=metric
).T
current_transport_plan = transport_plan(
row_distribution, reference_distribution, cost
)
transport_images = (
current_transport_plan * (1.0 / reference_distribution)
).T @ row_vectors
if spherical_vectors:
l2_normalize(transport_images)
transport_vectors = transport_images - reference_vectors
if spherical_vectors:
tangent_vectors = project_to_sphere_tangent_space(
transport_vectors, reference_vectors
)
l2_normalize(tangent_vectors)
scaling = tangent_vectors_scales(
transport_images, reference_vectors
)
transport_vectors = tangent_vectors * scaling
result[i] = transport_vectors.flatten()
# Help the SVD preserve spherical data by sqrt entries
if spherical_vectors:
for i in range(result.shape[0]):
for j in range(result.shape[1]):
result[i, j] = np.sign(result[i, j]) * np.sqrt(np.abs(result[i, j]))
return result | d7f9eaad6b7292f2c28621f361094a88e7deb8a6 | 2,170 |
def load(
filename,
rsc_file=None,
rows=None,
cols=None,
band=1,
**kwargs,
):
"""Load a file, either using numpy or rasterio"""
if rsc_file:
rsc_data = load_rsc(rsc_file)
return load_stacked_img(filename, rsc_data=rsc_data, rows=rows, cols=cols)
else:
        try:
            import rasterio as rio
        except ImportError:
            raise ValueError("Need to `conda install rasterio` to load gdal-readable")
with rio.open(filename) as src:
return src.read(band) | 873933b80b7e87f64b10ad74cc8ed25238a93fb3 | 2,171 |
def simple_scan_network():
"""
Do a simple network scan, which only works if your network configuration
is 192.168.1.x
"""
base_ip = "192.168.1."
addresses = ['127.0.0.1']
for index in range(1, 255):
addresses.extend([base_ip + str(index)])
return addresses | b0f19ae1c98678e87d270b308b5359df9a6a4d30 | 2,172 |
def channel_lvlv_2jet():
""" Mostly based on table 8 of the combination paper for the uncertainties and
table 9 for the event counts. """
channel = ROOT.RooStats.HistFactory.Channel( "HWWlvlv2Jet" )
container.append(channel)
channel.SetData(55)
background = ROOT.RooStats.HistFactory.Sample("background")
background.SetValue(36*1.1)
# background.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036)
# background.AddOverallSys("JES", 0.93, 1.07)
channel.AddSample(background)
container.append(background)
signalGGFttH = ROOT.RooStats.HistFactory.Sample("signalGGFttH")
signalGGFttH.SetValue(10.9*1.00*0.19) # increase by a factor for better agreement with ATLAS contour
signalGGFttH.AddNormFactor("mu", 1, 0, 6)
signalGGFttH.AddNormFactor("mu_XS8_ggF", 1, -5, 10)
signalGGFttH.AddNormFactor("muT_lvlv", 1, -5, 10)
signalGGFttH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036)
signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13)
signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH2in", 0.96, 1.04)
signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH3in", 0.96, 1.04)
signalGGFttH.AddOverallSys("QCDscale_Higgs_acceptance_2jet", 0.97, 1.03)
signalGGFttH.AddOverallSys("UE_2jet", 0.95, 1.05)
signalGGFttH.AddOverallSys("JES", 0.94, 1.06)
channel.AddSample(signalGGFttH)
container.append(signalGGFttH)
signalVBFVH = ROOT.RooStats.HistFactory.Sample("signalVBFVH")
signalVBFVH.SetValue(10.9*1.000*0.81) # increase by a factor for better agreement with ATLAS contour
signalVBFVH.AddNormFactor("mu", 1, 0, 6)
signalVBFVH.AddNormFactor("mu_XS8_VBF", 1, -5, 10)
signalVBFVH.AddNormFactor("muW_lvlv", 1, -5, 10)
signalVBFVH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036)
signalVBFVH.AddOverallSys("UE_2jet", 0.95, 1.05)
signalVBFVH.AddOverallSys("JES", 0.94, 1.06)
channel.AddSample(signalVBFVH)
container.append(signalVBFVH)
return channel | f60609a0bf6f22dc850fcb52c4a19b6bae737abc | 2,174 |
def vtkVariantStrictEquality(s1, s2):
"""
Check two variants for strict equality of type and value.
"""
s1 = vtk.vtkVariant(s1)
s2 = vtk.vtkVariant(s2)
t1 = s1.GetType()
t2 = s2.GetType()
# check based on type
if t1 != t2:
return False
v1 = s1.IsValid()
v2 = s2.IsValid()
# check based on validity
if (not v1) and (not v2):
return True
elif v1 != v2:
return False
# extract and compare the values
r1 = getattr(s1, _variant_method_map[t1])()
r2 = getattr(s2, _variant_method_map[t2])()
return (r1 == r2) | cb529c35f6dfc7e20fcff79d5c38b41bd43f1292 | 2,175 |
def is_network_failure(error):
"""Returns True when error is a network failure."""
return ((isinstance(error, RETRY_URLLIB_EXCEPTIONS)
and error.code in RETRY_HTTP_CODES) or
isinstance(error, RETRY_HTTPLIB_EXCEPTIONS) or
isinstance(error, RETRY_SOCKET_EXCEPTIONS) or
isinstance(error, RETRY_REQUESTS_EXCEPTIONS) or
is_retriable_requests_httperror(error)) | 647d10b257b1cb7f78243629edd2b425104f1787 | 2,176 |
import torch
def predict(model, X, threshold=0.5):
"""Generate NumPy output predictions on a dataset using a given model.
Args:
model (torch model): A Pytroch model
X (dataloader): A dataframe-based gene dataset to predict on
"""
X_tensor, _ = convert_dataframe_to_tensor(X, [])
model.eval()
with torch.no_grad():
y_pred = (model(X_tensor) >= threshold).int().numpy()
return y_pred | 57b6137cc8f7e0753e6438432f56b471717a5d88 | 2,177 |
import cv2
import numpy as np
def color_image(
img: np.ndarray, unique_colors=True, threshold=100, approximation_accuracy=150
) -> np.ndarray:
"""
This function detects simple shapes in the image and colors them.
    Detected figures will also be labelled in the final image. The function
    can detect triangles, quadrilaterals, and circles; any other figure will be
marked "UNEXPECTED".
The algorithm uses OpenCV to find contours on a grayscale version of
the image. Then it uses a polygon approximation algorithm to reduce the
number of vertices in contours. The resulted polygons are used to identify
and color figures in the image.
parameters:
img - image with figures to color
        unique_colors - flag to color every figure in a unique color,
            independently of the number of vertices. The default behavior is
            coloring all the figures of the same type in one color
        threshold - background threshold for the grayscale image, used by the
            algorithm to separate figures from the background
approximation_accuracy - accuracy of polygon approximation for
detected contours
output:
the image with colored and subscribed figures
"""
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# apply threshold
thresholded_im = np.zeros(img.shape[:2], dtype=np.uint8)
thresholded_im[gray > threshold] = 255
contours, _ = cv2.findContours(
thresholded_im, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
if unique_colors:
colors = gen_colors(len(contours))
for i, contour in enumerate(contours):
# find positions of vertices to count them
# we need some value to estimate approximation accuracy - let it be perimeter
object_perimeter = cv2.arcLength(contour, closed=True)
approx = cv2.approxPolyDP(
contour, epsilon=object_perimeter / approximation_accuracy, closed=True
)
n_vertices = len(approx)
# find object centers
# M = cv2.moments(contour)
x, y = approx.squeeze().mean(axis=0).astype(int)
# offset to the left for x
x = (x + 2 * approx[:, 0, 0].min()) // 3
# COLORING PART
# highlight contours
cv2.drawContours(img, [contour], 0, (255, 255, 255), 4)
# fill the object
if unique_colors:
color = colors[i].tolist()
else:
color = get_color_for_figure(n_vertices)
cv2.fillPoly(img, pts=[contour], color=color)
# subscribe the figure
print_figure_name(img, n_vertices, (x, y))
return img | 5637febc69dcc2b3e641f0d79f2e21c6dc7d04ec | 2,178 |
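# Hedged usage sketch (file names are placeholders): read an image of light shapes on
# a dark background, color the detected figures in place, and write the result.
import cv2

img = cv2.imread("shapes.png")
result = color_image(img, unique_colors=False, threshold=100)
cv2.imwrite("shapes_colored.png", result)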
from pathlib import Path
def restore_model(pb_path):
"""Restore the latest model from the given path."""
subdirs = [x for x in Path(pb_path).iterdir()
if x.is_dir() and 'temp' not in str(x)]
latest_model = str(sorted(subdirs)[-1])
predict_fn = predictor.from_saved_model(latest_model)
return predict_fn | bded95b196081e19ca1c70127871abb99d3526d0 | 2,179 |
import math
def _generate_resolution_shells(low, high):
"""Generate 9 evenly spaced in reciprocal space resolution
shells from low to high resolution, e.g. in 1/d^2."""
dmin = (1.0 / high) * (1.0 / high)
dmax = (1.0 / low) * (1.0 / low)
diff = (dmin - dmax) / 8.0
shells = [1.0 / math.sqrt(dmax)]
for j in range(8):
shells.append(1.0 / math.sqrt(dmax + diff * (j + 1)))
return shells | 52fa4309f2f34a39a07d8524dd7f226e3d1bae6a | 2,180 |
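# Small worked example (resolution limits in angstroms are made up): nine limits
# evenly spaced in 1/d**2, running from the low- to the high-resolution bound.
shells = _generate_resolution_shells(low=50.0, high=1.5)
# approximately [50.0, 4.23, 3.00, 2.45, 2.12, 1.90, 1.73, 1.60, 1.5]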
from typing import Optional
from typing import Tuple
def add_ports_from_markers_square(
component: Component,
pin_layer: Layer = (69, 0),
port_layer: Optional[Layer] = None,
orientation: Optional[int] = 90,
min_pin_area_um2: float = 0,
max_pin_area_um2: float = 150 * 150,
pin_extra_width: float = 0.0,
port_names: Optional[Tuple[str, ...]] = None,
port_name_prefix: str = "o",
) -> Component:
"""add ports from markers center in port_layer
squared
Args:
component: to read polygons from and to write ports to
pin_layer: for port markers
port_layer: for the new created port
orientation: in degrees 90: north, 0: east, 180: west, 270: south
min_pin_area_um2: ignores pins with area smaller than min_pin_area_um2
max_pin_area_um2: ignore pins for area above certain size
pin_extra_width: 2*offset from pin to straight
port_names: names of the ports (defaults to {i})
"""
port_markers = read_port_markers(component, [pin_layer])
port_names = port_names or [
f"{port_name_prefix}{i+1}" for i in range(len(port_markers.polygons))
]
layer = port_layer or pin_layer
for port_name, p in zip(port_names, port_markers.polygons):
dy = snap_to_grid(p.ymax - p.ymin)
dx = snap_to_grid(p.xmax - p.xmin)
x = p.x
y = p.y
if dx == dy and max_pin_area_um2 > dx * dy > min_pin_area_um2:
component.add_port(
port_name,
midpoint=(x, y),
width=dx - pin_extra_width,
orientation=orientation,
layer=layer,
)
return component | 68858a17b5187e064232f0c101ddf9c4e812c233 | 2,181 |
def P(Document, *fields, **kw):
"""Generate a MongoDB projection dictionary using the Django ORM style."""
__always__ = kw.pop('__always__', set())
projected = set()
omitted = set()
for field in fields:
if field[0] in ('-', '!'):
omitted.add(field[1:])
elif field[0] == '+':
projected.add(field[1:])
else:
projected.add(field)
if not projected: # We only have exclusions from the default projection.
names = set(getattr(Document, '__projection__', Document.__fields__) or Document.__fields__)
projected = {name for name in (names - omitted)}
projected |= __always__
if not projected:
projected = {'_id'}
return {unicode(traverse(Document, name, name)): True for name in projected} | d88a428f5eae1e57bd3b5ddf0d31e6e7c122c27d | 2,182 |
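# Hedged usage sketch; `Article` is a hypothetical document class whose __fields__
# contain 'title', 'body' and 'author'. Plain names project only those fields,
# while a '-'/'!' prefix excludes a field from the default projection.
P(Article, 'title', 'author')            # include only 'title' and 'author'
P(Article, '-body')                      # default projection minus 'body'
P(Article, '!body', __always__={'_id'})  # same exclusion, but always keep '_id'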
def get_page_url(skin_name, page_mappings, page_id):
""" Returns the page_url for the given page_id and skin_name """
fallback = '/'
if page_id is not None:
return page_mappings[page_id].get('path', '/')
return fallback | 6ead4824833f1a7a002f54f83606542645f53dd6 | 2,183 |
def create_form(erroneous_form=None):
"""Show a form to create a guest server."""
party_id = _get_current_party_id_or_404()
setting = guest_server_service.get_setting_for_party(party_id)
form = erroneous_form if erroneous_form else CreateForm()
return {
'form': form,
'domain': setting.domain,
} | 2d8e9cd6597e4ccb1b9f39d77cca45b354d99371 | 2,185 |
def apply(task, args, kwargs, **options):
"""Apply the task locally.
This will block until the task completes, and returns a
:class:`celery.result.EagerResult` instance.
"""
args = args or []
kwargs = kwargs or {}
task_id = options.get("task_id", gen_unique_id())
retries = options.get("retries", 0)
task = tasks[task.name] # Make sure we get the instance, not class.
default_kwargs = {"task_name": task.name,
"task_id": task_id,
"task_retries": retries,
"task_is_eager": True,
"logfile": None,
"delivery_info": {"is_eager": True},
"loglevel": 0}
supported_keys = fun_takes_kwargs(task.run, default_kwargs)
extend_with = dict((key, val) for key, val in default_kwargs.items()
if key in supported_keys)
kwargs.update(extend_with)
trace = TaskTrace(task.name, task_id, args, kwargs, task=task)
retval = trace.execute()
return EagerResult(task_id, retval, trace.status, traceback=trace.strtb) | 600bc142ca8d96bd020db5cb82103169d255d970 | 2,186 |
from typing import Optional
from typing import Callable
def exp_post_expansion_function(expansion: Expansion) -> Optional[Callable]:
"""Return the specified post-expansion function, or None if unspecified"""
return exp_opt(expansion, 'post') | 6d49f5e40b7c900470a5c84b37d9da1666b217c2 | 2,187 |
def return_(x):
"""Implement `return_`."""
return x | 6557a37db2020bdbb0f9dcf587f2bd42509ff937 | 2,188 |
def create(platformDetails):
"""
This function creates a new platform in the platform list
based on the passed in platform data
:param platform: platform to create in platform structure
:return: 201 on success, 406 on platform exists
"""
# Remove id as it's created automatically
if "id" in platformDetails:
del platformDetails["id"]
# Does the platform exist already?
existing_platform = (
db.session.query(Platform)
.filter(Platform.value == platformDetails["value"])
.one_or_none()
)
if existing_platform is None:
schema = PlatformSchema()
new_platform = schema.load(platformDetails, session=db.session)
db.session.add(new_platform)
db.session.commit()
# Serialize and return the newly created deployment
# in the response
data = schema.dump(new_platform)
return data, 201
# Otherwise, it already exists, that's an error
else:
abort(406, "Platform already exists") | a6b27d6b530ccc11134a001ac3b49c6cb89475a3 | 2,190 |