content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def shift_time(x, dt):
"""Shift time axis to the left by dt. Used to account for pump & lamp delay"""
x -= dt
return x | c93fdddea8e41221583139dcc7a2d81177ba7c17 | 8,800 |
from datetime import datetime, timedelta
import json
def eps_xfer(request,client_slug=None,show_slug=None):
"""
Returns all the episodes for a show as json.
    Used to sync public URLs with the main conference site.
"""
client=get_object_or_404(Client,slug=client_slug)
show=get_object_or_404(Show,client=client,slug=show_slug)
# eps = Episode.objects.filter(show=show)
eps=eps_filters(request.GET).filter(show=show).order_by('start')
if "id" in request.GET:
eps = eps.filter( id=request.GET['id'] )
fields=['id',
'state',
'location', 'location_slug',
'sequence',
'name', 'slug', 'authors', 'description',
'start', 'duration',
'released', 'license', 'tags',
'conf_key', 'conf_url',
'host_url', 'public_url', 'rax_mp4_url',
'archive_url', 'archive_mp4_url',
'twitter_url',
'comment',
]
if request.user.is_authenticated():
fields.extend(['emails', 'edit_key',])
if "fields" in request.GET:
fields_whitelist = request.GET['fields'].split(',')
print(fields_whitelist)
fields = [f for f in fields if f in fields_whitelist]
"""
serializers.serialize("json", eps,
fields=fields, use_natural_foreign_keys=True,
stream=response)
"""
gold_list = ['location', 'location_slug']
ds=[]
for ep in eps:
d = {}
for f in fields:
if f == 'location':
d[f] = ep.location.name
elif f == 'location_slug':
                d[f] = ep.location.slug
else:
d[f]=getattr(ep,f)
# archive_mp4_url is really the url of the page
# make a mp4 url too
# the mp4 link is now:
# https://archive.org/download/pyohio_2019-Changing_Lives_through_Open_Source_Passion_and_Mentoring/Changing_Lives_through_Open_Source_Passion_and_Mentoring.mp4
if 'archive_mp4_url' in d:
d['archive_url'] = d['archive_mp4_url']
d['archive_mp4_url'] = ""
if 'start' in d:
            d['start_at'] = (d['start']
                             - timedelta(minutes=5)
                             ).strftime('%H:%M %d.%m.%Y')
ds.append(d)
response = HttpResponse(content_type="application/json")
json.dump( ds, response, cls=serializers.json.DjangoJSONEncoder )
return response | 9a6691e0ac750919b5915e45ace0c347aa83cbe3 | 8,801 |
def register(class_, option=None, get_funcs={}):
"""A decorator to register a function as the way to display an object of class_
"""
if option:
key = (class_, option)
else:
key = class_
def decorator(func):
class_function_mapping[key] = (func, get_funcs)
return func
return decorator | c060691dd9e2760905e29a2c643dfa63d4ed029c | 8,802 |
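A minimal usage sketch for the decorator above, assuming the module-level `class_function_mapping` dict it references; `Fraction` and `show_fraction_inline` are made-up names for illustration:

```python
class_function_mapping = {}  # the registry the decorator above assumes

class Fraction:
    def __init__(self, num, den):
        self.num, self.den = num, den

@register(Fraction, option="inline")
def show_fraction_inline(obj):
    return f"{obj.num}/{obj.den}"

# The decorated function is stored under the (class_, option) key,
# together with its (empty by default) get_funcs mapping.
func, get_funcs = class_function_mapping[(Fraction, "inline")]
print(func(Fraction(1, 2)))  # 1/2
```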
def startup(target: machine.Machine,
workload: str,
count: int = 5,
port: int = 0,
**kwargs):
"""Time the startup of some workload.
Args:
target: A machine object.
workload: The workload to run.
count: Number of containers to start.
port: The port to check for liveness, if provided.
**kwargs: Additional container options.
Returns:
The mean start-up time in seconds.
"""
# Load before timing.
image = target.pull(workload)
netcat = target.pull("netcat")
count = int(count)
port = int(port)
with helpers.Timer() as timer:
for _ in range(count):
if not port:
# Run the container synchronously.
target.container(image, **kwargs).run()
else:
# Run a detached container until httpd available.
with target.container(image, port=port, **kwargs).detach() as server:
(server_host, server_port) = server.address()
target.container(netcat).run(host=server_host, port=server_port)
return timer.elapsed() / float(count) | c53b627d95270aa074f9178e1ebcd6ea49b8eeaa | 8,803 |
def schema_handler(request):
"""
Handle schema request from UI.
"""
logger.debug("schema_handler: enter")
req = request.GET.get('payload', '')
action = request.GET.get('action', '')
logger.debug('Received schema Request (%s)' % action)
if not request.user.is_authenticated():
logger.error('User must be logged in !!')
return HttpResponse(Response.error(action, 'Unauthorized'))
if action == 'get-schema':
return get_schema(request, req)
elif action == 'get-all-schema':
return get_schema(request, req, all=True)
elif action == 'download-schema':
return download_schema(request, req)
elif action == 'add-schema':
return add_schema(request, req) | 0a7997e5ad946aebfbf915b1dd2dddcf46d5fe2e | 8,804 |
def log2_fold_change(df, samp_grps):
"""
calculate fold change - fixed as samp_grps.mean_names[0] over samp_grps.mean_names[1],
where the mean names are sorted alphabetically. The log has already been taken,
so the L2FC is calculated as mean0 - mean1
:param df: expanded and/or filtered dataframe
:param samp_grps: SampleGroups() object
:return: dataframe with fold change column appended, with name as in samp_grps.fc_name
"""
mean1 = samp_grps.mean_names[0]
mean2 = samp_grps.mean_names[1]
df[samp_grps.fc_name] = df[mean1] - df[mean2]
return df | 07fcef6f5143095f4f8f77d0251bbd7ecd486fd9 | 8,805 |
def infer_wheel_units(pos):
"""
Given an array of wheel positions, infer the rotary encoder resolution, encoding type and units
The encoding type varies across hardware (Bpod uses X1 while FPGA usually extracted as X4), and
older data were extracted in linear cm rather than radians.
:param pos: a 1D array of extracted wheel positions
:return units: the position units, assumed to be either 'rad' or 'cm'
:return resolution: the number of decoded fronts per 360 degree rotation
:return encoding: one of {'X1', 'X2', 'X4'}
"""
if len(pos.shape) > 1: # Ensure 1D array of positions
pos = pos.flatten()
# Check the values and units of wheel position
res = np.array([wh.ENC_RES, wh.ENC_RES / 2, wh.ENC_RES / 4])
# min change in rad and cm for each decoding type
# [rad_X4, rad_X2, rad_X1, cm_X4, cm_X2, cm_X1]
min_change = np.concatenate([2 * np.pi / res, wh.WHEEL_DIAMETER * np.pi / res])
pos_diff = np.median(np.abs(np.ediff1d(pos)))
# find min change closest to min pos_diff
idx = np.argmin(np.abs(min_change - pos_diff))
if idx < len(res):
# Assume values are in radians
units = 'rad'
encoding = idx
else:
units = 'cm'
encoding = idx - len(res)
enc_names = {0: 'X4', 1: 'X2', 2: 'X1'}
return units, int(res[encoding]), enc_names[int(encoding)] | 82d1a63c11c31d4de83ba5360def223b85194ef9 | 8,806 |
def extract_tform(landmarks, plane_name):
"""Compute the transformation that maps the reference xy-plane at origin to the GT standard plane.
Args:
landmarks: [landmark_count, 3] where landmark_count=16
plane_name: 'tv' or 'tc'
Returns:
trans_vec: translation vector [3]
quat: quaternions [4]
mat: 4x4 transformation matrix [4, 4]
"""
if plane_name == 'tv':
# Landmarks lying on the TV plane
landmarks_plane = np.vstack((landmarks[1:8], landmarks[12:14]))
# Compute transformation
z_vec, p_plane = fit_plane(landmarks_plane)
landmarks_plane_proj = project_on_plane(landmarks_plane, z_vec, p_plane)
landmarks_line = landmarks_plane_proj[[0, 1, 2, 7, 8], :]
x_vec, p_line = fit_line(landmarks_line)
y_vec = geometry.unit_vector(np.cross(z_vec, x_vec))
# 4x4 transformation matrix
mat = np.eye(4)
mat[:3, :3] = np.vstack((x_vec, y_vec, z_vec)).transpose()
mat[:3, 3] = landmarks_plane_proj[0]
# Quaternions and translation vector
quat = geometry.quaternion_from_matrix(mat[:3, :3])
trans_vec = mat[:3, 3]
elif plane_name == 'tc':
# Landmarks lying on the TC plane
cr = landmarks[10]
cl = landmarks[11]
csp = landmarks[12]
# Compute transformation
csp_cl = cl - csp
csp_cr = cr - csp
z_vec = np.cross(csp_cl, csp_cr)
z_vec = geometry.unit_vector(z_vec)
cr_cl_mid = (cr + cl) / 2.0
x_vec = geometry.unit_vector(cr_cl_mid - csp)
y_vec = geometry.unit_vector(np.cross(z_vec, x_vec))
# 4x4 transformation matrix
mat = np.eye(4)
mat[:3, :3] = np.vstack((x_vec, y_vec, z_vec)).transpose()
mat[:3, 3] = (cr_cl_mid + csp) / 2.0
# Quaternions and translation vector
quat = geometry.quaternion_from_matrix(mat[:3, :3])
trans_vec = mat[:3, 3]
else:
raise ValueError('Invalid plane name.')
return trans_vec, quat, mat | d9d4ed43c9572cdd76b34235e380f22a6eb27d03 | 8,807 |
from typing import TextIO
import csv
import numpy as np
def load_events(fhandle: TextIO) -> annotations.Events:
"""Load an URBAN-SED sound events annotation file
Args:
fhandle (str or file-like): File-like object or path to the sound events annotation file
Raises:
        IOError: if the provided file path doesn't exist
Returns:
Events: sound events annotation data
"""
times = []
labels = []
confidence = []
reader = csv.reader(fhandle, delimiter="\t")
for line in reader:
times.append([float(line[0]), float(line[1])])
labels.append(line[2])
confidence.append(1.0)
events_data = annotations.Events(
np.array(times), "seconds", labels, "open", np.array(confidence)
)
return events_data | 2c2017d754fe12ebd37349b359ba6a92ec115421 | 8,808 |
import os
import pandas as pd
def loadandcleanRAPIDexport(rapidsubproductsexport):
"""
    :param rapidsubproductsexport: an Excel file name with ".xlsx" extension
:return:
"""
exportfile = os.path.realpath("gannt_data/"+ rapidsubproductsexport)
df = pd.read_excel(exportfile,na_values=["-"])
df = convertFYQfieldstodates(df)
df = merge_product_subproduct(df)
df = splitnamefields(df)
return df | 0ff955267ec0baa3e67d0e65e945317b2ea57128 | 8,809 |
def set_nan(df, chrom_bed_file):
"""This function will take in a dataframe and chromosome length bed file
and will replace 0's with np.nan according to each chromosome length.
This will fix any issues when calculating Z-scores"""
# Build dictionary of key=chromosome and value=chromosome_length
chrom_length_dict = {}
for v in chrom_bed_file.itertuples():
chrom_length_dict[v[1]] = v[2]
continue
# Iterate through each column
for chrom in df.columns.to_list():
current_chrom_length = chrom_length_dict[str(chrom)]
# Iterate through each value of a column in reverse
for index, value in zip(
reversed(df.index.to_list()),
reversed(df[chrom].to_list())
):
# Check if index is greater than length of chromosome
if index > current_chrom_length:
df.at[index, chrom] = np.nan
else:
break
return df | e90008c42db5a94c8676c941da5832438301a724 | 8,810 |
def configure_smoothing(new_d,smoothing_scans):
"""
# <batchstep method="net.sf.mzmine.modules.peaklistmethods.peakpicking.smoothing.SmoothingModule">
# <parameter name="Peak lists" type="BATCH_LAST_PEAKLISTS"/>
# <parameter name="Filename suffix">smoothed</parameter>
# <parameter name="Filter width">9</parameter>
# <parameter name="Remove original peak list">false</parameter>
# </batchstep>
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'SmoothingModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Filter width' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(smoothing_scans)
return new_d | 031586cf5dbb9fdf1fb6762a89a988367d172942 | 8,811 |
def contact_us():
""" Contact Us Route
Route to lead to the contact page
Args:
None
Returns:
rendered template for contact_us.html
"""
return render_template('contact_us.html', title='CONP | Contact Us', user=current_user) | 2597038074e8f60e14066f10390a161b15cf7071 | 8,812 |
def costFunc1(x, module, output, col, row, bbox, img, prfObj):
"""Debugging function.
Does the same as costFunc, but col and row are constants,
and only the brightness of the prf can be changed.
"""
model = prfObj.getPrfForBbox(module, output, col, row, bbox)
model *= x[0]
cost = img-model
cost = np.sum(cost**2)
return cost | 1af21d844482d773e0bf279a49f12520ffb27aa8 | 8,813 |
import pandas
import psycopg2
def query_field(boresight, r1=None, r2=None, observatory='apo',
mag_range=None, mag_column=None, database_params=None):
"""Selects Gaia DR2 stars for a field, from the database.
Parameters
----------
boresight : tuple
A tuple with the right ascension and declination of the boresight,
in degrees.
r1,r2 : float
The internal and external radii along which the GFAs are located, in
degrees.
observatory : str
The observatory, used to load the default configuration for the GFAs.
mag_range : tuple
The range of magnitudes used to select stars.
mag_column : str
The name of the magnitude column to query.
database_params : dict
A dictionary of database parameters to create the connection. Can
include ``user``, ``host``, ``port``, and ``dbname``.
Returns
-------
`~pandas.Dataframe`
A dataframe with the selected stars.
"""
obs_data = config[observatory]
r1 = r1 or obs_data['r1']
r2 = r2 or obs_data['r2']
mag_range = mag_range or config['mag_range']
mag_column = mag_column or config['mag_column']
query = ('WITH x AS MATERIALIZED (SELECT source_id, ra, dec, '
'{mag_column}, pmra, pmdec '
'FROM gaia_dr2_source WHERE '
'q3c_radial_query(ra, dec, {ra}, {dec}, {r2}) AND '
'NOT q3c_radial_query(ra, dec, {ra}, {dec}, {r1})) '
'SELECT * FROM x WHERE {mag_column} > {g_min} AND '
'{mag_column} < {g_max};')
query = query.format(ra=boresight[0], dec=boresight[1], r1=r1, r2=r2,
g_min=mag_range[0], g_max=mag_range[1],
mag_column=mag_column)
if database_params is None:
database_params = config['database']
conn_str = ''
for key in database_params:
conn_str += f'{key}={database_params[key]} '
connection = psycopg2.connect(conn_str)
data = pandas.read_sql(query, connection)
connection.close()
return data | c05276ecfac3b33dcc5382cf54e220b416614656 | 8,814 |
def get_throttling_equilibria(simulation_config, input_params, priority_queue=True, dev_team_factor=1.0):
"""
Returns the equilibrium profiles for throttling configuration under analysis.
:param simulation_config:
:param input_params:
:return:
"""
desc_inf003 = "THROTTLING_INF003"
process_configuration_inf003 = dict(simulation_config)
process_configuration_inf003["THROTTLING_ENABLED"] = True
process_configuration_inf003["GATEKEEPER_CONFIG"] = None
process_configuration_inf003["INFLATION_FACTOR"] = 0.03
process_configuration_inf003["SUCCESS_RATE"] = 0.95
if priority_queue and dev_team_factor == 0.5:
filename_inf003 = "INF3.0_PRIQUEUE_True_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_True_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_True_DEVFACTOR_0.5_equilibrium_results.csv"
elif priority_queue and dev_team_factor == 1.0:
filename_inf003 = "INF3.0_PRIQUEUE_True_DEVFACTOR_1.0_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_True_DEVFACTOR_1.0_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_True_DEVFACTOR_1.0_equilibrium_results.csv"
elif not priority_queue and dev_team_factor == 0.5:
filename_inf003 = "INF3.0_PRIQUEUE_False_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_False_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_False_DEVFACTOR_0.5_equilibrium_results.csv"
elif not priority_queue and dev_team_factor == 1.0:
filename_inf003 = "INF3.0_PRIQUEUE_False_DEVFACTOR_1.0_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_False_DEVFACTOR_1.0_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_False_DEVFACTOR_1.0_equilibrium_results.csv"
equilibrium_profiles_inf003 = get_profiles_from_file("csv/" + filename_inf003, scenario_desc=desc_inf003,
input_params=input_params)
desc_inf010 = "THROTTLING_INF010"
process_configuration_inf010 = dict(process_configuration_inf003)
process_configuration_inf010["INFLATION_FACTOR"] = 0.10
equilibrium_profiles_inf010 = get_profiles_from_file("csv/" + filename_inf010, scenario_desc=desc_inf010,
input_params=input_params)
desc_inf020 = "THROTTLING_INF020"
process_configuration_inf020 = dict(process_configuration_inf003)
process_configuration_inf020["INFLATION_FACTOR"] = 0.20
equilibrium_profiles_inf020 = get_profiles_from_file("csv/" + filename_inf020, scenario_desc=desc_inf020,
input_params=input_params)
return [{"desc": desc_inf003,
"simulation_configuration": process_configuration_inf003,
"equilibrium_profiles": equilibrium_profiles_inf003},
{"desc": desc_inf010,
"simulation_configuration": process_configuration_inf010,
"equilibrium_profiles": equilibrium_profiles_inf010},
{"desc": desc_inf020,
"simulation_configuration": process_configuration_inf020,
"equilibrium_profiles": equilibrium_profiles_inf020}] | 4e0f6dd8fa3b0b36b713b33ab1a5aaf8394d4942 | 8,815 |
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__) | 522ae88538d6dd880492292c6f2ef169f3bbd06d | 8,816 |
def clean_key(func):
"""Provides a clean, readable key from the funct name and module path.
"""
module = func.__module__.replace("formfactoryapp.", "")
return "%s.%s" % (module, func.__name__) | 946288cd231148eb39af5d1e7e0b957d9f2131e8 | 8,817 |
def rotY(M, alpha):
"""Rotates polygon M around Y axis by alpha degrees.
M needs to be a Numpy Array with shape (4,N) with N>=1"""
T = np.eye(4)
alpha_radians = np.radians(alpha)
sin = np.sin(alpha_radians)
cos = np.cos(alpha_radians)
T[0,0] = cos
T[2,2] = cos
T[0,2] = sin
T[2,0] = -sin
return np.dot(T,M) | 49e850ff66b3c7877e6d8b4a450baaa6707d4f15 | 8,818 |
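A quick check of the rotation convention, assuming `numpy` is imported as `np` as the function body implies:

```python
import numpy as np

# One homogeneous point on the +X axis, shape (4, 1).
point = np.array([[1.0], [0.0], [0.0], [1.0]])

rotated = rotY(point, 90)
# With this matrix, rotating +X by 90 degrees about Y lands on -Z.
print(np.round(rotated, 6).ravel())  # [ 0.  0. -1.  1.]
```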
def is_image_file(filename):
"""
:param filename:
:return:
"""
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) | 40125478c6440efc9a740d2df57ba2f7bb15a5d1 | 8,819 |
def invert_comp_specifier(comp_specifier):
""" return the opposite (logical negation) of @p comp_specifier """
inverse_map = {
Comparison.Equal: Comparison.NotEqual,
Comparison.Less: Comparison.GreaterOrEqual,
Comparison.LessOrEqual: Comparison.Greater,
Comparison.NotEqual: Comparison.Equal,
Comparison.Greater: Comparison.LessOrEqual,
Comparison.GreaterOrEqual: Comparison.Less,
}
return inverse_map[comp_specifier] | 187392dd1dc7f52c744536e8e372cab752ff8c85 | 8,820 |
import utm
def latlong2utm(point):
"""
This function converts a point from lat long to utm
Input : point : (lat,long)
Output : utm point : (x,y,z, n)
"""
return utm.from_latlon(point[0],point[1]) | 3ee82f9df84b02aa35fa0f2a35ec0916edf30e42 | 8,821 |
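A small usage example; the `utm` package's `from_latlon` returns an (easting, northing, zone_number, zone_letter) tuple, which is what the docstring's "(x,y,z, n)" refers to:

```python
# Central Paris, approximately.
easting, northing, zone_number, zone_letter = latlong2utm((48.8584, 2.2945))
print(zone_number, zone_letter)  # 31 U
```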
def multiply(a,b):
"""
multiply values
Args:
a ([float/int]): any value
b ([float/int]): any value
"""
return a*b | 67a85b1675da48684e9de7e9834d3daa4357699b | 8,822 |
from typing import Tuple
from typing import Dict
from typing import List
import regex
def merge_vocab(pair: Tuple[str, str], input_vocab: Dict[str, int]) -> Tuple[Dict[str, int], List]:
"""
>>> pair = ('w', 'o')
>>> input_vocab = {'b i r d @': 3, 'w o r d @': 7, 'w o g @': 13}
>>> new_vocab, new_pairs = merge_vocab(pair, input_vocab)
>>> new_vocab
{'b i r d @': 3, 'wo r d @': 7, 'wo g @': 13}
>>> new_pairs
[(('wo', 'r'), 7), (('o', 'r'), -7), (('wo', 'g'), 13), (('o', 'g'), -13)]
"""
output_vocab = {}
concat_pair_with_space = ' '.join(pair)
concat_pair_with_space_escaped = regex.escape(concat_pair_with_space)
concat_pair = ''.join(pair)
reg = regex.compile('(^|[^ ]+ )(' + concat_pair_with_space_escaped + ')( [^ ]+|$)')
added_pairs = []
for word in input_vocab:
word_occurences = input_vocab[word]
match = reg.search(word)
while match:
# word changed
if match.group(1) != '':
subtoken_before = match.group(1)[:-1]
added_pairs.append(((subtoken_before, concat_pair), word_occurences))
if pair != (subtoken_before, pair[0]):
added_pairs.append(((subtoken_before, pair[0]), -word_occurences))
if match.group(3) != '':
subtoken_after = match.group(3)[1:]
added_pairs.append(((concat_pair, subtoken_after), word_occurences))
if pair != (pair[1], subtoken_after):
added_pairs.append(((pair[1], subtoken_after), -word_occurences))
start, end = match.span(2)
replacement = concat_pair
word = word[:start] + replacement + word[end:]
match = reg.search(word)
output_vocab[word] = word_occurences
return output_vocab, added_pairs | 15226aa9ebd9cae73e5bd00b60cb1b3bbb5d8e07 | 8,823 |
def visualize_bbox_act(img, bboxes,labels, act_preds,
classes=None,thickness=1,
font_scale=0.4,show=False,
wait_time=0,out_file=None):
"""Show the tracks with opencv."""
assert bboxes.ndim == 2
assert labels.ndim == 1
assert bboxes.shape[0] == labels.shape[0]
assert bboxes.shape[1] == 5
if isinstance(img, str):
img = mmcv.imread(img)
img_shape = img.shape
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
text_width, text_height = 8, 15
for i, (bbox, label) in enumerate(zip(bboxes, labels), 0):
x1, y1, x2, y2 = bbox[:4].astype(np.int32)
score = float(bbox[-1])
# bbox
bbox_color = random_color(label)
bbox_color = [int(255 * _c) for _c in bbox_color][::-1]
cv2.rectangle(img, (x1, y1), (x2, y2), bbox_color, thickness=thickness)
# score
text = '{:.02f}'.format(score)
width = len(text) * text_width
img[y1 - text_height:y1, x1:x1 + width, :] = bbox_color
cv2.putText(
img,
text, (x1, y1 - 2),
cv2.FONT_HERSHEY_COMPLEX,
font_scale,
color=(0, 0, 0))
classes_color = random_color(label + 1)
text = classes[label]
width = len(text) * text_width
img[y1:y1 + text_height, x1:x1 + width, :] = bbox_color
cv2.putText(img,text,
(x1, y1 + text_height - 2),
cv2.FONT_HERSHEY_COMPLEX,
font_scale,color=classes_color)
#background_color = random_color(label + 5)
background_color = [255, 204, 153]
if (act_preds is not None) and (len(bboxes)==len(labels)==len(act_preds)):
for j, act_pred in enumerate(act_preds[i]):
text = '{}: {:.02f}'.format(act_pred[0], act_pred[1])
width = len(text) * (text_width)
img[y1+text_height*(j+2) :y1 + text_height*(j+3), x1:x1 + width, :] = background_color
cv2.putText(img, text,
(x1, y1 + text_height*(j+3) - 2),
cv2.FONT_HERSHEY_COMPLEX,
font_scale, color=classes_color)
if show:
mmcv.imshow(img, wait_time=wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
return img | de67d5acba2b2994ec2b66ae4e7e0c58498ecebe | 8,824 |
def calculate_similarity(subgraph_degrees):
"""
Given a list of subgraph degrees, this function calls the guidance
function and calculates the similarity of a particular node with all it's
non-connected nodes.
:param subgraph_degrees: A list of lists containing the non connected node
and degrees of common neighbours from the subgraph.
    :return: A list of (node, similarity) tuples, one for each non-connected node
"""
similarity_dict = []
for nc_node in subgraph_degrees:
similarity = 0
for common_node in nc_node[1]:
# Getting the degree of the common neighbour node from the original
# graph
original_degree = graph.degrees.filter("id = '{}'".format(
common_node.id)).select("degree").collect()
# Getting the degree of the common neighbour node from the subgraph
sub_degree = common_node.degree
# Calling the function to calculate guidance for the common
# neighbour node
guidance = get_guidance(sub_degree, original_degree[0].degree)
# Adding the guidance to the similarity of the non-connected node
similarity += guidance
similarity_dict.append((nc_node[0], similarity))
return similarity_dict | cd4be7c405b2974f35db24dbd7d7db7bdf9a867e | 8,825 |
def balance_thetas(theta_sets_types, theta_sets_values):
"""Repeats theta values such that all thetas lists have the same length """
n_sets = max([len(thetas) for thetas in theta_sets_types])
for i, (types, values) in enumerate(zip(theta_sets_types, theta_sets_values)):
assert len(types) == len(values)
n_sets_before = len(types)
if n_sets_before != n_sets:
theta_sets_types[i] = [types[j % n_sets_before] for j in range(n_sets)]
theta_sets_values[i] = [values[j % n_sets_before] for j in range(n_sets)]
return theta_sets_types, theta_sets_values | 3ca7316a18d57c95adbfbdfec5f5be36f33dc0ea | 8,826 |
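A small illustrative run with made-up benchmark names, showing how the shorter set is repeated in place until both lists have the same length:

```python
theta_types = [["benchmark"], ["morphing", "morphing"]]
theta_values = [[[0.0]], [[0.5], [1.0]]]

types_out, values_out = balance_thetas(theta_types, theta_values)
print(types_out)   # [['benchmark', 'benchmark'], ['morphing', 'morphing']]
print(values_out)  # [[[0.0], [0.0]], [[0.5], [1.0]]]
```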
def _format_weights(df, col, targets, regs):
"""
Reformat the edge table (target -> regulator) that's output by amusr into a pivoted table that the rest of the
inferelator workflow can handle
:param df: pd.DataFrame
An edge table (regulator -> target) with columns containing model values
:param col:
Which column to pivot into values
:param targets: list
A list of target genes (the index of the output data)
:param regs: list
A list of regulators (the columns of the output data)
:return out: pd.DataFrame [G x K]
A [targets x regulators] dataframe pivoted from the edge dataframe
"""
# Make sure that the value column is all numeric
df[col] = pd.to_numeric(df[col])
# Pivot an edge table into a matrix of values
out = pd.pivot_table(df, index='target', columns='regulator', values=col, fill_value=0.)
# Reindex to a [targets x regulators] dataframe and fill anything missing with 0s
out = out.reindex(targets).reindex(regs, axis=1)
out = out.fillna(value=0.)
return out | b683846d9d059a39280077a714455718bd710670 | 8,827 |
def put_thread(req_thread: ReqThreadPut):
"""Put thread for video to DynamoDB"""
try:
input = thread_input.update_item(req_thread)
res = table.update_item(**input)
return res
except ClientError as err:
err_message = err.response["Error"]["Message"]
raise HTTPException(status_code=404, detail=err_message)
except BaseException as err:
raise HTTPException(status_code=404, detail=str(err)) | dba9fe080451a3cb68365824faf8dbccad03b1b6 | 8,828 |
def _get_or_create_campaign_team(name, owner, tasks, redudancy):
"""
Creates CampaignTeam instance, if it does not exist yet.
Returns reference to CampaignTeam instance.
"""
# pylint: disable-msg=no-member
_cteam = CampaignTeam.objects.get_or_create(
teamName=name,
owner=owner,
requiredAnnotations=100, # (tasks * redudancy), # TODO: fix
requiredHours=50, # (tasks * redudancy) / 2,
createdBy=owner,
)
_cteam[0].members.add(owner)
_cteam[0].save()
return _cteam[0] | 4bb7980c621e48aa1eea3471004c627a8ea18e21 | 8,829 |
def check_method(adata):
"""Check that method output fits expected API."""
assert "labels_pred" in adata.obs
return True | 78c1a5181395f1675854333c30bf617c578cc1d4 | 8,830 |
def build_index_block(in_channels,
out_channels,
kernel_size,
stride=2,
padding=0,
groups=1,
norm_cfg=dict(type='BN'),
use_nonlinear=False,
expansion=1):
"""Build an conv block for IndexBlock.
Args:
in_channels (int): The input channels of the block.
out_channels (int): The output channels of the block.
kernel_size (int): The kernel size of the block.
stride (int, optional): The stride of the block. Defaults to 2.
padding (int, optional): The padding of the block. Defaults to 0.
groups (int, optional): The groups of the block. Defaults to 1.
norm_cfg (dict, optional): The norm config of the block.
Defaults to dict(type='BN').
        use_nonlinear (bool, optional): Whether to use nonlinearity in the block.
            If true, a ConvModule with kernel size 1 will be appended and a
            ``ReLU6`` nonlinearity will be added to the original ConvModule.
            Defaults to False.
        expansion (int, optional): Expansion ratio of the middle channels.
            Effective when ``use_nonlinear`` is true. Defaults to 1.
Returns:
nn.Module: The built conv block.
"""
if use_nonlinear:
return nn.Sequential(
ConvModule(
in_channels,
in_channels * expansion,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU6')),
ConvModule(
in_channels * expansion,
out_channels,
1,
stride=1,
padding=0,
groups=groups,
bias=False,
norm_cfg=None,
act_cfg=None))
else:
return ConvModule(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False,
norm_cfg=None,
act_cfg=None) | 03e15760146ce75f06de64ffd6886fe627afcf9b | 8,831 |
def nodes(*paths, type=None):
"""Call node() on each given path and return the list of results.
nodes('foo', 'bar', ...) is equivalent to
[node('foo'), node('bar'), ...]
"""
return list(map(lambda p: node(p, type=type), paths)) | d1ae50237a275c70b9b9e85684e898494fc6c954 | 8,832 |
def GetCodeBucket(app, project):
"""Gets a bucket reference for a Cloud Build.
Args:
app: App resource for this project
project: str, The name of the current project.
Returns:
storage_util.BucketReference, The bucket to use.
"""
# Attempt to retrieve the default appspot bucket, if one can be created.
log.debug('No bucket specified, retrieving default bucket.')
if not app.codeBucket:
raise exceptions.DefaultBucketAccessError(project)
return storage_util.BucketReference.FromBucketUrl(app.codeBucket) | 603126fc33dedb941407a66618748b1d58b91570 | 8,833 |
def parse_rule(parameter_string):
"""Parse a parameter string into its constituent name, type, and
pattern
For example:
    `parse_rule('<param_one:[A-z]>')` ->
('param_one', str, '[A-z]')
:param parameter_string: String to parse
:return: tuple containing
(parameter_name, parameter_type, parameter_pattern)
"""
# We could receive NAME or NAME:PATTERN
if str(parameter_string).startswith('/'):
parameter_string = parameter_string[1:]
parameter_string = str(parameter_string).strip('<>')
name = parameter_string
pattern = 'string'
if ':' in parameter_string:
name, pattern = parameter_string.split(':', 1)
default = (str, pattern)
# Pull from pre-configured types
_type, pattern = REGEX_TYPES.get(pattern, default)
return name, _type, pattern | 881e219ab59c801da078e91cf82ccb15caa7798d | 8,834 |
def plan():
"""
    Improvement plan page
:return:
"""
return render_template('plan.htm') | 135d8b003adbe8f6311f781f0d4ff7ed206a81d6 | 8,835 |
def extract_traceback(notebook):
""" Extracts information about an error from the notebook.
Parameters
----------
notebook: :class:`nbformat.notebooknode.NotebookNode`
Executed notebook to find an error traceback.
Returns
-------
bool
Whether the executed notebook has an error traceback.
int or None
Number of a cell with a traceback.
If None, then the notebook doesn't contain an error traceback.
str
Error traceback if exists.
"""
for cell in notebook['cells']:
# Find a cell output with a traceback and extract the traceback
outputs = cell.get('outputs', [])
for output in outputs:
traceback = output.get('traceback', [])
if traceback:
traceback = '\n'.join(traceback)
return True, cell['execution_count'], traceback
return False, None, "" | 9af26f973e6810936eaa68058efcdb7bc145803b | 8,836 |
def get_log() -> str:
"""get_log() -> str
(internal)
"""
return str() | 3e2d7bf82128afc664eded15e6c11f1ed9da45e7 | 8,837 |
def start_server(self, parameters): # pragma: no cover
"""adds the server start to celery's queue
Args:
parameters(dict): The POST JSON parameters
"""
self.update_state(state=CeleryStates.started)
session = ServerSession(parameters)
return session() | c20e2233ee7c1e6b1718b0c4bfbb2b9b5f52e0e1 | 8,838 |
def generate_config(context):
""" Entry point for the deployment resources. """
properties = context.properties
name = properties.get('name', context.env['name'])
project_id = properties.get('project', context.env['project'])
bgp = properties.get('bgp', {'asn': properties.get('asn')})
router = {
'name': context.env['name'],
# https://cloud.google.com/compute/docs/reference/rest/v1/routers
'type': 'gcp-types/compute-v1:routers',
        'properties': {
            'name': name,
            'project': project_id,
            'region': properties['region'],
            'bgp': bgp,
            'network': properties.get(
                'networkURL',
                generate_network_uri(project_id,
                                     properties.get('network', ''))),
        }
    }
optional_properties = [
'description',
'bgpPeers',
'interfaces',
'nats',
]
for prop in optional_properties:
append_optional_property(router, properties, prop)
    return {
        'resources': [router],
        'outputs': [
            {
                'name': 'name',
                'value': name
            },
            {
                'name': 'selfLink',
                'value': '$(ref.' + context.env['name'] + '.selfLink)'
            },
            {
                'name': 'creationTimestamp',
                'value': '$(ref.' + context.env['name'] + '.creationTimestamp)'
            }
        ]
    } | 506c7ded703b8c00fb9a2a6d7645e9e5d0da6905 | 8,839 |
import time
def cachedmethod(timeout):
"""
Function decorator to enable caching for instance methods.
"""
def _cached(func):
if not(hasattr(func, 'expires')):
func.expires = {}
func.cache = {}
def __cached(self, *args, **kwargs):
if(timeout and func.expires.get(repr(self), 0) < time.time()):
if(repr(self) in func.cache):
del func.cache[repr(self)]
if(repr(self) in func.cache):
return func.cache[repr(self)]
result = func(self, *args, **kwargs)
if(result):
func.cache[repr(self)] = result
func.expires[repr(self)] = time.time() + timeout
return result
return __cached
try:
# see if it's an int
int(timeout)
except TypeError:
func = timeout
timeout = 0
return _cached(func)
return _cached | dd8999a60aa6d92e6b442c7c0661d88cd0e8590e | 8,840 |
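A usage sketch for the decorator above; it works both with an explicit timeout and bare (via the `TypeError` branch), and caches per `repr(self)`:

```python
import time

class Catalog:
    @cachedmethod(2)          # cache results for two seconds per instance
    def heavy_lookup(self):
        print("computing...")
        return {"items": 42}

c = Catalog()
c.heavy_lookup()              # prints "computing..." and stores the result
c.heavy_lookup()              # served from the cache, nothing printed
time.sleep(2.1)
c.heavy_lookup()              # cache expired, recomputed
```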
def __build_pyramid(models, features):
"""Applies all submodels to each FPN level.
Args:
models (list): List of submodels to run on each pyramid level
            (by default only regression, classification).
features (list): The FPN features.
Returns:
list: A list of tensors, one for each submodel.
"""
return [__build_model_pyramid(n, m, features) for n, m in models] | 269be978f9aafbdc36b1c9d726171785a85f54a4 | 8,841 |
def get_ap_list():
"""
Method to return list of aps present in the network
"""
return jsonify_params(
CELLULAR_NETWORK.ap_list
) | da0777219025499603425f3147b2897d2bce2da6 | 8,842 |
def _merge_url_rule(rule_before, rule_after):
"""
Merges two url rule parts.
Parameters
----------
rule_before : `None` or `tuple` of `tuple` (`int`, `str`)
First url part if any to join `rule_after` to.
rule_after : `None` or `tuple` of `tuple` (`int`, `str`)
Second url part what's start is extended by `rule_before`.
Returns
-------
merged_rule : `None` or `tuple` of `tuple` (`int`, `str`)
The merged rule.
"""
if rule_before is None:
return rule_after
if rule_after is None:
return rule_before
if rule_after[0] == DUMMY_RULE_PART:
rule_after = rule_after[1:]
return (*rule_before, *rule_after) | 0682734a82b227f746325363652d1c3f378f2e51 | 8,843 |
import argparse
def parse_options(args):
"""
Parse commandline arguments into options for Monitor
:param args:
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--tcp",
required=True,
action="append",
help="TCP/IP address to monitor, e.g. google.com:80. For best results"
" use multiple addresses."
)
parser.add_argument("--logfile", default="connection.log",
help="Where to store the connection quality data")
parser.add_argument("--interval", default=30.0, type=float,
help="How many seconds between checks")
parser.add_argument("--timeout", default=3.0, type=float,
help="How many seconds to wait for connection")
parser.add_argument("--quiet", default=False, action="store_true",
help="Do not output log data to screen")
return parser.parse_args(args) | adda9497c230b885887b8c21f8e1adfd8bdd2376 | 8,844 |
def create_graphic_model(nodes, edges, gtype):
"""
Create a graphic model given nodes and edges
Parameters
----------
nodes : dict
for each node {key, text, math}
edges : dict
for each edge {key, text, math}
gtype : str [default="text"]
"text" for a verbose version, "math" for a compact version
"""
mod = Digraph()
if gtype == "math":
tindx = 1
else:
tindx = 0
for ckey in nodes.keys():
if ckey == "Like":
cstyle = "filled"
else:
cstyle = None
mod.node(ckey, nodes[ckey][tindx], style=cstyle)
for ckey in edges.keys():
for cval in np.atleast_1d(edges[ckey]):
mod.edge(ckey, cval)
return mod | 028c740cc7fa003642815a8ec0f27154fc6e0dab | 8,845 |
def zero_cross_bounds(arr, dim, num_cross):
"""Find the values bounding an array's zero crossing."""
sign_switch = np.sign(arr).diff(dim)
switch_val = arr[dim].where(sign_switch, drop=True)[num_cross]
lower_bound = max(0.999*switch_val, np.min(arr[dim]))
upper_bound = min(1.001*switch_val, np.max(arr[dim]))
return arr.sel(**{dim: [lower_bound, upper_bound], "method": "backfill"}) | 52d3431c32f61f47223fdccf4c5a85a92589534f | 8,846 |
def remove_tseqs(t: ST_Type) -> ST_Type:
"""
Get just the sseqs and the non-nested types, removing the tseqs
"""
if type(t) == ST_SSeq or type(t) == ST_SSeq_Tuple:
inner_tseqs_removed = remove_tseqs(t.t)
return replace(t, t=inner_tseqs_removed)
elif is_nested(t):
return remove_tseqs(t.t)
else:
return t | 323f9cd3c007c1decf11653091f641dc453d32cb | 8,847 |
def prod_cart(in_list_1: list, in_list_2: list) -> list:
"""
Compute the cartesian product of two list
:param in_list_1: the first list to be evaluated
:param in_list_2: the second list to be evaluated
    :return: the cartesian product result as [[x,y],..]
"""
_list = []
for element_1 in in_list_1:
for element_2 in in_list_2:
_list.append([element_1,element_2])
return _list | 9fdbfc558f5ec3b11c78535b9125e0a1c293035e | 8,848 |
from .esri_basemap import esrimap
def classFactory(iface): # pylint: disable=invalid-name
"""Load esrimap class from file esrimap.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
return esrimap(iface) | 3e067a97cba21a07c818077e4207cd8e337143d9 | 8,849 |
def gpib_open(name):
"""
Start a device session.
Returns a unique integer for the instrument at the specified GPIB address.
For example::
>>> gpib_open(lan[158.154.1.110]:19)
4
@param name : LAN/GPIB address of the device
@type name : str
@return: int
"""
(devtype,devID) = name.split()
address = eval(devtype)[devID]['addr']
return _open(address) | fa9e87a3873248866586758c0b0f370a3ad29e6e | 8,850 |
def myjobs_view(request):
"""
Renderbox view
:param request:
:return:
"""
return render(request, 'renderbox/myjobs.html') | ac0ffbc92a33657a165beb5e12905e3dc495c943 | 8,851 |
import functools
import sys
def set_task_payload(func):
"""Set TASK_PAYLOAD and unset TASK_PAYLOAD."""
@functools.wraps(func)
def wrapper(task):
"""Wrapper."""
environment.set_value('TASK_PAYLOAD', task.payload())
try:
return func(task)
except: # Truly catch *all* exceptions.
e = sys.exc_info()[1]
e.extras = {'task_payload': environment.get_value('TASK_PAYLOAD')}
raise
finally:
environment.remove_key('TASK_PAYLOAD')
return wrapper | a1ba88a6bf5df872eab712c6ffe52be2c2fd3283 | 8,852 |
def _match_contact(filter_criteria):
"""
This default matching strategy function will attempt to get a single result
for the specified criteria.
It will fail with an `unmatched` result if there are no matching contacts.
It will fail with a `multiple_matches` result if there are multiple matches
for this criteria.
"""
contact = None
try:
contact = get_queryset_object(Contact.objects.all(), **filter_criteria)
contact_matching_status = ContactMatchingStatus.matched
except Contact.DoesNotExist:
contact_matching_status = ContactMatchingStatus.unmatched
except Contact.MultipleObjectsReturned:
contact_matching_status = ContactMatchingStatus.multiple_matches
return contact, contact_matching_status | 088199ac26dc226e1412b43ed0c9b380c669c64e | 8,853 |
from typing import Generator
def get_objects_dictionary():
"""
creates a dictionary with the types and the circuit objects
:return: Dictionary instance
"""
object_types = {'bus': Bus(),
'load': Load(),
'static_generator': StaticGenerator(),
'battery': Battery(),
'generator': Generator(),
'shunt': Shunt(),
'wires': Wire(),
'overhead_line_types': Tower(),
'underground_cable_types': UndergroundLineType(),
'sequence_line_types': SequenceLineType(),
'transformer_types': TransformerType(),
'branch': Branch(),
'transformer2w': Transformer2W(),
'line': Line(),
'dc_line': DcLine(None, None),
'hvdc': HvdcLine(),
'vsc': VSC(Bus(), Bus(is_dc=True)),
}
return object_types | bd82c2dc30877f841e4275aafbe054849b6f6ba2 | 8,854 |
def create_stripe_onboarding_link(request, stripe_id=None,):
"""Creates stripe connect onboarding link by calling Stripe API."""
account_links = stripe.AccountLink.create(
account=stripe_id,
return_url=request.build_absolute_uri(
reverse("users:stripe_callback")
),
refresh_url=request.build_absolute_uri(
reverse("users:stripe_authorize")
),
type="account_onboarding",
)
return account_links | 1dd1e7c50645fb5eaa36d7426abd5cff198e1610 | 8,855 |
def add_scheme_if_missing(url):
"""
>>> add_scheme_if_missing("example.org")
'http://example.org'
>>> add_scheme_if_missing("https://example.org")
'https://example.org'
"""
if "//" not in url:
url = "http://%s" % url
return url | 97a33ce1f60ab67e6a807ef1bd1d95250b5d18c6 | 8,856 |
from typing import Dict
def _extract_assembly_information(job_context: Dict) -> Dict:
"""Determine the Ensembl assembly version and name used for this index.
Ensembl will periodically release updated versions of the
assemblies which are where the input files for this processor
comes from. All divisions other than the main one have identical
release versions, but we don't know which division these files
    came from so we can't just hit their API again. Therefore, look at
the URL we used to get the files because it contains the assembly
version and name.
I'll admit this isn't the most elegant solution, but since the
transcriptome index's only database model is the OriginalFiles
until processing is complete, there's no other way to pass this
information through to this processor without modifying the
OriginalFile model.
The URL path we're attempting follows this pattern (defined in the surveyor)
ftp://ftp.{url_root}/gtf/{species_sub_dir}/{filename_species}.{assembly_name}.{assembly_version}.gtf.gz
and we are attempting to extract {assembly_version} and {assembly_name}.
"""
original_files = job_context["original_files"]
for og_file in original_files:
if ".gtf.gz" in og_file.source_filename:
extensionless_url = og_file.source_url[:-7]
version_start_index = extensionless_url.rfind(".") + 1
job_context["assembly_version"] = extensionless_url[version_start_index:]
# Decrement the index to skip the period.
versionless_url = extensionless_url[:version_start_index-1]
assembly_name_start_index = versionless_url.rfind(".") + 1
job_context["assembly_name"] = versionless_url[assembly_name_start_index:]
return job_context | b78513b826c0a12bf87563095e33320aee328b76 | 8,857 |
def fixture_circle_2() -> Circle:
"""Return an example circle."""
return Circle(Point(0.0, 0.0), 1.0) | 4040cb356a1e09cfe83280711d93a43b9352ff66 | 8,858 |
from warnings import warn
import logging
from fparser import api
from loopy.frontend.fortran.translator import F2LoopyTranslator
from loopy.transform.callable import merge
from loopy.frontend.fortran.translator import specialize_fortran_division
def parse_fortran(source, filename="<floopy code>", free_form=None, strict=None,
seq_dependencies=None, auto_dependencies=None, target=None):
"""
:returns: a :class:`loopy.TranslationUnit`
"""
parse_plog = ProcessLogger(logger, "parsing fortran file '%s'" % filename)
if seq_dependencies is not None and auto_dependencies is not None:
raise TypeError(
"may not specify both seq_dependencies and auto_dependencies")
if auto_dependencies is not None:
warn("auto_dependencies is deprecated, use seq_dependencies instead",
DeprecationWarning, stacklevel=2)
seq_dependencies = auto_dependencies
if seq_dependencies is None:
seq_dependencies = True
if free_form is None:
free_form = True
if strict is None:
strict = True
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s")
console.setFormatter(formatter)
logging.getLogger("fparser").addHandler(console)
tree = api.parse(source, isfree=free_form, isstrict=strict,
analyze=False, ignore_comments=False)
if tree is None:
raise LoopyError("Fortran parser was unhappy with source code "
"and returned invalid data (Sorry!)")
f2loopy = F2LoopyTranslator(filename, target=target)
f2loopy(tree)
kernels = f2loopy.make_kernels(seq_dependencies=seq_dependencies)
prog = merge(kernels)
all_kernels = [clbl.subkernel
for clbl in prog.callables_table.values()]
for knl in all_kernels:
prog.with_kernel(_add_assignees_to_calls(knl, all_kernels))
if len(all_kernels) == 1:
        # guessing in the case of only one function
prog = prog.with_entrypoints(all_kernels[0].name)
prog = specialize_fortran_division(prog)
parse_plog.done()
return prog | 69d85ba20fd429598d3441297a89a12933f69925 | 8,859 |
def compute_sources(radius, evolved_vars):
"""
Computes source terms for the symmetry.
"""
mass_density = evolved_vars[0]
momentum_density = evolved_vars[1]
energy_density = evolved_vars[2]
factor = -_symmetry_alpha / radius
pressure = compute_pressure(mass_density, momentum_density, energy_density)
return (factor * momentum_density,
factor * momentum_density**2 / mass_density, factor *
(energy_density + pressure) * momentum_density / mass_density) | f2c7c68f3d00a063f9a29b220f98f71c6bb02aef | 8,860 |
def average_pq(ps, qs):
""" average the multiple position and quaternion array
Args:
ps (np.array): multiple position array of shape Nx3
qs (np.array): multiple quaternion array of shape Nx4
Returns:
p_mean (np.array): averaged position array
q_mean (np.array): averaged quaternion array
"""
p_average = np.mean(np.asarray(ps), axis=0)
q_average = average_q(np.asarray(qs))
return p_average, q_average | b7064d75f07361d60375de1dad91e0139533b042 | 8,861 |
def logobase(**kwargs):
"""Create a PyGraphviz graph for a logo."""
ag = pygraphviz.AGraph(bgcolor='#D0D0D0', strict=False, directed=True, ranksep=0.3, **kwargs)
ag.edge_attr['penwidth'] = 1.4
ag.edge_attr['arrowsize'] = 0.8
return ag | 60772de3f3b33f58559ecfd3293cffc26cfe8e70 | 8,862 |
import torch
def integral_raycasting(
pixels: Tensor,
mu: Tensor,
rho: Tensor,
lambd: Tensor,
appearance: Tensor,
background_appearance: Tensor,
K: Tensor,
dist_coef: Tensor = None,
alpha: float = 2.5e-2,
beta: float = 2e0,
eps: float = 1e-8,
) -> Tensor:
"""
:param pixels: [H, W, 3, 1]
:param mu: [*, N, 3, 1]
:param rho: [*, N, 3, 3]
:param lambd: [*, N, 3, 1]
:param appearance: [*, N, 3]
:param background_appearance: [*, 1, 3]
:param K: [*, 3, 3]
:param dist_coef: [*, D]
:param alpha:
:param beta:
:param eps:
:return:
"""
rays = pixel_grid_to_ray_grid(
xyz=pixels,
K=K,
dist_coef=dist_coef,
)
lambd, alpha = invert_lambd(
lambd=lambd,
alpha=alpha,
eps=eps,
)
rays_sigma_rays, mu_sigma_mu, rays_sigma_mu = compute_quantities(
rays=rays,
mu=mu,
rho=rho,
lambd=lambd,
)
z = optimal_z(rays_sigma_mu=rays_sigma_mu, rays_sigma_rays=rays_sigma_rays, eps=eps)
z_background = beta * max_z(z=z)
weights = density(x=z) * integral(
rays_sigma_rays=rays_sigma_rays,
mu_sigma_mu=mu_sigma_mu,
rays_sigma_mu=rays_sigma_mu,
alpha=alpha,
eps=eps,
)
weight_background = density(x=z_background) * background_integral(
z=z_background,
alpha=alpha,
)
shape = weights.shape[:-1] + weight_background.shape[-1:]
weight_background = weight_background.expand(shape)
weights = torch.cat([weights, weight_background], dim=-1)
weights = normalize_weights(weights=weights, eps=eps)
appearance = torch.cat([appearance, background_appearance], dim=-2)
image = splat_image(weights=weights, appearance=appearance)
return image | fc5165c04732ea021d105df5d5f997524b037abd | 8,863 |
async def cors_handler(request, handler):
"""Middleware to add CORS response headers
"""
response = await handler(request)
response.headers['Access-Control-Allow-Origin'] = '*'
return response | c9f33261b1fb2e6dc3ab3139e657106a94c5bfd1 | 8,864 |
def validate_image(task: ExternalTask):
"""
To simulate BPMN/Failure/Success, this handler uses image name variable (to be passed when launching the process)
"""
log_context = {"WORKER_ID": task.get_worker_id(),
"TASK_ID": task.get_task_id(),
"TOPIC": task.get_topic_name()}
log_with_context("executing validate_image", log_context)
img_name = task.get_variable('imgName')
if "poor" in img_name:
return task.bpmn_error("POOR_QUALITY_IMAGE", "Image quality is bad",
{"img_rejection_code": "POOR_QUALITY_CODE_XX",
"img_rejection_reason": f"Image quality must be at least GOOD"})
elif "jpg" in img_name:
return task.complete({"img_approved": True})
elif "corrupt" in img_name:
return task.failure("Cannot validate image", "image is corrupted", 0, default_config.get("retryTimeout"))
else:
return task.bpmn_error("INVALID_IMAGE", "Image extension must be jpg",
{"img_rejection_code": "INVALID_IMG_NAME",
"img_rejection_reason": f"Image name {img_name} is invalid"}) | 97413656181bfc4480dc7b2a195713e8124d44f2 | 8,865 |
def simulate_patch(app, path, **kwargs):
"""Simulates a PATCH request to a WSGI application.
Equivalent to::
simulate_request(app, 'PATCH', path, **kwargs)
Args:
app (callable): The WSGI application to call
path (str): The URL path to request
Keyword Args:
params (dict): A dictionary of query string parameters,
where each key is a parameter name, and each value is
either a ``str`` or something that can be converted
into a ``str``, or a list of such values. If a ``list``,
the value will be converted to a comma-delimited string
of values (e.g., 'thing=1,2,3').
params_csv (bool): Set to ``False`` to encode list values
in query string params by specifying multiple instances
of the parameter (e.g., 'thing=1&thing=2&thing=3').
Otherwise, parameters will be encoded as comma-separated
values (e.g., 'thing=1,2,3'). Defaults to ``True``.
headers (dict): Additional headers to include in the request
(default: ``None``)
body (str): A string to send as the body of the request.
Accepts both byte strings and Unicode strings
(default: ``None``). If a Unicode string is provided,
it will be encoded as UTF-8 in the request.
json(JSON serializable): A JSON document to serialize as the
body of the request (default: ``None``). If specified,
overrides `body` and the Content-Type header in
`headers`.
protocol: The protocol to use for the URL scheme
(default: 'http')
host(str): A string to use for the hostname part of the fully qualified
request URL (default: 'falconframework.org')
remote_addr (str): A string to use as the remote IP address for the
request (default: '127.0.0.1')
extras (dict): Additional CGI variables to add to the WSGI ``environ``
dictionary for the request (default: ``None``)
"""
return simulate_request(app, 'PATCH', path, **kwargs) | 48fda74dc2765e3a281a71c7ba6f4144e9a258cd | 8,866 |
def minimum_image_box(sizes):
"""Creates a distance wrapper using the minimum image convention
Arguments:
sizes (array-like of float): box sizes
"""
def _box(sizes, distance_vectors):
"""A minimum image wrapper for distances"""
shift = sizes[None, None, :] * np.round(distance_vectors / sizes[None, None, :])
distance_vectors -= shift
return distance_vectors
return partial(_box, np.array(sizes)) | 5d26092a988a011e9fb1967a74c3ceec935f5b1b | 8,867 |
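A short sketch of the returned wrapper in use, assuming `numpy` and `functools.partial` are imported in the original module:

```python
import numpy as np

# Distance vectors in a 10 x 10 x 10 periodic box, shape (1, 1, 3).
box = minimum_image_box([10.0, 10.0, 10.0])
vectors = np.array([[[9.0, -8.0, 0.5]]])

print(box(vectors))  # [[[-1.   2.   0.5]]]  -- wrapped to the nearest image
```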
def mlrPredict(W, data):
"""
mlrObjFunction predicts the label of data given the data and parameter W
of Logistic Regression
Input:
W: the matrix of weight of size (D + 1) x 10. Each column is the weight
vector of a Logistic Regression classifier.
X: the data matrix of size N x D
Output:
label: vector of size N x 1 representing the predicted label of
corresponding feature vector given in data matrix
"""
label = np.zeros((data.shape[0], 1))
##################
# YOUR CODE HERE #
##################
# HINT: Do not forget to add the bias term to your input data
x = np.hstack((np.ones((data.shape[0], 1)),data))
label = (np.argmax((np.exp(np.dot(x, W)) / np.sum(np.exp(np.dot(x, W)))), axis=1)).reshape((data.shape[0],1))
return label | 57542e5b54ddd223f4cbcae7adf932e85c4ffeeb | 8,868 |
def calc_mean_score(movies):
"""Helper method to calculate mean of list of Movie namedtuples,
round the mean to 1 decimal place"""
return round(sum([movie.score for movie in movies]) / len(movies), 1) | ccf52f813091d1c907470996c62dafa61303e245 | 8,869 |
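A quick example, assuming the Movie namedtuple has a numeric `score` field as the helper expects:

```python
from collections import namedtuple

Movie = namedtuple("Movie", "title score")
movies = [Movie("Alpha", 8.4), Movie("Beta", 7.1), Movie("Gamma", 9.0)]

print(calc_mean_score(movies))  # 8.2
```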
import hmac
import hashlib
def get_proxy_signature(query_dict, secret):
"""
Calculate the signature of the given query dict as per Shopify's documentation for proxy requests.
See: http://docs.shopify.com/api/tutorials/application-proxies#security
"""
# Sort and combine query parameters into a single string.
sorted_params = ''
for key in sorted(query_dict.keys()):
sorted_params += "{0}={1}".format(key, ",".join(query_dict.getlist(key)))
signature = hmac.new(secret.encode('utf-8'), sorted_params.encode('utf-8'), hashlib.sha256)
return signature.hexdigest() | c234f18c1d44a936c4844ae2fe1b912a624eef61 | 8,870 |
def candlestick_echarts(data_frame: pd.DataFrame, time_field: str = 'time', open_field: str = "open",
high_field: str = 'high',
low_field: str = 'low',
close_field: str = 'close',
volume_field: str = 'volume', mas: list = [5, 10, 30], log_y: bool = True, title: str = "",
width: str = "100%", height: str = "600px", left_padding: str = '5%',
right_padding: str = '3%') -> Echarts:
"""
    Plot a candlestick (K-line) chart.
    :param data_frame:
    :param time_field: time column name; if the column does not exist, the index is used as the time axis
    :param open_field: open column name
    :param high_field: high column name
    :param low_field: low column name
    :param close_field: close column name
    :param volume_field: volume column name
    :param mas: moving-average window lengths
    :param log_y: log-scale y axis with base 1.1, so one grid step corresponds to roughly 10%
    :param title: optional title
    :param width: width of the output div; supports pixels and percentages, e.g. 800px/100%
    :param height: height of the output div; supports pixels and percentages, e.g. 800px/100%
    :param left_padding: left padding width
    :param right_padding: right padding width
    :return:
"""
df = data_frame.copy()
    if time_field not in data_frame.columns:  # use the index as the time column
df[time_field] = df.index
df[close_field] = df[close_field].fillna(method="ffill")
df[open_field] = df[open_field].fillna(df[close_field])
df[high_field] = df[high_field].fillna(df[close_field])
df[low_field] = df[low_field].fillna(df[close_field])
df[volume_field] = df[volume_field].fillna(0)
volumes = (df[volume_field]).round(2).tolist()
vol_filter = (df[volume_field]).quantile([0.05, 0.95]).values
bar_items = [({"value": vol} if vol >= vol_filter[0] and vol <= vol_filter[1] else (
{"value": vol, "itemStyle": {"color": "red"}} if vol > vol_filter[1] else {"value": vol,
"itemStyle": {"color": "green"}}))
for vol in volumes]
options = {
'animation': False,
'title': {'text': title},
'legend': {'top': 10, 'left': 'center', 'data': [title]},
'tooltip': {
'trigger': 'axis', 'axisPointer': {'type': 'cross'},
'borderWidth': 1,
'borderColor': '#ccc',
'padding': 10,
'formatter': Js("""
function(params){
var dt = params[0]['axisValue'];
var labels = [];
labels.push('<b><span>时间: </span></b>' + dt + '<br/>');
params.sort(function(a, b) {
if (a.seriesName < b.seriesName ) {return -1;}
else if (a.seriesName > b.seriesName ) {return 1;}
else{ return 0;}
});
for (let i = 0; i < params.length; i++) {
const param = params[i];
var label=["<b><span>"+param['seriesName']+"("+param['seriesType']+"): </span></b>"];
var dimensionNames=param["dimensionNames"];
if (typeof(param['value'])=='object' && dimensionNames.length==param['data'].length){
label.push("<br/>");
for (let j = 1; j <dimensionNames.length; j++) {
var value= param['data'][j];
if (typeof(value)=='number'){
if (value%1==0 || value>100000){
label.push("<span>"+dimensionNames[j]+': '+value.toFixed(0)+"</span><br/>");
}else{
label.push("<span>"+dimensionNames[j]+': '+value.toFixed(2)+"</span><br/>");
}
}else{
label.push("<div style='max-width:15em;word-break:break-all;white-space: normal;'>"+dimensionNames[j]+': '+value+"</div>");
}
}
}else if(param['seriesType']=="candlestick"){
label.push("<br/>");
label.push("<span>open: "+param['data'][1].toFixed(2)+"</span><br/>");
label.push("<span>close: "+param['data'][2].toFixed(2)+"</span><br/>");
label.push("<span>high: "+param['data'][4].toFixed(2)+"</span><br/>");
label.push("<span>low: "+param['data'][3].toFixed(2)+"</span><br/>");
}else if(typeof(param['value'])=='number'){
if (param['value']%1==0){
label.push("<span>"+param['value'].toFixed(0)+"</span><br/>");
}else{
label.push("<span>"+param['value'].toFixed(2)+"</span><br/>");
}
}else if(param['value']){
label.push("<div style='max-width:15em;word-break:break-all;white-space: normal;'>"+value+"</div>");
}else{
label.push("<br/>");
}
var cardStr= label.join('');
labels.push(cardStr);
}
return labels.join('');
}"""),
'textStyle': {'color': '#000'},
'position': Js("""
function (pos, params, el, elRect, size){
var obj = {top: 10};
obj[['left', 'right'][+(pos[0] < size.viewSize[0] / 2)]] = 30;
return obj;
}
""")
},
'axisPointer': {
'link': {'xAxisIndex': 'all'},
'label': {'backgroundColor': '#777'}
},
'grid': [
{'left': left_padding, 'right': right_padding, 'height': '70%'},
{'left': left_padding, 'right': right_padding, 'top': '71%', 'height': '16%'}
],
'xAxis': [
{
'type': 'category',
'data': df[time_field].tolist(),
'scale': True,
'boundaryGap': False,
'axisLine': {'show': False},
'axisLabel': {'show': False},
'axisTick': {'show': False},
'splitLine': {'show': True},
'splitNumber': 20,
'min': 'dataMin',
'max': 'dataMax',
'axisPointer': {
'z': 100
}
},
{
'type': 'category',
'gridIndex': 1,
'data': df[time_field].tolist(),
'scale': True,
'boundaryGap': False,
'axisLine': {'onZero': False, 'show': True},
'axisLine': {'show': True},
'axisLabel': {'show': True},
'axisTick': {'show': True},
'splitLine': {'show': True},
'axisLabel': {'show': True},
'splitNumber': 20,
'min': 'dataMin',
'max': 'dataMax'
}
],
'yAxis': [
{
'scale': True,
'type': 'log' if log_y else 'value',
'logBase': 1.1,
'splitNumber': 10,
'axisLabel': {'show': True,
'formatter': Js("""
function(value,index){
return value.toFixed(2);
}
""")},
'axisLine': {'show': False},
'axisTick': {'show': True},
'splitLine': {'show': True}
},
{
'scale': True,
'gridIndex': 1,
'splitNumber': 2,
'axisLabel': {'show': True,
'formatter': Js("""
function(value,index){
var si = [
{ value: 1, symbol: "" },
{ value: 1E3, symbol: "K" },
{ value: 1E6, symbol: "M" },
{ value: 1E9, symbol: "G" },
{ value: 1E12, symbol: "T" },
{ value: 1E15, symbol: "P" },
{ value: 1E18, symbol: "E" }
];
var rx = /\.0+$|(\.[0-9]*[1-9])0+$/;
var i;
for (i = si.length - 1; i > 0; i--) {
if (value >= si[i].value) {
break;
}
}
return (value / si[i].value).toFixed(2).replace(rx, "$1") + si[i].symbol;
}
""")
},
'axisLine': {'show': False},
'axisTick': {'show': False},
'splitLine': {'show': False}
}
],
'dataZoom': [
{
'type': 'inside',
'xAxisIndex': [0, 1],
'start': 0,
'end': 100
}
],
'series': [
{
'name': title,
'type': 'candlestick',
'data': df[[open_field, close_field, low_field, high_field]].values.tolist(),
'emphasis': {
'itemStyle': {
'borderColor': "#333",
'borderWidth': 1,
'shadowColor': 'rgba(0, 0, 0, 0.5)',
'shadowBlur': 15
}
}
},
{
'name': 'Volume',
'type': 'bar',
'xAxisIndex': 1,
'yAxisIndex': 1,
'data': bar_items,
'emphasis': {
'itemStyle': {
'borderColor': "#333",
'borderWidth': 1,
'shadowColor': 'rgba(0, 0, 0, 0.5)',
'shadowBlur': 15
}
}
}
]
}
for ma_len in mas:
name = "MA" + str(ma_len)
df[name] = df[close_field].rolling(ma_len).mean().round(2)
series_ma = {
'name': name,
'type': 'line',
'data': df[name].tolist(),
'smooth': True,
'showSymbol': False,
'lineStyle': {'opacity': 0.5}
}
options['series'].append(series_ma)
options['legend']['data'].append(name)
return Echarts(options=options, width=width, height=height) | f8bc3d1ef876a5df0f2fdbdf7dbf97b039a54cc4 | 8,871 |
def select_sounder_hac(path_sounder, sounder):
"""
    Find the indices of a given sounder (transducer name) inside a hac file (path_sounder), and return the corresponding sounder and transducer indexes.
    inputs:
        path_sounder: path of the hac file to analyse
        sounder: name of the transducer
    outputs:
        sounder index and transducer index (None if the transducer is not found)
"""
list_sounder = util.hac_sounder_descr(FileName=path_sounder)
list_st = [
[
list_sounder.GetSounder(isdr).GetTransducer(itsd).m_transName
for itsd in range(list_sounder.GetSounder(isdr).m_numberOfTransducer)
]
for isdr in range(list_sounder.GetNbSounder())
]
for i in range(len(list_st)):
for j in range(len(list_st[i])):
if list_st[i][j] == sounder:
return i, j
return None | 2f054ef6a8e3a64f0910e5eb4bce9407befc4b33 | 8,872 |
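A minimal usage sketch follows; the file path and transducer name are illustrative placeholders, not values from the original project:
idx = select_sounder_hac("/data/survey/run_042.hac", "ES38B")
if idx is not None:
    isdr, itsd = idx   # sounder index, transducer index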
def upvote_checklist(request, checklist_id):
# for "messages", refer https://stackoverflow.com/a/61603003/6543250
"""if user cannot retract upvote, then this code be uncommented
if Upvote.objects.filter(user=User.objects.filter(username=username).first(), checklist=Checklist.objects.get(id=checklist_id)):
msg = 'You have already upvoted the checklist once!'
messages.info(request, msg)
"""
"""
    Note: notifications are recorded only when a user upvotes a checklist, not when they downvote, to promote healthy behaviour and to avoid flooding the author with downvote notifications if some user decides to harass them.
"""
if Checklist.objects.get(id=checklist_id).author == request.user:
msg = "Action Denied! You cannot upvote your own checklist!"
messages.error(request, msg)
else:
# remove user's upvote if he has already upvoted
obj = Upvote.objects.filter(
user=request.user, checklist=Checklist.objects.get(id=checklist_id)
)
msg = ""
if obj:
obj.delete()
msg = "Upvote retracted!"
else:
upvote_obj = Upvote(
user=request.user,
checklist=Checklist.objects.get(id=checklist_id),
)
upvote_obj.save()
msg = "Checklist upvoted!"
# also update notifications table so relevant notif can be shown to author
fromUser = request.user
toUser = Checklist.objects.get(id=checklist_id).author
Notification(
fromUser=fromUser,
toUser=toUser,
notif_type=1,
checklist=Checklist.objects.get(id=checklist_id),
).save()
messages.success(request, msg)
if request.META.get("HTTP_REFERER"):
if "login" in request.META.get("HTTP_REFERER") and "next" in request.META.get(
"HTTP_REFERER"
):
return redirect("checklist-home")
# redirect to home url; simply reload the page
# return redirect('checklist-home')
return redirect(request.META.get("HTTP_REFERER", "checklist-home")) | 559f9e0341652391b824b215448f87fa3250baae | 8,873 |
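For context, a view like this is normally reached through a URL pattern; the sketch below is a hedged assumption (the URL path, module layout, and route name are not from the original project):
from django.urls import path
from . import views

urlpatterns = [
    path("checklist/<int:checklist_id>/upvote/", views.upvote_checklist, name="checklist-upvote"),
]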
def index(request):
"""查询页面"""
ctx = {}
    Advert_1 = Advert.objects.get(advert_num=1)  # Advert 1
    Advert_2 = Advert.objects.get(advert_num=2)  # Advert 2
ctx['Adverturl1'] = Advert_1.advert_url
ctx['Adverturl2'] = Advert_2.advert_url
ctx['Advertimg1'] = '/advert/'+ str(Advert_1.img)
ctx['Advertimg2'] = '/advert/'+ str(Advert_2.img)
return render(request, 'srsys/index.html',ctx) | 91e7a771273ed262e7025bc289defe7f6a52047e | 8,874 |
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
def load_amazon():
"""
"""
df = pd.read_csv('data/amazon.txt',
header=None,
delimiter='\t')
X_data = df[0].tolist()
y_data = df[1].tolist()
    print('Preprocessing...')
vectorizer = TfidfVectorizer(strip_accents='unicode',
lowercase=True,
stop_words='english',
ngram_range=(1, 2),
max_df=0.5,
min_df=5,
max_features=20000,
norm='l2',
use_idf=True,
smooth_idf=True,
sublinear_tf=False)
vectorizer.fit(X_data)
X_data = vectorizer.transform(X_data)
X_train, X_test, y_train, y_test = train_test_split(X_data,
y_data,
test_size=0.1,
random_state=0)
X_train, X_val, y_train, y_val = train_test_split(X_train,
y_train,
test_size=0.2,
random_state=0)
return X_train, y_train, X_val, y_val, X_test, y_test | 8e11cf91d616f7dfe17e26da2fcf43d82ea26f80 | 8,875 |
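A usage sketch, assuming data/amazon.txt is present in the working directory:
X_train, y_train, X_val, y_val, X_test, y_test = load_amazon()
print(X_train.shape)   # sparse TF-IDF matrix of shape (n_train_docs, n_features)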
def get_switch_filters(
switch_id, exception_when_missing=True,
user=None, session=None, **kwargs
):
"""get filters of a switch."""
return _get_switch(
switch_id, session=session,
exception_when_missing=exception_when_missing
) | db270f761fcdfb40a9d2970923b4643ebecf7cc3 | 8,876 |
def generalized_zielonka_with_psolC(g):
"""
Zielonka's algorithm with psolC partial solver.
:param g: the game to solve.
:return: the solution in the following format : (W_0, W_1).
"""
return generalized_parity_solver_with_partial(g, psolC_gen.psolC_generalized) | 1ac4a81df393970c16a5f303155c89cf74db34ab | 8,877 |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def plot_matrix(mat, figsize=(7, 4), draw_cbar=True, vmin=0, vmax=1, cmap=None):
"""
wrapper for plotting a matrix of probabilities.
attribues (optional) are used as xlabels
"""
if np.any(mat < 0):
print('rescaling matrix to probabilities')
mat = .5 * (mat + 1)
try:
if cmap is None:
            cmap = sns.cubehelix_palette(
                4, start=2, dark=0, light=1,
                reverse=False, as_cmap=True)
sns.set_style("whitegrid", {'axes.grid': False})
except:
cmap = 'gray_r'
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
cax = ax.imshow(mat, aspect='auto', cmap=cmap,
vmin=vmin, vmax=vmax, origin='upper')
if draw_cbar is True:
fig.colorbar(cax, orientation='vertical')
return fig, ax
# ax.set_yticks([]) | a84948730816e5c59654fa6d0eeab773218fba61 | 8,878 |
import datetime
def read_properties_core(xml_source):
"""Read assorted file properties."""
properties = DocumentProperties()
root = fromstring(xml_source)
creator_node = root.find(QName(NAMESPACES['dc'], 'creator').text)
if creator_node is not None:
properties.creator = creator_node.text
else:
properties.creator = ''
last_modified_by_node = root.find(
QName(NAMESPACES['cp'], 'lastModifiedBy').text)
if last_modified_by_node is not None:
properties.last_modified_by = last_modified_by_node.text
else:
properties.last_modified_by = ''
created_node = root.find(QName(NAMESPACES['dcterms'], 'created').text)
if created_node is not None:
properties.created = W3CDTF_to_datetime(created_node.text)
else:
properties.created = datetime.datetime.now()
modified_node = root.find(QName(NAMESPACES['dcterms'], 'modified').text)
if modified_node is not None:
properties.modified = W3CDTF_to_datetime(modified_node.text)
else:
properties.modified = properties.created
return properties | 357411103a52bbbfc6e621c47b734b9d11f04284 | 8,879 |
import torch
def batch_decode(loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
        loc (tensor): location predictions for loc layers,
            Shape: [batch, num_priors, 4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [batch, num_priors, 4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((
priors[:, :, :2] + loc[:, :, :2] * variances[0] * priors[:, :, 2:],
priors[:, :, 2:] * torch.exp(loc[:, :, 2:] * variances[1])), 2)
boxes[:, :, :2] -= boxes[:, :, 2:] / 2
boxes[:, :, 2:] += boxes[:, :, :2]
return boxes | 7963b771e2c7bc560e5f9e5051abea43de2f46e3 | 8,880 |
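A small shape-check sketch; the tensor contents are random and the variance values are assumptions, so only the shapes are meaningful here:
import torch
loc = torch.randn(2, 100, 4)       # batch of 2 images, 100 priors each
priors = torch.rand(2, 100, 4)     # center-offset priors per batch element
boxes = batch_decode(loc, priors, variances=[0.1, 0.2])
assert boxes.shape == (2, 100, 4)  # decoded (xmin, ymin, xmax, ymax) boxes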
def _step2_macs_seq (configs):
"""Step2 MACS if the raw data type is seq. So it will use the output from step1.
"""
# check the input
t_rep_files = configs["samtools.treat_output_replicates"]
t_comb_file = configs["samtools.treat_output"]
c_comb_file = configs["samtools.control_output"]
macs_genome_option = " -g "+ configs["sample.species"]+" "
# run MACS, first for each replicate
for i in range(1,configs["data.number_replicates"]+1):
if configs["data.has_control"]:
# run MACS w/ control
command_line = configs["macs.macs_main"]+macs_genome_option+" -w -S -t "+t_rep_files[i-1]+" -c "+ c_comb_file + " -n "+configs["sample.sample_id"]+"_rep"+str(i)
run_cmd(command_line)
# copy out and rename the wiggle file
command_line = "zcat "+configs["sample.sample_id"]+"_rep"+str(i)+"_MACS_wiggle/treat/"+configs["sample.sample_id"]+"_rep"+str(i)+"_treat_afterfiting_all.wig.gz > "+configs["macs.output_treat_wig_replicates"][i-1]
run_cmd(command_line)
else:
# run MACS w/o control
command_line = configs["macs.macs_main"]+macs_genome_option+" -w -S -t "+t_rep_files[i-1]+" -n "+configs["sample.sample_id"]+"_rep"+str(i)
run_cmd(command_line)
# copy out and rename the wiggle file
command_line = "zcat "+configs["sample.sample_id"]+"_rep"+str(i)+"_MACS_wiggle/treat/"+configs["sample.sample_id"]+"_rep"+str(i)+"_treat_afterfiting_all.wig.gz > "+configs["macs.output_treat_wig_replicates"][i-1]
run_cmd(command_line)
# run MACS for the combined treatment
if configs["data.number_replicates"] == 1:
# no need to run MACS again, simply copy the previous results
command_line = "cp "+configs["sample.sample_id"]+"_rep1_peaks.xls"+" "+configs["macs.output_xls"]
run_cmd(command_line)
command_line = "cp "+configs["sample.sample_id"]+"_rep1_peaks.bed"+" "+configs["macs.output_bed"]
run_cmd(command_line)
command_line = "cp "+configs["sample.sample_id"]+"_rep1_summits.bed"+" "+configs["macs.output_summits"]
run_cmd(command_line)
command_line = "cp "+configs["macs.output_treat_wig_replicates"][0]+" "+configs["macs.output_treat_wig"]
run_cmd(command_line)
if configs["data.has_control"]:
command_line = "zcat "+configs["sample.sample_id"]+"_rep"+str(i)+"_MACS_wiggle/control/"+configs["sample.sample_id"]+"_rep1_control_afterfiting_all.wig.gz > "+configs["macs.output_control_wig"]
run_cmd(command_line)
else:
# run MACS on combined alignment files
if configs["data.has_control"]:
command_line = configs["macs.macs_main"]+macs_genome_option+" -w -S -t "+t_comb_file+" -c "+c_comb_file+" -n "+configs["sample.sample_id"]
run_cmd(command_line)
# copy out and rename the wiggle file
command_line = "zcat "+configs["sample.sample_id"]+"_MACS_wiggle/treat/"+configs["sample.sample_id"]+"_treat_afterfiting_all.wig.gz > "+configs["macs.output_treat_wig"]
run_cmd(command_line)
command_line = "zcat "+configs["sample.sample_id"]+"_MACS_wiggle/control/"+configs["sample.sample_id"]+"_control_afterfiting_all.wig.gz > "+configs["macs.output_control_wig"]
run_cmd(command_line)
else:
command_line = configs["macs.macs_main"]+macs_genome_option+" -w -S -t "+t_comb_file+" -n "+configs["sample.sample_id"]
run_cmd(command_line)
# copy out and rename the wiggle file
command_line = "zcat "+configs["sample.sample_id"]+"_MACS_wiggle/treat/"+configs["sample.sample_id"]+"_treat_afterfiting_all.wig.gz > "+configs["macs.output_treat_wig"]
run_cmd(command_line)
return True | 69deb8fafeb3f7054901d431d6e32c647504258f | 8,881 |
def menu_entry_to_db(entry):
"""
Converts a MenuEntry into Meal, Menu, and MenuItem objects which are stored in the database.
"""
menu, _ = Menu.objects.get_or_create(date=entry.date)
meal = Meal.objects.create(meal_type=entry.meal_type, vendor=entry.vendor)
for item_name in entry.items:
item, _ = MenuItem.objects.get_or_create(name=item_name)
meal.items.add(item)
if entry.meal_type == 'L':
if menu.lunch:
menu.lunch.delete()
menu.lunch = meal
if entry.meal_type == 'D':
if menu.dinner:
menu.dinner.delete()
menu.dinner = meal
menu.save()
return menu | f35ddb4bb715a3a8bcee073fd863a5f4d8240651 | 8,882 |
import torch
def get_device_of(tensor: torch.Tensor) -> int:
"""
Returns the device of the tensor.
"""
if not tensor.is_cuda:
return -1
else:
return tensor.get_device() | 5532712bd812842fc462951e7c763b9753370174 | 8,883 |
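A quick sanity check of the helper; the CUDA branch only runs when a GPU is actually present:
import torch
t = torch.zeros(3)
assert get_device_of(t) == -1              # CPU tensors report -1
if torch.cuda.is_available():
    assert get_device_of(t.cuda()) == 0    # first GPU reports device 0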
def test_script_task(scheduler: Scheduler) -> None:
"""
Tasks should be definable as shell scripts.
"""
@task(script=True)
def task1(message):
return """echo Hello, {message}!""".format(message=message)
assert scheduler.run(task1("World")) == b"Hello, World!\n" | c5f764b06f1245feb9ab0c1af5a13fd368fde362 | 8,884 |
import copy
def __yaml_tag_test(*args, **kwargs):
"""YAML tag constructor for testing only"""
return copy.deepcopy(args), copy.deepcopy(kwargs) | 0abeb68caf32912c7b5a78dacbc89e537061a144 | 8,885 |
import os
import json
def _create_fake_bids_dataset(base_dir='', n_sub=10, n_ses=2,
tasks=['localizer', 'main'],
n_runs=[1, 3], with_derivatives=True,
with_confounds=True, no_session=False):
"""Creates a fake bids dataset directory with dummy files.
Returns fake dataset directory name.
Parameters
----------
base_dir: string (Absolute path), optional
Absolute directory path in which to create the fake BIDS dataset dir.
Default: Current directory.
n_sub: int, optional
        Number of subjects to be simulated in the dataset.
Default: 10
n_ses: int, optional
Number of sessions to be simulated in the dataset.
Ignored if no_session=True.
Default: 2
n_runs: List[int], optional
Default: [1, 3]
with_derivatives: bool, optional
In the case derivatives are included, they come with two spaces and
descriptions. Spaces are 'MNI' and 'T1w'. Descriptions are 'preproc'
and 'fmriprep'. Only space 'T1w' include both descriptions.
Default: True
with_confounds: bool, optional
Default: True
no_session: bool, optional
        Specifying no_session will only produce runs and files without the
optional session field. In this case n_ses will be ignored.
Default: False
Returns
-------
dataset directory name: string
'bids_dataset'
Creates
-------
Directory with dummy files
"""
bids_path = os.path.join(base_dir, 'bids_dataset')
os.makedirs(bids_path)
# Create surface bids dataset
open(os.path.join(bids_path, 'README.txt'), 'w')
vox = 4
created_sessions = ['ses-%02d' % label for label in range(1, n_ses + 1)]
if no_session:
created_sessions = ['']
for subject in ['sub-%02d' % label for label in range(1, n_sub + 1)]:
for session in created_sessions:
subses_dir = os.path.join(bids_path, subject, session)
if session == 'ses-01' or session == '':
anat_path = os.path.join(subses_dir, 'anat')
os.makedirs(anat_path)
anat_file = os.path.join(anat_path, subject + '_T1w.nii.gz')
open(anat_file, 'w')
func_path = os.path.join(subses_dir, 'func')
os.makedirs(func_path)
for task, n_run in zip(tasks, n_runs):
run_labels = [
'run-%02d' % label for label in range(1, n_run + 1)]
for run in run_labels:
fields = [subject, session, 'task-' + task]
if '' in fields:
fields.remove('')
file_id = '_'.join(fields)
if n_run > 1:
file_id += '_' + run
bold_path = os.path.join(func_path,
file_id + '_bold.nii.gz')
_write_fake_bold_img(bold_path, [vox, vox, vox, 100])
events_path = os.path.join(func_path, file_id +
'_events.tsv')
_basic_paradigm().to_csv(events_path, sep='\t', index=None)
param_path = os.path.join(func_path, file_id +
'_bold.json')
with open(param_path, 'w') as param_file:
json.dump({'RepetitionTime': 1.5}, param_file)
# Create derivatives files
if with_derivatives:
bids_path = os.path.join(base_dir, 'bids_dataset', 'derivatives')
os.makedirs(bids_path)
for subject in ['sub-%02d' % label for label in range(1, 11)]:
for session in created_sessions:
subses_dir = os.path.join(bids_path, subject, session)
func_path = os.path.join(subses_dir, 'func')
os.makedirs(func_path)
for task, n_run in zip(tasks, n_runs):
for run in ['run-%02d' % label
for label in range(1, n_run + 1)
]:
fields = [subject, session, 'task-' + task]
if '' in fields:
fields.remove('')
file_id = '_'.join(fields)
if n_run > 1:
file_id += '_' + run
preproc = (file_id +
'_space-MNI_desc-preproc_bold.nii.gz'
)
preproc_path = os.path.join(func_path, preproc)
_write_fake_bold_img(preproc_path,
[vox, vox, vox, 100]
)
preproc = (file_id +
'_space-T1w_desc-preproc_bold.nii.gz'
)
preproc_path = os.path.join(func_path, preproc)
_write_fake_bold_img(preproc_path,
[vox, vox, vox, 100]
)
preproc = (file_id +
'_space-T1w_desc-fmriprep_bold.nii.gz'
)
preproc_path = os.path.join(func_path, preproc)
_write_fake_bold_img(preproc_path,
[vox, vox, vox, 100]
)
if with_confounds:
confounds_path = os.path.join(
func_path,
file_id + '_desc-confounds_regressors.tsv',
)
_basic_confounds(100).to_csv(confounds_path,
sep='\t', index=None)
return 'bids_dataset' | 9e9c33d9d51dbcd67eda66c4a14799219982601b | 8,886 |
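A usage sketch; wrapping the call in a temporary directory is an assumption on my part, while the returned name is always the literal 'bids_dataset':
import os
import tempfile
with tempfile.TemporaryDirectory() as tmp:
    name = _create_fake_bids_dataset(base_dir=tmp, n_sub=2, n_ses=1,
                                     tasks=['main'], n_runs=[2])
    # the dummy files now live under os.path.join(tmp, name)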
def format_data_for_training(data):
"""
Create numpy array with planet features ready to feed to the neural net.
:param data: parsed features
:return: numpy array of shape (number of frames, PLANET_MAX_NUM, PER_PLANET_FEATURES)
"""
training_input = []
training_output = []
for d in data:
features, expected_output = d
if len(expected_output.values()) == 0:
continue
features_matrix = []
for planet_id in range(PLANET_MAX_NUM):
if str(planet_id) in features:
features_matrix.append(features[str(planet_id)])
else:
features_matrix.append([0] * PER_PLANET_FEATURES)
fm = np.array(features_matrix)
output = [0] * PLANET_MAX_NUM
for planet_id, p in expected_output.items():
output[int(planet_id)] = p
result = np.array(output)
training_input.append(fm)
training_output.append(result)
return np.array(training_input), np.array(training_output) | b241a932f7a5321ed28dccd8a583fbcf7529e482 | 8,887 |
import urllib.request
import json
def idcardcert(appcode, card_no):
""" 身份证实名认证身份证二要素一致性验证 """
host = 'http://idquery.market.alicloudapi.com'
path = '/idcard/query'
# method = 'GET'
querys = 'number=%s' % card_no
# bodys = {}
url = host + path + '?' + querys
try:
request = urllib.request.Request(url)
request.add_header('Authorization', 'APPCODE ' + appcode)
response = urllib.request.urlopen(request)
content = response.read()
if content:
return json.loads(content.decode("unicode-escape"))
return content
except BaseException:
return None | a359edf15e7b8795fc80ceda1008f1809d9c52a0 | 8,888 |
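A hedged usage sketch; the AppCode and ID number below are placeholders, and the Alibaba Cloud market API behind this helper requires a valid paid AppCode:
info = idcardcert("your_appcode_here", "110101199003071234")  # placeholder credentials
if info is not None:
    print(info)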
def custom_error_exception(error=None, exception=None):
"""Define custom exceptions for MySQL server errors
This function defines custom exceptions for MySQL server errors and
returns the current set customizations.
If error is a MySQL Server error number, then you have to pass also the
exception class.
The error argument can also be a dictionary in which case the key is
the server error number, and value the exception to be raised.
If none of the arguments are given, then custom_error_exception() will
simply return the current set customizations.
To reset the customizations, simply supply an empty dictionary.
Examples:
import mysql.connector
from mysql.connector import errorcode
# Server error 1028 should raise a DatabaseError
mysql.connector.custom_error_exception(
1028, mysql.connector.DatabaseError)
# Or using a dictionary:
mysql.connector.custom_error_exception({
1028: mysql.connector.DatabaseError,
1029: mysql.connector.OperationalError,
})
# Reset
mysql.connector.custom_error_exception({})
Returns a dictionary.
"""
global _CUSTOM_ERROR_EXCEPTIONS
if isinstance(error, dict) and not len(error):
_CUSTOM_ERROR_EXCEPTIONS = {}
return _CUSTOM_ERROR_EXCEPTIONS
if not error and not exception:
return _CUSTOM_ERROR_EXCEPTIONS
if not isinstance(error, (int, dict)):
raise ValueError(
"The error argument should be either an integer or dictionary")
if isinstance(error, int):
error = { error: exception }
for errno, exception in error.items():
if not isinstance(errno, int):
raise ValueError("error number should be an integer")
try:
if not issubclass(exception, Exception):
raise TypeError
except TypeError:
raise ValueError("exception should be subclass of Exception")
_CUSTOM_ERROR_EXCEPTIONS[errno] = exception
return _CUSTOM_ERROR_EXCEPTIONS | eb24301d2511199e1ee1407152f27d00b72adba5 | 8,889 |
import hashlib
def cal_md5(content):
"""
    Compute the MD5 hash of the content string.
    :param content: string to hash
    :return: hex digest of the MD5 hash
    """
    # encode the string to bytes before hashing
    result = hashlib.md5(content.encode())
    # read out the hex digest
md5 = result.hexdigest()
return md5 | 0cd26654c364e34ecc27b0a0b4d410a539e286c3 | 8,890 |
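A quick check of the helper; the expected digest is the well-known MD5 of the ASCII string "hello":
assert cal_md5("hello") == "5d41402abc4b2a76b9719d911017c592"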
import os
def get_arkouda_server_info_file():
"""
Returns the name of a file to store connection information for the server.
Defaults to ARKOUDA_HOME + ak-server-info, but can be overridden with
ARKOUDA_SERVER_CONNECTION_INFO
:return: server connection info file name as a string
:rtype: str
"""
dflt = os.path.join(get_arkouda_home(), 'ak-server-info')
return os.getenv('ARKOUDA_SERVER_CONNECTION_INFO', dflt) | c2ab568a02d6799f456bc0d96477353f3515c9fb | 8,891 |
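A small sketch of the override behaviour; the path used here is purely illustrative:
import os
os.environ['ARKOUDA_SERVER_CONNECTION_INFO'] = '/tmp/ak-server-info'
assert get_arkouda_server_info_file() == '/tmp/ak-server-info'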
import re
import os
def _GenerateElementInfo(impl_path, names):
"""Generates the data a group needs to load sub elements.
Args:
impl_path: The file path to the command implementation for this group.
names: [str], The names of the sub groups or commands found in the group.
Raises:
LayoutException: if there is a command or group with an illegal name.
Returns:
{str: [str], A mapping from name to a list of paths that implement that
command or group. There can be multiple paths because a command or group
could be implemented in both python and yaml (for different release tracks).
"""
elements = {}
for name in names:
if re.search('[A-Z]', name):
raise LayoutException(
'Commands and groups cannot have capital letters: {0}.'.format(name))
cli_name = name[:-5] if name.endswith('.yaml') else name
sub_path = os.path.join(impl_path, name)
existing = elements.setdefault(cli_name, [])
existing.append(sub_path)
return elements | cbecd4d5ad9ab235d1597ee272db0d244314d0a9 | 8,892 |
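An illustrative input/output sketch; the paths and command names are made up:
# _GenerateElementInfo('/sdk/surface/compute', ['instances', 'ssh.yaml'])
# -> {'instances': ['/sdk/surface/compute/instances'],
#     'ssh': ['/sdk/surface/compute/ssh.yaml']}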
import os
def locate_dir(instrument, mode=None):
"""Locate the instrument specific directory for a reference file.
The mode=None test case is disabled because it mysteriously causes these tests to
fail when running the runtests script:
ERROR: test_throughput_lookup_generation (crds.tests.test_synphot_lookup_generator.TestSynphotLookupGenerator)
FAIL: Doctest: crds.tests.test_bad_files.dt_bad_references_fast_mode
FAIL: Doctest: crds.tests.test_bad_files.dt_bad_rules_jwst_getreferences_warning
FAIL: Doctest: crds.tests.test_certify.certify_recursive
FAIL: Doctest: crds.tests.test_certify.certify_table_comparison_context
FAIL: Doctest: crds.tests.test_heavy_client.dt_getreferences_ignore_cache
FAIL: Doctest: crds.tests.test_list.dt_list_cached_references
FAIL: Doctest: crds.tests.test_synphot_hst.dt_synphot_core_integration_test
FAIL: Doctest: crds.tests.test_synphot_hst.dt_synphot_core_integration_test
XXXX TODO: Enable the mode=None test case and resolve the ensuing test failures in other modules.
>> locate_dir('wfi', None) # doctest: +ELLIPSIS
'.../references/roman/wfi'
>>> locate_dir('wfi', 'instrument') # doctest: +ELLIPSIS
'.../references/roman/wfi'
>>> locate_dir('wfi', 'flat') # doctest: +ELLIPSIS
'.../references/roman'
>>> locate_dir('wfi', 'other') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: Invalid CRDS cache subdirectory mode = 'other'
"""
if mode is None:
mode = config.get_crds_ref_subdir_mode(observatory="roman")
else:
config.check_crds_ref_subdir_mode(mode)
crds_refpath = config.get_crds_refpath("roman")
if mode == "instrument": # use simple names inside CRDS cache.
rootdir = os.path.join(crds_refpath, instrument.lower())
if not os.path.exists(rootdir):
if config.writable_cache_or_verbose("Skipping making instrument directory link for", repr(instrument)):
utils.ensure_dir_exists(rootdir + "/locate_dir.fits")
elif mode == "flat": # use original flat cache structure, all instruments in same directory.
rootdir = crds_refpath
else:
raise ValueError("Unhandled reference file location mode " + repr(mode))
return rootdir | 3d52a7b5cd70590c2edb78af5886ee0291476771 | 8,893 |
def pas(al, ap, bl,bp):
""" Postion-angle from spherical coordinates.
:param al: longitude of point A in radians.
:type al: float
:param ap: latitude of point A in radians.
:type ap: float
:param bl: longitude of point B in radians.
:type bl: float
:param bp: latitude of point B in radians.
:type bp: float
:returns: position angle of B with respect to A in radians (float).
.. seealso:: |MANUAL| page 145
"""
return _sofa.iauPas(float(al), float(ap), float(bl), float(bp)) | 9d8321c908c793df84e5ff28c51e4a79f6db99c6 | 8,894 |
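A small numerical check, assuming the underlying SOFA bindings are importable; the coordinates are illustrative:
import math
# B lies due east of A on the equator, so the position angle is +90 degrees
pa = pas(0.0, 0.0, 1e-4, 0.0)
assert abs(pa - math.pi / 2) < 1e-6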
def get_messy_items_for_training(mod_factor=5):
"""
Fetch a subset of `FacilityListItem` objects that have been parsed and are
not in an error state.
Arguments:
mod_factor -- Used to partition a subset of `FacilityListItem` records. The
larger the value, the fewer records will be contained in the
subset.
Returns:
A dictionary. The key is the `FacilityListItem` ID. The value is a
dictionary of clean field values keyed by field name (country, name,
address). A "clean" value is one which has been passed through the `clean`
function.
"""
facility_list_item_set = FacilityListItem.objects.exclude(
Q(status=FacilityListItem.UPLOADED)
| Q(status=FacilityListItem.ERROR)
| Q(status=FacilityListItem.ERROR_PARSING)
| Q(status=FacilityListItem.ERROR_GEOCODING)
| Q(status=FacilityListItem.ERROR_MATCHING)
).extra(
select={'country': 'country_code'}).values(
'id', 'country', 'name', 'address')
records = [record for (i, record) in enumerate(facility_list_item_set)
if i % mod_factor == 0]
return {str(i['id']): {k: clean(i[k]) for k in i if k != 'id'}
for i in records} | d04f5471266c33cfea122adac72835043ed6c34a | 8,895 |
def tanh_squared(x: np.ndarray, margin: float, loss_at_margin: float = 0.95):
"""Returns a sigmoidal shaping loss based on Hafner & Reidmiller (2011).
Args:
x: A numpy array representing the error.
margin: Margin parameter, a positive `float`.
loss_at_margin: The loss when `l2_norm(x) == margin`. A `float` between 0
and 1.
Returns:
Shaping loss, a `float` bounded in the half-open interval [0, 1).
Raises:
ValueError: If the value of `margin` or `loss_at_margin` is invalid.
"""
if not margin > 0:
raise ValueError("`margin` must be positive.")
if not 0.0 < loss_at_margin < 1.0:
raise ValueError("`loss_at_margin` must be between 0 and 1.")
error = np.linalg.norm(x)
# Compute weight such that at the margin tanh(w * error) = loss_at_margin
w = np.arctanh(np.sqrt(loss_at_margin)) / margin
s = np.tanh(w * error)
return s * s | 4c8dbb826dad5b047682fe030362f4fe71021f06 | 8,896 |
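A worked check of the shaping property; the values are chosen so the error norm exactly equals the margin, where the loss should come out at the default loss_at_margin:
import numpy as np
x = np.array([0.3, 0.4])               # l2_norm(x) == 0.5, exactly the margin
loss = tanh_squared(x, margin=0.5)     # should equal the default loss_at_margin
assert abs(loss - 0.95) < 1e-9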
def _inv_Jacobian_2D(J, detJ):
""" manually invert 2x2 jacobians J in place """
tmp = J[:, 1, 1, :] / detJ
J[:, 0, 1, :] = -J[:, 0, 1, :] / detJ
J[:, 1, 0, :] = -J[:, 1, 0, :] / detJ
J[:, 1, 1, :] = J[:, 0, 0, :] / detJ
J[:, 0, 0, :] = tmp
return J | 23b1ff231e32f09f09dbae781f7e97354f3ca811 | 8,897 |
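A shape-oriented sketch; the array sizes are illustrative, and the per-element determinants must broadcast against the trailing point axis:
import numpy as np
# Five 2x2 Jacobian blocks evaluated at seven points each; adding 2*I keeps them invertible
J = np.random.rand(5, 2, 2, 7) + 2.0 * np.eye(2)[None, :, :, None]
detJ = J[:, 0, 0, :] * J[:, 1, 1, :] - J[:, 0, 1, :] * J[:, 1, 0, :]
Jinv = _inv_Jacobian_2D(J.copy(), detJ)
# J @ Jinv should give the identity for every (element, point) pair
assert np.allclose(np.einsum('aijp,ajkp->aikp', J, Jinv), np.eye(2)[None, :, :, None])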
def ratio_error_acc(y_true, y_pred, epsilon, threshold):
"""
Calculate the ratio error accuracy with the threshold.
:param y_true:
:param y_pred:
:param epsilon:
:param threshold:
:return:
"""
ratio_1 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_true, y_pred, epsilon])
ratio_2 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_pred, y_true, epsilon])
ratio = K.maximum(ratio_1, ratio_2)
mask = K.cast(K.less(ratio, threshold), dtype="float32")
return K.mean(mask) | 9ae487e056800ac9fb5cc6e92301b74c00d65c21 | 8,898 |
def error_embed(ctx: context.ApplicationContext, title: str, description: str, author: bool = True) -> discord.Embed:
"""Make a basic error message embed."""
return make_embed(
ctx=ctx,
title=title if title else "Error:",
description=description,
color=discord.Color.red(),
author=author,
) | aca18ec2d25c4f0a2dec7f4c083716ab9bf4dbae | 8,899 |