content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M) |
---|---|---|
def create_vertices_intrinsics(disparity, intrinsics):
"""3D mesh vertices from a given disparity and intrinsics.
Args:
disparity: [B, H, W] inverse depth
intrinsics: [B, 4] reference intrinsics
Returns:
[B, H*W, 3] vertex coordinates.
"""
# Focal lengths
fx = intrinsics[:, 0]
fy = intrinsics[:, 1]
fx = fx[Ellipsis, tf.newaxis, tf.newaxis]
fy = fy[Ellipsis, tf.newaxis, tf.newaxis]
# Centers
cx = intrinsics[:, 2]
cy = intrinsics[:, 3]
cx = cx[Ellipsis, tf.newaxis]
cy = cy[Ellipsis, tf.newaxis]
batch_size, height, width = disparity.shape.as_list()
vertex_count = height * width
i, j = tf.meshgrid(tf.range(width), tf.range(height))
i = tf.cast(i, tf.float32)
j = tf.cast(j, tf.float32)
width = tf.cast(width, tf.float32)
height = tf.cast(height, tf.float32)
# 0.5 is added to get the position of the pixel centers.
i = (i + 0.5) / width
j = (j + 0.5) / height
i = i[tf.newaxis]
j = j[tf.newaxis]
depths = 1.0 / tf.clip_by_value(disparity, 0.01, 1.0)
mx = depths / fx
my = depths / fy
px = (i-cx) * mx
py = (j-cy) * my
vertices = tf.stack([px, py, depths], axis=-1)
vertices = tf.reshape(vertices, (batch_size, vertex_count, 3))
return vertices | d476767c71fb1a8cefe121a3aaf8cbf9a19e7943 | 16,000 |
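A minimal usage sketch for the row above, with hypothetical values; it assumes TensorFlow 2.x is installed and that create_vertices_intrinsics (and its tf dependency) is in scope. A constant disparity of 0.5 is unprojected into one camera-space vertex per pixel.
import tensorflow as tf

# Hypothetical inputs: batch of 1, 4x6 disparity map, normalized intrinsics.
disparity = tf.fill([1, 4, 6], 0.5)               # inverse depth 0.5 -> depth 2.0
intrinsics = tf.constant([[1.0, 1.0, 0.5, 0.5]])  # fx, fy, cx, cy
vertices = create_vertices_intrinsics(disparity, intrinsics)
print(vertices.shape)  # (1, 24, 3): one 3D vertex per pixel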
def _find_smart_path(challbs, preferences, combinations):
"""Find challenge path with server hints.
Can be called if combinations is included. Function uses a simple
ranking system to choose the combo with the lowest cost.
"""
chall_cost = {}
max_cost = 1
for i, chall_cls in enumerate(preferences):
chall_cost[chall_cls] = i
max_cost += i
# max_cost is now equal to sum(indices) + 1
best_combo = []
# Set above completing all of the available challenges
best_combo_cost = max_cost
combo_total = 0
for combo in combinations:
for challenge_index in combo:
combo_total += chall_cost.get(challbs[
challenge_index].chall.__class__, max_cost)
if combo_total < best_combo_cost:
best_combo = combo
best_combo_cost = combo_total
combo_total = 0
if not best_combo:
_report_no_chall_path()
return best_combo | 96f55288bfa08de32badd9f1a96b3decd76573c8 | 16,001 |
def run_generator_and_test(test_case,
mlmd_connection,
generator_class,
pipeline,
task_queue,
use_task_queue,
service_job_manager,
num_initial_executions,
num_tasks_generated,
num_new_executions,
num_active_executions,
expected_exec_nodes=None,
ignore_node_ids=None):
"""Runs generator.generate() and tests the effects."""
if service_job_manager is None:
service_job_manager = service_jobs.DummyServiceJobManager()
with mlmd_connection as m:
executions = m.store.get_executions()
test_case.assertLen(
executions, num_initial_executions,
f'Expected {num_initial_executions} execution(s) in MLMD.')
tasks = run_generator(
mlmd_connection,
generator_class,
pipeline,
task_queue,
use_task_queue,
service_job_manager,
ignore_node_ids=ignore_node_ids)
with mlmd_connection as m:
test_case.assertLen(
tasks, num_tasks_generated,
f'Expected {num_tasks_generated} task(s) to be generated.')
executions = m.store.get_executions()
num_total_executions = num_initial_executions + num_new_executions
test_case.assertLen(
executions, num_total_executions,
f'Expected {num_total_executions} execution(s) in MLMD.')
active_executions = [
e for e in executions if execution_lib.is_execution_active(e)
]
test_case.assertLen(
active_executions, num_active_executions,
f'Expected {num_active_executions} active execution(s) in MLMD.')
if expected_exec_nodes:
for i, task in enumerate(tasks):
_verify_exec_node_task(test_case, pipeline, expected_exec_nodes[i],
active_executions[i].id, task)
return tasks | c7f03b5db9f100c8c5eec029e843ce4ab1cdb84e | 16,002 |
def sort_func(kd1, kd2):
"""
Compares 2 key descriptions
:param kd1: First key description
:param kd2: Second key description
:return: -1, 0, 1 depending on whether kd1 is less than, equal to, or greater than kd2
"""
_c = type_order(kd1, kd2)
if _c is not None:
return _c
return kid_order(kd1, kd2) | 2ac9100f9c69c283266cc4ba4f6d6262551ce1b5 | 16,003 |
def sumdigits(a: int):
"""Sum of the digits of an integer"""
return sum(map(int, str(a))) | 018bcc429e6ea3842fd9e9e2580820aed29bc0aa | 16,004 |
from datetime import datetime as dt, timedelta
def nth_weekday_of_month(y, m, n, w):
"""
y = 2020; m = 2
assert nth_weekday_of_month(y, m, -1, 'sat') == dt(2020, 2, 29)
assert nth_weekday_of_month(y, m, -2, 'sat') == dt(2020, 2, 22)
assert nth_weekday_of_month(y, m, 1, 'sat') == dt(2020, 2, 1)
assert nth_weekday_of_month(y, m, 1, 'sun') == dt(2020, 2, 2)
assert nth_weekday_of_month(y, m, 1, 'monday') == dt(2020, 2, 3)
assert nth_weekday_of_month(y, 'G', 3, 'sat') == dt(2020, 2, 15)
assert nth_weekday_of_month(y, 'G', 3, 'sun') == dt(2020, 2, 16)
assert nth_weekday_of_month(y, 'G', 3, 'monday') == dt(2020, 2, 17)
"""
if n < 0 :
return nth_weekday_of_month(y, m+1, 1, w) + timedelta(7 * n)
t = dt(y, m , 1)
bump = wkdays[w[:3].lower()] - t.weekday()
if bump < 0:
bump = bump + 7
bump = bump + (n-1) * 7
res = t + timedelta(bump)
return res | 2f422b3fac4d97db64f541b54158248c44afad14 | 16,005 |
def getBits(val, hiIdx: int, loIdx: int) -> int:
"""Returns a bit slice of a value.
Args:
val: Original value.
hiIdx: Upper (high) index of slice.
loIdx: Lower index of slice.
Returns:
The bit slice.
"""
return (~(MASK_32<<(hiIdx-loIdx+1)) & (val>>loIdx)) | acaf1a36fceb12ee99140aca0769dde084ee08d6 | 16,006 |
def get_hostname():
"""Returns the hostname, from /etc/hostname."""
hostname = ""
try:
with open('/etc/hostname') as f:
hostname = f.read().rstrip()
if len(hostname) == 0:
hostname = "Unknown"
except:
hostname = "Unknown"
return hostname | 4cd4ffc1c8c56bc2e440443fdbc315d27fb94033 | 16,007 |
def is_valid_body(val):
"""Body must be a dictionary."""
return isinstance(val, dict) | ef3a605e1e84ce9d74f77c07799d1abb58aaf61a | 16,008 |
def _vba_to_python_op(op, is_boolean):
"""
Convert a VBA boolean operator to a Python boolean operator.
"""
op_map = {
"Not" : "not",
"And" : "and",
"AndAlso" : "and",
"Or" : "or",
"OrElse" : "or",
"Eqv" : "|eq|",
"=" : "|eq|",
">" : ">",
"<" : "<",
">=" : ">=",
"=>" : ">=",
"<=" : "<=",
"=<" : "<=",
"<>" : "|neq|",
"is" : "|eq|"
}
if (not is_boolean):
op_map["Not"] = "~"
op_map["And"] = "&"
op_map["AndAlso"] = "&"
op_map["Or"] = "|"
op_map["OrElse"] = "|"
return op_map[op] | a6ed0c65c6c2d2635f14fb664540eaf283ee4065 | 16,009 |
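A few illustrative calls for the operator-mapping row above (assuming _vba_to_python_op is in scope); the boolean flag decides between logical and bitwise Python operators.
assert _vba_to_python_op("And", True) == "and"   # boolean context
assert _vba_to_python_op("And", False) == "&"    # bitwise context
assert _vba_to_python_op("<>", True) == "|neq|"  # custom not-equal marker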
def file_diff_format(filename1, filename2):
"""
Inputs:
filename1 - name of first file
filename2 - name of second file
Output:
Returns a four line string showing the location of the first
difference between the two files named by the inputs.
If the files are identical, the function instead returns the
string "No differences\n".
If either file does not exist or is not readable, then the
behavior of this function is undefined.
"""
# read files
lst1 = get_file_lines(filename1)
lst2 = get_file_lines(filename2)
# get tuple indicating line and index of first difference between two files
my_tup = multiline_diff(lst1, lst2)
# handle identical case
if my_tup[0] == -1:
return "No differences\n"
else:
# get 3 line formatted output of first difference between two lines
sdf_output = singleline_diff_format(lst1[my_tup[0]], lst2[my_tup[0]], my_tup[1])
# all other cases
return "Line " + str(my_tup[0]) + ":\n" + sdf_output | c2027767ac6694620d895ef1565a03e7b706c2e7 | 16,010 |
import six
def _check_assembly_string(base_asm, instr_type, target, operands):
"""
:param base_asm:
:type base_asm:
:param instr_type:
:type instr_type:
:param target:
:type target:
:param operands:
:type operands:
"""
LOG.debug("Start checking assembly string: %s", base_asm)
operands = list(operands)
relocation_mode = False
for idx, operand in enumerate(operands):
if isinstance(operand, six.string_types) and "@" not in operand:
operands[idx] = Address(base_address=operand)
if isinstance(operand, six.string_types) and "@" in operand:
relocation_mode = True
instruction = target.new_instruction(instr_type.name)
try:
if not relocation_mode:
instruction.set_operands(operands)
else:
# Go one by one, and make relocation safe
for operand, value in zip(instruction.operands(), operands):
if (isinstance(operand.type, OperandImmRange) and
"@" in value):
operand.set_value(value, check=False)
else:
operand.set_value(value)
except MicroprobeValueError:
LOG.debug("End checking assembly string: Operands not valid")
return False
except MicroprobeCodeGenerationError:
LOG.debug(
"End checking assembly string: Operands not valid for "
"callback"
)
return False
nasm = _normalize_asm(instruction.assembly())
base_asm = _normalize_asm(base_asm)
base_asm = base_asm.replace(instr_type.name, instr_type.mnemonic)
LOG.debug("'%s' == '%s' ?", nasm, base_asm)
if nasm == base_asm:
LOG.debug("End checking assembly string: Valid")
return True
LOG.debug("End checking assembly string: Not valid")
return False | 64e6e812d037c6e9576ee5db0c3d06468a7b6414 | 16,011 |
def get_label_number(window):
"""This method assigns to each label of a window a number."""
mode_list = ["bike", "car", "walk", "bus", "train"]
current_label_number = 0
for index, mode in enumerate(mode_list):
if window[1] == mode:
current_label_number = index
return current_label_number | 5ed3c683e8619e1b07857992f54079bc68fdfa58 | 16,012 |
def midi_array_to_event(midi_as_array):
"""
Take converted MIDI array and convert to array of Event objects
"""
# Sort MIDI array
midi = sorted(midi_as_array, key=itemgetter(2))
# Init result
result = []
# Accumulators for computing start and end times
active_notes = []
curr_time = 0
# For comparing velocities
prev_vel_range = 0
# For all the entries in the midi array
for i in midi:
# Add the current note
active_notes.append(i)
# Get time shift values
shift_values, shift_sum = get_shift_value(i[2] - curr_time)
# Apply time shift to the next start note
if shift_values:
for s in shift_values:
if s > 0:
result.append(Event(EventType.TIME_SHIFT, s))
else:
result.append(Event(EventType.TIME_SHIFT, shift_sum))
# Update time
curr_time += shift_sum
# Check if there are notes that are playing that need to end
notes_to_end = [x for x in active_notes if curr_time >= x[3]]
active_notes[:] = (x for x in active_notes if curr_time < x[3])
# For the finished notes
for j in notes_to_end:
# End the note
result.append(Event(EventType.NOTE_OFF, j[1]))
# If the velocity has changed by a large enough amount, add a set velocity event
temp_velocity = i[0]
bin_size = (127/20)
for vel in range(20):
if temp_velocity < (vel + 1) * bin_size:
if prev_vel_range != vel:
result.append(Event(EventType.SET_VELOCITY, int((vel + 1) * bin_size)))
prev_vel_range = vel
break
# Start the note
result.append(Event(EventType.NOTE_ON, i[1]))
# If there are still notes in midi_acc
if active_notes:
for i in active_notes:
if i[3] > curr_time:
# Apply time shift
shift_values, shift_sum = get_shift_value(i[3] - curr_time)
if shift_values:
for s in shift_values:
if s > 0:
result.append(Event(EventType.TIME_SHIFT, s))
else:
result.append(Event(EventType.TIME_SHIFT, shift_sum))
# Update time
curr_time += shift_sum
# End note
result.append(Event(EventType.NOTE_OFF, i[1]))
# Return array
return result | 63391a1fa045f2185ce22c3ab5da186169d445e7 | 16,013 |
from typing import Dict
from typing import Type
def find_benchmarks(module) -> Dict[str, Type[Benchmark]]:
"""Enumerate benchmarks in `module`."""
found = {}
for name in module.__all__:
benchmark_type = getattr(module, name)
found[benchmark_type.name] = benchmark_type
return found | 4b456a44963629da0b6072dcb9e6e8946cbaef23 | 16,014 |
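A small sketch of how find_benchmarks walks a module's __all__. The stand-in module and DummyBenchmark class are hypothetical; the Benchmark base class referenced in the annotation is assumed to be defined elsewhere in the package.
import types

class DummyBenchmark:
    name = "dummy"

# Stand-in for a real benchmarks module: __all__ lists the exported class names.
fake_module = types.SimpleNamespace(__all__=["DummyBenchmark"], DummyBenchmark=DummyBenchmark)
print(find_benchmarks(fake_module))  # {'dummy': <class '...DummyBenchmark'>}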
def out_folder_android_armv8_clang(ctx, section_name, option_name, value):
""" Configure output folder for Android ARMv8 Clang """
if not _is_user_input_allowed(ctx, option_name, value):
Logs.info('\nUser Input disabled.\nUsing default value "%s" for option: "%s"' % (value, option_name))
return value
# GUI
if not ctx.is_option_true('console_mode'):
return ctx.gui_get_attribute(section_name, option_name, value)
_output_folder_disclaimer(ctx)
return _get_string_value(ctx, 'Android ARMv8 Clang Output Folder', value) | b713642879cfcffe78fc415adbbea7c13c319925 | 16,015 |
import copy
import json
import os
def remap_classes(dataset, class_map):
""" Replaces classes of dataset based on a dictionary"""
class_new_names = list(set(class_map.values()))
class_new_names.sort() # NOTE: sort() sorts the list in place and returns None
class_originals = copy.deepcopy(dataset['categories'])
dataset['categories'] = [] # removing all dependencies
class_ids_map = {} # map from old id to new id
# Check whether the category has background or not, assign index 0. Useful for panoptic segmentation.
has_background = False
if 'Background' in class_new_names:
# Check whether the background category has index zero.
if class_new_names.index('Background') != 0:
class_new_names.remove('Background')
class_new_names.insert(0, 'Background')
has_background = True
# Catching duplicates - TACO had duplicates for id 4040 and 309. Re-id'd
id_ann_all = []
id_ann_repeated = []
for index_old, ann_old in enumerate(dataset['annotations']):
if ann_old['id'] in id_ann_all:
# if found a duplicate, re-id at the end
id_ann_repeated.append(ann_old['id'])
ann_old['id'] = len(dataset['annotations'])+len(id_ann_repeated)-1
else:
id_ann_all.append(ann_old['id'])
print(f'Found {len(id_ann_repeated)} annotations repeated.'
f'\nPlease double check input file, annotation id(s) {id_ann_repeated} are duplicated!\n')
# Replace categories, iterating through every class name
for id_new, class_new_name in enumerate(class_new_names):
# Make sure id:0 is reserved for background
id_rectified = id_new
if not has_background:
id_rectified += 1
# Creating new category dictionary, using new category ID and the new class name
category = {
'supercategory': '',
'id': id_rectified, # Background has id=0
'name': class_new_name,
}
dataset['categories'].append(category) # assigning new categories
# Map class names
for class_original in class_originals:
# If the new class exists in the value of the class map dict, create new class id
if class_map[class_original['name']] == class_new_name:
class_ids_map[class_original['id']] = id_rectified
# Update annotations category id tag
for ann in dataset['annotations']:
ann['category_id'] = class_ids_map[ann['category_id']]
# Saving the newly created file as a JSON file
num_classes = str(len(class_new_names))
ann_out_path = './data' + '/' + 'ann_'+ 'map_to_' + num_classes +'.json'
with open(ann_out_path, 'w+') as f:
f.write(json.dumps(dataset))
# return path to new file, for loading somewhere else.
return str(os.path.abspath(ann_out_path)) | fd3c971221a102f296c76f72d6296ebf0a0e4763 | 16,016 |
def MCTS(root, verbose = False):
"""initialization of the chemical trees and grammar trees"""
run_time=time.time()+600*2
rootnode = Node(state = root)
state = root.Clone()
maxnum=0
iteration_num=0
start_time=time.time()
"""----------------------------------------------------------------------"""
"""global variables used for save valid compounds and simulated compounds"""
valid_compound=[]
all_simulated_compound=[]
desired_compound=[]
max_score=-100.0
desired_activity=[]
time_distribution=[]
num_searched=[]
current_score=[]
depth=[]
all_score=[]
"""----------------------------------------------------------------------"""
while maxnum<10100:
print(maxnum)
node = rootnode
state = root.Clone()
"""selection step"""
node_pool=[]
print "current found max_score:",max_score
while node.childNodes!=[]:
node = node.Selectnode()
state.SelectPosition(node.position)
print "state position:,",state.position
depth.append(len(state.position))
if len(state.position)>=81:
re=-1.0
while node != None:
node.Update(re)
node = node.parentNode
else:
"""------------------------------------------------------------------"""
"""expansion step"""
"""calculate how many nodes will be added under current leaf"""
expanded=expanded_node(model,state.position,val)
nodeadded=node_to_add(expanded,val)
all_posible=chem_kn_simulation(model,state.position,val,nodeadded)
generate_smile=predict_smile(all_posible,val)
new_compound=make_input_smile(generate_smile)
node_index,score,valid_smile,all_smile=check_node_type(new_compound,SA_mean,SA_std,logP_mean,logP_std,cycle_mean,cycle_std)
print(node_index)
valid_compound.extend(valid_smile)
all_simulated_compound.extend(all_smile)
all_score.extend(score)
iteration_num=len(all_simulated_compound)
if len(node_index)==0:
re=-1.0
while node != None:
node.Update(re)
node = node.parentNode
else:
re=[]
for i in range(len(node_index)):
m=node_index[i]
maxnum=maxnum+1
node.Addnode(nodeadded[m],state)
node_pool.append(node.childNodes[i])
if score[i]>=max_score:
max_score=score[i]
current_score.append(max_score)
else:
current_score.append(max_score)
depth.append(len(state.position))
"""simulation"""
re.append((0.8*score[i])/(1.0+abs(0.8*score[i])))
if maxnum==100:
maxscore100=max_score
time100=time.time()-start_time
if maxnum==500:
maxscore500=max_score
time500=time.time()-start_time
if maxnum==1000:
maxscore1000=max_score
time1000=time.time()-start_time
if maxnum==5000:
maxscore5000=max_score
time5000=time.time()-start_time
if maxnum==10000:
time10000=time.time()-start_time
maxscore10000=max_score
#valid10000=10000*1.0/len(all_simulated_compound)
"""backpropation step"""
#print "node pool length:",len(node.childNodes)
for i in range(len(node_pool)):
node=node_pool[i]
while node != None:
node.Update(re[i])
node = node.parentNode
#finish_iteration_time=time.time()-iteration_time
#print "four step time:",finish_iteration_time
"""check if found the desired compound"""
#print "all valid compounds:",valid_compound
finished_run_time=time.time()-start_time
print "logp max found:", current_score
#print "length of score:",len(current_score)
#print "time:",time_distribution
print "valid_com=",valid_compound
print "num_valid:", len(valid_compound)
print "all compounds:",len(all_simulated_compound)
print "score=", all_score
print "depth=",depth
print len(depth)
print "runtime",finished_run_time
#print "num_searched=",num_searched
print "100 max:",maxscore100,time100
print "500 max:",maxscore500,time500
print "1000 max:",maxscore1000,time1000
print "5000 max:",maxscore5000,time5000
print "10000 max:",maxscore10000,time10000
return valid_compound | 686a412c0f4cc4cd81d96872e9929d1ce51e7ed8 | 16,017 |
def update_internalnodes_MRTKStandard() -> bpy.types.NodeGroup:
"""定義中のノードグループの内部ノードを更新する
Returns:
bpy.types.NodeGroup: 作成ノードグループの参照
"""
# データ内に既にMRTKStandardのノードグループが定義されているか確認する
# (get関数は対象が存在しない場合 None が返る)
get_nodegroup = bpy.data.node_groups.get(def_nodegroup_name)
# ノードグループが取得できたか確認する
if get_nodegroup == None:
# ノードグループが定義されていない場合は処理を行わない
return None
# 入力出力ノードを除くノードグループ内部のノードとリンクのみ更新を行う
# 現在の内部ノードを全て操作する
for node in get_nodegroup.nodes:
# 入力ノードか確認する
if node.name == def_inputnode_name:
# 入力ノードの場合、処理しない
continue
# 出力ノードか確認する
if node.name == def_outputnode_name:
# 出力ノードの場合、処理しない
continue
# 入出力ノード以外は全て削除する
get_nodegroup.nodes.remove(node)
# ノードグループにバージョン記載ノードを作成する
group_versionnode = add_nodegroup_MRTKStandard_framenode()
# ノードグループにBSDFノードを作成する
group_bsdfnode = add_nodegroup_MRTKStandard_bsdfnode()
# ノードグループにRGBミックスノードを作成する
group_rgbmix = add_nodegroup_MRTKStandard_rgbmixnode()
# ノードグループに滑らかさ数値反転ノードを作成する
group_smoothinversion = add_nodegroup_MRTKStandard_smoothinversionnode()
# ノードグループを構成するのリンク情報を設定する
link_result = link_MRTKStandardNodeGroup_default()
# リンク接続に成功したか
if link_result == False:
# リンク接続に失敗した場合はノードを返さない
return None
return get_nodegroup | 14d152377b58de842ff6cc228e80fbb0c48c5128 | 16,018 |
def _ensure_consistent_schema(
frame: SparkDF,
schemas_df: pd.DataFrame,
) -> SparkDF:
"""Ensure the dataframe is consistent with the schema.
If there are column data type mismatches, (more than one data type
for a column name in the column schemas) then will try to convert
the data type if possible:
* if they are all number data types, then picks the largest number
type present
* if one of the types is string, then ensures it casts the column to
string type
Also fills any missing columns with Null values, ensuring correct
dtype.
Parameters
----------
frame : SparkDF
schemas_df : pd.DataFrame
Simple column schemas in the form (name, dtype) for all
dataframes set to be concatenated.
Returns
-------
SparkDF
Input dataframe with consistent schema.
"""
final_schema = _get_final_schema(schemas_df)
missing_fields = [f for f in final_schema if f not in frame.dtypes]
for column, dtype in missing_fields:
# If current frame missing the column in the schema, then
# set values to Null.
vals = (
F.lit(None) if column not in frame.columns
else F.col(column)
)
# Cast the values with the correct dtype.
frame = frame.withColumn(column, vals.cast(dtype))
return frame | 653f2740fa2ba090c1a1ace71b09523848d52be7 | 16,019 |
def shave_bd(img, bd):
"""
Shave border area of spatial views. A common operation in SR.
:param img:
:param bd:
:return:
"""
return img[bd:-bd, bd:-bd, :] | 4b822c5e57787edb74955fd350ad361080b8640b | 16,020 |
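A quick shape check for the border-shaving row above (assumes shave_bd is in scope and NumPy is available); the image size is hypothetical.
import numpy as np

img = np.zeros((64, 48, 3))
print(shave_bd(img, 4).shape)  # (56, 40, 3): 4 pixels trimmed from every border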
import os
import os.path as osp
def get_raw_dir(args):
"""
Archived function. Ignore this for now
"""
root = "C:\\Workspace\\FakeNews"
if os.name == "posix":
root = '..'
path = osp.join(root, "Demo", "data", f"{args.dataset}", "raw")
return path | 34e1f9277bac24e3dc6938b7bf854e7fd018ee41 | 16,021 |
def plotly_single(ma, average_type, color, label, plot_type='line'):
"""A plotly version of plot_single. Returns a list of traces"""
summary = list(np.ma.__getattribute__(average_type)(ma, axis=0))
x = list(np.arange(len(summary)))
if isinstance(color, str):
color = list(matplotlib.colors.to_rgb(color))
traces = [go.Scatter(x=x, y=summary, name=label, line={'color': "rgba({},{},{},0.9)".format(color[0], color[1], color[2])}, showlegend=False)]
if plot_type == 'fill':
traces[0].update(fill='tozeroy', fillcolor=color)
if plot_type in ['se', 'std']:
if plot_type == 'se': # standard error
std = np.std(ma, axis=0) / np.sqrt(ma.shape[0])
else:
std = np.std(ma, axis=0)
x_rev = x[::-1]
lower = summary - std
trace = go.Scatter(x=x + x_rev,
y=np.concatenate([summary + std, lower[::-1]]),
fill='tozerox',
fillcolor="rgba({},{},{},0.2)".format(color[0], color[1], color[2]),
line=go.Line(color='transparent'),
showlegend=False,
name=label)
traces.append(trace)
return traces | b568d0e4496fc424aa5b07ff90bf45880a374d56 | 16,022 |
from pathlib import Path
def create_task(className, *args, projectDirectory='.', dryrun=None, force=None, source=False):
"""Generates task class from the parameters derived from :class:`.Task`
Fails if the target file already exists unless ``force=True`` or ``--force`` in the CLI is set.
Setting the ``--source`` will generate a different template that have stubs with the functions that need to be overwritten.
Parameters
----------
className : string (CamelCase)
Name of the class to be created
projectDirectory : string (default='.')
Location of the project directory, the code will be created in ``projectDirectory/data_models/class_name.py``.
dryrun : bool (default=None)
If set to ``True`` it returns the generated code as a string
force : bool (default=None)
If set to ``True`` it overwrites the target file
source : bool (default=False)
If set to ``True`` the class will generate stubs for functions to be overwritten
*args : List of strings (CamelCase)
Classes to be imported into the generated code from the datamodel, fails if class not found
Returns
-------
content : string
The generated code if ``dryrun`` is specified
"""
if source:
taskType = NameString('Source')
else:
taskType = NameString('Task')
project = HypergolProject(projectDirectory=projectDirectory, dryrun=dryrun, force=force)
className = NameString(className)
dependencies = [NameString(value) for value in args]
project.check_dependencies(dependencies)
content = project.render(
templateName=f'{taskType.asFileName}.j2',
templateData={'className': className, 'dependencies': dependencies},
filePath=Path(projectDirectory, 'tasks', className.asFileName)
)
return project.cli_final_message(creationType=taskType, name=className, content=(content, )) | 028e751a43930e167f000d5ff0d0c76d1340cec4 | 16,023 |
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar]) | d65e0dfef94060db243de2a3a4f162aa01e12537 | 16,024 |
def __resolve_key(key: Handle) -> PyHKEY:
"""
Returns the full path to the key
>>> # Setup
>>> fake_registry = fake_reg_tools.get_minimal_windows_testregistry()
>>> load_fake_registry(fake_registry)
>>> # Connect registry and get PyHkey Type
>>> reg_handle = ConnectRegistry(None, HKEY_CURRENT_USER)
>>> __resolve_key(key=reg_handle).handle.full_key
'HKEY_CURRENT_USER'
>>> __resolve_key(key=HKEY_CURRENT_USER).handle.full_key
'HKEY_CURRENT_USER'
>>> # Test PyHKey Type (the most common)
>>> discard = __resolve_key(reg_handle)
>>> # Test int Type
>>> discard = __resolve_key(HKEY_CURRENT_USER)
>>> # Test HKEYType
>>> hkey = HKEYType(handle=reg_handle.handle, access=reg_handle._access)
>>> discard = __resolve_key(hkey)
>>> # Test invalid handle
>>> discard = __resolve_key(42)
Traceback (most recent call last):
...
OSError: [WinError 6] The handle is invalid
>>> # Test invalid type
>>> discard = __resolve_key('spam') # noqa
Traceback (most recent call last):
...
RuntimeError: unknown Key Type
"""
if isinstance(key, PyHKEY):
key_handle = key
elif isinstance(key, int):
try:
key_handle = PyHKEY(__fake_registry.hive[key])
except KeyError:
error = OSError("[WinError 6] The handle is invalid")
setattr(error, "winerror", 6)
raise error
elif isinstance(key, HKEYType):
key_handle = PyHKEY(handle=key.handle, access=key._access)
else:
raise RuntimeError("unknown Key Type")
return key_handle | 70344ac3b068793a0c40e4151fb210c269dba743 | 16,025 |
def vec3f_unitZ():
"""vec3f_unitZ() -> vec3f"""
return _libvncxx.vec3f_unitZ() | dd0b6e28333a8d72918113b0f5caf788fb51bd43 | 16,026 |
import os
def _BotNames(source_config, full_mode=False):
"""Returns try bot names to use for the given config file name."""
platform = os.path.basename(source_config).split('.')[0]
assert platform in PLATFORM_BOT_MAP
bot_names = PLATFORM_BOT_MAP[platform]
if full_mode:
return bot_names
return [bot_names[0]] | c514e2534d57ce5867b72e3cfd182854e99c1bf6 | 16,027 |
def get_public_key(public_key_path=None, private_key_path=None):
"""get_public_key.
Loads public key. If no path is specified, loads signing_key.pem.pub from the
current directory. If a private key path is provided, the public key path is
ignored and the public key is loaded from the private key.
:param public_key_path: a string of the public key file name, with relative or full path
:param private_key_path: a string of the private key file name, with relative or full path
:return:
"""
if private_key_path is not None:
private_key = get_private_key(private_key_path)
public_key = private_key.publickey().exportKey("PEM")
return public_key
elif public_key_path is None:
public_key_path = "signing_key.pem.pub"
with open(public_key_path, "rb") as f:
public_key = RSA.importKey(f.read())
return public_key | f37bb64e9a0971c77986b6c82b8519153f3f8eaa | 16,028 |
def InitializeState(binaryString):
"""
State initializer
"""
state = np.zeros(shape=(4, 4), dtype=np.uint8)
plaintextBytes = SplitByN(binaryString, 8)
for col in range(4):
for row in range(4):
binary = plaintextBytes[col * 4 + row]
state[row, col] = int(binary, 2)
return np.matrix(state) | b8de68bba8837865f9e74c43be1b6144774d69ad | 16,029 |
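A sketch of how InitializeState lays a 128-bit binary string out column-major into the 4x4 AES-style state. SplitByN is an external helper in the row above, so a minimal stand-in is defined here; the byte values are hypothetical.
import numpy as np

def SplitByN(s, n):
    # Minimal stand-in for the helper assumed by InitializeState.
    return [s[i:i + n] for i in range(0, len(s), n)]

binary = ''.join(format(b, '08b') for b in range(16))  # bytes 0x00..0x0f
state = InitializeState(binary)
print(state[0, :])  # [[ 0  4  8 12]]: bytes fill the state column by column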
from typing import Dict
from typing import Any
def _apply_modifier(s: str, modifier: str, d: Dict[Any, str]) -> str:
"""
This will search for the ^ signs and replace the next
digit or (digits when {} is used) with its/their uppercase representation.
:param s: Latex string code
:param modifier: Modifier command
:param d: Dict to look upon
:return: New text with replaced text.
"""
s = s.replace(modifier, "^")
newtext = ""
mode_normal, mode_modified, mode_long = range(3)
mode = mode_normal
for ch in s:
if mode == mode_normal and ch == '^':
mode = mode_modified
continue
elif mode == mode_modified and ch == '{':
mode = mode_long
continue
elif mode == mode_modified:
newtext += d.get(ch, ch)
mode = mode_normal
continue
elif mode == mode_long and ch == '}':
mode = mode_normal
continue
if mode == mode_normal:
newtext += ch
else:
newtext += d.get(ch, ch)
return newtext | c54a2c66ff6ee768e588b14472fa5707edf9bc56 | 16,030 |
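An illustrative call for the modifier-replacement row above (assuming _apply_modifier is in scope); the lookup table of Unicode superscripts is hypothetical.
superscripts = {"2": "\u00b2", "3": "\u00b3"}
print(_apply_modifier("x^2 + y^{23}", "^", superscripts))  # x² + y²³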
import concurrent.futures
def threaded_polling(data, max_workers):
"""
Multithreaded polling method to get the data from cryptocompare
:param data: dictionary containing the details to be fetched
:param max_workers: maximum number of threads to spawn
:return list: containing the high low metrics for each pair
"""
hl_parsed_data = list()
exchange = data["name"]
pairs = data["pair_whitelist"]
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
# Start the load operations and mark each future with its URL
future_to_pairs = [
executor.submit(run_parser, _.split("/")[0], _.split("/")[1], exchange)
for _ in pairs
]
total = len(future_to_pairs)
count = 0
for future in concurrent.futures.as_completed(future_to_pairs):
try:
data = future.result()
hl_parsed_data.append(data)
except Exception as exc:
print(exc)
else:
count += 1
msg = f"Parsing {data['symbol']:10} | {count:2}/{total:2}"
print(msg, end="\r")
print(f"Pairs processed from {exchange} | {count:2}/{total:2}")
return hl_parsed_data | 587a07912b8ea6d0267638c72a98fdbeb6b0ebf0 | 16,031 |
from typing import Dict
from typing import Any
from typing import List
def sub(attrs: Dict[str, Any], in_xlayers: List[XLayer]) -> Dict[str, List[int]]:
"""Return numpy-style subtraction layer registration information (shape)
NOTE: https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html"""
assert len(in_xlayers) == 2, "Subtract layer expects two input layers"
lX, rX = in_xlayers
shape = TensorShape(get_numpy_broadcasted_shape(lX.shapes[:], rX.shapes[:]))
return {'shape': shape} | ebe4160dc92c259d2d6fd58a96eb78c697c0a5e4 | 16,032 |
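The shape registration above leans on NumPy broadcasting; here is a standalone check of that rule, independent of the XLayer/TensorShape machinery, which is assumed external.
import numpy as np

print(np.broadcast_shapes((1, 4, 5), (3, 1, 5)))       # (3, 4, 5)
print((np.zeros((2, 1, 4)) - np.zeros((3, 4))).shape)  # (2, 3, 4)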
import re
import numpy as np
def word_column_filter_df(dataframe, column_to_filter, column_freeze, word_list):
# The .where() call returns a position that must be converted into an index.
# A reference column name (e.g. product code) must be supplied to recover the index, or a re-indexed column must be built.
"""Filters the rows of a dataframe according to a list of words applied to one column, then returns the dataframe"""
position_to_drop_lst = np.where(dataframe[column_to_filter].str.contains('|'.join(map(re.escape, word_list)),
na=np.NaN))[0]
indices_to_drop_lst = []
for position in position_to_drop_lst:
indice = (dataframe[dataframe[column_freeze] == dataframe.iloc[position].loc[column_freeze]]).index[0]
indices_to_drop_lst.append(indice)
print("Number of rows dropped:")
nbr = len(indices_to_drop_lst)
print(nbr)
print("\n")
dataframe.drop(indices_to_drop_lst, axis=0,inplace=True)
return dataframe | f58acf2188a192c1aa6cedecbfeeb66d7d073ba5 | 16,033 |
def adjust_learning_rate(optimizer, iteration, epoch_size, hyp, epoch, epochs):
"""adjust learning rate, warmup and lr decay
:param optimizer: optimizer
:param iteration: iteration
:param epoch_size: epoch_size
:param hyp: hyperparameters
:param epoch: epoch
:param epochs: the number of epochs
:return: lr
"""
step_index = 0
if epoch < 6:
# The first 6 epochs carried out warm up
learning_rate = 1e-6 + (hyp['lr0'] - 1e-6) * iteration / (epoch_size * 2)
else:
if epoch > epochs * 0.5:
# At 50% of the epochs, the learning rate decays in Gamma
step_index = 1
if epoch > epochs * 0.7:
# At 70% of the epochs, the learning rate decays in Gamma^2
step_index = 2
learning_rate = hyp['lr0'] * (0.1 ** (step_index))
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
return learning_rate | c90c61fcecca99d31214c96cdf7d96b6ba682daa | 16,034 |
import numpy as np
from scipy.io import loadmat
import h5py
from numpy import fromfile, empty, append
def reading_data(data_file):
"""
Read in a data file (16 bit) and obtain the entire data set that is
multiplexed between ECG and Pulse data. The data is then extracted and appended
to separate arrays
:param data_file: The binary data file to be loaded into the function
:return: sampling frequency, pulse pressure array, and ECG array
"""
try:
m = loadmat(data_file)
x = dict(m)
fs = x.get('f_s')
fs = np.array(fs)
fs = fs.flatten()
pp = x.get('pulse_P')
pp = np.array(pp)
pp = pp.flatten()
ecg = x.get('ECG')
ecg = np.array(ecg)
ecg = ecg.flatten()
print(fs)
return fs, pp, ecg
except ValueError:
try:
# for h5py
with h5py.File(data_file, 'r') as hf:
fs = hf.get('f_s')
fs = np.array(fs)
fs = fs.flatten()
pp = hf.get('pp')
pp = np.array(pp)
pp = pp.flatten()
ecg = hf.get('ECG')
ecg = np.array(ecg)
ecg = ecg.flatten()
print(fs)
return fs, pp, ecg
except IOError:
fs = fromfile(data_file, dtype='uint16', count=1, sep='')
hrData = fromfile(data_file, dtype='uint16', count=-1, sep='')
ecg = empty(shape=[0, len(hrData)], dtype=int) # Initialize Empty Arrays
pp = empty(shape=[0, len(hrData)], dtype=int) # Initialize Empty Arrays
for i in range(1, len(hrData), 2):
ecg = append(ecg, hrData[i])
for k in range(2, len(hrData), 2):
pp = append(pp, hrData[k])
print(ecg)
return fs, pp, ecg | 0ba4980db08a8877fabdfcc282d45c18d868a0a3 | 16,035 |
from typing import Callable
def two_body_mc_grad(env1: AtomicEnvironment, env2: AtomicEnvironment,
d1: int, d2: int, hyps: 'ndarray', cutoffs: 'ndarray',
cutoff_func: Callable = cf.quadratic_cutoff) \
-> (float, 'ndarray'):
"""2-body multi-element kernel between two force components and its
gradient with respect to the hyperparameters.
Args:
env1 (AtomicEnvironment): First local environment.
env2 (AtomicEnvironment): Second local environment.
d1 (int): Force component of the first environment.
d2 (int): Force component of the second environment.
hyps (np.ndarray): Hyperparameters of the kernel function (sig, ls).
cutoffs (np.ndarray): One-element array containing the 2-body
cutoff.
cutoff_func (Callable): Cutoff function of the kernel.
Return:
(float, np.ndarray):
Value of the 2-body kernel and its gradient with respect to the
hyperparameters.
"""
sig = hyps[0]
ls = hyps[1]
r_cut = cutoffs[0]
return two_body_mc_grad_jit(env1.bond_array_2, env1.ctype, env1.etypes,
env2.bond_array_2, env2.ctype, env2.etypes,
d1, d2, sig, ls, r_cut, cutoff_func) | 8cba89674c5e1dea999d026aad8f9b393a57f5cc | 16,036 |
import types
def get_task_name(task):
"""Gets a tasks *string* name, whether it is a task object/function."""
task_name = ""
if isinstance(task, (types.MethodType, types.FunctionType)):
# If its a function look for the attributes that should have been
# set using the task() decorator provided in the decorators file. If
# those have not been set, then we should at least have enough basic
# information (not a version) to form a useful task name.
task_name = get_attr(task, 'name')
if not task_name:
name_pieces = [a for a in get_many_attr(task,
'__module__',
'__name__')
if a is not None]
task_name = join(name_pieces, ".")
else:
task_name = str(task)
return task_name | 181682d930cf358f2532406f1558b007aa09a41f | 16,037 |
def EventAddPublication(builder, publication):
"""This method is deprecated. Please switch to AddPublication."""
return AddPublication(builder, publication) | 39cf5facf251370fd86a004477f848dafd41976c | 16,038 |
def convModel(input1_shape, layers):
"""" convolutional model defined by layers. ith entry
defines ith layer. If entry is a (x,y) it defines a conv layer
with x kernels and y filters. If entry is x it defines a pool layer
with size x"""
model = Sequential()
for (i, layer) in enumerate(layers):
if isinstance(layer, int):
model.add(MaxPool1D(layer))
elif len(layer) == 2:
if i == 0:
model.add(Conv1D(layer[0], layer[1],
input_shape=input1_shape, padding='same',
activation='relu'))
else:
model.add(Conv1D(layer[0], layer[1], padding='same',
activation='relu'))
else:
print("Hodor")
model.add(GlobalMaxPool1D())
model.add(Dropout(0.5))
model.add(Dense(4, activation='softmax'))
model.compile(loss='binary_crossentropy',
metrics=['accuracy',precision],
optimizer=Adam(lr=3e-4))
print(model.inputs)
print(model.summary())
return model | d1a49a42ab0fc4eecd40783345f09121a773ae02 | 16,039 |
def cached_open_doc(db, doc_id, cache_expire=COUCH_CACHE_TIMEOUT, **params):
"""
Main wrapping function to open up a doc. Replace db.open_doc(doc_id)
"""
try:
cached_doc = _get_cached_doc_only(doc_id)
except ConnectionInterrupted:
cached_doc = INTERRUPTED
if cached_doc in (None, INTERRUPTED):
doc = db.open_doc(doc_id, **params)
if cached_doc is not INTERRUPTED:
do_cache_doc(doc, cache_expire=cache_expire)
return doc
else:
return cached_doc | 82118a5c9c43aaf339e7ca3ab8af9680fbd362d1 | 16,040 |
import os
import pandas as pd
def Voices_preload_and_split(subset='room-1', test_subset='room-2', seconds=3,
path=None, pad=False, splits=None, trim=True):
"""Index and split librispeech dataset.
Args:
subset (string): LibriSpeech subset to parse, load and split.
Currently can only handle one at a time
seconds (int): Minimum length of audio samples to include.
path (string): Path to location containing dataset. If left as None
will search default location 'DATASETS_DIR' specified in
definitions.
pad (bool): Flag to specify whether to pad (with 0's) and keep the
samples with length below the minimum.
splits (dict): dictionary with {name:[fractions]} for a user specified
split. The split will be saved to 'DATASPLITS_DIR' under 'name'
trim (bool): trims data by >.5. removes half of mics, and no noise data
Returns:
dict(Dataframes): Dictionary containing the dataframes corresponding
to each split inclduing metadata.
Example:
Todo:
- Write Example.
- More work on user specified splits.
- Add option and functionality to split longer recording into samples
of length 'seconds' to augment data.
"""
num_splits = 6
fragment_seconds = seconds
if path is None:
path = DATASETS_DIR
print('Initialising VOiCESDataset with minimum length = {}s'
' and subset = {}'.format(seconds, subset))
df = load_or_index_subset(subset=subset, path=path,
fragment_seconds=fragment_seconds, pad=pad)
test_df = load_or_index_subset(subset=test_subset, path=path,
fragment_seconds=fragment_seconds, pad=pad)
# remove all None sound from df
if trim:
df = df[df['Noise'] != 'none']
# Convert arbitrary integer labels of dataset to ordered 0-(num_speakers
# - 1) labels
unique_speakers = sorted(df['speaker_id'].unique())
dfs = {} # dictionary of dataframes
sample_dfs = {}
# split df into data-subsets
if splits is None:
# Default behaviour will be to load cyphercat default splits
# check if splits exists.
print('Build/load speaker membership inference splits')
splits_ready = [False]*num_splits
for i_split in range(num_splits):
if os.path.exists(DATASPLITS_DIR+'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
if all(splits_ready): # Found all of the relevant splits
print('Found default speaker splits, loading dataframe')
dfs = {}
for i_split in range(num_splits):
dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_split))
else:
# Case when splits not found. This should only occur first time
# VOiCES is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default speaker splits for VOiCES!')
dfs = default_speaker_splitter2(
dfs, df, trim=trim, test_df=test_df)
# write the default dataframes
for i_df, this_df in enumerate(dfs):
dfs[this_df] = dfs[this_df].drop(columns=['id'])
dfs[this_df].rename(columns={'level_0': 'idx_in_original_df'},
inplace=True)
dfs[this_df].to_csv(DATASPLITS_DIR+'/VOiCES-%s/speaker_splits/'
'VOiCES_%i.csv' % (subset, i_df),
index=False)
print('Build/load sample membership inference splits')
splits_ready = [False]*(num_splits-1)
for i_split in range(num_splits-1):
if os.path.exists(DATASPLITS_DIR+'/VOiCES-%s/sample_splits/'
'VOiCES_%i.csv' % (subset, i_split)):
splits_ready[i_split] = True
if all(splits_ready): # Found all of the relevant splits
print('Found default sample splits, loading dataframe')
sample_dfs = {}
for i_split in range(num_splits-1):
sample_dfs[i_split] = pd.read_csv(DATASPLITS_DIR +
'/VOiCES-%s/sample_splits/'
'VOiCES_%i.csv' % (subset,
i_split))
else:
# Case when splits not found. This should only occur first time
# LibriSpeech is parsed by developers (not users), so will include
# a warning
print('WARNING: Creating default sample splits for VOiCES!')
sample_dfs = default_sample_splitter(sample_dfs, df, trim)
# write the default dataframes
for i_df, this_df in enumerate(sample_dfs):
sample_dfs[this_df] = sample_dfs[this_df].drop(columns=['id'])
sample_dfs[this_df].rename(columns={'level_0':
'idx_in_original_df'},
inplace=True)
sample_dfs[this_df].to_csv(DATASPLITS_DIR+'/VOiCES-%s/'
'sample_splits/VOiCES_%i.csv' %
(subset, i_df), index=False)
else:
name = list(splits.keys())[0]
print('Creating user defined splits under name %s' %
(list(splits.keys())[0]))
total = 0
for fraction in splits[name]:
total += fraction
if total != 1.:
raise ValueError('Data split does not add up to 1.')
# this creates user selected splits according to the list provided
# num speakers for train & test is the same.
# the below was solved with a system of equations
# amt data depends on train data
n = int(len(unique_speakers)//(2+2*splits[0]))
# n is train data for shadow & target networks
unique_speakers1 = unique_speakers[:n] # target
unique_speakers2 = unique_speakers[n:2*n] # shadow
unique_speakers3 = unique_speakers[2*n:] # out (target + shadow)
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers1,
category_id='speaker_id', splits=splits, N=0)
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers2,
category_id='speaker_id', splits=splits, N=2)
# split out data for attack train + test evenly
dfs = splitter(dfs=dfs, df=df, unique_categories=unique_speakers3,
category_id='speaker_id', splits=[0.5, 0.5], N=4)
print('\n ------- Speaker split statistics ------- ')
for d in dfs:
this_df = dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print(' ------- Sample split statistics -------- ')
for d in sample_dfs:
this_df = sample_dfs[d]
male_df = this_df[this_df['sex'] == 'M']
female_df = this_df[this_df['sex'] == 'F']
print('\t\t ---- Split %i ---- \n\tUnique speakers \t Samples' % d)
print('Male:\t\t %i\t\t %i' %
(len(male_df['speaker_id'].unique()), len(male_df)))
print('Female:\t\t %i\t\t %i' %
(len(female_df['speaker_id'].unique()), len(female_df)))
print('Total:\t\t %i\t\t %i' %
(len(this_df['speaker_id'].unique()), len(this_df)))
print(' ---------------------------------------- \n')
print('Finished splitting data.')
return dfs, sample_dfs | 46bd0234dbc006b7f5fdb53d3e032bddc679a6f0 | 16,041 |
import torch
def dense2bpseq(sequence: torch.Tensor, label: torch.Tensor) -> str:
"""converts sequence and label tensors to `.bpseq`-style string"""
seq_lab = dense2seqlab(sequence, label)
return seqlab2bpseq(seq_lab) | ec0a5d681fef518068042aa2830ee4d2ef3231c8 | 16,042 |
from datetime import datetime
from flask import Flask
def _base_app(config):
"""
init a barebone flask app.
if it is needed to create multiple flask apps,
use this function to create a base app which can be further modified later
"""
app = Flask(__name__)
app.config.from_object(config)
config.init_app(app)
bootstrap.init_app(app)
app.jinja_env.globals['datetime'] = datetime
app.jinja_env.globals['str_to_datetime'] = lambda x: from_string_to_datetime(x)
app.jinja_env.globals['format_float'] = lambda x: "%.2f" % x if x else None
app.jinja_env.globals['momentjs'] = momentjs
app.jinja_env.globals['get_collapsed_ids'] = get_collapsed
return app | f5f40ed9ea740c5b9bc9ebb8490136179d06f777 | 16,043 |
def applyC(input_map,nbar,MAS_mat,pk_map,Y_lms,k_grids,r_grids,v_cell,shot_fac,include_pix=True):
"""Apply the fiducial covariance to a pixel map x, i.e. C[x] = S[x]+N[x].
We decompose P(k;x) = \sum_l P_l(k) L_l(k.x) where x is the position of the second galaxy and use spherical harmonic decompositions.
P_l(k) are the even fiducial power spectrum multipoles, taken as an input (including the MAS window if relevant).
Parameters
----------
input_map : ndarray
The input map to apply the covariance to.
nbar : ndarray
Map of the background number density.
MAS_mat : ndarray
The mass assignment (i.e. compensation) matrix.
pk_map : ndarray
The fiducial power spectrum multipoles (only used with ML weights).
Y_lms : list
List of spherical harmonic functions, generated by the compute_spherical_harmonic_functions() function.
k_grids : ndarray
3D grids containing the (k_x,k_y,k_z) values.
r_grids : ndarray
3D grids containing the (r_x,r_y,r_z) values.
v_cell : float
Cell volume.
shot_fac : float
Shot noise factor.
include_pix : bool, optional
Whether to include the MAS effects in the covariance (default: True).
Returns
-------
ndarray
Covariance matrix applied to the input map.
"""
return applyS(input_map,nbar,MAS_mat,pk_map,Y_lms,k_grids,r_grids,v_cell,include_pix=include_pix)+applyN(input_map,nbar,MAS_mat,v_cell,shot_fac,include_pix=include_pix) | 91346d935217f540a91947b0a00e91e0125794ef | 16,044 |
def invert_injective_mapping(dictionary):
"""
Inverts a dictionary with a one-to-one mapping from key to value, into a
new dictionary with a one-to-one mapping from value to key.
"""
inverted_dict = {}
for key, value in iteritems(dictionary):
assert value not in inverted_dict, "Mapping is not 1-1"
inverted_dict[value] = key
return inverted_dict | c8cba85f542c5129892eeba4168edf6d9715b54e | 16,045 |
def biosql_dbseqrecord_to_seqrecord(dbseqrecord_, off=False):
"""Converts a DBSeqRecord object into a SeqRecord object.
Motivation of this function was two-fold: first, it makes type testing simpler; and second, DBSeqRecord does
not have a functional implementation of the translate method.
:param DBSeqRecord dbseqrecord_: The DBSeqRecord object to be converted.
:param bool off: Don't actually convert the DBSeqRecord. [Default: False]
:return:
"""
assert isinstance(dbseqrecord_, DBSeqRecord), ('Input must be a DBSeqRecord, '
'was of type {}!').format(type(dbseqrecord_))
if off:
return dbseqrecord_
else:
return SeqRecord(seq=Seq(data=str(dbseqrecord_.seq)), id=dbseqrecord_.id, name=dbseqrecord_.name,
description=dbseqrecord_.description, dbxrefs=dbseqrecord_.dbxrefs,
features=dbseqrecord_.features, annotations=dbseqrecord_.annotations,
letter_annotations=dbseqrecord_.letter_annotations) | 9129c5efd9025a04f0693fd2d35f420b28c2ea91 | 16,046 |
def loadConfig(fileName):
""" Attempt to load the specified config file. If successful, clean the variables/data the
config file has setup """
if not os.path.isfile(fileName):
return False
if not os.access(fileName, os.R_OK):
warn('Unable to read config file: ' + fileName)
return False
try:
execfile(fileName)
# Cache this operation (whether or not we're in debug mode) for faster (hardly)
# debug spamming (from NZBLeecher)
if hasattr(Hellanzb, 'DEBUG_MODE') and Hellanzb.DEBUG_MODE is not None and \
Hellanzb.DEBUG_MODE != False:
# Set this ASAP for sane logging. FIXME: You could possibly lose some debug
# output during initialization if you're using the -d option
Hellanzb.DEBUG_MODE_ENABLED = True
# Ensure the types are lower case
for varName in ('NOT_REQUIRED_FILE_TYPES', 'KEEP_FILE_TYPES'):
types = getattr(Hellanzb, varName)
lowerTypes = [ext.lower() for ext in types]
setattr(Hellanzb, varName, lowerTypes)
if not hasattr(Hellanzb, 'MAX_RATE') or Hellanzb.MAX_RATE is None:
Hellanzb.MAX_RATE = 0
else:
Hellanzb.MAX_RATE = int(Hellanzb.MAX_RATE)
if not hasattr(Hellanzb, 'UNRAR_CMD') or Hellanzb.UNRAR_CMD is None:
Hellanzb.UNRAR_CMD = assertIsExe(['rar', 'unrar'])
else:
Hellanzb.UNRAR_CMD = assertIsExe([Hellanzb.UNRAR_CMD])
if not hasattr(Hellanzb, 'PAR2_CMD') or Hellanzb.PAR2_CMD is None:
Hellanzb.PAR2_CMD = assertIsExe(['par2'])
else:
Hellanzb.PAR2_CMD = assertIsExe([Hellanzb.PAR2_CMD])
if not hasattr(Hellanzb, 'MACBINCONV_CMD') or Hellanzb.MACBINCONV_CMD is None:
# macbinconv is optional when not explicitly specified in the conf
Hellanzb.MACBINCONV_CMD = None
try:
Hellanzb.MACBINCONV_CMD = assertIsExe(['macbinconv'])
except FatalError:
pass
else:
Hellanzb.MACBINCONV_CMD = assertIsExe([Hellanzb.MACBINCONV_CMD])
if not hasattr(Hellanzb, 'SKIP_UNRAR') or Hellanzb.SKIP_UNRAR is None:
Hellanzb.SKIP_UNRAR = False
if not hasattr(Hellanzb, 'SMART_PAR'):
Hellanzb.SMART_PAR = True
if not hasattr(Hellanzb, 'CATEGORIZE_DEST'):
Hellanzb.CATEGORIZE_DEST = True
if not hasattr(Hellanzb, 'NZB_ZIPS'):
Hellanzb.NZB_ZIPS = '.nzb.zip'
if not hasattr(Hellanzb, 'NZB_GZIPS'):
Hellanzb.NZB_GZIPS = '.nzb.gz'
if not hasattr(Hellanzb, 'DISABLE_COLORS'):
Hellanzb.DISABLE_COLORS = False
if not hasattr(Hellanzb, 'DISABLE_ANSI'):
Hellanzb.DISABLE_ANSI = False
Hellanzb.CACHE_LIMIT = unPrettyBytes(getattr(Hellanzb, 'CACHE_LIMIT', 0))
if not hasattr(Hellanzb, 'OTHER_NZB_FILE_TYPES'):
# By default, just match .nzb files in the queue dir
Hellanzb.NZB_FILE_RE = re.compile(r'(?i)\.(nzb)$')
else:
nzbTypeRe = r'(?i)\.(%s)$'
if not isinstance(Hellanzb.OTHER_NZB_FILE_TYPES, list):
Hellanzb.OTHER_NZB_FILE_TYPES = [Hellanzb.OTHER_NZB_FILE_TYPES]
if 'nzb' not in Hellanzb.OTHER_NZB_FILE_TYPES:
Hellanzb.OTHER_NZB_FILE_TYPES.append('nzb')
typesStr = '|'.join(Hellanzb.OTHER_NZB_FILE_TYPES)
Hellanzb.NZB_FILE_RE = re.compile(nzbTypeRe % typesStr)
# Make sure we expand pathnames so that ~ can be used
for expandPath in ('PREFIX_DIR', 'QUEUE_DIR', 'DEST_DIR', 'POSTPONED_DIR',
'CURRENT_DIR', 'TEMP_DIR', 'PROCESSING_DIR', 'STATE_XML_FILE',
'WORKING_DIR', 'LOG_FILE', 'DEBUG_MODE',
'UNRAR_CMD', 'PAR2_CMD', 'MACBINCONV_CMD',
'EXTERNAL_HANDLER_SCRIPT'):
if hasattr(Hellanzb, expandPath):
thisDir = getattr(Hellanzb, expandPath)
if thisDir is not None:
expandedDir = os.path.expanduser(thisDir)
setattr(Hellanzb, expandPath, expandedDir)
if not hasattr(Hellanzb, 'EXTERNAL_HANDLER_SCRIPT') or \
Hellanzb.EXTERNAL_HANDLER_SCRIPT is None or \
not os.path.isfile(Hellanzb.EXTERNAL_HANDLER_SCRIPT) or \
not os.access(Hellanzb.EXTERNAL_HANDLER_SCRIPT, os.X_OK):
Hellanzb.EXTERNAL_HANDLER_SCRIPT = None
debug('Found config file in directory: ' + os.path.dirname(fileName))
return True
except FatalError, fe:
error('A problem occurred while reading the config file', fe)
raise
except Exception, e:
msg = 'An unexpected error occurred while reading the config file'
error(msg, e)
raise | eb959a65743a1e73732f8226a55a69582fe4ac20 | 16,047 |
import re
from collections import defaultdict
def parse_rpsbproc(handle):
"""Parse a results file generated by rpsblast->rpsbproc.
This function takes a handle corresponding to a rpsbproc output file.
local.rpsbproc returns a subprocess.CompletedProcess object, which contains the
results as byte string in it's stdout attribute.
"""
# Sanitize input. Should work for either an open file handle (str, still contains \n
# when iterating) or byte-string stdout stored in a CompletedProcess object passed to this
# function as e.g. process.stdout.splitlines()
stdout = "\n".join(
line.decode().strip() if isinstance(line, bytes) else line.strip()
for line in handle
)
# Files produced by rpsbproc have anchors for easy parsing. Each query sequence
# is given a block starting/ending with QUERY/ENDQUERY, and domain hits for the
# query with DOMAINS/ENDDOMAINS.
query_pattern = re.compile(
r"QUERY\tQuery_\d+\tPeptide\t\d+\t([A-Za-z0-9.]+?)\n"
r"DOMAINS\n(.+?)ENDDOMAINS",
re.DOTALL,
)
domains = defaultdict(list)
for match in query_pattern.finditer(stdout):
query = match.group(1)
for row in match.group(2).split("\n"):
try:
domain = domain_from_row(row)
except ValueError:
continue
domains[query].append(domain)
return domains | 64be049b5cb96a3e59f421327d1715cee84e2300 | 16,048 |
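A standalone check of the QUERY/DOMAINS anchor regex used above, run on a synthetic two-row block shaped like rpsbproc output; the accession and row contents are made up.
import re

text = "QUERY\tQuery_1\tPeptide\t100\tABC123.1\nDOMAINS\nrow-1\nrow-2\nENDDOMAINS"
pattern = re.compile(
    r"QUERY\tQuery_\d+\tPeptide\t\d+\t([A-Za-z0-9.]+?)\n"
    r"DOMAINS\n(.+?)ENDDOMAINS",
    re.DOTALL,
)
match = pattern.search(text)
print(match.group(1))                  # ABC123.1
print(match.group(2).split("\n")[:2])  # ['row-1', 'row-2']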
from typing import get_type_hints
def _invalidate(obj, depth=0):
"""
Recursively validate type anotated classes.
"""
annotations = get_type_hints(type(obj))
for k, v in annotations.items():
item = getattr(obj, k)
res = not_type_check(item, v)
if res:
return f"{k} field of {type(obj)} : {res}"
if isinstance(item, (list, tuple)):
for ii, i in enumerate(item):
sub = _invalidate(i, depth + 1)
if sub is not None:
return f"{k}.{ii}." + sub
if isinstance(item, dict):
for ii, i in item.items():
sub = _invalidate(i, depth + 1)
if sub is not None:
return f"{k}.{ii}." + sub
else:
sub = _invalidate(item, depth + 1)
if sub is not None:
return f"{k}." + sub | a1881d45414a4a034456e0078553e4aa7bf6471a | 16,049 |
import os
from urllib.request import urlretrieve
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename | a9af221b98647792a92749a901ecc70f09a388ff | 16,050 |
def convtranspose2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1, out_pad=0):
"""Calculates the output height and width of a feature map for a ConvTranspose2D operation."""
h_w, kernel_size, stride, pad, dilation, out_pad = num2tuple(h_w), num2tuple(kernel_size), num2tuple(stride), num2tuple(pad), num2tuple(dilation), num2tuple(out_pad)
pad = num2tuple(pad[0]), num2tuple(pad[1])
out_height = (h_w[0] - 1) * stride[0] - sum(pad[0]) + dilation[0] * (kernel_size[0] - 1) + out_pad[0] + 1
out_width = (h_w[1] - 1) * stride[1] - sum(pad[1]) + dilation[1] * (kernel_size[1] - 1) + out_pad[1] + 1
return out_height, out_width | e1ded212929e7e24b138335ae3d9006b1dcfb759 | 16,051 |
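A worked instance of the size formula above, with hypothetical layer parameters: a stride-2, kernel-4, padding-1 transposed convolution doubles the spatial size.
h_in, k, s, p, d, out_pad = 16, 4, 2, 1, 1, 0
h_out = (h_in - 1) * s - 2 * p + d * (k - 1) + out_pad + 1
print(h_out)  # 32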
def set_multizone_read_mode(session, read_mode, return_type=None, **kwargs):
"""
Modifies where data is read from in multizone environments.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type read_mode: str
:param read_mode: For multizone environments, if set to 'roundrobin', data
will be read from storage nodes in all protection zones. If set to
'localcopy', data from the local protection zone will be favored.
'roundrobin' is the default value. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_read_mode(read_mode)
body_values = {'readmode': read_mode}
path = '/api/settings/raid_read_mode.json'
return session.post_api(path=path, body=body_values,
return_type=return_type, **kwargs) | 0831bfd722514cab792eef38838e357209a0971f | 16,052 |
import re
def get_name_convert_func():
"""
Get the function to convert Caffe2 layer names to PyTorch layer names.
Returns:
(func): function to convert parameter name from Caffe2 format to PyTorch
format.
"""
pairs = [
# ------------------------------------------------------------
# 'nonlocal_conv3_1_theta_w' -> 's3.pathway0_nonlocal3.conv_g.weight'
[
r"^nonlocal_conv([0-9]*)_([0-9]*)_(.*)",
r"s\1.pathway0_nonlocal\2_\3",
],
# 'theta' -> 'conv_theta'
[r"^(.*)_nonlocal([0-9]*)_(theta)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'g' -> 'conv_g'
[r"^(.*)_nonlocal([0-9]*)_(g)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'phi' -> 'conv_phi'
[r"^(.*)_nonlocal([0-9]*)_(phi)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'out' -> 'conv_out'
[r"^(.*)_nonlocal([0-9]*)_(out)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'nonlocal_conv4_5_bn_s' -> 's4.pathway0_nonlocal3.bn.weight'
[r"^(.*)_nonlocal([0-9]*)_(bn)_(.*)", r"\1_nonlocal\2.\3.\4"],
# ------------------------------------------------------------
# 't_pool1_subsample_bn' -> 's1_fuse.conv_f2s.bn.running_mean'
[r"^t_pool1_subsample_bn_(.*)", r"s1_fuse.bn.\1"],
# 't_pool1_subsample' -> 's1_fuse.conv_f2s'
[r"^t_pool1_subsample_(.*)", r"s1_fuse.conv_f2s.\1"],
# 't_res4_5_branch2c_bn_subsample_bn_rm' -> 's4_fuse.conv_f2s.bias'
[
r"^t_res([0-9]*)_([0-9]*)_branch2c_bn_subsample_bn_(.*)",
r"s\1_fuse.bn.\3",
],
# 't_pool1_subsample' -> 's1_fuse.conv_f2s'
[
r"^t_res([0-9]*)_([0-9]*)_branch2c_bn_subsample_(.*)",
r"s\1_fuse.conv_f2s.\3",
],
# ------------------------------------------------------------
# 'res4_4_branch_2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b'
[
r"^res([0-9]*)_([0-9]*)_branch([0-9]*)([a-z])_(.*)",
r"s\1.pathway0_res\2.branch\3.\4_\5",
],
# 'res_conv1_bn_' -> 's1.pathway0_stem.bn.'
[r"^res_conv1_bn_(.*)", r"s1.pathway0_stem.bn.\1"],
# 'conv1_w_momentum' -> 's1.pathway0_stem.conv.'
[r"^conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
# 'res4_0_branch1_w' -> 'S4.pathway0_res0.branch1.weight'
[
r"^res([0-9]*)_([0-9]*)_branch([0-9]*)_(.*)",
r"s\1.pathway0_res\2.branch\3_\4",
],
# 'res_conv1_' -> 's1.pathway0_stem.conv.'
[r"^res_conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
# ------------------------------------------------------------
# 'res4_4_branch_2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b'
[
r"^t_res([0-9]*)_([0-9]*)_branch([0-9]*)([a-z])_(.*)",
r"s\1.pathway1_res\2.branch\3.\4_\5",
],
# 'res_conv1_bn_' -> 's1.pathway0_stem.bn.'
[r"^t_res_conv1_bn_(.*)", r"s1.pathway1_stem.bn.\1"],
# 'conv1_w_momentum' -> 's1.pathway0_stem.conv.'
[r"^t_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
# 'res4_0_branch1_w' -> 'S4.pathway0_res0.branch1.weight'
[
r"^t_res([0-9]*)_([0-9]*)_branch([0-9]*)_(.*)",
r"s\1.pathway1_res\2.branch\3_\4",
],
# 'res_conv1_' -> 's1.pathway0_stem.conv.'
[r"^t_res_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
# ------------------------------------------------------------
# pred_ -> head.projection.
[r"pred_(.*)", r"head.projection.\1"],
# '.bn_b' -> '.weight'
[r"(.*)bn.b\Z", r"\1bn.bias"],
# '.bn_s' -> '.weight'
[r"(.*)bn.s\Z", r"\1bn.weight"],
# '_bn_rm' -> '.running_mean'
[r"(.*)bn.rm\Z", r"\1bn.running_mean"],
# '_bn_riv' -> '.running_var'
[r"(.*)bn.riv\Z", r"\1bn.running_var"],
# '_b' -> '.bias'
[r"(.*)[\._]b\Z", r"\1.bias"],
# '_w' -> '.weight'
[r"(.*)[\._]w\Z", r"\1.weight"],
]
def convert_caffe2_name_to_pytorch(caffe2_layer_name):
"""
Convert the caffe2_layer_name to pytorch format by apply the list of
regular expressions.
Args:
caffe2_layer_name (str): caffe2 layer name.
Returns:
(str): pytorch layer name.
"""
for source, dest in pairs:
caffe2_layer_name = re.sub(source, dest, caffe2_layer_name)
return caffe2_layer_name
return convert_caffe2_name_to_pytorch | 4e3cbe0885a0d23d5af151bc0cea7127156aa9c9 | 16,053 |
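# Usage sketch (not part of the original record): the returned callable applies
# the regex pairs above in order, mapping Caffe2 blob names to PyTorch keys.
convert = get_name_convert_func()
print(convert("res4_0_branch1_w"))  # -> s4.pathway0_res0.branch1.weight
print(convert("res_conv1_bn_s"))    # -> s1.pathway0_stem.bn.weight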
def generate_dict_entry(key, wordlist):
"""Generate one entry of the python dictionary"""
entry = " '{}': {},\n".format(key, wordlist)
return entry | 57ab3c063df0bde1261602f0c6279c70900a7a88 | 16,054 |
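# Usage sketch (not part of the original record): each call yields one indented
# dictionary line, ready to be concatenated into generated Python source.
print(generate_dict_entry("cat", ["feline", "kitty"]))  # prints a line like:    'cat': ['feline', 'kitty'],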
def record_to_dict(record):
"""
    Parse a Bovespa position string into a dictionary of quote fields.
    :param record: (string) position string from bovespa.
    :return: dict with the parsed fields, or None if parsing fails
"""
try:
record = bovespa.Record(record)
except:
return None
return {
'date': record.date, 'year': record.date.year,
'month': record.date.month, 'day': record.date.day,
'money_volume': record.volume, 'volume': record.quantity,
'stock_code': record.stock_code, 'company_name': record.company_name,
'price_open': record.price_open, 'price_close': record.price_close,
'price_mean': record.price_mean, 'price_high': record.price_high,
'price_low': record.price_low
} | 3065d233a0186a72330165c9b082c819369ef449 | 16,055 |
def sample_product(user, **params):
"""Create and return a custom product"""
defaults = {
'name': 'Ron Cacique',
'description': 'El ron cacique es...',
'price': 20,
'weight': '0.70',
'units': 'l',
'featured': True,
}
defaults.update(params)
return Products.objects.create(user=user, **defaults) | 310b2ee775e5497597dd68cf6737623e40b78932 | 16,056 |
def _find_op_path_(block, outputs, inputs, no_grad_set):
"""
no_grad_set will also be changed
"""
input_names = set([inp.name for inp in inputs])
output_names = set([out.name for out in outputs])
relevant_op_flags = [True] * len(block.ops)
# All the inputs of the block are used if inputs is empty,
if inputs:
for i, op in enumerate(block.ops):
if _some_in_set_(op.desc.input_arg_names(), input_names):
for name in op.desc.output_arg_names():
if name not in no_grad_set:
input_names.add(name)
else:
relevant_op_flags[i] = False
for i, op in reversed(list(enumerate(block.ops))):
if _some_in_set_(op.desc.output_arg_names(), output_names):
for name in op.desc.input_arg_names():
if name not in no_grad_set:
output_names.add(name)
else:
relevant_op_flags[i] = False
op_path = [
block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i]
]
if inputs:
for op in op_path:
for name in op.desc.input_arg_names():
if name not in input_names and block.vars[name].stop_gradient:
no_grad_set.add(name)
return op_path | 05d1b18f883906cc41fa84f6f27f061b30ced4b8 | 16,057 |
def clean_gltf_materials(gltf):
"""
    Delete unused glTF materials.
    :param gltf: glTF object
    :return: new list of materials
"""
return filter(lambda m: m['name'] in used_material_names(gltf), gltf['materials']) | 7e429dceb84d48298897589172f976ea907ddcab | 16,058 |
def create_root(request):
"""
Returns a new traversal tree root.
"""
r = Root()
r.add('api', api.create_root(request))
r.add('a', Annotations(request))
r.add('t', TagStreamFactory())
r.add('u', UserStreamFactory())
return r | ebc64f7f49bf6b3405b9971aa0b30e72b3d13c5f | 16,059 |
import numpy as np
def sort_basis_functions(basis_functions):
"""Sorts a set of basis functions by their distance to the
function with the smallest two-norm.
Args:
basis_functions: The set of basis functions to sort.
Expected shape is (-1, basis_function_length).
Returns:
sorted_basis: The sorted basis functions
sorted_ids: Mapping from unsorted basis function ids to
their sorted position.
"""
min_norm_idx = np.argmin(np.linalg.norm(basis_functions, axis=-1), axis=0)
min_norm_fn = basis_functions[min_norm_idx]
ids = list(range(len(basis_functions)))
sorted_ids = sorted(ids, key=lambda x: np.linalg.norm(basis_functions[x] - min_norm_fn))
sorted_basis = np.array(basis_functions)[sorted_ids]
return sorted_basis, sorted_ids | 18b11f80c7d08eb6435d823e557ec9ea4e028b92 | 16,060 |
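# Usage sketch (not part of the original record): the function with the smallest
# two-norm acts as the anchor and the rest are ordered by their distance to it.
basis = [[3.0, 0.0], [1.0, 0.0], [2.0, 0.0]]
sorted_basis, sorted_ids = sort_basis_functions(basis)
print(sorted_ids)             # [1, 2, 0]
print(sorted_basis.tolist())  # [[1.0, 0.0], [2.0, 0.0], [3.0, 0.0]]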
import os
import numpy as np
import netCDF4
import seapy
def to_zgrid(roms_file, z_file, src_grid=None, z_grid=None, depth=None,
records=None, threads=2, reftime=None, nx=0, ny=0, weight=10,
vmap=None, cdl=None, dims=2, pmap=None):
"""
Given an existing ROMS history or average file, create (if does not exit)
a new z-grid file. Use the given z_grid or otherwise build one with the
same horizontal extent and the specified depths and interpolate the
ROMS fields onto the z-grid.
Parameters
----------
roms_file : string,
File name of src file to interpolate from
z_file : string,
Name of desination file to write to
src_grid : (string or seapy.model.grid), optional:
Name or instance of source grid. If nothing is specified,
derives grid from the roms_file
z_grid: (string or seapy.model.grid), optional:
Name or instance of output definition
depth: numpy.ndarray, optional:
array of depths to use for z-level
records : numpy.ndarray, optional:
Record indices to interpolate
threads : int, optional:
number of processing threads
reftime: datetime, optional:
Reference time as the epoch for z-grid file
    nx : float, optional:
        decorrelation length-scale for OA (same units as source data,
        typically twice the difference in the source data)
    ny : float, optional:
        decorrelation length-scale for OA (same units as source data,
        typically twice the difference in the source data)
weight : int, optional:
number of points to use in weighting matrix
vmap : dictionary, optional
mapping source and destination variables
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
dims : int, optional
number of dimensions to use for lat/lon arrays (default 2)
pmap : numpy.ndarray, optional:
use the specified pmap rather than compute it
Returns
-------
pmap : ndarray
the weighting matrix computed during the interpolation
"""
if src_grid is None:
src_grid = seapy.model.asgrid(roms_file)
else:
src_grid = seapy.model.asgrid(src_grid)
ncsrc = seapy.netcdf(roms_file)
src_ref, time = seapy.roms.get_reftime(ncsrc)
if reftime is not None:
src_ref = reftime
records = np.arange(0, ncsrc.variables[time].shape[0]) \
if records is None else np.atleast_1d(records)
# Load the grid
if z_grid is not None:
z_grid = seapy.model.asgrid(z_grid)
elif os.path.isfile(z_file):
z_grid = seapy.model.asgrid(z_file)
if not os.path.isfile(z_file):
if z_grid is None:
lat = src_grid.lat_rho.shape[0]
lon = src_grid.lat_rho.shape[1]
if depth is None:
raise ValueError("depth must be specified")
ncout = seapy.roms.ncgen.create_zlevel(z_file, lat, lon,
len(depth), src_ref, "ROMS z-level",
cdl=cdl, dims=dims)
if dims == 1:
ncout.variables["lat"][:] = src_grid.lat_rho[:, 0]
ncout.variables["lon"][:] = src_grid.lon_rho[0, :]
else:
ncout.variables["lat"][:] = src_grid.lat_rho
ncout.variables["lon"][:] = src_grid.lon_rho
ncout.variables["depth"][:] = depth
ncout.variables["mask"][:] = src_grid.mask_rho
ncout.sync()
z_grid = seapy.model.grid(z_file)
else:
lat = z_grid.lat_rho.shape[0]
lon = z_grid.lat_rho.shape[1]
dims = z_grid.spatial_dims
ncout = seapy.roms.ncgen.create_zlevel(z_file, lat, lon,
len(z_grid.z), src_ref, "ROMS z-level",
cdl=cdl, dims=dims)
if dims == 1:
ncout.variables["lat"][:] = z_grid.lat_rho[:, 0]
ncout.variables["lon"][:] = z_grid.lon_rho[0, :]
else:
ncout.variables["lat"][:] = z_grid.lat_rho
ncout.variables["lon"][:] = z_grid.lon_rho
ncout.variables["depth"][:] = z_grid.z
ncout.variables["mask"][:] = z_grid.mask_rho
else:
ncout = netCDF4.Dataset(z_file, "a")
ncout.variables["time"][:] = seapy.roms.date2num(
seapy.roms.num2date(ncsrc, time, records), ncout, "time")
# Call the interpolation
try:
src_grid.set_east(z_grid.east())
pmap = __interp_grids(src_grid, z_grid, ncsrc, ncout, records=records,
threads=threads, nx=nx, ny=ny, vmap=vmap, weight=weight,
z_mask=True, pmap=pmap)
except TimeoutError:
print("Timeout: process is hung, deleting output.")
# Delete the output file
os.remove(z_file)
finally:
# Clean up
ncsrc.close()
ncout.close()
return pmap | 305b25c6e98cf50c4784d4eaa18b3306b415aea3 | 16,061 |
def info_materials_groups_get():
"""
info_materials_groups_get
    Get an **array** of the distinct material groups (group id and name) found in the materials table.
:rtype: List[Group]
"""
session = info_map.Session()
mat = aliased(info_map.Material)
grp = aliased(info_map.Group)
q = session.query(mat.group_id,grp.name).join(grp).distinct()
groups = [Group(group=row.group_id,name=row.name) for row in q.all()]
return groups, 200 | 49dcf0785be8d9a94b4bb730af6326d493e79000 | 16,062 |
import os
def TestFSSH():
""" molcas test
1. FSSH calculation
"""
pyrai2mddir = os.environ['PYRAI2MD']
testdir = '%s/fssh' % (os.getcwd())
record = {
'coord' : 'FileNotFound',
'energy' : 'FileNotFound',
'energy1' : 'FileNotFound',
'energy2' : 'FileNotFound',
        'energy3' : 'FileNotFound',
'kinetic1' : 'FileNotFound',
'kinetic2' : 'FileNotFound',
'velo1' : 'FileNotFound',
'velo2' : 'FileNotFound',
'nac1' : 'FileNotFound',
'nac2' : 'FileNotFound',
'soc1' : 'FileNotFound',
'soc2' : 'FileNotFound',
'pop2' : 'FileNotFound',
}
filepath = '%s/TEST/fssh/fssh_data/c3h2o.xyz' % (pyrai2mddir)
if os.path.exists(filepath):
record['coord'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.energy' % (pyrai2mddir)
if os.path.exists(filepath):
record['energy'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.energy.1' % (pyrai2mddir)
if os.path.exists(filepath):
record['energy1'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.energy.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['energy2'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.energy.3' % (pyrai2mddir)
if os.path.exists(filepath):
record['energy3'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.kinetic.1' % (pyrai2mddir)
if os.path.exists(filepath):
record['kinetic1'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.kinetic.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['kinetic2'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.nac.1' % (pyrai2mddir)
if os.path.exists(filepath):
record['nac1'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.nac.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['nac2'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.soc.1' % (pyrai2mddir)
if os.path.exists(filepath):
record['soc1'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.soc.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['soc2'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.velo.1' % (pyrai2mddir)
if os.path.exists(filepath):
record['velo1'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.velo.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['velo2'] = filepath
filepath = '%s/TEST/fssh/fssh_data/c3h2o.pop.2' % (pyrai2mddir)
if os.path.exists(filepath):
record['pop2'] = filepath
summary = """
*---------------------------------------------------*
| |
| FSSH Test Calculation |
| |
*---------------------------------------------------*
Check files and settings:
-------------------------------------------------------
"""
for key, location in record.items():
summary += ' %-10s %s\n' % (key, location)
for key, location in record.items():
if location == 'FileNotFound':
summary += '\n Test files are incomplete, please download it again, skip test\n\n'
return summary, 'FAILED(test file unavailable)'
if location == 'VariableNotFound':
summary += '\n Environment variables are not set, cannot find program, skip test\n\n'
            return summary, 'FAILED(environment variable missing)'
CopyInput(record, testdir)
Setup(record, testdir)
summary += """
Copy files:
%-10s --> %s/c3h2o.xyz
%-10s --> %s/c3h2o.energy
%-10s --> %s/c3h2o.energy.1
%-10s --> %s/c3h2o.energy.2
%-10s --> %s/c3h2o.energy.3
%-10s --> %s/c3h2o.kinetic
%-10s --> %s/c3h2o.kinetic.1
%-10s --> %s/c3h2o.nac
%-10s --> %s/c3h2o.nac.1
%-10s --> %s/c3h2o.soc
%-10s --> %s/c3h2o.soc.1
%-10s --> %s/c3h2o.velo
%-10s --> %s/c3h2o.velo.1
%-10s --> %s/c3h2o.pop.1
Run FSSH Calculation:
""" % ('coord', testdir,
'energy1', testdir,
'energy2', testdir,
'energy', testdir,
'energy3', testdir,
'kinetic1', testdir,
'kinetic2', testdir,
'nac1', testdir,
'nac2', testdir,
'soc1', testdir,
'soc2', testdir,
'velo1', testdir,
'velo2', testdir,
'pop2', testdir)
results, code = RunFSSH(record, testdir, pyrai2mddir)
if code == 'PASSED':
summary += """
-------------------------------------------------------
FSSH OUTPUT (NAC)
-------------------------------------------------------
%s
-------------------------------------------------------
nactype == nac test done, entering nactype == ktdc test
""" % (results)
else:
summary += """
    nactype == nac test failed, stop here
"""
return summary, code
results, code = RunFSSH2(record, testdir, pyrai2mddir)
summary += """
-------------------------------------------------------
FSSH OUTPUT (kTDC)
-------------------------------------------------------
%s
-------------------------------------------------------
""" % (results)
return summary, code | be7f26eb2a398996a87e756905b596c26c428332 | 16,063 |
import numpy as np
def relu(x):
"""
Compute the relu of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- relu(x)
"""
s = np.maximum(0,x)
return s | 40b838889b62abca0a88788436b5d648261a3c67 | 16,064 |
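# Usage sketch (not part of the original record): negative entries are clipped to zero.
print(relu(np.array([-2.0, 0.0, 3.5])))  # [0.  0.  3.5]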
def kane_frstar_alt(bodies, coordinates, speeds, kdeqs, inertial_frame, uaux=Matrix(), udep=None, Ars=None):
"""Form the generalized inertia force."""
t = dynamicsymbols._t
N = inertial_frame
# Derived inputs
q = Matrix(coordinates) # q
u = Matrix(speeds) # u
udot = u.diff(t)
qdot_u_map,_,_,_k_kqdot = _initialize_kindiffeq_matrices(q, u, kdeqs, uaux=Matrix())
# Dicts setting things to zero
udot_zero = dict((i, 0) for i in udot)
uaux_zero = dict((i, 0) for i in uaux)
uauxdot = [diff(i, t) for i in uaux]
uauxdot_zero = dict((i, 0) for i in uauxdot)
# Dictionary of q' and q'' to u and u'
q_ddot_u_map = dict((k.diff(t), v.diff(t)) for (k, v) in qdot_u_map.items())
q_ddot_u_map.update(qdot_u_map)
# Fill up the list of partials: format is a list with num elements
# equal to number of entries in body list. Each of these elements is a
# list - either of length 1 for the translational components of
# particles or of length 2 for the translational and rotational
# components of rigid bodies. The inner most list is the list of
# partial velocities.
def get_partial_velocity(body):
if isinstance(body,YAMSRigidBody) or isinstance(body, RigidBody):
vlist = [body.masscenter.vel(N), body.frame.ang_vel_in(N)]
elif isinstance(body, Particle):
vlist = [body.point.vel(N),]
elif isinstance(body,YAMSFlexibleBody):
print('>>>> FlexibleBody TODO, Jv Jo to partials')
vlist=[body.masscenter.vel(N), body.frame.ang_vel_in(N)]
else:
raise TypeError('The body list may only contain either ' 'RigidBody or Particle as list elements.')
v = [msubs(vel, qdot_u_map) for vel in vlist]
return partial_velocity(v, u, N)
partials = [get_partial_velocity(body) for body in bodies]
# Compute fr_star in two components:
# fr_star = -(MM*u' + nonMM)
o = len(u)
MM = zeros(o, o)
nonMM = zeros(o, 1)
zero_uaux = lambda expr: msubs(expr, uaux_zero)
zero_udot_uaux = lambda expr: msubs(msubs(expr, udot_zero), uaux_zero)
for i, body in enumerate(bodies):
bodyMM = zeros(o, o)
bodynonMM = zeros(o, 1)
if isinstance(body,YAMSRigidBody) or isinstance(body, RigidBody):
# Rigid Body (see sympy.mechanics.kane)
M = zero_uaux( body.mass )
I = zero_uaux( body.central_inertia )
vel = zero_uaux( body.masscenter.vel(N) )
omega = zero_uaux( body.frame.ang_vel_in(N) )
acc = zero_udot_uaux( body.masscenter.acc(N) )
            # --- Mass Matrix
for j in range(o):
tmp_vel = zero_uaux(partials[i][0][j])
tmp_ang = zero_uaux(I & partials[i][1][j])
for k in range(o):
# translational
bodyMM[j, k] += M * (tmp_vel & partials[i][0][k])
# rotational
bodyMM[j, k] += (tmp_ang & partials[i][1][k])
# --- Full inertial loads Matrix
inertial_force = (M.diff(t) * vel + M * acc)
inertial_torque = zero_uaux((I.dt(body.frame) & omega) + msubs(I & body.frame.ang_acc_in(N), udot_zero) + (omega ^ (I & omega))) # "&" = dot, "^"=cross
for j in range(o):
bodynonMM[j] += inertial_force & partials[i][0][j]
bodynonMM[j] += inertial_torque & partials[i][1][j]
elif isinstance(body,YAMSFlexibleBody):
print('>>>> FlexibleBody TODO')
M = zero_uaux(body.mass)
#I = zero_uaux(body.central_inertia)
vel = zero_uaux(body.origin.vel(N))
omega = zero_uaux(body.frame.ang_vel_in(N))
acc = zero_udot_uaux(body.origin.acc(N))
inertial_force=0 # Fstar !<<<< TODO
inertial_torque=0 # Tstar !<<<< TODO
else:
# Particles
M = zero_uaux(body.mass)
vel = zero_uaux(body.point.vel(N))
acc = zero_udot_uaux(body.point.acc(N))
inertial_force = (M.diff(t) * vel + M * acc)
inertial_torque=0 # Tstar
for j in range(o):
temp = zero_uaux(partials[i][0][j])
for k in range(o):
bodyMM[j, k] += M * (temp & partials[i][0][k])
bodynonMM[j] += inertial_force & partials[i][0][j]
# Perform important substitution and store body contributions
body.MM_alt = zero_uaux(msubs(bodyMM, q_ddot_u_map))
body.nonMM_alt_bef = bodynonMM
#body.nonMM_alt = msubs(msubs(bodynonMM, q_ddot_u_map), udot_zero, uauxdot_zero, uaux_zero)
# Cumulative MM and nonMM over all bodies
MM += bodyMM
nonMM += bodynonMM
# --- Storing for debug
body.acc_alt = acc
body.vel_alt = vel
body.omega_alt = omega
body.inertial_force_alt = inertial_force
body.inertial_torque_alt = inertial_torque
body.Jv_vect_alt=partials[i][0]
body.Jo_vect_alt=partials[i][1]
# End loop on bodies
nonMM = msubs(msubs(nonMM, q_ddot_u_map), udot_zero, uauxdot_zero, uaux_zero)
# Compose fr_star out of MM and nonMM
fr_star = -(MM * msubs(Matrix(udot), uauxdot_zero) + nonMM)
# If there are dependent speeds, we need to find fr_star_tilde
if udep:
p = o - len(udep)
fr_star_ind = fr_star[:p, 0]
fr_star_dep = fr_star[p:o, 0]
fr_star = fr_star_ind + (Ars.T * fr_star_dep)
# Apply the same to MM
MMi = MM[:p, :]
MMd = MM[p:o, :]
MM = MMi + (Ars.T * MMd)
MM_full= mass_matrix_full(MM,_k_kqdot)
#self._bodylist = bodies
#self._frstar = fr_star
#self._k_d = MM
#self._f_d = -msubs(self._fr + self._frstar, udot_zero)
return fr_star, MM, MM_full | a42d8c902f7a27cb3867e9c85f8a844129293f7d | 16,065 |
def line(value):
"""
| Line which can be used to cross with functions like RSI or MACD.
| Name: line\_\ **value**\
:param value: Value of the line
:type value: float
"""
def return_function(data):
column_name = f'line_{value}'
if column_name not in data.columns:
data[column_name] = value
return data[column_name].copy()
return return_function | 07b4f9671ae06cf63c02062a9da4eb2a0b1a265a | 16,066 |
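# Usage sketch (not part of the original record), assuming pandas is available:
# the returned closure adds a constant column on first evaluation and returns it.
import pandas as pd
data = pd.DataFrame({"close": [101.2, 99.8, 100.5]})
line_100 = line(100)
print(line_100(data).tolist())      # [100, 100, 100]
print("line_100" in data.columns)   # True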
import json
import requests
def goodsGetSku(spuId,regionId):
"""
:param spuId:
:param regionId:
:return:
"""
reqUrl = req_url('goods', "/goods/getGoodsList")
if reqUrl:
url = reqUrl
else:
        return "Failed to match service host"
headers = {
'Content-Type': 'application/json',
'X-Region-Id': regionId,
}
body = json.dumps(
{
"spuId": spuId,
"groundStatus": "",
"environment": "",
"page": 1,
"limit": 20
}
)
result = requests.post(url=url,headers=headers,data=body)
resultJ = json.loads(result.content)
return resultJ | 23c3960384529c7a45e730a612cc7999fa316bd4 | 16,067 |
import os
def build_model(config, model_dir=None, weight=None):
"""
Inputs:
config: train_config, see train_celery.py
model_dir: a trained model's output dir, None if model has not been trained yet
weight: class weights
"""
contents = os.listdir(model_dir)
print(contents)
return ClassificationModel(
"roberta",
model_dir or "roberta-base",
use_cuda=USE_CUDA,
args={
# https://github.com/ThilinaRajapakse/simpletransformers/#sliding-window-for-long-sequences
"sliding_window": config.get("sliding_window", False),
"reprocess_input_data": True,
"overwrite_output_dir": True,
"use_cached_eval_features": False,
"no_cache": True,
"num_train_epochs": config["num_train_epochs"],
"weight": weight,
# TODO I don't need checkpoints yet - disable this to save disk space
"save_eval_checkpoints": False,
"save_model_every_epoch": False,
"save_steps": 999999,
# Bug in the library, need to specify it here and in the .train_model kwargs
"output_dir": config.get("model_output_dir"),
# Maybe a bug in the library, need to turn off multiprocessing for prediction
# We may also want to look at the process_count config. It may use too many cpus
"use_multiprocessing": False,
# Note: 512 requires 16g of GPU mem. You can try 256 for 8g.
"max_seq_length": config.get("max_seq_length", 512),
},
) | d526404b5b5ad73c16a3bbb12af9c03f6099a17b | 16,068 |
from torch import optim
def get_scheduler(config, optimizer):
    """
    :param config: configuration parameters
    :param optimizer: optimizer
    :return: learning rate scheduler (decay strategy)
    """
    # Load the learning rate decay strategy
if config.scheduler_name == 'StepLR':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=config.StepLR['decay_step'],
gamma=config.StepLR["gamma"])
elif config.scheduler_name == 'Cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=config.Cosine['restart_step'],
eta_min=config.Cosine['eta_min'])
elif config.scheduler_name == 'author':
scheduler = WarmupMultiStepLR(optimizer,
config.WarmupMultiStepLR["steps"],
config.WarmupMultiStepLR["gamma"],
config.WarmupMultiStepLR["warmup_factor"],
config.WarmupMultiStepLR["warmup_iters"],
config.WarmupMultiStepLR["warmup_method"]
)
return scheduler | 1e4be51c74ed6c35bde3343547c4a7a88736179a | 16,069 |
def pipeline_dict() -> dict:
"""Pipeline config dict. You need to update the labels!"""
pipeline_dictionary = {
"name": "german_business_names",
"features": {
"word": {"embedding_dim": 16, "lowercase_tokens": True},
"char": {
"embedding_dim": 16,
"encoder": {
"type": "gru",
"num_layers": 1,
"hidden_size": 32,
"bidirectional": True,
},
"dropout": 0.1,
},
},
"head": {
"type": "TextClassification",
"labels": [
"Unternehmensberatungen",
"Friseure",
"Tiefbau",
"Dienstleistungen",
"Gebrauchtwagen",
"Restaurants",
"Architekturbüros",
"Elektriker",
"Vereine",
"Versicherungsvermittler",
"Sanitärinstallationen",
"Edv",
"Maler",
"Physiotherapie",
"Werbeagenturen",
"Apotheken",
"Vermittlungen",
"Hotels",
"Autowerkstätten",
"Elektrotechnik",
"Allgemeinärzte",
"Handelsvermittler Und -vertreter",
],
"pooler": {
"type": "gru",
"num_layers": 1,
"hidden_size": 16,
"bidirectional": True,
},
"feedforward": {
"num_layers": 1,
"hidden_dims": [16],
"activations": ["relu"],
"dropout": [0.1],
},
},
}
return pipeline_dictionary | d9e15fb1a09678d65b30a49b7c7c811843420c57 | 16,070 |
def _is_hangul_syllable(i):
"""
Function for determining if a Unicode scalar value i is within the range of Hangul syllables.
:param i: Unicode scalar value to lookup
:return: Boolean: True if the lookup value is within the range of Hangul syllables, otherwise False.
"""
if i in range(0xAC00, 0xD7A3 + 1): # Range of Hangul characters as defined in UnicodeData.txt
return True
return False | 793519ec33a8920ea13328b0e5a4f814c859b0d3 | 16,071 |
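# Usage sketch (not part of the original record): U+D55C ('한') falls inside the
# Hangul syllable block, while 'A' (U+0041) does not.
print(_is_hangul_syllable(ord('한')))  # True
print(_is_hangul_syllable(ord('A')))   # False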
def shape14_4(tik_instance, input_x, res, input_shape, shape_info):
"""input_shape == ((32, 16, 14, 14, 16), 'float16', (1, 1), (1, 1))"""
stride_w, stride_h, filter_w, filter_h, dilation_filter_w, dilation_filter_h = shape_info
pad = [0, 0, 0, 0]
l1_h = 14
l1_w = 14
c1_index = 0
jump_stride = 1
repeat_mode = 1
with tik_instance.for_range(0, 32, block_num=32) as block_index:
eeb0 = block_index % 2
eeb1 = block_index // 2
input_1_1_local_l1 = tik_instance.Tensor("float16", (196 * 32 * 16,), scope=tik.scope_cbuf,
name="input_1_1_local_l1")
input_1_1_fractal_l1_local_ub = tik_instance.Tensor("float16", (106496 // 2,), scope=tik.scope_ubuf,
name="input_1_1_fractal_l1_local_ub")
input_1_2_fractal_l1_local_ub = tik_instance.Tensor("float16", (196 * 16 * 16,), scope=tik.scope_ubuf,
name="input_1_2_fractal_l1_local_ub")
with tik_instance.for_range(0, 32) as i:
tik_instance.data_move(input_1_1_local_l1[i * 3136], input_x[i, eeb1, 0, 0, 0], 0, 1, 196, 0, 0)
with tik_instance.for_range(0, 16) as i:
fetch_filter_w = 0
fetch_filter_h = 0
left_top_h = 0
left_top_w = 0
tik_instance.load3dv1(input_1_1_fractal_l1_local_ub[i * 3328],
input_1_1_local_l1[i * 3136 + eeb0 * 16 * 3136],
pad, l1_h, l1_w, c1_index, fetch_filter_w, fetch_filter_h,
left_top_w, left_top_h, stride_w, stride_h, filter_w,
filter_h, dilation_filter_w, dilation_filter_h,
jump_stride, repeat_mode, 13)
with tik_instance.for_range(0, 16) as i:
tik_instance.data_move(input_1_2_fractal_l1_local_ub[i * 196 * 16],
input_1_1_fractal_l1_local_ub[i * 3328], 0, 1, 196, 0, 0)
with tik_instance.for_range(0, 196) as i:
tik_instance.data_move(res[eeb1, i + 196 * eeb0, 0, 0], input_1_2_fractal_l1_local_ub[256 * i], 0, 1,
16, 0, 0)
return tik_instance, res | 5606e2e6445ea5415960e1784009bcc75de29669 | 16,072 |
from typing import Dict
from typing import Any
from typing import Optional
from typing import Iterator
from typing import Union
from typing import List
def read_json(
downloader: Download, datasetinfo: Dict, **kwargs: Any
) -> Optional[Iterator[Union[List, Dict]]]:
"""Read data from json source allowing for JSONPath expressions
Args:
downloader (Download): Download object for downloading JSON
datasetinfo (Dict): Dictionary of information about dataset
**kwargs: Variables to use when evaluating template arguments
Returns:
Optional[Iterator[Union[List,Dict]]]: Iterator or None
"""
url = get_url(datasetinfo["url"], **kwargs)
response = downloader.download(url)
json = response.json()
expression = datasetinfo.get("jsonpath")
if expression:
expression = parse(expression)
json = expression.find(json)
if isinstance(json, list):
return iter(json)
return None | 77183992d42d9c3860965222c3feda23aca588dc | 16,073 |
import json
def json_dumps_safer(obj, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dumps(obj, cls=WandBJSONEncoder, **kwargs) | 816e97051553f1adc4c39a7c5e4559fb3a354197 | 16,074 |
import numpy as np
import pandas as pd
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
# raise NotImplementedError()
full_data = pd.read_csv(filename).drop_duplicates()
data = full_data.drop(['id', 'date', 'lat', 'long'],
axis=1)
data = data.dropna()
for f in ZERO_AND_ABOVE:
data = data[data[f] >= 0]
for f in ONLY_POSITIVE:
data = data[data[f] > 0]
data['yr_renovated'] = np.where(data['yr_renovated'] == 0.0,
data['yr_built'], data['yr_renovated'])
data = pd.get_dummies(data, columns=['zipcode'],
drop_first=True)
features, label = data.drop("price", axis=1), data['price']
return features, label | 74707af8839b37de80d682d715ed8000375cdd7c | 16,075 |
from uuid import uuid4
from redis import Redis
from qiita_core.configuration_manager import ConfigurationManager
from qiita_db.sql_connection import SQLConnectionHandler
from moi.job import submit_nouser
def test(runner):
"""Test the environment
    * Verify redis connectivity independent of moi
* Verify database connectivity
* Verify submission via moi
Tests are performed both on the server and ipengines.
"""
def redis_test(**kwargs):
"""Put and get a key from redis"""
config = ConfigurationManager()
r_client = Redis(host=config.redis_host,
port=config.redis_port,
password=config.redis_password,
db=config.redis_db)
key = str(uuid4())
r_client.set(key, 42, ex=1)
return int(r_client.get(key))
def postgres_test(**kwargs):
"""Open a connection and query postgres"""
c = SQLConnectionHandler()
return c.execute_fetchone("SELECT 42")[0]
def moi_test(**kwargs):
"""Submit a function via moi"""
def inner(a, b, **kwargs):
return a + b
_, _, ar = submit_nouser(inner, 7, 35)
state, result = _ipy_wait(ar)
return result
if runner == 'all':
runner = ('local', 'remote', 'moi')
else:
runner = [runner]
for name in runner:
_test_runner(name, "redis", redis_test, 42)
_test_runner(name, "postgres", postgres_test, 42)
_test_runner(name, "submit via moi", moi_test, 42) | ea4f8f50e3d85c3df6f7c890b5b91140a63bac65 | 16,076 |
import os
def reset_database():
    """For development use only. Never enable such a dangerous command in a release/production environment.
    """
if app.config['ADMIN_KEY']:
if request.args.get('key') == app.config['ADMIN_KEY']:
if request.args.get('totp') == pyotp.TOTP(app.config['TOTP_SECRET']).now():
os.remove(app.config['SQLALCHEMY_DATABASE_PATH'])
db.create_all()
return 'Success!'
abort(401) | b3059fe9e2e3671803d01a7fd8521e58e315f19e | 16,077 |
def validate_model(model):
"""
Validate a single data model parameter or a full data model block by
recursively calling the 'validate' method on each node working from
the leaf nodes up the tree.
:param model: part of data model to validate
:type model: :graphit:GraphAxis
:return: overall successful validation
:rtype: :py:bool
"""
allnodes = model.nodes.keys()
leaves = model.leaves(return_nids=True)
done = []
def _walk_ancestors(nodes, success=True):
parents = []
for node in nodes:
node = model.getnodes(node)
# Continue only if the node was found and it has a 'validate' method
if not node.empty() and hasattr(node, 'validate'):
val = node.validate()
done.append(node.nid)
if not val:
return False
pnid = node.parent().nid
if pnid not in done and pnid in allnodes:
parents.append(pnid)
if parents:
return _walk_ancestors(set(parents), success=success)
return success
# Recursively walk the tree from leaves up to root.
return _walk_ancestors(leaves) | 009c629fe80af65f574c698567cb6b5213e9c888 | 16,078 |
def simple_get(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
"""
filename = "{0}.html".format(url.split("/").pop().lower())
filepath = abspath(join(dirname(__file__), "./cache", filename))
file_data = read_file(filepath)
if file_data != None:
return file_data
try:
print("Fetching: {0}...".format(url))
with closing(get(url, stream=True)) as resp:
if is_good_response(resp):
write_cache_file(filepath, resp.content)
return resp.content
else:
return None
except RequestException as e:
log_error('Error during requests to {0} : {1}'.format(url, str(e)))
return None | 60b7714a439d949f42b1b8de6064c8ba087ccfdc | 16,079 |
def ensemble_tsfresh(forecast_in, forecast_out, season, perd):
"""
    Build a LightGBM ensemble on tsfresh features extracted from rolled time series.
"""
def tsfresh_run(forecast, season, insample=True, forecast_out=None):
df_roll_prep = forecast.reset_index()
if insample:
df_roll_prep = df_roll_prep.drop(["Target", "Date"], axis=1)
df_roll_prep["id"] = 1
target = forecast["Target"]
else:
df_roll_prep = df_roll_prep.drop(["index"], axis=1)
df_roll_prep["id"] = 1
df_roll = roll_time_series(
df_roll_prep,
column_id="id",
column_sort=None,
column_kind=None,
rolling_direction=1,
max_timeshift=season - 1,
)
counts = df_roll["id"].value_counts()
df_roll_cut = df_roll[df_roll["id"].isin(counts[counts >= season].index)]
# TS feature extraction
concat_df = pd.DataFrame()
concat_df = extract_features(
df_roll_cut.ffill(),
column_id="id",
column_sort="sort",
n_jobs=season,
show_warnings=False,
disable_progressbar=True,
)
if insample:
concat_df = concat_df.dropna(axis=1, how="all")
concat_df.index = (
target[df_roll_cut["id"].value_counts().index]
.sort_index()
.to_frame()
.index
)
concat_df = pd.merge(
target[df_roll_cut["id"].value_counts().index].sort_index().to_frame(),
concat_df,
left_index=True,
right_index=True,
how="left",
)
concat_df_list = constant_feature_detect(data=concat_df, threshold=0.95)
concat_df = concat_df.drop(concat_df_list, axis=1)
else:
forecast_out.index.name = "Date"
concat_df.index = forecast_out.index
concat_df = impute(concat_df)
return concat_df
    _LOG.info("Building LightGBM ensemble")
concat_df_drop_in = tsfresh_run(forecast_in, season, insample=True)
extracted_n_selected = select_features(
concat_df_drop_in.drop("Target", axis=1),
concat_df_drop_in["Target"],
fdr_level=0.01,
n_jobs=12,
) # fdr is the significance level.
forecast_out_add = pd.concat(
(forecast_in.iloc[-season + 1 :, :].drop(["Target"], axis=1), forecast_out),
axis=0,
)
concat_df_drop_out = tsfresh_run(
forecast_out_add, season, insample=False, forecast_out=forecast_out
)
extracted_n_selected_out = concat_df_drop_out[extracted_n_selected.columns]
# Reduce the dimensions of generated time series features
pca2 = PCA(n_components=8)
pca2.fit(extracted_n_selected)
pca2_results_in = pca2.transform(extracted_n_selected)
pca2_results_out = pca2.transform(extracted_n_selected_out)
cols = 0
for i in range(pca2_results_in.shape[1]):
cols = cols + 1
extracted_n_selected["pca_" + str(i)] = pca2_results_in[:, i]
extracted_n_selected_out["pca_" + str(i)] = pca2_results_out[:, i]
df = forecast_in.iloc[season - 1 :, :].copy()
df = time_feature(df, perd)
df["mean"] = df.drop(["Target"], axis=1).mean(axis=1)
df_new = pd.concat(
(df.reset_index(), extracted_n_selected.iloc[:, -cols:].reset_index(drop=True)),
axis=1,
)
df_new = df_new.set_index("Date")
forecast_train, forecast_test = tts(
df_new, train_size=0.5, shuffle=False, stratify=None
)
target = "Target"
d_train = lgb.Dataset(
forecast_train.drop(columns=[target]), label=forecast_train[target]
)
params = {
"boosting_type": "gbdt",
"objective": "regression",
"metric": "rmsle",
"max_depth": 6,
"learning_rate": 0.1,
"verbose": 0,
"num_threads": 16,
}
model = lgb.train(params, d_train, 100, verbose_eval=1)
ensemble_ts = pd.DataFrame(index=forecast_test.index)
ensemble_ts["ensemble_ts"] = model.predict(forecast_test.drop(columns=[target]))
df_out = forecast_out.copy()
df_out = time_feature(df_out, perd)
df_out["mean"] = df_out.mean(axis=1)
ensemble_ts_out = pd.DataFrame(index=df_out.index)
ensemble_ts_out["ensemble_ts"] = model.predict(df_out)
    _LOG.info("LightGBM ensemble has been successfully built")
return ensemble_ts, ensemble_ts_out | 3bcef19cd495c043e51a591118c1ae8e043290b5 | 16,080 |
import os
def __modules_with_root_module_path(path):
"""
Returns all modules beneath the root module path. This treats all
directories as packages regardless of whether or not they include
a __init__.py.
"""
modules = []
if os.path.isfile(path) and os.path.splitext(path)[1] == '.py' and os.path.basename(path) != '__init__.py':
name = os.path.splitext(os.path.basename(path))[0]
modules.append(name)
elif os.path.isdir(path):
pkg_name = os.path.basename(path)
modules.append(pkg_name)
for ff in os.listdir(path):
modules.extend(['.'.join([pkg_name, m]) for m in __modules_with_root_module_path(os.path.join(path, ff))])
return modules | 96086c8f8e7a277033086c2da6a3bdad16c41756 | 16,081 |
import numpy as np
from affine import Affine
def transform_from_latlon(lat, lon):
    """
    Transform from latitude and longitude
NOTES:
- credit - Shoyer https://gist.github.com/shoyer/0eb96fa8ab683ef078eb
"""
lat = np.asarray(lat)
lon = np.asarray(lon)
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale | 1f6fccfddd23423c0a621efa74a62cdb61b53665 | 16,082 |
from typing import OrderedDict
import xmltodict
def xml_to_json(xml_text: str) -> OrderedDict:
"""Converts xml text to json.
Args:
xml_text (str): xml text to be parsed
Returns:
OrderedDict: an ordered dict representing the xml text as json
"""
return xmltodict.parse(xml_text) | 243156c6f0b3b0f0bf92d8eeaa3ecf52f5846fc8 | 16,083 |
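# Usage sketch (not part of the original record), assuming xmltodict is installed:
# nested elements become nested dicts (OrderedDict in older xmltodict releases).
print(xml_to_json("<root><item>1</item></root>"))  # {'root': {'item': '1'}}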
import sys
def get_calendar_future_events(api_service):
    """Fetches all the events from the Sysarmy calendar.
    Args:
        api_service (googleapiclient.discovery.Resource): already authenticated service
            used to call the Calendar API.
    Returns:
        list: list of dictionaries with the future events already registered in the Calendar.
"""
page_token = None
future_set_events = []
while True:
try:
set_events = (
api_service.events()
.list(calendarId=ADMINBIRRATOR_CALENDAR_ID, pageToken=page_token)
.execute()
)
except TypeError as e:
            LOGGER.error(
                f"Incorrect Calendar ID. Check the {set_bold_text('$ADMINBIRRATOR_CALENDAR_ID')} environment variable. {e}"
            )
sys.exit(1)
except googleErrors.HttpError as e:
            LOGGER.error(
                f"Incorrect Calendar ID. Double-check the dates and that the service account has access to the calendar set in {set_bold_text('$ADMINBIRRATOR_CALENDAR_ID')} ({e})."
            )
sys.exit(1)
        # The general idea is to create new events and update the existing ones,
        # so we only keep the future events.
for event in set_events["items"]:
try:
event_start_date = dt.strptime(
event["start"]["dateTime"][:19],
"%Y-%m-%dT%H:%M:%S",
)
except KeyError:
event_start_date = dt.strptime(
event["start"]["date"][:19],
"%Y-%m-%d",
)
if event_start_date > dt.now():
future_set_events.append(event)
        # The same API response gives you a token for the next page; I think it
        # returns about 25 events per page by default. It would be worth checking
        # whether it can fetch only the future events, which are the ones we care about.
page_token = set_events.get("nextPageToken")
if not page_token:
break
return future_set_events | f3b077f3af700c70c54e017a991118e81bbdb99a | 16,084 |
import requests
def getWeather(city, apikey):
"""
天気を取得する
リクエストにAPIKeyと都市をパラメーターに入れる
https://openweathermap.org/forecast5
"""
payload = {
'APIKEY': APIKEY,
'q': CITY
}
r = requests.get(
APIBASE,
params=payload
)
return r | 41a61d1bd9d1bd5d835963c305d0692babd6b64a | 16,085 |
import numpy as np
def func1(xc):
    """Gaussian profile exp(-xc**2 / (2*s**2)) with fixed width s = 0.1."""
s = .1
res = np.exp(-xc**2/(2*s**2))
return res | 75ab06abf5b348746e322cf41660cfb908d69b62 | 16,086 |
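# Usage sketch (not part of the original record): the peak value 1.0 sits at xc=0
# and the profile decays with width s=0.1, e.g. exp(-0.5) at xc=0.1.
print(func1(0.0))  # 1.0
print(func1(0.1))  # ~0.6065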
def GetEnabledDiskTemplates(*args):
"""Wrapper for L{_QaConfig.GetEnabledDiskTemplates}.
"""
return GetConfig().GetEnabledDiskTemplates(*args) | c07707de13be5c055386659be620831b2f057d64 | 16,087 |
def is_pull_request_merged(pull_request):
"""Takes a github3.pulls.ShortPullRequest object"""
return pull_request.merged_at is not None | 0fecf82b96f7a46cfb4e9895897bd4998d6f225b | 16,088 |
import geopandas
from shapely.geometry import Point
def arraytoptseries(arr, crs={'epsg': '4326'}):
"""Convert an array of shape (2, ...) or (3, ...) to a
geopandas GeoSeries containing shapely Point objects.
"""
if arr.shape[0] == 2:
result = geopandas.GeoSeries([Point(x[0], x[1])
for x in arr.reshape(2, -1).T])
else:
result = geopandas.GeoSeries([Point(x[0], x[1], x[2])
for x in arr.reshape(3, -1).T])
#result.crs = crs
return result | b193f9bcb4144b81becca1acaa8285f8cafaaff2 | 16,089 |
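# Usage sketch (not part of the original record), assuming numpy, geopandas and
# shapely are installed: a (2, N) array of x/y rows becomes a GeoSeries of N points.
import numpy as np
coords = np.array([[0.0, 1.0], [2.0, 3.0]])    # row 0: x values, row 1: y values
pts = arraytoptseries(coords)
print(len(pts), pts.iloc[0].x, pts.iloc[0].y)  # 2 0.0 2.0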
async def get_kml_network_link():
""" Return KML network link file """
logger.info('/c-haines/network-link')
headers = {"Content-Type": kml_media_type,
"Content-Disposition": "inline;filename=c-haines-network-link.kml"}
return Response(headers=headers, media_type=kml_media_type, content=fetch_network_link_kml()) | 699ac59529ce085264a79dfdd048c96b3771e0a8 | 16,090 |
def splitToPyNodeList(res):
# type: (str) -> List[pymel.core.general.PyNode]
"""
converts a whitespace-separated string of names to a list of PyNode objects
Parameters
----------
res : str
Returns
-------
List[pymel.core.general.PyNode]
"""
return toPyNodeList(res.split()) | 44e8780833ed2a5d7418c972afb9e184ca82670b | 16,091 |
def get_jira_issue(commit_message):
"""retrieve the jira issue referenced in the commit message
>>> get_jira_issue(b"BAH-123: ")
{b'BAH-123'}
>>> messages = (
... b"this is jira issue named plainly BAH-123",
... b"BAH-123 plainly at the beginning",
... b"in parens (BAH-123)",
... b"(BAH-123) at the beginning",
... b"after a colon :BAH-123",
... b"Merged from \\FOO-4325 foo.\\n\\nsvn path=/foo/trunk/; revision=12345\\n"
... )
>>> issuesets = (get_jira_issue(i) for i in messages)
>>> issues = set()
>>> for issueset in issuesets:
... for issue in issueset: issues.add(issue)
>>> sorted(list(issues))
[b'BAH-123', b'FOO-4325']
>>> get_jira_issue(b"there is no issue here")
set()
>>> with open("tomatch.txt", "rb") as f: data = f.read().splitlines()
>>> missed = list(i for i in (None if get_jira_issue(i) else i for i in data) if i is not None)
>>> len(missed)
0
>>> for i in missed:
... print(i)
>>> with open("missed-strings.txt", "rb") as f: data = f.read().splitlines()
>>> missed = list(i for i in (None if get_jira_issue(i) else i for i in data) if i is not None)
>>> len(missed)
0
>>> for i in missed:
... print(i)
"""
start = 0
match = JIRA_ID_MATCHER.search(commit_message[start:])
issues = set()
while match:
issues.add(match.group(1))
start += match.end(1)
match = JIRA_ID_MATCHER.search(commit_message[start:])
return issues | b0bf47319c492ec297dd9898645a10ce8a53b43f | 16,092 |
import os
import urllib
def hello(event, context):
"""
This is my awesome hello world function that encrypts text! It's like my first web site, only in Lambda.
Maybe I can add a hit counter later? What about <blink>?
Args:
event:
context:
Returns:
"""
# General run of the mill dangerous, but it will be ok right?
stuff = event['query'].get('stuff', "")
url = event['query'].get('url')
eval_stuff = event['query'].get('eval', "")
my_secret = os.environ.get('my_secret', "default_secret")
print("processing a request, using super secret code: {}".format(my_secret))
# You wanna do what?! Dangerous.
if url:
with urllib.request.urlopen(url) as response:
extra_stuff = response.read()
else:
extra_stuff = ""
# OK Like WTF?! Are you suicidal? level of danger.
if eval_stuff.lower() == "yes":
eval_result = "<pre>{}</pre><hr/>".format(eval(stuff)) # Seriously crazy dangerous!
else:
eval_result = ""
body = """
<html>
<header><title>Hello World!</title></header>
<body>
Encrypted stuff: {}
<hr/>
{}<br/>
Some random URL's Content:<br/>{}
<hr/>
</body>
</html>
""".format(encrypt(stuff, my_secret), eval_result, extra_stuff)
return body | 2e60f86bfcbfaf1433954077a2c19f0941d4aeee | 16,093 |
def get_mean_std(dataloader):
"""Compute mean and std on the fly.
Args:
dataloader (Dataloader): Dataloader class from torch.utils.data.
Returns:
ndarray: ndarray of mean and std.
"""
cnt = 0
mean = 0
std = 0
for l in dataloader: # Now in (batch, channel, h, w)
data = l[0].double() # set dtype
b = data.size(0) # batch size at axis=0
        data = data.view(b, data.size(1), -1) # flatten spatial dims: (b, channel, h*w)
mean += data.mean(2).sum(0) # calculate mean for 3 channels
std += data.std(2).sum(0) # calculate std for 3 channels
cnt += b # get the count of data
mean /= cnt
std /= cnt
return mean.cpu().detach().numpy(), std.cpu().detach().numpy() | d943ae5244743749fa6a2186aacfbf0ea160d17b | 16,094 |
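# Usage sketch (not part of the original record), assuming torch is available:
# a constant all-ones batch yields per-channel mean 1 and std 0.
import torch
from torch.utils.data import DataLoader, TensorDataset
images = torch.ones(8, 3, 4, 4)   # (batch, channel, h, w)
labels = torch.zeros(8)
loader = DataLoader(TensorDataset(images, labels), batch_size=4)
mean, std = get_mean_std(loader)
print(mean, std)                  # [1. 1. 1.] [0. 0. 0.]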
import os
from urllib.request import urlopen
def do_retrieve(url, fname):
"""Retrieve given url to target filepath fname."""
folder = os.path.dirname(fname)
if not os.path.exists(folder):
os.makedirs(folder)
print(f"{folder}{os.path.sep} created.")
if not os.path.exists(fname):
try:
with open(fname, 'wb') as fout:
print(f"retrieving {url}: ", end='', flush=True)
resp = urlopen(url)
fout.write(resp.read())
except BaseException:
print('failed')
os.unlink(fname)
raise
print(f"{fname} saved.")
return fname | a5df667e8d2eec5458ff09bcbb7dfcf3147973de | 16,095 |
import random
import math
def create_random_camera(bbox, frac_space_x, frac_space_y, frac_space_z):
""" Creates a new camera, sets a random position for it, for a scene inside the bbox.
Given the same random_seed the pose of the camera is deterministic.
Input:
bbox - same rep as output from get_scene_bbos.
Output:
new camera created
"""
rand_theta = random.uniform(0, 2 * math.pi) # Rotate around z
# Phi: 0 - top view, 0.5 * pi - side view, -pi - bottom view
rand_sign = random.randint(0, 1) * 2 - 1.0
rand_phi = rand_sign * random.normalvariate(0.4, 0.2) * math.pi
max_dim = max(bbox.get_dims())
r = random.uniform(max_dim * 0.4, max_dim * 0.6)
x = frac_space_x * r * math.cos(rand_theta) * math.sin(rand_phi) + bbox.get_center()[0]
y = frac_space_y * r * math.sin(rand_theta) * math.sin(rand_phi) + bbox.get_center()[1]
z = frac_space_z * r * math.cos(rand_phi) + bbox.get_center()[2]
bpy.ops.object.camera_add(location=Vector((x, y, z)))
cam = bpy.context.object
cam.data.clip_start = 0.01
cam.data.clip_end = max(170, r * 2 * 10)
look_at(cam, Vector(bbox.get_center()))
return cam | 8cfa86127d569493a2456e13ae5924da886640a9 | 16,096 |
def temp2():
""" This is weird, but correct """
if True:
return (1, 2)
else:
if True:
return (2, 3)
return (4, 5) | c63f5566a6e52a3b5d175640fce830b7aad33ebe | 16,097 |
from typing import Tuple
def compute_blade_representation(bitmap: int, firstIdx: int) -> Tuple[int, ...]:
"""
Takes a bitmap representation and converts it to the tuple
blade representation
"""
bmp = bitmap
blade = []
n = firstIdx
while bmp > 0:
if bmp & 1:
blade.append(n)
bmp = bmp >> 1
n = n + 1
return tuple(blade) | 3bf6672280629dee8361ee3c0dbf5920afa4edda | 16,098 |
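# Usage sketch (not part of the original record): bitmap 0b101 has bits 0 and 2
# set, so with firstIdx=1 it denotes the basis blade e1^e3.
print(compute_blade_representation(0b101, 1))  # (1, 3)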
import filecmp
import os
def are_dir_trees_equal(dir1, dir2):
"""
Compare two directories recursively. Files in each directory are
assumed to be equal if their names and contents are equal.
@param dir1: First directory path
@param dir2: Second directory path
@return: True if the directory trees are the same and
there were no errors while accessing the directories or files,
False otherwise.
"""
dirs_cmp = filecmp.dircmp(dir1, dir2)
if len(dirs_cmp.left_only)>0 or len(dirs_cmp.right_only)>0 or \
len(dirs_cmp.funny_files)>0:
return False
(_, mismatch, errors) = filecmp.cmpfiles(
dir1, dir2, dirs_cmp.common_files, shallow=False)
if len(mismatch)>0 or len(errors)>0:
return False
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
if not are_dir_trees_equal(new_dir1, new_dir2):
return False
return True | 77973e2dc494aa8f4636f633b0810d9370de7076 | 16,099 |