content (string, length 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---|
from typing import List, TypeVar
T = TypeVar("T")
def reverse(ls: List[T]) -> List[T]:
"""
Reverses a list.
:param ls: The list to be reversed
:return: The reversed list
"""
for i in range(len(ls) // 2):
ls[i], ls[len(ls) - 1 - i] = ls[len(ls) - 1 - i], ls[i]
return ls
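# Usage sketch (added for illustration, not part of the original snippet):
# reverse() swaps elements in place, so the argument is mutated and the same
# list object is returned.
_demo = [1, 2, 3, 4, 5]
assert reverse(_demo) is _demo
assert _demo == [5, 4, 3, 2, 1]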
|
eacee56b5325178ec27a13283d64d0155c7a97ed
| 25,530 |
def test_get_annotations_not_5(
test_gb_file, test_accession, coordination_args, monkeypatch
):
"""Test get_annotations when length of protein data is not 5."""
def mock_get_gb_file(*args, **kwargs):
gb_file = test_gb_file
return gb_file
def mock_get_record(*args, **kwargs):
return
monkeypatch.setattr(get_genbank_annotations, "get_genbank_file", mock_get_gb_file)
monkeypatch.setattr(get_genbank_annotations, "get_record_feature", mock_get_record)
get_genbank_annotations.get_annotations(
test_accession, coordination_args["args"],
)
|
a9021af24ecb339ebea89d6ad7beb6e4097c5519
| 25,531 |
def increment_with_offset(c: str, increment: int, offset: int) -> str:
""" Caesar shift cipher. """
return chr(((ord(c) - offset + increment) % 26) + offset)
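# Usage sketch (added for illustration; assumes lowercase ASCII input with
# offset=ord('a')): the shift wraps around within the 26-letter alphabet.
assert increment_with_offset('z', 1, ord('a')) == 'a'
assert ''.join(increment_with_offset(ch, 3, ord('a')) for ch in 'abc') == 'def'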
|
50b10b6d3aff3dff157dfc46c368ae251ed060bb
| 25,532 |
import logging
# assuming a Flask app, given the use of request.files and render_template
from flask import render_template, request
def uploadfiles():
"""
function to upload csv to db
:return: renders success.html
"""
# get the uploaded file
uploaded_file = request.files['filename']
if uploaded_file.filename != '':
csv_to_db(uploaded_file)
return render_template('success.html')
logging.info("No file uploaded")
return render_template('startindex.html')
|
5baa9dfb8930e70ebd37b502a211ae847194e08f
| 25,533 |
# assuming a Flask app, given the use of abort/redirect/render_template
from flask import abort, redirect, render_template, request, url_for
def static_html(route):
"""
Route in charge of routing users to Pages.
:param route:
:return:
"""
page = get_page(route)
if page is None:
abort(404)
else:
if page.auth_required and authed() is False:
return redirect(url_for("auth.login", next=request.full_path))
return render_template("page.html", content=page.content)
|
52c74b63c5856a04b294f8e539b4be26deec0209
| 25,534 |
import math
def getCenterFrequency(filterBand):
"""
Intermediate computation used by the mfcc function.
Compute the center frequency (fc) of the specified filter band (l)
This where the mel-frequency scaling occurs. Filters are specified so that their
center frequencies are equally spaced on the mel scale
"""
centerFrequency = 0
if filterBand == 0:
centerFrequency = 0
elif filterBand >= 1 and filterBand <= 14:
centerFrequency = (200.0 * filterBand) / 3.0
else:
exponent = filterBand - 14
centerFrequency = math.pow(1.0711703, exponent)
centerFrequency = centerFrequency * 1073.4
return centerFrequency
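# Usage sketch (added for illustration, not from the original source): the
# first 14 bands are spaced linearly (200/3 Hz apart); higher bands follow the
# logarithmic mel-style spacing.
for band in (1, 14, 15):
    print(band, getCenterFrequency(band))  # ~66.7 Hz, ~933.3 Hz, ~1149.8 Hz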
|
e043774093c4417658cdfd052d486ea5e30efb81
| 25,535 |
import numpy
def phi_analytic(dist, t, t_0, k, phi_1, phi_2):
""" the analytic solution to the Gaussian diffusion problem """
phi = (phi_2 - phi_1)*(t_0/(t + t_0)) * \
numpy.exp(-0.25*dist**2/(k*(t + t_0))) + phi_1
return phi
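# Usage sketch (added for illustration): at t = 0 and dist = 0 the profile
# equals phi_2, and it decays toward phi_1 far from the origin.
assert numpy.isclose(phi_analytic(0.0, 0.0, 1.0, 1.0, 1.0, 2.0), 2.0)
assert numpy.isclose(phi_analytic(100.0, 0.0, 1.0, 1.0, 1.0, 2.0), 1.0)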
|
49fac597afa876f81ba5774bf82fedcfb88f6c7f
| 25,536 |
import numpy as np
from scipy.spatial.distance import cdist, euclidean
def geometric_median(X, eps=1e-5):
"""
calculate the geometric median as implemented in https://stackoverflow.com/a/30305181
:param X: 2D dataset
:param eps:
:return: median value from X
"""
y = np.mean(X, 0)
while True:
D = cdist(X, [y])
nonzeros = (D != 0)[:, 0]
Dinv = 1 / D[nonzeros]
Dinvs = np.sum(Dinv)
W = Dinv / Dinvs
T = np.sum(W * X[nonzeros], 0)
num_zeros = len(X) - np.sum(nonzeros)
if num_zeros == 0:
y1 = T
elif num_zeros == len(X):
return y
else:
R = (T - y) * Dinvs
r = np.linalg.norm(R)
rinv = 0 if r == 0 else num_zeros/r
y1 = max(0, 1-rinv)*T + min(1, rinv)*y
if euclidean(y, y1) < eps:
return y1
y = y1
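# Usage sketch (added for illustration): by symmetry, the geometric median of
# the four corners of the unit square is its centre.
_corners = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
print(geometric_median(_corners))  # ~[0.5, 0.5]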
|
9c8b0d69b4f66dc471bcb838b19ecac934493c54
| 25,538 |
import numpy as np
def distance(bbox, detection):
    """L1 distance between a bounding box and each detection row."""
    nDetections = detection.shape[0]
    d = np.zeros(nDetections)
    D = detection - np.ones([nDetections, 1]) * bbox
    for i in range(nDetections):
        d[i] = np.linalg.norm(D[i], 1)
    return d
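# Usage sketch (added for illustration): per-detection L1 distance to a box.
_bbox = np.array([0., 0., 10., 10.])
_dets = np.array([[0., 0., 10., 10.], [1., 1., 11., 11.]])
print(distance(_bbox, _dets))  # [0. 4.]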
|
21c4beea66df1dde96cd91cff459bf10f1b7a41e
| 25,539 |
from typing import TextIO
from typing import Tuple
def _read_float(line: str,
pos: int,
line_buffer: TextIO
) -> Tuple[float, str, int]:
"""Read float value from line.
Args:
line: line.
pos: current position.
line_buffer: line buffer for nnet3 file.
Returns:
float value, line string and current position.
"""
del line_buffer # Unused.
tok, pos = read_next_token(line, pos)
return float(tok), line, pos
|
f0c76b2224a17854902aadbe7a715ca00da64932
| 25,540 |
def pk_to_p2wpkh_in_p2sh_addr(pk, testnet=False):
"""
Compressed public key (hex string) -> p2wpkh nested in p2sh address. 'SegWit address.'
"""
pk_bytes = bytes.fromhex(pk)
assert is_compressed_pk(pk_bytes), \
"Only compressed public keys are compatible with p2sh-p2wpkh addresses. See BIP49."
# Script sig is just 0 + PUSH(20){hash160(cpk)}
script_sig = OP_0 + push_bytes(hash160_bytes(pk_bytes))
# Address is then prefix + hash160(script_sig)
address = Base58.check_encode(_prefix_bytes('p2sh', testnet=testnet) + hash160_bytes(script_sig))
return address
|
10e9b2659df98b02b5030c1eec1820c9bbdd1a8b
| 25,542 |
def remove_imaginary(pauli_sums):
"""
Remove the imaginary component of each term in a Pauli sum
:param PauliSum pauli_sums: The Pauli sum to process.
:return: a purely hermitian Pauli sum.
:rtype: PauliSum
"""
if not isinstance(pauli_sums, PauliSum):
raise TypeError("not a pauli sum. please give me one")
new_term = sI(0) * 0.0
for term in pauli_sums:
new_term += term_with_coeff(term, term.coefficient.real)
return new_term
|
2edd93f338d4e2dc1878953ced5edf954f509ccc
| 25,543 |
def log_sigmoid_deprecated(z):
"""
Calculate the log of sigmoid, avoiding overflow/underflow
"""
if abs(z) < 30:
return np.log(sigmoid(z))
else:
if z > 0:
return -np.exp(-z)
else:
return z
|
576d7de9bf61aa32c3e39fc5ca7f4428b43519bb
| 25,544 |
import numpy as np
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
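# Usage sketch (added for illustration): a 90 degree rotation about y maps the
# x-axis unit vector onto -z.
print(roty(np.pi / 2) @ np.array([1., 0., 0.]))  # ~[0, 0, -1]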
|
9c05a96c8c36fd3cd7eee1860574b9242d7543d6
| 25,545 |
import numpy as np
def ranks_to_metrics_dict(ranks):
"""Calculates metrics, returns metrics as a dict."""
mean_rank = np.mean(ranks)
mean_reciprocal_rank = np.mean(1. / ranks)
hits_at = {}
for k in (1, 3, 10):
hits_at[k] = np.mean(ranks <= k)*100
return {
'MR': mean_rank,
'MRR': mean_reciprocal_rank,
'hits@[1,3,10]': hits_at
}
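# Usage sketch (added for illustration): ranks for four test queries.
print(ranks_to_metrics_dict(np.array([1, 2, 5, 12])))
# {'MR': 5.0, 'MRR': 0.445..., 'hits@[1,3,10]': {1: 25.0, 3: 50.0, 10: 75.0}}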
|
60ee20fdf43240e3f0aa0e414fd49bcc52f83446
| 25,546 |
import os
import subprocess
def bias_correction(input_data, output_filename='', mask_filename='', method="ants", command="/home/abeers/Software/ANTS/ANTs.2.1.0.Debian-Ubuntu_X64/N4BiasFieldCorrection", temp_dir='./'):
""" A catch-all function for bias field correction. Will perform bias correction on an input volume
depending on the 'method' and 'command' inputted.
Parameters
----------
input_data: str or array
Can be a 4D volume or a filename.
output_filename: str
Location to save output data to. If left as '', will return numpy array.
method: str
Will perform motion correction according to the provided method.
Currently available: ['ants', 'slicer']
command: str
The literal command-line string to be inputted via Python's subprocess module.
temp_dir: str
If temporary files are created, they will be saved here.
Returns
-------
output: array
Output data, only if output_filename is left as ''.
"""
bias_correction_methods = ['ants', 'slicer']
if method not in bias_correction_methods:
print('Input "method" parameter is not available. Available methods: ', bias_correction_methods)
return
if method == 'ants':
# A good reason to have a Class for qtim methods is to cut through all of this extra code.
temp_input, temp_output = False, False
if not isinstance(input_data, str):
input_filename = os.path.join(temp_dir, 'temp.nii.gz')
save_numpy_2_nifti(input_data, input_filename)
temp_input = True
else:
input_filename = input_data
if output_filename == '':
temp_output = True
output_filename = os.path.join(temp_dir, 'temp_out.nii.gz')
print(' '.join([command, '-i', input_filename, '-o', output_filename, '-x', mask_filename]))
if mask_filename != '':
subprocess.call([command, '-i', input_filename, '-o', output_filename, '-x', mask_filename])
else:
subprocess.call([command, '-i', input_filename, '-o', output_filename])
if temp_input:
os.remove(input_filename)
pass
if temp_output:
output = convert_input_2_numpy(output_filename)
os.remove(output_filename)
return output
if method == 'slicer':
print('Slicer method not yet implemented! Sorry...')
|
5236cff562dc50390146a5902a8f9924457e5426
| 25,547 |
def randperm2d(H, W, number, population=None, mask=None):
"""randperm 2d function
generates different random integers in range [0, H*W)
Parameters
----------
H : {integer}
height
W : {integer}
width
number : {integer}
number of random samples to draw
population : {list or numpy array(1d or 2d)}
part of population in range(0, H*W)
"""
if population is None:
population = np.array(range(0, H * W)).reshape(H, W)
population = np.array(population)
if mask is not None and np.sum(mask) != 0:
population = population[mask > 0]
population = population.flatten()
population = np.random.permutation(population)
Ph = np.floor(population / W).astype('int')
Pw = np.floor(population - Ph * W).astype('int')
# print(Pw + Ph * W)
return Ph[0:number], Pw[0:number]
|
a3507c488740e0190673cb0bd920c0c0f15b77a1
| 25,548 |
from sqlalchemy import create_engine
def get_engine(db_credentials):
"""
Get SQLAlchemy engine using credentials.
Input:
db_credentials: dict with keys
    db: database name
    user: username
    host: hostname of the database server
    port: port number
    pwd: password for the database
"""
url = 'postgresql://{user}:{passwd}@{host}:{port}/{db}'.format(
user=db_credentials['user'], passwd=db_credentials['pwd'], host=db_credentials['host'],
port=db_credentials['port'], db=db_credentials['db'])
engine = create_engine(url, pool_size = 50)
return engine
|
ff66c10c7a79b0f5751979f0f5fc74c16d97eac0
| 25,549 |
def numpy_to_vtkIdTypeArray(num_array, deep=0):
"""
Notes
-----
This was pulled from VTK and modified to eliminate numpy 1.14 warnings.
VTK uses a BSD license, so it's OK to do that.
"""
isize = vtk.vtkIdTypeArray().GetDataTypeSize()
dtype = num_array.dtype
if isize == 4:
if dtype != np.int32:
raise ValueError(
'Expecting a numpy.int32 array, got %s instead.' % (str(dtype)))
else:
if dtype != np.int64:
raise ValueError(
'Expecting a numpy.int64 array, got %s instead.' % (str(dtype)))
return numpy_to_vtk(num_array, deep, vtkConstants.VTK_ID_TYPE)
|
149da1f117968839801f2720c132451045b21fb6
| 25,550 |
def denormalize_ged(g1, g2, nged):
"""
Converts normalized ged into ged.
"""
return round(nged * (g1.num_nodes + g2.num_nodes) / 2)
|
214813120d552ef5ece10349978238117fe26cf3
| 25,551 |
from datetime import datetime
import time
def get_current_time():
    """Return the current time stamp as 'YYYY-MM-DD HH:MM:SS'."""
    time_stamp = datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d %H:%M:%S')
    return time_stamp
|
236bd2b141c3686bb4c05a18a6d0f0ef3b15ea6b
| 25,552 |
import asyncio
async def test_script_mode_2(hass, hass_ws_client, script_mode, script_execution):
"""Test overlapping runs with max_runs > 1."""
id = 1
def next_id():
nonlocal id
id += 1
return id
flag = asyncio.Event()
@callback
def _handle_event(_):
flag.set()
event = "test_event"
script_config = {
"script1": {
"sequence": [
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
],
"mode": script_mode,
}
}
client = await hass_ws_client()
hass.bus.async_listen(event, _handle_event)
assert await async_setup_component(hass, "script", {"script": script_config})
hass.states.async_set("switch.test", "on")
await hass.services.async_call("script", "script1")
await asyncio.wait_for(flag.wait(), 1)
# List traces
await client.send_json({"id": next_id(), "type": "trace/list", "domain": "script"})
response = await client.receive_json()
assert response["success"]
trace = _find_traces(response["result"], "script", "script1")[0]
assert trace["state"] == "running"
# Start second run of script while first run is suspended in wait_template.
flag.clear()
await hass.services.async_call("script", "script1")
await asyncio.wait_for(flag.wait(), 1)
# List traces
await client.send_json({"id": next_id(), "type": "trace/list", "domain": "script"})
response = await client.receive_json()
assert response["success"]
trace = _find_traces(response["result"], "script", "script1")[1]
assert trace["state"] == "running"
# Let both scripts finish
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
# List traces
await client.send_json({"id": next_id(), "type": "trace/list", "domain": "script"})
response = await client.receive_json()
assert response["success"]
trace = _find_traces(response["result"], "script", "script1")[0]
assert trace["state"] == "stopped"
assert trace["script_execution"] == script_execution
trace = _find_traces(response["result"], "script", "script1")[1]
assert trace["state"] == "stopped"
assert trace["script_execution"] == "finished"
|
76a251dc4f2f7aa17e280ee1bcb76aa8333388cb
| 25,554 |
import numpy as np
import pandas as pd
def ease_of_movement(high, low, close, volume, n=20, fillna=False):
"""Ease of movement (EoM, EMV)
It relates an asset's price change to its volume and is particularly useful
for assessing the strength of a trend.
https://en.wikipedia.org/wiki/Ease_of_movement
Args:
high(pandas.Series): dataset 'High' column.
low(pandas.Series): dataset 'Low' column.
close(pandas.Series): dataset 'Close' column.
volume(pandas.Series): dataset 'Volume' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
emv = (high.diff(1) + low.diff(1)) * (high - low) / (2 * volume)
emv = emv.rolling(n).mean()
if fillna:
emv = emv.replace([np.inf, -np.inf], np.nan).fillna(0)
return pd.Series(emv, name='eom_' + str(n))
|
c25720e866b1d4635d7e8256b9ace94f78b463ed
| 25,555 |
def Document(docx=None, word_open_xml=None):
"""
Return a |Document| object loaded from *docx*, where *docx* can be
either a path to a ``.docx`` file (a string) or a file-like object.
Optionally, *word_open_xml* can be specified as a string of XML.
Either *docx* or *word_open_xml* may be specified, but not both. If
*docx* is missing or ``None`` and *word_open_xml* is ``None``, the built-in
default document "template" is loaded.
"""
if docx and word_open_xml:
msg = "Must either specify docx or word_open_xml, but not both"
raise ValueError(msg)
if word_open_xml is None:
docx = _default_docx_path() if docx is None else docx
document_part = Package.open(docx).main_document_part
if document_part.content_type != CT.WML_DOCUMENT_MAIN:
tmpl = "file '%s' is not a Word file, content type is '%s'"
raise ValueError(tmpl % (docx, document_part.content_type))
else:
# word_open_xml
document_part = Package.open(
word_open_xml, is_from_file=False
).main_document_part
if document_part.content_type != CT.WML_DOCUMENT_MAIN:
tmpl = "string '%s' is not a Word document, content type is '%s'"
raise ValueError(tmpl % (word_open_xml, document_part.content_type))
return document_part.document
|
565dd4f7f1d815f2e5ef97226d1175283ba942de
| 25,556 |
def css_tag(parser, token):
"""
Renders a tag to include the stylesheet. It takes an optional second
parameter for the media attribute; the default media is "screen, projection".
Usage::
{% css "<somefile>.css" ["<projection type(s)>"] %}
Examples::
{% css "myfile.css" %}
{% css "myfile.css" "screen, projection"%}
"""
path = get_path_from_tokens(token)
tokens = token.split_contents()
if len(tokens) > 2:
# Get the media types from the tag call provided by the user.
media_type = tokens[2][1:-1]
else:
# Default values.
media_type = "screen, projection"
return CssTagNode(path, media_type=media_type)
|
b05deebf31c864408df33a41ba95016a06f48e2e
| 25,557 |
def camelcase(path):
"""Applies mixedcase and capitalizes the first character"""
return mixedcase('_{0}'.format(path))
|
484bfcf8797637f56d5d0bdcad6c370f158773c0
| 25,558 |
import copy
def ImproveData_v2 (Lidar_DataOld,Lidar_Data,Data_Safe,Speed,orientation,orientationm1):
"""
The function calculates new positions for obstacles, taking into account the car's speed relative to each point. The accelerometer is needed for that.
Return:
Advanced_Data : [step_i, distance_i,x_i,y_i, Xsafe_i or Xvel_i, Ysafe_i or Yvel_i]
"""
"""
Filtering the data within the alpha range.
Data_Safe -(90,270)
DataSafeFiltered (-alpha,+alpha)
"""
temp=[]
i=0
#Updating the data set with the new positions calculated according to the relative speed of the car and the objects
Advanced_Data=copy.deepcopy(Data_Safe)
while i<len(temp):
j=0
while j<len(Data_Safe):
if temp[i][0]==Advanced_Data[j][0]:
Advanced_Data[j][4]=temp[i][6]
Advanced_Data[j][5]=temp[i][7]
j+=1
i+=1
return(Advanced_Data)
|
2bd6c0f167e65ad4a461d75a95539b68dc0b1a70
| 25,559 |
def label_by_track(mask, label_table):
"""Label objects in mask with track ID
Args:
mask (numpy.ndarray): uint8 np array, output from main model.
label_table (pandas.DataFrame): track table.
Returns:
numpy.ndarray: uint8/16 dtype based on track count.
"""
assert mask.shape[0] == np.max(label_table['frame'] + 1)
if np.max(label_table['trackId']) * 2 > 254:
mask = mask.astype('uint16')
for i in np.unique(label_table['frame']):
sub_table = label_table[label_table['frame'] == i]
sl = mask[i, :, :].copy()
lbs = np.unique(sl).tolist()
'''
if lbs[-1] + 1 != len(lbs):
raise ValueError('Mask is not continuously or wrongly labeled.')
'''
ori_labels = set(lbs) - {0}
untracked = list(ori_labels - set(list(sub_table['continuous_label'])))
# remove untracked
for j in untracked:
sl[mask[i, :, :] == j] = 0
# update tracked
for j in sub_table.index:
sl[mask[i, :, :] == sub_table.loc[j, 'continuous_label']] = sub_table.loc[j, 'trackId']
mask[i, :, :] = sl.copy()
return mask
|
9190714e8cfc3955d1aeffd22d20574d14889538
| 25,560 |
import zipfile
import xml.sax
def load_guidata(filename, report):
"""Check if we have a GUI document."""
report({'INFO'}, "load guidata..")
guidata = None
zdoc = zipfile.ZipFile(filename)
if zdoc:
if "GuiDocument.xml" in zdoc.namelist():
gf = zdoc.open("GuiDocument.xml")
guidata = gf.read()
gf.close()
Handler = FreeCAD_xml_handler()
xml.sax.parseString(guidata, Handler)
guidata = Handler.guidata
for key, properties in guidata.items():
# open each diffusecolor files and retrieve values
# first 4 bytes are the array length,
# then each group of 4 bytes is abgr
if "DiffuseColor" in properties:
# print ("opening:",guidata[key]["DiffuseColor"])
df = zdoc.open(guidata[key]["DiffuseColor"])
buf = df.read()
# print (buf," length ",len(buf))
df.close()
cols = []
for i in range(1, int(len(buf)/4)):
cols.append(
(buf[i*4+3], buf[i*4+2], buf[i*4+1], buf[i*4]))
guidata[key]["DiffuseColor"] = cols
zdoc.close()
report({'INFO'}, "load guidata done.")
# print("guidata:", guidata)
return guidata
|
3828d895a5abb9c6f783eee52d8c747f2f32c20c
| 25,561 |
def question_answers(id2line, convos):
""" Divide the dataset into two sets: questions and answers. """
questions, answers = [], []
for convo in convos:
for index, line in enumerate(convo[:-1]):
questions.append(id2line[convo[index]])
answers.append(id2line[convo[index + 1]])
assert len(questions) == len(answers)
return questions, answers
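# Usage sketch (added for illustration): a conversation [L1, L2, L3] yields the
# question/answer pairs (L1, L2) and (L2, L3).
_id2line = {'L1': 'Hi.', 'L2': 'Hello!', 'L3': 'Bye.'}
_q, _a = question_answers(_id2line, [['L1', 'L2', 'L3']])
assert _q == ['Hi.', 'Hello!'] and _a == ['Hello!', 'Bye.']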
|
f2654fcff2b9d90e78750cc8632eea9771361c4d
| 25,562 |
import copy
def subgrid_kernel(kernel, subgrid_res, odd=False, num_iter=100):
"""
creates a higher resolution kernel with subgrid resolution as an interpolation of the original kernel in an
iterative approach
:param kernel: initial kernel
:param subgrid_res: subgrid resolution required
:return: kernel with higher resolution (larger)
"""
subgrid_res = int(subgrid_res)
if subgrid_res == 1:
return kernel
nx, ny = np.shape(kernel)
d_x = 1. / nx
x_in = np.linspace(d_x/2, 1-d_x/2, nx)
d_y = 1. / nx
y_in = np.linspace(d_y/2, 1-d_y/2, ny)
nx_new = nx * subgrid_res
ny_new = ny * subgrid_res
if odd is True:
if nx_new % 2 == 0:
nx_new -= 1
if ny_new % 2 == 0:
ny_new -= 1
d_x_new = 1. / nx_new
d_y_new = 1. / ny_new
x_out = np.linspace(d_x_new/2., 1-d_x_new/2., nx_new)
y_out = np.linspace(d_y_new/2., 1-d_y_new/2., ny_new)
kernel_input = copy.deepcopy(kernel)
kernel_subgrid = image_util.re_size_array(x_in, y_in, kernel_input, x_out, y_out)
kernel_subgrid = kernel_norm(kernel_subgrid)
for i in range(max(num_iter, 1)):
# given a proposition, re-size it to original pixel size
if subgrid_res % 2 == 0:
kernel_pixel = averaging_even_kernel(kernel_subgrid, subgrid_res)
else:
kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
delta = kernel - kernel_pixel
temp_kernel = kernel_input + delta
kernel_subgrid = image_util.re_size_array(x_in, y_in, temp_kernel, x_out, y_out)#/norm_subgrid
kernel_subgrid = kernel_norm(kernel_subgrid)
kernel_input = temp_kernel
#from scipy.ndimage import zoom
#ratio = subgrid_res
#kernel_subgrid = zoom(kernel, ratio, order=4) / ratio ** 2
#print(np.shape(kernel_subgrid))
# whatever has not been matched is added to zeroth order (in squares of the undersampled PSF)
if subgrid_res % 2 == 0:
return kernel_subgrid
kernel_pixel = util.averaging(kernel_subgrid, numGrid=nx_new, numPix=nx)
kernel_pixel = kernel_norm(kernel_pixel)
delta_kernel = kernel_pixel - kernel_norm(kernel)
id = np.ones((subgrid_res, subgrid_res))
delta_kernel_sub = np.kron(delta_kernel, id)/subgrid_res**2
return kernel_norm(kernel_subgrid - delta_kernel_sub)
|
8c62e9a09052faf2f52dc2141b0432b115c79417
| 25,563 |
import spacy.en
import logging
def get_spacy():
"""
Loads the spaCy english processor.
Tokenizing, Parsing, and NER are enabled. All other features are disabled.
Returns:
A spaCy Language object for English
"""
logging.info('Loading spaCy...')
nlp = spacy.en.English(tagger=False, parser=True, matcher=False)
return nlp
|
6abe2c9cb8cb0027c53c5e013d4127829b339699
| 25,564 |
import re
import datetime
import numpy as np
import astroobs as obs
from astropy.time import Time
def get_JDs(period='102', night=True, arrays=True, verbose=True):
"""
Get the Julian days for all ESPRESSO GTO runs in a given period. If
`night`=True, return the JD of sunset and sunrise. This function returns the
runs' start and end in arrays (lists if `arrays`=False).
"""
if night:
# from astroplan import Observer
# paranal = Observer.at_site("paranal")
VLT = obs.Observation('vlt', moonAvoidRadius=15, horizon_obs=0)
if isinstance(period, int):
period = str(period)
if ',' in period:
periods = period.split(',')
else:
periods = [period]
starts, ends = [], []
for period in periods:
if verbose:
print(f'Period: {period},', end=' ')
print('starting ESO query...', end=' ', flush=True)
r = query(period)
if verbose:
print('done')
lines = r.text.split('\n')[2:-1]
pattern = re.compile(r"between \d* \w* \d* and \d* \w* \d*")
if verbose and night:
print('calculating sunset/sunrise times...')
for line in lines:
try:
found = re.findall(pattern, line)[0]
except IndexError:
continue
date1 = found[8:-16]
if night:
t = Time(parse_date(date1) + ' 12:00:00')
VLT.change_date(t.datetime)
jd1 = Time(datetime.datetime.strptime(str(VLT.sunset),
r'%Y/%m/%d %H:%M:%S')).mjd
# jd1 = paranal.sun_set_time(t, 'next').mjd
else:
jd1 = Time(parse_date(date1)).mjd # - 24e5
date2 = found[24:]
if night:
t = Time(parse_date(date2) + ' 12:00:00')
VLT.change_date(t.datetime)
jd2 = Time(datetime.datetime.strptime(str(VLT.sunset),
r'%Y/%m/%d %H:%M:%S')).mjd
# jd2 = paranal.sun_rise_time(t, 'previous').mjd
else:
jd2 = Time(parse_date(date2)).mjd # - 24e5
starts.append(jd1)
ends.append(jd2)
starts, ind = np.unique(starts, return_index=True)
ends = np.array(ends)[ind]
if arrays:
return starts, ends
else:
return list(starts), list(ends)
|
f21aea967e0d1a481d599bf7ffea2316d401a7ea
| 25,565 |
def normalize_breton(breton_string: str) -> str:
"""Applies Breton mutations."""
return (breton_string.strip().lower() @
DO_PREPROCESSING @
DO_SOFT_MUTATION @
DO_HARD_MUTATION @
DO_SPIRANT_MUTATION @
DO_POSTPROCESSING).string()
|
f5536f98c881d854fc279b81b5a6e99e4811165f
| 25,566 |
import numpy as np
from keras.utils.data_utils import get_file
from art import DATA_PATH
def load_mnist(raw=False):
"""Loads MNIST dataset from `DATA_PATH` or downloads it if necessary.
:param raw: `True` if no preprocessing should be applied to the data. Otherwise, data is normalized to 1.
:type raw: `bool`
:return: `(x_train, y_train), (x_test, y_test), min, max`
:rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
"""
path = get_file('mnist.npz', cache_subdir=DATA_PATH, origin='https://s3.amazonaws.com/img-datasets/mnist.npz')
f = np.load(path)
x_train = f['x_train']
y_train = f['y_train']
x_test = f['x_test']
y_test = f['y_test']
f.close()
# Add channel axis
min_, max_ = 0, 255
if not raw:
min_, max_ = 0., 1.
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
x_train, y_train = preprocess(x_train, y_train)
x_test, y_test = preprocess(x_test, y_test)
return (x_train, y_train), (x_test, y_test), min_, max_
|
fc661afef4062e14a90a3cbc1a837cd6f68b6039
| 25,567 |
def word_flag(*args):
"""
word_flag() -> flags_t
Get a flags_t representing a word.
"""
return _ida_bytes.word_flag(*args)
|
765051d3c51974f24cf71a846ab3ffed4767a3d0
| 25,568 |
from typing import Optional
from typing import Dict
from typing import Iterable
from typing import Union
from typing import List
def get_sequence_annotations(
sequence: str,
allow: Optional[set] = {"H", "K", "L"},
scheme: Optional[str] = "chothia",
cdr1_scheme: Optional[Dict[str, Iterable]] = {
"H": range(26, 33),
"L": range(24, 35),
},
cdr2_scheme: Optional[Dict[str, Iterable]] = {
"H": range(52, 57),
"L": range(50, 57),
},
cdr3_scheme: Optional[Dict[str, Iterable]] = {
"H": range(95, 103),
"L": range(89, 98),
},
assign_germline: Optional[bool] = True,
) -> Dict[str, Union[str, int, List[str]]]:
"""
For VH or VL amino acid sequence returns the three CDR sequences as determined
from the input numbering (scheme) and the given ranges.
default ranges are Chothia CDRs.
============================================================================
Note:
* Gracefully stolen and refactored get_cdr_simple() from Parapred source code.
* Returns a dictionary with CDR sequences, numbering scheme positions for each CDR residue.
"""
anarci_output = run_and_parse_anarci(
sequence, scheme=scheme, allow=allow, assign_germline=assign_germline
)
numbering = anarci_output["NUMBERING"] # numbering starts with 1 and not 0
chain_type = anarci_output["CHAIN_TYPE"]
if chain_type == "K" and chain_type not in cdr1_scheme:
chain_type = "L"
if chain_type not in cdr1_scheme:
raise ValueError(f"chain_type {chain_type} is not in input CDR scheme")
cdr1_scheme = cdr1_scheme[chain_type]
cdr2_scheme = cdr2_scheme[chain_type]
cdr3_scheme = cdr3_scheme[chain_type]
# extract CDR sequences
cdr1, cdr2, cdr3 = "", "", ""
cdr1_numbering, cdr2_numbering, cdr3_numbering = [], [], []
for num_tuple, res in numbering:
residue_position = str(num_tuple[0]) + num_tuple[1].rstrip()
if num_tuple[0] in cdr1_scheme:
if res != "-":
cdr1_numbering.append(residue_position)
cdr1 += res
elif num_tuple[0] in cdr2_scheme:
if res != "-":
cdr2_numbering.append(residue_position)
cdr2 += res
elif num_tuple[0] in cdr3_scheme:
if res != "-":
cdr3_numbering.append(residue_position)
cdr3 += res
annotation_dict = {
"CDR1": cdr1,
"CDR1_NUMBERING": cdr1_numbering,
"CDR2": cdr2,
"CDR2_NUMBERING": cdr2_numbering,
"CDR3": cdr3,
"CDR3_NUMBERING": cdr3_numbering,
}
annotation_dict = {**annotation_dict, **anarci_output}
del annotation_dict["NUMBERING"]
return annotation_dict
|
3f7d74693086e7603215d912083653005cdddb5a
| 25,570 |
import stat
def skew(variable=None, weights=None, data=None):
"""Return the asymmetry coefficient of a sample.
Parameters
----------
data : pandas.DataFrame
variable : array-like, str
weights : array-like, str
data : pandas.DataFrame
Object which stores ``variable`` and ``weights``.
Returns
-------
skew : float
References
----------
Moment (mathematics). (2017, May 6). In Wikipedia, The Free Encyclopedia.
Retrieved 14:40, May 15, 2017, from
https://en.wikipedia.org/w/index.php?title=Moment_(mathematics)
Notes
-----
It is an alias of the standardized third-order moment.
"""
variable, weights = utils.extract_values(data, variable, weights)
return stat.skew(variable, weights)
|
08be7f2e9741855b699e847307c61b14ab6b3009
| 25,571 |
def deep_initial_state(batch_size, h_size, stack_size):
""" Function to make a stack of inital state for a multi-layer GRU.
"""
return tuple(static_initial_state(batch_size, h_size) for layer in range(stack_size))
|
4d6bc65d2fcb158a99a08d88c755c81ca08433f3
| 25,572 |
def create_element(pan_elem, elem_type=None)->Element:
"""
Find the element type and call constructor specified by it.
"""
etype = 'ELEMENT TYPE MISSING'
if elem_type is not None:
etype = elem_type
elif 't' in pan_elem:
etype = pan_elem['t']
elif 'pandoc-api-version' in pan_elem:
etype = 'Pandoc'
if etype not in _ELEMENT_TYPES:
# Invalid etype( = 'ELEMENT TYPE MISSING' or invalid `elem_type`)
raise KeyError(etype)
element = _ELEMENT_TYPES[etype]['class'](
pan_elem, etype, _ELEMENT_TYPES[etype], create_element)
return element
|
c5507a35e7a75676e450d0f960fd3b70c873440d
| 25,573 |
def load_weights(variables, file_name):
"""Reshapes and loads official pretrained Yolo weights.
Args:
variables: A list of tf.Variable to be assigned.
file_name: A name of a file containing weights.
Returns:
A list of assign operations.
"""
with open(file_name, "rb") as f:
# Skip first 5 values containing irrelevant info
np.fromfile(f, dtype=np.int32, count=5)
weights = np.fromfile(f, dtype=np.float32)
assign_ops = []
ptr = 0
# Load weights for Darknet part.
# Each convolution layer has batch normalization.
for i in range(52):
conv_var = variables[5 * i]
gamma, beta, mean, variance = variables[5 * i + 1:5 * i + 5]
batch_norm_vars = [beta, gamma, mean, variance]
for var in batch_norm_vars:
shape = var.shape.as_list()
num_params = np.prod(shape)
var_weights = weights[ptr:ptr + num_params].reshape(shape)
ptr += num_params
assign_ops.append(tf.assign(var, var_weights))
shape = conv_var.shape.as_list()
num_params = np.prod(shape)
var_weights = weights[ptr:ptr + num_params].reshape(
(shape[3], shape[2], shape[0], shape[1]))
var_weights = np.transpose(var_weights, (2, 3, 1, 0))
ptr += num_params
assign_ops.append(tf.assign(conv_var, var_weights))
# Loading weights for Yolo part.
# 7th, 15th and 23rd convolution layer has biases and no batch norm.
ranges = [range(0, 6), range(6, 13), range(13, 20)]
unnormalized = [6, 13, 20]
for j in range(3):
for i in ranges[j]:
current = 52 * 5 + 5 * i + j * 2
conv_var = variables[current]
gamma, beta, mean, variance = \
variables[current + 1:current + 5]
batch_norm_vars = [beta, gamma, mean, variance]
for var in batch_norm_vars:
shape = var.shape.as_list()
num_params = np.prod(shape)
var_weights = weights[ptr:ptr + num_params].reshape(shape)
ptr += num_params
assign_ops.append(tf.assign(var, var_weights))
shape = conv_var.shape.as_list()
num_params = np.prod(shape)
var_weights = weights[ptr:ptr + num_params].reshape(
(shape[3], shape[2], shape[0], shape[1]))
var_weights = np.transpose(var_weights, (2, 3, 1, 0))
ptr += num_params
assign_ops.append(tf.assign(conv_var, var_weights))
bias = variables[52 * 5 + unnormalized[j] * 5 + j * 2 + 1]
shape = bias.shape.as_list()
num_params = np.prod(shape)
var_weights = weights[ptr:ptr + num_params].reshape(shape)
ptr += num_params
assign_ops.append(tf.assign(bias, var_weights))
conv_var = variables[52 * 5 + unnormalized[j] * 5 + j * 2]
shape = conv_var.shape.as_list()
num_params = np.prod(shape)
var_weights = weights[ptr:ptr + num_params].reshape(
(shape[3], shape[2], shape[0], shape[1]))
var_weights = np.transpose(var_weights, (2, 3, 1, 0))
ptr += num_params
assign_ops.append(tf.assign(conv_var, var_weights))
return assign_ops
|
3d953792ae1e13285044f40dd840fe2400f20243
| 25,574 |
def parsing_sa_class_id_response(pdu: list) -> int:
"""Parsing TaiSEIA class ID response protocol data."""
packet = SAInfoResponsePacket.from_pdu(pdu=pdu)
if packet.service_id != SARegisterServiceIDEnum.READ_CLASS_ID:
raise ValueError(f'pdu service id invalid, {pdu}')
return int.from_bytes(packet.data_bytes, 'big')
|
e55c6e7041349f036babfd7e9699bfcfe1ff5dea
| 25,575 |
def wrr(self) -> int:
"""
Name: Write ROM port.
Function: The content of the accumulator is transferred to the ROM
output port of the previously selected ROM chip.
The data is available on the output pins until a new WRR
is executed on the same chip.
The content of the ACC and the carry/link are unaffected.
Syntax: WRR
Assembled: 1110 0010
Symbolic: (ACC) --> ROM output lines
Execution: 1 word, 8-bit code and an execution time of 10.8 usec.
Side-effects: The LSB bit of the accumulator appears on I/O 0, Pin 16,
of the 4001 ROM chip until it is changed.
Notes: No operation is performed on I/O lines coded as inputs.
4 chips per bank, 8 banks = 32 addressable ROM ports.
An address set by the previous SRC instruction is interpreted as follows:
(Bits in this order : 12345678)
Bits 1 - 4 = The ROM chip targeted
Bits 5 - 8 = Not relevant
"""
rom, _unused1, _unused2 = \
decode_command_register(self.COMMAND_REGISTER, 'ROM_PORT')
self.ROM_PORT[rom] = self.ACCUMULATOR
self.increment_pc(1)
return self.ACCUMULATOR
|
a019f176bba0e50d73906abd8a20862c4993b75f
| 25,576 |
def convert_to_signed_int_32_bit(hex_str):
"""
Utility function to convert a hex string into a 32 bit signed hex integer value
:param hex_str: hex String
:return: signed 32 bit integer
"""
val = int(hex_str, 16)
if val > 0x7FFFFFFF:
val = ((val+0x80000000) & 0xFFFFFFFF) - 0x80000000
return val
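# Usage sketch (added for illustration): values above 0x7FFFFFFF wrap around
# to negative 32-bit integers.
assert convert_to_signed_int_32_bit('7FFFFFFF') == 2147483647
assert convert_to_signed_int_32_bit('FFFFFFFF') == -1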
|
f8d39b20475c30f162948167f8534e367d9c58e8
| 25,577 |
def parent_node(max_child_node, max_parent_node):
"""
Parents child node into parent node hierarchy
:param max_child_node: MaxPlus.INode
:param max_parent_node: MaxPlus.INode
"""
max_child_node.SetParent(max_parent_node)
return max_child_node
|
1a54d4c485e61361633165da0f05c8f871296ae6
| 25,578 |
import tensorflow as tf
import torch
def to_numpy_or_python_type(tensors):
"""Converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.
For each torch tensor, it calls `t.detach().cpu().numpy()`. If the result is a scalar value,
it converts it to a Python type, such as a float or int, by calling
`result.item()`.
Numpy scalars are converted, as Python types are often more convenient to deal
with. This is especially useful for bfloat16 Numpy scalars, which don't
support as many operations as other Numpy values.
Args:
tensors: A structure of tensors.
Returns:
`tensors`, but scalar tensors are converted to Python types and non-scalar
tensors are converted to Numpy arrays.
"""
def _to_single_numpy_or_python_type(t):
if isinstance(t, torch.Tensor):
x = t.detach().cpu().numpy()
return x.item() if np.ndim(x) == 0 else x
return t # Don't turn ragged or sparse tensors to NumPy.
return tf.nest.map_structure(_to_single_numpy_or_python_type, tensors)
|
34ea32fb2cf4fe8e45c429139876e7f1afc9f794
| 25,580 |
def _get_flow(args):
"""Ensure the same flow is used in hello world example and system test."""
return (
Flow(cors=True)
.add(uses=MyTransformer, replicas=args.replicas)
.add(uses=MyIndexer, workspace=args.workdir)
)
|
625164c400f420cbb255cfdaa32f79c4862e23ea
| 25,581 |
def get_group_id(
client: AlgodClient,
txids: list
) -> list:
"""
Gets Group IDs from Transaction IDs
:param client: an AlgodClient (GET)
:param txids: Transaction IDs
:return: gids - Group IDs
"""
# Get Group IDs
gids = []
print("Getting gids...")
try:
while True:
txn_infos = get_transaction_info(
txids=txids,
client=client
)
if len(txn_infos) != 0:
for txn_info in txn_infos:
gid = txn_info['transaction']['group']
if len(gid) > 0:
gids.append(gid)
break
except Exception as e:
print(e.args)
return gids
|
937b29f6b482ed1e62612a07cc80c17c6737c143
| 25,582 |
import logging
import multiprocessing
from functools import partial
from tqdm import tqdm
from obspy import Stream
def _simple_proc(st, sampling_rate=10, njobs=1):
"""
A parallel version of `_proc`, i.e., Basic processing including downsampling, detrend, and demean.
:param st: an obspy stream
:param sampling_rate: expected sampling rate
:param njobs: number of jobs or CPU to use
:return st: stream after processing
"""
# downsampling, detrend, demean
do_work = partial(_proc, sampling_rate=sampling_rate)
# trace_list = []
# for tr in st:
# trace_list.append(tr)
#
st2 = Stream()
logging.info("simple processing for full event correlogram.")
print("simple processing for full event correlogram.")
if njobs == 1:
logging.info('do work sequential (%d cores)', njobs)
for tr in tqdm(st, total=len(st)):
tr2 = do_work(tr)
st2.append(tr2)
else:
logging.info('do work parallel (%d cores)', njobs)
pool = multiprocessing.Pool(njobs)
for tr2 in tqdm(pool.imap_unordered(do_work, st), total=len(st)):
st2.append(tr2)
pool.close()
pool.join()
return st2
|
aa24340d0d43ad8f6c042ed5e04bc94f2ec28cc3
| 25,583 |
import datetime
def closing_time(date=None):
    """
    Get closing time for the given date (defaults to today).
    """
    if date is None:
        date = datetime.date.today()
    return datetime.time(13, 0) if date in nyse_close_early_dates(date.year) else datetime.time(16, 0)
|
40670512dbebfe65c3eb2b2790881fc91415aa40
| 25,584 |
def cos_fp16(x: tf.Tensor) -> tf.Tensor:
"""Run cos(x) in FP16, first running mod(x, 2*pi) for range safety."""
if x.dtype == tf.float16:
return tf.cos(x)
x_16 = tf.cast(tf.mod(x, 2 * np.pi), tf.float16)
return tf.cos(x_16)
|
3212eb19e43fa733490d2cfcfffcc0094715022b
| 25,585 |
from typing import Callable, TypeVar
_F = TypeVar("_F")
def is_documented_by(original: Callable) -> Callable[[_F], _F]:
"""
Decorator to set the docstring of the ``target`` function to that of the ``original`` function.
This may be useful for subclasses or wrappers that use the same arguments.
:param original:
"""
def wrapper(target: _F) -> _F:
document_object_from_another(target, original)
return target
return wrapper
|
acd582112371ccfffd53762546415353abbd3129
| 25,586 |
def check_if_bst(root, min, max):
"""Given a binary tree, check if it follows binary search tree property
To start off, run `check_if_bst(BT.root, -math.inf, math.inf)`"""
if root is None:
return True
if root.key < min or root.key >= max:
return False
return check_if_bst(root.left, min, root.key) and check_if_bst(
root.right, root.key, max
)
|
1bb4b601ef548aec9a4ab2cf5242bc5875c587a2
| 25,587 |
from typing import Union
import pathlib
from typing import Sequence
from typing import Any
import torchvision
def create_video_file(
root: Union[pathlib.Path, str],
name: Union[pathlib.Path, str],
size: Union[Sequence[int], int] = (1, 3, 10, 10),
fps: float = 25,
**kwargs: Any,
) -> pathlib.Path:
"""Create an video file from random data.
Args:
root (Union[str, pathlib.Path]): Root directory the video file will be placed in.
name (Union[str, pathlib.Path]): Name of the video file.
size (Union[Sequence[int], int]): Size of the video that represents the
``(num_frames, num_channels, height, width)``. If scalar, the value is used for the height and width.
If not provided, ``num_frames=1`` and ``num_channels=3`` are assumed.
fps (float): Frame rate in frames per second.
kwargs (Any): Additional parameters passed to :func:`torchvision.io.write_video`.
Returns:
pathlib.Path: Path to the created image file.
Raises:
UsageError: If PyAV is not available.
"""
if isinstance(size, int):
size = (size, size)
if len(size) == 2:
size = (3, *size)
if len(size) == 3:
size = (1, *size)
if len(size) != 4:
raise UsageError(
f"The 'size' argument should either be an int or a sequence of length 2, 3, or 4. Got {len(size)} instead"
)
video = create_image_or_video_tensor(size)
file = pathlib.Path(root) / name
torchvision.io.write_video(str(file), video.permute(0, 2, 3, 1), fps, **kwargs)
return file
|
f11748ae86a80a5f4d9c859c313837fac7effa32
| 25,589 |
def aggregate(collection, pipeline):
"""Executes an aggregation on a collection.
Args:
collection: a `pymongo.collection.Collection` or
`motor.motor_tornado.MotorCollection`
pipeline: a MongoDB aggregation pipeline
Returns:
a `pymongo.command_cursor.CommandCursor` or
`motor.motor_tornado.MotorCommandCursor`
"""
return collection.aggregate(pipeline, allowDiskUse=True)
|
03ea889ea23fb81c6a329ee270df2ac253e90d69
| 25,590 |
def decryptAES(key, data, mode=2):
"""decrypt data with aes key"""
return aes.decryptData(key, data, mode)
|
30f5b4173a8ed388a13481a2fd41293cd2304b21
| 25,591 |
import requests
def __ipv6_safe_get(endpoint: str, addr: str) -> requests.Response:
"""HTTP GET from endpoint with IPv6-safe Host: header
Args:
endpoint: The endpoint path starting with /
addr: full address (IPV6 or IPv4) of server
Notes:
* This is needed because the Pyton requests module creates HTTP
requests with the Host: header containing the scope (%xxxx)
for IPv6, and some servers see this as invalid and return
a 400 Bad Request.
"""
if(addr.startswith('[') and not addr.startswith('[::1]')):
headers = {'Host': f'{addr.split("%")[0]}]'}
else:
headers = {}
return requests.get(f"http://{addr}{endpoint}", headers=headers)
|
adb1c7c2300e9e41049a9eda957f264322095d9c
| 25,592 |
def format_advertisement(data):
""" format advertisement data and scan response data. """
resolve_dict = {
# FLAGS AD type
st_constant.AD_TYPE_FLAGS: 'FLAGS',
# Service UUID AD types
st_constant.AD_TYPE_16_BIT_SERV_UUID: '16_BIT_SERV_UUID',
st_constant.AD_TYPE_16_BIT_SERV_UUID_CMPLT_LIST: '16_BIT_SERV_UUID_CMPLT_LIST',
st_constant.AD_TYPE_32_BIT_SERV_UUID: '32_BIT_SERV_UUID',
st_constant.AD_TYPE_32_BIT_SERV_UUID_CMPLT_LIST: '32_BIT_SERV_UUID_CMPLT_LIST',
st_constant.AD_TYPE_128_BIT_SERV_UUID: '128_BIT_SERV_UUID',
st_constant.AD_TYPE_128_BIT_SERV_UUID_CMPLT_LIST: '128_BIT_SERV_UUID_CMPLT_LIST',
# Local name AD types
st_constant.AD_TYPE_SHORTENED_LOCAL_NAME: 'SHORTENED_LOCAL_NAME',
st_constant.AD_TYPE_COMPLETE_LOCAL_NAME: 'COMPLETE_LOCAL_NAME',
# TX power level AD type
st_constant.AD_TYPE_TX_POWER_LEVEL: 'TX_POWER_LEVEL',
# Class of device
st_constant.AD_TYPE_CLASS_OF_DEVICE: 'CLASS_OF_DEVICE',
# Security manager TK value AD type
st_constant.AD_TYPE_SEC_MGR_TK_VALUE: 'SEC_MGR_TK_VALUE',
# Security manager OOB flags
st_constant.AD_TYPE_SEC_MGR_OOB_FLAGS: 'SEC_MGR_OOB_FLAGS',
# Slave connection interval AD type
st_constant.AD_TYPE_SLAVE_CONN_INTERVAL: 'SLAVE_CONN_INTERVAL',
# Service solicitation UUID list AD types
st_constant.AD_TYPE_SERV_SOLICIT_16_BIT_UUID_LIST: 'SERV_SOLICIT_16_BIT_UUID_LIST',
st_constant.AD_TYPE_SERV_SOLICIT_32_BIT_UUID_LIST: 'SERV_SOLICIT_32_BIT_UUID_LIST',
st_constant.AD_TYPE_SERV_SOLICIT_128_BIT_UUID_LIST: 'SERV_SOLICIT_128_BIT_UUID_LIST',
# Service data AD type
st_constant.AD_TYPE_SERVICE_DATA: 'SERVICE_DATA',
# Manufacturer specific data AD type
st_constant.AD_TYPE_MANUFACTURER_SPECIFIC_DATA: 'MANUFACTURER_SPECIFIC_DATA'
}
offset = 0
size = len(data)
advertisement_dict = {}
while offset < size:
field_len = int.from_bytes(data[offset:offset + 1], 'little')
if field_len == 0 or offset + field_len > size:
return advertisement_dict
field_type = int.from_bytes(data[offset + 1:offset + 2], 'little')
field_value = data[offset + 2:offset + 2 + field_len - 1]
advertisement_dict.update({resolve_dict[field_type]: field_value})
offset += field_len + 1
return advertisement_dict
|
a2b2740c45debe6c801ac80d99c8ed2b4537c205
| 25,593 |
def is_dicom_file(path):
"""Check if the given path appears to be a dicom file.
Only looks at the extension, not the contents.
Args:
path (str): The path to the dicom file
Returns:
bool: True if the file appears to be a dicom file
"""
path = path.lower()
for ext in DICOM_EXTENSIONS:
if path.endswith(ext):
return True
return False
|
2bd20b0f9bf40db24e9c6df4591127f59d07f882
| 25,594 |
import math
def build_graph(df_list, sens='ST', top=410, min_sens=0.01,
edge_cutoff=0.0, edge_width=150, log=False):
"""
Initializes and constructs a graph where vertices are the parameters
selected from the first dataframe in 'df_list', subject to the
constraints set by 'sens', 'top', and 'min_sens'. Edges are the second
order sensitivities of the interactions between those vertices,
with sensitivities greater than 'edge_cutoff'.
Parameters
-----------
df_list : list
A list of two dataframes. The first dataframe should be
the first/total order sensitivities collected by the
function data_processing.get_sa_data().
sens : str, optional
A string with the name of the sensitivity that you would
like to use for the vertices ('ST' or 'S1').
top : int, optional
An integer specifying the number of vertices to display (
the top sensitivity values).
min_sens : float, optional
A float with the minimum sensitivity to allow in the graph.
edge_cutoff : float, optional
A float specifying the minimum second order sensitivity to
show as an edge in the graph.
edge_width : float, optional
A float specifying the edge width to be displayed.
log : bool, optional
Take the log of all the values.
Returns
--------
g : graph-tool object
a graph-tool graph object of the network described above. Each
vertex has properties 'param', 'sensitivity', and 'confidence'
corresponding to the name of the parameter, value of the sensitivity
index, and it's confidence interval. The only edge property is
'second_sens', the second order sensitivity index for the
interaction between the two vertices it connects.
"""
# get the first/total index dataframe and second order dataframe
df = df_list[0]
df2 = df_list[1]
# Make sure sens is ST or S1
if sens not in set(['ST', 'S1']):
raise ValueError('sens must be ST or S1')
# Make sure that there is a second order index dataframe
if df2 is None:
    raise Exception('Missing second order dataframe!')
# slice the dataframes so the resulting graph will only include the top
# 'top' values of 'sens' greater than 'min_sens'.
df = df.sort_values(sens, ascending=False)
df = df.loc[df[sens] > min_sens, :].head(top)
df = df.reset_index()
# initialize a graph
g = Graph()
vprop_sens = g.new_vertex_property('double')
vprop_conf = g.new_vertex_property('double')
vprop_name = g.new_vertex_property('string')
eprop_sens = g.new_edge_property('double')
g.vertex_properties['param'] = vprop_name
g.vertex_properties['sensitivity'] = vprop_sens
g.vertex_properties['confidence'] = vprop_conf
g.edge_properties['second_sens'] = eprop_sens
# keep a list of all the vertices
v_list = []
# Add the vertices to the graph
for i, param in enumerate(df['Parameter']):
v = g.add_vertex()
vprop_sens[v] = df.loc[i, sens]
vprop_conf[v] = 1 + df.loc[i, '%s_conf' % sens] / df.loc[i, sens]
if log:
vprop_sens[v] = math.log10(vprop_sens[v])
vprop_conf[v] = math.log10(vprop_conf[v])
vprop_name[v] = param
v_list.append(v)
# Make two new columns in second order dataframe that point to the vertices
# connected on each row.
df2['vertex1'] = -999
df2['vertex2'] = -999
for vertex in v_list:
param = g.vp.param[vertex]
df2.loc[df2['Parameter_1'] == param, 'vertex1'] = vertex
df2.loc[df2['Parameter_2'] == param, 'vertex2'] = vertex
# Only allow edges for vertices that we've defined
df_edges = df2[(df2['vertex1'] != -999) & (df2['vertex2'] != -999)]
# eliminate edges below a certain cutoff value
pruned = df_edges[df_edges['S2'] > edge_cutoff]
pruned.reset_index(inplace=True)
# Add the edges for the graph
for i, sensitivity in enumerate(pruned['S2']):
v1 = pruned.loc[i, 'vertex1']
v2 = pruned.loc[i, 'vertex2']
e = g.add_edge(v1, v2)
# multiply by a number to make the lines visible on the plot
eprop_sens[e] = sensitivity if sensitivity > 0 else sensitivity * -1
# if log:
# eprop_sens[e] = math.log10(eprop_sens[e])
eprop_sens[e] *= edge_width
# These are ways you can reference properties of vertices or edges
# g.vp.param[g.vertex(77)]
# g.vp.param[v_list[0]]
print('Created a graph with %s vertices and %s edges.\nVertices are the '
'top %s %s values greater than %s.\nOnly S2 values (edges) '
'greater than %s are included.' %
(g.num_vertices(), g.num_edges(), top, sens, min_sens, edge_cutoff))
return g
|
b17b3f57ab21df0117e61a12005f401f81620368
| 25,595 |
def ranking_scores(prng=None, mix=False, permute=False, gamma=0.01, beta=5., N=100, l=1, means=None, stds=None):
"""
Generate the ranking scores.
Parameters
----------
prng : random generator container
Seed for the random number generator.
mix : bool
Flag for generating the ranking scores with a Gaussian mixture.
permute : bool
Flag for permuting the node before associating a ranking score to each of them,
i.e. the hierarchical block structure induced on the adjacency matrix is randomized.
gamma : float
The spring constant for (s, origin).
beta : float
Inverse temperature parameter.
N : int
Number of nodes.
l : int
Number of leagues
means : list
List of means to be used for the scores generation.
stds : list
List of standard deviations to be used for the scores generation.
Returns
----------
s : Numpy array
N-dimensional array of real ranking scores for each node.
nodes_s : Numpy array
Result of the random permutation applied to the node IDs (if required).
Can be used for inverting the permutation and induce the block structure
generated by the leagues on the adjacency matrix.
"""
if prng is None:
# Set seed random number generator
prng = np.random.RandomState(seed = 42)
if mix:
if means is None:
means = prng.randint(-5, 5, l)
if stds is None:
stds = prng.randint(0, 1, l)
s = np.concatenate([prng.normal(means[i], stds[i], N // l) for i in range(l - 1)])
if N % l:
s = np.concatenate([s, prng.normal(means[-1], stds[-1], N - s.shape[0])])
if permute:
# shuffle s in order to not have a ranking structure overlapped to the communities one
nodes_s = prng.permutation(N)
s = s[nodes_s]
else:
nodes_s = np.arange(N)
else:
# Generate s through factorized Gaussian, l0 = 0
s = prng.normal(0, 1. / np.sqrt(gamma * beta), N)
nodes_s = np.arange(N)
return s, nodes_s
|
40801599ab67d852740d5219d22debdbed91de39
| 25,596 |
def calculate_direction(G, cutoff, normalize=True):
""" Calculate direction for entire network
Parameters
----------
G : nx.graph
Fault network
cutoff : int, float
Cutoff distance for direction
normalize : boolean
Normalize direction (default: True)
Returns
-------
G
nx.graph
"""
# Assertions
assert isinstance(G, nx.Graph), 'G is not a NetworkX graph'
# Calculation
for node in G.nodes:
length = nx.single_source_shortest_path_length(G, node, cutoff=cutoff)
keys = [keys for keys, values in length.items() if values == max(length.values())]
if len(keys) > 2:
(node_0, node_1) = keys[:2]
if len(keys) == 2:
(node_0, node_1) = keys
if len(keys) == 1:
node_0 = keys[0]
length = nx.single_source_shortest_path_length(G, node, cutoff=cutoff - 1)
keys = [keys for keys, values in length.items() if values == max(length.values())]
node_1 = keys[0]
# extract position
pt_0 = G.nodes[node_0]['pos']
pt_1 = G.nodes[node_1]['pos']
# calculate vector
dx = pt_0[0] - pt_1[0]
dy = pt_0[1] - pt_1[1]
# normalize
v_norm = np.array([dx,dy])/np.linalg.norm([dx, dy])
dx = v_norm[0]
dy = v_norm[1]
# write to graph
G.nodes[node]['dx'] = dx
G.nodes[node]['dy'] = dy
return G
|
9b64e0e8226579728f76ab510e672372cb708338
| 25,597 |
from datetime import datetime
def generateVtBar(row):
"""生成K线"""
bar = VtBarData()
bar.symbol = row['code']
bar.exchange = ''
bar.vtSymbol = bar.symbol
bar.open = row['open']
bar.high = row['high']
bar.low = row['low']
bar.close = row['close']
bar.volume = row['volume']
bar.datetime = datetime.strptime(row['time_key'], '%Y-%m-%d %H:%M:%S')
bar.date = bar.datetime.strftime("%Y%m%d")
bar.time = bar.datetime.strftime("%H:%M:%S")
return bar
|
8431b313927692743d727ef9225e33899cc6c916
| 25,598 |
def coords_to_id(traversed):
"""calculate the id in level-order from the coordinates
Args:
input: traversed tree as list of dict
Returns:
traversed tree (dict) with id as key
"""
traversed_id = {}
#print('coords to id, traversed ', traversed)
for node in traversed:
k = full_tree_id(node['coords'])
traversed_id[k] = node
parent = [k for k, val in traversed_id.items() if val['coords'] == node['coords'][:-1]]
traversed_id[k]['parent_id'] = parent[0] if len(parent) == 1 else -1
# level_no = 0
# while True:
# current_level =list(filter(lambda d: len(d['coords']) == level_no+1, traversed))
# if len(current_level) == 0:
# break
# for d in sorted(current_level, key=lambda d: sum(d['coords'])):
# k = full_tree_id(d['coords'])
# traversed_id[k] = d
# parent = [k for k, val in traversed_id.items() if val['coords'] == d['coords'][:-1]]
# traversed_id[k]['parent_id'] = parent[0] if len(parent) == 1 else -1
# level_no += 1
#print('coords to id, traversed_id ', traversed_id)
return traversed_id
|
91f993f9693e01983de1f7fa124dcb5cb39a92f9
| 25,600 |
def dataframe_to_ipy_image(df, f=None, **kwargs):
"""Create IPython Image from PIL Image.
Args:
df - dataframe to render
f - operation to perform on PIL Image (e.g. f=lambda img: img.rotate(-90, expand=True))
kwargs - arguments to IPython.display.Image, such as width and height for html display
"""
pil_image = dataframe_to_pil_image(df)
if f is not None:
pil_image = f(pil_image)
return pil_to_ipy_image(pil_image=pil_image, **kwargs)
|
44348ac041067620bfa37cdedb22f3544e6bc940
| 25,601 |
def read_dmarkov(columns, rows, D, symbolization_type, division_order, suffix=["normal", "gaussian002"]):
"""
Reads the result files for the D-Markov algorithm. The function requires a configuration for the parameters of
the D-Markov. The suffix parameter indicates if the non-modified files should be loaded ("normal"), the noisy
files should be loaded ("gaussian002") or both.
:param columns: Number of columns in the division.
:param rows: Number of rows in the division.
:param D: Number of previous symbols to take into account (Markov property).
:param symbolization_type: Type of symbolization. It should be an Enum of type SymbolizationType (observations_set.py)
(see EqualWidthLimits, EqualFrequencyLimits and EqualFrequencyLimitsNoBounds in observations_set.py).
:param division_order: Only for EqualFrequencyLimits and EqualFrequencyLimitsNoBounds. Should we do a row-first
or column-first division? It should be an Enum of type DivisionOrder (observations_set.py)
:param suffix: Load non-modified ("normal"), noisy ("gaussian002") or both file results.
:return: A pandas dataframe with the information in the result files. Also, an "Anomalous" column is created, which
is False for the "normal" result files and True for the "gaussian002" files.
"""
if isinstance(suffix, str):
suffix = [suffix]
basename = 'results/DMarkovMachine/Type{:d}/DMarkovMachine_{:d}_{:d}_{:d}_{}_{}.csv'
if symbolization_type == SymbolizationType.EQUAL_WIDTH:
symb_str = "EW"
else:
if symbolization_type == SymbolizationType.EQUAL_FREQUENCY:
symb_str = "EF"
elif symbolization_type == SymbolizationType.EQUAL_FREQUENCY_NO_BOUNDS:
symb_str = "EFNB"
if division_order == DivisionOrder.ROWS_THEN_COLUMNS:
symb_str += "_RC"
elif division_order == DivisionOrder.COLUMNS_THEN_ROWS:
symb_str += "_CR"
dmarkov_df = pd.DataFrame()
for type_idx in range(1,37):
for s in suffix:
normal_name = basename.format(type_idx, rows, columns, D, symb_str, s)
file_df = pd.read_csv(normal_name, dtype={'Name': 'object', 'AnomalyScore': 'float64'})
if s == "normal":
file_df["Anomalous"] = False
elif s == "gaussian002":
file_df["Anomalous"] = True
dmarkov_df = dmarkov_df.append(file_df)
return dmarkov_df
|
c9fec2d46cbc8c3f4bcf7fc112779432dd6e9155
| 25,603 |
import torch
def compute_output_shape(observation_space, layers):
"""Compute the size of the output after passing an observation from
`observation_space` through the given `layers`."""
# [None] adds a batch dimension to the random observation
torch_obs = torch.tensor(observation_space.sample()[None])
with torch.no_grad():
sample = preprocess_obs(torch_obs, observation_space, normalize_images=True)
for layer in layers:
# forward prop to compute the right size
sample = layer(sample)
# make sure batch axis still matches
assert sample.shape[0] == torch_obs.shape[0]
# return everything else
return sample.shape[1:]
|
865b9b90f39f5726feb16da70afc515071991fd7
| 25,604 |
def tenure_type():
""" RESTful CRUD controller """
return s3_rest_controller(#rheader = s3db.stdm_rheader,
)
|
bfee3c2be579e1db6e8799b4a9d3156130b802a9
| 25,605 |
def _format_port(port):
"""
compute the right port type str
Arguments
-------
port: input/output port object
Returns
-------
list
a list of ports with name and type
"""
all_ports = []
for key in port:
one_port = {}
one_port['name'] = key
port_type = port[key]['type']
if isinstance(port_type, list):
types = []
for t in port_type:
type_name = t.__module__+'.'+t.__name__
types.append(type_name)
one_port['type'] = types
else:
type_name = port_type.__module__+'.'+port_type.__name__
one_port['type'] = [type_name]
all_ports.append(one_port)
return all_ports
|
2fa65686b6b764afc97a200a02baec65645c9879
| 25,606 |
import io
def proc_cgroups(proc='self'):
"""Read a process' cgroups
:returns:
``dict`` - Dictionary of all the process' subsystem and cgroups.
"""
assert isinstance(proc, int) or '/' not in proc
cgroups = {}
with io.open(_PROC_CGROUP.format(proc), 'r') as f:
for cgroup_line in f:
(_id, subsys, path) = cgroup_line.strip().split(':', 2)
cgroups[subsys] = path
return cgroups
|
95cb24cbbb4167dd2fa26ce36d78e5f532f10c1a
| 25,608 |
import csv
def load_csv_data(
data_file_name,
*,
data_module=DATA_MODULE,
descr_file_name=None,
descr_module=DESCR_MODULE,
):
"""Loads `data_file_name` from `data_module with `importlib.resources`.
Parameters
----------
data_file_name : str
Name of csv file to be loaded from `data_module/data_file_name`.
For example `'wine_data.csv'`.
data_module : str or module, default='sklearn.datasets.data'
Module where data lives. The default is `'sklearn.datasets.data'`.
descr_file_name : str, default=None
Name of rst file to be loaded from `descr_module/descr_file_name`.
For example `'wine_data.rst'`. See also :func:`load_descr`.
If not None, also returns the corresponding description of
the dataset.
descr_module : str or module, default='sklearn.datasets.descr'
Module where `descr_file_name` lives. See also :func:`load_descr`.
The default is `'sklearn.datasets.descr'`.
Returns
-------
data : ndarray of shape (n_samples, n_features)
A 2D array with each row representing one sample and each column
representing the features of a given sample.
    target : ndarray of shape (n_samples,)
        A 1D array holding target variables for all the samples in `data`.
        For example target[0] is the target variable for data[0].
    target_names : ndarray of shape (n_samples,)
A 1D array containing the names of the classifications. For example
target_names[0] is the name of the target[0] class.
descr : str, optional
Description of the dataset (the content of `descr_file_name`).
Only returned if `descr_file_name` is not None.
"""
with resources.open_text(data_module, data_file_name) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=int)
if descr_file_name is None:
return data, target, target_names
else:
assert descr_module is not None
descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)
return data, target, target_names, descr
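# Usage sketch mirroring the docstring example (assumes the packaged
# 'wine_data.csv' and 'wine_data.rst' exist in the default modules):
# data, target, target_names, descr = load_csv_data(
#     "wine_data.csv", descr_file_name="wine_data.rst"
# )
# data.shape  # -> (n_samples, n_features)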
|
3629dded45954c25e538c53b5c7bc5d0dfec0a39
| 25,609 |
def ptFromSudakov(sudakovValue):
"""Returns the pt value that solves the relation
Sudakov = sudakovValue (for 0 < sudakovValue < 1)
"""
norm = (2*CA/pi)
# r = Sudakov = exp(-alphas * norm * L^2)
# --> log(r) = -alphas * norm * L^2
# --> L^2 = log(r)/(-alphas*norm)
L2 = log(sudakovValue)/(-alphas * norm)
pt = ptHigh * exp(-sqrt(L2))
return pt
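# Worked sketch of the inversion (CA, alphas, ptHigh and pi/log/exp/sqrt are
# module-level names here; their values come from the surrounding code):
# pt = ptFromSudakov(0.5)
# # then exp(-alphas * (2*CA/pi) * log(ptHigh/pt)**2) should give back 0.5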
|
8ba504749f13ed1046799b5456d1f6f3c74bfc1e
| 25,610 |
def _set_lod_2(gml_bldg, length, width, height, bldg_center):
"""Adds a LOD 2 representation of the building based on building length,
width and height
alternative way to handle building position
Parameters
----------
gml_bldg : bldg.Building() object
A building object, where bldg is a reference to
`pyxb.bundles.opengis.citygml.building`.
length : float
length of the building
width : float
width of the building
height : float
height of the building
bldg_center : list
coordinates in the reference system of the building center
Returns
-------
gml_bldg : bldg.Building() object
Returns the modified building object
"""
boundary_surface = []
lod_2_solid = gml.SolidPropertyType()
lod_2_solid.Solid = gml.Solid_()
exterior_solid = gml.SurfacePropertyType()
composite_surface = gml.CompositeSurface()
bldg_center[0] -= length / 2
bldg_center[1] -= width / 2
# Ground surface
coords = [[bldg_center[0], bldg_center[1], bldg_center[2]],
[length + bldg_center[0], bldg_center[1], bldg_center[2]],
[length + bldg_center[0], width + bldg_center[1], bldg_center[2]],
[bldg_center[0], width + bldg_center[1], bldg_center[2]]]
composite_surface = _add_surface(composite_surface, coords)
composite_surface.surfaceMember[-1].Surface.id = gml_bldg.name[
0].value() + "_ground"
boundary_surface.append(bldg.BoundarySurfacePropertyType())
boundary_surface[-1].BoundarySurface = bldg.FloorSurface()
boundary_surface[-1].BoundarySurface = _add_gml_boundary(
boundary_surface[-1].BoundarySurface,
gml_bldg.name[0].value() + "_ground")
# Roof surface
coords = [[bldg_center[0], bldg_center[1], bldg_center[2] + height],
[length + bldg_center[0], bldg_center[1],
bldg_center[2] + height],
[length + bldg_center[0], width + bldg_center[1],
bldg_center[2] + height],
[bldg_center[0], width + bldg_center[1], bldg_center[2] + height]]
composite_surface = _add_surface(composite_surface, coords)
composite_surface.surfaceMember[-1].Surface.id = (gml_bldg.name[0].value() +
"_roof")
boundary_surface.append(bldg.BoundarySurfacePropertyType())
boundary_surface[-1].BoundarySurface = bldg.RoofSurface()
boundary_surface[-1].BoundarySurface = _add_gml_boundary(
boundary_surface[-1].BoundarySurface,
gml_bldg.name[0].value() + "_roof")
# Side a surface
coords = [[bldg_center[0], bldg_center[1], bldg_center[2]],
[length + bldg_center[0], bldg_center[1], bldg_center[2]],
[length + bldg_center[0], bldg_center[1],
bldg_center[2] + height],
[bldg_center[0], bldg_center[1], bldg_center[2] + height]]
composite_surface = _add_surface(composite_surface, coords)
composite_surface.surfaceMember[-1].Surface.id = (gml_bldg.name[0].value() +
"_a")
boundary_surface.append(bldg.BoundarySurfacePropertyType())
boundary_surface[-1].BoundarySurface = bldg.WallSurface()
boundary_surface[-1].BoundarySurface = _add_gml_boundary(
boundary_surface[-1].BoundarySurface,
gml_bldg.name[0].value() + "_a")
# Side b surface
coords = [[bldg_center[0], width + bldg_center[1], bldg_center[2]],
[length + bldg_center[0], width + bldg_center[1],
bldg_center[2]],
[length + bldg_center[0], width + bldg_center[1],
bldg_center[2] + height],
[bldg_center[0], width + bldg_center[1], bldg_center[2] + height]]
composite_surface = _add_surface(composite_surface, coords)
composite_surface.surfaceMember[-1].Surface.id = (gml_bldg.name[0].value() +
"_b")
boundary_surface.append(bldg.BoundarySurfacePropertyType())
boundary_surface[-1].BoundarySurface = bldg.WallSurface()
boundary_surface[-1].BoundarySurface = _add_gml_boundary(
boundary_surface[-1].BoundarySurface,
gml_bldg.name[0].value() + "_b")
# Side c surface
coords = [[bldg_center[0], bldg_center[1], bldg_center[2]],
[bldg_center[0], width + bldg_center[1], bldg_center[2]],
[bldg_center[0], width + bldg_center[1], bldg_center[2] + height],
[bldg_center[0], bldg_center[1], bldg_center[2] + height]]
composite_surface = _add_surface(composite_surface, coords)
composite_surface.surfaceMember[-1].Surface.id = (gml_bldg.name[0].value() +
"_c")
boundary_surface.append(bldg.BoundarySurfacePropertyType())
boundary_surface[-1].BoundarySurface = bldg.WallSurface()
boundary_surface[-1].BoundarySurface = _add_gml_boundary(
boundary_surface[-1].BoundarySurface,
gml_bldg.name[0].value() + "_c")
# Side d surface
coords = [[length + bldg_center[0], bldg_center[1], bldg_center[2]],
[length + bldg_center[0], width + bldg_center[1],
bldg_center[2]],
[length + bldg_center[0], width + bldg_center[1],
bldg_center[2] + height],
[length + bldg_center[0], bldg_center[1],
bldg_center[2] + height]]
composite_surface = _add_surface(composite_surface, coords)
composite_surface.surfaceMember[-1].Surface.id = (gml_bldg.name[0].value() +
"_d")
boundary_surface.append(bldg.BoundarySurfacePropertyType())
boundary_surface[-1].BoundarySurface = bldg.WallSurface()
boundary_surface[-1].BoundarySurface = _add_gml_boundary(
boundary_surface[-1].BoundarySurface,
gml_bldg.name[0].value() + "_d")
exterior_solid.Surface = composite_surface
lod_2_solid.Solid.exterior = exterior_solid
gml_bldg.lod2Solid = lod_2_solid
gml_bldg.boundedBy_ = boundary_surface
return gml_bldg
|
309f66319c5cce07adbcb456548b3c29f707d96c
| 25,611 |
def send_mail(subject, message, from_email, recipient_list, html_message='',
scheduled_time=None, headers=None, priority=PRIORITY.medium):
"""
Add a new message to the mail queue. This is a replacement for Django's
``send_mail`` core email method.
"""
subject = force_text(subject)
status = None if priority == PRIORITY.now else STATUS.queued
emails = []
for address in recipient_list:
emails.append(
OutgoingEmail.objects.create(
from_email=from_email, to=address, subject=subject,
message=message, html_message=html_message, status=status,
headers=headers, priority=priority, scheduled_time=scheduled_time
)
)
if priority == PRIORITY.now:
for email in emails:
email.dispatch()
return emails
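# Usage sketch (Django app context assumed; PRIORITY, STATUS and OutgoingEmail
# come from the surrounding package):
# emails = send_mail(
#     subject="Welcome",
#     message="Plain-text body",
#     from_email="noreply@example.com",
#     recipient_list=["user@example.com"],
#     priority=PRIORITY.now,  # dispatches immediately instead of queueing
# )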
|
a97103e5e56463170122252073ebcc873306c708
| 25,613 |
def proxy_a_distance(source_X, target_X):
"""
Compute the Proxy-A-Distance of a source/target representation
"""
nb_source = np.shape(source_X)[0]
nb_target = np.shape(target_X)[0]
train_X = np.vstack((source_X, target_X))
train_Y = np.hstack((np.zeros(nb_source, dtype=int), np.ones(nb_target, dtype=int)))
clf = svm.LinearSVC(random_state=0)
clf.fit(train_X, train_Y)
y_pred = clf.predict(train_X)
error = metrics.mean_absolute_error(train_Y, y_pred)
dist = 2 * (1 - 2 * error)
return dist
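# Quick sanity-check sketch (np, svm and metrics are module-level imports here);
# well-separated source/target features should push the distance towards 2:
# rng = np.random.RandomState(0)
# src = rng.normal(0.0, 1.0, size=(100, 5))
# tgt = rng.normal(3.0, 1.0, size=(100, 5))
# proxy_a_distance(src, tgt)  # close to 2.0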
|
fe0102cfd2a5a3cadb64a5ddfb7705e7b8440028
| 25,614 |
import json
def load_metadata(stock_model_name="BlackScholes", time_id=None):
"""
load the metadata of a dataset specified by its name and id
:return: dict (with hyperparams of the dataset)
"""
time_id = _get_time_id(stock_model_name=stock_model_name, time_id=time_id)
path = '{}{}-{}/'.format(training_data_path, stock_model_name, int(time_id))
with open('{}metadata.txt'.format(path), 'r') as f:
hyperparam_dict = json.load(f)
return hyperparam_dict
|
1171bf3a06327e907449872755315db8c34565c8
| 25,615 |
from typing import List
import torch
def evaluate(env: AlfEnvironment, algorithm: RLAlgorithm,
num_episodes: int) -> List[alf.metrics.StepMetric]:
"""Perform one round of evaluation.
Args:
env: the environment
algorithm: the training algorithm
num_episodes: number of episodes to evaluate
Returns:
a list of metrics from the evaluation
"""
batch_size = env.batch_size
env.reset()
time_step = common.get_initial_time_step(env)
algorithm.eval()
policy_state = algorithm.get_initial_predict_state(env.batch_size)
trans_state = algorithm.get_initial_transform_state(env.batch_size)
episodes_per_env = (num_episodes + batch_size - 1) // batch_size
env_episodes = torch.zeros(batch_size, dtype=torch.int32)
episodes = 0
metrics = [
alf.metrics.AverageReturnMetric(
buffer_size=num_episodes, example_time_step=time_step),
alf.metrics.AverageEpisodeLengthMetric(
example_time_step=time_step, buffer_size=num_episodes),
alf.metrics.AverageEnvInfoMetric(
example_time_step=time_step, buffer_size=num_episodes),
alf.metrics.AverageDiscountedReturnMetric(
buffer_size=num_episodes, example_time_step=time_step)
]
time_step = common.get_initial_time_step(env)
while episodes < num_episodes:
# For parallel play, we cannot naively pick the first finished `num_episodes`
        # episodes to estimate the average return (or other statistics) as it can be
# biased towards short episodes. Instead, we stick to using the first
# episodes_per_env episodes from each environment to calculate the
# statistics and ignore the potentially extra episodes from each environment.
invalid = env_episodes >= episodes_per_env
# Force the step_type of the extra episodes to be StepType.FIRST so that
# these time steps do not affect metrics as the metrics are only updated
# at StepType.LAST. The metric computation uses cpu version of time_step.
time_step.cpu().step_type[invalid] = StepType.FIRST
next_time_step, policy_step, trans_state = policy_trainer._step(
algorithm=algorithm,
env=env,
time_step=time_step,
policy_state=policy_state,
trans_state=trans_state,
metrics=metrics)
time_step.step_type[invalid] = StepType.FIRST
for i in range(batch_size):
if time_step.step_type[i] == StepType.LAST:
env_episodes[i] += 1
episodes += 1
policy_state = policy_step.state
time_step = next_time_step
env.reset()
return metrics
|
0218f1a38be8f897ac3b2a70036213877f5f7654
| 25,616 |
from pagure.hooks import BaseHook
def get_plugin_names(blacklist=None, without_backref=False):
"""Return the list of plugins names.
:arg blacklist: name or list of names to not return
:type blacklist: string or list of strings
:arg without_backref: whether or not to include hooks that
have backref "None"
:type without_backref: bool
:return: list of plugin names (strings)
"""
plugins = load("pagure.hooks", subclasses=BaseHook)
if not blacklist:
blacklist = []
elif not isinstance(blacklist, list):
blacklist = [blacklist]
output = [
plugin.name
for plugin in plugins
if plugin.name not in blacklist and (plugin.backref or without_backref)
]
# The default hook is not one we show
if "default" in output:
output.remove("default")
return sorted(output)
|
7f3b560334a5680fdcb4a47929613706bb699393
| 25,617 |
def is_autosync(*args):
"""
is_autosync(name, type) -> bool
is_autosync(name, tif) -> bool
Is the specified idb type automatically synchronized?
@param name (C++: const char *)
@param type (C++: const type_t *)
"""
return _ida_typeinf.is_autosync(*args)
|
0f7eacc9931897f5fc0f076d0e07e0f1e1e01bce
| 25,618 |
def scanboards(dirpath):
"""Scans the directory for board files and returns an array"""
print("Scanning for JSON board data files...", end = "")
files = [x for x in subfiles(dirpath) if x.endswith(".json") and not x.endswith("index.json")]
print("Found {} in \"{}\"".format(len(files), dirpath))
return files
|
9cfce78b06fef0b8f7ebaa3d1c5904dfd3e0ec56
| 25,619 |
def router_get_notification() -> dict:
"""Lista todas as configurações do BOT Telegram."""
logger.log('LOG ROTA', "Chamada rota /get_all.")
return {"configuracoes": TelegramNotifier.make_current_cfg_dict()}
|
46faf67e02d537de49616085a1bcbb30f3087805
| 25,620 |
def script_filter_maximum_value(config):
""" The scripting version of `filter_maximum_value`. This
function applies the filter to the entire directory (or single
file). It also adds the tags to the header file of each fits file
indicating the number of pixels filtered for this filter.
Parameters
----------
config : ConfigObj
The configuration object that is to be used for this
function.
Returns
-------
None
"""
# Extract the global configuration parameters, including
# the directory.
data_directory = core.config.extract_configuration(
config_object=config, keys=['data_directory'])
subfolder = core.config.extract_configuration(
config_object=config, keys=['subfolder'])
filter_tag_name = core.config.extract_configuration(
config_object=config, keys=['filter_tag_name'])
# Extract the run flag for this particular script.
run_flag = core.config.extract_configuration(
config_object=config, keys=['filter','run_filter_maximum_value'])
# Extract the filter programs configuration parameters.
maximum_value = core.config.extract_configuration(
config_object=config, keys=['filter','maximum_value'])
# The function that is being used to calculate the masks.
filter_function = mask.filter_maximum_value
# Compiling the arguments that the masking function uses.
filter_arguments = {'maximum_value':maximum_value}
# Create the filters from the directory.
mask.base.create_directory_filter_files(data_directory=data_directory,
filter_function=filter_function,
filter_arguments=filter_arguments,
filter_file_tag=filter_tag_name,
subfolder=subfolder,
run=run_flag)
# All done.
return None
|
8eccc2356c803d63c1ddfc7603e1dc784ccc49fe
| 25,621 |
import time
def pretty_date(d):
""" returns a html formatted pretty date """
special_suffixs = {1 : "st", 2 : "nd" , 3 : "rd", 21 : "st", 22 : "nd", 23 : "rd", 31 : "st"}
suffix = "th"
if d.tm_mday in special_suffixs:
suffix = special_suffixs[d.tm_mday]
suffix = "<sup>" + suffix + "</sup>"
day = time.strftime("%A", d)
month = time.strftime("%B", d)
return day + " the " + str(d.tm_mday) + suffix + " of " + month + ", " + str(d.tm_year)
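# Example (illustrative; `time` is imported above):
# pretty_date(time.strptime("2021-03-21", "%Y-%m-%d"))
# # -> 'Sunday the 21<sup>st</sup> of March, 2021'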
|
7d6675f115021ddd46b2a614e831c9fae8faf7ad
| 25,622 |
from datetime import datetime
import dateutil.parser
def update(model, gcs_bucket, gcs_object):
"""Updates the given GCS object with new data from the given model.
Uses last_modified to determine the date to get items from. Bases the
identity of entities in the GCS object on their 'id' field -- existing
entities for which new data is found will be replaced.
Parameters
----------
model : ndb.Model
gcs_bucket : str
gcs_object : str
"""
# If file doesn't exist, just dump
if not cloud_storage.exists(gcs_bucket, gcs_object):
LOG.info('No object to update, calling dump(...)')
return dump(model, gcs_bucket, gcs_object)
# Get preexisting items
transferred_items = cloud_storage.download_object(gcs_bucket, gcs_object)
LOG.info('{} items exist'.format(len(transferred_items)))
# Find the most recently modified one
    # `datetime` is imported directly above, so use it without the module prefix
    last_date = datetime(1, 1, 1)
for item in transferred_items:
modified_date = dateutil.parser.parse(item['last_modified'])
if modified_date > last_date:
last_date = modified_date
# Get all items modified after that date
LOG.info('Last date on record: {}'.format(last_date.isoformat()))
new_items_iter = model.query(model.last_modified > last_date).iter()
new_items = tuple(item.serializable() for item in new_items_iter)
new_items_by_id = {i['id']: i for i in new_items}
if new_items: # Found new items -- update existing items GCS
items_by_id = {i['id']: i for i in transferred_items}
items_by_id.update(new_items_by_id)
items = items_by_id.values()
LOG.info("Updating {n} items in {m} to {o}".format(n=len(new_items),
m=model._get_kind(),
o=gcs_object))
cloud_storage.upload_data(items, gcs_bucket, gcs_object)
else: # Nothing to update with.
LOG.info("No new items in {m} to append to {o}".format(m=model._get_kind(),
o=gcs_object))
|
66bde1371383f16c9449a3aec29e894e6a473d44
| 25,623 |
def member_requests_list(context, data_dict):
""" Show request access check """
return _only_registered_user()
|
c3ffdf798aabc80b3bd91160e9a580ff38c9540d
| 25,626 |
def get_conductivity(sw_tdep,mesh,rvec,ham_r,ndegen,avec,fill,temp_max,temp_min,tstep,sw_tau,idelta=1e-3,tau0=100):
"""
this function calculates conductivity at tau==1 from Boltzmann equation in metal
"""
def calc_Kn(eig,veloc,temp,mu,tau):
dfermi=0.25*(1.-np.tanh(0.5*(eig-mu)/temp)**2)/temp
#Kn=sum_k(v*v*(e-mu)^n*(-df/de))
K0=np.array([[(vk1*vk2*dfermi*tau).sum() for vk2 in veloc.T] for vk1 in veloc.T])
K1=np.array([[(vk1*vk2*(eig-mu)*dfermi*tau).sum() for vk2 in veloc.T] for vk1 in veloc.T])
K2=np.array([[(vk1*vk2*(eig-mu)**2*dfermi*tau).sum() for vk2 in veloc.T] for vk1 in veloc.T])
K0=comm.allreduce(K0,MPI.SUM)
K1=comm.allreduce(K1,MPI.SUM)
K2=comm.allreduce(K2,MPI.SUM)
return(K0,K1,K2)
if sw_unit:
        kb=scconst.physical_constants['Boltzmann constant in eV/K'][0] #the unit of temp is kBT[eV], so it needs to be converted from eV to K
        eC=scconst.e #electron charge, needed to convert eV to J (1eV = eC J)
tau_u=1.e-15 #unit of tau is sec. default of tau is 1fs
else:
kb=1.
eC=1.
tau_u=1.
itau0=1./tau0
gsp=(1.0 if with_spin else 2.0) #spin weight
Nk,count,k_mpi=gen_klist(mesh)
Vuc=sclin.det(avec)*1e-30 #unit is AA^3. Nk*Vuc is Volume of system.
ham=np.array([get_ham(k,rvec,ham_r,ndegen) for k in k_mpi])
eig=np.array([sclin.eigvalsh(h) for h in ham]).T/mass
veloc=np.array([get_vec(k,rvec,ham_r,ndegen,avec) for k in k_mpi])/mass
emin=comm.allreduce(eig.min(),MPI.MIN)
emax=comm.allreduce(eig.max(),MPI.MAX)
wlength=np.linspace(emin,emax,300)
tdf=np.array([[[(v1*v2*tau_u/((w-eig)**2+idelta**2)).sum() for w in wlength]
for v1 in veloc.T] for v2 in veloc.T])
tdf=gsp*comm.allreduce(tdf,MPI.SUM)/Nk
if rank==0:
f=open('tdf.dat','w')
for w,td in zip(wlength,tdf.T):
f.write('%7.3f '%w)
for i,d in enumerate(td):
for dd in d[:i+1]:
f.write('%10.3e '%(dd))
f.write('\n')
f.close()
if sw_tdep:
temp0=np.linspace(temp_min,temp_max,tstep)
else:
temp0=[temp_max]
for temp in temp0:
mu=calc_mu(eig,Nk,fill,temp)
if sw_tau==0:
tauw=eig*0+1.
elif sw_tau==1:
tauw=1./(itau0+(eig-mu)**2)
K0,K1,K2=calc_Kn(eig,veloc,temp,mu,tauw)
        sigma=gsp*tau_u*eC*K0/(Nk*Vuc) #sigma=e^2K0 (A/Vm) :1eC is cancelled with eV>J
#kappa=gsp*tau_u*kb*eC*K2/(temp*Nk*Vuc) #kappa=K2/T (W/Km) :eC(kb) appears with converting eV>J(eV>K)
kappa=gsp*tau_u*kb*eC*(K2-K1.dot(sclin.inv(K0).dot(K1)))/(temp*Nk*Vuc)
sigmaS=gsp*tau_u*kb*eC*K1/(temp*Nk*Vuc) #sigmaS=eK1/T (A/mK)
Seebeck=-kb*sclin.inv(K0).dot(K1)/temp #S=K0^(-1)K1/eT (V/K) :kb appears with converting eV>K
        Pertier=K1.dot(sclin.inv(K0)) #pi=K1K0^(-1)/e (V:J/C) :eC is cancelled with eV>J
PF=sigmaS.dot(Seebeck)
if rank==0:
'''
sigma,kappa,sigmaS consistent with boltzwann in cartesian coordinate.
but S is sign inverted. should we multiply by a minus?
Lorenz number of free electron is 2.44e-8(WOhmK^-2)
O(L)~1e-8
'''
print('temperature = %4.0d[K]'%int(temp/kb))
print('mu = %7.3f'%mu)
print('sigma matrix')
print(sigma.round(10))
print('kappa matrix')
print(kappa.round(10))
print('sigmaS matrix')
print(sigmaS.round(10))
print('Seebeck matrix')
print(Seebeck.round(10))
print('Pertier matrix')
print(Pertier.round(13))
print('Lorenz matrix')
print(kb*kappa/(sigma*temp))
print('Power Factor')
print(PF.round(10))
|
0304781bac6160b353a90e5bce061faa89075bc0
| 25,627 |
from typing import List
from typing import Union
import time
def time_match(
data: List,
times: Union[List[str], List[int], int, str],
conv_codes: List[str],
strptime_attr: str,
name: str,
) -> np.ndarray:
"""
Match times by applying conversion codes to filtering list.
Parameters
----------
data
Input data to perform filtering on
times
Times to match
conv_codes
If :obj:`times` contains strings, conversion codes to try passing to
:func:`time.strptime` to convert :obj:`times` to :class:`datetime.datetime`
strptime_attr
If :obj:`times` contains strings, the :class:`datetime.datetime` attribute to
finalize the conversion of strings to integers
name
Name of the part of a datetime to extract, used to produce useful error
messages.
Returns
-------
:class:`numpy.ndarray` of :obj:`bool`
Array where ``True`` indicates a match
Raises
------
ValueError
If input times cannot be converted understood or if input strings do not lead to
increasing integers (i.e. "Nov-Feb" will not work, one must use ["Nov-Dec",
"Jan-Feb"] instead)
"""
times_list = [times] if isinstance(times, (int, str)) else times
def conv_strs(strs_to_convert, conv_codes, name):
res = None
for conv_code in conv_codes:
try:
res = [
getattr(time.strptime(t, conv_code), strptime_attr)
for t in strs_to_convert
]
break
except ValueError:
continue
if res is None:
error_msg = "Could not convert {} '{}' to integer".format(
name, strs_to_convert
)
raise ValueError(error_msg)
return res
if isinstance(times_list[0], str):
to_delete = []
to_append = [] # type: List
for i, timeset in enumerate(times_list):
# ignore type as already established we're looking at strings
if "-" in timeset: # type: ignore
ints = conv_strs(timeset.split("-"), conv_codes, name) # type: ignore
if ints[0] > ints[1]:
error_msg = (
"string ranges must lead to increasing integer ranges,"
" {} becomes {}".format(timeset, ints)
)
raise ValueError(error_msg)
# + 1 to include last month
to_append += [j for j in range(ints[0], ints[1] + 1)]
to_delete.append(i)
        # delete in reverse order so earlier deletions do not shift later indices
        for i in sorted(to_delete, reverse=True):
            del times_list[i]
times_list = conv_strs(times_list, conv_codes, name)
times_list += to_append
return is_in(data, times_list)
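# Usage sketch (`is_in` is a module-level helper here); selecting months by name:
# time_match([1, 6, 11, 12], ["Nov-Dec", "Jan"], ["%b", "%B"], "tm_mon", "month")
# # -> array([ True, False,  True,  True])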
|
0480f5ca3e29ebcc4f44bef5a81db8fb36f78616
| 25,628 |
def find_best_input_size(sizes=[40]):
""" Returns the average and variance of the models """
accuracies = []
accuracy = []
t = []
sigma = []
time = []
#sizes = np.arange(5, 80, 5)
for size in sizes:
#for size in [80]:
accuracy = []
N = 20
for j in range(N):
            tf.keras.backend.clear_session()  # must be called; the bare attribute does nothing
accuracy_, _, _, _, _, _, t_ = train_model(size=size)
accuracy.append(accuracy_)
t.append(t_)
time.append(np.average(t))
accuracies.append(np.average(accuracy))
sigma.append(np.std(accuracy))
print("Average accuracy: " + str(np.average(accuracy)))
print("Standard deviation: " + str(np.std(accuracy)))
return accuracies, sigma, time
|
3d22441d07b44779cde6c4347669a435568f0378
| 25,629 |
import torch
def eval_acc(trainer, dataset="val"):
"""
"""
trainer.model.eval()
with torch.no_grad():
shot_count = 0
total_count = 0
for inputs,targets in trainer.val_dataset():
inputs = nested_to_cuda(inputs, trainer.device)
targets = nested_to_cuda(targets, trainer.device)
outputs = trainer.model(inputs)
pred = outputs[0]
shot = torch.sum(pred.argmax(1) == targets[0].view(-1))
shot_count = shot_count + shot.item()
total_count = total_count + targets[0].size(0)
acc = shot_count / total_count
trainer.logger.info("acc:%f" % acc)
return acc
|
452861ccb5805778d5dd0bc83226b73539b8aebb
| 25,630 |
def _floor(n, base=1):
"""Floor `n` to a multiple of `base`"""
return n // base * base
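# Examples:
assert _floor(27, 10) == 20
assert _floor(7) == 7
assert _floor(-3, 5) == -5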
|
49019e4aa925b4f77a7f13f9919d36948bd132cc
| 25,632 |
def fixed_timezone(offset): # type: (int) -> _FixedTimezone
"""
Return a Timezone instance given its offset in seconds.
"""
if offset in _tz_cache:
return _tz_cache[offset]
tz = _FixedTimezone(offset)
_tz_cache[offset] = tz
return tz
|
401303d1893bc2ab7bee19ba09161549a2cc7fb2
| 25,634 |
def getFactoriesInfo():
"""
    Returns a dictionary with information on how to create a Stitcher object from its factory
"""
return {'Stitcher':
{
'factory':'createStitcher'
}
}
|
75806002b1ada6bd1a87c9bde6b2e47f587d988d
| 25,635 |
from typing import Dict
from typing import List
from typing import Optional
def pending_observations_as_array(
pending_observations: Dict[str, List[ObservationFeatures]],
outcome_names: List[str],
param_names: List[str],
) -> Optional[List[np.ndarray]]:
"""Re-format pending observations.
Args:
        pending_observations: Mapping from metric name to a list of pending observation features.
outcome_names: List of outcome names.
param_names: List fitted param names.
Returns:
Filtered pending observations data, by outcome and param names.
"""
if len(pending_observations) == 0:
pending_array: Optional[List[np.ndarray]] = None
else:
pending_array = [np.array([]) for _ in outcome_names]
for metric_name, po_list in pending_observations.items():
# It is possible that some metrics attached to the experiment should
# not be included in pending features for a given model. For example,
# if a model is fit to the initial data that is missing some of the
# metrics on the experiment or if a model just should not be fit for
# some of the metrics attached to the experiment, so metrics that
# appear in pending_observations (drawn from an experiment) but not
            # in outcome_names (metrics, expected for the model) are filtered out.
if metric_name not in outcome_names:
continue
pending_array[outcome_names.index(metric_name)] = np.array(
[[po.parameters[p] for p in param_names] for po in po_list]
)
return pending_array
|
bc9bfff51b991b413b5861f55c8b0f55331ab763
| 25,636 |
def inverted_conditional_planar(input_dim, context_dim, hidden_dims=None):
"""
A helper function to create a
:class:`~pyro.distributions.transforms.ConditionalPlanar` object that takes care
of constructing a dense network with the correct input/output dimensions.
:param input_dim: Dimension of input variable
:type input_dim: int
:param context_dim: Dimension of context variable
:type context_dim: int
:param hidden_dims: The desired hidden dimensions of the dense network. Defaults
to using [input_dim * 10, input_dim * 10]
:type hidden_dims: list[int]
"""
if hidden_dims is None:
hidden_dims = [input_dim * 10, input_dim * 10]
nn = DenseNN(context_dim, hidden_dims, param_dims=[1, input_dim, input_dim])
return InvertedConditionalPlanar(nn)
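# Usage sketch (assumes pyro's DenseNN and the InvertedConditionalPlanar class
# are imported in this module):
# transform = inverted_conditional_planar(input_dim=5, context_dim=3)
# # the returned transform is conditioned on a 3-dimensional context vector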
|
8bf5ae5dd6d8743a3eb1506b26dec5cf51af2bde
| 25,638 |
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog'.
Returns:
    category_index: a dict containing the same entries as categories, but keyed
      by each category's position (index) in the input list.
"""
category_index = {}
for index, cat in enumerate(categories):
category_index[index] = cat
return category_index
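# Example (note the keys are list positions, not the 'id' fields):
cats = [{'id': 7, 'name': 'cat'}, {'id': 9, 'name': 'dog'}]
assert create_category_index(cats) == {0: {'id': 7, 'name': 'cat'},
                                       1: {'id': 9, 'name': 'dog'}}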
|
226a39189d4203e2861bbba7334d5b8bbaa3b7df
| 25,639 |
import torch
def n_step_returns(q_values, rewards, kls, discount=0.99):
"""
Calculates all n-step returns.
Args:
q_values (torch.Tensor): the Q-value estimates at each time step [time_steps+1, batch_size, 1]
rewards (torch.Tensor): the rewards at each time step [time_steps, batch_size, 1]
kls (torch.Tensor): the scaled kl divergences at each time step [time_steps, batch_size, 1]
discount (float): the temporal discount factor
"""
discounts = torch.cat([(discount*torch.ones_like(q_values[:1]))**i for i in range(rewards.shape[0])], 0)
rewards[1:] = rewards[1:] - kls[:-1]
discounted_returns = torch.cumsum(discounts * rewards, dim=0)
terminal_values = discount * discounts * (q_values[1:] - kls)
# return torch.cat([q_values[:1], discounted_returns], dim=0)
return torch.cat([q_values[:1], discounted_returns + terminal_values], dim=0)
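# Shape sketch (placeholder values): 4 time steps, batch of 2.
# q = torch.zeros(5, 2, 1)   # [time_steps+1, batch_size, 1]
# r = torch.ones(4, 2, 1)
# kl = torch.zeros(4, 2, 1)
# n_step_returns(q, r, kl, discount=0.9).shape  # -> torch.Size([5, 2, 1])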
|
3bbd6026046328dc8ef63ab3e871f6c47636cb80
| 25,640 |
import random
def random_split_exact(iterable, split_fractions=None):
"""Randomly splits items into multiple sample lists according to the given
split fractions.
The number of items in each sample list will be given exactly by the
specified fractions.
Args:
iterable: a finite iterable
split_fractions: an optional list of split fractions, which should sum
to 1. By default, [0.5, 0.5] is used
Returns:
sample_lists: a list of lists, of the same length as `split_fractions`.
Each sub-list contains items from the original iterable
"""
split_fractions = _validate_split_fractions(split_fractions)
shuffled = list(iterable)
random.shuffle(shuffled)
return _split_in_order(shuffled, split_fractions)
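# Usage sketch (_validate_split_fractions and _split_in_order are helpers in
# the same module):
# train, test = random_split_exact(range(10), split_fractions=[0.8, 0.2])
# # len(train) == 8, len(test) == 2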
|
2b7ae86e55b9be225e94cfc983295beeb3ed08cf
| 25,642 |
def computeMaskIntra(inputFilename, outputFilename, m=0.2, M=0.9, cc=1):
""" Depreciated, see compute_mask_intra.
"""
print "here we are"
return compute_mask_intra(inputFilename, outputFilename,
m=m, M=M, cc=cc)
|
0eaf8b8845c12b1fc90cb032881dacf53a2c7d12
| 25,644 |
def read_space_delimited(filename, skiprows=None, class_labels=True):
"""Read an space-delimited file
skiprows: list of rows to skip when reading the file.
Note: we can't use automatic comment detection, as
`#` characters are also used as data labels.
class_labels: boolean
if true, the last column is treated as the class label
"""
with open(filename, 'r') as fd:
df = pd.read_table(fd, skiprows=skiprows, skip_blank_lines=True, comment=None, header=None, sep=' ', dtype=str)
# targets are last column. Data is everything else
if class_labels is True:
target = df.loc[:, df.columns[-1]].values
data = df.loc[:, df.columns[:-1]].values
else:
data = df.values
target = np.zeros(data.shape[0])
return data, target
|
be25b4f6c3c775f12fdfef7f334b4886c85a514e
| 25,645 |
def get_gas_price(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get network gas price
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Network gas price
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
"""
method = 'hmy_gasPrice'
try:
return int(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'], 16)
except TypeError as e:
raise InvalidRPCReplyError(method, endpoint) from e
|
b7f18a5a5044d8aeee7a63b702b01944cbff597b
| 25,646 |