content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
def failed_jobs(username, root_wf_id, wf_id):
"""
Get a list of all failed jobs of the latest instance for a given workflow.
"""
dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)
args = __get_datatables_args()
total_count, filtered_count, failed_jobs_list = dashboard.get_failed_jobs(
wf_id, **args
)
for job in failed_jobs_list:
job.exec_job_id = '<a href="' + url_for(
'.job',
root_wf_id=root_wf_id,
wf_id=wf_id,
job_id=job.job_id,
job_instance_id=job.job_instance_id
) + '">' + job.exec_job_id + '</a>'
job.stdout = '<a target="_blank" href="' + url_for(
'.stdout',
root_wf_id=root_wf_id,
wf_id=wf_id,
job_id=job.job_id,
job_instance_id=job.job_instance_id
) + '">Application Stdout/Stderr</a>'
job.stderr = '<a target="_blank" href="' + url_for(
'.stderr',
root_wf_id=root_wf_id,
wf_id=wf_id,
job_id=job.job_id,
job_instance_id=job.job_instance_id
) + '">Condor Stderr/Pegasus Lite Log</a>'
return render_template(
'workflow/jobs_failed.xhr.json',
count=total_count,
filtered=filtered_count,
jobs=failed_jobs_list,
table_args=args
) | 5d755f6e84f7c406174fb1a7bdb8de64a6e0c049 | 3,200 |
def get_vm_types(resources):
"""
Get all vm_types for a list of Heat resources. Note that
some of the retrieved values may be invalid.
"""
vm_types = []
for v in resources.values():
vm_types.extend(list(get_vm_types_for_resource(v)))
return set(vm_types) | f13e8860d5b25cd03360e859527d61a06d103f53 | 3,201 |
import os
def list_files(directory, suffix='.nc'):
"""
Return a list of all the files with the specified suffix in the submission
directory structure and sub-directories.
:param str directory: The root directory of the submission
:param str suffix: The suffix of the files of interest
:returns: A list of absolute filepaths
"""
nc_files = []
dir_files = os.listdir(directory)
for filename in dir_files:
file_path = os.path.join(directory, filename)
if os.path.isdir(file_path):
nc_files.extend(list_files(file_path, suffix))
elif file_path.endswith(suffix):
nc_files.append(file_path)
return nc_files | ba9a00ba749930120cfa3677771a24181bf58685 | 3,202 |
def interface_names(obj):
"""
Return: a list of interface names to which `obj' is conformant.
The list begins with `obj' itself if it is an interface.
Names are returned in depth-first order, left to right.
"""
return [o.__name__ for o in interfaces(obj)] | 99aad05e14daeb13ed7f19599684acc8b324df84 | 3,203 |
import six
def add_basic(token):
"""For use with Authorization headers, add "Basic "."""
if token:
return (u"Basic " if isinstance(token, six.text_type) else b"Basic ") + token
else:
return token | cd579e77e243fdfba0853a87087e45cf58bcc6f2 | 3,204 |
import json
import requests
def updateUser(token, leaderboard=None, showUsername=None, username=None):
"""
Update user account information.
Parameters:
token: Authentication token.
leaderboard: True to show the user's profit on the leaderboard.
showUsername: True to show the username on LN Markets public data.
username: Username to display.
"""
headers = {
'content-type': "application/json",
'accept': "application/json",
'authorization': f"Bearer {token}",
}
payloadDict = dict()
if showUsername is not None:
payloadDict['show_username'] = showUsername
if leaderboard is not None:
payloadDict['show_leaderboard'] = leaderboard
if username is not None:
payloadDict['username'] = username
payload = json.dumps(payloadDict)
userInfo = requests.put(
APIUrls.lnapi+APIUrls.userUrl,
data=payload,
headers=headers,
)
if userInfo.status_code == 200:
return userInfo.json()
else:
raise RuntimeError(
'Unable to update user information:\n'
f'{userInfo.text}'
) | 9b953256b85327411445729b574cf9b79750e735 | 3,205 |
def init_validator(required, cls, *additional_validators):
"""
Create an attrs validator based on the cls provided and required setting.
:param bool required: whether the field is required in a given model.
:param cls: the expected class type of object value.
:return: attrs validator chained correctly (e.g. optional(instance_of))
"""
validator = validators.instance_of(cls)
if additional_validators:
additional_validators = list(additional_validators)
additional_validators.append(validator)
validator = composite(*additional_validators)
return validator if required else validators.optional(validator) | 924b5ff6e77d38c989eef187498f774a2322ed48 | 3,206 |
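A minimal usage sketch (hedged: it assumes the module-level `validators` is `attr.validators` and that `composite` chains validators; no additional validators are passed here, so only `instance_of`/`optional` are exercised). The `Person` class and its fields are illustrative:
import attr
from attr import validators  # assumed to be the `validators` used above

@attr.s
class Person:
    # required field -> instance_of(str)
    name = attr.ib(validator=init_validator(True, str))
    # not required -> optional(instance_of(int)), so None is allowed
    age = attr.ib(default=None, validator=init_validator(False, int))

Person(name="Ada")   # ok, age may stay None
Person(name=42)      # raises TypeError from the instance_of(str) validator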
import requests
def next_search(request, *args, **kwargs):
"""
Handle search requests
:param request:
:return:
"""
server = FhirServerUrl()
in_fmt = "json"
get_fmt = get_format(request.GET)
if settings.DEBUG:
print("Server:", server)
print("Kwargs:",kwargs)
context = {'display':"Search",
'name': "Search",
'server': server,
'in_fmt': in_fmt,
'get_fmt': get_fmt,
'template': 'v1api/search.html',
}
request_string = "?"
for item in request.GET:
request_string += item +"=" + request.GET[item] +"&"
# strip the trailing '&' left by the loop above
if request_string[-1] == "&":
request_string = request_string[:-1]
if not "patient=Patient/" in request_string:
try:
xwalk = Crosswalk.objects.get(user=request.user)
patient_id = xwalk.fhir_url_id
request_string += "&patient=Patient/"+patient_id
except Crosswalk.DoesNotExist:
return kickout_404("ID for this user not found:%s" % request.user)
if settings.DEBUG:
print("Gets:", request_string)
try:
r = requests.get(server+request_string)
context = process_page(request, r, context)
return publish_page(request, context)
except requests.ConnectionError:
print("Whoops - Problem connecting to FHIR Server")
messages.error(request,
"FHIR Server is unreachable. "
"Are you on the CMS Network?")
return render_to_response(context['template'],
RequestContext(request, context, )) | 47316d3a127a0c31dfb8ce10b0f8c4465bdfd8ba | 3,207 |
def ShiftRight(x, **unused_kwargs):
"""Layer to shift the tensor to the right by padding on axis 1."""
if not isinstance(x, (list, tuple)): # non-chunked inputs
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[1] = (1, 0) # Padding on axis=1
padded = np.pad(x, pad_widths, mode='constant')
return padded[:, :-1]
# Handling chunked inputs. Recall that the list of chunks represents a big
# sequence (the concatenation of the chunks). We want to shift that sequence,
# so we put a 0 in the beginning of the first chunk and the last element of
# that chunk is used as the new first element of the next chunk, and so on.
padded = []
last_value = np.zeros_like(x[0][:, -1])
for chunk in x:
padded_chunk = np.concatenate([last_value[:, np.newaxis], chunk], axis=1)
last_value = chunk[:, -1]
padded.append(padded_chunk[:, :-1])
return padded | ec5265b5937e3e90e2c3267b6501b008ac7090e5 | 3,208 |
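A small traced example of the chunked branch above (illustrative values; `np` is assumed to be the numpy-style backend the layer already uses):
import numpy as np

chunks = [np.array([[1, 2, 3]]), np.array([[4, 5, 6]])]  # one sequence split into two chunks
shifted = ShiftRight(chunks)
# shifted[0] -> array([[0, 1, 2]])  (a zero is pushed in at the front)
# shifted[1] -> array([[3, 4, 5]])  (last element of chunk 0 becomes first element of chunk 1)
# i.e. the concatenated sequence [1 2 3 4 5 6] is shifted right to [0 1 2 3 4 5]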
import os
def connect_kafka_producer():
"""Return a MSK client to publish the streaming messages."""
# Use a global variable so Lambda can reuse the persisted client on future invocations
global kafka_client
if kafka_client is None:
logger.debug('Creating new Kafka client.')
try:
kafka_client = KafkaProducer(bootstrap_servers=os.environ['MSK_BOOTSTRAP_SRV'])
except Exception as ex:
logger.error('Failed to create new Kafka client: {}'.format(ex))
send_sns_alert(str(ex))
raise
return kafka_client | c0dc1c98881dd6452fdf8359aec11f3e6cf8b71a | 3,209 |
def empty_record():
"""Create an empty record."""
record = dump_empty(Marc21RecordSchema)
record["metadata"] = "<record> <leader>00000nam a2200000zca4500</leader></record>"
record["is_published"] = False
record["files"] = {"enabled": True}
return record | 7797e1bf0ade98a2400daff1f7937b7af2da280d | 3,210 |
def illuminanceToPhotonPixelRate(illuminance,
objective_numerical_aperture=1.0,
illumination_wavelength=0.55e-6,
camera_pixel_size=6.5e-6,
objective_magnification=1,
system_magnification=1,
sample_quantum_yield=1.,
**kwargs):
"""
Function which converts source illuminance and microscope parameters to
photons / px / s.
Based heavily on the publication:
"When Does Computational Imaging Improve Performance?,"
O. Cossairt, M. Gupta and S.K. Nayar,
IEEE Transactions on Image Processing,
Vol. 22, No. 2, pp. 447–458, Aug. 2012.
However, this function implements the same result for
microscopy, replacing f/# with NA, removing reflectance,
and including magnification.
Args:
illuminance: Photometric source illuminance, lux
objective_numerical_aperture: Objective numerical aperture
illumination_wavelength: Illumination wavelength, m
camera_pixel_size: Pixel size of the detector, m
objective_magnification: Magnification of the objective
system_magnification: Additional system (relay) magnification
sample_quantum_yield: Quantum yield of the sample
Returns:
Photon rate at the camera, photons / px / s.
"""
# Conversion factor from radiometric to photometric coordinates
# https://www.thorlabs.de/catalogPages/506.pdf
K = 1 / 680
# Reduced Planck constant (the commented-out value is Planck's constant h)
# h = 6.626176e-34
h_bar = 1.054572e-34
# Speed of light
c = 2.9979e8
# Constant term
const = K * illumination_wavelength / h_bar / c
# Calculate photon_pixel_rate
photon_pixel_rate = sample_quantum_yield * const * (objective_numerical_aperture ** 2) * illuminance * (camera_pixel_size / (system_magnification * objective_magnification)) ** 2
# Return
return photon_pixel_rate | cbbb2f6bdce7592f997b7ab3784c15beb2b846b1 | 3,211 |
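Restated as an equation, this is the formula the code above implements, with E_v the illuminance in lux, QY the sample quantum yield, NA the objective numerical aperture, lambda the illumination wavelength, p the camera pixel size, and M_obj, M_sys the two magnifications:
\text{photon\_pixel\_rate}
  = QY \cdot \frac{K\,\lambda}{\hbar\,c} \cdot \mathrm{NA}^2 \cdot E_v
    \cdot \left(\frac{p}{M_\mathrm{sys}\,M_\mathrm{obj}}\right)^{2},
\qquad K = \tfrac{1}{680}\ \mathrm{W/lm}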
def stop_tuning(step):
""" stop tuning the current step method """
if hasattr(step, 'tune'):
step.tune = False
elif hasattr(step, 'methods'):
step.methods = [stop_tuning(s) for s in step.methods]
return step | 45e02b8d3ec86ceda97de69bbc730aa62affb06d | 3,212 |
import json
def assemble_english():
"""Assemble each statement into """
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
sentences = {}
for st in stmts:
enga = EnglishAssembler()
enga.add_statements([st])
model_str = enga.make_model()
sentences[st.uuid] = model_str
res = {'sentences': sentences}
return res | 24c267e2763198056e275feda1e81ef1bf280bdb | 3,213 |
def schema_class(classname, schema, schemarepr=None, basename='SchemaBase'):
"""Generate code for a schema class
Parameters
----------
classname : string
The name of the class to generate
schema : dict
The dictionary defining the schema class
basename : string (default: "SchemaBase")
The name of the base class to use in the class definition
schemarepr : CodeSnippet or object, optional
An object whose repr will be used in the place of the explicit schema.
This can be useful, for example, when the generated code should reference
a predefined schema object. The user must ensure that the schema within
the evaluated code is identical to the schema used to generate the code.
"""
return SCHEMA_CLASS_TEMPLATE.format(
classname=classname,
basename=basename,
schema=schema if schemarepr is None else schemarepr,
docstring=docstring(classname, schema, indent=4),
init_code=init_code(classname, schema, indent=4)
) | 2f497de54205e5c180805d77638c9ffe342f76c8 | 3,214 |
import requests
def orthology_events(ids='R-HSA-6799198,R-HSA-168256,R-HSA-168249', species='49633'):
"""
Reactome uses the set of manually curated human reactions to computationally infer reactions in
twenty evolutionarily divergent eukaryotic species for which high-quality whole-genome sequence
data are available, and hence a comprehensive and high-quality set of protein predictions exists.
Thus, this method retrieves the orthologies for any given set of events or entities in the specified species.
:param ids: The events identifiers for which the orthology is requested
:param species: The species id for which the orthology is requested
:return: Json dictionary object of the orthologies of a given set of events or entities
"""
headers = {
'accept': 'application/json',
'content-type': 'text/plain',
}
data = ids
url = 'https://reactome.org/ContentService/data/orthologies/ids/species/%s' % species
try:
response = requests.post(url=url, headers=headers, data=data)
except requests.exceptions.ConnectionError as e:
print(e)
return None
if response.status_code == 200:
return response.json()
else:
print('Status code returned a value of %s' % response.status_code) | 8a75e2bc9af34358164492d1d2b2d8154d4e696e | 3,215 |
def judge(name):
"""
Return some sort of score for automatically ranking names based on all the
features we can extract so far.
I guess we'll just add the scores * weights up for now.
"""
score = 0
for scoreID, scorer, weight in weights:
subscore = scorer(name)
score += subscore * weight
name.scores[scoreID] = subscore
name.score = score
return score | 34811f49fc8fe6c88ef31978702cacddbacd5314 | 3,216 |
import re
def parse_year(inp, option='raise'):
"""
Attempt to parse a year out of a string.
Parameters
----------
inp : str
String from which year is to be parsed
option : str
Return option:
- "bool" will return True if year is found, else False.
- Return year int / raise a RuntimeError otherwise
Returns
-------
out : int | bool
Year int parsed from inp,
or boolean T/F (if found and option is bool).
Examples
--------
>>> year_str = "NSRDB_2018.h5"
>>> parse_year(year_str)
2018
>>> year_str = "NSRDB_2018.h5"
>>> parse_year(year_str, option='bool')
True
>>> year_str = "NSRDB_TMY.h5"
>>> parse_year(year_str)
RuntimeError: Cannot parse year from NSRDB_TMY.h5
>>> year_str = "NSRDB_TMY.h5"
>>> parse_year(year_str, option='bool')
False
"""
# char leading year cannot be 0-9
# char trailing year can be end of str or not 0-9
regex = r".*[^0-9]([1-2][0-9]{3})($|[^0-9])"
match = re.match(regex, inp)
if match:
out = int(match.group(1))
if 'bool' in option:
out = True
else:
if 'bool' in option:
out = False
else:
raise RuntimeError('Cannot parse year from {}'.format(inp))
return out | a91efb0614e7d0ad6753118f9b4efe8c3b40b4e2 | 3,217 |
def retry_import(e, **kwargs):
"""
When an exception occurs during channel/content import, if
* there is an Internet connection error or timeout error,
or HTTPError where the error code is one of the RETRY_STATUS_CODE,
return True to retry the file transfer
* the file does not exist on the server or disk, skip the file and return False.
This only applies to content import not channel import.
* otherwise, raise the exception.
return value:
* True - needs retry.
* False - file is skipped. Does not need retry.
"""
skip_404 = kwargs.pop("skip_404")
if (
isinstance(e, ConnectionError)
or isinstance(e, Timeout)
or isinstance(e, ChunkedEncodingError)
or (isinstance(e, HTTPError) and e.response.status_code in RETRY_STATUS_CODE)
or (isinstance(e, SSLERROR) and "decryption failed or bad record mac" in str(e))
):
return True
elif skip_404 and (
(isinstance(e, HTTPError) and e.response.status_code == 404)
or (isinstance(e, OSError) and e.errno == 2)
):
return False
else:
raise e | b125841dff7154352b2031013ffa7058242e4974 | 3,218 |
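A hedged sketch of the retry loop this helper is written for; `transfer_with_retries`, `transfer_file` and `max_retries` are hypothetical, only the retry_import() contract above is taken from the source:
def transfer_with_retries(transfer_file, max_retries=5, skip_404=True):
    """Call transfer_file() until it succeeds, is skipped, or retries run out."""
    for attempt in range(max_retries):
        try:
            return transfer_file()
        except Exception as e:
            if retry_import(e, skip_404=skip_404):
                continue   # transient network problem: retry
            return None    # missing file (404 / errno 2): skip it
    raise RuntimeError("file transfer failed after %d attempts" % max_retries)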
def cvCloneMat(*args):
"""cvCloneMat(CvMat mat) -> CvMat"""
return _cv.cvCloneMat(*args) | 84dc4f59d29580477b7ded41f26d95011b2804b3 | 3,219 |
from typing import Callable, List
import time
def run_episode(kwargs) -> List[Trajectory]:
"""
Runs a single episode and collects the trajectories of each agent
"""
total_controller_time = 0
env_dict: Callable = kwargs.get("env_dict")
obs_builder = kwargs.get("obs_builder")
controller_creator: Callable = kwargs.get("controller_creator")
episode_id: int = kwargs.get("episode_id")
max_episode_length: int = kwargs.get("max_episode_length", 1000)
render: bool = kwargs.get("render", False)
# Create and Start Environment
_env = load_env(env_dict, obs_builder_object=obs_builder)
obs, info = _env.reset(regenerate_rail=False, regenerate_schedule=True, )
score = 0
_trajectories = [Trajectory() for _ in _env.get_agent_handles()]
# Create and Start Controller
controller: AbstractController = controller_creator()
start = time.time()
controller.start_of_round(obs=obs, env=_env)
total_controller_time += time.time() - start
if render:
env_renderer = RenderTool(_env)
env_renderer.reset()
for step in range(max_episode_length):
start = time.time()
action_dict, processed_obs = controller.act(observation=obs)
total_controller_time += time.time() - start
next_obs, all_rewards, done, info = _env.step(action_dict)
if render:
env_renderer.render_env(show=True, show_observations=True, show_predictions=False)
# Save actions and rewards for each agent
[_trajectories[agent_handle].add_row(
state=processed_obs[agent_handle],
action=action_dict[agent_handle],
reward=all_rewards[agent_handle],
done=done[agent_handle])
for agent_handle in _env.get_agent_handles()]
score += sum(all_rewards)
obs = next_obs.copy()
if done['__all__']:
break
if render:
env_renderer.close_window()
# print(f"\nController took a total time of: {total_controller_time} seconds", flush=True)
return _trajectories | 91b64a8df57e1fc47ffecc184b0473d633b545c4 | 3,220 |
async def _reverse_proxy_handler(request: web.Request) -> web.Response:
"""
- Adds auth layer
- Adds access layer
- Forwards request to catalog service
SEE https://gist.github.com/barrachri/32f865c4705f27e75d3b8530180589fb
"""
user_id = request[RQT_USERID_KEY]
# path & queries
backend_url = to_backend_service(
request.rel_url,
request.app[f"{__name__}.catalog_origin"],
request.app[f"{__name__}.catalog_version_prefix"],
)
# FIXME: hack
if "/services" in backend_url.path:
backend_url = backend_url.update_query({"user_id": user_id})
logger.debug("Redirecting '%s' -> '%s'", request.url, backend_url)
# body
raw = None
if request.can_read_body:
raw: bytes = await request.read()
# injects product discovered by middleware in headers
fwd_headers = request.headers.copy()
product_name = request[RQ_PRODUCT_KEY]
fwd_headers.update({X_PRODUCT_NAME_HEADER: product_name})
# forward request
return await _request_catalog(
request.app, request.method, backend_url, fwd_headers, raw
) | f26a4410ab43dd9d3c12f3490f6dfb9fb2da234a | 3,221 |
def get_data(request: Request):
"""
Get the data page.
Parameters
----------
request : Request
The request object.
Returns
-------
HTMLResponse
The data page.
"""
return templates.TemplateResponse("data.html", {"request": request}) | 4a44df5122f9db9009a769d4d9bec99d924fb0f7 | 3,222 |
def remove_last_measurements(dag_circuit, perform_remove=True):
"""Removes all measurements that occur as the last operation
on a given qubit for a DAG circuit. Measurements that are followed by
additional gates are untouched.
This operation is done in-place on the input DAG circuit if perform_remove=True.
Parameters:
dag_circuit (qiskit.dagcircuit._dagcircuit.DAGCircuit): DAG circuit.
perform_remove (bool): Whether to perform removal, or just return node list.
Returns:
list: List of all measurements that were removed.
"""
removed_meas = []
try:
meas_nodes = dag_circuit.get_named_nodes('measure')
except DAGCircuitError:
return removed_meas
for idx in meas_nodes:
_, succ_map = dag_circuit._make_pred_succ_maps(idx)
if len(succ_map) == 2:
# All successors of the measurement are outputs, one for qubit and one for cbit
# (As opposed to more gates being applied), and it is safe to remove the
# measurement node and add it back after the swap mapper is done.
removed_meas.append(dag_circuit.multi_graph.node[idx])
if perform_remove:
dag_circuit._remove_op_node(idx)
return removed_meas | 858a16c33de67f835cf32535f2e69f9c144d6e25 | 3,223 |
import urllib3
import certifi
import sys
def get_html(url):
"""
Given a URL, will return the HTML using urllib3.
:param url: The url to extract the HTML from
:return: If extracted successfully, the HTML is returned. If there is a failure, a message with the HTTP status is returned. If an exception is thrown, "-1" followed by a description of the error is returned
"""
try:
# urllib3.disable_warnings()
# Try the new certifi.where() function first, but it sometimes fails,
# so fall back to the old_where() function if it does
# Read more: https://github.com/certifi/python-certifi#usage
try:
http = urllib3.PoolManager(
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where()
)
except Exception:
http = urllib3.PoolManager(
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.old_where()
)
r = http.request('GET', url, timeout=5.0)
if str(r.status).startswith("2"):
html = r.data.decode("utf-8")
return html
else:
return "Failed to get html, status: " + str(r.status)
except Exception as e:
sys.stdout.write(str(e))
return "-1: " + str(e) | 031a444d28674593b9e6994b6ddcae7dd2c4b8cd | 3,224 |
def J(*args, **kwargs):
"""Wrapper around jsonify that sets the Content-Type of the response to
application/vnd.api+json.
"""
response = jsonify(*args, **kwargs)
response.mimetype = "application/vnd.api+json"
return response | 714537b180cab60b7ad614018fa551020aeee292 | 3,225 |
import os
import sys
def readable_dir(prospective_dir):
""" check if dir is exist or acessable"""
if not os.path.isdir(prospective_dir):
sys.exit("{} is not a valid path".format(prospective_dir))
if os.access(prospective_dir, os.R_OK):
return prospective_dir
else:
sys.exit("{} is not a readable dir".format(prospective_dir)) | 8c8ea6928605baa3dfb224c258102b2d263932cd | 3,226 |
def is_gzipped(filename):
""" Returns True if the target filename looks like a GZIP'd file.
"""
with open(filename, 'rb') as fh:
return fh.read(2) == b'\x1f\x8b' | b1afb5b9cddc91fbc304392171f04f4b018fa929 | 3,227 |
import os
def load_data(loc='./data/'):
"""
Load the SICK semantic-relatedness dataset
"""
trainA, trainB, devA, devB, testA, testB = [],[],[],[],[],[]
trainS, devS, testS = [],[],[]
with open(os.path.join(loc, 'sick_train.txt'), 'r') as f:
for line in f:
text = line.strip().split('\t')
trainA.append(text[0])
trainB.append(text[1])
trainS.append(text[2])
with open(os.path.join(loc, 'sick_dev.txt'), 'r') as f:
for line in f:
text = line.strip().split('\t')
devA.append(text[0])
devB.append(text[1])
devS.append(text[2])
with open(os.path.join(loc, 'sick_test.txt'), 'r') as f:
for line in f:
text = line.strip().split('\t')
testA.append(text[0])
testB.append(text[1])
testS.append(text[2])
trainS = [float(s) for s in trainS]
devS = [float(s) for s in devS]
testS = [float(s) for s in testS]
return [trainA, trainB], [devA, devB], [testA, testB], [trainS, devS, testS] | 58c15084e3874fa0cdda4cce3b701b18a2529634 | 3,228 |
def tag_helper(tag, items, locked=True, remove=False):
""" Simple tag helper for editing a object. """
if not isinstance(items, list):
items = [items]
data = {}
if not remove:
for i, item in enumerate(items):
tagname = '%s[%s].tag.tag' % (tag, i)
data[tagname] = item
if remove:
tagname = '%s[].tag.tag-' % tag
data[tagname] = ','.join(items)
data['%s.locked' % tag] = 1 if locked else 0
return data | 27500df099824fff1d93afbe7649d42141ffa9c1 | 3,229 |
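Two calls traced through the code above (the 'genre' tag is just an example name):
tag_helper('genre', ['Action', 'Comedy'])
# -> {'genre[0].tag.tag': 'Action', 'genre[1].tag.tag': 'Comedy', 'genre.locked': 1}

tag_helper('genre', ['Action'], locked=False, remove=True)
# -> {'genre[].tag.tag-': 'Action', 'genre.locked': 0}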
def get_keys_from_file(csv):
"""Extract the credentials from a csv file."""
lines = tuple(open(csv, 'r'))
creds = lines[1]
access = creds.split(',')[2]
secret = creds.split(',')[3]
return access, secret | eccf56c52dd82656bf85fef618133f86fd9276e6 | 3,230 |
import pkg_resources
import json
def fix_model(project, models, invert=False):
"""Fix model name where file attribute is different from values accepted by facets
>>> fix_model('CMIP5', ['CESM1(BGC)', 'CESM1-BGC'])
['CESM1(BGC)', 'CESM1(BGC)']
>>> fix_model('CMIP5', ['CESM1(BGC)', 'CESM1-BGC'], invert=True)
['CESM1-BGC', 'CESM1-BGC']
Args:
project (str): data project
models (list) models to convert
invert (bool): Invert the conversion (so go from ``CESM1(BGC)`` to ``CESM1-BGC``)
"""
project = project.upper().split('-')[0]
if project in ['CMIP5', 'CORDEX']:
mfile = pkg_resources.resource_filename(__name__, 'data/'+project+'_model_fix.json')
with open(mfile, 'r') as f:
mdict = json.loads(f.read())
if invert:
mfix = {v: k for k, v in mdict.items()}
else:
mfix = mdict
return [mfix[m] if m in mfix.keys() else m for m in models] | b1e91ba7305a75ed376948d92607b1ab97bc93f2 | 3,231 |
import numpy as np
def rectified_linear_unit(x):
""" Returns the ReLU of x, or the maximum between 0 and x."""
return np.maximum(0, x) | 0cf0057d771a69e01a7c9de94f94544359ee6489 | 3,232 |
import os
def create_save_directory(path, directory_name):
"""
This function makes the directory to save the data.
Parameters
----------
path : string
Where the directory_name will be created.
directory_name : string
The directory name where the plots will be saved.
Returns
----------
success : bool
True if the directories were created successfully.
"""
try:
if not os.path.isdir(f'{path}'):
os.mkdir(f'{path}')
os.mkdir(f'{path}\\{directory_name}')
return True
except OSError:
print('Error creating directories')
return False | 3f449935bd5e3e72fdffd9c31968d8dcef615b0d | 3,233 |
import os
def encode_to_filename(folder, animal, session, ftypes="processed_all"):
"""
:param folder: str
folder for data storage
:param animal: str
animal name: e.g. A2A-15B-B_RT
:param session: str
session name: e.g. p151_session1_FP_RH
:param ftype: list or str:
list (or a single str) of typed files to return
'exper': .mat files
'bin_mat': binary file
'green': green fluorescence
'red': red FP
'behavior': .mat behavior file
'FP': processed dff hdf5 file
if ftypes=="all"
:return:
returns all 5 files in a dictionary; otherwise return all file types
in a dictionary, None if not found
"""
# TODO: enable aliasing
paths = [os.path.join(folder, animal, session), os.path.join(folder, animal+'_'+session),
os.path.join(folder, animal), folder]
if ftypes == "raw all":
ftypes = ["exper", "bin_mat", "green", "red"]
elif ftypes == "processed_all":
ftypes = ["processed", "green", "red", "FP"]
elif isinstance(ftypes, str):
ftypes = [ftypes]
results = {ft: None for ft in ftypes}
registers = 0
for p in paths:
if os.path.exists(p):
for f in os.listdir(p):
opt = decode_from_filename(f)
if opt is not None:
ift = opt['ftype']
check_mark = opt['animal'] == animal and opt['session'] == session
#print(opt['session'], animal, session)
check_mark_mdl = (opt['animal'] == animal) and (opt['session'] in session)
cm_mdl = (ift == 'modeling' and check_mark_mdl)
# TODO: temporary hacky method for modeling
#print(opt['session'], animal, session, check_mark_mdl, ift, cm_mdl)
if ift in ftypes and results[ift] is None and (check_mark or cm_mdl):
results[ift] = os.path.join(p, f)
registers += 1
if registers == len(ftypes):
return results if len(results) > 1 else results[ift]
return results if len(results) > 1 else list(results.values())[0] | 8c976f0522fd3a485f81b9f778c91248f71e9b04 | 3,234 |
from typing import Union
from typing import List
from typing import Tuple
def _get_choices(choices: Union[str, List]) -> List[Tuple[str, str]]:
"""Returns list of choices, used for the ChoiceFields"""
result = [('', '')]
if isinstance(choices, str):
result.append((choices, choices))
else:
for choice in choices:
result.append((choice, choice))
return result | 249a068571b0ddca858cd8dcc4f2f7af25689b9d | 3,235 |
def invalid_file():
"""Create an invalid filename string."""
return "/tmp/INVALID.FILE" | 9a249f3ef9445cb78bc962e46ef524360bb44bdb | 3,236 |
def get_model(app_label, model_name):
"""
Fetches a Django model using the app registery.
All other methods to acces models might raise an exception about
registery not being ready yet.
This doesn't require that an app with the given app label exists,
which makes it safe to call when the registery is being populated.
Raises LookupError if model isn't found
"""
try:
return apps.get_model(app_label, model_name)
except AppRegistryNotReady:
if apps.apps_ready and not apps.models_ready:
# if this function is called while `apps.populate()` is
# loading models, ensure that the module that defines
# the target model has been imported and try looking the
# model up in the app registry. This effectively emulates
# `from path.to.app.models import Model` where we use
# `Model = get_model('app', 'Model')` instead
app_config = apps.get_app_config(app_label)
# `app_config.import_models()` cannot be used here because
# it would interfere with `app.populate()`
import_module("%s.%s" % (app_config.name, MODELS_MODULE_NAME))
# In order to account for case-insensitivity of model_name,
# look up the model through a private API of the app registry.
return apps.get_registered_model(app_label, model_name)
else:
# This must be a different case (e.g. the model really doesn't
# exist). We just re-raise the exception.
raise | dd3ba70f2220ba09d256ae58b418cd3401f129e6 | 3,237 |
def affaires_view(request):
"""
Return all affaires
"""
# Check connected
if not check_connected(request):
raise exc.HTTPForbidden()
query = request.dbsession.query(VAffaire).order_by(VAffaire.id.desc()).all()
return Utils.serialize_many(query) | c44e703680034230121c426e7496746355b8ee4b | 3,238 |
from typing import Union
def metric_try_to_float(s: str) -> Union[float, str]:
"""
Try to convert input string to float value.
Return float value on success or input value on failure.
"""
v = s
try:
if "%" in v:
v = v[:-1]
return float(v)
except ValueError:
return str(s) | 6b0121469d35bc6af04d4808721c3ee06955d02e | 3,239 |
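Traced examples covering the three branches:
metric_try_to_float("87%")   # -> 87.0  ("%" stripped, then converted)
metric_try_to_float("1.5")   # -> 1.5
metric_try_to_float("n/a")   # -> "n/a" (float() raises ValueError, original string returned)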
import os
import logging
def calculate_boot_time(pngs_dir, fps, refer_end_pic):
"""
Calculate the app boot (startup) time from a sequence of screenshot files.
:param pngs_dir: directory containing the screenshots
:param fps: frames per second of the screenshot capture
:param refer_end_pic: reference picture marking the end position
:return: boot time in milliseconds
"""
# Find the start point (tap response) and end point (home page content rendered) of the boot
pngs = os.listdir(pngs_dir)
pngs.sort()
start_t, end_t, boot_time = 0, 0, 0
# Find the start point by comparing similarity against the first screenshot
refer_start_pic = os.path.join(pngs_dir, pngs[0])
for png in pngs[1:]:
dest_png = os.path.join(pngs_dir, png)
factor = ssim.compute_ssim(refer_start_pic, dest_png)
logging.info("%s 相似度:%f" % (png, factor))
if factor < 0.9:
start_t = int(png.split('.png')[0])
break
if start_t > 0:
# Continue to the end point: the last position where the reference image matches twice in a row
third_f, second_f, first_f = 0, 0, 0
for png in pngs[start_t:]:
dest_png = os.path.join(pngs_dir, png)
current_f = ssim.compute_ssim(refer_end_pic, dest_png)
logging.info("%s 相似度:%f" % (png, current_f))
third_f = second_f
second_f = first_f
first_f = current_f
# TODO this threshold range should be tuned for the actual business scenario
if third_f > 0.96 and second_f > 0.96 and first_f < 0.96:
end_t = int(png.split('.png')[0])
break
# Validity check and boot-time calculation
if start_t == 0 or end_t == 0:
logging.warning("没有找到开始或者结束图片")
elif end_t == len(pngs):
logging.warning("结束位置错误")
else:
boot_time = int((end_t - start_t) * 1000 / fps)
logging.info("开始位置:%d,结束位置:%d,本次启动耗时:%d毫秒", start_t, end_t, boot_time)
return boot_time | ef1d370ad024450956f400f9ee4747b159a6ad01 | 3,240 |
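A quick arithmetic check of the final formula, with illustrative numbers: at fps = 60, a detected start frame of 12 and an end frame of 48 give boot_time = int((48 - 12) * 1000 / 60) = 600 milliseconds.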
def _table_difference(left: TableExpr, right: TableExpr):
"""
Form the table set difference of two table expressions having identical
schemas. A set difference returns only the rows present in the left table
that are not present in the right table
Parameters
----------
left : TableExpr
right : TableExpr
Returns
-------
difference : TableExpr
"""
return ops.Difference(left, right).to_expr() | aae66ddb29d30a0bc95750d62ce87c16773d3d63 | 3,241 |
def select(receivers, senders, exceptions, timeout):
"""
receivers - list of one element, the simulated receiver socket
senders - list of one element, the simulated sender socket
exceptions - empty list, the simulated sockets with exceptions
ignore timeout - there is no real concurrency here
"""
# print 'select: recv buffers "%s", send buffers "%s", bufsize %d' % \
# (''.join(receivers[0].buffers), ''.join(senders[0].buffers), bufsize) #DEBUG
inputready = receivers if len(receivers[0].buffers) > 0 else []
outputready = senders if (socket_simulator.bufsize
- len(senders[0].buffers)) > 0 else []
exceptions = []
return inputready, outputready, exceptions | 4fd94593521c0e0626b574922286b2b374707fce | 3,242 |
def parse_args():
"""
It parses the command-line arguments (read from sys.argv).
Returns
-------
parsed_args : argparse.Namespace
It contains the command-line arguments that are supplied by the user
"""
parser = ap.ArgumentParser(description="Encoding algorithm.")
parser.add_argument("docking_program", type=str,
help="Path to folder containing the PDB files.")
parser.add_argument("output", type=str,
help="Path to the output file.")
parser.add_argument("-c","--n_proc", type=int,
help='Number of processor.', default = 1)
parser.add_argument("--chain", type=str,
help='Chain ID from the ligand protein.', default = 'B')
parser.add_argument("--score", type=str,
help='Path to normalized scoring file to add in the ' +
'encoding.')
parsed_args = parser.parse_args()
return parsed_args | 7849b9a1422e959be9e5b2504dc7d42c2475572d | 3,243 |
def parse_row(row, entity_dict, span_capture_list, previous_entity):
""" updates the entity dict and span capture list based on row contents """
bio_tag, entity = parse_tag(row.tag)
if bio_tag == 'B':
# update with previous entity, if applicable
entity_dict, span_capture_list, previous_entity = update_entity_dict(entity_dict, span_capture_list, previous_entity)
# start collecting new entity
span_capture_list = [row.word]
previous_entity = entity
elif bio_tag == 'I':
# continue collecting entity
span_capture_list.append(row.word)
else:
# update with previous entity, if applicable
entity_dict, span_capture_list, previous_entity = update_entity_dict(entity_dict, span_capture_list, previous_entity)
previous_entity = None
return entity_dict, span_capture_list, previous_entity | a7d49b6e4dbe747c65688c01652f1d413314b407 | 3,244 |
def _is_fn_init(
tokens: list[Token] | Token,
errors_handler: ErrorsHandler,
path: str,
namehandler: NameHandler,
i: int = 0
):
""" "fn" <fn-name> "("<arg>*")" (":" <returned-type>)? <code-body>"""
tokens = extract_tokens_with_code_body(tokens, i)
if tokens is None or not is_kw(tokens[0], 'fn'):
return False
has_type_annotation = len(tokens) >= 4 and is_op(tokens[3], '->')
if len(tokens) < 4 or not is_base_name(tokens[1]) or tokens[2].type != TokenTypes.PARENTHESIS \
or not _is_code_body(tokens[-1]) or (
has_type_annotation and not _is_type_expression(tokens[:-1], errors_handler, path, namehandler, 4)
) or (not has_type_annotation and len(tokens) != 4):
errors_handler.final_push_segment(
path,
'SyntaxError: invalid syntax',
tokens[-1],
fill=True
)
return False
args_tokens = tokens[2].value
if args_tokens:
if args_tokens[0].type == TokenTypes.TUPLE:
has_default_argument = False
for arg_tokens in args_tokens[0].value:
if not arg_tokens:
break
if not _is_setvalue_expression(arg_tokens, errors_handler, path, namehandler, init_type='let'):
errors_handler.final_push_segment(
path,
'SyntaxError: invalid syntax',
arg_tokens[0],
fill=True
)
return False
if DummyToken(TokenTypes.OP, '=') in arg_tokens:
has_default_argument = True
elif has_default_argument:
errors_handler.final_push_segment(
path,
'SyntaxError: non-default argument follows default argument',
arg_tokens[0],
fill=True
)
return False
elif not _is_setvalue_expression(args_tokens, errors_handler, path, namehandler, init_type='let'):
return False
return True | 2e65dbe9e7976f7e13215fbf04bd40f08da7e16e | 3,245 |
import asyncio
from typing import cast
async def http_connect(address: str, port: int) -> HttpConnection:
"""Open connection to a remote host."""
loop = asyncio.get_event_loop()
_, connection = await loop.create_connection(HttpConnection, address, port)
return cast(HttpConnection, connection) | 2d98815b17f6d0e03763b643a052737f6931a33f | 3,246 |
def make_parallel_transformer_config() -> t5_architecture.EncoderDecoder:
"""Returns an EncoderDecoder with parallel=True."""
dtype = jnp.bfloat16
num_attn_heads = 8
make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
make_layer_norm = layer_norm.T5LayerNorm
def _make_encoder_layer(shared_relative_position_bias):
assert shared_relative_position_bias is None
return t5_architecture.EncoderLayer(
attention=make_attention1(num_attn_heads, dtype),
mlp=make_mlp1(dtype),
dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
relative_position_bias_factory=(
lambda: _make_relative_position_bias(num_attn_heads, dtype)),
parallel=True,
)
def _make_decoder_layer(shared_relative_position_bias):
assert shared_relative_position_bias is None
return t5_architecture.DecoderLayer(
self_attention=make_attention1(num_attn_heads, dtype),
encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
mlp=make_mlp1(dtype),
dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
relative_position_bias_factory=(
lambda: _make_relative_position_bias(num_attn_heads, dtype)),
parallel=True,
)
def _make_encoder(shared_token_embedder):
assert shared_token_embedder is None
return t5_architecture.Encoder(
num_layers=3,
token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
layer_factory=_make_encoder_layer,
input_dropout_factory=make_dropout,
output_dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
dtype=dtype,
)
def _make_decoder(shared_token_embedder):
assert shared_token_embedder is None
return t5_architecture.Decoder(
num_layers=2,
token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
layer_factory=_make_decoder_layer,
dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
output_logits_factory=None,
dtype=dtype,
)
return t5_architecture.EncoderDecoder(
shared_token_embedder_factory=lambda: None,
encoder_factory=_make_encoder,
decoder_factory=_make_decoder,
) | f61c02d66075fb71fbecd58e8c369a6ba406c15f | 3,247 |
def get_device_mapping(embedding_sizes, num_gpus, data_parallel_bottom_mlp,
experimental_columnwise_split, num_numerical_features):
"""Get device mappings for hybrid parallelism
Bottom MLP running on device 0. Embeddings will be distributed across among all the devices.
Optimal solution for partitioning set of N embedding tables into K devices to minimize maximal subset sum
is an NP-hard problem. Additionally, embedding tables distribution should be nearly uniform due to the performance
constraints. Therefore, suboptimal greedy approach with max bucket size is used.
Args:
embedding_sizes (Sequence[int]): embedding table sizes
num_gpus (int): number of GPUs to distribute the tables across. Default: 8.
Returns:
MultiGpuMetadata: bottom-MLP ranks, per-rank categorical feature ids,
and per-rank feature counts.
"""
if num_numerical_features == 0:
bottom_mlp_ranks = []
elif data_parallel_bottom_mlp:
bottom_mlp_ranks = list(range(num_gpus))
else:
bottom_mlp_ranks = [0]
if experimental_columnwise_split:
gpu_buckets = num_gpus * [list(range(len(embedding_sizes)))]
vectors_per_gpu = [len(bucket) for bucket in gpu_buckets]
if num_numerical_features > 0:
vectors_per_gpu[0] += 1 # count bottom mlp
return MultiGpuMetadata(bottom_mlp_ranks=bottom_mlp_ranks,
rank_to_categorical_ids=gpu_buckets,
rank_to_feature_count=vectors_per_gpu)
if num_gpus > 4 and not data_parallel_bottom_mlp and num_numerical_features > 0:
# for higher no. of GPUs, make sure the one with bottom mlp has no embeddings
gpu_buckets = distribute_to_buckets(embedding_sizes, num_gpus - 1) # leave one device out for the bottom MLP
gpu_buckets.insert(0, [])
else:
gpu_buckets = distribute_to_buckets(embedding_sizes, num_gpus)
vectors_per_gpu = [len(bucket) for bucket in gpu_buckets]
if not data_parallel_bottom_mlp:
for rank in bottom_mlp_ranks:
vectors_per_gpu[rank] += 1 # count bottom mlp
return MultiGpuMetadata(bottom_mlp_ranks=bottom_mlp_ranks,
rank_to_categorical_ids=gpu_buckets,
rank_to_feature_count=vectors_per_gpu) | 2265831d87d8f48c4b87ca020c7f56293cb62647 | 3,248 |
def _generate_relative_positions_embeddings(length, depth,
max_relative_position, name):
"""Generates tensor of size [length, length, depth]."""
with tf.variable_scope(name):
relative_positions_matrix = _generate_relative_positions_matrix(
length, max_relative_position)
vocab_size = max_relative_position * 2 + 1
# Generates embedding for each relative position of dimension depth.
embeddings_table = tf.get_variable("embeddings", [vocab_size, depth])
embeddings = tf.gather(embeddings_table, relative_positions_matrix)
return embeddings | 7c69705cf5cc161144181b09a377f66d863b12ae | 3,249 |
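A concrete illustration (hedged: it assumes `_generate_relative_positions_matrix` uses the usual clipped construction, entry (i, j) = clip(j - i, -max, max) + max). For length = 3 and max_relative_position = 2, vocab_size is 5 and the gathered index matrix is:
# relative_positions_matrix for length=3, max_relative_position=2:
#   [[2, 3, 4],
#    [1, 2, 3],
#    [0, 1, 2]]
# tf.gather then replaces each index with its `depth`-dimensional row from embeddings_table,
# producing the [length, length, depth] tensor returned above.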
def continTapDetector(
fs: int, x=[], y=[], z=[], side='right',
):
"""
Detect the moments of finger-raising and -lowering
during a fingertapping task.
Function detects the axis with most variation and then
first detects several large/small pos/neg peaks, then
the function determines sample-wise in which part of a
movement or tap the acc-timeseries is, and defines the
exact moments of finger-raising, finger-lowering, and
the in between stopping moments.
Input:
- x, y, z (arr): all three one-dimensional data-
arrays containing one acc-axis each. Exact
labeling x/y/z is not important. Should have equal
lengths. Typically timeseries from one run.
- fs (int): corresponding sample frequency
- side (string): side where the acc-data originates from
Return:
- tapTimes (list of lists): each list contains 4 timestamps
(in seconds from array-start) indicating moments of:
[finger-raise start, finger raise end,
finger-lowering start, finger-lowering end]
- moveTimes, restTimes: idem but then for 'other
movements' and rest periods (of > 1 sec), each list
contains the first and last timestamp of move/rest
period.
"""
# input sanity checks
if x != [] and y != []:
assert len(x) == len(y), 'Arrays X and Y should have equal lengths'
if x != [] and z != []:
assert len(x) == len(z), 'Arrays X and Z should have equal lengths'
if z != [] and y != []:
assert len(y) == len(z), 'Arrays Y and Z should have equal lengths'
assert side in ['left', 'right'], 'Side should be left or right'
ax_arrs = []
for ax in [x, y, z]:
if ax != []: ax_arrs.append(ax)
# Find axis with most variation
maxVar = np.argmax([variation(arr) for arr in ax_arrs])
# maxRMS = np.argmax([sum(arr) for arr in ax_arrays])
sig = ax_arrs[maxVar] # acc-signal to use
# check data for pos/neg and order of magn
sig = check_PosNeg_and_Order(sig, fs)
# add differential of signal
sigdf = np.diff(sig)
# timestamps from start (in sec)
timeStamps = np.arange(0, len(sig), 1 / fs)
# Thresholds for movement detection
posThr = np.mean(sig)
negThr = -np.mean(sig)
# Find peaks to help movement detection
peaksettings = {
'peak_dist': 0.1,
'cutoff_time': .25,
}
# find relevant positive peaks
posPeaks = find_peaks(
sig,
height=(posThr, np.max(sig)),
distance=fs * .05, # settings[task]['peak_dist']
)[0]
# select Pos-peaks with surrounding >> Pos and Neg Diff
endPeaks = [np.logical_or(
any(sigdf[i -3:i + 3] < np.percentile(sig, 10)),
any(sigdf[i -3:i + 3] > np.percentile(sig, 90))
) for i in posPeaks]
endPeaks = posPeaks[endPeaks]
# delete endPeaks from posPeaks
for i in endPeaks:
idel = np.where(posPeaks == i)
posPeaks = np.delete(posPeaks, idel)
# delete endPeaks which are too close after each other
# by starting with std False before np.diff, the diff-
# scores represent the distance to the previous peak
tooclose = endPeaks[np.append(
np.array(False), np.diff(endPeaks) < (fs / 6))]
for p in tooclose:
i = np.where(endPeaks == p)
endPeaks = np.delete(endPeaks, i)
posPeaks = np.append(posPeaks, p)
# double check endPeaks with np.diff
hop = 3
endP2 = []
for n in np.arange(hop, sig.shape[0]):
if np.logical_and(
any(np.diff(sig)[n - hop:n] > np.percentile(sig, 90)),
any(np.diff(sig)[n- hop:n] < np.percentile(sig, 10))
): # if diff is above extremes within hop-distance
endP2.append(n)
endP2 = list(compress(endP2, np.diff(endP2) > hop))
for p2 in endP2: # add to endPeaks if not containing
if min(abs(p2 - endPeaks)) > 5:
endPeaks = np.append(endPeaks, p2)
smallNeg = find_peaks(
-1 * sig, # convert pos/neg for negative peaks
height=(-.5e-7, abs(np.min(sig)) * .5),
distance=fs * peaksettings['peak_dist'] * .5,
prominence=abs(np.min(sig)) * .05,
# wlen=40,
)[0]
# largeNeg = find_peaks(
# -1 * sig,
# height=abs(np.min(sig)) * .4,
# # first value is min, second is max
# distance=fs * peaksettings['peak_dist'],
# # prominence=np.min(yEpoch) * .1,
# # wlen=40,
# )[0]
# Lists to store collected indices and timestamps
tapi = [] # list to store indices of tap
movei = [] # list to store indices of other move
resti = [] # list to store indices of rest
resttemp = [] # temp-list to collect rest-indices [1st, Last]
starttemp = [np.nan] * 6 # for during detection process
# [startUP, fastestUp, stopUP,
# startDown, fastestDown, stopDown]
tempi = starttemp.copy() # to start process
state = 'lowRest'
# Sample-wise movement detection
for n, y in enumerate(sig[:-1]):
if state == 'otherMov':
# PM LEAVE OUT OTHER-MOV-STATE
if n in endPeaks: # during other Move: end Tap
tempi[-1] = n # finish and store index list
if (tempi[-1] - tempi[0]) > fs * .1:
movei.append(tempi) # save if long enough
state='lowRest'
tempi = starttemp.copy() # after end: start lowRest
continue
try:
next10 = sum([negThr < Y < posThr for Y in sig[range(n, n + int(fs * .2)
)]])
if next10 > (fs * .2) * .8:
# End 'other move' if 8 / 10 next samples are inactive
tempi[-1] = n # END of OTHER MOVE
if (tempi[-1] - tempi[0]) > fs * .1:
movei.append(tempi)
tempi = starttemp.copy() # after end: start lowRest
state = 'lowRest'
except IndexError: # prevent indexerror out of range for next10
# print('end of timeseries')
continue
elif state == 'lowRest':
if np.logical_and(
y > posThr, # if value is over pos-threshold
sigdf[n] > np.percentile(sigdf, 75) # AND diff is over Thr
# any([Y in posPeaks for Y in range(n, n + int(fs * .2))]) # USED IN PAUSED
):
if resttemp: # close and store active rest period
resttemp.append(n) # Add second and last rest-ind
if (resttemp[1] - resttemp[0]) > fs: # if rest > 1 sec
resti.append(resttemp) # add finished rest-indices
resttemp = [] # reset resttemp list
state='upAcc1'
tempi[0] = n # START TIME Tap-UP
# print('save start UP', n)
# elif np.logical_or(
# np.logical_or(n in posPeaks, n in smallNeg[0]),
# ~ (negThr < y < posThr)
# ):
# if resttemp: # close and store active rest period
# resttemp.append(n) # Add second and last rest-ind
# if (resttemp[1] - resttemp[0]) > fs: # if rest > 1 sec
# resti.append(resttemp) # add finished rest-indices
# resttemp = [] # reset resttemp list
# state = 'otherMov'
# tempi.append(n) # START TIME Othermovement
elif n in endPeaks: # during lowRest, endPeak found
resttemp.append(n) # Add second and last rest-ind
if (resttemp[1] - resttemp[0]) > fs: # if rest > 1 sec
resti.append(resttemp) # add finished rest-indices
resttemp = [] # reset resttemp list
state='lowRest'
tempi = starttemp.copy() # after end: start lowRest
continue
else: # lowRest stays lowRest
if not resttemp: # if rest-temp list is empty
resttemp.append(n) # start of rest period
elif state == 'upAcc1':
if n in posPeaks:
state='upAcc2'
# acc getting less, veloc still increasing
# print('acc-peakUP detected', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state == 'upAcc2':
if y < 0: # crossing zero-line, start of decelleration
tempi[1] = n # save n as FASTEST MOMENT UP
state='upDec1'
# print('fastest point UP', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state=='upDec1':
if n in smallNeg:
state='upDec2'
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state == 'upDec2':
if np.logical_or(y > 0, sigdf[n] < 0):
# if acc is pos, or goes into acceleration
# phase of down movement
state='highRest' # end of UP-decell
tempi[2]= n # END OF UP !!!
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state == 'highRest':
if np.logical_and(
y < negThr,
sigdf[n] < 0 #np.percentile(sigdf, 25)
# from highRest: LOWERING starts when acc
# gets below negative-threshold AND when
# differential is negative
):
state='downAcc1'
tempi[3] = n # START OF LOWERING
# print('LOWERING START', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
# elif state == 'downAcc1':
# if n in largeNeg[0]:
# state='downAcc2'
# elif n - tempi[2] > (fs * peaksettings[task]['cutoff_time']):
# # if down-move takes > defined cutoff time
# state = 'otherMov' # reset to start-state
# movei.append(tempi) # newly added
# tempi = [] # newly added
# elif state == 'downAcc2':
elif state == 'downAcc1':
if np.logical_and(
y > 0,
sigdf[n] > 0
):
# if acceleration gets positive again and keeps
# one increasing (sigdf) downwards acceleration
# is finished -> ADD FASTEST DOWNW MOMENT
state='downDec1'
tempi[4] = n
# print('fastest DOWN @', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
# elif n - tempi[2] > (fs * peaksettings[task]['cutoff_time']):
# # if down-move takes > defined cutoff time
# state = 'otherMov' # reset to start-state
# movei.append(tempi) # newly added
# tempi = [] # newly added
elif state == 'downDec1':
if n in endPeaks:
state = 'downDec2'
elif state=='downDec2':
if np.logical_or(
y < 0,
sigdf[n] < 0
): # after large pos-peak, before around impact
# artefactual peaks
state='lowRest'
tempi[5] = n
# store current indices
tapi.append(tempi)
tempi = starttemp.copy() # restart w/ 6*nan
# drop first tap due to starting time
tapi = tapi[1:]
# convert detected indices-lists into timestamps
tapTimes = [] # list to store timeStamps of tap
# moveTimes = [] # alternative list for movements
# restTimes = [] # list to store rest-timestamps
for tap in tapi: tapTimes.append(
[timeStamps[I] for I in tap if I is not np.nan]
)
# for tap in movei: moveTimes.append(timeStamps[tap])
# for tap in resti: restTimes.append(timeStamps[tap])
return tapi, tapTimes, endPeaks | 742a7f5590e8ad76e521efe5d1c293c43d71de0b | 3,250 |
from multiprocessing import Pool, cpu_count
import numpy as np
import pandas as pd
def parallelize(df, func):
""" Split data into max core partitions and execute func in parallel.
https://www.machinelearningplus.com/python/parallel-processing-python/
Parameters
----------
df : pandas Dataframe
func : any functions
Returns
-------
data : pandas Dataframe
Returned dataframe of func.
"""
cores = cpu_count()
data_split = np.array_split(df, cores)
pool = Pool(cores)
data = pd.concat(pool.map(func, data_split), ignore_index=1)
pool.close()
pool.join()
return data | dc9c085ada6ffa26675bd9c4a218cc06807f9511 | 3,251 |
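A hedged usage sketch (the `add_double` transform and column names are made up for illustration; the function itself relies on the numpy, pandas and multiprocessing imports added above its definition). On spawn-based platforms the call should sit under a `__main__` guard:
import pandas as pd

def add_double(chunk):
    # operates on one partition of the frame and returns it
    chunk = chunk.copy()
    chunk["x2"] = chunk["x"] * 2
    return chunk

if __name__ == "__main__":
    df = pd.DataFrame({"x": range(10)})
    out = parallelize(df, add_double)
    print(out.head())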
def get_functional_groups(alkoxy_mol):
"""
given a molecule object `alkoxy_mol`. This method returns
a dictionary of groups used in the Vereecken SAR with the
key being the group and the value being the number of occurrences
it has.
"""
#print 'getting groups from {}'.format(alkoxy_mol.toSMILES())
alkoxy_mol.assignAtomIDs()
labeled_atoms = alkoxy_mol.getLabeledAtoms()
assert labeled_atoms['*1'].symbol == 'C'
assert labeled_atoms['*3'].symbol == 'C', alkoxy_mol.toAdjacencyList() + str(labeled_atoms)
alpha_groups = get_atom_groups(labeled_atoms['*1'])
beta_groups = get_atom_groups(labeled_atoms['*3'])
# find cyclic groups here (after project finished)
all_groups = {}
for label, num in alpha_groups.items():
all_groups['alpha{}'.format(label)] = num
for label, num in beta_groups.items():
all_groups['beta{}'.format(label)] = num
return all_groups | 9c0280bb09e6ef606aac2a14fe2826c0a9feb06d | 3,252 |
def rough(material, coverage, scale, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=True, bf=True, xtraParams=defaultXtraParams):
"""rough(material, coverage, scale, det, [e0=20.0], [withPoisson=True], [nTraj=defaultNumTraj], [dose = 120.0], [sf=True], [bf=True], [xtraParams={}])
Monte Carlo simulate a spectrum from a rough surface with roughness modeled as square pillars of the specified scale and fractional coverage.
The features are also offset by a randomized x,y offset of size approximately scale to ensure that the beam doesn't always strike at the same sort of a position.
+ material - Composition of material
+ coverage of pillars on surface (0.0 to 1.0 -> 0% to 100%)
+ scale - height and width of pillars"""
tmp = u"MC simulation of a %0.2lg um %d%% coverage rough surface of %s at %0.1f keV%s%s" % (1.0e6 * scale, int(100.0 * coverage), material, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
return base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildRough, { "Scale" : scale, "Coverage" : coverage, "Size" : 1.0e-5, "Material" : material }, xtraParams) | 0aa6a21a2cdae22bf9f56cd6babfa9c3402ce465 | 3,253 |
def jsonify(comment_lower: str) -> str:
"""pyNastran: SPOINT={'id':10, 'xyz':[10.,10.,10.]}"""
sline = comment_lower.split('=')
rhs = sline[1].rstrip()
return rhs.replace("'", '"').replace('}', ',}').replace(',,}', ',}') | e8641d5e94cff32389f7ade3360935a2abbcf297 | 3,254 |
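Tracing the docstring example through the code:
jsonify("pynastran: spoint={'id':10, 'xyz':[10.,10.,10.]}")
# split('=')[1].rstrip()  -> "{'id':10, 'xyz':[10.,10.,10.]}"
# "'" -> '"'              -> '{"id":10, "xyz":[10.,10.,10.]}'
# '}' -> ',}' (and ',,}' -> ',}' cleans up any doubled commas)
#                         -> '{"id":10, "xyz":[10.,10.,10.],}'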
import time
def set_attributes_polling(test_case, device_proxy, device_server, poll_periods):
"""Set attribute polling and restore after test
Parameters
----------
test_case : unittest.TestCase instance
device_proxy : tango.DeviceProxy instance
device_server : tango.Device instance
The instance of the device class `device_proxy` is talking to
poll_periods : dict {"attribute_name" : poll_period}
`poll_period` in milliseconds as per Tango APIs, 0 or falsy to disable
polling.
Return value
------------
restore_polling : function
This function can be used to restore polling if it is to happen before the end of
the test. Should be idempotent if only one set_attributes_polling() is called per
test.
"""
# TODO (NM 2016-04-11) check if this is still needed after upgrade to Tango 9.x For
# some reason it only works if the device_proxy is used to set polling, but the
# device_server is used to clear the polling. If polling is cleared using device_proxy
# it seem to be impossible to restore the polling afterwards.
attributes = poll_periods.keys()
initial_polling = {
attr: device_proxy.get_attribute_poll_period(attr) for attr in attributes
}
retry_time = 0.5
for attr in attributes:
initial_period = initial_polling[attr]
new_period = poll_periods[attr]
# Disable polling for attributes with poll_period of zero / falsy
# zero initial_period implies no polling currently configed
if not new_period and initial_period != 0:
LOGGER.debug("not setting polling for {}".format(attr))
device_server.stop_poll_attribute(attr)
else:
# Set the polling
LOGGER.debug("setting polling for {}".format(attr))
try:
device_proxy.poll_attribute(attr, new_period)
# TODO See (NM 2016-04-11) comment below about back-to-back calls
time.sleep(0.05)
except Exception:
retry = True
LOGGER.warning(
"Setting polling of attribute {} in {} due to unhandled"
"exception in poll_attribute command".format(attr, retry_time),
exc_info=True,
)
else:
retry = False
if retry:
time.sleep(retry_time)
device_proxy.poll_attribute(attr, new_period)
def restore_polling():
"""Restore initial polling, for use during cleanup / teardown"""
for attr, period in initial_polling.items():
if period == 0:
continue # zero period implies no polling, nothing to do
try:
device_proxy.poll_attribute(attr, period)
# TODO (NM 2016-04-11) For some reason Tango doesn't seem to handle
# back-to-back calls, and even with the sleep it sometimes goes bad. Need
# to check if this is fixed (and core dumps) when we upgrade to Tango 9.x
time.sleep(0.05)
except Exception:
retry = True
LOGGER.warning(
"retrying restore of attribute {} in {} due to unhandled"
"exception in poll_attribute command".format(attr, retry_time),
exc_info=True,
)
else:
retry = False
if retry:
time.sleep(retry_time)
device_proxy.poll_attribute(attr, period)
test_case.addCleanup(restore_polling)
return restore_polling | 103d9dba615e0c99ac35766348f016139831c12c | 3,255 |
from catalyst.engines.torch import (
DataParallelEngine,
DeviceEngine,
DistributedDataParallelEngine,
)
from catalyst.engines.amp import (
AMPEngine,
DataParallelAMPEngine,
DistributedDataParallelAMPEngine,
)
from catalyst.engines.apex import (
APEXEngine,
DataParallelAPEXEngine,
DistributedDataParallelAPEXEngine,
)
def get_available_engine(
fp16: bool = False, ddp: bool = False, amp: bool = False, apex: bool = False
) -> "IEngine":
"""Returns available engine based on given arguments.
Args:
fp16 (bool): option to use fp16 for training. Default is `False`.
ddp (bool): option to use DDP for training. Default is `False`.
amp (bool): option to use AMP for training. Default is `False`.
apex (bool): option to use APEX for training. Default is `False`.
Returns:
IEngine which match requirements.
"""
if fp16 and not amp and not apex:
amp = SETTINGS.amp_required or (SETTINGS.amp_required and SETTINGS.apex_required)
apex = SETTINGS.apex_required and (not SETTINGS.amp_required)
if amp:
assert (
SETTINGS.amp_required
), "catalyst[amp] is not available, to install it, run `pip install catalyst[amp]`."
assert not apex, "Could not use both apex and amp engines"
if apex:
assert (
SETTINGS.apex_required
), "catalyst[apex] is not available, to install it, run `pip install catalyst[apex]`."
assert not amp, "Could not use both apex and amp engines"
is_multiple_gpus = NUM_CUDA_DEVICES > 1
if not IS_CUDA_AVAILABLE:
return DeviceEngine("cpu")
elif is_multiple_gpus:
if ddp:
if amp:
return DistributedDataParallelAMPEngine()
elif apex:
return DistributedDataParallelAPEXEngine()
else:
return DistributedDataParallelEngine()
else:
if amp:
return DataParallelAMPEngine()
elif apex:
return DataParallelAPEXEngine()
else:
return DataParallelEngine()
else:
if amp:
return AMPEngine()
elif apex:
return APEXEngine()
else:
return DeviceEngine("cuda") | 6d29e0c1938c5889b6e4a7fa972945065bc2cf3a | 3,256 |
import shutil
def disk_usage(pathname):
"""Return disk usage statistics for the given path"""
### Return tuple with the attributes total,used,free in bytes.
### usage(total=118013599744, used=63686647808, free=48352747520)
return shutil.disk_usage(pathname) | c7a36e2f3200e26a67c38d50f0a97dd015f7ccfa | 3,257 |
import os
import yaml
def get_default_log_config():
"""Get the default logging configuration.
Returns:
dict: The default logging configuration.
"""
root = os.path.dirname(__file__)
config_file = os.path.join(root, "logging.yaml")
with open(config_file, "r") as file_object:
data = yaml.load(file_object, yaml.FullLoader)
return data["logging"] | 7fc4479c7efb666b80ddd3e450b107ac73cf3c16 | 3,258 |
from typing import Tuple
def create_new_deployment(
runner: Runner, deployment_arg: str, expose: PortMapping,
add_custom_nameserver: bool
) -> Tuple[str, str]:
"""
Create a new Deployment, return its name and Kubernetes label.
"""
span = runner.span()
run_id = runner.session_id
runner.show(
"Starting network proxy to cluster using "
"new Deployment {}".format(deployment_arg)
)
def remove_existing_deployment(quiet=False):
if not quiet:
runner.show("Cleaning up Deployment {}".format(deployment_arg))
runner.check_call(
runner.kubectl(
"delete",
"--ignore-not-found",
"svc,deploy",
"--selector=telepresence=" + run_id,
)
)
runner.add_cleanup("Delete new deployment", remove_existing_deployment)
remove_existing_deployment(quiet=True)
command = [
"run", # This will result in using Deployment:
"--restart=Always",
"--limits=cpu=100m,memory=256Mi",
"--requests=cpu=25m,memory=64Mi",
deployment_arg,
"--image=" + get_image_name(expose),
"--labels=telepresence=" + run_id,
]
# Provide a stable argument ordering. Reverse it because that happens to
# make some current tests happy but in the long run that's totally
# arbitrary and doesn't need to be maintained. See issue 494.
for port in sorted(expose.remote(), reverse=True):
command.append("--port={}".format(port))
if expose.remote():
command.append("--expose")
# If we're on local VM we need to use different nameserver to prevent
# infinite loops caused by sshuttle:
if add_custom_nameserver:
command.append(
"--env=TELEPRESENCE_NAMESERVER=" + get_alternate_nameserver()
)
try:
runner.check_call(runner.kubectl(command))
except CalledProcessError as exc:
raise runner.fail(
"Failed to create deployment {}:\n{}".format(
deployment_arg, exc.stderr
)
)
span.end()
return deployment_arg, run_id | d15e9e1ec9d09669b8becd4e169049d5a1e836ab | 3,259 |
import logging
def score_latency(
references, reference_wavs, partial_translations, target_language="en-US"
):
"""Measures the "final" translation lag after all corrections have been made."""
logger = logging.getLogger("evaluation")
tokenizer = get_tokenizer(target_language)
min_len = min(len(partial_translations), len(references))
if len(partial_translations) != len(references):
logger.warning(
f"Found {len(references)} references, {len(partial_translations)} partial "
+ f"translations. Evaluating only the first {min_len}"
)
partial_translations = partial_translations[:min_len]
references = references[:min_len]
# Make case insensitive and tokenize
partial_translations_tokenized = [
[(t_time, tokenizer.tokenize(t.upper())) for t_time, t in transcript]
for transcript in partial_translations
]
references = [tokenizer.tokenize(r.upper()) for r in references]
# Compute total lag
output_words, total_lag = 0, 0
for reference, (_, reference_wav), partial_translation in zip(
references, reference_wavs, partial_translations_tokenized
):
if len(partial_translation) == 0:
continue
final_time, final_translation = partial_translation[-1]
reference_duration = get_duration_seconds(reference_wav)
for j in range(1, len(final_translation) + 1):
# Compare a time a word was finalized in the output
# to the time its corresponding word was uttered
finalization_time = get_finalization_time(
final_translation, j, partial_translation
)
original_token = int(j * len(reference) / len(final_translation))
original_time = get_token_time(
original_token, reference, reference_duration
)
total_lag += max(0, finalization_time - original_time)
output_words += 1
return total_lag / max(1, output_words) | 9d31e029247e44448103d99760019f0dffa1cf44 | 3,260 |
import numpy as np
def shapelet_with_w_term(
coords, frequency, coeffs, beta, delta_lm, lm, dtype=np.complex128
):
"""
shapelet: outputs visibilities corresponding to that of a shapelet
Inputs:
coords: coordinates in (u,v) space with shape (nrow, 3)
frequency: frequency values with shape (nchan,)
coeffs: shapelet coefficients with shape, where
coeffs[3, 4] = coeffs_l[3] * coeffs_m[4] (nsrc, nmax1, nmax2)
beta: characteristic shapelet size with shape (nsrc, 2)
        delta_lm: tuple of (delta_l, delta_m) pixel sizes in the l and m dims
        dtype: output dtype (defaults to np.complex128)
lm: source center coordinates of shape (nsource, 2)
Returns:
out_shapelets: Shapelet with shape (nrow, nchan, nsrc)
"""
nrow = coords.shape[0]
nsrc = coeffs.shape[0]
nchan = frequency.shape[0]
    out_shapelets = np.empty((nrow, nchan, nsrc), dtype=dtype)
delta_l, delta_m = delta_lm
for row in range(nrow):
u, v, w = coords[row, :]
for chan in range(nchan):
fu = u * 2 * np.pi * frequency[chan] / lightspeed
fv = v * 2 * np.pi * frequency[chan] / lightspeed
for src in range(nsrc):
nmax1, nmax2 = coeffs[src, :, :].shape
beta_u, beta_v = beta[src, :]
l, m = lm[src, :]
if beta_u == 0 or beta_v == 0:
out_shapelets[row, chan, src] = 1
continue
tmp_shapelet = 0 + 0j
for n1 in range(nmax1):
for n2 in range(nmax2):
tmp_shapelet += (
0
if coeffs[src][n1, n2] == 0
else coeffs[src][n1, n2]
* basis_function(
n1, fu, beta_u, True, delta_x=delta_l
)
* basis_function(
n2, fv, beta_v, True, delta_x=delta_m
)
)
w_term = phase_steer_and_w_correct(
(u, v, w), (l, m), frequency[chan]
)
out_shapelets[row, chan, src] = tmp_shapelet * w_term
return out_shapelets | f6c9f9011306cc2de5054e015857b3b47c7e6cd9 | 3,261 |
import numpy
def CylindricalVectorsToCartesian(coordinates, data):
"""
Project the supplied cylindrical coordinates (r-phi-z) vectors to 3D Cartesian
(x-y-z). coordinates must be in Cartesian.
"""
if optimise.DebuggingEnabled():
assert(len(coordinates) == len(data))
for i, coord in enumerate(coordinates):
assert(len(coord) == 3)
assert(len(data[i]) == 3)
newData = numpy.empty((len(data), 3))
for i, coord in enumerate(coordinates):
datum = data[i]
rMag = L2Norm(coord[:2])
x = [coord[0] / rMag, -coord[1] / rMag]
y = [-x[1], x[0]]
newData[i, :] = [datum[0] * x[0] + datum[1] * x[1], datum[0] * y[0] + datum[1] * y[1], datum[2]]
return newData | abc3bd3eecd6f087dc932b882600e8779903f556 | 3,262 |
from collections import Counter
import numpy as np
def _entropy_counter2(arr):
"""
calculate the base 2 entropy of the distribution given in `arr` using a
`Counter` and the `values` method (for python3)
"""
arr_len = len(arr)
if arr_len == 0:
return 0
log_arr_len = np.log2(len(arr))
return -sum(val * (np.log2(val) - log_arr_len)
for val in Counter(arr).values()) / arr_len | 1f72c7a7e5db56aa9a0e5c3811cf28c600420949 | 3,263 |
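# Illustrative usage: two equally frequent symbols carry exactly one bit of entropy.
_entropy_counter2(list("abab"))  # -> 1.0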
from os import path, remove
from shutil import rmtree
def update_deleted_strain(_, strain_to_del):
"""Update ``deleted-strain`` var.
This happens after a user clicks the OK btn in the confirm strain
deletion modal.
We also delete the files associated with the strain at this step.
:param _: User clicked the OK btn
:param strain_to_del: Strain corresponding to del btn user clicked
:type strain_to_del: str
"""
remove(path.join(USER_DATA_DIR, strain_to_del + ".gvf"))
rmtree(path.join(USER_SURVEILLANCE_REPORTS_DIR, strain_to_del))
return strain_to_del | 0086aa0c17910ed75f480e4a6ac0d2950d5d84dd | 3,264 |
from django.db.models import fields
def get_changes_between_models(model1, model2, excludes=None):
"""
Return a dict of differences between two model instances
"""
if excludes is None:
excludes = []
changes = {}
for field in model1._meta.fields:
if (isinstance(field, (fields.AutoField,
fields.related.RelatedField))
or field.name in excludes):
continue
if field.value_from_object(model1) != field.value_from_object(model2):
changes[field.verbose_name] = (field.value_from_object(model1),
field.value_from_object(model2))
return changes | 1f62afdc7818574553fa7a53eb05e766c2805edd | 3,265 |
import numpy as np
def get_intersect(x1, y1, x2, y2):
    """
    Returns the point of intersection of two lines, or (None, None) if the lines
    are parallel.
    x1: [x, y] a point on the first line
    y1: [x, y] another point on the first line
    x2: [x, y] a point on the second line
    y2: [x, y] another point on the second line
    """
s = np.vstack([x1, y1, x2, y2]) # s for stacked
h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous
l1 = np.cross(h[0], h[1]) # get first line
l2 = np.cross(h[2], h[3]) # get second line
x, y, z = np.cross(l1, l2) # point of intersection
if z == 0: # lines are parallel
return None, None
return x / z, y / z | 8e9ed2f2351b41658400badc7339eedc9791db8a | 3,266 |
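# Illustrative usage: the line through (0, 0) and (1, 1) crosses the line
# through (0, 1) and (1, 0) at (0.5, 0.5).
get_intersect([0, 0], [1, 1], [0, 1], [1, 0])  # -> (0.5, 0.5)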
import pandas as pd
def removeDuplicateColumns(df):
"""
Removes columns that have a duplicate name.
:return pd.DataFrame:
"""
duplicates = getDuplicates(df.columns)
done = False
idx = 0
df_result = df.copy()
additions_dict = {}
while not done:
if idx >= len(df_result.columns):
done = True
break
column = df_result.columns[idx]
if column in duplicates:
df1 = df_result[column]
values = df1.iloc[:,1]
del df_result[column]
duplicates.remove(column)
additions_dict[column] = values
else:
idx += 1
df_add = pd.DataFrame(additions_dict)
df_result = pd.concat([df_result, df_add], axis=1, sort=True)
return df_result | dc46580d221b8e4279ba73e8d97eee079e65309c | 3,267 |
import nnvm.symbol as sym
def conv_block(data, name, channels,
kernel_size=(3, 3), strides=(1, 1), padding=(1, 1),
epsilon=1e-5):
"""Helper function to construct conv-bn-relu"""
# convolution + bn + relu
conv = sym.conv2d(data=data, channels=channels,
kernel_size=kernel_size, strides=strides,
padding=padding, use_bias=False,
layout="NCHW", name=name + "_conv")
bn = sym.batch_norm(data=conv, epsilon=epsilon, name=name + "_bn")
act = sym.relu(data=bn, name=name + "_relu")
return act | 90464c208c12a6e9907f5a206ddd324fd92638ff | 3,268 |
import sys
def var2fa(stream, gzipped=False):
"""convert variant calling's .var file to fasta"""
for line in stream:
if gzipped: line = line.decode()
if line[0]!='V': continue
line = line.strip().split('\t')
_1, chrom, start, end, _2, _3, ref, alt, queryname, q_start, q_end, strand = line
if abs(len(ref)-len(alt))<50: continue # not long enough
if len(ref)>len(alt):
newname = 'DEL_'+'_'.join([queryname, chrom+strand, start+'-'+end, q_start+'-'+q_end])
seq = ref.upper()
else:
newname = 'INS_'+'_'.join([queryname, chrom+strand, start+'-'+end, q_start+'-'+q_end])
seq = alt.upper()
sys.stdout.write('>{0}\n{1}\n'.format(newname, seq))
return 0 | 48e5c9390dc954dcf602e63152815e5af843d270 | 3,269 |
import pickle
import torchvision
import torch
def utzappos_tensor_dset(img_size, observed, binarized, drop_infreq,
cache_fn, *dset_args, transform=None, **dset_kwargs):
"""
Convert folder dataset to tensor dataset.
"""
cache_fn = UTZapposIDImageFolder.get_cache_name(cache_fn, img_size, observed, binarized, drop_infreq)
try:
with open(cache_fn, 'rb') as f:
dset_samples, dset_labels, dset_label_info = pickle.load(f)
except FileNotFoundError:
img_transform = torchvision.transforms.Compose([torchvision.transforms.Resize((img_size, img_size)),
torchvision.transforms.ToTensor()])
dset = UTZapposIDImageFolder(*dset_args, img_size=img_size, transform=img_transform,
observed=observed, binarized=binarized, drop_infreq=drop_infreq,
**dset_kwargs)
dset_examples = [dset[ind] for ind in range(len(dset))]
dset_samples, dset_labels = map(torch.stack, zip(*dset_examples))
# find_duplicates_in_dsets((dset_samples, dset_labels), (dset_samples, dset_labels),
# tuple_format=True, itself=True)
dset_label_info = dset._label_info
with open(cache_fn, 'wb') as handle:
pickle.dump((dset_samples, dset_labels, dset_label_info), handle, protocol=4)
return CustomTensorDataset(dset_samples, dset_labels, transform=transform), dset_label_info, cache_fn | 8008f8d19453884106832746a4cefb55c9813c45 | 3,270 |
def compare_versions(aStr, bStr):
"""
Assumes Debian version format:
[epoch:]upstream_version[-debian_revision]
Returns:
-1 : a < b
0 : a == b
1 : a > b
"""
    # Compare using the Version class; Python 3 has no built-in cmp()
    a, b = Version(aStr), Version(bStr)
    return (a > b) - (a < b) | a17e333cc555b1b260cf826a5e4c29b0e291c479 | 3,271
import sys
def alpha_034(code, end_date=None, fq="pre"):
"""
    Formula:
    MEAN(CLOSE,12)/CLOSE
    Inputs:
    code: stock pool
    end_date: query date
    Outputs:
    the value of the factor
"""
end_date = to_date_str(end_date)
func_name = sys._getframe().f_code.co_name
return JQDataClient.instance().get_alpha_191(**locals()) | 1f251db49bb5cb6a0326af5cf2d07c4bbef2144a | 3,272 |
import numbers
import numpy as np
def unscale_parameter(value: numbers.Number,
petab_scale: str) -> numbers.Number:
"""Bring parameter from scale to linear scale.
:param value:
Value to scale
:param petab_scale:
Target scale of ``value``
:return:
``value`` on linear scale
"""
if petab_scale == LIN:
return value
if petab_scale == LOG10:
return np.power(10, value)
if petab_scale == LOG:
return np.exp(value)
raise ValueError(f"Unknown parameter scale {petab_scale}. "
f"Must be from {(LIN, LOG, LOG10)}") | f04156220e8a39c31473507a60fee3d5185bda0c | 3,273 |
import nltk
def perturb(sentence, bertmodel, num):
"""Generate a list of similar sentences by BERT
Arguments:
sentence: Sentence which needs to be perturbed
bertModel: MLM model being used (BERT here)
num: Number of perturbations required for a word in a sentence
"""
# Tokenize the sentence
    tokens = tokenizer.tokenize(sentence)
pos_inf = nltk.tag.pos_tag(tokens)
# the elements in the lists are tuples <index of token, pos tag of token>
bert_masked_indexL = list()
# collect the token index for substitution
for idx, (word, tag) in enumerate(pos_inf):
if (tag.startswith("JJ") or tag.startswith("JJR") or tag.startswith("JJS")
or tag.startswith("PRP") or tag.startswith("PRP$") or tag.startswith("RB")
or tag.startswith("RBR") or tag.startswith("RBS") or tag.startswith("VB") or
tag.startswith("VBD") or tag.startswith("VBG") or tag.startswith("VBN") or
tag.startswith("VBP") or tag.startswith("VBZ") or tag.startswith("NN") or
tag.startswith("NNS") or tag.startswith("NNP") or tag.startswith("NNPS")):
tagFlag = tag[:2]
if (idx!=0 and idx!=len(tokens)-1):
bert_masked_indexL.append((idx, tagFlag))
bert_new_sentences = list()
# generate similar setences using Bert
if bert_masked_indexL:
        bert_new_sentences = perturbBert(sentence, bertmodel, num, bert_masked_indexL)
return bert_new_sentences | 598ed7e37185de6bf2a977c226bb58677684772d | 3,274 |
import logging
def discovery_dispatch(task: TaskRequest) -> TaskResponse:
"""Runs appropriate discovery function based on protocol
Args:
task (TaskRequest): namedtuple
Returns:
TaskResponse[str, dict[str, str|int|bool|list]]
"""
task = TaskRequest(*task)
proto = constant.Proto(task.proto)
logging.info(
"Dispatching: host=%s, hostname=%s, proto=%s",
task.host,
task.hostname,
proto,
)
discoverer = get_discovery(proto)
device = discoverer(
host=task.host,
hostname=task.hostname,
sysinfo=task.sysinfo,
extra=task.extra,
**task.kwargs,
)
logging.info("Dispatch received response from %s", task.host)
return TaskResponse(task.host, device.dump()) | 3fe6394cf81fdb3e25343df27479f4b4ab3033fa | 3,275 |
import arrow
def get_free_times(busy_times, begin_date, end_date):
"""
Gets a list of free times calculated from a list of busy times.
:param busy_times: is the list of busy times in ascending order.
:param begin_date: is the start of the selected time interval.
:param end_date: is the end of the selected time interval.
:return: a list of free times.
"""
free_times = []
busy_times_original = busy_times
begin_date = arrow.get(begin_date).replace(hour=9)
end_date = arrow.get(end_date).replace(hour=17)
# print('free times')
if len(busy_times) == 0:
free_times.append((begin_date.isoformat(), end_date.isoformat()))
else:
begin_date_end = begin_date.replace(hour=17)
begin_day = begin_date.format('YYYYMMDD')
begin_time = '09:00'
end_time = '17:00'
end_date_start = arrow.get(end_date).replace(hour=9)
end_day = end_date.format('YYYYMMDD')
stored_event = busy_times[0]
busy_times = busy_times[1:]
if len(busy_times) == 0:
stored_event_start = arrow.get(stored_event['start']['dateTime'])
stored_event_end = arrow.get(stored_event['end']['dateTime'])
if (stored_event_start == begin_date and
stored_event_end < begin_date_end):
free_times.append((stored_event_end.isoformat(),
end_date.isoformat()))
elif (stored_event_end == end_date and
stored_event_start > end_date_start):
free_times.append((begin_date.isoformat(),
stored_event_start.isoformat()))
elif (stored_event_start > begin_date and
stored_event_end < end_date):
free_times.append((begin_date.isoformat(),
stored_event_start.isoformat()))
free_times.append((stored_event_end.isoformat(),
end_date.isoformat()))
for event in busy_times:
event_start = arrow.get(event['start']['dateTime'])
event_end = arrow.get(event['end']['dateTime'])
event_start_time = event_start.format('HH:mm')
event_end_time = event_end.format('HH:mm')
event_end_day = event_end.format('YYYYMMDD')
stored_event_start = arrow.get(stored_event['start']['dateTime'])
stored_event_start_time = stored_event_start.format('HH:mm')
stored_event_start_day = arrow.get(
stored_event['start']['dateTime']).format('YYYYMMDD')
stored_event_end = stored_event['end']['dateTime']
stored_event_end_time = arrow.get(stored_event_end).format('HH:mm')
event_start = event_start.isoformat()
# starting free time on begin day after start of day
if (stored_event_start_day == begin_day and
stored_event_start_time > begin_time):
free_times.append((begin_date.isoformat(),
stored_event_start.isoformat()))
# print('0 {} - {}'.format(begin_date.isoformat(),
# stored_event_start.isoformat()))
# middle free times
if (stored_event_end < event_start and
(stored_event_end, event_start) not in free_times):
if event_start_time == '09:00':
event_start = arrow.get(
event['start']['dateTime']).replace(
days=-1, hour=17).isoformat()
if stored_event_end_time == '17:00':
stored_event_end = arrow.get(
stored_event_end).replace(days=+1,
hour=START_TIME).isoformat()
free_times.append((stored_event_end, event_start))
# print('1 {} - {}'.format(stored_event_end,
# event_start))
# ending free time
if (event_end_day == end_day and
event_end_time != end_time):
free_times.append((event_end.isoformat(), end_date.isoformat()))
# print('2 {} - {}'.format(event_end.isoformat(),
# end_date.isoformat()))
# ending free time for final events that end before end_date
if (busy_times.index(event) == len(busy_times) - 1 and
event_end < end_date):
if event_end_time == '17:00':
event_end = event_end.replace(days=+1, hour=START_TIME)
free_times.append((event_end.isoformat(), end_date.isoformat()))
# print('3 {} - {}'.format(event_end.isoformat(),
# end_date.isoformat()))
# starting free time not on begin day
if (arrow.get(free_times[0][0]) != begin_date and
stored_event_start != begin_date and
begin_date != arrow.get(
busy_times_original[0]['start']['dateTime'])):
free_times.insert(0, (begin_date.isoformat(),
stored_event_start.isoformat()))
# print('4 {} - {}'.format(begin_date.isoformat(),
# stored_event_start.isoformat()))
stored_event = event
# print()
# print('free times')
# for time in free_times:
# print(time)
return free_times | 95f33c22e28e9ed7bc299ac966767a2292cf6d7b | 3,276 |
import datetime
import pytz
def upstream_has_data(valid):
"""Does data exist upstream to even attempt a download"""
utcnow = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
# NCEP should have at least 24 hours of data
return (utcnow - datetime.timedelta(hours=24)) < valid | e222ca16820f2e9030170877f8e2ae4faff8d5b7 | 3,277 |
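# Illustrative usage: a timestamp 12 hours old falls inside NCEP's 24-hour window.
upstream_has_data(datetime.datetime.now(pytz.utc) - datetime.timedelta(hours=12))  # -> True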
import numpy as np
def encode_array(x, base=2, **kwds):
"""Encode array of integer-symbols.
Parameters
----------
x : (N, k) array_like
Array of integer symbols.
base : int
Encoding base.
**kwds :
Keyword arguments passed to :py:func:`numpy.ravel`.
Returns
-------
int
Integer code of an array.
"""
seq = np.ravel(x, **kwds)
return encode_sequence(seq, base=base) | b16546350638967dd60812b98295ffc4c95abd4d | 3,278 |
import itertools
def str_for_model(model: Model, formatting: str = "plain", include_params: bool = True) -> str:
"""Make a human-readable string representation of Model, listing all random variables
and their distributions, optionally including parameter values."""
all_rv = itertools.chain(model.unobserved_RVs, model.observed_RVs, model.potentials)
rv_reprs = [rv.str_repr(formatting=formatting, include_params=include_params) for rv in all_rv]
rv_reprs = [rv_repr for rv_repr in rv_reprs if "TransformedDistribution()" not in rv_repr]
if not rv_reprs:
return ""
if "latex" in formatting:
rv_reprs = [
rv_repr.replace(r"\sim", r"&\sim &").strip("$")
for rv_repr in rv_reprs
if rv_repr is not None
]
return r"""$$
\begin{{array}}{{rcl}}
{}
\end{{array}}
$$""".format(
"\\\\".join(rv_reprs)
)
else:
# align vars on their ~
names = [s[: s.index("~") - 1] for s in rv_reprs]
distrs = [s[s.index("~") + 2 :] for s in rv_reprs]
maxlen = str(max(len(x) for x in names))
rv_reprs = [
("{name:>" + maxlen + "} ~ {distr}").format(name=n, distr=d)
for n, d in zip(names, distrs)
]
return "\n".join(rv_reprs) | 89711e4fd12572339a501698c39fc8b81deca8a3 | 3,279 |
from typing import Callable
from typing import Optional
from typing import Union
def get_device(
raw_data: dict, control_data: dict, request: Callable
) -> Optional[
Union[
HomeSeerDimmableDevice,
HomeSeerFanDevice,
HomeSeerLockableDevice,
HomeSeerStatusDevice,
HomeSeerSwitchableDevice,
HomeSeerCoverDevice,
HomeSeerSetPointDevice
]
]:
"""
Parses control_data to return an appropriate device object
based on the control pairs detected for the device.
On/Off = HomeSeerSwitchableDevice
On/Off/Dim = HomeSeerDimmableDevice
On/Off/Fan = HomeSeerFanDevice
Lock/Unlock = HomeSeerLockableDevice
other = HomeSeerStatusDevice
"""
item = next((x for x in control_data if x["ref"] == raw_data["ref"]), None)
supported_features = get_supported_features(item)
return build_device(raw_data, item, request, supported_features) | 616c16e749fef7dc45539a7eb8bdbc9f11d3edd1 | 3,280 |
def wait():
"""
Gets the New Block work unit to send to clients
"""
return _event.get() | d24047d92b3774c675369ee739ec697ab23f0fea | 3,281 |
from typing import Optional
import tables
def process(msd_id: str, counter: AtomicCounter) -> Optional[dict]:
"""
Processes the given MSD id and increments the counter. The
method will find and return the artist.
:param msd_id: the MSD id to process
:param counter: the counter to increment
    :return: the dictionary containing the MSD id and the artist, or None if
        the file cannot be processed (the exception is caught and printed)
"""
try:
with tables.open_file(msd_id_to_h5(msd_id, args.path_dataset_dir)) as h5:
artist = h5.root.metadata.songs.cols.artist_name[0].decode("utf-8")
return {"msd_id": msd_id, "artist": artist}
except Exception as e:
print(f"Exception during processing of {msd_id}: {e}")
finally:
counter.increment() | 6bd93bf72a7ecfa6ddb41557b0550a629b9612f4 | 3,282 |
import pandas as pd
def choose_run(D, var2align, run):
"""Get input for the alignment.
Do it by indicating a run to align to.
Args:
D (pd.DataFrame): DataFrame containing columns 'id', 'run', and ...
var2align (str): Name of the column to align.
run (whatever): The run to align to.
Returns:
tuple of pd.DataFrames: The data ready for alignment and the remainder.
"""
X = D[['id', 'run', var2align]] # subselect the data for alignment
X.columns = ['id', 'run', 'x']
ref = X.loc[X.run == run] # the reference peptides
other = X.loc[X.run != run] # all other peptides
# we can align peptides in other runs only to those found in chosen run.
alignable_idx = other.id.isin(set(other.id) & set(ref.id))
X = other.loc[alignable_idx,]
unalignable = other.loc[~alignable_idx,]
ref = ref[['id','x']].set_index('id')
ref.columns = ['y']
X = pd.concat([X.set_index('id'), ref], axis=1, join='inner')
return X, unalignable | 54fc84e61b3874219d473659c85bd369b367a05d | 3,283 |
import os
def keyring(homedir, monkeypatch, scope='module'):
"""Default keyring, using the test profile"""
monkeypatch.setattr(os.path, "expanduser", lambda d: homedir)
kr = S3Keyring(profile_name='test')
kr.configure(ask=False)
return kr | c3c5c1a77148338a4457a7a27db7e4bbfb837acf | 3,284 |
import numpy as np
import torch as th
def stack(tensor_list, axis=0):
"""
This function is the same as torch.stack but handles both
numpy.ndarray and torch.Tensor
:param tensor_list:
:param axis:
:return:
"""
if isinstance(tensor_list[0], th.Tensor):
return th.stack(tensor_list, axis)
else:
return np.stack(tensor_list, axis) | 9d8e5d8fbd9f89acb40ada362d0ae8d4913df939 | 3,285 |
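# Illustrative usage: the helper dispatches on the element type.
stack([np.array([1, 2]), np.array([3, 4])])    # -> ndarray of shape (2, 2)
stack([th.tensor([1, 2]), th.tensor([3, 4])])  # -> tensor of shape (2, 2)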
def alias(alias):
"""Select a single alias."""
return {'alias': alias} | 35364346da4d7b1f6de2d7ba6e0b5721b6bef1dd | 3,286 |
import torch.nn as nn
def model_creator(config):
"""Constructor function for the model(s) to be optimized.
You will also need to provide a custom training
function to specify the optimization procedure for multiple models.
Args:
config (dict): Configuration dictionary passed into ``PyTorchTrainer``.
Returns:
One or more torch.nn.Module objects.
"""
return nn.Linear(1, 1) | 81909a284bddd83a62c8c9adacfbe75cf46650bd | 3,287 |
import numbers
def ensure_r_vector(x):
"""Ensures that the input is rendered as a vector in R.
It is way more complicated to define an array in R than in Python because an array
in R cannot end with an comma.
Examples
--------
>>> ensure_r_vector("string")
"c('string')"
>>> ensure_r_vector(1)
'c(1)'
>>> ensure_r_vector(list("abcd"))
"c('a', 'b', 'c', 'd')"
>>> ensure_r_vector((1, 2))
'c(1, 2)'
"""
if isinstance(x, str):
out = f"c('{x}')"
elif isinstance(x, numbers.Number):
out = f"c({x})"
elif isinstance(x, (tuple, list)):
mapped = map(lambda l: str(l) if isinstance(l, numbers.Number) else f"'{l}'", x)
concatenated = ", ".join(mapped)
out = f"c({concatenated})"
else:
raise NotImplementedError(
f"'ensure_r_vector' is not defined for dtype {type(x)}"
)
return out | 14fdeb6bf73244c69d9a6ef89ba93b33aa4a66d8 | 3,288 |
from io import BytesIO
from typing import Optional
from PIL import Image, UnidentifiedImageError
def open_and_prepare_avatar(image_bytes: Optional[bytes]) -> Optional[Image.Image]:
"""Opens the image as bytes if they exist, otherwise opens the 404 error image. then circular crops and resizes it"""
if image_bytes is not None:
try:
with Image.open(BytesIO(image_bytes)) as im:
prepared_image = crop_circular_border_w_transparent_bg(im)
prepared_image = resize_image(prepared_image)
except UnidentifiedImageError as e:
log.error("Error loading Avatar", exc_info=e)
return None
else:
with Image.open("resources/404 Avatar Not Found.png") as im:
prepared_image = crop_circular_border_w_transparent_bg(im)
prepared_image = resize_image(prepared_image)
return prepared_image | f5b4543f64b15180deed3cb8e672a3e1b96956f7 | 3,289 |
def is_GammaH(x):
"""
Return True if x is a congruence subgroup of type GammaH.
EXAMPLES::
sage: from sage.modular.arithgroup.all import is_GammaH
sage: is_GammaH(GammaH(13, [2]))
True
sage: is_GammaH(Gamma0(6))
True
sage: is_GammaH(Gamma1(6))
True
sage: is_GammaH(sage.modular.arithgroup.congroup_generic.CongruenceSubgroup(5))
False
"""
return isinstance(x, GammaH_class) | 9cfba55901a45d4482b6926673bfb87fabc88030 | 3,290 |
def _run_with_interpreter_if_needed(fuzzer_path, args, max_time):
"""Execute the fuzzer script with an interpreter, or invoke it directly."""
interpreter = shell.get_interpreter(fuzzer_path)
if interpreter:
executable = interpreter
args.insert(0, fuzzer_path)
else:
executable = fuzzer_path
runner = new_process.UnicodeProcessRunner(executable)
return runner.run_and_wait(timeout=max_time, additional_args=args) | 3739db213571ed00c5e026f9a768ca610e0ac318 | 3,291 |
def remove_vol(im_in, index_vol_user, todo):
"""
Remove specific volumes from 4D data.
:param im_in: [str] input image.
:param index_vol: [int] list of indices corresponding to volumes to remove
:param todo: {keep, remove} what to do
:return: 4d volume
"""
# get data
data = im_in.data
nt = data.shape[3]
# define index list of volumes to keep/remove
if todo == 'remove':
index_vol = [i for i in range(0, nt) if i not in index_vol_user]
elif todo == 'keep':
index_vol = index_vol_user
else:
printv('ERROR: wrong assignment of variable "todo"', 1, 'error')
# define new 4d matrix with selected volumes
data_out = data[:, :, :, index_vol]
# save matrix inside new Image object
im_out = im_in.copy()
im_out.data = data_out
return im_out | 4e5ffe6ade64f4c9dbabf5e090662711bdf926f7 | 3,292 |
import numpy as np
def cost_logistic(p, x, y):
"""
Sum of absolute deviations of obs and logistic function L/(1+exp(-k(x-x0)))
Parameters
----------
p : iterable of floats
parameters (`len(p)=3`)
`p[0]` L - Maximum of logistic function
`p[1]` k - Steepness of logistic function
`p[2]` x0 - Inflection point of logistic function
x : float or array_like of floats
independent variable
y : float or array_like of floats
dependent variable, observations
Returns
-------
float
sum of absolute deviations
"""
return np.sum(np.abs(y-logistic_p(x,p))) | 32b89ef7d33d49b7af63c8d11afffeb641b12de1 | 3,293 |
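# Illustrative usage; logistic_p is a helper assumed by cost_logistic and is
# sketched here from the docstring as L / (1 + exp(-k * (x - x0))).
def logistic_p(x, p):
    return p[0] / (1.0 + np.exp(-p[1] * (x - p[2])))
x = np.linspace(-5.0, 5.0, 50)
p_true = [1.0, 2.0, 0.0]
cost_logistic(p_true, x, logistic_p(x, p_true))  # -> 0.0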
import datetime
import numpy as np
def estimate_dt(time_array):
"""Automatically estimate timestep in a time_array
Args:
time_array ([list]): List or dataframe with time entries
Returns:
dt ([datetime.timedelta]): Timestep in dt.timedelta format
"""
if len(time_array) < 2:
# Assume arbitrary value
return datetime.timedelta(seconds=0)
dt = np.median(np.diff(time_array))
if not isinstance(dt, datetime.timedelta):
dt = datetime.timedelta(seconds=dt.astype(float)/1e9)
# Check if data is all ascending
if dt <= datetime.timedelta(0):
raise UserWarning('Please only insert time ascending data.')
return dt | 6e6b8dcd4d2d85b4bfb97137294774bb4bcc2673 | 3,294 |
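# Illustrative usage with nanosecond epoch values (the unit assumed by the
# seconds conversion above, as in pandas datetime64[ns] data).
estimate_dt(np.array([0.0, 1e9, 2e9]))  # -> datetime.timedelta(seconds=1)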
import uuid
def gen_uuid() -> str:
    """
    Generate a UUID.
    :return: uuid hex string
    """
    return uuid.uuid4().hex | 82fd4fa7a3e39cc0c91ab16be3cf0c6a3f63eb3d | 3,295
import inspect
def make_signature(arg_names, member=False):
    """Make Signature object from argument name iterable or str."""
    kind = inspect.Parameter.POSITIONAL_OR_KEYWORD
    if isinstance(arg_names, str):
        arg_names = [name.strip() for name in arg_names.split(',')]
    else:
        arg_names = list(arg_names)
    if member and arg_names and arg_names[0] != 'self':
        arg_names = ['self'] + arg_names
    return inspect.Signature([inspect.Parameter(n, kind) for n in arg_names]) | 2730e50ea68e6fe2942c629caa3b3119aea9a325 | 3,296
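# Illustrative usage:
make_signature('x, y')                   # -> <Signature (x, y)>
make_signature(['a', 'b'], member=True)  # -> <Signature (self, a, b)>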
def set_trace_platform(*args):
"""
set_trace_platform(platform)
Set platform name of current trace.
@param platform (C++: const char *)
"""
return _ida_dbg.set_trace_platform(*args) | 9f581018960cdd0949ca41750286eddf1fa43741 | 3,297 |
def leapfrog2(init, tspan, a, beta, omega, h):
"""
Integrate the damped oscillator with damping factor a using single step
Leapfrog for separable Hamiltonians.
"""
f = forcing(beta, omega)
return sym.leapfrog(init, tspan, h, lambda x, p, t: -x-a*p+f(t)) | a8eebe1ee7f50c87e515c2c5cca0bdc30605dc8f | 3,298 |
def get_paths(config, action, dir_name):
"""
Returns 'from' and 'to' paths.
@param config: wrapsync configuration
@param action: 'push'/'pull'
@param dir_name: name of the directory to append to paths from the config
@return: dictionary containing 'from' and 'to' paths
"""
path_from = ''
path_to = ''
if action == 'push':
if dir_name == 'all':
path_from = build_local_path(config, False)
path_to = build_remote_path(config, True)
else:
path_from = f"{build_local_path(config, False)}/{dir_name}"
path_to = build_remote_path(config, False)
else:
if dir_name == 'all':
path_from = build_remote_path(config, False)
path_to = build_local_path(config, True)
else:
path_from = f"{build_remote_path(config, False)}/{dir_name}"
path_to = build_local_path(config, False)
return {
'from': path_from,
'to': path_to
} | f03ee64a76bafcf832f8dddcdcb4f16c28529c5c | 3,299 |