content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M)
---|---|---|
import numpy as np
from collections import OrderedDict
import vigra
import os
import ilastik_main
from ilastik.applets.dataSelection import DatasetInfo
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
def classify_pixel(input_data, classifier, threads=8, ram=4000):
"""
Runs a pre-trained ilastik classifier on a volume of data
Adapted from Stuart Berg's example here:
https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py
Arguments:
input_data: data to be classified - 3D numpy array
classifier: ilastik trained/classified file
threads: number of threads to use for classifying input data
ram: RAM to use in MB
Returns:
predictions: array of per-pixel predictions produced by the classifier
"""
# Before we start ilastik, prepare these environment variable settings.
os.environ["LAZYFLOW_THREADS"] = str(threads)
os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)
# Set the command-line arguments directly into argparse.Namespace object
# Provide your project file, and don't forget to specify headless.
args = ilastik_main.parser.parse_args([])
args.headless = True
args.project = classifier
# Instantiate the 'shell', (an instance of ilastik.shell.HeadlessShell)
# This also loads the project file into shell.projectManager
shell = ilastik_main.main(args)
assert isinstance(shell.workflow, PixelClassificationWorkflow)
# Obtain the training operator
opPixelClassification = shell.workflow.pcApplet.topLevelOperator
# Sanity checks
assert len(opPixelClassification.InputImages) > 0
assert opPixelClassification.Classifier.ready()
# "Batch process" the supplied input volume
print("input_data.shape", input_data.shape)
# The input is a 3D volume; tagging the data as 'xyz' ensures that ilastik
# interprets the axes correctly.
input_data = vigra.taggedView(input_data, 'xyz')
# In case you're curious about which label class is which,
# let's read the label names from the project file.
label_names = opPixelClassification.LabelNames.value
label_colors = opPixelClassification.LabelColors.value
probability_colors = opPixelClassification.PmapColors.value
print("label_names, label_colors, probability_colors", label_names, label_colors, probability_colors)
# Construct an OrderedDict of role-names -> DatasetInfos
# (See PixelClassificationWorkflow.ROLE_NAMES)
role_data_dict = OrderedDict([("Raw Data",
[DatasetInfo(preloaded_array=input_data)])])
# Run the export via the BatchProcessingApplet
# Note: If you don't provide export_to_array, then the results will
# be exported to disk according to project's DataExport settings.
# In that case, run_export() returns None.
predictions = shell.workflow.batchProcessingApplet.\
run_export(role_data_dict, export_to_array=True)
predictions = np.squeeze(predictions)
print("predictions.dtype, predictions.shape", predictions.dtype, predictions.shape)
print("DONE.")
return predictions
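# Hedged usage sketch (illustrative, not from the original source): assumes a trained
# ilastik project file named "my_pixel_classifier.ilp" exists and that the volume's
# axes really are ordered x, y, z as tagged above.
# volume = np.zeros((256, 256, 64), dtype=np.uint8)
# probabilities = classify_pixel(volume, "my_pixel_classifier.ilp", threads=4, ram=2000)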
|
7a9aaa137e64d3bc7db49404eeaec7bb8c83d9d1
| 20,300 |
def invite_accepted_candidates():
"""Invites accepted candidates to create an account and set their own password."""
form = InviteAcceptedCandidatesForm()
if form.validate_on_submit():
selected = [ Candidate.query.filter_by(id=c).first() for c in form.selected_candidates.data.split(',') ]
user_role = Role.query.filter_by(name='User').first()
# for each selected candidate create a new user account
for candidate in selected:
user = User.query.filter_by(email=candidate.email).first()
if user is None:
user = User(
role=user_role,
first_name=candidate.first_name,
last_name=candidate.last_name,
email=candidate.email,
candidate=candidate)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
invite_link = url_for(
'account.join_from_invite',
user_id=user.id,
token=token,
_external=True)
get_queue().enqueue(
send_email,
recipient=user.email,
subject='You Are Invited To Join',
template='account/email/invite',
user=user,
invite_link=invite_link)
invited_names = ', '.join(c.first_name + ' ' + c.last_name for c in selected)
flash('Candidates {} successfully invited'.format(invited_names),
'form-success')
return render_template('admin/invite_accepted_candidates.html', form=form, all_terms=Term.query.order_by(Term.end_date.desc()).all(), accepted_candidates=Candidate.query.filter_by(status=Status.ASSIGNED).all())
|
7e4165f774f4594afa52cc97420c8b212175c6c9
| 20,301 |
def ramp_overlap(x0, y0, w0, h0, angle0, x1, y1, w1, h1, angle1):
"""Calculates the overlap area between two ramps."""
# Check if bounding spheres do not intersect.
dw0 = _rect_diagonal(w0, h0)
dw1 = _rect_diagonal(w1, h1)
if not bounding_circles_overlap(x0, y0, dw0, x1, y1, dw1):
return 0.
# Check if bounding boxes do not intersect.
x0_begin, x0_end, y0_begin, y0_end = rect_bounding_frame(
x0, y0, w0, h0, angle0)
x1_begin, x1_end, y1_begin, y1_end = rect_bounding_frame(
x1, y1, w1, h1, angle1)
if not bounding_box_overlap(x0_begin, x0_end, y0_begin, y0_end,
x1_begin, x1_end, y1_begin, y1_end):
return 0.
# Otherwise, calculate proper intersection.
rect_1 = _build_shapely_ramp(x0, y0, w0, h0, angle0)
rect_2 = _build_shapely_ramp(x1, y1, w1, h1, angle1)
return rect_1.intersection(rect_2).area
|
78b261bacc4d90d40e2fc80e9f665d80de6b7574
| 20,302 |
def url_external(path, query):
"""Generate external URLs with HTTPS (if configured)."""
try:
api_url = request.url_root
if settings.URL_SCHEME is not None:
parsed = urlparse(api_url)
parsed = parsed._replace(scheme=settings.URL_SCHEME)
api_url = parsed.geturl()
if query is not None:
path = path + query_string(query)
return urljoin(api_url, path)
except RuntimeError:
return None
|
27e73834e47a08b4e22fe89587974da386826627
| 20,303 |
def get_bucket_metadata(bucket, user_settings=None, access=ServiceAccount.STORAGE):
"""
Retrieves metadata about the given bucket.
:param str bucket: name of the Google cloud storage bucket
:param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
If one is not provided, then this method must be invoked by an EngineThread
which defines the settings
:param access: must be 'storage'. Other values are for internal use only
:return: the response obtained from the buckets.get API call (the bucket's metadata)
"""
if access == ServiceAccount.STORAGE:
service = CredentialManager.get_client_storage_service(user_settings)
elif access == ServiceAccount.EARTH_ENGINE:
service = CredentialManager.get_earth_engine(user_settings)
else:
service = CredentialManager.get_server_storage_service(user_settings)
# Make a request to buckets.get to retrieve a list of objects in the
# specified bucket.
req = service.buckets().get(bucket=bucket)
return req.execute(num_retries=3)
|
9a587cf4ebba94499d21b7b734586dfd9228aa23
| 20,304 |
import re
def remove_conjunction(conjunction: str, utterance: str) -> str:
"""Remove the specified conjunction from the utterance.
For example, remove the " and" left behind from extracting "1 hour" and "30 minutes"
from "for 1 hour and 30 minutes". Leaving it behind can confuse other intent
parsing logic.
Args:
conjunction: translated conjunction (like the word "and") to be
removed from utterance
utterance: Full request, e.g. "set a 30 second timer"
Returns:
The utterance with the conjunction removed.
"""
pattern = r"\s\s{}".format(conjunction)
remaining_utterance = re.sub(pattern, "", utterance, flags=re.IGNORECASE)
return remaining_utterance
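# Illustrative call (an assumption, not from the source): the \s\s pattern targets the
# double space left behind once the duration words have been stripped out.
leftover = remove_conjunction("and", "for  and 30 minutes")
# leftover == "for 30 minutes"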
|
67313565c7da2eadc4411854d6ee6cb467ee7159
| 20,305 |
def othertitles(hit):
"""Split a hit.Hit_def that contains multiple titles up, splitting out the hit ids from the titles."""
id_titles = hit.Hit_def.text.split('>')
titles = []
for t in id_titles[1:]:
fullid, title = t.split(' ', 1)
hitid, id = fullid.split('|', 2)[1:3]
titles.append(dict(id = id,
hitid = hitid,
fullid = fullid,
title = title))
return titles
|
fa5bbb47d26adbc61817e78e950e81cc05eca4a6
| 20,306 |
import subprocess
def unlock_device(password=None, device=None) -> bool:
"""
Unlocks a device given a device name and the password
:param password: the unlock password/PIN to type on the device
:param device: adb device serial; optional when exactly one device is connected
:return: True if success, False if error
"""
command_input = ["adb", "-s", device, "shell", "input", "text", password]
command_submit = ["adb", "-s", device, "shell", "input", "keyevent", 66]
if device is None and len(get_connected_devices()) != 1:
print("No device was specified and/or multiple devices are connected")
return False
if device is None:
command_input.pop(1)
command_input.pop(1)
command_submit.pop(1)
command_submit.pop(1)
p = subprocess.Popen(command_input, stdout=None)
p.wait()
p.terminate()
p1 = subprocess.Popen(command_submit, stdout=None)
p1.wait()
p1.terminate()
return True
|
cd591ccaeb49045845dbeb5152fdbb7d4846c63c
| 20,307 |
import numpy as np
import pandas as pd
import mne
from ..preprocessing.windowers import _check_windowing_arguments
def create_from_mne_epochs(list_of_epochs, window_size_samples,
window_stride_samples, drop_last_window):
"""Create WindowsDatasets from mne.Epochs
Parameters
----------
list_of_epochs: array-like
list of mne.Epochs
window_size_samples: int
window size
window_stride_samples: int
stride between windows
drop_last_window: bool
whether to drop the last window when the windows do not evenly divide the
continuous signal; if False, a final (possibly overlapping) window is appended
Returns
-------
windows_datasets: BaseConcatDataset
X and y transformed to a dataset format that is compatible with skorch
and braindecode
"""
# Prevent circular import
_check_windowing_arguments(0, 0, window_size_samples,
window_stride_samples)
list_of_windows_ds = []
for epochs in list_of_epochs:
event_descriptions = epochs.events[:, 2]
original_trial_starts = epochs.events[:, 0]
stop = len(epochs.times) - window_size_samples
# already includes last incomplete window start
starts = np.arange(0, stop + 1, window_stride_samples)
if not drop_last_window and starts[-1] < stop:
# if last window does not end at trial stop, make it stop there
starts = np.append(starts, stop)
fake_events = [[start, window_size_samples, -1] for start in
starts]
for trial_i, trial in enumerate(epochs):
metadata = pd.DataFrame({
'i_window_in_trial': np.arange(len(fake_events)),
'i_start_in_trial': starts + original_trial_starts[trial_i],
'i_stop_in_trial': starts + original_trial_starts[
trial_i] + window_size_samples,
'target': len(fake_events) * [event_descriptions[trial_i]]
})
# window size - 1, since tmax is inclusive
mne_epochs = mne.Epochs(
mne.io.RawArray(trial, epochs.info), fake_events,
baseline=None,
tmin=0,
tmax=(window_size_samples - 1) / epochs.info["sfreq"],
metadata=metadata)
mne_epochs.drop_bad(reject=None, flat=None)
windows_ds = WindowsDataset(mne_epochs)
list_of_windows_ds.append(windows_ds)
return BaseConcatDataset(list_of_windows_ds)
|
42c7671f7b353db165a41403b760362c0ca15d74
| 20,308 |
def definstance(name, ty, expr):
"""
Arguments:
- `name`: a string
- `ty`: a type of the form ClassName(t1,...,tn)
"""
root, _ = root_app(root_clause(ty))
if root.info.is_class:
class_name = root.name
c = defexpr(name, expr, type=ty, unfold=[class_name])
conf.current_ctxt().class_instances[name] = c.type
conf.current_ctxt().hyps[name] = c.type
return c
else:
raise Exception("Error in definition of {0!s}:"\
"expected {1!s} to be a class name"\
.format(name, root))
|
1b692a9ac49bc6a68568ee232e6e516f83b64adf
| 20,309 |
import traceback
def do(*args, **kwargs):
"""
Function to perform steps defined under ``nornir:actions`` configuration
section at:
* Minion's configuration
* Minion's grains
* Minion's pillar data
* Master configuration (requires ``pillar_opts`` to be set to True in Minion
config file in order to work)
* File on master file system
To retrieve actions content Salt ``nr.do`` uses ``config.get`` execution module
function with ``merge`` key set to ``True``.
Each step definition requires these keywords to be defined:
* ``function`` - mandatory, name of any execution module function to run
* ``args`` - optional, any arguments to use with function
* ``kwargs`` - optional, any keyword arguments to use with function
* ``description`` - optional, used by ``dir`` to list action description
Any other keywords defined inside the step are ignored.
:param stop_on_error: (bool) if True (default) stops execution on error in step,
continues execution on error if False
:param filepath: (str) path to file with actions steps
:param default_renderer: (str) shebang string to render file using ``slsutil.renderer``,
default ``jinja|yaml``
:param describe: (bool) if True, returns action content without executing it, default is False
:param kwargs: (any) additional ``kwargs`` to use with actions steps, ``kwargs`` override
``kwargs`` dictionary defined within each step, for example, in command
``salt nrp1 nr.do configure_ntp FB="*core*"``, ``FB`` argument will override ``FB`` arguments
defined within steps.
:param tf: (bool) if True, ``ToFileProcessor`` saves each step results in file
named after step name if no ``tf`` argument provided within step, default is False
:param diff: (bool) if True, ``DiffProcessor`` runs diff for each step result using files
named after step name if no ``diff`` argument provided within step, default is False
:returns: dictionary with keys: ``failed`` bool, ``result`` list; ``result`` key contains
a list of results for steps; If ``stop_on_error`` set to ``True`` and error happens, ``failed``
key set to ``True``
Special action names ``dir`` and ``dir_list`` used to list all actions available for
proxy minion where ``dir`` returns table and ``dir_list`` produces a list of actions.
.. note:: if ``filepath`` argument provided, actions defined in other places are ignored; file
loaded using Saltstack ``slsutil.renderer`` execution module function, as a result
file can contain any of Saltstack supported renderers content and can be located
at any url supported by ``cp.get_url`` execution module function. File content must
render to a dictionary keyed by actions names.
Sample actions steps definition using proxy minion pillar::
nornir:
actions:
awr:
function: nr.cli
args: ["wr"]
kwargs: {"FO": {"platform": "arista_eos"}}
description: "Save Arista devices configuration"
configure_ntp:
- function: nr.cfg
args: ["ntp server 1.1.1.1"]
kwargs: {"FB": "*"}
- function: nr.cfg
args: ["ntp server 1.1.1.2"]
kwargs: {"FB": "*"}
- function: nr.cli
args: ["show run | inc ntp"]
kwargs: {"FB": "*"}
Sample actions steps definition using text file under ``filepath``::
awr:
function: nr.cli
args: ["wr"]
kwargs: {"FO": {"platform": "arista_eos"}}
description: "Save Arista devices configuration"
configure_ntp:
- function: nr.cfg
args: ["ntp server 1.1.1.1"]
kwargs: {"FB": "*"}
description: "1. Configure NTP server 1.1.1.1"
- function: nr.cfg
args: ["ntp server 1.1.1.2"]
kwargs: {"FB": "*"}
description: "2. Configure NTP server 1.1.1.2"
- function: nr.cli
args: ["show run | inc ntp"]
kwargs: {"FB": "*"}
description: "3. Collect ntp configuration"
Action name ``awr`` has single step defined, while ``configure_ntp`` action has multiple
steps defined, each executed in order.
Multiple actions names can be supplied to ``nr.do`` call.
.. warning:: having a colon ``:`` as part of an action name is not permitted, as ``:`` is used by
Salt ``config.get`` execution module function to split arguments on path items.
Sample usage::
salt nrp1 nr.do dir
salt nrp1 nr.do dir_list
salt nrp1 nr.do awr
salt nrp1 nr.do configure_ntp awr stop_on_error=False
salt nrp1 nr.do configure_ntp FB="*core*" add_details=True
salt nrp1 nr.do awr filepath="salt://actions/actions_file.txt"
Sample Python API usage from Salt-Master::
import salt.client
client = salt.client.LocalClient()
task_result = client.cmd(
tgt="nrp1",
fun="nr.do",
arg=["configure_ntp", "awr"],
kwarg={"FB": "R[12]"},
)
"""
ret = {"failed": False, "result": []}
kwargs = {k: v for k, v in kwargs.items() if not k.startswith("__")}
stop_on_error = kwargs.pop("stop_on_error", True)
filepath = kwargs.pop("filepath", None)
default_renderer = kwargs.pop("default_renderer", "jinja|yaml")
describe = kwargs.pop("describe", False)
tf = kwargs.pop("tf", False)
diff = kwargs.pop("diff", False)
# load file if filepath provided
if filepath:
file_content_dict = __salt__["slsutil.renderer"](
path=filepath,
default_renderer=default_renderer,
)
if not file_content_dict:
ret["failed"] = True
ret["result"].append({filepath: "Failed loading filepath content."})
return ret
# check if need to list all actions
if "dir" in args or "dir_list" in args:
actions_config = (
__salt__["config.get"](key="nornir:actions", merge="recurse")
if not filepath
else file_content_dict
)
# iterate over actions and form brief list of them
for action_name, data in actions_config.items():
ret["result"].append(
{
"action name": action_name,
"description": data.get("description", "")
if isinstance(data, dict)
else "\n".join([i.get("description", "") for i in data]).strip(),
}
)
if "dir" in args:
ret["result"] = TabulateFormatter(
ret["result"],
tabulate={"tablefmt": "grid"},
headers=["action name", "description"],
)
return ret
# run actions
for action_name in args:
try:
if filepath:
action_config = file_content_dict.get(action_name)
else:
action_config = __salt__["config.get"](
key="nornir:actions:{}".format(action_name), merge="recurse"
)
if not action_config:
raise CommandExecutionError(
"'{}' action not loaded, content: '{}'".format(
action_name, action_config
)
)
elif describe:
ret["result"].append({action_name: action_config})
continue
elif isinstance(action_config, dict):
action_config = [action_config]
# run steps
for step in action_config:
# form step kwargs
merged_kwargs = step.get("kwargs", {})
merged_kwargs.update(kwargs)
# add tf ToFileProcessor name if tf_each is True
if tf is True:
merged_kwargs.setdefault("tf", action_name)
# add diff for DiffProcessor
if diff:
merged_kwargs.setdefault("diff", action_name)
# get fun name
fun_name = step["function"].split(".")[1].strip()
# run step
log.debug(
"salt_nornir:nr.do running step {}, args {}, kwargs {}".format(
fun_name, step.get("args", []), merged_kwargs
)
)
result = globals()[fun_name](*step.get("args", []), **merged_kwargs)
ret["result"].append({action_name: result})
except:
tb = traceback.format_exc()
log.error(
"nr.do error while running '{}' action:\n{}".format(action_name, tb)
)
ret["result"].append({action_name: tb})
if stop_on_error:
ret["failed"] = True
break
return ret
|
69ebb7939c1b8450935dd2e1fcdc14de40cebc0d
| 20,310 |
from typing import List
def read_plaintext_inputs(path: str) -> List[str]:
"""Read input texts from a plain text file where each line corresponds to one input"""
with open(path, 'r', encoding='utf8') as fh:
inputs = fh.read().splitlines()
print(f"Done loading {len(inputs)} inputs from file '{path}'")
return inputs
|
27b00f4dfcdf4d76e04f08b6e74c062f2f7374d0
| 20,311 |
def extract_tool_and_dsname_from_name(row):
"""
Extract a name such as "Basecall (MB1.6K)" into the tool name ("Basecall"), the
dataset tag ("MB1.6K") and the read count encoded in the tag, plus the parsed
resource-usage fields.
:param row: a row of the trace table
:return: toolname, dsname, reads, duration, realtime, cpu, peak_rss, peak_vmem, rchar, wchar
"""
try:
toolname, dsname = row['name'].strip().split(' ')
dsname = dsname[1:-1]
except: # No tag process
toolname = row['name'].strip()
dsname = 'None'
if not dsname.startswith('MB'): # not tagged with MB
dsname = 'None'
reads = 0
else:
reads = float(dsname[2:-1])
duration = parse_string_time_to_seconds(row['duration'])
realtime = parse_string_time_to_seconds(row['realtime'])
cpu = float(row['%cpu'][:-1])
peak_rss = parse_mem_str_to_gbsize(row['peak_rss'])
peak_vmem = parse_mem_str_to_gbsize(row['peak_vmem'])
rchar = parse_mem_str_to_gbsize(row['rchar'])
wchar = parse_mem_str_to_gbsize(row['wchar'])
return toolname, dsname, reads, duration, realtime, cpu, peak_rss, peak_vmem, rchar, wchar
|
f02b106da47544c522499af1ee8670870749fb20
| 20,312 |
def pyramid_pooling(inputs, layout='cna', filters=None, kernel_size=1, pool_op='mean', pyramid=(0, 1, 2, 3, 6),
flatten=False, name='psp', **kwargs):
""" Pyramid Pooling module. """
shape = inputs.get_shape().as_list()
data_format = kwargs.get('data_format', 'channels_last')
static_shape = np.array(shape[1: -1] if data_format == 'channels_last' else shape[2:])
dynamic_shape = tf.shape(inputs)[1: -1] if data_format == 'channels_last' else tf.shape(inputs)[2:]
axis = -1 if data_format == 'channels_last' else 1
num_channels = shape[axis]
if filters is None:
filters = num_channels // len(pyramid)
with tf.variable_scope(name):
layers = []
for level in pyramid:
if level == 0:
x = inputs
else:
# Pooling
if None not in static_shape:
x = _static_pyramid_pooling(inputs, static_shape, level, pool_op, name='pool-%d' % level)
upsample_shape = static_shape
else:
x = _dynamic_pyramid_pooling(inputs, level, pool_op, num_channels, data_format)
upsample_shape = dynamic_shape
# Conv block to set number of feature maps
x = ConvBlock(layout=layout, filters=filters, kernel_size=kernel_size,
name='conv-%d' % level, **kwargs)(x)
# Output either vector with fixed size or tensor with fixed spatial dimensions
if flatten:
x = tf.reshape(x, shape=(-1, level*level*filters),
name='reshape-%d' % level)
concat_axis = -1
else:
x = Upsample(layout='b', shape=upsample_shape, name='upsample-%d' % level, **kwargs)(x)
concat_axis = axis
layers.append(x)
x = tf.concat(layers, axis=concat_axis, name='concat')
return x
|
1484a686791cd017a53d182994e19b333ffc00b3
| 20,313 |
def df(r, gamma):
"""
divergence-free function
"""
eta = soft_threshold(r, gamma)
return eta - np.mean(eta != 0) * r
|
bf4d0a5d8bcbb5fa80b66d1dd555f15a44117319
| 20,314 |
def clip_3d_liang_barsky(zmin, zmax, p0, p1):
"""Clips the three-dimensional line segment in the canonial view volume by
the algorithm of Liang and Barsky. Adapted from James D. Foley, ed.,
__Computer Graphics: Principles and Practice__ (Reading, Mass. [u.a.]:
Addison-Wesley, 1998), 274 as well as
http://www.eecs.berkeley.edu/Pubs/TechRpts/1992/CSD-92-688.pdf.
Parameters
----------
zmin, zmax : float
p0, p1 : array (size 3) of float
the endpoints to be clipped (in-place operation)
Returns
-------
is_visible : bool
"""
x0, y0, z0 = p0
x1, y1, z1 = p1
# test for a trivial reject
if (x0 > z0 and x1 > z1) or (y0 > z0 and y1 > z1) or \
(x0 < -z0 and x1 < -z1) or (y0 < -z0 and y1 < -z1) or \
(z0 < zmin) and (z1 < zmin) or (z0 > zmax and z1 > zmax):
return False
tmin_tmax = np.array((0.0, 1.0))
dx = x1 - x0
dz = z1 - z0
if clip_t(-dx - dz, x0 + z0, tmin_tmax): # right side
if clip_t(dx - dz, -x0 + z0, tmin_tmax): # left side
# if we got this far, part of the line is in -z <= x <= z
dy = y1 - y0
if clip_t(dy - dz, -y0 + z0, tmin_tmax): # bottom
if clip_t(-dy - dz, y0 + z0, tmin_tmax): # top
# line is in -z <= x <= z, -z <= y <= z
if clip_t(-dz, z0 - zmin, tmin_tmax): # front
if clip_t(dz, zmax - z0, tmin_tmax): # back
# part of the line is visible in -z <= x <= z,
# -z <= y <= z, -1 <= z <= zmin
tmin, tmax = tmin_tmax
if tmax < 1:
p1[0] = x0 + tmax * dx
p1[1] = y0 + tmax * dy
p1[2] = z0 + tmax * dz
if tmin > 0:
p0[0] += tmin * dx
p0[1] += tmin * dy
p0[2] += tmin * dz
return True
return False
|
2ffcb60a4b2b13344f2255f5d5d1816199c666a4
| 20,315 |
def RunLatencyTest(sending_vm, receiving_vm, use_internal_ip=True):
"""Run the psping latency test.
Uses a TCP request-response time to measure latency.
Args:
sending_vm: the vm to send the tcp request.
receiving_vm: the vm acting as the server.
use_internal_ip: whether or not to use the private IP or the public IP.
Returns:
list of samples representing latency between the two VMs.
"""
server_ip = (receiving_vm.internal_ip if use_internal_ip
else receiving_vm.ip_address)
client_command = (
'cd {psping_exec_dir}; '
'sleep 2;' # sleep to make sure the server starts first.
'.\\psping.exe /accepteula -l {packet_size} -i 0 -q '
'-n {rr_count} -h {bucket_count} {ip}:{port}'
' > {out_file}').format(
psping_exec_dir=sending_vm.temp_dir,
packet_size=FLAGS.psping_packet_size,
rr_count=FLAGS.psping_rr_count,
bucket_count=FLAGS.psping_bucket_count,
ip=server_ip,
port=TEST_PORT,
out_file=PSPING_OUTPUT_FILE)
# PSPing does not have a configurable timeout. To get around this, start the
# server as a background job, then kill it after 10 seconds
server_command = (
'{psping_exec_dir}\\psping.exe /accepteula -s 0.0.0.0:{port};').format(
psping_exec_dir=sending_vm.temp_dir,
port=TEST_PORT)
process_args = [(_RunPsping, (receiving_vm, server_command), {}),
(_RunPsping, (sending_vm, client_command), {})]
background_tasks.RunParallelProcesses(process_args, 200, 1)
cat_command = 'cd {psping_exec_dir}; cat {out_file}'.format(
psping_exec_dir=sending_vm.temp_dir,
out_file=PSPING_OUTPUT_FILE)
output, _ = sending_vm.RemoteCommand(cat_command)
return ParsePspingResults(output, sending_vm, receiving_vm, use_internal_ip)
|
5f235f0adefe988be564f8e6dce7edfd4f292be4
| 20,316 |
def get_descriptor_list(stackdriver):
"""Return a list of all the stackdriver custom metric descriptors."""
type_map = stackdriver.descriptor_manager.fetch_all_custom_descriptors(
stackdriver.project)
descriptor_list = type_map.values()
descriptor_list.sort(compare_descriptor_types)
return descriptor_list
|
9b4b16f3d3b0330a786db0310f889f8ac132cb32
| 20,317 |
from random import uniform
def dankerize(string: str, upper_case_ratio=0.2) -> str:
"""
Transform a string to lower case, and randomly set some characters
to upper case and return the result.
string: the string to dankerize
upper_case_ratio: the upper_case/letter ratio
"""
ret = ""
for i in range(len(string)):
if uniform(0, 1.0) <= upper_case_ratio:
ret += string[i].upper()
else:
ret += string[i].lower()
return ret
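# Illustrative call: the output keeps the same characters but randomises their case,
# so only its length is deterministic.
print(dankerize("Hello World", upper_case_ratio=0.3))  # e.g. "heLlo wORld"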
|
55f186104166b0804cadae2df5fa19deaf36473b
| 20,318 |
def distance_constraints_too_complex(wordConstraints):
"""
Decide if the constraints on the distances between pairs
of search terms are too complex, i. e. if there is no single word
that all pairs include. If the constraints are too complex
and the "distance requirements are strict" flag is set,
the query will find some invalid results, so further (slow)
post-filtering is needed.
"""
if wordConstraints is None or len(wordConstraints) <= 0:
return False
commonTerms = None
for wordPair in wordConstraints:
if commonTerms is None:
commonTerms = set(wordPair)
else:
commonTerms &= set(wordPair)
if len(commonTerms) <= 0:
return True
return False
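# Illustrative calls with word-index pairs (the exact container type is an assumption;
# anything iterable over pairs works). The first set shares word 0, the second does not.
print(distance_constraints_too_complex([(0, 1), (0, 2)]))  # False
print(distance_constraints_too_complex([(0, 1), (2, 3)]))  # True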
|
43429fd64dbf5fa118e2cbf1e381686e1a8518c9
| 20,319 |
def greedy_search(model,
decoding_function,
initial_ids,
initial_memories,
int_dtype,
float_dtype,
max_prediction_length,
batch_size,
eos_id,
do_sample,
time_major):
""" Greedily decodes the target sequence conditioned on the output of the encoder and the current output prefix. """
# Declare time-dimension
time_dim = int(not time_major) # i.e. 0 if time_major, 1 if batch_major
# Define the 'body' for the tf.while_loop() call
def _decoding_step(current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories):
""" Defines a single step of greedy decoding. """
# Propagate through decoder
step_logits, memories = decoding_function(next_ids, current_time_step, memories)
step_logits = model.sampling_utils.adjust_logits(step_logits)
# Calculate log probabilities for token prediction at current time-step
step_scores = tf.nn.log_softmax(step_logits)
# Determine next token to be generated, next_ids has shape [batch_size]
if do_sample:
next_ids = tf.squeeze(tf.multinomial(step_scores, num_samples=1, output_dtype=int_dtype), axis=1)
else:
# Greedy decoding
next_ids = tf.argmax(step_scores, -1, output_type=int_dtype)
# Collect scores associated with the selected tokens
score_coordinates = tf.stack([tf.range(batch_size, dtype=int_dtype), next_ids], axis=1)
decoded_score += tf.gather_nd(step_scores, score_coordinates)
# Concatenate newly decoded token ID with the previously decoded ones
decoded_ids = tf.concat([decoded_ids, tf.expand_dims(next_ids, 1)], 1)
# Extend next_id's dimensions to be compatible with input dimensionality for the subsequent step
next_ids = tf.expand_dims(next_ids, time_dim)
# Check if generation has concluded with <EOS>
# all_finished |= tf.equal(tf.squeeze(next_ids, axis=time_dim), eos_id)
all_finished |= tf.equal(tf.reduce_prod(decoded_ids - eos_id, axis=time_dim), eos_id)
return current_time_step + 1, all_finished, next_ids, decoded_ids, decoded_score, memories
# Define the termination condition for the tf.while_loop() call
def _continue_decoding(_current_time_step, _all_finished, *_):
""" Returns 'False' if all of the sequences in the generated sequence batch exceeded the maximum specified
length or terminated with <EOS>, upon which the while loop is exited. """
continuation_check = \
tf.logical_and(tf.less(_current_time_step, max_prediction_length),
tf.logical_not(tf.reduce_all(_all_finished)))
return continuation_check
# Initialize decoding-relevant variables and containers
current_time_step = tf.constant(1)
all_finished = tf.fill([batch_size], False) # None of the sequences is marked as finished
next_ids = initial_ids
decoded_ids = tf.zeros([batch_size, 0], dtype=int_dtype) # Sequence buffer is empty
decoded_score = tf.zeros([batch_size], dtype=float_dtype)
memories = initial_memories
# Execute the auto-regressive decoding step via while loop
_, _, _, decoded_ids, log_scores, memories = \
tf.while_loop(cond=_continue_decoding,
body=_decoding_step,
loop_vars=[current_time_step, all_finished, next_ids, decoded_ids, decoded_score, memories],
shape_invariants=[tf.TensorShape([]),
tf.TensorShape([None]),
tf.TensorShape([None, None]),
tf.TensorShape([None, None]),
tf.TensorShape([None]),
get_memory_invariants(memories)],
parallel_iterations=10,
swap_memory=False,
back_prop=False)
# Should return logits also, for training
return decoded_ids, log_scores
|
3324a45ce13181ea55c8588e497864526272475d
| 20,320 |
import time
def database_mostcited(response: Response,
request: Request=Query(None, title=opasConfig.TITLE_REQUEST, description=opasConfig.DESCRIPTION_REQUEST),
morethan: int=Query(15, title=opasConfig.TITLE_CITED_MORETHAN, description=opasConfig.DESCRIPTION_CITED_MORETHAN),
period: str=Query('5', title="Period (5, 10, 20, or all)", description=opasConfig.DESCRIPTION_MOST_CITED_PERIOD),
pubperiod: int=Query(None, title=opasConfig.TITLE_PUBLICATION_PERIOD, description=opasConfig.DESCRIPTION_PUBLICATION_PERIOD),
author: str=Query(None, title=opasConfig.TITLE_AUTHOR, description=opasConfig.DESCRIPTION_AUTHOR),
title: str=Query(None, title=opasConfig.TITLE_TITLE, description=opasConfig.DESCRIPTION_TITLE),
sourcename: str=Query(None, title=opasConfig.TITLE_SOURCENAME, description=opasConfig.DESCRIPTION_SOURCENAME),
sourcecode: str=Query(None, title=opasConfig.TITLE_SOURCECODE, description=opasConfig.DESCRIPTION_SOURCECODE),
sourcetype: str=Query(None, title=opasConfig.TITLE_SOURCETYPE, description=opasConfig.DESCRIPTION_PARAM_SOURCETYPE),
abstract:bool=Query(False, title="Return an abstract with each match", description="True to return an abstract"),
stat:bool=Query(False, title="Return minimal information", description="True to return minimal information for statistical tables"),
limit: int=Query(10, title=opasConfig.TITLE_LIMIT, description=opasConfig.DESCRIPTION_LIMIT),
offset: int=Query(0, title=opasConfig.TITLE_OFFSET, description=opasConfig.DESCRIPTION_OFFSET)
):
"""
## Function
<b>Return a list of the most-cited documents, filtered by the optional query parameters.</b>
If you don't request abstracts returned, document permissions will not be checked or returned.
This is intended to speed up retrieval, especially for returning large numbers of
articles (e.g., for downloads.)
Note: The GVPi implementation does not appear to support the limit and offset parameter
## Return Type
models.DocumentList
## Status
This endpoint is working.
## Sample Call
/v1/Database/MostCited/
## Notes
## Potential Errors
"""
time.sleep(.25)
ocd, session_info = opasAPISupportLib.get_session_info(request, response)
# session_id = session_info.session_id
#print ("in most cited")
# return documentList
ret_val, ret_status = opasAPISupportLib.database_get_most_cited( period=period,
more_than=morethan,
publication_period=pubperiod,
author=author,
title=title,
source_name=sourcename,
source_code=sourcecode,
source_type=sourcetype, # see VALS_SOURCE_TYPE (norm_val applied in opasCenralDBLib)
abstract_requested=abstract,
req_url=request.url,
limit=limit,
offset=offset,
session_info=session_info
)
if isinstance(ret_val, models.ErrorReturn):
raise HTTPException(
status_code=ret_val.httpcode,
detail = ret_val.error + " - " + ret_val.error_description
)
else:
status_message = opasCentralDBLib.API_STATUS_SUCCESS
status_code = 200
# Don't record in final build - (ok for now during testing)
ocd, session_info = opasAPISupportLib.get_session_info(request, response)
ocd.record_session_endpoint(api_endpoint_id=opasCentralDBLib.API_DATABASE_MOSTCITED,
session_info=session_info,
params=request.url._url,
return_status_code = status_code,
status_message=status_message
)
#print ("out mostcited")
return ret_val
|
b18d3a3674cb5a52b7d3ea05db985774d7d25a4c
| 20,321 |
def format_event_leef(event):
"""Format an event as QRadar / LEEF"""
syslog_header = f'<13>1 {event["actionTime"]} {hostname}'
leef_header = f'LEEF:2.0|TrinityCyber|PTI|1|{event.pop("id")}|xa6|'
fields = dict()
fields["devTime"] = event.pop("actionTime")
fields[
"devTimeFormat"
] = "yyyy-MM-dd'T'HH:mm:ss.SSSXXX" # (e.g. 2022-04-25T00:01:19.109+00:00)
# LEEF-standard fields
if "source" in event:
fields["src"] = event.pop("source")
if "destination" in event:
fields["dst"] = event.pop("destination")
if "sourcePort" in event:
fields["srcPort"] = event.pop("sourcePort")
if "destinationPort" in event:
fields["dstPort"] = event.pop("destinationPort")
if "transportProtocol" in event:
fields["proto"] = event.pop("transportProtocol")
# Formula-related metadata
formula_metadata = event.pop("formula")
fields["tcFormulaId"] = formula_metadata["formulaId"]
fields["tcFormulaTitle"] = formula_metadata["title"]
for key, value in formula_metadata["tags"].items():
key = "tcFormula" + key.title().replace(" ", "")
fields[key] = value
# Application / protocol related data
for app_fields in event.pop("applicationData"):
for key, value in app_fields.items():
if value is None:
continue
if isinstance(value, str):
# Escape delimiter
value = value.replace("\xa6", "\\\xa6")
fields[key] = value
# Add any extra information from the query
fields.update(event)
fields_formatted = "\xa6".join([f"{key}={value}" for key, value in fields.items()])
return f"{syslog_header} {leef_header}{fields_formatted}"
|
ca463c9e86d6b7880e992aa11cd4b6ae7592dab4
| 20,322 |
def _file_path(ctx, val):
"""Return the path of the given file object.
Args:
ctx: The context.
val: The file object.
"""
return val.path
|
7c930f2511a0950e29ffc327e85cf9b2b3077c02
| 20,323 |
def Mapping_Third(Writelines, ThirdClassDict):
"""
:param Writelines: the method lines to be written for the apk
:param ThirdClassDict: the dictionary of third-party class expansions for this APK
:return: UpDateWritelines
"""
UpDateWriteLines = []
for l in Writelines:
if l.strip() in list(ThirdClassDict.keys()):
UpDateWriteLines.extend(ThirdClassDict[l.strip()])
else:
UpDateWriteLines.extend([l])
return UpDateWriteLines
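# Illustrative call (the key format is hypothetical): method lines that appear as keys in
# the third-party dict are expanded, all other lines pass through unchanged.
third = {"Lcom/example/Lib;->foo()V": ["Lcom/example/LibImpl;->a()V", "Lcom/example/LibImpl;->b()V"]}
print(Mapping_Third(["Lcom/example/Lib;->foo()V\n", "Lcom/app/Main;->run()V\n"], third))
# ['Lcom/example/LibImpl;->a()V', 'Lcom/example/LibImpl;->b()V', 'Lcom/app/Main;->run()V\n']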
|
eb94db36d06104007cbbacf8884cf6d45fee46b5
| 20,324 |
import numpy as np
def rotate_points_around_origin(points, origin, angle):
"""
Rotate a 2D array of points counterclockwise by a given angle around a given origin.
The angle should be given in degrees.
"""
angle = angle * np.pi / 180
ox, oy = origin.tolist()
new_points = np.copy(points)
new_points[:, 0] = ox + np.cos(angle) * (
points[:, 0] - ox) - np.sin(angle) * (points[:, 1] - oy)
new_points[:, 1] = oy + np.sin(angle) * (
points[:, 0] - ox) + np.cos(angle) * (points[:, 1] - oy)
return new_points
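# Illustrative call: rotating the point (1, 0) by 90 degrees around the origin
# yields approximately (0, 1).
pts = np.array([[1.0, 0.0]])
print(rotate_points_around_origin(pts, np.array([0.0, 0.0]), 90.0))  # ~[[0., 1.]]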
|
0e21c2a9d6c870202935f8dbd9e725d9586670c3
| 20,325 |
def get_names_to_aliases(inp) -> dict:
"""
Returns pair,
- out[0] = dictionary of names to sets of aliases
- out[1] = errors when calling names_to_links, i.e., when file-reading
@param inp: string vault directory or names_to_links dictionary
if string then get_names_to_links method is used
"""
if type(inp) is str:
inp = get_names_to_links(inp)
# now inp must be names_to_links_pair
out = dict()
for filename, dict_links_to_aliases in inp[0].items():
for link_filename, set_of_aliases in dict_links_to_aliases.items():
try:
out[link_filename].update(set_of_aliases)
except KeyError:
out[link_filename] = set(set_of_aliases)
return [out,inp[1]]
|
9648099dc8422abceb5c095191e90f3dad14c4fb
| 20,326 |
import numpy as np
def length(vec):
"""
Length of a given vector. If vec is a scalar, its length is 1.
Parameters
----------
vec: scalar or arr
Input vector
Returns
-------
length: int
Length of vec. If vec is a scalar, its length is 1.
"""
if np.ndim(vec)==0:
length=1
else:
length=len(vec)
return length
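# Illustrative calls: a scalar reports length 1, an array reports its own length.
print(length(3.14))         # 1
print(length(np.zeros(5)))  # 5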
|
d8baea0b5f5e0bdc30b9e5a5d76b06cd876c87ba
| 20,327 |
def build_eval_infeeds(params):
"""Create the TPU infeed ops."""
eval_size = get_eval_size(params)
num_eval_steps = eval_size // params.eval_batch_size
dev_assign = params.device_assignment
host_to_tpus = {}
for replica_id in range(params.num_replicas):
host_device = dev_assign.host_device(replica=replica_id, logical_core=0)
tpu_ordinal = dev_assign.tpu_ordinal(replica=replica_id, logical_core=0)
if host_device not in host_to_tpus:
host_to_tpus[host_device] = [tpu_ordinal]
else:
assert tpu_ordinal not in host_to_tpus[host_device]
host_to_tpus[host_device].append(tpu_ordinal)
infeed_ops = []
infeed_graphs = []
num_inputs = len(host_to_tpus)
for i, (host, tpus) in enumerate(host_to_tpus.items()):
infeed_graph = tf.Graph()
infeed_graphs.append(infeed_graph)
with infeed_graph.as_default():
def enqueue_fn(host_device=host, input_index=i, device_ordinals=tpus):
"""Docs."""
worker_infeed_ops = []
with tf.device(host_device):
dataset = build_eval_dataset(
params,
batch_size=params.eval_batch_size // num_inputs,
num_workers=num_inputs,
worker_index=input_index)
inputs = tf.data.make_one_shot_iterator(dataset).get_next()
if params.use_xla_sharding and params.num_cores_per_replica > 1:
inputs, partition_dims = pad_inputs_for_xla_sharding(params, inputs)
num_splits = len(device_ordinals)
if len(device_ordinals) > 1:
inputs = [tf.split(v, num_splits, 0) for v in inputs]
else:
inputs = [[v] for v in inputs]
q = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(inputs),
host_id=int(host_device.split('/task:')[-1].split('/')[0]),
input_partition_dims=partition_dims,
device_assignment=dev_assign)
inputs = [[v[i] for v in inputs] for i in range(num_splits)]
worker_infeed_ops.extend(q.generate_enqueue_ops(inputs))
else:
num_splits = len(device_ordinals)
if len(device_ordinals) > 1:
inputs = [tf.split(v, num_splits, 0) for v in inputs]
else:
inputs = [[v] for v in inputs]
input_shapes = [v[0].shape for v in inputs]
for j, device_ordinal in enumerate(device_ordinals):
worker_infeed_ops.append(tf.raw_ops.InfeedEnqueueTuple(
inputs=[v[j] for v in inputs],
shapes=input_shapes,
device_ordinal=device_ordinal))
return worker_infeed_ops
def _body(i):
with tf.control_dependencies(enqueue_fn()):
return i+1
infeed_op = tf.while_loop(
lambda step: tf.less(step, tf.cast(num_eval_steps, step.dtype)),
_body, [0], parallel_iterations=1, name='eval_infeed').op
infeed_ops.append(infeed_op)
return infeed_ops, infeed_graphs, eval_size
|
e62586dd8fe6358eaed9a0a2eaf43fd607c0323b
| 20,328 |
import json
def get_featured_parks(request):
""" Returns recommended parks as JSON
"""
featured_parks = Park.objects.filter(featured=True).prefetch_related('images')
response = {
'featured_parks': [{'id': n.pk, 'name': n.name, 'image': n.thumbnail} for n in featured_parks]
}
return HttpResponse(json.dumps(response), mimetype='application/json')
|
a3b45fa5b467434bf375a46f420538e5d5d78688
| 20,329 |
def plot_accuracy(raw_all_grids_df, option=None):
"""
Input: raw condition df
option: None, 'facet_subjects', 'show_variance_for_blocks', 'final_figure', 'subjectvariance'
Output: figure(s) that visualize the difference in accuracy btw. el and pl
"""
# Rearrange columns for better readability in temporal order of the experiment
condition_list = raw_all_grids_df['condition'].value_counts().index.tolist()
condition_list_sorted = ['GRID', 'SMALLGRID_BEFORE', 'SMALLGRID_AFTER']
if not set(condition_list).issubset(set(condition_list_sorted)) :
raise ValueError('are you sure everything is okay with the condition reordering?')
# Create a categorical type
condition_cat = CategoricalDtype(categories=condition_list_sorted, ordered=True)
# Cast the existing categories into the new category. Due to a bug in pandas we need to do this via a string.
raw_all_grids_df['condition'] = raw_all_grids_df['condition'].astype(str).astype(condition_cat)
# Get the x and y position of the 13 elements shown in the small Grid condition
element_pairs = raw_all_grids_df.query('condition=="SMALLGRID_BEFORE"').loc[:,['posx', 'posy']]
only_13_elements = pd.merge(raw_all_grids_df, element_pairs, on=['posx', 'posy'], how='inner')
# specify aggregators for different levels
# element level - - block level - - (subject level)
# mean median (mean)
agg_level=[winmean, winmean]
# aggregate data of only_13_elements
mean_over_elements = only_13_elements.groupby(['condition', 'block','subject','et'], as_index=False).agg(agg_level[0])
winmean_over_elements_winmean_over_blocks = mean_over_elements.groupby(['condition', 'subject','et'], as_index=False).agg(agg_level[1])
if option is None:
# compare accuracy values btw eyetrackers. Taking the mean over the subjects
(ggplot(winmean_over_elements_winmean_over_blocks, aes(x='et', y='accuracy',color='condition')) +
# TODO or points or violins??
geom_boxplot(data=winmean_over_elements_winmean_over_blocks, position=position_dodge(width=0.9)) +
ggtitle('Comparing accuracy of conditions')).draw()
elif option == 'facet_subjects':
# plot mean accuracy over all blocks for each subject
(ggplot(winmean_over_elements_winmean_over_blocks, aes(x='et', y='accuracy',color='condition')) +
geom_point(alpha=0.1,data=winmean_over_elements_winmean_over_blocks, position=position_dodge(width=0.5)) +
geom_point(position=position_dodge(width=0.5)) +
geom_line(aes(group='condition'),alpha=0.6, position=position_dodge(width=0.5)) +
facet_grid('.~subject') +
ggtitle('Comparing accuracy of conditions')).draw()
elif option == 'show_variance_for_blocks':
# plot mean accuracy over all blocks for each subject and show range by plotting the mean accuracy for each block
(ggplot(winmean_over_elements_winmean_over_blocks, aes(x='et', y='accuracy',color='condition')) +
# get the mean for each block
geom_point(alpha=0.1,data=raw_all_grids_df.groupby(['et', 'subject','condition','block']).mean().reset_index(level=['et','subject','condition','block']),position=position_dodge(width=0.5)) +
geom_point(position=position_dodge(width=0.5))+
geom_line(aes(group='condition'),position=position_dodge(width=0.5)) +
facet_grid('.~subject') +
ggtitle('Comparing accuracy of conditions')).draw()
elif option == 'final_figure':
# save old theme and set the one for fixation plotting
old_theme = theme_get()
theme_set(mythemes.before_after_grid_theme)
# simple: eyetracker vs mean accuracy over all blocks and subjects
fig = (ggplot(winmean_over_elements_winmean_over_blocks,aes(x='condition', y='accuracy', fill='et',group='et', color='et')) +
stat_summary(fun_y=winmean, geom='line',position=position_dodge(width=0.1)) +
# pointrange makes a 0.95 bootstrap CI
stat_summary(fun_data=winmean_cl_boot, geom='pointrange', position=position_dodge(width=0.1)) +
#geom_point(aes(group="subject"),data=winmean_over_elements_winmean_over_blocks.query("et=='Pupil Labs'"),alpha=0.5,color='blue')+
#geom_point(aes(group="subject"),data=winmean_over_elements_winmean_over_blocks.query("et=='EyeLink'"),alpha=0.5,color='red')+
ylab("Accuracy [$^\circ$]") +
labs(title='Course of Accuracy'))
# restore old theme before returning the figure
theme_set(old_theme)
return fig
elif option == 'subjectvariance':
mean_over_elements.loc[:,'group'] = mean_over_elements.et + mean_over_elements.block
return (ggplot(mean_over_elements,aes(x='condition', y='accuracy', fill='et',group='group', color='et')) +
geom_point(alpha=0.5)+
geom_line()+
ylab("Accuracy [$^\circ$]") +
labs(title='Course of Accuracy'))+facet_wrap('subject',scales='free')
else:
raise ValueError('You must set facets to a valid option. See documentation.')
|
fcb269c9bb0f97ac2df5914d1f26837125501d00
| 20,330 |
from typing import Sequence
from typing import Any
def make_data_output(structures: Sequence[Artefact[bytes]]) -> Artefact[list[Any]]:
"""Take xyz structure from xtb and parse them to a list of dicts."""
def to_dict(xyz: bytes) -> dict[str, Any]:
as_str = xyz.decode().strip()
energy = float(as_str.splitlines()[1].split()[1])
return {"structure": as_str, "energy": energy}
def sort_by_energy(*elements: dict[str, Any]) -> list[dict[str, Any]]:
out = [el for el in elements]
out = sorted(out, key=lambda x: x["energy"]) # type:ignore
return out
out = []
for s in structures:
out += [f.morph(to_dict, s, out=Encoding.json)] # elements to dicts
return f.reduce(sort_by_energy, *out)
|
ca9e7fd187ff98a9c9c5146e12f9bc8b0e1c0466
| 20,331 |
def total_angular_momentum(particles):
"""
Returns the total angular momentum of the particles set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.x = [-1.0, 1.0] | units.m
>>> particles.y = [0.0, 0.0] | units.m
>>> particles.z = [0.0, 0.0] | units.m
>>> particles.vx = [0.0, 0.0] | units.ms
>>> particles.vy = [-1.0, 1.0] | units.ms
>>> particles.vz = [0.0, 0.0] | units.ms
>>> particles.mass = [1.0, .5] | units.kg
>>> particles.total_angular_momentum()
quantity<[0.0, 0.0, 1.5] m**2 * kg * s**-1>
"""
# equivalent to:
# lx=(m*(y*vz-z*vy)).sum()
# ly=(m*(z*vx-x*vz)).sum()
# lz=(m*(x*vy-y*vx)).sum()
return (particles.mass.reshape((-1,1)) *particles.position.cross(particles.velocity)).sum(axis=0)
|
8eca23b7b1a8fc8a7722543f9193f0e4a3397f24
| 20,332 |
def svn_repos_get_logs3(*args):
"""
svn_repos_get_logs3(svn_repos_t repos, apr_array_header_t paths, svn_revnum_t start,
svn_revnum_t end, int limit, svn_boolean_t discover_changed_paths,
svn_boolean_t strict_node_history,
svn_repos_authz_func_t authz_read_func,
svn_log_message_receiver_t receiver,
apr_pool_t pool) -> svn_error_t
"""
return _repos.svn_repos_get_logs3(*args)
|
1dab074e8112e2be0d51709d5d3d93bfc11c8c7d
| 20,333 |
def insertTimerOnOutput (signal, type):
"""
Plug the signal sout of the return entity instead of `signal` to
input signal to enable the timer.
- param signal an output signal.
- return an Timer entity.
"""
Timer = getTimerType (type)
timer = Timer ("timer_of_" + signal.name)
plug(signal, timer.sin)
return timer
|
c8914227e112916ac67faf532babb0119abc502f
| 20,334 |
from typing import List
def anomaly_metrics(contended_task_id: TaskId, contending_task_ids: List[TaskId]):
"""Helper method to create metric based on anomaly.
uuid is used if provided.
"""
metrics = []
for task_id in contending_task_ids:
uuid = _create_uuid_from_tasks_ids(contending_task_ids + [contended_task_id])
metrics.append(Metric(
name='anomaly',
value=1,
labels=dict(
contended_task_id=contended_task_id, contending_task_id=task_id,
resource=ContendedResource.MEMORY_BW, uuid=uuid, type='contention'
),
type=MetricType.COUNTER
))
return metrics
|
df67e16af8b0c018da347c47a430fbe137a8c353
| 20,335 |
import logging
import json
def alarm():
"""."""
if request.method == 'POST':
response = {'message': 'POST Accepted'}
logging.info('alarm POSTED!')
data = request.data
logging.info(data)
string = json.dumps(data)
producer.send('SIP-alarms', string.encode())
return response
return ""
|
a4444c4bc3f761cfdeb98485c745b77e8227817e
| 20,336 |
def train_net(solver_prototxt, roidb, output_dir,
pretrained_model=None, detection_pretrained_model =None, max_iters=40000):
"""Train a TD-CNN network."""
roidb = filter_roidb(roidb)
sw = SolverWrapper(solver_prototxt, roidb, output_dir, pretrained_model=pretrained_model, detection_pretrained_model=detection_pretrained_model)
print 'Solving...'
model_paths = sw.train_model(max_iters)
print 'done solving'
return model_paths
|
af85a78e4a477ab55e949fa0d6f6d44400d1f62f
| 20,337 |
def get_druminst_order(x):
"""helper function to determine order of drum instruments
relies on standard sequence defined in settings
"""
y = shared.get_inst_name(x + shared.octave_length + shared.note2drums)
return shared.standard_printseq.index(y)
|
8cad6cd10487d51b6edd74444115d63b7e599641
| 20,338 |
import numpy as np
def find_nearest_idx(array, value):
"""
Find the index of the element in an array nearest to a given value
:param np.ndarray array: Array of values in which to look
:param float value: Value for which the index of the closest value in
`array` is desired.
:rtype: int
:return: The index of the item in `array` nearest to `value`
"""
return (np.abs(array - value)).argmin()
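# Illustrative call: 0.8 is closest to the element at index 2.
print(find_nearest_idx(np.array([0.0, 0.5, 1.0, 2.0]), 0.8))  # 2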
|
3eb48426bf01c625419bbf87893b7e877fd0538d
| 20,339 |
def hard_tanh(x):
"""Hard tanh function
Arguments:
x: Input value
hard_tanh(x) = {-1, for x < -2,
tanh(x), for x > -2 and x < 2
1, for x > 2 }
returns value according to hard tanh function
"""
return tf.maximum(
tf.cast(-1, tf.float32), tf.minimum(tf.cast(1, tf.float32), tf.cast(keras.backend.tanh(x) * 1.05, tf.float32))
)
|
3c93f09aaeb57ee9bf4d3ccdb2ebe790333f8f67
| 20,340 |
async def get_publications(publication_id: str, embedded: bool = False):
"""
Given a Publication ID, get the Publication record from metadata store.
"""
publication = await get_publication(publication_id, embedded)
return publication
|
8c4c81776abc1d8268192c23b1dee6c8c10bcfb0
| 20,341 |
def RandomNormal(inp):
"""
Random normally distributed weight initialization.
"""
return np.random.randn(inp)
|
d6922748ece8ec880eec6ba1b701424cd6fdd149
| 20,342 |
def get_transceiver_description(sfp_type, if_alias):
"""
:param sfp_type: SFP type of transceiver
:param if_alias: Port alias name
:return: Transceiver description
"""
return "{} for {}".format(sfp_type, if_alias)
|
ccb29d937495e37bc41e6f2cf35747d2acfe0d47
| 20,343 |
def find_contiguous_set(target_sum: int, values: list[int]) -> list[int]:
"""Returns set of at least 2 contiguous values that add to target sum."""
i = 0
set_ = []
sum_ = 0
while sum_ <= target_sum:
sum_ += values[i]
set_.append(values[i])
if sum_ == target_sum and len(set_) >= 2:
return set_
i += 1
return []
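# Illustrative call: only prefixes starting at the first element are examined, so callers
# presumably slide the starting index externally (an assumption). Here 3 + 4 hits the target.
print(find_contiguous_set(7, [3, 4, 10]))  # [3, 4]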
|
64b6c1f99946856a33a79fed3d43395a5a9c1000
| 20,344 |
def nesoni_report_to_JSON(reportified):
"""
Convert a nesoni nway.any file that has been reportified to JSON
See: tables.rst for info on what is stored in RethinkDB
:param reportified: the reportified nway.any file (been through
nway_reportify()). This is essentially a list of tuples
:returns: a list of JSON
"""
stats = {}
parsed_list = []
for position in reportified:
for elem in position:
skip = False
ref_id, pos, strain, old, ftype, new, evidence, cons, uncalled = elem
ref_id = '.'.join(ref_id.split('.')[:-1])
# Initialise the stats...
if strain not in stats:
stats[strain] = 0
if new == old:
# Have no change
#dat = ["conserved"]+[None]*9
skip = True
elif new == 'N':
# Have an uncalled base
#dat = ["uncalled"]+[None]*9
skip = True
# Check for mixtures...
elif ftype == "substitution" and new.find('-') != -1:
# Deletion hidden in substitution
ftype = 'deletion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "substitution" and len(new) > 1:
# Insertion hidden in substitution
ftype = 'insertion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "deletion" and new.find('-') == -1 and len(new) == 1:
# Substitution hidden in deletions
ftype = 'substitution'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "deletion" and new.find('-') == -1 and len(new) > 1:
# Insertion hidden in deletions
ftype = 'insertion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "insertion" and new.find('-') != -1:
# Deletion hidden in insertions
ftype = 'deletion'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
elif ftype == "insertion" and new.find('-') == -1 and len(new) == 1:
# Substitution hidden in insertions
ftype = 'substitution'
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
# We have the same change state across all strains
else:
dat = extract_consequences(cons, ftype)
stats[strain] = stats[strain]+1
obs_count = parsers.parse_evidence(evidence)
# Some simple tests
the_classes = ['insertion', 'deletion', 'substitution']
if not skip:
assert dat[0] in the_classes
json = {"id": strain+'_'+ref_id+'_'+str(pos),
"StrainID": strain,
"Position": pos,
"LocusTag": dat[2],
"Class": dat[0],
"SubClass": dat[1],
"RefBase": old,
"ChangeBase": new,
"CDSBaseNum": dat[3],
"CDSAANum": dat[4],
"CDSRegion": dat[5],
"RefAA": dat[6],
"ChangeAA": dat[7],
"Product": dat[8],
"CorrelatedChange": dat[9],
"Evidence": obs_count,
"UncalledBlock": uncalled
}
parsed_list.append(json)
return parsed_list, stats
|
b78d9e9c124104a4e4c634e8fc2926804a06629d
| 20,345 |
def DeferredLightInfoEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder)
|
f2bad6ffea3170c53a13206ab43d8b7193ccb89d
| 20,346 |
import configparser
from pathlib import Path
import os
def qiita_get_config():
"""Read the configuration file (~/.qiita.ini)."""
config = configparser.ConfigParser()
path = Path(os.getenv('HOME'), '.qiita.ini')
config.read_file(open(path))
return config
|
5464253eff2e2abe4f895d0eab679c00096b1bb3
| 20,347 |
def move_character(character: dict, direction_index=None, available_directions=None) -> tuple:
"""
Change character's coordinates.
:param character: a dictionary
:param direction_index: a non-negative integer, optional
:param available_directions: a list of strings, optional
:precondition: character keys must contain "X-coordinate" and "Y-coordinate"
:precondition: character values must be integers
:precondition: direction_index must be a non-negative integer validated by validate_option function or None
:precondition: available_directions items must each be either "north", "south", "east" or "west", or None
:postcondition: updates character X or Y coordinate based on direction choice if available_directions is not None
:postcondition: otherwise resets character X and Y coordinates to the previous coordinates
:return: new character's coordinates as a tuple
>>> protagonist = {"Y-coordinate": 1, "X-coordinate": 1, "Previous coordinates": (0, 1)}
>>> move_character(protagonist, 0, ["south", "west"])
(2, 1)
>>> protagonist = {"Y-coordinate": 1, "X-coordinate": 1, "Previous coordinates": (0, 1)}
>>> move_character(protagonist)
(0, 1)
"""
directions_dictionary = {"north": -1, "south": 1, "west": -1, "east": 1}
if available_directions is not None:
direction = available_directions[direction_index]
character["Previous coordinates"] = character["Y-coordinate"], character["X-coordinate"]
if direction in "north south":
character["Y-coordinate"] += directions_dictionary[direction]
else:
character["X-coordinate"] += directions_dictionary[direction]
else:
character["Y-coordinate"] = character["Previous coordinates"][0]
character["X-coordinate"] = character["Previous coordinates"][1]
return character["Y-coordinate"], character["X-coordinate"]
|
cc5cc3115437d0dc4e9b7ba7845565ee8147be30
| 20,348 |
def expandednodeid_to_str(exnode):
"""SOPC_ExpandedNodeId or SOPC_ExpandedNodeId* to its str representation in the OPC-UA XML syntax."""
a = ''
if exnode.ServerIndex:
a += 'srv={};'.format(exnode.ServerIndex)
nsu = string_to_str(ffi.addressof(exnode.NamespaceUri))
if nsu:
a += 'nsu={};'.format(nsu)
b = ffi.string(libsub.SOPC_NodeId_ToCString(ffi.addressof(exnode.NodeId))).decode()
return a + b
|
062012a11128a42bbaeb8d7ff316a2a29a31928b
| 20,349 |
def scrap_insta_description(inst) -> str:
"""
Scrape the description from the Instagram account HTML.
"""
description = inst.body.div.section.main.div.header.section.find_all(
'div')[4].span.get_text()
return description
|
898fa0d1cb44606374b131b8b471178a22ab74ed
| 20,350 |
import time
def merge_by_sim(track_sim_list, track_data_dic, track_list, reid_th):
"""
Merge by sim.
Ref: https://stackoverflow.com/questions/30089675/clustering-cosine-similarity-matrix
"""
print('start clustering')
merge_start_time = time.time()
cost_matrix = get_cost_matrix(track_sim_list, track_data_dic, track_list)
cluster_labels = AgglomerativeClustering(n_clusters=None, distance_threshold=reid_th, affinity='precomputed',
linkage='average').fit_predict(cost_matrix)
labels = get_match(cluster_labels)
# print(merged_index_list)
print('we have %d global tracks after merge, time for merge %.4f s' % (len(labels), time.time()-merge_start_time))
# get real data
valid_global_list = []
valid_count = 0
for person_track_list in labels:
temp = []
for index in person_track_list:
record_name = track_list[index]
temp.append(record_name)
if len(temp) > 1:
cameras = set([t[0] for t in temp])
if len(cameras) > 1:
valid_count += 1
valid_global_list.append(temp)
#clustered_list.append(temp)
print(f'after merge, %d valid global ids are created: {valid_global_list}' % valid_count)
return valid_global_list
|
68478cb1367dcd2ef8a2550c54ea57e265bbbdf5
| 20,351 |
import os
def get_ros_hostname():
""" Try to get ROS_HOSTNAME environment variable.
returns: a ROS compatible hostname, or None.
"""
ros_hostname = os.environ.get('ROS_HOSTNAME')
return ros_hostname if is_legal_name(ros_hostname) else None
|
fb34753b98d657e0db727bffee269125901e9d0d
| 20,352 |
def cycle(*args, **kargs):
"""
Returns the next cycle of the given list
    Every time ``cycle`` is called, the value returned will be the next item
in the list passed to it. This list is reset on every request, but can
also be reset by calling ``reset_cycle()``.
You may specify the list as either arguments, or as a single list argument.
This can be used to alternate classes for table rows::
# In Myghty...
% for item in items:
<tr class="<% cycle("even", "odd") %>">
... use item ...
</tr>
% #endfor
You can use named cycles to prevent clashes in nested loops. You'll
have to reset the inner cycle, manually::
% for item in items:
<tr class="<% cycle("even", "odd", name="row_class") %>
<td>
% for value in item.values:
<span style="color:'<% cycle("red", "green", "blue",
name="colors") %>'">
item
</span>
% #endfor
<% reset_cycle("colors") %>
</td>
</tr>
% #endfor
"""
if len(args) > 1:
items = args
else:
items = args[0]
name = kargs.get('name', 'default')
cycles = request_config().environ.setdefault('railshelpers.cycles', {})
cycle = cycles.setdefault(name, iterdict(items))
if cycles[name].get('items') != items:
cycle = cycles[name] = iterdict(items)
return cycle['iter'].next()
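# A plausible sketch (an assumption, not the original helper) of the iterdict
# factory used above: it pairs the item list with an endlessly cycling iterator,
# matching the 'items'/'iter' keys that cycle() reads.
import itertools

def iterdict_sketch(items):
    return {'items': items, 'iter': itertools.cycle(items)}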
|
6a1606d5fc65eb690c4ccbad3c662dc671219502
| 20,353 |
import numpy as np
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
def calc_rmsd(struct1, struct2):
"""
Basic rmsd calculator for molecules and molecular clusters.
"""
geo1 = struct1.get_geo_array()
ele1 = struct1.elements
geo2 = struct2.get_geo_array()
ele2 = struct2.elements
dist = cdist(geo1,geo2)
idx1,idx2 = linear_sum_assignment(dist)
geo1 = geo1[idx1]
geo2 = geo2[idx2]
rmsd = np.mean(np.linalg.norm(geo1 - geo2,axis=-1))
return rmsd
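# Hedged usage sketch: the real structure class comes from the surrounding
# package, so a tiny stand-in with the same interface is used here.
class _ToyStruct:
    def __init__(self, geo, elements):
        self._geo = np.asarray(geo, dtype=float)
        self.elements = elements
    def get_geo_array(self):
        return self._geo

_s1 = _ToyStruct([[0, 0, 0], [1, 0, 0]], ["H", "H"])
_s2 = _ToyStruct([[1, 0, 0], [0, 0, 1]], ["H", "H"])
# The assignment pairs each atom with its nearest counterpart, giving 0.5 here.
print(calc_rmsd(_s1, _s2))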
|
cb368f6e4edaf223194f1e965a0f926f33e72330
| 20,354 |
def validate_flat_dimension(d):
"""Return strue if a 'key:value' dimension is valid."""
key, _, val = d.partition(':')
return validate_dimension_value(val) and validate_dimension_key(key)
|
ee663bbdc62dab3d247c09ee5950dc63dafcad15
| 20,355 |
import six
import os
def file_size(f):
"""
Returns size of file in bytes.
"""
if isinstance(f, (six.string_types, six.text_type)):
return os.path.getsize(f)
else:
cur = f.tell()
f.seek(0, 2)
size = f.tell()
f.seek(cur)
return size
|
f999382958a972abbcf593c02e8e8e3609d8f44a
| 20,356 |
def __get_from_imports(import_tuples):
""" Returns import names and fromlist
import_tuples are specified as
(name, fromlist, ispackage)
"""
from_imports = [(tup[0], tup[1]) for tup in import_tuples
if tup[1] is not None and len(tup[1]) > 0]
return from_imports
|
28df8225ad9440386342c38657944cfe7ac3d3ca
| 20,357 |
def change_dt_utc_to_local(dt):
"""
    Convert a UTC datetime string to the Europe/Paris local time zone.
"""
return convert_utctime_to_timezone(dt,'%Y%m%dT%H%M%SZ','Europe/Paris','%Y%m%dT%H%M%S')
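# A possible implementation sketch of the convert_utctime_to_timezone helper
# assumed above, using the standard-library zoneinfo module (Python 3.9+).
from datetime import datetime, timezone
from zoneinfo import ZoneInfo

def convert_utctime_to_timezone_sketch(dt_str, in_fmt, tz_name, out_fmt):
    dt = datetime.strptime(dt_str, in_fmt).replace(tzinfo=timezone.utc)
    return dt.astimezone(ZoneInfo(tz_name)).strftime(out_fmt)

# e.g. convert_utctime_to_timezone_sketch('20240101T120000Z', '%Y%m%dT%H%M%SZ',
#                                         'Europe/Paris', '%Y%m%dT%H%M%S') -> '20240101T130000'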
|
09b52fd15a4fd9512e05f0a2801927b7f8be385f
| 20,358 |
import logging
import warnings
import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX
def sarimax_ACO_PDQ_search(endo_var, exog_var_matrix, PDQS, searchSpace, options_ACO, low_memory=False, verbose=False):
"""
Searchs SARIMAX PDQ parameters.
endo_var: is the principal variable.
exog_var_matrix: is the matrix of exogenous variables.
PDQS: list of pdqs parameters. EG: [1, 1, 1, 24].
    searchSpace: the search space for the (p, d, q) orders. E.G.:
        p = d = q = range(0, 2)
        searchSpace = [p, d, q]
    options_ACO: parametrization for the ACO algorithm (number of ants, tours,
        and pheromone settings). E.G.:
        {'antNumber':2, 'antTours':1, 'alpha':2, 'beta':2, 'rho':0.5, 'Q':2}
"""
def SARIMAX_AICc(X, *args):
endo = args[0][0]
exog = args[0][1]
param_seasonal = args[0][2]
param = X[0:3]
if param_seasonal[-1] < 0:
param_seasonal[-1] = 1
mod = SARIMAX(endo, exog=exog, order=param, seasonal_order=param_seasonal,
enforce_stationarity=False, enforce_invertibility=False)
aicc = np.inf
try:
results = mod.fit(disp=False, low_memory=low_memory)
aicc = results.aicc
        except Exception:
pass
return aicc
antNumber = options_ACO['antNumber']
antTours = options_ACO['antTours']
alpha = options_ACO['alpha']
beta = options_ACO['beta']
rho = options_ACO['rho']
Q = options_ACO['Q']
if verbose:
logging.info("Original search Space: {0}".format(searchSpace))
warnings.filterwarnings("ignore") # specify to ignore warning messages
ACOsearch = ACO(alpha, beta, rho, Q)
best_result, _ = ACOsearch.optimize(antNumber, antTours, dimentionsRanges=searchSpace, function=SARIMAX_AICc,
functionArgs=[endo_var, exog_var_matrix, PDQS], verbose=verbose)
logging.info("BEST result: {0}.".format(best_result))
param = best_result
param_seasonal = PDQS
mod = SARIMAX(endo_var, exog=exog_var_matrix, order=param, seasonal_order=param_seasonal,
enforce_stationarity=False, enforce_invertibility=False)
results = mod.fit(disp=False)
return results.aicc, best_result
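# Hedged usage sketch (comment-only): the ACO class and the series come from
# the surrounding project, so only the call shape is illustrated here.
# p = d = q = range(0, 3)
# search_space = [p, d, q]
# options = {'antNumber': 4, 'antTours': 2, 'alpha': 2, 'beta': 2, 'rho': 0.5, 'Q': 2}
# aicc, best_pdq = sarimax_ACO_PDQ_search(y, X, [1, 1, 1, 24], search_space, options)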
|
acae883a3aaeb501646f121753924fb321f471e5
| 20,359 |
from typing import Any
def convert_vue_i18n_format(locale: str, po_content: Any) -> str:
"""
done: will markdown be parsed to html in this method? Or should we do that on the fly, everywhere...
It seems the logical place will be to parse it here. Otherwise the rest of the application becomes more
complex. Using markdown will have the benefit of the output being a single html string with proper
formatting.
todo: change parameters {{ param }} to hello: '%{msg} world'
see: http://kazupon.github.io/vue-i18n/guide/formatting.html#list-formatting
    The chance is very high that we don't need to do that, as we don't need those sentences.
The content is added to the 'internet_nl' key, like this:
const internet_nl_messages = {
en: {
internet_nl: {
key: 'value',
key: 'value'
},
},
}
There is a slight challenge that translations in vue are based on javascript properties, meaning, no quotes.
:return:
"""
content: str = _vue_format_start()
content += _vue_format_locale_start(locale)
for entry in po_content:
# to save a boatload of data, we're not storing the 'content' from the pages of internet.nl
# we'll just have to point to this content.
if entry.msgid.endswith('content'):
continue
content += f" {_js_safe_msgid(entry.msgid)}: '{_js_safe_msgstr(entry.msgstr)}',\n"
content += _vue_format_locale_end()
content += _vue_format_end()
return content
|
93d7351335cbfbc9407954a352aefceea54b84c0
| 20,360 |
import numpy as np
def get_index(x, value, closest=True):
"""Get the index of an array that corresponds to a given value.
If closest is true, get the index of the value closest to the
value entered.
"""
if closest:
index = np.abs(np.array(x) - value).argsort()[0]
else:
index = list(x).index(value)
return index
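# Worked examples derived from the code above:
# get_index([0.0, 0.5, 1.0], 0.61) -> 1 (index of the closest value, 0.5)
# get_index([0.0, 0.5, 1.0], 0.5, closest=False) -> 1 (exact match lookup)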
|
19dc68407d576492f25235fc1efcc79895d8cb3f
| 20,361 |
def process_spawn(window, args):
"""
Spawns a child process with its stdin/out/err wired to a PTY in `window`.
`args` should be a list where the first item is the executable and the
remaining will be passed to it as command line arguments.
Returns a process object.
"""
return (yield Trap.PROCESS_SPAWN, window, args)
|
979599c767f3e391541e8fd02a168cd29659bcea
| 20,362 |
import json
import re
def insertTaskParams(taskParams, verbose=False, properErrorCode=False, parent_tid=None):
"""Insert task parameters
args:
taskParams: a dictionary of task parameters
verbose: True to see verbose messages
properErrorCode: True to get a detailed error code
parent_tid: ID of the parent task
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code, message from the server, and taskID if successful, or error message if failed
0: request is processed
1: duplication in DEFT
2: duplication in JEDI
3: accepted for incremental execution
4: server error
"""
# serialize
taskParamsStr = json.dumps(taskParams)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/insertTaskParams'
data = {'taskParams':taskParamsStr,
'properErrorCode':properErrorCode}
if parent_tid:
data['parent_tid'] = parent_tid
status,output = curl.post(url,data)
try:
loaded_output = list(pickle_loads(output))
# extract taskID
try:
            m = re.search(r'jediTaskID=(\d+)', loaded_output[-1])
taskID = int(m.group(1))
except Exception:
taskID = None
loaded_output.append(taskID)
return status, loaded_output
except Exception as e:
errStr = dump_log("insertTaskParams", e, output)
return EC_Failed, output+'\n'+errStr
|
baa9f0c783361cec17cec9f6259e4f856aa0121d
| 20,363 |
def pref_infos():
"""
to update user infos
"""
form = UserParametersForm()
# print current_user
if request.method == 'POST' :
print
log_cis.info("updating an user - POST \n")
# for debugging purposes
for f_field in form :
log_cis.info( "form name : %s / form data : %s ", f_field.name, f_field.data )
if form.validate_on_submit():
existing_user = mongo_users.find_one({"_id" : ObjectId(form.userOID.data)} )
log_cis.debug("existing_user : %s", pformat(existing_user) )
### check if new email is already used by someone else
is_new_email_taken = False
existing_email = mongo_users.find_one( {"userEmail" : form.userEmail.data} )
log_cis.debug("existing_email : %s", pformat(existing_email) )
if existing_email is not None :
if existing_user["_id"] != existing_email["_id"] :
is_new_email_taken = True
if existing_user is None :
flash(u"Erreur : utilisateur inexistant", category='warning')
return redirect(url_for('pref_infos'))
if is_new_email_taken :
flash(u"Erreur : cet email déjà utilisé", category='warning')
return redirect(url_for('pref_infos'))
else :
### saving updated infos in user
user_obj = User()
user_obj.populate_from_dict(dict_input=existing_user)
# # update visitor to user in db --> function from ModelMixin
log_cis.warning("updating new_user in mongo_users" )
user_obj.populate_from_form( form=form )
user_obj.update_document_in_mongo( document=existing_user, coll=mongo_users )
### relog user
login_user( user_obj, remember=existing_user['userRememberMe'] )
flash(u"Vos informations ont bien été mises à jour", category='primary')
return redirect(url_for('pref_infos'))
else :
log_cis.error("form was not validated : form.errors : %s", form.errors )
flash(u"Erreur : formulaire invalide", category='warning')
return redirect(url_for('pref_infos'))
elif request.method == 'GET' :
log_cis.info("updating an user - GET \n")
print current_user.__dict__
# prepopulate input fields
form.userOID.data = str(current_user.userOID)
form.userName.data = current_user.userName.capitalize()
form.userSurname.data = current_user.userSurname.capitalize()
form.userEmail.data = current_user.userEmail
form.userOtherStructure.data = current_user.userOtherStructure
# prepopulate select fields
form.userProfile.process_data(current_user.userProfile)
form.userPartnerStructure.process_data(current_user.userPartnerStructure)
form.userStructureProfile.process_data(current_user.userStructureProfile)
# prepopulate boolean fields
form.userHaveProjects.process_data(current_user.userHaveProjects)
form.userJoinCollective.process_data(current_user.userJoinCollective)
form.userNewsletter.process_data(current_user.userNewsletter)
return render_template('user_preferences/user_parameters.html',
config_name = config_name, # prod or default...
app_metas = app_metas,
language = "fr" ,
languages_dict = app_languages_dict ,
site_section = "preferences",
site_subsection = "infos",
form = form,
user_infos = current_user.get_public_infos # cf model_user.py
)
|
afe57314da056187b60e3a2ac365a27bbef73d2b
| 20,364 |
def read_domains(file_name):
"""
    Read the domain list file, collect the domains to probe, and extract their registered (main) domains.
    Note: domains that do not conform to the expected format are discarded.
"""
domains = []
main_domains = []
no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)
file_path = './unverified_domain_data/'
with open(file_path+file_name,'r') as fp:
for d in fp.readlines():
domain_tld = no_fetch_extract(d.strip())
            tld, reg_domain = domain_tld.suffix, domain_tld.domain  # extract the TLD and registered-domain parts
if tld and reg_domain:
main_domains.append(reg_domain+'.'+tld)
domains.append(d.strip())
else:
                logger.logger.warning('domain %s does not conform to the expected format, skipping probe' % d.strip())
return domains, main_domains
|
856a7e7d93023bf59981d51a6210b988236a14a9
| 20,365 |
def determine_last_contact_incomplete(end_time_skyfield, events, times, antenna):
"""
    Returns the last contact, completed to the end of the analysis period, and the number of events in the incomplete sequence.
:param end_time_skyfield: skyfield time
:param events: array of int
:param times: array of skyfield times
:param antenna: Antenna object
:return: Contact object, int
"""
incomplete_event_sequence_end = list()
    # let the last contact end at the end of the analysis period
incomplete_event_sequence_end.append(end_time_skyfield)
    # if the last event is a culmination, add it to the end
if events[-1] == int(SkyfieldEventTypes.Culminate):
incomplete_event_sequence_end.append(times[-1])
    # if the last event in the list is a rise, add it to the end
if events[-1 - len(incomplete_event_sequence_end)] == int(SkyfieldEventTypes.Rise):
incomplete_event_sequence_end.append(times[-1 - len(incomplete_event_sequence_end)])
    # reverse the list so it is in chronologically correct order
incomplete_event_sequence_end.reverse()
incomplete_contact_end = Contact(antenna)
for time in incomplete_event_sequence_end:
incomplete_contact_end.add_relative_position_by_skyfield_time(time)
return incomplete_contact_end, len(incomplete_event_sequence_end)
|
8a234bed084fa829c7649de99b933a59c89cba5b
| 20,366 |
def lam_est(data, J, B, Q, L = 3,
paras = [3, 20], n_trees = 200, include_reward = 0, fixed_state_comp = None, method = "QRF"):
"""
    Construct the pointwise cov lam (for both the test statistic and the critical value) by combining the two parts (estimated and observed).
Returns
-------
lam: (Q-1)-len list of four lam matrices (n * T-q * B)
"""
dx, da = data[0][0].shape[1], data[0][1].shape[1]
if fixed_state_comp is not None:
dx += 1
# generate uv
rseed(0); npseed(0)
if include_reward:
uv = [randn(B, dx + 1), randn(B, dx + da)]
else:
uv = [randn(B, dx), randn(B, dx + da)]
# estimate characteristic values (cross-fitting): phi_R, psi_R, phi_I,
# psi_I
estimated = cond_char_vaule_est(data = data, uv = uv,
paras = paras, n_trees = n_trees, L = L,
J = J,
include_reward = include_reward, fixed_state_comp = fixed_state_comp,
method = method) # ,obs_ys
if paras == "CV_once":
CV_paras = estimated
return CV_paras
else:
estimated_cond_char = estimated
# cos and sin in batch. (n*T*dx) * (dx* B) = n * T * B:
# c_X,s_X,c_XA,s_XA
observed_cond_char = obs_char(data = data, uv = uv,
include_reward = include_reward, fixed_state_comp = fixed_state_comp)
# combine the above two parts to get cond. corr. estimation.
lam = lam_formula(estimated_cond_char, observed_cond_char, J, Q)
return lam
|
3a1637f2e522e414e7ddc5c470310c1f2c460ce0
| 20,367 |
import os
import csv
import matplotlib.pyplot as plt
def read_training_data(rootpath):
"""
Function for reading the images for training.
:param rootpath: path to the traffic sign data
:return: list of images, list of corresponding image information: width, height, class, track
"""
images = [] # images
img_info = [] # corresponding labels
# loop over all classes
for img_class in os.listdir(rootpath):
prefix = rootpath + '/' + img_class + '/' # subdirectory for class
gt_file = open(prefix + 'GT-' + img_class + '.csv') # annotations file
gt_reader = csv.reader(gt_file, delimiter=';') # csv parser for annotations file
next(gt_reader) # skip header
# loop over all images in current annotations file
for row in gt_reader:
images.append(plt.imread(prefix + row[0])) # numpy array representation of image
img_info.append([int(row[1]), int(row[2]),
img_class, row[0][:5]]) # width, height, class, track
gt_file.close()
return images, img_info
|
e1bbe3d239f93d5daa4e0127ca7bb3f1f7bfd44e
| 20,368 |
import numpy as np
def initialize_all(y0, t0, t1, n):
""" An initialization routine for the different ODE solving
methods in the lab. This initializes Y, T, and h. """
if isinstance(y0, np.ndarray):
        Y = np.empty((n, y0.size)).squeeze()
else:
Y = np.empty(n)
# print y0
# print Y
Y[0] = y0
T = np.linspace(t0, t1, n)
h = float(t1 - t0) / (n - 1)
return Y, T, h
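# Worked example derived from the code above:
# initialize_all(1.0, 0.0, 1.0, 5) returns Y with Y[0] == 1.0,
# T == [0.0, 0.25, 0.5, 0.75, 1.0] and h == 0.25.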
|
552a92dd50aca926b1cb6c9e6aaafd1c1401b5c3
| 20,369 |
import random
def _sequence_event(values, length, verb):
"""Returns sequence (finite product) event.
Args:
values: List of values to sample from.
length: Length of the sequence to generate.
verb: Verb in infinitive form.
Returns:
Instance of `probability.FiniteProductEvent`, together with a text
description.
"""
del verb # unused
samples = [random.choice(values) for _ in range(length)]
events = [probability.DiscreteEvent([sample]) for sample in samples]
event = probability.FiniteProductEvent(events)
sequence = ''.join(str(sample) for sample in samples)
event_description = 'sequence {sequence}'.format(sequence=sequence)
return event, event_description
|
1addd21c6c39451ac29f9bcf7551f070884e9328
| 20,370 |
def ndarrayToQImage(img):
""" convert numpy array image to QImage """
if img.dtype != 'uint8':
raise ValueError('Only support 8U data')
    if img.ndim == 3:
        t = QtGui.QImage.Format_RGB888
    elif img.ndim == 2:
t = QtGui.QImage.Format_Grayscale8
else:
raise ValueError('Only support 1 and 3 channel image')
qimage = QtGui.QImage(img.data,
img.shape[1], img.shape[0],
img.strides[0], t)
return qimage
|
a08207266b03adff2dd66421572a0905f9525844
| 20,371 |
import datetime
import re
import base64
import uuid
def object_hook(dct, compile_re=False, ensure_tzinfo=True, encoding=None):
"""
Object hook used by hoplite_loads. This object hook can encode the
dictionary in the right text format. For example, json.loads by default
will decode '{'hey':'hey'}' into {u'hey':u'hey'} rather than {'hey':'hey'}.
If encoding is set to utf-8, this object_hook can make '{'hey':'hey'}'
decode to {'hey':'hey'} This object hook also decodes extended json types
such as objectId and datetime objects. Datetime objects also
have the option to be decoded with or without timezone information.
:param dct: Dictionary this object hook is to operate on.
:param ensure_tzinfo: Boolean deciding if timezone info should be added to
decoded datetime objects
:param encoding: choice of text decoding(unicode/utf-8, perhaps others)
:return:
"""
if encoding:
# Converts all keys and unicode values in the top layer of the current
# dictionary to the desired encoding type.
new_dct = {}
for key, value in dct.iteritems():
if isinstance(key, unicode):
key = key.encode(encoding)
if isinstance(value, unicode):
value = value.encode(encoding)
new_dct[key] = value
dct = new_dct
if "$oid" in dct:
return ObjectId(str(dct["$oid"]))
if "$ref" in dct:
return DBRef(dct["$ref"], dct["$id"], dct.get("$db", None))
if "$date" in dct:
secs = float(dct["$date"]) / 1000.0
if ensure_tzinfo:
return EPOCH_AWARE + datetime.timedelta(seconds=secs)
else:
# Avoid adding time zone info by default, unlike
# bson.json_util.loads. If the developer really wants this, they
# will have to specify it.
return EPOCH_NAIVE + datetime.timedelta(seconds=secs)
if "$regex" in dct:
flags = 0
# PyMongo always adds $options but some other tools may not.
for opt in dct.get("$options", ""):
flags |= _RE_OPT_TABLE.get(opt, 0)
if compile_re:
return re.compile(dct["$regex"], flags)
else:
return Regex(dct["$regex"], flags)
if "$minKey" in dct:
return MinKey()
if "$maxKey" in dct:
return MaxKey()
if "$binary" in dct:
if isinstance(dct["$type"], int):
dct["$type"] = "%02x" % dct["$type"]
subtype = int(dct["$type"], 16)
if subtype >= 0xffffff80: # Handle mongoexport values
subtype = int(dct["$type"][6:], 16)
return Binary(base64.b64decode(dct["$binary"].encode()), subtype)
if "$code" in dct:
return Code(dct["$code"], dct.get("$scope"))
if "$uuid" in dct:
return uuid.UUID(dct["$uuid"])
if "$undefined" in dct:
return None
if "$numberLong" in dct:
return Int64(dct["$numberLong"])
if "$timestamp" in dct:
tsp = dct["$timestamp"]
return Timestamp(tsp["t"], tsp["i"])
return dct
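# Hedged usage sketch: wiring the hook into json.loads via functools.partial.
# The extended-JSON helper types (ObjectId, Int64, ...) are assumed to come
# from the bson package, as in the code above.
import json
import functools

_hook = functools.partial(object_hook, compile_re=True, ensure_tzinfo=False)
_doc = json.loads('{"_id": {"$oid": "5f43a1d2e1382312d0c3a1b7"}, "n": {"$numberLong": "42"}}',
                  object_hook=_hook)
# _doc["_id"] is now an ObjectId and _doc["n"] an Int64.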
|
bfe93f67813bda8e77e93a7ee33b6dc3bcbfe16a
| 20,372 |
import uuid
import os
import shutil
def grade_submissions(course_name, assignment_name):
"""Grade all submissions for a particular assignment.
A .zip archive should be uploaded as part of the POST request to this endpoint.
The archive should contain a single directory 'Submissions', which should
contain a directory for each student's submission.
See the grade_assignment module for the implementation of the actual testing
logic - this function is merely the endpoint to receive the request and
return the response. The respose returned has an identifier to allow the
client to request a CSV of grade results, as well as some JSON of
results to be displayed to the user on the page."""
try:
course = find_course(course_name)
assignment = course.assignments.filter_by(name=assignment_name).one()
except NoResultFound:
return abort(404)
submissions_archive = request.files['submissions']
    csv_id = str(uuid.uuid4())
csvfile, final_results, unittest_stats = grade_assignment(assignment, submissions_archive)
os.makedirs(assignment.grades_dir, exist_ok=True)
shutil.copy(csvfile.name, os.path.join(assignment.grades_dir, csv_id))
csvfile.close()
grade_response = {'csv_id': csv_id, 'results': final_results, 'stats': unittest_stats}
if assignment.problems:
grade_response["problems_max"] = {p.problem_name: p.score for p in assignment.problems}
grade_response["max_score"] = sum(p.score for p in assignment.problems)
else:
grade_response["max_score"] = assignment.max_score
return jsonify(grade_response)
|
20d38a30683c09c1a4e438da5b694774dcc71681
| 20,373 |
import requests
import pickle
import bs4 as bs
def scrape_sp500_tickers():
"""Scrape the wikipedia page for the latest list of sp500 companies
Returns:
[pickle]: [list of sp500 companies]
"""
    # send a GET request to Wikipedia's list of S&P 500 companies
resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
soup = bs.BeautifulSoup(resp.text, 'lxml')
table = soup.find('table', {'class': 'wikitable sortable'})
tickers = []
    # cycle through the wiki table to get all of the stock tickers
for row in table.findAll('tr')[1:]:
ticker = row.findAll('td')[0].text
tickers.append(ticker)
#save to pickle to speed up process
with open("sp500tickers.pickle","wb") as f:
pickle.dump(tickers,f)
print ('Scraping Complete')
return tickers
|
0c318e84f488c93254256394b1fe5d57d58c81a5
| 20,374 |
import numpy as np
def get_top_k_recs(user_reps, item_reps, k):
"""
    For each user compute the `k` topmost-relevant items
Args:
user_reps (dict): representations for all `m` unique users
item_reps (:obj:`np.array`): (n, d) `d` latent features for all `n` items
k (int): no. of most relevant items
Returns:
item_recs ([[int]]): list of personalized recommendations for each user
as lists of item IDs
"""
n_user = len(user_reps)
item_recs = []
for u in range(n_user):
user_embed = user_reps[u]['embed']
user_item_scores = np.dot(item_reps, user_embed)
item_recs.append(list(np.argsort(user_item_scores)[::-1][:k]))
return item_recs
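# Toy example (the shapes here are assumptions): two users with 2-d embeddings
# ranking three items; users 0 and 1 get items [0, 2] and [1, 2] respectively.
_user_reps_demo = {0: {'embed': np.array([1.0, 0.0])},
                   1: {'embed': np.array([0.0, 1.0])}}
_item_reps_demo = np.array([[0.9, 0.1], [0.1, 0.9], [0.5, 0.5]])
print(get_top_k_recs(_user_reps_demo, _item_reps_demo, k=2))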
|
e209b8794fe3d8f8002dbbe48d858b335171c2f5
| 20,375 |
import logging
import os
import sys
def logger():
"""
    Set up the root and zeep loggers
:return: root logger object
"""
root_logger = logging.getLogger()
level = logging.getLevelName(os.environ.get('logLevelDefault', 'INFO'))
root_logger.setLevel(level)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
stream_handler.setFormatter(formatter)
root_logger.addHandler(stream_handler)
zeep_logger = logging.getLogger('proarc')
zeep = logging.getLevelName(os.environ.get('logLevelZeep', 'CRITICAL'))
zeep_logger.setLevel(zeep)
return root_logger
|
b7f3af5555eae953825c42aed18869deafa9f38d
| 20,376 |
def some_function(t):
"""Another silly function."""
return t + " python"
|
2bd8adc315e97409758f13b0f777ccd17eb4b820
| 20,377 |
def build_model(args):
"""
Function: Build a deep learning model
Input:
args: input parameters saved in the type of parser.parse_args
Output:
"""
if args['debug_mode'] is True:
print("BUILDING MODEL......")
model = Sequential()
# Normalize
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
# Add a layer to crop images
model.add(Cropping2D(cropping=((70, 25), (0, 0)))) # remaining size: 65,320,3
# Add three 5x5 convolution layers
model.add(Convolution2D(24, 5, 5, activation='elu', subsample=(2, 2)))
model.add(Convolution2D(36, 5, 5, activation='elu', subsample=(2, 2)))
model.add(Convolution2D(48, 5, 5, activation='elu', subsample=(2, 2)))
# Add two 3x3 convolution layers
model.add(Convolution2D(64, 3, 3, activation='elu'))
model.add(Convolution2D(64, 3, 3, activation='elu'))
# Add a flatten layer
model.add(Flatten())
# Add a dropout to overcome overfitting
model.add(Dropout(args['keep_prob']))
# Add three fully connected layers
model.add(Dense(100, activation='elu'))
model.add(Dense(50, activation='elu'))
model.add(Dense(10, activation='elu'))
# Add a fully connected output layer
model.add(Dense(1))
# Summary
model.summary()
return model
|
3008918f1319b235c4bdf575f7df84251337ec65
| 20,378 |
def add_context_for_join_form(context, request):
""" Helper function used by view functions below """
# If the client has already joined a market
if 'trader_id' in request.session:
# If trader is in database
if Trader.objects.filter(id=request.session['trader_id']).exists():
trader = Trader.objects.get(id=request.session['trader_id'])
# If trader has been removed from market
if trader.removed_from_market:
request.session['removed_from_market'] = True
# If trader has been deleted from database
else:
request.session['removed_from_market'] = True
# We add this market to the context to notify the client
market = get_object_or_404(
Market, market_id=request.session['market_id'])
context['market'] = market
return context
|
aad1b592c6d28d9a69ec97a8be9d46f942cb3d7b
| 20,379 |
def create_list(inner_type_info: CLTypeInfo) -> CLTypeInfoForList:
"""Returns CL type information for a list.
:param CLTypeInfo inner_type_info: Type information pertaining to each element within list.
"""
return CLTypeInfoForList(
typeof=CLType.LIST,
inner_type_info=inner_type_info
)
|
e74731984cc83172c60a79e1b1efe05d90a32342
| 20,380 |
def get_baseline(baseline_filename, plugin_filenames=None):
"""
:type baseline_filename: string
:param baseline_filename: name of the baseline file
:type plugin_filenames: tuple
:param plugin_filenames: list of plugins to import
:raises: IOError
:raises: ValueError
"""
if not baseline_filename:
return
raise_exception_if_baseline_file_is_unstaged(baseline_filename)
return SecretsCollection.load_baseline_from_string(
_get_baseline_string_from_file(
baseline_filename,
),
plugin_filenames=plugin_filenames,
)
|
f4318ee676e0c670f152feef0884a220eb1a38ac
| 20,381 |
def del_ind_purged(*args):
"""
del_ind_purged(ea)
"""
return _ida_nalt.del_ind_purged(*args)
|
80133af5acff0a9c284ec7894abd10bafa2671a1
| 20,382 |
import random
import hmac
def hash_password(password, salthex=None, reps=1000):
"""Compute secure (hash, salthex, reps) triplet for password.
The password string is required. The returned salthex and reps
must be saved and reused to hash any comparison password in
order for it to match the returned hash.
The salthex string will be chosen randomly if not provided, and
if provided must be an even-length string of hexadecimal
digits, recommended length 16 or
greater. E.g. salt="([0-9a-z][0-9a-z])*"
The reps integer must be 1 or greater and should be a
relatively large number (default 1000) to slow down brute-force
attacks."""
if not salthex:
salthex = ''.join([ "%02x" % random.randint(0, 0xFF)
for d in range(0,8) ])
salt = []
for p in range(0, len(salthex), 2):
salt.append(int(salthex[p:p+2], 16))
salt = bytes(salt)
if reps < 1:
reps = 1
msg = password.encode()
for r in range(0,reps):
msg = hmac.HMAC(salt, msg, digestmod='MD5').hexdigest().encode()
return (msg.decode(), salthex, reps)
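# A hedged companion sketch: verify a candidate password against a stored
# (hash, salthex, reps) triplet by re-hashing with the saved parameters and
# comparing in constant time.
def verify_password_sketch(password, stored_hash, salthex, reps):
    candidate, _, _ = hash_password(password, salthex=salthex, reps=reps)
    return hmac.compare_digest(candidate, stored_hash)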
|
cac468818560ed52b415157dde71d5416c34478c
| 20,383 |
from typing import Dict
def create_scheduled_job_yaml_spec(
descriptor_contents: Dict, executor_config: ExecutorConfig, job_id: str, event: BenchmarkEvent
) -> str:
"""
Creates the YAML spec file corresponding to a descriptor passed as parameter
:param event: event that triggered this execution
:param descriptor_contents: dict containing the parsed descriptor
:param executor_config: configuration for the transpiler
:param job_id: str
    :return: the YAML string for the given descriptor
"""
descriptor = BenchmarkDescriptor.from_dict(descriptor_contents, executor_config.descriptor_config)
bai_k8s_builder = create_scheduled_benchmark_bai_k8s_builder(
descriptor, executor_config.bai_config, job_id, event=event
)
return bai_k8s_builder.dump_yaml_string()
|
48e19b6637eafee72b0b3b04a1e31c8e6c163971
| 20,384 |
import argparse
def _parse_args():
"""
Parse arguments for the CLI
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--fovs',
type=str,
required=True,
help="Path to the fov data",
)
parser.add_argument(
'--exp',
type=str,
required=True,
help="Path to experiment file",
)
return parser.parse_args()
|
ecea45baba3c89e3fd81613a256c5be300e01051
| 20,385 |
def ArgMin(iterable, key=None, default=None, retvalue=False):
"""
    iterable >> ArgMin(key=None, default=None, retvalue=False)
Return index of first minimum element (and minimum) in input
(transformed or extracted by key function).
>>> [1, 2, 0, 2] >> ArgMin()
2
>>> ['12', '1', '123'] >> ArgMin(key=len, retvalue=True)
(1, '1')
>>> ['12', '1', '123'] >> ArgMin(key=len)
1
>>> [] >> ArgMin(default=0)
0
>>> [] >> ArgMin(default=(None, 0), retvalue=True)
(None, 0)
>>> data = [(3, 10), (2, 20), (1, 30)]
>>> data >> ArgMin(key=0)
2
>>> data >> ArgMin(1)
0
:param iterable iterable: Iterable over numbers
:param int|tuple|function|None key: Key function to extract or
transform elements. None = identity function.
:param object default: Value returned if iterable is empty.
:param bool retvalue: If True the index and the value of the
minimum element is returned.
:return: index of smallest element according to key function
and the smallest element itself if retvalue==True.
:rtype: object | tuple
"""
try:
f = colfunc(key)
i, v = min(enumerate(iterable), key=lambda i_e1: f(i_e1[1]))
return (i, v) if retvalue else i
except Exception:
return default
|
9c2515c3a37ab82e2b5df6b5d8dcf5ded6ad15ad
| 20,386 |
import numpy as np
import scipy
import scipy.optimize
def localize_peaks_monopolar_triangulation(traces, local_peak, contact_locations, neighbours_mask, nbefore, nafter, max_distance_um):
"""
    This method is from Julien Boussard; see spikeinterface.toolkit.postprocessing.unit_localization
"""
peak_locations = np.zeros(local_peak.size, dtype=dtype_localize_by_method['monopolar_triangulation'])
for i, peak in enumerate(local_peak):
chan_mask = neighbours_mask[peak['channel_ind'], :]
chan_inds, = np.nonzero(chan_mask)
local_contact_locations = contact_locations[chan_inds, :]
        # wf is (nsample, nchan) - channels are restricted to the neighbours
wf = traces[peak['sample_ind']-nbefore:peak['sample_ind']+nafter, :][:, chan_inds]
wf_ptp = wf.ptp(axis=0)
x0, bounds = make_initial_guess_and_bounds(wf_ptp, local_contact_locations, max_distance_um)
args = (wf_ptp, local_contact_locations)
output = scipy.optimize.least_squares(estimate_distance_error, x0=x0, bounds=bounds, args = args)
peak_locations[i] = tuple(output['x'])
return peak_locations
|
c751ec223423007b170ce0020f6c21c72cff3cc2
| 20,387 |
def format_dict_with_indention(data):
"""Return a formatted string of key value pairs
:param data: a dict
:rtype: a string formatted to key='value'
"""
if data is None:
return None
return jsonutils.dumps(data, indent=4)
|
085ac029aa73e5049eeec12e021997a5067966ce
| 20,388 |
import logging
import logging.handlers
def get_logger(name, info_file, error_file, raw=False):
"""
Get a logger forwarding message to designated places
:param name: The name of the logger
:param info_file: File to log information less severe than error
:param error_file: File to log error and fatal
    :param raw: If the output should be logged in raw format
:return: Generated logger
"""
# Generate or get the logger object
if isinstance(name, str):
logger = logging.getLogger(name)
else:
logger = name
logger.setLevel(logging.DEBUG)
# Config info level logger handler
# If the file argument is None, forward the log to standard output
if info_file:
info_handler = logging.handlers.TimedRotatingFileHandler(info_file, when='midnight', interval=1)
else:
info_handler = logging.StreamHandler()
info_handler.setLevel(logging.DEBUG)
info_handler.setFormatter(logging.Formatter(RAW_FORMAT if raw else LOG_FORMAT))
# Config error level logger handler
if error_file:
error_handler = logging.FileHandler(error_file)
else:
error_handler = logging.StreamHandler()
error_handler.setLevel(logging.ERROR)
error_handler.setFormatter(logging.Formatter(RAW_FORMAT if raw else LOG_FORMAT))
# Add handlers to loggers
logger.addHandler(info_handler)
logger.addHandler(error_handler)
return logger
|
ea4abed032c201cdac9489ded4c195287106e6b2
| 20,389 |
import numpy as np
def _Rx(c, s):
"""Construct a rotation matrix around X-axis given cos and sin.
The `c` and `s` MUST satisfy c^2 + s^2 = 1 and have the same shape.
See https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations.
"""
o = np.zeros_like(c)
i = np.ones_like(o)
return _tailstack2([[i, o, o], [o, c, -s], [o, s, c]])
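# Quick sanity check (comment-only, since the _tailstack2 helper above is
# assumed to stack the 3x3 block along the trailing axes):
# c, s = np.cos(np.pi / 2), np.sin(np.pi / 2)
# _Rx(c, s) @ np.array([0.0, 1.0, 0.0])  # rotates y onto z, i.e. ~[0, 0, 1]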
|
01d436bee07458ede0484ed745ccf72568214240
| 20,390 |
def is_fraud(data):
"""
Identifies if the transaction was fraud
:param data: the data in the transaction
:return: true if the transaction was fraud, false otherwise
"""
return data[1] == 1
|
115e45a10f3429b9c33bc81fd94c24eff712f618
| 20,391 |
import scipy
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial pmf.
    Returns the probability of k successes in n trials with probability p.
"""
return scipy.stats.binom.pmf(k, n, p)
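# Worked example derived from the formula C(n, k) * p**k * (1-p)**(n-k):
# EvalBinomialPmf(1, 2, 0.5) -> 0.5 and EvalBinomialPmf(0, 3, 0.5) -> 0.125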
|
0720359be48b514465eb3a10d3f271ac3c25dfb9
| 20,392 |
def add_prospect(site_id, fname, lname, byear, bmonth, bday, p_type, id_type='all'):
"""
    Looks up a prospect's prospect_id given their first name (fname), last name (lname), site id (site_id), site (p_type), and birthdate (byear, bmonth, bday).
If no prospect is found, adds the player to the professional_prospects table and returns the newly created prospect_id.
"""
fname_search = fname_lookup(fname)
if id_type == 'all':
qry_add = """((mlb_id = "%s" AND mlb_id != 0)
OR (mlb_draft_id = "%s" AND mlb_draft_id IS NOT NULL)
OR (mlb_international_id = "%s" AND mlb_international_id IS NOT NULL)
OR (fg_minor_id = "%s" AND fg_minor_id IS NOT NULL)
OR (fg_major_id = "%s" AND fg_major_id IS NOT NULL)
OR (fg_temp_id = "%s" AND fg_temp_id IS NOT NULL))""" % (site_id, site_id, site_id, site_id, site_id, site_id)
else:
qry_add = """(%s = "%s" AND (%s != 0 OR %s IS NOT NULL))""" % (id_type, site_id, id_type, id_type)
check_qry = """SELECT prospect_id
FROM professional_prospects
WHERE 1
AND %s
;
"""
check_query = check_qry % (qry_add)
check_val = db.query(check_query)
if check_val != ():
prospect_id = check_val[0][0]
return prospect_id
else:
check_other_qry = """SELECT prospect_id
FROM professional_prospects
WHERE birth_year = %s
AND birth_month = %s
AND birth_day = %s
AND (
( REPLACE(REPLACE(REPLACE(REPLACE(mlb_lname, ".", ""),"'",""),"-","")," ","")
LIKE REPLACE(REPLACE(REPLACE(REPLACE("%%%s%%", ".", ""),"'",""),"-","")," ","") ) OR
( REPLACE(REPLACE(REPLACE(REPLACE("%s",".",""),"'",""),"-","")," ","")
LIKE REPLACE(REPLACE(REPLACE(REPLACE(CONCAT("%%",mlb_lname,"%%"), ".", ""),"'",""),"-","")," ","") ) OR
( REPLACE(REPLACE(REPLACE(REPLACE(fg_lname, ".", ""),"'",""),"-","")," ","")
LIKE REPLACE(REPLACE(REPLACE(REPLACE("%%%s%%", ".", ""),"'",""),"-","")," ","") ) OR
( REPLACE(REPLACE(REPLACE(REPLACE("%s", ".", ""),"'",""),"-","")," ","")
LIKE REPLACE(REPLACE(REPLACE(REPLACE(CONCAT("%%",fg_lname,"%%"), ".", ""),"'",""),"-","")," ","") )
)
AND (
( REPLACE(REPLACE(REPLACE(REPLACE(mlb_fname, ".", ""),"'",""),"-","")," ","")
LIKE REPLACE(REPLACE(REPLACE(REPLACE("%%%s%%", ".", ""),"'",""),"-","")," ","") ) OR
( REPLACE(REPLACE(REPLACE(REPLACE("%s",".",""),"'",""),"-","")," ","")
LIKE REPLACE(REPLACE(REPLACE(REPLACE(CONCAT("%%",mlb_fname,"%%"), ".", ""),"'",""),"-","")," ","") ) OR
( REPLACE(REPLACE(REPLACE(REPLACE(fg_fname, ".", ""),"'",""),"-","")," ","")
LIKE REPLACE(REPLACE(REPLACE(REPLACE("%%%s%%", ".", ""),"'",""),"-","")," ","") ) OR
( REPLACE(REPLACE(REPLACE(REPLACE("%s", ".", ""),"'",""),"-","")," ","")
LIKE REPLACE(REPLACE(REPLACE(REPLACE(CONCAT("%%",fg_fname,"%%"), ".", ""),"'",""),"-","")," ","") )
)
;"""
check_other_query = check_other_qry % (byear, bmonth, bday, lname, lname, lname, lname, fname_search, fname_search, fname_search, fname_search)
check_other_val = db.query(check_other_query)
if check_other_val != ():
prospect_id = check_other_val[0][0]
f_name = "mlb_fname"
l_name = "mlb_lname"
if p_type == "professional":
id_column = "mlb_id"
elif p_type == "draft":
id_column = "mlb_draft_id"
elif p_type in ("int", "international"):
id_column = "mlb_international_id"
elif p_type == "fg":
if "_" in site_id:
id_column = "fg_temp_id"
elif site_id[0] == "s":
id_column = "fg_minor_id"
else:
id_column = "fg_major_id"
f_name = "fg_fname"
l_name = "fg_lname"
print "\n\n\t\t\tadding", fname, lname, id_column, site_id, '\n\n'
for col, val in {f_name:fname, l_name:lname, id_column:site_id}.items():
set_str = 'SET %s = "%s"' % (col,val)
set_str2 = "# AND (%s IS NULL OR %s IS NULL)" % (col, col)
update_qry = """UPDATE professional_prospects
%s
WHERE prospect_id = %s
%s;"""
update_query = update_qry % (set_str, prospect_id, set_str2)
print update_query
db.query(update_query)
db.conn.commit()
return prospect_id
else:
entry = {"birth_year":int(byear), "birth_month":int(bmonth), "birth_day":int(bday)}
if p_type == "fg":
if "_" in site_id:
entry["fg_temp_id"] = site_id
elif site_id[0] == "s":
entry["fg_minor_id"] = site_id
else:
entry["fg_major_id"] = site_id
entry["fg_fname"] = fname
entry["fg_lname"] = lname
else:
entry["mlb_fname"] = fname
entry["mlb_lname"] = lname
if p_type == "professional":
entry["mlb_id"] = site_id
elif p_type == "draft":
entry["mlb_draft_id"] = site_id
elif p_type in ("int", "international"):
entry["mlb_international_id"] = site_id
db.insertRowDict(entry, "professional_prospects", debug=1)
db.conn.commit()
print '\n\n\n\n', check_other_query, '\n\n\n\n\n', check_query, '\n\n\n\n'
recheck_val = db.query(check_query)
prospect_id = recheck_val[0][0]
return prospect_id
|
da53dd73cddf1fd08d3bf2ca237c460f83d9a66b
| 20,393 |
def reformat_icd_code(icd_code: str, is_diag: bool = True) -> str:
"""Put a period in the right place because the MIMIC-III data files exclude them.
Generally, procedure ICD codes have dots after the first two digits, while diagnosis
ICD codes have dots after the first three digits.
Adopted from: https://github.com/jamesmullenbach/caml-mimic
"""
icd_code = "".join(icd_code.split("."))
if is_diag:
if icd_code.startswith("E"):
if len(icd_code) > 4:
icd_code = icd_code[:4] + "." + icd_code[4:]
else:
if len(icd_code) > 3:
icd_code = icd_code[:3] + "." + icd_code[3:]
else:
icd_code = icd_code[:2] + "." + icd_code[2:]
return icd_code
|
4992886b257dab5361f84dd0c7774f677466437f
| 20,394 |
def parse_parent(self):
"""Parse enclosing arglist of self"""
gtor_left = tokens_leftwards(self.begin)
gtor_right = tokens_rightwards(self.end)
enc = Arglist()
enc.append_subarglist_right(self) # _left could have worked equally well
try:
parse_left(enc, gtor_left)
parse_right(enc, gtor_right)
except StopIteration:
return None
return enc.complete()
|
aeae5b7d56614299d0d95d52a1cd7e8cecd036ff
| 20,395 |
def dump_annotation(ribo_handle):
"""
Returns annotation of a ribo file in bed format
in string form.
Parameters
----------
ribo_handle : h5py.File
hdf5 handle for the ribo file
Returns
-------
A string that can be output directly as a bed file.
"""
boundaries = get_region_boundaries(ribo_handle)
names = get_reference_names(ribo_handle)
bed_rows = list()
for ref_name, ref_boundaries in zip(names, boundaries):
for region_name, region_boundaries in zip(REGION_names, ref_boundaries ):
if region_boundaries[1] <= region_boundaries[0]:
continue
bed_entries = tuple( map( str,
[ref_name,
region_boundaries[0], region_boundaries[1],
region_name, 0, "+"] ) )
bed_rows.append( "\t".join(bed_entries) )
return "\n".join(bed_rows)
|
54c98527d02ad5a136c0bc29bc794c3819fe5426
| 20,396 |
def encode_snippets_with_states(snippets, states):
""" Encodes snippets by using previous query states instead.
Inputs:
snippets (list of Snippet): Input snippets.
states (list of dy.Expression): Previous hidden states to use.
        TODO: should this be dy.Expression or vector values?
"""
for snippet in snippets:
snippet.set_embedding(dy.concatenate([states[snippet.startpos],
states[snippet.endpos]]))
return snippets
|
1352ddb5ed745bae00d34cd23a43595f2c7ecc7b
| 20,397 |
import subprocess
def get_openshift_console_url(namespace: str) -> str:
"""Get the openshift console url for a namespace"""
cmd = (
"oc get route -n openshift-console console -o jsonpath='{.spec.host}'",
)
    ret = subprocess.run(cmd, shell=True, check=False, capture_output=True)
    if ret.returncode != 0:
        raise UMBNotificationError(
            f"Could not detect the location of openshift console url: {ret.stdout.decode()}"
        )
return f"https://{ret.stdout.decode()}/k8s/ns/{namespace}/tekton.dev~v1beta1~PipelineRun/"
|
71c85926c5ca368eac615f7433c7dccca5780a19
| 20,398 |
def calculate_ani(blast_results, fragment_length):
"""
Takes the input of the blast results, and calculates the ANI versus the reference genome
"""
sum_identity = float(0)
number_hits = 0 # Number of hits that passed the criteria
total_aligned_bases = 0 # Total of DNA bases that passed the criteria
total_unaligned_fragments = 0
total_unaligned_bases = 0
conserved_dna_bases = 0
for query in blast_results:
identity = blast_results[query][2]
queryEnd = blast_results[query][7]
queryStart = blast_results[query][6]
perc_aln_length = (float(queryEnd) - float(queryStart)) / fragment_length[query]
if float(identity) > float(69.9999) and float(perc_aln_length) > float(0.69999):
sum_identity += float(identity)
number_hits += 1
total_aligned_bases += fragment_length[query]
else:
total_unaligned_fragments += 1
total_unaligned_bases += fragment_length[query]
if float(identity) > float(89.999):
conserved_dna_bases += fragment_length[query]
return sum_identity, number_hits, total_aligned_bases, total_unaligned_fragments, total_unaligned_bases
|
09b649dda337d2b812f5c5fd9ec75b34737e3f15
| 20,399 |