content | sha1 | id
---|---|---
def koven_temp_atten(soiltemp, airtemp):
"""Define thermal attenuation ratios as in Koven et al 2013."""
# read in list of observed lats and lons from Koven paper
ex_points = permafrost_koven_sites.site_points
# make amplitudes
airtemp_ampl = make_monthly_amp(airtemp)
soiltemp_ampl = make_monthly_amp(soiltemp)
# interpolate the log to the correct depth
soiltemp_log = iris.analysis.maths.log(soiltemp_ampl)
linear = iris.analysis.Linear()
soiltemp_log_surf = soiltemp_log.interpolate([('depth', 0.0)], linear)
soiltemp_ampl_surf = iris.analysis.maths.exp(soiltemp_log_surf)
soiltemp_log_1m = soiltemp_log.interpolate([('depth', 1.0)], linear)
soiltemp_ampl_1m = iris.analysis.maths.exp(soiltemp_log_1m)
    # extract points for each site
airtemp_ampl_1d = extract_sites(ex_points, airtemp_ampl)
if len(airtemp_ampl_1d.shape) > 1:
airtemp_ampl_1d = airtemp_ampl_1d[:, 0]
soiltemp_ampl_surf_1d = extract_sites(ex_points, soiltemp_ampl_surf)
soiltemp_ampl_1m_1d = extract_sites(ex_points, soiltemp_ampl_1m)
# assign metrics
metrics = {}
metrics['attenuation 1m over surface'] = np.median(
soiltemp_ampl_1m_1d / soiltemp_ampl_surf_1d)
metrics['attenuation surface over air'] = np.median(
soiltemp_ampl_surf_1d / airtemp_ampl_1d)
return metrics | a9d0ab18cef1a311ee72fc653089acf96f157cf6 | 9,800 |
def clipup(step_size: float,
momentum: float = 0.9,
max_speed: float = 0.15,
fix_gradient_size: bool = True):
"""Construct optimizer triple for ClipUp."""
step_size = optimizers.make_schedule(step_size)
def init(x0):
v0 = jnp.zeros_like(x0)
return x0, v0
def update(i, g, state):
x, v = state
g = jax.lax.cond(fix_gradient_size,
lambda p: p / jnp.sqrt(jnp.sum(p * p)),
lambda p: p,
g)
step = g * step_size(i)
v = momentum * v + step
# Clip.
length = jnp.sqrt(jnp.sum(v * v))
v = jax.lax.cond(length > max_speed,
lambda p: p * max_speed / length,
lambda p: p,
v)
return x - v, v
def get_params(state):
x, _ = state
return x
return init, update, get_params | 7ab67ba4bfb164c2816b6dc478d0a99b85324b66 | 9,801 |
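# Usage sketch for the ClipUp optimizer triple above (hedged: assumes `jnp` and the
# `optimizers` module used inside `clipup` are importable; values are illustrative only):
#   init, update, get_params = clipup(step_size=0.01)
#   state = init(jnp.zeros(10))               # (params, velocity)
#   state = update(0, jnp.ones(10), state)    # normalize gradient, apply momentum, clip speed
#   params = get_params(state)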
def ramsey_echo_sequence(length, target):
"""
Generate a gate sequence to measure dephasing time in a two-qubit chip including a flip in the middle.
    This echo reduces effects detrimental to the dephasing measurement.
Parameters
----------
length : int
Number of Identity gates. Should be even.
target : str
Which qubit is measured. Options: "left" or "right"
Returns
-------
list
Dephasing sequence.
"""
wait = ["Id:Id"]
hlength = length // 2
if target == "left":
rotate_90_p = ["X90p:Id"]
rotate_90_m = ["X90m:Id"]
elif target == "right":
rotate_90_p = ["Id:X90p"]
rotate_90_m = ["Id:X90m"]
S = []
S.extend(rotate_90_p)
S.extend(wait * hlength)
S.extend(rotate_90_p)
S.extend(rotate_90_p)
S.extend(wait * hlength)
S.extend(rotate_90_m)
return S | 0fbe66b915e94b568b5c051c5982ee4e8aeaf945 | 9,802 |
import numpy as np
import sympy
def gauss_elimination(matrix) -> np.array:
    """
    This function computes the Gauss elimination process
:param matrix: generic matrix
:return: matrix after the Gauss elimination
"""
return np.array(sympy.Matrix(matrix).rref()[0]) | e25cb59808ac189bd858d3fc564aec034d7fc841 | 9,803 |
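# Usage sketch for gauss_elimination (illustrative values): the rref of an invertible
# matrix is the identity, while a rank-deficient one keeps a zero row.
#   gauss_elimination([[2, 1], [1, 3]])   # -> array([[1, 0], [0, 1]])
#   gauss_elimination([[1, 2], [2, 4]])   # -> array([[1, 2], [0, 0]])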
def prune(value, is_removable_function=is_removable):
"""
Deletes ``None`` and empty lists and dicts, recursively.
"""
    if isinstance(value, list):
        # iterate indices in reverse so in-place deletions do not skip elements
        for i in reversed(range(len(value))):
            v = value[i]
            if is_removable_function(value, i, v):
                del value[i]
            else:
                prune(v, is_removable_function)
    elif isinstance(value, dict):
        # iterate over a snapshot of the items so the dict can be mutated safely
        for k, v in list(value.items()):
            if is_removable_function(value, k, v):
                del value[k]
            else:
                prune(v, is_removable_function)
return value | 1e254cf2df988f4c7e782b7dace82222e7f09910 | 9,804 |
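# Usage sketch for prune with a hypothetical predicate (the real `is_removable` default
# lives elsewhere in this module): drop None values and empty containers in place.
#   data = {"a": None, "b": [1, None, []], "c": {"d": {}}}
#   prune(data, lambda container, key, v: v is None or v == [] or v == {})
#   # data is now {"b": [1], "c": {}}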
from os import environ
def get_country_gateway_url(country):
"""TODO: Keep config in environment or file"""
return {
'countrya': environ.get('first_gateway_url'),
'countryb': environ.get('first_gateway_url'),
'countryc': environ.get('second_gateway_url'),
}.get(country.lower()) | a3de88c250f5f36935301e024578f71ca9f1f012 | 9,805 |
import math
def n_permutations(n, r=None):
"""Number of permutations (unique by position)
:param n: population length
:param r: sample length
:return: int
"""
if r is None:
r = n
if n < 0 or r < 0:
        raise ValueError("n and r must be non-negative")
if n == 0 or r > n:
return 0
return math.factorial(n) // math.factorial(n - r) | 441081c534c07bb98b6a32cce4c87d64b030a5a7 | 9,806 |
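# Quick checks for n_permutations (standard permutation counts):
#   n_permutations(5, 2)  # -> 20  (5!/3!)
#   n_permutations(4)     # -> 24  (4!)
#   n_permutations(3, 5)  # -> 0   (r > n)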
def parse_s3_event(event):
"""Decode the S3 `event` message generated by message write operations.
See S3 docs: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-content-structure.html
See also the callers of this function.
Returns bucket_name, ipppssoot
"""
log.verbose("S3 Event:", event)
message = event["Records"][0]["s3"]["object"]["key"]
bucket_name = event["Records"][0]["s3"]["bucket"]["name"]
ipst = message.split("-")[-1]
log.info(f"received {message} : bucket = {bucket_name}, ipppssoot = {ipst}")
return "s3://" + bucket_name, ipst | bf9530d49191f29d1132188507da55c7568e26a6 | 9,807 |
def clip_2d_liang_barsky(xmin, ymin, xmax, ymax, x0, y0, x1, y1):
"""Clips the two-dimensional line segment by the algorithm of Liang and
Barsky. Adapted from James D. Foley, ed., __Computer Graphics: Principles
and Practice__ (Reading, Mass. [u.a.]: Addison-wesley, 1998), 122.
Parameters
----------
xmin, ymin, xmax, ymax, x0, y0, x1, y1 : float
Returns
-------
is_visible : bool
x0, y0, x1, y1 : float
"""
dx = x1 - x0
dy = y1 - y0
if dx == 0 and dy == 0 and clip_point(xmin, ymin, xmax, ymax, x0, y0):
return False, x0, y0, x1, y1
    tE_tL = np.array((0.0, 1.0))
if clip_t(dx, xmin - x0, tE_tL):
if clip_t(-dx, x0 - xmax, tE_tL):
if clip_t(dy, ymin - y0, tE_tL):
if clip_t(-dy, y0 - ymax, tE_tL):
# compute PL intersection, if tL has moved
tE, tL = tE_tL
if tL < 1:
x1 = x0 + tL * dx
y1 = y0 + tL * dy
# compute PE intersection, if tE has moved
if tE > 0:
x0 += tE * dx
y0 += tE * dy
return True, x0, y0, x1, y1
return False, x0, y0, x1, y1 | d38dc2fcda72fe186d95befd1e0e2ef2c688297e | 9,808 |
import re
def _(pattern, key_path: str, case_ignored=False) -> bool:
"""Called when the concerned Key is defined as a re.Pattern, and case_ignored flag is neglected."""
return re.fullmatch(pattern, key_path) is not None | c5759a7940dcb9babc791322cac1397a640dc94d | 9,809 |
def sophos_firewall_web_filter_update_command(client: Client, params: dict) -> CommandResults:
"""Update an existing object
Args:
client (Client): Sophos XG Firewall Client
params (dict): params to update the object with
Returns:
CommandResults: Command results object
"""
return generic_save_and_get(client, WEB_FILTER['endpoint_tag'], params, web_filter_builder,
WEB_FILTER['table_headers'], True) | f8306dcce60de64dea3114137f0a27813f455a56 | 9,810 |
def transition_with_random_block(block_randomizer):
"""
Build a block transition with randomized data.
Provide optional sub-transitions to advance some
number of epochs or slots before applying the random block.
"""
return {
"block_producer": block_randomizer,
} | acf0d285a7633b40ffb46853831412dafa6617e5 | 9,811 |
def vmobj_to_list(o):
"""Converts TVM objects returned by VM execution to Python List.
Parameters
----------
o : Obj
VM Object as output from VM runtime executor.
Returns
-------
result : list
Numpy objects as list with equivalent values to the input object.
"""
if isinstance(o, tvm.nd.NDArray):
result = [o.numpy()]
elif isinstance(o, tvm.runtime.container.ADT):
result = []
for f in o:
result.extend(vmobj_to_list(f))
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1])
hd = vmobj_to_list(o.fields[0])
hd.extend(tl)
result = hd
elif o.constructor.name_hint == "Nil":
result = []
elif "tensor_nil" in o.constructor.name_hint:
result = [0]
elif "tensor" in o.constructor.name_hint:
result = [o.fields[0].numpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
return result | 8ecc9e22b47072adea98f1fc23f00a696619d0a4 | 9,812 |
import logging
def fetch_run(workspace: Workspace, run_recovery_id: str) -> Run:
"""
Finds an existing run in an experiment, based on a recovery ID that contains the experiment ID
and the actual RunId. The run can be specified either in the experiment_name:run_id format,
or just the run_id.
:param workspace: the configured AzureML workspace to search for the experiment.
:param run_recovery_id: The Run to find. Either in the full recovery ID format, experiment_name:run_id
or just the run_id
:return: The AzureML run.
"""
experiment, run = split_recovery_id(run_recovery_id)
try:
experiment_to_recover = Experiment(workspace, experiment)
except Exception as ex:
raise Exception(f"Unable to retrieve run {run} in experiment {experiment}: {str(ex)}")
run_to_recover = fetch_run_for_experiment(experiment_to_recover, run)
logging.info("Fetched run #{} {} from experiment {}.".format(run, run_to_recover.number, experiment))
return run_to_recover | 91255f02888ef1186d18e21e42c4343713bb3b29 | 9,813 |
def get_wl_band(radar_frequency):
"""Returns integer corresponding to radar frequency.
Args:
radar_frequency (float): Radar frequency (GHz).
Returns:
        int: 0=35GHz radar, 1=94GHz radar.
"""
return 0 if (30 < radar_frequency < 40) else 1 | cf2eaa12f111f7ad6751fb31f58e0bc01666494a | 9,814 |
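# Usage sketch for get_wl_band: frequencies inside the 30-40 GHz window map to band 0,
# everything else (e.g. a 94 GHz cloud radar) to band 1.
#   get_wl_band(35.5)  # -> 0
#   get_wl_band(94.0)  # -> 1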
def get_topology_node(name: str, topology: ServiceTopology) -> TopologyNode:
"""
Fetch a topology node by name
:param name: node name
:param topology: service topology with all nodes
:return: TopologyNode
"""
node = topology.__dict__.get(name)
if not node:
raise ValueError(f"{name} node not found in {topology}")
return node | 996e6cc1e69a44eb1ce8e4e041d7af84d592e894 | 9,815 |
import hashlib
def md5hash(string):
    """
    Return the MD5 hex digest of the given string.
    """
    if isinstance(string, str):
        # hashlib needs bytes, so encode text input as UTF-8
        string = string.encode("utf-8")
    return hashlib.md5(string).hexdigest() | cfc0d44c3c84fb08d277d7b397a5aca453025d96 | 9,816 |
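# Usage sketch for md5hash (text input is UTF-8 encoded before hashing):
#   md5hash("hello")  # -> '5d41402abc4b2a76b9719d911017c592'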
def userlist(request):
"""Shows a user list."""
return common_userlist(request, locale=request.view_lang) | bdfd4477d352d62076d644e045d746a5912993e6 | 9,817 |
def return_heartrates(patient_id):
"""
access database to get heart rate history for a patient
:param patient_id: integer ID of patient to get heart rates of
:return: json with the heart rate list for patient, or error message
"""
patient_id = int(patient_id)
    if heart_server_helpers.validate_patient(patient_id) is False:
        return jsonify({"Error": "invalid patient ID"})
    if heart_server_helpers.existing_beats(patient_id) is False:
return jsonify({"Error": "no heartbeats recorded for patient"})
for user in Patient.objects.raw({"_id": patient_id}):
patient = user
heartrate_list = patient.heart_rate
print(heartrate_list)
heart_list = {"heat_rates": heartrate_list}
return jsonify(heart_list) | 864c3912bcd5be43c43c6ccfc17c0e6f675c7d6d | 9,818 |
def describe_vpn_connections_header():
"""generate output header"""
return misc.format_line((
"Account",
"Region",
"VpcId",
"VpcCidr",
"VpnName",
"VpnId",
"State",
"CutomerGwId",
"CutomerGwAddress",
"Type"
)) | 5539f2beb017f9c2aaaa4b4a70952f1c17fa5761 | 9,819 |
def to_numpy(a):
"""Convert an object to NumPy.
Args:
a (object): Object to convert.
Returns:
`np.ndarray`: `a` as NumPy.
"""
return convert(a, NPOrNum) | 4af733bd61256505d7db49d74d0dc1f96e10701d | 9,820 |
def transpose(self, perm=None, copy=True):
"""Return a tensor with permuted axes.
Parameters
----------
    perm : Union[Sequence[int], dragon.Tensor], optional
The output permutation.
copy : bool, optional, default=True
Return a new tensor or transpose in-place.
Returns
-------
dragon.Tensor
The output tensor.
See Also
--------
`dragon.transpose(...)`_
"""
return array_ops.transpose(self, perm=perm, copy=copy) | be340cf2797555870020d55c163d2765346fd6fa | 9,821 |
def brainrender_vis(regions, colors=None, atlas_name="allen_mouse_25um"):
"""Visualise regions in atlas using brainrender"""
if colors is None:
cm = ColorManager(num_colors=len(regions), method="rgb")
colors = cm.colors
def get_n_random_points_in_region(region, N):
"""
        Gets N random points inside (or on the surface) of a mesh.
"""
region_bounds = region.mesh.bounds()
X = np.random.randint(region_bounds[0], region_bounds[1], size=10000)
Y = np.random.randint(region_bounds[2], region_bounds[3], size=10000)
Z = np.random.randint(region_bounds[4], region_bounds[5], size=10000)
pts = [[x, y, z] for x, y, z in zip(X, Y, Z)]
ipts = region.mesh.insidePoints(pts).points()
if N < ipts.shape[0]:
return ipts[np.random.choice(ipts.shape[0], N, replace=False), :]
else:
return ipts
scene = brainrender.Scene(root=True, title="Labelled cells", atlas_name=atlas_name)
# Get a numpy array with (fake) coordinates of some labelled cells
brain_region_actors = []
for region, color in zip(regions, colors):
brain_region = scene.add_brain_region(region, alpha=0.15, color=color)
coordinates = get_n_random_points_in_region(brain_region.mesh, 2000)
color = [color] * coordinates.shape[0]
# Add to scene
scene.add(
brainrender.actors.Points(coordinates, name=f"{region} CELLS", colors=color)
)
brain_region_actors.append(brain_region)
hemisphere_points = [
get_points_in_hemisphere(scene.atlas, brain_region_actor)
for brain_region_actor in brain_region_actors
]
p1 = hemisphere_points[0].mean(axis=0)
p2 = hemisphere_points[1].mean(axis=0)
mesh = vedo.shapes.Cylinder(pos=[p1, p2], c="blue", r=100, alpha=0.5)
cylinder = brainrender.actor.Actor(mesh, name="Cylinder", br_class="Cylinder")
scene.add(cylinder)
# render
scene.content
scene.render() | a85fda126f482a5ee1be9b7d129f7b091bc26c79 | 9,822 |
from random import randrange
def randsel(path, minlen=0, maxlen=None, unit="second"):
"""Randomly select a portion of audio from path.
Parameters
----------
path: str
File path to audio.
minlen: float, optional
Inclusive minimum length of selection in seconds or samples.
maxlen: float, optional
Exclusive maximum length of selection in seconds or samples.
unit: str, optional
The unit in which `minlen` and `maxlen` are interpreted.
Options are:
- 'second' (default)
- 'sample'
Returns
-------
tstart, tend: tuple of int
integer index of selection
"""
info = audioinfo(path)
sr, sigsize = info.samplerate, info.frames
if unit == 'second':
minoffset = int(minlen*sr)
maxoffset = int(maxlen*sr) if maxlen else sigsize
else:
minoffset = minlen
maxoffset = maxlen if maxlen else sigsize
assert (minoffset < maxoffset) and (minoffset <= sigsize), \
f"""BAD: siglen={sigsize}, minlen={minoffset}, maxlen={maxoffset}"""
# Select begin sample
tstart = randrange(max(1, sigsize-minoffset))
tend = randrange(tstart+minoffset, min(tstart+maxoffset, sigsize+1))
return tstart, tend | de9e328eb025ab54f6a25bf1b028212470339c8d | 9,823 |
import datetime
import splunklib.client
import splunklib.data
def parse_datetime(splunk_uri, session_key, time_str):
"""
Leverage splunkd to do time parseing,
:time_str: ISO8601 format, 2011-07-06T21:54:23.000-07:00
"""
if not time_str:
return None
scheme, host, port = tuple(splunk_uri.replace("/", "").split(":"))
service = splunklib.client.Service(token=session_key, scheme=scheme,
host=host, port=port)
endpoint = splunklib.client.Endpoint(service, "search/timeparser/")
r = endpoint.get(time=time_str, output_time_format="%s")
response = splunklib.data.load(r.body.read()).response
seconds = response[time_str]
return datetime.datetime.utcfromtimestamp(float(seconds)) | bece967102ae37ec0cd1a1eb84e3bfc3eb5bef0f | 9,824 |
def _get_assessment_url(assessment):
"""Returns string URL for assessment view page."""
return urlparse.urljoin(utils.get_url_root(), utils.view_url_for(assessment)) | fd6c7a8dcb4a28a244645dcd93610d18682672b4 | 9,825 |
from typing import Set
def get_secret_setting_names(settings: dict) -> Set[str]:
"""guess the setting names that likely contain sensitive values"""
return {
key for key in settings.keys()
if AUTOFIND_SECRET_SETTINGS.match(key)
and key not in AUTOFIND_SECRET_SETTINGS_EXCLUDED
} | {
key for key, value in settings['SETTINGS_DEFAULTS'].items()
if value == PLACEHOLDER_FOR_SECRET
and key not in AUTOFIND_SECRET_SETTINGS_EXCLUDED
} | 7cd7c8d90299d0bb3b143d10901ed33a90e85645 | 9,826 |
import base64
import requests
def main(dict):
"""
Function that allows to send a get request to twitter API and retrieve the last 3 tweets of a
    specific account name. The parameter of the account is passed by Watson Assistant through a
context variable.
Args:
dict (dict): containing the parameter - in our case only one is used : "account" (e.g. @blackmirror)
Return:
list_tweets (list) : list containing text (and image) of the last three tweets.
"""
account_name = dict.get("account")[1:]
client_key = '// your twitter dev account client_key //'
client_secret = '// your twitter dev account client_secret //'
key_secret = '{}:{}'.format(client_key, client_secret).encode('ascii')
b64_encoded_key = base64.b64encode(key_secret)
b64_encoded_key = b64_encoded_key.decode('ascii')
base_url = 'https://api.twitter.com/'
auth_url = '{}oauth2/token'.format(base_url)
auth_headers = {
'Authorization': 'Basic {}'.format(b64_encoded_key),
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'
}
auth_data = {
'grant_type': 'client_credentials'
}
auth_resp = requests.post(auth_url, headers=auth_headers, data=auth_data)
access_token = auth_resp.json()['access_token']
search_headers = {
'Authorization': 'Bearer {}'.format(access_token)
}
search_url = '{}1.1/statuses/user_timeline.json?screen_name={}&count=3'.format(base_url, account_name)
search_resp = requests.get(search_url, headers=search_headers)
tweet_data = search_resp.json()
list_tweets =[]
for i in range(len(tweet_data)):
# store the text of the tweet
text = tweet_data[i].get("text")
# if the tweet contains an image add this to the tweet text
if(tweet_data[i].get("entities").get("media")):
image = tweet_data[i].get("entities").get("media")[0].get("media_url_https")
width = tweet_data[i].get("entities").get("media")[0].get("sizes").get("small").get("w")
height = tweet_data[i].get("entities").get("media")[0].get("sizes").get("small").get("h")
url = tweet_data[i].get("entities").get("media")[0].get("url")
final = text + "<a href = '" + url + "'>" + "<img src = '" +image + "' height =" + str(height) + " width = "+ str(width) + ">" + "</a>"
list_tweets.append(final)
# if there is no image, then just save the text of the tweet
else:
list_tweets.append(text)
return {"result": list_tweets} | 5dddf4ad7c4ee45d1bf3a61f308989dffc451cc2 | 9,827 |
import tensorflow as tf
import tensorflow_hub as hub
def build_classifier_model(tfhub_handle_preprocess, tfhub_handle_encoder):
"""Builds a simple binary classification model with BERT trunk."""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing')
encoder_inputs = preprocessing_layer(text_input)
encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
net = outputs['pooled_output']
net = tf.keras.layers.Dropout(0.1)(net)
net = tf.keras.layers.Dense(1, activation=None, name='classifier')(net)
return tf.keras.Model(text_input, net) | 03267ac3ab89acf3b6e7381e0149917371e1f29f | 9,828 |
def execute_sql(sql: str, param: list):
"""
    Execute a query SQL statement and return the result list.
    :param param: parameters for the query
    :param sql: the SQL statement to execute
    :return: the result list
"""
cursor = connection.cursor()
res = cursor.execute(sql, param)
return res | cf33a7f4d6b6486def88e6a8256ebbfe8a5a2b0d | 9,829 |
def query_jwt_required(fn):
"""
A decorator to protect a query resolver.
    If you decorate a resolver with this, it will ensure that the requester
has a valid access token before allowing the resolver to be called. This
does not check the freshness of the access token.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
print(args[0])
token = kwargs.pop(current_app.config['JWT_TOKEN_ARGUMENT_NAME'])
try:
verify_jwt_in_argument(token)
except Exception as e:
return AuthInfoField(message=str(e))
return fn(*args, **kwargs)
return wrapper | 74d665dd853deb180554db13cfb40af5815d2858 | 9,830 |
def callers_for_code(code):
"""
Return all users matching the code.
:param code:
:return:
"""
return db.session.query(Caller).filter(Caller.code==code).all() | b8b1779c880144e8455e17a6f8c5051daa0839b2 | 9,831 |
import numpy
import collections
import json
def WriteJsonFile(filename, params, database):
"""Write database out as a .dat file.
Args:
filename: Name of output file to write database to.
params: Parameter structure used to generate the database.
database: Dictionary of ndarrays of aerodynamic coefficients and
derivatives.
"""
def _PrepareCoefficientArray(array):
return numpy.reshape(numpy.rollaxis(array, -1), (array.size,)).tolist()
keys_and_values = [
('num_alphas', len(database['alphas'])),
('num_betas', len(database['betas'])),
('num_deltas', [len(database['delta1s']), len(database['delta2s']),
len(database['delta3s']), len(database['delta4s']),
len(database['delta5s']), len(database['delta6s']),
len(database['delta7s']), len(database['delta8s'])]),
('reynolds_number', database['reynolds_number']),
('alphas', database['alphas']),
('betas', database['betas']),
('delta1s', database['delta1s']),
('delta2s', database['delta2s']),
('delta3s', database['delta3s']),
('delta4s', database['delta4s']),
('delta5s', database['delta5s']),
('delta6s', database['delta6s']),
('delta7s', database['delta7s']),
('delta8s', database['delta8s']),
('cfm', _PrepareCoefficientArray(database['cfm'])),
('dcfm_dp', _PrepareCoefficientArray(database['dcfm_dp'])),
('dcfm_dq', _PrepareCoefficientArray(database['dcfm_dq'])),
('dcfm_dr', _PrepareCoefficientArray(database['dcfm_dr'])),
('dcfm1', _PrepareCoefficientArray(database['dcfm1'])),
('dcfm1_dp', _PrepareCoefficientArray(database['dcfm1_dp'])),
('dcfm1_dq', _PrepareCoefficientArray(database['dcfm1_dq'])),
('dcfm1_dr', _PrepareCoefficientArray(database['dcfm1_dr'])),
('dcfm2', _PrepareCoefficientArray(database['dcfm2'])),
('dcfm2_dp', _PrepareCoefficientArray(database['dcfm2_dp'])),
('dcfm2_dq', _PrepareCoefficientArray(database['dcfm2_dq'])),
('dcfm2_dr', _PrepareCoefficientArray(database['dcfm2_dr'])),
('dcfm3', _PrepareCoefficientArray(database['dcfm3'])),
('dcfm3_dp', _PrepareCoefficientArray(database['dcfm3_dp'])),
('dcfm3_dq', _PrepareCoefficientArray(database['dcfm3_dq'])),
('dcfm3_dr', _PrepareCoefficientArray(database['dcfm3_dr'])),
('dcfm4', _PrepareCoefficientArray(database['dcfm4'])),
('dcfm4_dp', _PrepareCoefficientArray(database['dcfm4_dp'])),
('dcfm4_dq', _PrepareCoefficientArray(database['dcfm4_dq'])),
('dcfm4_dr', _PrepareCoefficientArray(database['dcfm4_dr'])),
('dcfm5', _PrepareCoefficientArray(database['dcfm5'])),
('dcfm5_dp', _PrepareCoefficientArray(database['dcfm5_dp'])),
('dcfm5_dq', _PrepareCoefficientArray(database['dcfm5_dq'])),
('dcfm5_dr', _PrepareCoefficientArray(database['dcfm5_dr'])),
('dcfm6', _PrepareCoefficientArray(database['dcfm6'])),
('dcfm6_dp', _PrepareCoefficientArray(database['dcfm6_dp'])),
('dcfm6_dq', _PrepareCoefficientArray(database['dcfm6_dq'])),
('dcfm6_dr', _PrepareCoefficientArray(database['dcfm6_dr'])),
('dcfm7', _PrepareCoefficientArray(database['dcfm7'])),
('dcfm7_dp', _PrepareCoefficientArray(database['dcfm7_dp'])),
('dcfm7_dq', _PrepareCoefficientArray(database['dcfm7_dq'])),
('dcfm7_dr', _PrepareCoefficientArray(database['dcfm7_dr'])),
('dcfm8', _PrepareCoefficientArray(database['dcfm8'])),
('dcfm8_dp', _PrepareCoefficientArray(database['dcfm8_dp'])),
('dcfm8_dq', _PrepareCoefficientArray(database['dcfm8_dq'])),
('dcfm8_dr', _PrepareCoefficientArray(database['dcfm8_dr']))
]
output_dict = collections.OrderedDict(
keys_and_values + [('params', dict_util.OrderDict(params))])
class _ParamsEncoder(json.JSONEncoder):
"""JSON encoder which handles the Airfoil objects and numpy arrays."""
def default(self, o):
if isinstance(o, airfoil.Airfoil):
return str(o)
elif isinstance(o, numpy.ndarray):
return o.tolist()
return json.JSONEncoder.default(self, o)
with open(filename, 'w') as f:
output_string = json.dumps(output_dict, separators=(', ', ':\n '),
cls=_ParamsEncoder)
output_string = (output_string
.replace(', \"', ',\n\"')
.replace('], [', '],\n [')
.replace(' [[', '[[')
.replace('{', '{\n')
.replace('}', '\n}')) + '\n'
f.write(output_string) | 38fe51e463bb099ecb7f1a5beb3c427f471b7611 | 9,832 |
def headers():
""" default HTTP headers for all API calls """
return {"Content-type": "application/json"} | e601410c7ba22f28a88e47742349087744495c6b | 9,833 |
import logging
import numpy as np
def preprocess_confidence(train_df, test_df=None):
"""
Feature creation that should be done given training data and then merged \
with test data.
"""
ATTRIBUTION_CATEGORIES = [
# V1 Features #
###############
['ip'], ['app'], ['device'], ['os'], ['channel'],
# V2 Features #
###############
['app', 'channel'],
['app', 'os'],
['app', 'device'],
# V3 Features #
###############
['channel', 'os'],
['channel', 'device'],
['os', 'device']
]
# Find frequency of is_attributed for each unique value in column
logging.info("Calculating new features: Confidence rates...")
for cols in ATTRIBUTION_CATEGORIES:
# New feature name
new_feature = '_'.join(cols) + '_confRate'
logging.info(new_feature)
# Perform the groupby
group_object = train_df.groupby(cols)
# Group sizes
group_sizes = group_object.size()
# Print group size descriptives once
if test_df is None:
logging.info(
"Calculating confidence-weighted rate for: {}.\n Saving to: {}. \
Group Max / Mean / Median / Min: {} / {} / {} / {}".format(
cols, new_feature,
group_sizes.max(),
np.round(group_sizes.mean(), 2),
np.round(group_sizes.median(), 2),
group_sizes.min()
))
# Merge function
def merge_new_features(group_object, df):
df = df.merge(
group_object['is_attributed']. \
apply(rate_calculation). \
reset_index(). \
rename(
index=str,
columns={'is_attributed': new_feature}
)[cols + [new_feature]],
on=cols, how='left'
)
# Replace NaNs by average of column
df = df.fillna(df.mean())
return df
# Perform the merge
if test_df is None:
train_df = merge_new_features(group_object, train_df)
elif test_df is not None:
test_df = merge_new_features(group_object, test_df)
# Return the relevant data frame
if test_df is None:
return train_df
elif test_df is not None:
return test_df | ff157a02ac9253fb1fd2e2306fb207f6c0022317 | 9,834 |
def ancestral_state_pair(aln,tree,pos1,pos2,\
ancestral_seqs=None,null_value=gDefaultNullValue):
"""
"""
ancestral_seqs = ancestral_seqs or get_ancestral_seqs(aln,tree)
ancestral_names_to_seqs = \
dict(zip(ancestral_seqs.Names,ancestral_seqs.ArraySeqs))
distances = tree.getDistances()
tips = tree.getNodeNames(tipsonly=True)
# map names to nodes (there has to be a built-in way to do this
# -- what is it?)
nodes = dict([(n,tree.getNodeMatchingName(n)) for n in tips])
# add tip branch lengths as distance b/w identical tips -- this is
# necessary for my weighting step, where we want correlated changes
    # occurring on a single branch to be given the most weight
distances.update(dict([((n,n),nodes[n].Length) for n in nodes]))
result = 0
names_to_seqs = dict(zip(aln.Names,aln.ArraySeqs))
for i in range(len(tips)):
org1 = tips[i]
seq1 = names_to_seqs[org1]
for j in range(i,len(tips)):
org2 = tips[j]
seq2 = names_to_seqs[org2]
ancestor = nodes[org1].lastCommonAncestor(nodes[org2]).Name
if ancestor == org1 == org2:
# we're looking for correlated change along a
# single branch
ancestral_seq = ancestral_names_to_seqs[\
nodes[org1].ancestors()[0].Name]
else:
# we're looking for correlated change along different
# branches (most cases)
ancestral_seq = ancestral_names_to_seqs[ancestor]
# get state of pos1 in org1, org2, and ancestor
org1_p1 = seq1[pos1]
org2_p1 = seq2[pos1]
ancestor_p1 = ancestral_seq[pos1]
# if pos1 has changed in both organisms since their lca,
# this is a position of interest
if org1_p1 != ancestor_p1 and org2_p1 != ancestor_p1:
# get state of pos2 in org1, org2, and ancestor
org1_p2 = seq1[pos2]
org2_p2 = seq2[pos2]
ancestor_p2 = ancestral_seq[pos2]
# if pos2 has also changed in both organisms since their lca,
# then we add a count for a correlated change
if org1_p2 != ancestor_p2 and org2_p2 != ancestor_p2:
# There are a variety of ways to score. The simplest is
# to increment by one, which seems to be what was done
# in other papers.) This works well, but in a quick test
# (alpha helices/myoglobin with several generally
# high scoring alphabets) weighting works better. A more
# detailed analysis is in order.
#result += 1
# Now I weight based on distance so
# changes in shorter time are scored higher than
# in longer time. (More ancient changes
# are more likely to be random than more recent changes,
# b/c more time has passed for the changes to occur in.)
# This gives results
# that appear to be better under some circumstances,
# and at worst, about the same as simply incrementing
# by 1.
result += (1/distances[(org1,org2)])
# Another one to try might involve discounting the score
# for a pair when one changes and the other doesn't.
return result | f5f1b58231dea83f2d546cc3ffb42ae94b769107 | 9,835 |
import numpy as np
def get_a(i,j,k):
    """returns the between-TAD coordinate pairs"""
i,j,k = np.sort([i,j,k])
ax,ay=[],[]
for x_ in range(i,j+1):
for y_ in range(j+1,k+1):
ax.append(x_)
ay.append(y_)
return ax,ay | ed32978045b32d4bf6863919177a726f5ee55d76 | 9,836 |
def get_mode(input_list: list):
"""
    Gets the mode of a given list. If there is more than one mode, the function returns False.
This is a very slow way to accomplish this, but it gets a mode, which can only be 4 things, so it should be OK
"""
if len(input_list) == 0:
return False
distinguished_elements = {}
for element in input_list:
if element not in distinguished_elements:
distinguished_elements[element] = 0
# Count all of the elements and save them in a dictionary
for key, value in distinguished_elements.items():
distinguished_elements[key] = input_list.count(key)
# Get the mode
max_key = None
max_value = 0
for key, value in distinguished_elements.items():
if value > max_value:
max_key = key
max_value = value
# If there's a second mode, return False
for key, value in distinguished_elements.items():
if value == max_value and key != max_key:
return False
return max_key | 552620bb68e3922dff7b19f52f5da9dbee813ca3 | 9,837 |
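# Usage sketch for get_mode: a unique most frequent element is returned, while ties and
# empty input give False.
#   get_mode([1, 2, 2, 3])  # -> 2
#   get_mode([1, 1, 2, 2])  # -> False
#   get_mode([])            # -> False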
def qqe(close, length=None, smooth=None, factor=None, mamode=None, drift=None, offset=None, **kwargs):
"""Indicator: Quantitative Qualitative Estimation (QQE)"""
# Validate arguments
length = int(length) if length and length > 0 else 14
smooth = int(smooth) if smooth and smooth > 0 else 5
factor = float(factor) if factor else 4.236
wilders_length = 2 * length - 1
mamode = mamode if isinstance(mamode, str) else "ema"
close = verify_series(close, max(length, smooth, wilders_length))
drift = get_drift(drift)
offset = get_offset(offset)
if close is None: return
# Calculate Result
rsi_ = rsi(close, length)
_mode = mamode.lower()[0] if mamode != "ema" else ""
rsi_ma = ma(mamode, rsi_, length=smooth)
# RSI MA True Range
rsi_ma_tr = rsi_ma.diff(drift).abs()
# Double Smooth the RSI MA True Range using Wilder's Length with a default
# width of 4.236.
smoothed_rsi_tr_ma = ma("ema", rsi_ma_tr, length=wilders_length)
dar = factor * ma("ema", smoothed_rsi_tr_ma, length=wilders_length)
# Create the Upper and Lower Bands around RSI MA.
upperband = rsi_ma + dar
lowerband = rsi_ma - dar
m = close.size
long = Series(0, index=close.index)
short = Series(0, index=close.index)
trend = Series(1, index=close.index)
qqe = Series(rsi_ma.iloc[0], index=close.index)
qqe_long = Series(npNaN, index=close.index)
qqe_short = Series(npNaN, index=close.index)
for i in range(1, m):
c_rsi, p_rsi = rsi_ma.iloc[i], rsi_ma.iloc[i - 1]
c_long, p_long = long.iloc[i - 1], long.iloc[i - 2]
c_short, p_short = short.iloc[i - 1], short.iloc[i - 2]
# Long Line
if p_rsi > c_long and c_rsi > c_long:
long.iloc[i] = npMaximum(c_long, lowerband.iloc[i])
else:
long.iloc[i] = lowerband.iloc[i]
# Short Line
if p_rsi < c_short and c_rsi < c_short:
short.iloc[i] = npMinimum(c_short, upperband.iloc[i])
else:
short.iloc[i] = upperband.iloc[i]
# Trend & QQE Calculation
# Long: Current RSI_MA value Crosses the Prior Short Line Value
# Short: Current RSI_MA Crosses the Prior Long Line Value
if (c_rsi > c_short and p_rsi < p_short) or (c_rsi <= c_short and p_rsi >= p_short):
trend.iloc[i] = 1
qqe.iloc[i] = qqe_long.iloc[i] = long.iloc[i]
elif (c_rsi > c_long and p_rsi < p_long) or (c_rsi <= c_long and p_rsi >= p_long):
trend.iloc[i] = -1
qqe.iloc[i] = qqe_short.iloc[i] = short.iloc[i]
else:
trend.iloc[i] = trend.iloc[i - 1]
if trend.iloc[i] == 1:
qqe.iloc[i] = qqe_long.iloc[i] = long.iloc[i]
else:
qqe.iloc[i] = qqe_short.iloc[i] = short.iloc[i]
# Offset
if offset != 0:
rsi_ma = rsi_ma.shift(offset)
qqe = qqe.shift(offset)
long = long.shift(offset)
short = short.shift(offset)
# Handle fills
if "fillna" in kwargs:
rsi_ma.fillna(kwargs["fillna"], inplace=True)
qqe.fillna(kwargs["fillna"], inplace=True)
qqe_long.fillna(kwargs["fillna"], inplace=True)
qqe_short.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
rsi_ma.fillna(method=kwargs["fill_method"], inplace=True)
qqe.fillna(method=kwargs["fill_method"], inplace=True)
qqe_long.fillna(method=kwargs["fill_method"], inplace=True)
qqe_short.fillna(method=kwargs["fill_method"], inplace=True)
# Name and Categorize it
_props = f"{_mode}_{length}_{smooth}_{factor}"
qqe.name = f"QQE{_props}"
rsi_ma.name = f"QQE{_props}_RSI{_mode.upper()}MA"
qqe_long.name = f"QQEl{_props}"
qqe_short.name = f"QQEs{_props}"
qqe.category = rsi_ma.category = "momentum"
qqe_long.category = qqe_short.category = qqe.category
# Prepare DataFrame to return
data = {
qqe.name: qqe, rsi_ma.name: rsi_ma,
# long.name: long, short.name: short
qqe_long.name: qqe_long, qqe_short.name: qqe_short
}
df = DataFrame(data)
df.name = f"QQE{_props}"
df.category = qqe.category
return df | 9f1da38313effb6a585dd4b6a162034b83989557 | 9,838 |
def reference_col(
tablename, nullable=False, pk_name="id", foreign_key_kwargs=None, column_kwargs=None
):
"""Column that adds primary key foreign key reference.
Usage: ::
category_id = reference_col('category')
category = relationship('Category', backref='categories')
"""
foreign_key_kwargs = foreign_key_kwargs or {}
column_kwargs = column_kwargs or {}
return Column(
db.ForeignKey("{0}.{1}".format(tablename, pk_name), **foreign_key_kwargs),
nullable=nullable,
**column_kwargs
) | a266d81ac1c323dcf6d19e511b147b2b190b3c7e | 9,839 |
def Ei(x, minfloat=1e-7, maxfloat=10000):
"""Ei integral function."""
minfloat = min(np.abs(x), minfloat)
maxfloat = max(np.abs(x), maxfloat)
def f(t):
return np.exp(t) / t
if x > 0:
return (quad(f, -maxfloat, -minfloat)[0] + quad(f, minfloat, x)[0])
else:
return quad(f, -maxfloat, x)[0] | 6c6630f4d9a188ccb7a2e7a77b26a8c89a976e65 | 9,840 |
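# Usage sketch for Ei (uses scipy's quad as imported above); the principal-value
# quadrature reproduces the classical exponential-integral values:
#   Ei(1.0)   # -> approx  1.8951
#   Ei(-1.0)  # -> approx -0.2194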
def _MaybeMatchCharClassEsc (text, position, include_sce=True):
"""Attempt to match a U{character class escape
<http://www.w3.org/TR/xmlschema-2/#nt-charClassEsc>}
expression.
@param text: The complete text of the regular expression being
translated
@param position: The offset of the backslash that would begin the
potential character class escape
@param include_sce: Optional directive to include single-character
    escapes in addition to character class escapes.  Default is
C{True}.
@return: C{None} if C{position} does not begin a character class
escape; otherwise a pair C{(cps, p)} as in
L{_MatchCharPropBraced}."""
if '\\' != text[position]:
return None
position += 1
if position >= len(text):
raise RegularExpressionError(position, "Incomplete character escape")
nc = text[position]
np = position + 1
cs = None
if include_sce:
cs = unicode.SingleCharEsc.get(nc)
if cs is None:
cs = unicode.MultiCharEsc.get(nc)
if cs is not None:
return (cs, np)
if 'p' == nc:
return _MatchCharPropBraced(text, np)
if 'P' == nc:
(cs, np) = _MatchCharPropBraced(text, np)
return (cs.negate(), np)
if (not include_sce) and (nc in unicode.SingleCharEsc):
return None
raise RegularExpressionError(np, "Unrecognized escape identifier '\\%s'" % (nc,)) | 55394c636b94ea2008161e350ba9e9de3250555d | 9,841 |
import urllib
import http
def request(command, url, headers={}, data=None):
"""Mini-requests."""
class Dummy:
pass
parts = urllib.parse.urlparse(url)
c = http.client.HTTPConnection(parts.hostname, parts.port)
c.request(
command,
urllib.parse.urlunparse(parts._replace(scheme="", netloc="")),
headers=headers,
body=data,
)
r = c.getresponse()
result = Dummy()
result.status_code = r.status
result.headers = r.headers
result.content = r.read()
return result | a08c0be0549ebe133f42ca56b77c4f113627db2a | 9,842 |
def __ConvertOSBGToLocal(easting, northing, Eo, No, one_over_CSF):
"""
Convert OSBG36 Easting-Northing to local grid coordinates
:param easting: easting in OSBG36
:param northing: northing in OSBG36
:param Eo: delta easting of local grid
:param No: delta northing of local grid
    :param one_over_CSF: reciprocal CSF (combined scale factor, = 1/CSF)
    :returns: tuple of local grid coordinates (x_local, y_local)
"""
#x-coord in local grid
x_local = (easting - Eo)*(one_over_CSF)
#y-coord in local grid
y_local = (northing - No)*(one_over_CSF)
#return a tuple of coordinates
return (x_local, y_local) | 276eb0f6be4caa9c9e02bdc23e5214ef66cd9417 | 9,843 |
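# Worked example for __ConvertOSBGToLocal (illustrative grid constants): with Eo=400000,
# No=100000 and 1/CSF=0.9996, an OSGB36 point (401000, 101500) maps to
# ((401000-400000)*0.9996, (101500-100000)*0.9996) = (999.6, 1499.4).
#   __ConvertOSBGToLocal(401000, 101500, 400000, 100000, 0.9996)  # -> (999.6, 1499.4)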
import time
def sum_of_n_2(n):
"""
    Iterative summation, using only addition.
    :param n: sum of 1 to n
    :return: a tuple whose first element is the value and whose second is the elapsed time
"""
start = time.time()
the_sum = 0
for i in range(1, n + 1):
the_sum = the_sum + i
end = time.time()
return the_sum, end - start | 45a07d5a8d02f515b8d0730a9a10bf71398092e8 | 9,844 |
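# Usage sketch for sum_of_n_2: the first tuple element is the series sum, the second the
# elapsed wall time in seconds.
#   total, elapsed = sum_of_n_2(10)  # total == 55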
def TSA_t_g( temperature, temperature_vegetation, vegetation_fraction):
"""
//Temperature of ground from Tvegetation
//Based on two sources pixel split
//Chen et al., 2005. IJRS 26(8):1755-1762.
//Estimation of daily evapotranspiration using a two-layer remote sensing model.
Ground temperature, bare soil
TSA_t_g( temperature, temperature_vegetation, vegetation_fraction)
"""
result = (temperature - (vegetation_fraction*temperature_vegetation)) / (1 - vegetation_fraction)
return result | c1189f2554e3f7dba13a2c8fd8698f906d343611 | 9,845 |
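# Worked example for TSA_t_g (illustrative values): with a mixed-pixel temperature of
# 300 K, vegetation temperature 295 K and vegetation fraction 0.6, the ground component
# is (300 - 0.6*295) / (1 - 0.6) = 307.5 K.
#   TSA_t_g(300.0, 295.0, 0.6)  # -> 307.5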
def get_dict_buildin(dict_obj, _type=(int, float, bool, str, list, tuple, set, dict)):
"""
    get a dictionary from the value, ignoring non-built-in objects
"""
non_buildin = {key for key in dict_obj if not isinstance(dict_obj[key], _type)}
return dict_obj if not non_buildin else {key: dict_obj[key] for key in dict_obj if key not in non_buildin} | 14e68e5cc5c35a5e4a440dfb4c842a987c441da6 | 9,846 |
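# Usage sketch for get_dict_buildin: values that are not plain built-in types are dropped;
# if every value is built-in, the original dict object is returned unchanged.
#   get_dict_buildin({"a": 1, "b": object()})  # -> {"a": 1}
#   get_dict_buildin({"a": 1, "b": [2, 3]})    # -> same dict, untouched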
def _generic_fix_integers(model):
"""
Fix the integers of a model to its solution, and removes the variables.
:param model:
:return:
"""
continuous_model = model.copy()
continuous_model.name = model.name + ' - continuous'
integer_variables = set()
constraints_with_integer_variables = []
if not hasattr(model, 'solution'):
model.logger.info('Model has no solution to fix the integers, calculating one')
model.optimize()
# We go through all the constraint descriptors and check if at least one of
# their variables is in the integer variable list
for this_cons in continuous_model._cons_dict.values():
has_integer_variable = False
for this_var in this_cons.constraint.variables:
if this_var.type in INTEGER_VARIABLE_TYPES:
has_integer_variable += True
this_var_descriptor = this_var.name
integer_variables.add(this_var_descriptor)
constraints_with_integer_variables.append(this_cons.name)
int_dict = {continuous_model.variables[x]: model.solution.x_dict[x]
for x in integer_variables}
for this_cons_name in constraints_with_integer_variables:
this_cons = model._cons_dict[this_cons_name]
new_expr = this_cons.expr.subs(int_dict)
kind = type(this_cons)
ub = this_cons.constraint.ub
lb = this_cons.constraint.lb
the_id = this_cons.id
        # TODO make faster, using cons.change_expr and ad-hoc subs dicts
continuous_model.remove_constraint(this_cons)
rebuild_constraint(classname=kind.__name__,
model=continuous_model,
this_id=the_id,
new_expr=new_expr,
lb=lb,
ub=ub)
for this_var in integer_variables:
# This_var is an InterfaceVariable object, we want the GenericVariable
# it belongs to
the_generic_var = continuous_model._var_dict[this_var.name]
continuous_model.remove_variable(the_generic_var)
continuous_model._push_queue()
continuous_model.solver.update()
# This will update the values =
print('Is the cobra_model still integer ? {}' \
.format(continuous_model.solver.is_integer))
return continuous_model | f6c05504d678561d8fb99fc8ae79225689ab83fe | 9,847 |
def start(timeout=5, backlog_reassign_delay=None):
"""Create, start, and return the block pipeline."""
pipeline = create_pipeline(timeout=timeout,
backlog_reassign_delay=backlog_reassign_delay)
pipeline.start()
return pipeline | cb157de13af43af5830ffc75654fdba16de4da44 | 9,848 |
def preprocess_variable_features(features, interaction_augmentation, normalization):
"""
Features preprocessing following Khalil et al. (2016) Learning to Branch in Mixed Integer Programming.
Parameters
----------
features : 2D np.ndarray
The candidate variable features to preprocess.
interaction_augmentation : bool
Whether to augment features with 2-degree interactions (useful for linear models such as SVMs).
normalization : bool
        Whether to normalize features in [0, 1] (i.e., query-based normalization).
Returns
-------
variable_features : 2D np.ndarray
The preprocessed variable features.
"""
# 2-degree polynomial feature augmentation
if interaction_augmentation:
interactions = (
np.expand_dims(features, axis=-1) * \
np.expand_dims(features, axis=-2)
).reshape((features.shape[0], -1))
features = np.concatenate([features, interactions], axis=1)
# query-based normalization in [0, 1]
if normalization:
features -= features.min(axis=0, keepdims=True)
max_val = features.max(axis=0, keepdims=True)
max_val[max_val == 0] = 1
features /= max_val
return features | 2b8d32a69e4ebe645fc942e7fe57ecea1560f158 | 9,849 |
import os
def load_dicomdir_records(datasets):
""" If a Data Set is a DICOMDIR Record, replace it by the file it
(or its children) references.
"""
result = []
file_ids = set()
for dataset in datasets :
if "directory_record_type" in dataset : # Directory Record Type
children = get_child_file_records(dataset)
file_ids.update([(child.path, tuple(child.referenced_file_id.value))
for child in children])
else :
result.append(dataset)
for index, (path, file_id) in enumerate(file_ids) :
filename = find_dicomdir_file(os.path.dirname(path), file_id)
result.append(dataset_io.read(filename))
load_dicomdir_records.progress(float(1+index)/float(len(file_ids)))
load_dicomdir_records.progress(1.0)
return result | e9262f015e94e9f0af36e0f0b60e909300d647a5 | 9,850 |
import numpy as np
from scipy.linalg import cho_solve
def reverse_inverse_from_cholesky_band_proto(S, l):
"""
S -> L
:param S: sparse subset inverse of banded matrix L
:param l: number of subdiagonals in S
:return: Ls: reconstructed cholesky decomposition
"""
# forward pass
k = l + 1 # bandwidth
n = S.shape[1]
# construct vector e = [1, 0, ..., 0]
V = np.zeros_like(S)
e = np.zeros((k))
e[0] = 1
for i in range(n):
chol_S = np.linalg.cholesky(S[i : i + k, i : i + k])
V[i : i + k, i] = cho_solve((chol_S, True), e[: n - i])
Ls = V / np.sqrt(np.diag(V)[None, :])
return Ls | 90c421fc71a76be0adff7ce49d62bc5b364fdf94 | 9,851 |
def fused_bn_grad_5D_run_2(shape, dtype, eps, kernel_name, attrs):
""" test bnGrad_2 """
def get_expect(dgamma_red_hw, dbeta_red_hw, var, gamma, eps, data_shape):
m = data_shape[0] * data_shape[2] * data_shape[3]
neg_m_rec = -1.0 / m
eps = np.array([eps], dtype=var.dtype).reshape([1] * 5)
neg_m_rec = np.array([neg_m_rec], dtype=var.dtype).reshape([1] * 5)
s = (1.0 / np.sqrt(var + eps)).astype(var.dtype)
dgamma = s * np.sum(dgamma_red_hw, axis=0, keepdims=True)
dbeta = np.sum(dbeta_red_hw, axis=0, keepdims=True)
rs = gamma * s
dgamma_dx = neg_m_rec * rs * s * dgamma
dbeta_dx = neg_m_rec * rs * dbeta
return [dgamma, dbeta, rs, dgamma_dx, dbeta_dx]
shape_nc1c0 = (shape[0], shape[1], 1, 1, shape[4])
shape_c1c0 = (1, shape[1], 1, 1, shape[4])
bng2_shapes = [shape_nc1c0, shape_nc1c0, shape_c1c0, shape_c1c0]
bng2_dtypes = ["float32"] * len(bng2_shapes)
bng2_opattrs = [eps, shape]
# np.random.seed(0)
inputs = [np.random.rand(*s).astype(t) for (s, t) in zip(bng2_shapes, bng2_dtypes)]
out_shapes = [shape_c1c0, shape_c1c0, shape_c1c0, shape_c1c0, shape_c1c0]
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(fused_batch_norm_grad_split.fused_bn_grad_2,
bng2_shapes, bng2_dtypes, bng2_opattrs,
kernel_name=kernel_name + "_step2", attrs=attrs, tuning=t)
if t:
outputs = [np.full(s, np.nan, "float32") for s in out_shapes]
expects = get_expect(*inputs, *bng2_opattrs)
return mod, expects, {"args": (*inputs, *outputs), 'outputs': tuple(range(-len(outputs), 0)),
'tuning': False}
else:
return mod
mod = utils.op_build_test(fused_batch_norm_grad_split.fused_bn_grad_2,
bng2_shapes, bng2_dtypes, bng2_opattrs,
kernel_name=kernel_name + "_step2", attrs=attrs)
outputs = [np.full(s, np.nan, "float32") for s in out_shapes]
outputs = list(utils.mod_launch(mod, (*inputs, *outputs), outputs=tuple(range(-len(outputs), 0)),
expect=get_expect(*inputs, *bng2_opattrs)))
expects = get_expect(*inputs, *bng2_opattrs)
rtol, atol = get_rtol_atol("fused_batch_norm_grad", dtype)
results = list(map(lambda x, y: np.allclose(x, y, rtol=rtol, atol=atol), outputs, expects))
print("results", results)
return inputs, outputs, expects, all(results) | 0d792e48be537c7415c9e3ac465f39de5d1c136c | 9,852 |
import numpy
def get_best_distance(pdb_file, reference_point, resname="GRW"):
"""
Finds fragment atom closest to the user-defined reference point.
Parameters
----------
pdb_file : str
Path to PDB file.
reference_point : list[float]
Coordinates of the reference point to which the distance should be calculated.
resname : str
Residue name of the grown fragment, default = "GRW".
Returns
-------
Distance from the closest atom to the reference point and closes atom name.
"""
struct = parser.get_structure("epoch", pdb_file)
ref_vector = numpy.array(reference_point)
best_dist = None
for residue in struct.get_residues():
if residue.resname == resname:
for atom in residue.get_atoms():
atom_vector = numpy.array(atom.get_coord())
dist = numpy.linalg.norm(atom_vector - ref_vector)
if not best_dist or dist < best_dist:
best_atom = atom
best_dist = dist
return best_dist, best_atom | 5ad05055ddede103e2f24d91adbb6c950f6bf886 | 9,853 |
def default_IM_weights(IM_j: IM, IMs: np.ndarray) -> pd.Series:
"""
Returns the default IM weights based on the conditioning IM
If the conditioning IM (IM_j) is spectral acceleration (SA) the
weighting is 70% across the SAs and 30% across all other IMs
Otherwise a uniform weighting distribution is used
Parameters
----------
IM_j: IM
Conditioning IM
IMs: list of IM
IM types for which to get the default weights
Returns
-------
im_weights: pandas series
        Weights for the specified IM types
"""
# Use 70% (SA) / 30% (other) weighting if
# conditioning IM is SA
if IM_j.is_pSA():
pSA_mask = np.asarray([cur_im.im_type is IMType.pSA for cur_im in IMs])
n_pSA_IMs = np.count_nonzero(pSA_mask)
n_other_IMs = IMs.size - n_pSA_IMs
if n_other_IMs == 0:
im_weights = np.ones(n_pSA_IMs, dtype=float) / n_pSA_IMs
else:
im_weights = np.full(IMs.size, np.nan)
im_weights[pSA_mask] = (1.0 / n_pSA_IMs) * 0.7
im_weights[~pSA_mask] = (1.0 / n_other_IMs) * 0.3
# Otherwise, default to uniform weighting
else:
print(
f"WARNING: Defaulting to uniform IM weighting as the "
f"conditioning is not SA."
)
im_weights = np.ones(IMs.size, dtype=float) / IMs.size
return pd.Series(data=im_weights, index=IMs) | d13dcd0e1022923512e35b798307e27e90aba40f | 9,854 |
from typing import Type
import mimetypes
def guess_file_type(filename: FileSystemPath) -> Type[PackFile]:
"""Helper to figure out the most appropriate file type depending on a filename."""
filename = str(filename)
if filename.endswith(".json"):
return JsonFile
elif filename.endswith((".yml", ".yaml")):
return YamlFile
elif filename.endswith(".png"):
return PngFile
mime_type, _ = mimetypes.guess_type(filename, strict=False)
if mime_type and mime_type.startswith("text/"):
return TextFile
return BinaryFile | eedb9f90f8caa754334614017f48ccafea6d2a07 | 9,855 |
import math
def _fcn_mg_joint_pos(t, q_init, q_end, t_strike_end):
"""Helper function for `create_mg_joint_pos_policy()` to fit the `TimePolicy` scheme"""
return ((q_end - q_init) * min(t / t_strike_end, 1) + q_init) / 180 * math.pi | 892a494ea5ee2033d2f29efe7400bce8aab30c1c | 9,856 |
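# Worked example for _fcn_mg_joint_pos (illustrative values): ramping from 0 to 90 degrees
# over t_strike_end = 1 s gives pi/4 rad at t = 0.5 s and saturates at pi/2 rad afterwards.
#   _fcn_mg_joint_pos(0.5, 0.0, 90.0, 1.0)  # -> ~0.7854
#   _fcn_mg_joint_pos(2.0, 0.0, 90.0, 1.0)  # -> ~1.5708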
def test_circuit_run(default_compilation_configuration):
"""Test function for `run` method of `Circuit`"""
def f(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
inputset = range(2 ** 3)
circuit = hnp.compile_numpy_function(f, {"x": x}, inputset, default_compilation_configuration)
for x in inputset:
assert circuit.run(x) == circuit.engine.run(x) | 068473b1390e9af69ef11d9c4189012ffa2c403e | 9,857 |
def detection():
"""
Programmed by: David Williams, Aspen Henry, and Slate Hayes
    Description: detection is a state where the server tries to find all the faces in a frame; if a face is registered,
then it looks for fingers held up next to the face.
"""
#STEP 1: Get and Process frame
# print("Detection!")
frame = ''
if not queues["rawFrame"].empty():
frame = queues["rawFrame"].get_nowait()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
ID, conf = recognizer.predict(cv2.resize(gray[y:y+h, x:x+w], (400, 400)))
#print("User ID:",ID, "\tconf:", conf)
# print(conf)
global registered_ids
if ID in registered_ids:
cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 255, 0, 2))
cv2.rectangle(frame, (x-int(1.5*w), y-int(1.5*h/2)), (x-2, y+int(1.5*(h/2))), (255, 0, 0, 2))
cv2.putText(frame, usernames[ID], (x, y+h+40), font, 2,(255,255,255),1,cv2.LINE_AA)
fingers = -1
roi = frame[y-int(1.5*h/2):y+int(1.5*h/2), x-int(1.5*w):x-2]
fingers = gesture.get_fingers(roi, True)
cv2.putText(frame, str(fingers), (x-int(1.5*w), y+int(1.5*h/2)+5), font, 2, (255,255,255), 1, cv2.LINE_AA)
tagProcessing(usernames[ID], fingers)
#print("User ID:",ID," Fingers:", fingers)
else:
cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 0, 255, 2))
cv2.putText(frame, "unkown", (x, y+h+40), font, 1,(255,255,255),1,cv2.LINE_AA)
#STEP 2: Facial Recognition
#STEP 3: Gesture Recognition
#STEP 4: Build CoMPES Tag
#STEP 5: Send processed frame to webpage
return frame | b5b36a8cceeb38467ae5f63840dfc13c8af9cfe7 | 9,858 |
import re
def rex_coverage(patterns, example_freqs, dedup=False):
"""
Given a list of regular expressions and a dictionary of examples
and their frequencies, this counts the number of times each pattern
    matches an example.
If ``dedup`` is set to ``True``, the frequencies are ignored, so that only
the number of keys is returned.
"""
results = []
for p in patterns:
p = '%s%s%s' % ('' if p.startswith('^') else '^',
p,
'' if p.endswith('$') else '$')
r = re.compile(p, re.U)
if dedup:
results.append(sum(1 if re.match(r, k) else 0
for k in example_freqs))
else:
results.append(sum(n if re.match(r, k) else 0
for (k, n) in example_freqs.items()))
return results | a9ac988348d1fa037508b0a2b6c71e077ca41627 | 9,859 |
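# Usage sketch for rex_coverage: each pattern is anchored and matched against every
# example key; frequencies are summed unless dedup=True, which counts distinct keys.
#   freqs = {"123": 2, "abc": 1, "456": 1}
#   rex_coverage([r"\d+", r"[a-z]+"], freqs)              # -> [3, 1]
#   rex_coverage([r"\d+", r"[a-z]+"], freqs, dedup=True)  # -> [2, 1]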
def build_diamond(validated_letter):
"""
>:param str validated_letter: A capital letter, that will be used to generate the
list of strings needed to print out the diamond.
>**Returns:** A list a strings that contains the correct spacing for printing
the diamond.
build_diamond is used to generate the list of strings needed to print the diamond structure.
It takes a single argument of a letter (in string format), and returns a list of strings.
This list of strings can then be printed with newline characters (using join) to output the
diamond structure.
"""
a_ascii = ord('A')
rows = ord(validated_letter) - a_ascii + 1
diamond = []
for row in list(range(rows)) + list(reversed(range(rows-1))):
if row == 0:
diamond.append('{: <{w1}}{current_letter}'.format('', w1=rows-1, current_letter=chr(a_ascii+row)))
else:
diamond.append('{: <{w1}}{current_letter}{: <{w2}}{current_letter}'.format('', '', w1=rows-row-1, current_letter=chr(a_ascii+row), w2=row*2-1))
return diamond | bd55281ee275d402d4f35701daacb3be0246812e | 9,860 |
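# Usage sketch for build_diamond: for the validated letter 'C' the rows widen towards the
# middle and then mirror back.
#   build_diamond('C')  # -> ['  A', ' B B', 'C   C', ' B B', '  A']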
from datetime import datetime
def human_timestamp(timestamp, now=datetime.datetime.utcnow):
"""Turn a :py:class:`datetime.datetime` into a human-friendly string."""
fmt = "%d %B at %H:%M"
if timestamp.year < now().year:
fmt = "%d %B %Y at %H:%M"
return timestamp.strftime(fmt) | 37cc6a3918aaa3622945f6650903f64a371e86a9 | 9,861 |
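# Usage sketch for human_timestamp (English month names assumed): the year is shown only
# when the timestamp comes from a year before `now()`.
#   human_timestamp(datetime.datetime(2020, 5, 4, 12, 30),
#                   now=lambda: datetime.datetime(2021, 1, 1))  # -> '04 May 2020 at 12:30'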
from functools import partial
import tensorflow as tf
from tensorflow.keras.layers import Activation, AvgPool3D, BatchNormalization, Concatenate, Conv3D
def inception_block(x: tf.keras.layers.Layer, nb_filters: int=64, name: str="block1"):
"""
3D inception block, as per Itzik et al. (2018)
"""
conv3d = partial(Conv3D, activation="linear", use_bias=False, padding="same")
batchn = partial(BatchNormalization, momentum=0.99, fused=True)
activn = partial(Activation, activation="relu")
conv_1x1 = conv3d(nb_filters, (1, 1, 1), name=name + "_1x1_conv3d")(x)
conv_1x1 = batchn(name=name + "_1x1_bn")(conv_1x1)
conv_1x1 = activn(name=name + "_1x1_relu")(conv_1x1)
conv_3x3 = conv3d(nb_filters // 2, (3, 3, 3), name=name + "_3x3_conv3d")(conv_1x1)
conv_3x3 = batchn(name=name + "_3x3_bn")(conv_3x3)
conv_3x3 = activn(name=name + "_3x3_relu")(conv_3x3)
conv_5x5 = conv3d(nb_filters // 2, (5, 5, 5), name=name + "_5x5_conv3d")(conv_1x1)
conv_5x5 = batchn(name=name + "_5x5_bn")(conv_5x5)
conv_5x5 = activn(name=name + "_5x5_relu")(conv_5x5)
avgpool = AvgPool3D(strides=(1, 1, 1), pool_size=(3, 3, 3), padding="same", name=name+"_avgpool")(x)
avgpool = conv3d(nb_filters, (1, 1, 1), name=name + "_avgpool_conv3d")(avgpool)
avgpool = batchn(name=name + "_avgpool_bn")(avgpool)
avgpool = activn(name=name + "_avgpool_relu")(avgpool)
return Concatenate(axis=-1, name=name+"_concat")([conv_1x1, conv_3x3, conv_5x5, avgpool]) | 8cfebd2a147fb1d8b6174df87dd220ab15a56205 | 9,862 |
import collections.abc
def _flatten_dict(d, parent_key='', sep='/'):
"""Flattens a dictionary, keeping empty leaves."""
items = []
for k, v in d.items():
path = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
items.extend(_flatten_dict(v, path, sep=sep).items())
else:
items.append((path, v))
# Keeps the empty dict if it was set explicitly.
if parent_key and not d:
items.append((parent_key, {}))
return dict(items) | 4bfb4c19209cb7997856a337c7ad66965598571a | 9,863 |
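# Usage sketch for _flatten_dict: nested keys are joined with the separator and dicts that
# were explicitly set to empty are kept as leaves.
#   _flatten_dict({"a": {"b": 1, "c": {}}})  # -> {"a/b": 1, "a/c": {}}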
def secret():
"""
Authenticated only route
@authenticated will flash a message if not authed
"""
return render_template('secret.html') | 4c0708d3e65942d3afbeaa2159f55a735a31a6a9 | 9,864 |
def mk_graph(img_dim, num_labels, poly_width = 3, depth = 3, hidd_repr_size = 512):
""" The function that creates and returns the graph required to
img_dim = image dimensions (Note, that the image needs to be flattened out before feeding here)
num_labels = no_of classes to classify into
"""
comp_graph = tf.Graph()
with comp_graph.as_default():
# step 1: Create the input placeholders for the input to the computation
with tf.name_scope("Input"):
            tf_input_images = tf.placeholder(tf.float32, shape=(None, img_dim), name="Input_Images")
tf_input_labels = tf.placeholder(tf.float32, shape=(None, num_labels), name="Input_Labels")
print("\nInput Placeholder Tensors:", tf_input_images, tf_input_labels)
# step 2: Construct the network architecture based on the width and the depth specified
# Note that this is static graph creation
# There doesn't seem to be any reason for dynamic graph building
def neural_layer(input, out_dim, step):
""" The method that defines a single neural layer
"""
# method to calculate the factorial of a number
factorial = lambda x: 1 if(x <= 1) else x * factorial(x - 1)
with tf.variable_scope("neural_layer"+str(step)):
# create the variable tensors ->
# additive bias
bias = tf.get_variable("bias", shape=(out_dim), initializer=tf.zeros_initializer())
# additive weight transformations
inp_dim = input.get_shape()[-1]
weights = [tf.get_variable("weight"+str(i), shape=(inp_dim, out_dim),
initializer=tf.contrib.layers.xavier_initializer(seed = FLAGS.seed_value))
for i in range(1, poly_width)]
# attach the summary ops to the biases and weights
bias_summary = tf.summary.histogram("Layer"+str(step)+"/bias", bias)
weights_summary = [tf.summary.histogram("Layer"+str(step)+"/"+weight.name, weight)
for weight in weights]
# define the compuataion ops for this layer
out = bias # initialize the output tensor
for degree in range(1, poly_width):
out = out + tf.matmul(tf.pow(input, degree) / factorial(degree), weights[degree - 1])
return out # return the calculated tensor
if(depth > 1):
lay1_out = neural_layer(tf_input_images, hidd_repr_size, 1)
else:
lay1_out = neural_layer(tf_input_images, num_labels, 1)
# define the while loop for creating the hidden layer computations
lay_out = lay1_out # initialize to output of first layer
for lay_no in range(2, depth):
lay_out = neural_layer(lay_out, hidd_repr_size, lay_no)
# define the output layer
if(depth > 1):
output = neural_layer(lay_out, num_labels, depth)
else:
output = lay1_out
print("Final output:", output)
return comp_graph, {"output": output, "labels": tf_input_labels, "input": tf_input_images} | a6d63bd967762289f1998af391112a682f89234f | 9,865 |
import logging
import numpy
def bandpassHLS_1_4(img, band, satsen):
"""Bandpass function applied to Sentinel-2 data as followed in HLS 1.4 products.
Reference:
        Claverie et al., 2018 - The Harmonized Landsat and Sentinel-2 surface reflectance data set.
Args:
img (array): Array containing image pixel values.
band (str): Band that will be processed, which can be 'B02','B03','B04','B8A','B01','B11' or 'B12'.
satsen (str): Satellite sensor, which can be 'S2A' or 'S2B'.
Returns:
array: Array containing image pixel values bandpassed.
"""
logging.info('Applying bandpass band {} satsen {}'.format(band, satsen))
    # Skakun et al., 2018 - Harmonized Landsat Sentinel-2 (HLS) Product User’s Guide
if satsen == 'S2A':
if band == 'coastal': # UltraBlue/coastal #MODIS don't have this band # B01
slope = 0.9959
offset = -0.0002
elif band == 'blue': # Blue # B02
slope = 0.9778
offset = -0.004
elif band == 'green': # Green # B03
slope = 1.0053
offset = -0.0009
elif band == 'red': # Red # B04
slope = 0.9765
offset = 0.0009
elif band == 'nir': # Nir # B08 B8A
slope = 0.9983
offset = -0.0001
elif band == 'swir1': # Swir 1 # B11
slope = 0.9987
offset = -0.0011
elif band == 'swir2': # Swir 2 # B12
slope = 1.003
offset = -0.0012
img = numpy.add(numpy.multiply(img, slope), offset)
elif satsen == 'S2B':
logging.debug("S2B")
if band == 'coastal': # UltraBlue/coastal #MODIS don't have this band # B01
slope = 0.9959
offset = -0.0002
elif band == 'blue': # Blue # B02
slope = 0.9778
offset = -0.004
elif band == 'green': # Green # B03
slope = 1.0075
offset = -0.0008
elif band == 'red': # Red # B04
slope = 0.9761
offset = 0.001
elif band == 'nir': # Nir # B08 B8A
slope = 0.9966
offset = 0.000
elif band == 'swir1': # Swir 1 # B11
slope = 1.000
offset = -0.0003
elif band == 'swir2': # Swir 2 # B12
slope = 0.9867
offset = -0.0004
img = numpy.add(numpy.multiply(img, slope), offset)
return img | 501b1adf2fc9fceced8f0d434ce4c09459fdfbf4 | 9,866 |
import os
import pickle
def read_dictionary(vocab_path):
"""
    Read the word2id dictionary from the vocabulary file path.
:param vocab_path:
:return:
"""
vocab_path = os.path.join(vocab_path)
with open(vocab_path, 'rb') as fr:
word2id = pickle.load(fr)
return word2id | 72df031eef0d7dba2b8f46d104eaeb5515a48d18 | 9,867 |
def createrawtransaction(inputs, outputs, outScriptGenerator=p2pkh):
"""
Create a transaction with the exact input and output syntax as the bitcoin-cli "createrawtransaction" command.
If you use the default outScriptGenerator, this function will return a hex string that exactly matches the
output of bitcoin-cli createrawtransaction.
But this function is extended beyond bitcoin-cli in the following ways:
inputs can have a "sig" field which is a binary hex string of the signature script
outputs can be a list of tuples rather than a dictionary. In that format, they can pass complex objects to
the outputScriptGenerator (use a tuple or an object), be a list (that is passed to CScript()), or a callable
"""
if not type(inputs) is list:
inputs = [inputs]
tx = CTransaction()
for i in inputs:
sigScript = i.get("sig", b"")
tx.vin.append(CTxIn(COutPoint(i["txid"], i["vout"]), sigScript, 0xffffffff))
pairs = []
if type(outputs) is dict:
for addr, amount in outputs.items():
pairs.append((addr,amount))
else:
pairs = outputs
for addr, amount in pairs:
if callable(addr):
tx.vout.append(CTxOut(amount * BTC, addr()))
elif type(addr) is list:
tx.vout.append(CTxOut(amount * BTC, CScript(addr)))
elif addr == "data":
tx.vout.append(CTxOut(0, CScript([OP_RETURN, unhexlify(amount)])))
else:
tx.vout.append(CTxOut(amount * BTC, outScriptGenerator(addr)))
tx.rehash()
return hexlify(tx.serialize()).decode("utf-8") | 05d41c89356513d989eaffdfc3a89fb4e987e0a5 | 9,868 |
from typing import List
def get_versions() -> List[str]:
"""
Gets a list of recognized CRSD urns.
Returns
-------
List[str]
"""
return list(sorted(urn_mapping.keys())) | e1730f386a4e26197800069a8f93b1a8ff850512 | 9,869 |
import math
def k2_factor_sq(df=inf,p=95):
"""Return a squared coverage factor for an elliptical uncertainty region
:arg df: the degrees-of-freedom (>=2)
:arg p: the coverage probability (%)
:type df: float
:type p: int or float
Evaluates the square of the coverage factor for an elliptical uncertainty
region with coverage probability ``p`` and ``df`` degrees of freedom
based on the F-distribution.
**Example**::
>>> reporting.k2_factor_sq(3)
56.99999999999994
"""
p = p / 100.0
if df > inf_dof:
return -2.0 * math.log(1-p)
elif(df>1):
# norm = l * (n-1) / (n - l) in the general
# 'l'-dimensional case for 'n' observations
# here l = 2, df = n-1
norm = 2*df / (df-1)
# `fdtri` is the inverse of the cumulative F distribution
# returning `x` such that `fdtr(dfn, dfd, x) = p`
return norm*special.fdtri(2.0,df-1.0,p)
else:
raise RuntimeError("invalid df={!r}".format( df ) ) | b6c4fbb0eb1bbb5a9c15ab44c791d937bc7bebd2 | 9,870 |
def get_cluster_version_path(cluster_id):
"""
Gives s3 full path of cluster_version file of a given cluster_id
"""
base_path = s3.get_cluster_info_base_path()
return "%s/%s/cluster_version.json"%(base_path, cluster_id) | ef514cde40aa582aa265ac2fd912077e3e1e17c9 | 9,871 |
import importlib
def import_module_attribute(function_path):
"""Import and return a module attribute given a full path."""
module, attribute = function_path.rsplit(".", 1)
app_module = importlib.import_module(module)
return getattr(app_module, attribute) | ce2647bb193c2a6c07949073f7c0d142ee8cd1b5 | 9,872 |
def rgb_to_hex(red, green, blue):
"""Give three color arrays, return a list of hex RGB strings"""
pat = "#{0:02X}{1:02X}{2:02X}"
return [pat.format(r & 0xff, g & 0xff, b & 0xff)
for r, g, b in zip(red, green, blue)] | 9126ae9e05d4a005d397f13fd4f0d9400efe5a65 | 9,873 |
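A quick check of rgb_to_hex with hypothetical channel arrays (values chosen for illustration):
red, green, blue = [255, 0], [128, 255], [0, 64]
# Each (r, g, b) triple is masked to 8 bits and formatted as an uppercase hex string.
print(rgb_to_hex(red, green, blue))  # ['#FF8000', '#00FF40']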
from typing import Tuple
def run_cli(cmd: str, print_output: bool = True, check: bool = False,) -> Tuple[int, str, str]:
"""Runs the command with `dcos` as the prefix to the shell command
and returns a tuple containing exit code, stdout, and stderr.
eg. `cmd`= "package install pkg-name" results in:
$ dcos package install pkg-name
"""
dcos_cmd = "dcos {}".format(cmd)
log.info("(CLI) {}".format(dcos_cmd))
return _run_cmd(dcos_cmd, print_output, check) | 3c6697061dab28fd3f741931d7f2430360f037ae | 9,874 |
def get_initialization(arg, indent_count):
"""Get the initialization string to use for this argument."""
t = get_base_c_type(arg)
if arg.is_array():
if t == "char*":
init = '[] = { "String 1", "String 2", "String 0" }'
else:
if arg.is_dictionary() or arg.is_structure():
indent = indent_count * " "
si0 = __make_structure_init_string(arg, 0)
si1 = __make_structure_init_string(arg, 1)
si2 = __make_structure_init_string(arg, 2)
f = "[3] =\n{0}{{ {1},\n{0} {2},\n{0} {3} }}"
init = f.format(indent, si0, si1, si2)
else:
init = "[10] = { 0 }"
else:
if arg.arg_type == "b":
init = " = FALSE"
elif arg.arg_type == "d":
init = " = 0.0"
elif arg.is_structure():
init = " = {0}".format(__make_structure_init_string(arg))
else:
init = " = 0"
return init | cc9867966b40bf9a0ce1a43c13cfd0040a3322db | 9,875 |
import re
from datetime import datetime
from time import strptime
def get_time_source_from_output(output):
""" Parse out 'Time Source' value from output
Time source output example : 'Time source is NTP, 23:59:38.461 EST Thu Jun 27 2019'
'Time source is NTP, *12:33:45.355 EST Fri Feb 7 2020'
Args:
output ('str'): Text output from command
Returns:
Datetime object
Format : datetime(year, month, day, hour, minute, second, microseconds)
"""
r1 = re.compile(
r"Time\ssource\sis\sNTP\,\s\.*\*?(?P<hour>\d+)\:(?P<minute>\d+)\:"
r"(?P<seconds>\d+)\.(?P<milliseconds>\d+)\s(?P<time_zone>"
r"\S+)\s(?P<day_of_week>\S+)\s(?P<month>\S+)\s(?P<day>\d+)"
r"\s(?P<year>\d+)")
for line in output.splitlines():
line = line.strip()
result = r1.match(line)
if result:
group = result.groupdict()
hour = int(group["hour"])
minute = int(group["minute"])
second = int(group["seconds"])
milliseconds = int(group["milliseconds"])
month = strptime(group["month"], "%b").tm_mon
day = int(group["day"])
year = int(group["year"])
return datetime(year, month, day, hour, minute, second,
milliseconds * 1000)
log.warning('Time source could not be found in output') | aae9c208e06d99ff6eb4ae9699946d0b17e53ca7 | 9,876 |
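A usage sketch for get_time_source_from_output using the sample line from the docstring:
output = "Time source is NTP, *12:33:45.355 EST Fri Feb 7 2020"
# Milliseconds are converted to microseconds for the datetime constructor.
print(get_time_source_from_output(output))  # datetime(2020, 2, 7, 12, 33, 45, 355000)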
def getStyleFX():
"""
Defines and returns the style effects
Returns: style effects (list of MNPR_FX)
"""
# general effects
distortionFX = MNPR_FX("distortion", "Substrate distortion", "controlSetB", [[1, 0, 0, 0]], ["distort", "revert"], ["noise"])
gapsOverlapsFX = MNPR_FX("gaps-overlaps", "Gaps and overlaps", "controlSetC", [[0, 0, 1, 0]], ["overlaps", "gaps"], ["noise"])
# watercolor effects
densityFX_WC = MNPR_FX("density", "Pigment turbulence", "controlSetA", [[0, 0, 1, 0]], ["accumulate", "dilute"], ["noise"])
applicationFX_WC = MNPR_FX("application", "Granulate | Dry-brush", "controlSetA", [[0, 1, 0, 0]], ["granulate", "dry-brush"], ["noise"])
blendingFX_WC = MNPR_FX("blending", "Color bleeding (wet-in-wet)", "controlSetC", [[0, 0, 0, 1]], ["bleed", "revert"], ["noise"])
edgeFX_WC = MNPR_FX("edge manip", "Edge darkening", "controlSetC", [[1, 0, 0, 0], [0, 1, 0, 0]], ["darken", "lighten", "wider", "narrower"], ["n. dark", "n. wide"])
watercolorFX = [densityFX_WC, applicationFX_WC, distortionFX, edgeFX_WC, gapsOverlapsFX, blendingFX_WC]
# oil effects
densityFX_OP = MNPR_FX("density", "Pigment turbulence", "controlSetA", [[0, 0, 1, 0]], ["accumulate", "dilute"], ["noise"])
blendingFX_OP = MNPR_FX("blending", "Paint stroke length", "controlSetC", [[0, 0, 0, 1]], ["increase", "decrease"], ["noise"])
detailFX_OP = MNPR_FX("detail", "Paint stroke width", "controlSetA", [[0, 0, 0, 1]], ["increase", "decrease"], ["noise"])
applicationFX_OP = MNPR_FX("application", "Impasto | Dry-brush", "controlSetA", [[0, 1, 0, 0]], ["impasto", "dry-brush"], ["noise"])
oilFX = [densityFX_OP, blendingFX_OP, detailFX_OP, applicationFX_OP, distortionFX, gapsOverlapsFX]
# charcoal effects
densityFX_CH = MNPR_FX("density", "Pigment density", "controlSetA", [[0, 0, 1, 0]], ["accumulate", "dilute"], ["noise"])
applicationFX_CH = MNPR_FX("application", "Pigment application", "controlSetA", [[0, 1, 0, 0]], ["even", "granulation"], ["noise"])
mixingFX_CH = MNPR_FX("mixing", "Mixing", "controlSetC", [[0, 0, 0, 1]], ["mix", "separate"], ["noise"])
smudgingFX_CH = MNPR_FX("smudging", "Smudging", "controlSetA", [[0, 0, 0, 1]], ["smudge", "revert"], ["noise"])
edgeFX_CH = MNPR_FX("edge manip", "Edge manipulation", "controlSetC", [[1, 0, 0, 0]], ["soften", "revert"], ["n. soften", "n. darken"])
charcoalFX = [distortionFX, densityFX_CH, applicationFX_CH, mixingFX_CH, smudgingFX_CH, edgeFX_CH]
# query mnpr style and return
style = cmds.mnpr(style=True, q=True).encode('latin1') # some users have had problems without encode('latin1')
if style == "Watercolor":
return watercolorFX
elif style == "Oil":
return oilFX
elif style == "Charcoal":
return charcoalFX
return [] | acb9382e041b99c9c76a6baa821b220192176bdd | 9,877 |
def deaths_this_year() -> dict:
"""Get number of deaths this year."""
return get_metric_of(label='deaths_this_year') | ae6f9ee5d00ebbf7dc7fea0c3702695268a38a17 | 9,878 |
def get_cls_dropdown_tree_view_item(object_name):
"""Get and return class of TreeViewItem Dropdown object according to
snapshotability
"""
base_cls = tree_view_item.CommonDropdownTreeViewItem
if object_name in objects.ALL_SNAPSHOTABLE_OBJS:
base_cls = tree_view_item.SnapshotsDropdownTreeViewItem
return _factory(cls_name=object_name, parent_cls=base_cls) | f9ee30bb75b2cd84e381293be1b0e2f6dc7a5d7b | 9,879 |
def make_webhdfs_url(host, user, hdfs_path, op, port=50070):
""" Forms the URL for httpfs requests.
INPUT
-----
host : str
The host to connect to for httpfs access to HDFS. (Can be 'localhost'.)
user : str
The user to use for httpfs connections.
hdfs_path : str
The full path of the file or directory being checked.
op : str
The httpfs operation string. E.g., 'GETFILESTATUS'.
port : int
The port to use for httpfs connections.
OUTPUT
------
str : The string to use for an HTTP request to httpfs.
"""
url = 'http://' + host + ':' + str(port) + '/webhdfs/v1'
url += hdfs_path + '?user.name=' + user + '&op=' + op
return url | c4899d75fd54558c6216889cbc749f5d0fe403df | 9,880 |
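A hypothetical call to make_webhdfs_url; host, user, and path are made-up values:
url = make_webhdfs_url('namenode1', 'hadoop', '/tmp/data.csv', 'GETFILESTATUS')
# -> 'http://namenode1:50070/webhdfs/v1/tmp/data.csv?user.name=hadoop&op=GETFILESTATUS'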
def get_slash_mapping(bot: commands.Bot):
"""Get all the prefix commands groupped by category."""
categories = {}
for command in bot.slash_commands:
if command:
category_name = get_cog_category(command.cog)
# categories are organized by cog folders
try:
categories[category_name].append(command)
except KeyError:
categories[category_name] = [command]
return categories | dfb7eef9aac5df011ee2801a9976723f8f4f8c7a | 9,881 |
import memcache
def get_memcached_client(servers, debug=False):
"""
mc.set("name", "python")
ret = mc.get('name')
print(ret)
"""
if isinstance(servers, str):
servers = servers.split(',')
return memcache.Client(servers, debug=debug) | 53ff8966c010d5079df457c82bc79072d190d0e7 | 9,882 |
def part_to_text(part):
"""
Converts an e-mail message part into text.
Returns None if the message could not be decoded as ASCII.
:param part: E-mail message part.
:return: Message text.
"""
if part.get_content_type() != 'text/plain':
return None
charset = part.get_content_charset()
if not charset:
return None
text = str(part.get_payload(decode=True), encoding=charset, errors='ignore')
try:
text = str(text.encode('ascii'), 'ascii')
except UnicodeEncodeError:
return None
except UnicodeDecodeError:
return None
if part.get_param('format') == 'flowed':
text = unflow_text(text, part.get_param('delsp', False))
return text | f6831014a5100e6addaebc5889a5e1c40f74381b | 9,883 |
def icwt(Wx, wavelet='gmw', scales='log-piecewise', nv=None, one_int=True,
x_len=None, x_mean=0, padtype='zero', rpadded=False, l1_norm=True):
"""The inverse Continuous Wavelet Transform of `Wx`, via double or
single integral.
# Arguments:
Wx: np.ndarray
CWT computed via `ssqueezepy.cwt`.
wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
Wavelet sampled in Fourier frequency domain.
- str: name of builtin wavelet. `ssqueezepy.wavs()`
- tuple[str, dict]: name of builtin wavelet and its configs.
E.g. `('morlet', {'mu': 5})`.
- `wavelets.Wavelet` instance. Can use for custom wavelet.
scales: str['log', 'linear', 'log:maximal', ...] / np.ndarray
See help(cwt).
nv: int / None
Number of voices. Suggested >= 32. Needed if `scales` isn't array
(will default to `cwt`'s).
one_int: bool (default True)
Whether to use one-integral iCWT or double.
Current one-integral implementation performs best.
- True: Eq 2.6, modified, of [3]. Explained in [4].
- False: Eq 4.67 of [1]. Explained in [5].
x_len: int / None. Length of `x` used in forward CWT, if different
from Wx.shape[1] (default if None).
x_mean: float. mean of original `x` (not picked up in CWT since it's an
infinite scale component). Default 0.
padtype: str
Pad scheme to apply on input, in case of `one_int=False`.
See `help(utils.padsignal)`.
rpadded: bool (default False)
True if Wx is padded (e.g. if used `cwt(, rpadded=True)`).
l1_norm: bool (default True)
True if Wx was obtained via `cwt(, l1_norm=True)`.
# Returns:
x: np.ndarray
The signal, as reconstructed from Wx.
# References:
1. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
2. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications. G. Thakur,
E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
4. One integral inverse CWT. OverLordGoldDragon.
https://dsp.stackexchange.com/a/71274/50076
5. Inverse CWT derivation. OverLordGoldDragon.
https://dsp.stackexchange.com/a/71148/50076
6. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
synsq_cwt_fw.m
"""
#### Prepare for inversion ###############################################
na, n = Wx.shape
x_len = x_len or n
if not isinstance(scales, np.ndarray) and nv is None:
nv = 32 # must match forward's; default to `cwt`'s
wavelet = _process_gmw_wavelet(wavelet, l1_norm)
wavelet = Wavelet._init_if_not_isinstance(wavelet)
# will override `nv` to match `scales`'s
scales, scaletype, _, nv = process_scales(scales, x_len, wavelet, nv=nv,
get_params=True)
assert (len(scales) == na), "%s != %s" % (len(scales), na)
#### Handle piecewise scales case ########################################
# `nv` must be left unspecified so it's inferred automatically from `scales`
# in `process_scales` for each piecewise case
if scaletype == 'log-piecewise':
kw = dict(wavelet=wavelet, one_int=one_int, x_len=x_len, x_mean=x_mean,
padtype=padtype, rpadded=rpadded, l1_norm=l1_norm)
idx = logscale_transition_idx(scales)
x = icwt(Wx[:idx], scales=scales[:idx], **kw)
x += icwt(Wx[idx:], scales=scales[idx:], **kw)
return x
##########################################################################
#### Invert ##############################################################
if one_int:
x = _icwt_1int(Wx, scales, scaletype, l1_norm)
else:
x = _icwt_2int(Wx, scales, scaletype, l1_norm,
wavelet, x_len, padtype, rpadded)
# admissibility coefficient
Cpsi = (adm_ssq(wavelet) if one_int else
adm_cwt(wavelet))
if scaletype == 'log':
# Eq 4.67 in [1]; Theorem 4.5 in [1]; below Eq 14 in [2]
# ln(2**(1/nv)) == ln(2)/nv == diff(ln(scales))[0]
x *= (2 / Cpsi) * np.log(2 ** (1 / nv))
else:
x *= (2 / Cpsi)
x += x_mean # CWT doesn't capture mean (infinite scale)
return x | dc41ac93ab5e9b71fb6afa34e9e545da4b9e81d5 | 9,884 |
def AdvApp2Var_MathBase_msc_(*args):
"""
:param ndimen:
:type ndimen: integer *
:param vecte1:
:type vecte1: doublereal *
:param vecte2:
:type vecte2: doublereal *
:rtype: doublereal
"""
return _AdvApp2Var.AdvApp2Var_MathBase_msc_(*args) | e8204854d32b2bcb174a779b4eb5e1617563dd1e | 9,885 |
def chunkify(arr, n):
"""Breaks a list into n chunks.
Last chunk may not be equal in size to other chunks
"""
return [arr[i : i + n] for i in range(0, len(arr), n)] | 10df800440e8c1d5e4070dc48dd8c7ecc12f3c83 | 9,886 |
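A small example of chunkify; note the last chunk may be shorter than the others:
print(chunkify([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]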
from subprocess import Popen
from typing import List
def _run_command(command, targets, options):
# type: (str, List[str], List[str]) -> bool
"""Runs `command` + `targets` + `options` in a
subprocess and returns a boolean determined by the
process return code.
    >>> result = _run_command('pylint', ['foo.py', 'some_module'], ['-E'])
>>> result
True
:param command: str
:param targets: List[str]
:param options: List[str]
:return: bool
"""
print('{0}: targets={1} options={2}'.format(command, targets, options))
cmd = [command] + targets + options
process = Popen(cmd)
process.wait()
return bool(process.returncode) | def38e07dd55b5c45d591e6a677937774185962d | 9,887 |
from typing import Counter
from typing import List
from typing import Dict
def find_untranscribed_words(
gt: Counter, machine: Counter
) -> List[Dict[str, any]]:
"""
Finds untranscribed words.
That is, we find if there exist words in the GT which never occur in the machine transcription.
:param gt: Counter of GT words.
:param machine: Counter of machine words.
:return: List of word/counts which occur in GT but not (or infrequently) in the machine transcription.
"""
result: List[Dict[str, any]] = []
for word, gt_count in gt.most_common():
if word not in machine:
machine_count = 0
else:
machine_count = machine[word]
if gt_count > 0 and machine_count == 0:
r = {"word": word, "machine": machine_count, "gt": gt_count}
result.append(r)
return result | e120898ce32f98cedcfb6831cd288e1700d14189 | 9,888 |
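A usage sketch for find_untranscribed_words with illustrative Counters (not taken from real transcripts):
from collections import Counter
gt = Counter({"hello": 3, "world": 1})
machine = Counter({"hello": 2})
# 'world' appears in the ground truth but never in the machine output.
print(find_untranscribed_words(gt, machine))  # [{'word': 'world', 'machine': 0, 'gt': 1}]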
import os
import subprocess
import sys
def get_vcvars(vs_tools, arch):
"""Get the VC tools environment using vswhere.exe or buildtools docker
This is intended to work either when VS is in its standard installation
location, or when the docker instructions have been followed, and we can
find Visual C++ in C:/BuildTools.
Visual Studio provides a docker image with instructions here:
https://docs.microsoft.com/en-us/visualstudio/install/build-tools-container?view=vs-2019
This vswhere code is following the guidelines from strategy 1 in this blog
post:
https://blogs.msdn.microsoft.com/vcblog/2017/03/06/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
It doesn't work when VS is not installed at the default location.
"""
if not arch:
# First check the wow64 processor architecture, since python is probably
# 32-bit, then fall back to PROCESSOR_ARCHITECTURE.
arch = os.environ.get('PROCESSOR_ARCHITEW6432', '').lower()
if not arch:
arch = os.environ.get('PROCESSOR_ARCHITECTURE', '').lower()
else:
arch = arch.lower()
# Use vswhere.exe if it exists.
if os.path.exists(VSWHERE_PATH):
cmd = [VSWHERE_PATH, "-latest", "-property", "installationPath"]
vs_path = subprocess.check_output(cmd).decode(sys.stdout.encoding)
vs_path = vs_path.strip()
util.report("Running vswhere to find VS: " + repr(cmd))
util.report("vswhere output: " + vs_path)
if not os.path.isdir(vs_path):
raise ValueError("VS install path does not exist: " + vs_path)
vcvars_path = pjoin(vs_path, 'VC', 'Auxiliary', 'Build',
'vcvarsall.bat')
elif os.path.exists(BUILDTOOLS_VSDEVCMD):
vcvars_path = BUILDTOOLS_VSDEVCMD
elif vs_tools is None:
vs_tools = os.path.expandvars('%VS140COMNTOOLS%')
vcvars_path = pjoin(vs_tools, '..', '..', 'VC', 'vcvarsall.bat')
# Newer vcvarsall.bat scripts aren't quiet, so direct them to NUL, aka
# Windows /dev/null.
cmd = util.shquote_cmd([vcvars_path, arch]) + ' > NUL && set'
util.report("Running vcvars: " + cmd)
output = \
subprocess.check_output(cmd, shell=True).decode(sys.stdout.encoding)
new_env = {}
for line in output.splitlines():
var, val = line.split('=', 1)
new_env[var] = val
return new_env | 8a6621a9ac6a89626c20a7b91b3e9d49f33ba9d3 | 9,889 |
def set_line_length(length):
"""
set_line_length(int length)
Sets the maximum line length for log messages.
Messages longer than this amount will be broken up into multiline messages.
Parameters
----------
* length :
the maximum log message line length in characters
"""
return _openmoc.set_line_length(length) | 4269b8fb1f28f700cb4150e217e3b85f1491b91d | 9,890 |
def get_teams(pbp_json):
"""
Get teams
:param pbp_json: raw play by play json
:return: dict with home and away
"""
return {'Home': shared.get_team(pbp_json['gameData']['teams']['home']['name'].upper()),
'Away': shared.get_team(pbp_json['gameData']['teams']['away']['name'].upper())} | 925c079a0a4a04de70154e78d6ccb25bd846071d | 9,891 |
def register(args):
"""Register a new user using email and password.
Return CONFLICT is a user with the same email already exists.
"""
if db.session.query(User).filter_by(email=args['email']).first():
return conflict("User already exists.")
new_user = User(args['email'], args['password'])
db.session.add(new_user)
db.session.commit()
user_schema = UserSchema()
return created(data=user_schema.dump(new_user).data) | b7979090683b231f71c319590a120242932229fd | 9,892 |
def _scale_value_to_rpm(value, total):
"""Scale value to reads per million"""
return value * 1 / (total / 1e6) | c3a49c8df8cbb22bd055a2f8076065463041bb72 | 9,893 |
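A worked example of the reads-per-million scaling (numbers chosen for illustration): 50 reads against 10 million total reads scale to 50 / (10_000_000 / 1e6) = 5.0 RPM.
print(_scale_value_to_rpm(50, 10_000_000))  # 5.0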
import os
def file_exists_not_empty(filename):
"""
Tests if file exists and is not empty
:param filename: full path of file to be checked
:type filename: str
"""
if os.path.isfile(filename):
if os.stat(filename).st_size == 0:
return False
else:
return False
return True | a00a9b5afad47263899de10b26ce8c1225c0eb7c | 9,894 |
import itertools
from numpy.polynomial.polynomial import polyfromroots, polyval
def poly_vals_in_range(minimum, maximum, roots):
"""Return a list of all results of a given polynomial within a range
based on the roots of the polynomial itself.
These roots will be selected by a user from the GUI.
Arguments:
minimum -- the lowest value in the dataset
maximum -- the highest value in the dataset
roots -- the roots of the polynomial
"""
poly = polyfromroots(roots)
vals = itertools.takewhile(lambda x: x <= maximum,
[int(polyval(y, poly)) for y in range(minimum, maximum + 1)])
vals = sorted(filter(lambda x: minimum <= x <= maximum, vals))
return vals | 439c56cd04a70a858340712e66ffbe6144c49e04 | 9,895 |
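A usage sketch for poly_vals_in_range with assumed roots [2, 5], i.e. the polynomial x**2 - 7x + 10 evaluated over 0..10; out-of-range values are dropped and the remainder sorted:
print(poly_vals_in_range(0, 10, [2, 5]))  # [0, 0, 4, 4, 10, 10]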
def width_angle(rectangle: Polygon):
"""Returns the length and angle(in degrees) of the longest side of a
rotated rectangle
"""
point_a, point_b, point_c = rectangle.exterior.coords[:3]
a = distance(point_a, point_b)
b = distance(point_b, point_c)
if a > b:
angle = line_angle(point_a, point_b)
return a, b, angle
angle = line_angle(point_b, point_c)
return b, a, angle | 074cd38e90f83af15bd09113bafa58e035960fd5 | 9,896 |
def mean_absolute_error(y_true, y_pred, discretise = False):
"""
requires input arrays to be same np.dtype.
returns average, not sum of errors
discretising (for classification problems) makes little sense to me,
but may be necessary in some obscure scenarios
"""
if discretise:
y_p = tools.round_probabilities(y_pred)
else:
y_p = y_pred
mae_a = tf.Session().run(tf.keras.losses.mean_absolute_error(y_true, y_p))
return mae_a.mean() | 879607b78856cdc6367dbf1f6e4c7cecad48df24 | 9,897 |
import numpy as np
def population_fit_feature_cal(population,
fitFunction,
fitFunctionInput,
ite):
"""Parallel population fitness calculation function
Args:
population (list): Single population
fitFunction (function): Fitness calculation function
fitFunctionInput (list): Auxiliary input of fitness calculation function
ite (int): Current population number
Returns:
[int, list]: Current population number, population characteristics (mean, variance, maximum)
"""
# Calculate population fitness
populationFit = []
for individual in population:
Fit = fitFunction(individual, fitFunctionInput)
populationFit.append(Fit)
# Calculate population fitness characteristics
mean, std, max = np.mean(populationFit), np.std(populationFit), np.max(populationFit)
return ite, [mean, std, max] | 671ddb5acd5d8dee8d8dba660efbb1e86509094c | 9,898 |
import pymongo
def intilise_database2():
    """
    Initialise the database and make a table instance
    Returns
pymongo object of the table
"""
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb=myclient['subreddit']
maintable2 = mydb["posts2"]
return maintable2 | d70497f837cddcaf0a7383d826a705a77e26dda5 | 9,899 |