| content | sha1 | id |
---|---|---|
| stringlengths 35 to 762k | stringlengths 40 to 40 | int64 0 to 3.66M |
def logoutUser(request):
"""[summary]
Args:
request ([Logout]): [Metodo herado de logout de django para cerrar sesión]
Returns:
[Redirect template]: [Retorna el template del login]
"""
logout(request)
return redirect('/accounts/login/') | 67684fb4dfafd0c5f8671553566dd7f6940b4f6c | 17,600 |
def sin_wave(freq, duration=1, offset=0):
"""Makes a sine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = SinSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave | f0f0e58d0a864a114aafa24f68b683ac4ec2f419 | 17,601 |
def assign_lpvs(lat):
""" Given lattice type return 3 lattice primitive vectors"""
lpv = zeros((3,3))
if lat=='FCC':
lpv[0,1]=1./sqrt(2)
lpv[0,2]=1./sqrt(2)
lpv[1,0]=1./sqrt(2)
lpv[1,2]=1./sqrt(2)
lpv[2,0]=1./sqrt(2)
lpv[2,1]=1./sqrt(2)
elif lat=='SC':
lpv[0,0]=1
lpv[1,1]=1
lpv[2,2]=1
elif lat=='SH':
lpv[0,0]=1./2
lpv[0,1]=-sqrt(3)/2
lpv[1,0]=1./2
lpv[1,1]=sqrt(3)/2
lpv[2,2]=1.
return lpv | ecf599a661446e19e4155f170c41b5ac8271c8cb | 17,602 |
import torch
def flatten_and_batch_shift_indices(indices: torch.LongTensor,
sequence_length: int) -> torch.Tensor:
"""``indices`` of size ``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor,
which has size ``(batch_size, sequence_length, embedding_size)``. This function returns a vector
that correctly indexes into the flattened target. The sequence length of the target must be provided
to compute the appropriate offset.
    Args:
        indices (torch.LongTensor): tensor of indices of shape ``(batch_size, d_1, ..., d_n)``.
        sequence_length (int): length of dimension 1 of the target tensor.
    Returns:
        torch.Tensor: the offset indices, flattened to shape ``(batch_size * d_1 * ... * d_n,)``.
    """
if torch.max(indices) >= sequence_length or torch.min(indices) < 0:
raise ValueError("All the elements should be in range (0, {}), but found ({}, {})".format(
sequence_length - 1, torch.min(indices).item(), torch.max(indices).item()))
offsets = get_range_vector(indices.size(0), indices.device) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# (batch_size, d_1, ..., d_n) + (batch_size, 1, ..., 1)
offset_indices = indices + offsets
# (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices | 6b283f3baaa4fde17af194f996b7f2dec409fc0b | 17,603 |
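For illustration, this is how the batch offsets map per-example indices onto the flattened (batch * sequence) axis; get_range_vector is assumed to behave like torch.arange on the appropriate device:
import torch
# hypothetical batch of size 2 with sequence_length 5
indices = torch.tensor([[1, 3], [0, 4]])
offsets = torch.arange(2) * 5                       # tensor([0, 5])
flat = (indices + offsets.unsqueeze(1)).view(-1)    # tensor([1, 3, 5, 9])
# `flat` indexes rows of target.view(batch_size * sequence_length, embedding_size)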
def raveled_affinity_watershed(
image_raveled, marker_coords, offsets, mask, output
):
"""Compute affinity watershed on raveled arrays.
Parameters
----------
image_raveled : 2D array of float(32), shape (npixels, ndim)
The z, y, and x affinities around each pixel.
marker_coords : 1D array of int
The location of each marker along the pixels dimension of
``image_raveled``.
offsets : 1D array of int
The signed offsets to each neighboring pixel.
mask : 1D array of bool, shape (npixels,)
True for pixels to which the watershed should spread.
output : 1D array of int
The output array for markers.
"""
n_neighbors = offsets.shape[0]
age = 0
marker_coords = marker_coords.astype(np.intp)
offsets = offsets.astype(np.intp)
aff_offsets = offsets.copy().astype(np.intp)
aff_offsets[:int(len(offsets) / 2), 1] = 0
heap = [
Element(
image_raveled[0, 0], age, marker_coords[0],
marker_coords[0]
)
]
_ = heappop(heap)
# add each seed to the stack
for i in range(marker_coords.shape[0]):
index = marker_coords[i]
value = np.float32(0.)
source = index
index = index
elem = Element(value, age, index, source)
heappush(heap, elem)
# remove from stack until empty
while len(heap) > 0:
elem = heappop(heap)
for i in range(n_neighbors):
# get the flattened address of the neighbor
# offsets are 2d (size, 2) with columns 0 and 1 corresponding to
# affinities (ie axis) and image neighbour indices respectively
neighbor_index = elem.index + offsets[i, 1]
if not mask[neighbor_index]:
# neighbor is not in mask, move on to next neighbor
continue
if output[neighbor_index]:
# if there is a non-zero value in output, move on to next
# neighbor
continue
# if the neighbor is in the mask and not already labeled,
# label it then add it to the queue
output[neighbor_index] = output[elem.index]
value = image_raveled[aff_offsets[i, 0],
aff_offsets[i, 1] + elem.index]
age += 1
new_elem = Element(value, age, neighbor_index, elem.source)
heappush(heap, new_elem)
return output | bc109b59bec4389a851cfc46a8e02648e1809c60 | 17,604 |
def get_spike_times(units: pynwb.misc.Units, index, in_interval):
"""Use bisect methods to efficiently retrieve spikes from a given unit in a given interval
Parameters
----------
units: pynwb.misc.Units
index: int
in_interval: start and stop times
Returns
-------
"""
st = units['spike_times']
unit_start = 0 if index == 0 else st.data[index - 1]
unit_stop = st.data[index]
start_time, stop_time = in_interval
ind_start = bisect_left(st.target, start_time, unit_start, unit_stop)
ind_stop = bisect_right(st.target, stop_time, ind_start, unit_stop)
return np.asarray(st.target[ind_start:ind_stop]) | c121747deec1fcc9b5e317f6ec5e57349604ebc3 | 17,605 |
def _make_hours(store_hours):
"""Store hours is a dictionary that maps a DOW to different open/close times
Since it's easy to represent disjoing hours, we'll do this by default
Such as, if a store is open from 11am-2pm and then 5pm-10pm
We'll slice the times in to a list of floats representing 30 minute intevals
So for monday, let's assume we have the store hours from 10am - 3pm
We represent this as
monday = [10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5]
"""
week_hrs = {}
for dow in store_hours.keys():
dow_hours = []
for hour_set in store_hours[dow]:
if len(hour_set) < 2:
open_hr = 0.0
close_hr = 24.0
else:
open_hr = float(hour_set[0])
close_hr = float(hour_set[1])
if close_hr < open_hr:
tmp = close_hr
close_hr = open_hr
open_hr = tmp
current_hr_it = open_hr
while((close_hr - current_hr_it) >= .5):
dow_hours.append(current_hr_it)
current_hr_it += .5
week_hrs[dow] = dow_hours
return week_hrs | 4845594e59e5dba2790ac1a3c376ddb8e8290995 | 17,606 |
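A quick sketch of the slicing described above; the input shape (day mapped to a list of (open, close) pairs) is an assumption drawn from the loop body:
store_hours = {"monday": [("10", "15")]}   # 10am - 3pm
print(_make_hours(store_hours)["monday"])
# [10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5]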
def mul_time(t1, factor):
"""Get the product of the original Time and the number
time: Time
factor: number
returns: Time
"""
assert valid_time(t1)
    seconds = time_to_int(t1) * factor
    return int_to_time(seconds) | 43d9c3a52670b8755590693fe6886748665d81ee | 17,607 |
def create_pmf_from_samples(
t_samples_list, t_trunc=None, bin_width=None, num_bins=None):
"""
Compute the probability distribution of the waiting time from the sampled data.
Parameters
----------
t_samples_list : array-like 1-D
Samples of the waiting time.
t_trunc: int
The truncation time.
bin_width: int
The width of the bins for the histogram.
    num_bins: int
The number of bins for the histogram.
If num_bins and bin_width are both given, bin_width has priority.
Returns
-------
pmf: array-like 1-D
        The probability distribution, i.e. the normalized
        histogram of the waiting time.
    bin_edges: array-like 1-D
        The edges of the bins from ``numpy.histogram``.
"""
if t_trunc is None:
t_trunc = max(t_samples_list)
if bin_width is None:
if num_bins is None:
bin_width = int(np.ceil(t_trunc/200))
else:
bin_width = int(np.ceil(t_trunc/num_bins))
start = np.min(t_samples_list)
pmf, bin_edges = np.histogram(
t_samples_list, bins=np.arange(start, t_trunc+1, bin_width))
return pmf/len(t_samples_list), bin_edges | ce14c169ee719979284b01584b7e0523b19f256a | 17,608 |
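A minimal usage sketch with synthetic waiting-time samples (numpy imported as np, as the function body assumes):
import numpy as np
samples = np.random.geometric(p=0.1, size=10_000)             # synthetic waiting times
pmf, bin_edges = create_pmf_from_samples(samples, num_bins=50)
print(pmf.sum(), len(pmf), len(bin_edges))                    # pmf sums to (at most) 1; one more edge than bins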
def box_corner_to_center(boxes):
"""从(左上,右下)转换到(中间,宽度,高度)"""
x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
w = x2 - x1
h = y2 - y1
boxes = paddle.stack((cx, cy, w, h), axis=-1)
return boxes | c07ef637576e5b9ebd8ba43795535e630ccf8b09 | 17,609 |
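For example (assuming paddle is imported in the surrounding module, as the function body implies):
import paddle
boxes = paddle.to_tensor([[10.0, 20.0, 50.0, 80.0]])   # x1, y1, x2, y2
print(box_corner_to_center(boxes))                      # [[30., 50., 40., 60.]]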
def irrelevant(condition=None, library=None, weblog_variant=None, reason=None):
""" decorator, allow to mark a test function/class as not relevant """
skip = _should_skip(library=library, weblog_variant=weblog_variant, condition=condition)
def decorator(function_or_class):
if not skip:
return function_or_class
full_reason = "not relevant" if reason is None else f"not relevant: {reason}"
return _get_skipped_item(function_or_class, full_reason)
return decorator | 7d2633247569c4ca5bc20d5249e0b49991ae1047 | 17,610 |
def get_all_approved(self) -> list:
"""Get all appliances currently approved
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - appliance
- GET
- /appliance/approved
:return: Returns approved appliances
:rtype: list
"""
return self._get("/appliance/discovered") | 4c8c00cce144cf73b2a7b63d1e82a13f28de383c | 17,611 |
import os
def get_project_root() -> str:
"""Get the path to the project root.
Returns:
str: the path to the project root.
"""
return os.path.abspath(os.path.dirname(os.path.dirname(__file__))) | afe45a30910049264fbe55551415c7897310ec63 | 17,612 |
def bokeh_hover_tooltip(
label=False,
text=False,
image=False,
audio=False,
coords=True,
index=True,
custom=None,
):
"""
???+ note "Create a Bokeh hover tooltip from a template."
- param label: whether to expect and show a "label" field.
- param text: whether to expect and show a "text" field.
- param image: whether to expect and show an "image" (url/path) field.
- param audio: whether to expect and show an "audio" (url/path) field.
- param coords: whether to show xy-coordinates.
- param index: whether to show indices in the dataset.
- param custom: {display: column} mapping of additional (text) tooltips.
"""
# initialize mutable default value
custom = custom or dict()
# prepare encapsulation of a div box and an associated script
divbox_prefix = """<div class="out tooltip">\n"""
divbox_suffix = """</div>\n"""
script_prefix = """<script>\n"""
script_suffix = """</script>\n"""
# dynamically add contents to the div box and the script
divbox = divbox_prefix
script = script_prefix
if label:
divbox += """
<div>
<span style="font-size: 16px; color: #966;">
Label: @label
</span>
</div>
"""
if text:
divbox += """
<div style="word-wrap: break-word; width: 95%; text-overflow: ellipsis; line-height: 90%">
<span style="font-size: 11px;">
Text: @text
</span>
</div>
"""
if image:
divbox += """
<div>
<span style="font-size: 10px;">
Image: @image
</span>
<img
src="@image" height="60" alt="@image" width="60"
style="float: left; margin: 0px 0px 0px 0px;"
border="2"
></img>
</div>
"""
if audio:
divbox += """
<div>
<span style="font-size: 10px;">
Audio: @audio
</span>
<audio autoplay preload="auto" src="@audio">
</audio>
</div>
"""
if coords:
divbox += """
<div>
<span style="font-size: 12px; color: #060;">
Coordinates: ($x, $y)
</span>
</div>
"""
if index:
divbox += """
<div>
<span style="font-size: 12px; color: #066;">
Index: [$index]
</span>
</div>
"""
for _key, _field in custom.items():
divbox += f"""
<div>
<span style="font-size: 12px; color: #606;">
{_key}: @{_field}
</span>
</div>
"""
divbox += divbox_suffix
script += script_suffix
return divbox + script | 198e76e29d62c12c891c0fe51e947d16f39d65bb | 17,613 |
import math
def constant_xavier_initializer(shape, dtype=tf.float32, uniform=True):
"""Initializer function."""
if not dtype.is_floating:
raise TypeError('Cannot create initializer for non-floating point type.')
# Estimating fan_in and fan_out is not possible to do perfectly, but we try.
# This is the right thing for matrix multiply and convolutions.
if shape:
fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
fan_out = float(shape[-1])
else:
fan_in = 1.0
fan_out = 1.0
for dim in shape[:-2]:
fan_in *= float(dim)
fan_out *= float(dim)
# Average number of inputs and output connections.
n = (fan_in + fan_out) / 2.0
if uniform:
# To get stddev = math.sqrt(factor / n) need to adjust for uniform.
limit = math.sqrt(3.0 * 1.0 / n)
return tf.random_uniform(shape, -limit, limit, dtype, seed=None)
else:
# To get stddev = math.sqrt(factor / n) need to adjust for truncated.
trunc_stddev = math.sqrt(1.3 * 1.0 / n)
return tf.truncated_normal(shape, 0.0, trunc_stddev, dtype, seed=None) | f11403932f04327b77f38930a8a7a235633449da | 17,614 |
from typing import Dict
from typing import Tuple
from typing import Any
def upload_script(name: str, permission_type: str, content: str, entry_id: str) -> Dict:
"""
Uploads a script by either given content or file
:param name: Script name to upload
:param permission_type: Permissions type of script to upload
:param content: PowerShell script content
:param entry_id: Script file to upload
:return: Response JSON which contains errors (if exist) and how many resources were affected
"""
endpoint_url = '/real-time-response/entities/scripts/v1'
body: Dict[str, Tuple[Any, Any]] = {
'name': (None, name),
'permission_type': (None, permission_type)
}
temp_file = None
try:
if content:
body['content'] = (None, content)
else: # entry_id was provided
file_ = demisto.getFilePath(entry_id)
file_name = file_.get('name') # pylint: disable=E1101
temp_file = open(file_.get('path'), 'rb') # pylint: disable=E1101
body['file'] = (file_name, temp_file)
headers = {
'Authorization': HEADERS['Authorization'],
'Accept': 'application/json'
}
response = http_request('POST', endpoint_url, files=body, headers=headers)
return response
finally:
if temp_file:
temp_file.close() | d33aa3a4f19cfac08d8ee5cb559c0088c6f577bb | 17,615 |
def create_orthogonal(left, right, bottom, top, znear, zfar):
"""Create a Mat4 orthographic projection matrix."""
width = right - left
height = top - bottom
depth = zfar - znear
sx = 2.0 / width
sy = 2.0 / height
sz = 2.0 / -depth
tx = -(right + left) / width
ty = -(top + bottom) / height
tz = -(zfar + znear) / depth
return Mat4((sx, 0.0, 0.0, 0.0,
0.0, sy, 0.0, 0.0,
0.0, 0.0, sz, 0.0,
tx, ty, tz, 1.0)) | 3f10bcabe0d95a9832956a7edcef1719c7db0d15 | 17,616 |
def update_image_version(name: str, new_version: str):
"""returns the passed image name modified with the specified version"""
parts = name.rsplit(':', 1)
return f'{parts[0]}:{new_version}' | cde798361a6c74d22f979fe013e963c46028a7e6 | 17,617 |
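Usage is straightforward; the image names below are made up for illustration:
print(update_image_version('registry.example.com/app:1.2.3', '2.0.0'))   # registry.example.com/app:2.0.0
print(update_image_version('nginx:latest', '1.25'))                       # nginx:1.25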
def compute_entanglement(theta):
"""Computes the second Renyi entropy of circuits with and without a tardigrade present.
Args:
- theta (float): the angle that defines the state psi_ABT
Returns:
- (float): The entanglement entropy of qubit B with no tardigrade
initially present
- (float): The entanglement entropy of qubit B where the tardigrade
was initially present
"""
dev = qml.device("default.qubit", wires=3)
# QHACK #
@qml.qnode(dev)
def circuits(theta, tartigrade):
if not tartigrade:
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
qml.PauliX(wires=0)
return qml.density_matrix(wires=[0])
def partial_trace(rho, qubit_2_keep): # Credits: GitHub @neversakura.
num_qubit = int(np.log2(rho.shape[0]))
qubit_axis = [(i, num_qubit + i) for i in range(num_qubit)
if i not in qubit_2_keep]
minus_factor = [(i, 2 * i) for i in range(len(qubit_axis))]
minus_qubit_axis = [(q[0] - m[0], q[1] - m[1])
for q, m in zip(qubit_axis, minus_factor)]
rho_res = np.reshape(rho, [2, 2] * num_qubit)
qubit_left = num_qubit - len(qubit_axis)
for i, j in minus_qubit_axis:
rho_res = np.trace(rho_res, axis1=i, axis2=j)
if qubit_left > 1:
rho_res = np.reshape(rho_res, [2 ** qubit_left] * 2)
return rho_res
psi_0 = np.array([1, 0])
psi_1 = np.array([0, 1])
g_bt = np.kron(psi_0, psi_0)
e_bt=np.cos(theta/2)*np.kron(psi_1,psi_0)+np.sin(theta/2)*np.kron(psi_0,psi_1)
psi_abt = 1/np.sqrt(2)*(np.kron(psi_0, e_bt)+np.kron(psi_1, g_bt))
rho_abt = np.outer(psi_abt, np.conj(psi_abt))
rho_b = partial_trace(rho_abt, [1])
mu_b = circuits(theta, 0)
s_mub = second_renyi_entropy(mu_b)
s_rhob = second_renyi_entropy(rho_b)
return s_mub, s_rhob
# QHACK # | bc6d70f1ef76666fa3b4d753f13dc04a8a368374 | 17,618 |
import logging
def parse_CDS_info(CDS_info):
"""
Args:
CDS_info (python d):
'aliases' (list<alias_list (multiple)>):
alias_list
list<'locus_tag', str> AND/OR
list<'old_locus_tag', str> AND/OR
list<'protein_id', str>
'dna_sequence' (str): The actual DNA sequence
'functions' (list<str>): First object of list is the function
'location' (list<scaffold (str), bp (int), strand ("+/-"), length (nt)>)
Returns:
gene_table_list_d (dict):
"locusId":str
"sysName": ?str
"type": 1
"scaffoldId": str
"begin": int
"end": int
"strand": str ("+"/"-")
"name": str (always "unknown" in this case)
"desc": str
"GC": float
"nTA": int
"AA_seq": Amino Acid sequence of gene
"""
gene_table_list_d = {}
#Getting locusId
aliases_l = CDS_info["aliases"]
locusId_obj = aliases_l[0]
if locusId_obj[0] != "locus_tag":
locus_tag_found = False
for i in range(1, len(aliases_l)):
if aliases_l[i][0] == "locus_tag":
locus_tag_found = True
locusId_obj = aliases_l[i]
break
logging.critical(f"Found locus_tag at different loc of list: {i}")
else:
locus_tag_found = True
if not locus_tag_found:
raise Exception("Expecting locus_tag from genome object, did not find it.")
else:
gene_table_list_d["locusId"] = locusId_obj[1]
gene_table_list_d["sysName"] = locusId_obj[1]
# Getting scaffold, location, strand
scaffold, bp_loc, strand, nt_len = get_location_info(CDS_info["location"][0])
gene_table_list_d["scaffoldId"] = scaffold
gene_table_list_d["begin"] = bp_loc
gene_table_list_d["end"] = bp_loc + nt_len
gene_table_list_d["strand"] = strand
# Getting description
gene_table_list_d["desc"] = CDS_info["functions"][0]
# Getting GC and nTA
DNA_seq = CDS_info["dna_sequence"].upper()
gene_table_list_d["GC"] = (DNA_seq.count("G") + DNA_seq.count("C"))/float(len(DNA_seq))
gene_table_list_d["nTA"] = DNA_seq.count("TA")
# Undecidable parts (from the data object)
gene_table_list_d["type"] = 1
gene_table_list_d["name"] = "unknown"
# Adding protein sequence
gene_table_list_d["AA_seq"] = CDS_info["protein_translation"].upper()
return gene_table_list_d | d55e5b2c56b42c89c9abeba63cb7c68213688945 | 17,619 |
import re
def recover_original_schema_name(sql: str, schema_name: str) -> str:
"""Postgres truncates identifiers to 63 characters at parse time and, as pglast
uses bits of PG to parse queries, image names like noaa/climate:64_chars_of_hash
get truncated which can cause ambiguities and issues in provenance. We can't
get pglast to give us back the full identifier, but we can try and figure out
what it used to be and patch the AST to have it again.
"""
if len(schema_name) < POSTGRES_MAX_IDENTIFIER:
return schema_name
candidates = list(set(re.findall(r"(" + re.escape(schema_name) + r"[^.\"]*)[.\"]", sql)))
# Us finding more than one candidate schema is pretty unlikely to happen:
# we'd have to have a truncated schema name that's 63 characters long
# (of kind some_namespace/some_repo:abcdef1234567890....)
# which also somehow features in this query as a non-identifier. Raise an error here if
# this does happen.
assert len(candidates) == 1
return str(candidates[0]) | 041c747e8722dc1e81a94b29b76ee0eded88992c | 17,620 |
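A sketch of the recovery, assuming POSTGRES_MAX_IDENTIFIER is 63 (the Postgres default) and that the full name still appears somewhere in the SQL text:
full = "noaa/climate:" + "a" * 64            # 77 characters, beyond the 63-char limit
truncated = full[:63]                         # what the parser hands back
sql = f'SELECT 1 FROM "{full}".some_table'
print(recover_original_schema_name(sql, truncated) == full)   # True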
def on_method_not_allowed(error):
"""Override the HTML 405 default."""
content = {"msg": "Method not allowed"}
return jsonify(content), 405 | a174592834952beca21c683890ab94c9583544f9 | 17,621 |
def dir_name(dir_path):
"""
    Build the temporary, input, and result folder paths.
    :param dir_path: path of the main directory
    :return: [tmp_dir, input_dir, res_dir]
"""
tmp_dir = dir_path + "tmp\\"
input_dir = dir_path + "input\\"
res_dir = dir_path + "result\\"
return tmp_dir, input_dir, res_dir | 9f775b4ace14b178fd7bc0dfa94e5df13e583557 | 17,622 |
def profile_binning(
r,
z,
bins,
z_name="pm",
z_clip=None,
z_quantile=None,
return_bin=True,
plot=True,
):
"""Bin the given quantity z in r.
Parameters
----------
r: 1d array, binned x values
z: 1d array, binned y values
bins: 1d array, bins
Returns
--------
r_rbin : 1d array, mean r in bins
z_rbin : 1d array, mean z in bins
z_bins : dict, numbers for bins
"""
if z_clip is None:
clip = clip_quantile_1d(z, z_quantile, return_func=True)
else:
clip = lambda z_: (z_ > z_clip[0]) & (z_ < z_clip[1])
z_bins = {}
if plot:
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
# Clip by bins
for k, b in enumerate(bins[:-1]):
in_bin = (bins[k] <= r) & (r < bins[k + 1])
clipped = clip(z[in_bin])
z_in_bin = z[in_bin][clipped]
r_in_bin = r[in_bin][clipped]
z_bin = {z_name: z_in_bin, "r": r_in_bin}
z_bins[k] = z_bin
if plot:
lab = "{0:.2f}<r<{1:.2f}".format(bins[k], bins[k + 1])
sns.distplot(
z_in_bin,
hist=False,
kde_kws={"lw": 2, "alpha": 0.9},
label=lab,
)
r_rbin, z_rbin = get_mean_rbins(z_bins, z_name=z_name)
z_bins = z_bins if return_bin else None
return r_rbin, z_rbin, z_bins | f040fe7c7505e628978faf733a91578cb1a04709 | 17,623 |
def sequence_to_synergy_sims(inputs, params):
"""same as sequence to synergy, but prep some other tensors first
"""
# set up orig seq tensor
inputs[DataKeys.ORIG_SEQ] = inputs[DataKeys.FEATURES]
# set up thresholds tensor
num_interpretation_tasks = len(params["importance_task_indices"])
thresholds_shape = [
inputs[DataKeys.FEATURES].get_shape().as_list()[0],
num_interpretation_tasks, 1, 1]
inputs[DataKeys.WEIGHTED_SEQ_THRESHOLDS] = tf.zeros(thresholds_shape)
# move inputs to outputs
outputs = dict(inputs)
# and then run sequence to synergy
outputs, params = sequence_to_synergy(outputs, params)
return outputs, params | 6abb659be6d1977e7d8a3c7b47f2f60997faf951 | 17,624 |
import os
def create_invalid_points_feature_class(access_feature_class, invalid_reach_table, invalid_points_feature_class):
"""
Create a feature class of centroid points for the invalid reaches.
:param access_feature_class: Point feature class of all accesses.
:param invalid_reach_table: Table of reaches not passing validation.
:return: Path to invalid points feature class.
"""
# get the reach coordinates for the putin and takeout if they exist
def get_reach_centroid(access_feature_class, reach_id):
# initialize variables to store coordinate pairs as nonetype
putin_coords = None
takeout_coords = None
# attempt to get the coordinates for the putin and takeout
for row in arcpy.da.SearchCursor(access_feature_class, ('reach_id', 'SHAPE@XY'), "type = 'putin'"):
if row[0] == reach_id:
putin_coords = row[1]
break
for row in arcpy.da.SearchCursor(access_feature_class, ('reach_id', 'SHAPE@XY'), "type = 'takeout'"):
if row[0] == reach_id:
takeout_coords = row[1]
break
# return coordinates for the best location for the reach available
if putin_coords is None and takeout_coords is None:
return None
elif putin_coords is None:
return takeout_coords
elif takeout_coords is None:
return putin_coords
else:
return (
min([putin_coords[0], takeout_coords[0]]) + abs(putin_coords[0] - takeout_coords[0]) / 2,
min([putin_coords[1], takeout_coords[1]]) + abs(putin_coords[1] - takeout_coords[1]) / 2
)
# create the output feature class
out_fc = arcpy.CreateFeatureclass_management(
out_path=os.path.dirname(invalid_points_feature_class),
out_name=os.path.basename(invalid_points_feature_class),
geometry_type='POINT',
spatial_reference=arcpy.Describe(access_feature_class).spatialReference
)[0]
# add the fields
arcpy.AddField_management(
in_table=out_fc,
field_name='reach_id',
field_type='TEXT',
field_length=10,
field_alias='Reach ID'
)
arcpy.AddField_management(
in_table=out_fc,
field_name='reason',
field_type='TEXT',
field_length=200,
field_alias='Reason'
)
# create a list of invalid reach id's and invalid reasons
invalid_list = [(row[0], row[1]) for row in arcpy.da.SearchCursor(invalid_reach_table, ('reach_id', 'reason'))]
# use an insert cursor to add records to the feature class
with arcpy.da.InsertCursor(out_fc, ('reach_id', 'reason', 'SHAPE@XY')) as cursor:
# for every invalid reach
for invalid_reach in invalid_list:
# get the reach centroid
centroid = get_reach_centroid(access_feature_class, invalid_reach[0])
# insert a new record
cursor.insertRow([invalid_reach[0], invalid_reach[1], centroid])
# return the path to the output feature class
return out_fc | 3d1a0a73efdb34599c261cf6151c2aa29cb2e004 | 17,625 |
def _gen_roi_func_constant(constant_roi):
"""
Return a RoI function which returns a constant radius.
See :py:func:`map_to_grid` for a description of the parameters.
"""
def roi(zg, yg, xg):
""" constant radius of influence function. """
return constant_roi
return roi | c7c69cf32fb289d5e9c9497474989aa873a231ba | 17,626 |
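The returned closure ignores the grid coordinates entirely, for example:
roi = _gen_roi_func_constant(500.0)
print(roi(0.0, 0.0, 0.0), roi(1250.0, -3000.0, 7500.0))   # 500.0 500.0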
def less_important_function(num: int) -> str:
"""
Example which is documented in the module documentation but not highlighted on the main page.
:param num: A thing to pass
:return: A return value
"""
return f'{num}' | d6ba0644fc8f4582fb63ceb722b05e824d63312a | 17,627 |
def weth_asset_data(): # pylint: disable=redefined-outer-name
"""Get 0x asset data for Wrapped Ether (WETH) token."""
return asset_data_utils.encode_erc20(
NETWORK_TO_ADDRESSES[NetworkId.GANACHE].ether_token
) | 0341c1f5c46e05a316c99154be82399145ae9d1a | 17,628 |
def match_known_module_name(pattern):
"""
Matching with know module name.
Args:
pattern (Pattern): To be replaced pattern.
Returns:
str, matched module name, return None if not matched.
"""
matched_result = []
for ptn, module_name in BUILT_IN_MODULE_NAME.items():
if pattern.in_degree == ptn.in_degree and pattern.out_degree == ptn.out_degree and \
ptn.head == pattern.head and ptn.tail == pattern.tail:
is_matched, score = pattern_fuzzy_matching(pattern.ptn_items, ptn.ptn_items)
if is_matched:
matched_result.append((module_name, score))
if matched_result:
module_name = (matched_result if len(matched_result) == 1 else
sorted(matched_result, key=lambda x: x[1], reverse=True))[0][0]
if pattern.pattern not in used_module_name:
used_module_name[pattern.pattern] = 1
else:
module_name = f"{module_name}{used_module_name[pattern.pattern]}"
used_module_name[pattern.pattern] += 1
return module_name
return None | 0d76e22517d4fc435101702591e095a96cc5faf7 | 17,629 |
def _get_jones_types(name, numba_ndarray_type, corr_1_dims, corr_2_dims):
"""
Determine which of the following three cases are valid:
1. The array is not present (None) and therefore no Jones Matrices
2. single (1,) or (2,) dual correlation
3. (2, 2) full correlation
Parameters
----------
name: str
Array name
numba_ndarray_type: numba.type
Array numba type
corr_1_dims: int
Number of `numba_ndarray_type` dimensions,
including correlations (first option)
corr_2_dims: int
Number of `numba_ndarray_type` dimensions,
including correlations (second option)
Returns
-------
int
Enumeration describing the Jones Matrix Type
- 0 -- Not Present
- 1 -- (1,) or (2,)
- 2 -- (2, 2)
"""
if is_numba_type_none(numba_ndarray_type):
return JONES_NOT_PRESENT
if numba_ndarray_type.ndim == corr_1_dims:
return JONES_1_OR_2
elif numba_ndarray_type.ndim == corr_2_dims:
return JONES_2X2
else:
raise ValueError("%s.ndim not in (%d, %d)" %
(name, corr_1_dims, corr_2_dims)) | 8a9d6f3441c488e2bf1059dd6fcb506a2285d291 | 17,630 |
def editing_passport_serial_handler(update: Update,
context: CallbackContext) -> int:
"""Get and save passport serial."""
new_state = editing_pd(update, context,
validator=validators.passport_serial_validator,
attribute='passport_serial',
state=PASSPORT_SERIAL,
)
return new_state | 81c86bffa07376f17dd2c013f5eab42856fa4cea | 17,631 |
import requests
def get_overview(ticker: str) -> pd.DataFrame:
"""Get alpha vantage company overview
Parameters
----------
ticker : str
Stock ticker
Returns
-------
pd.DataFrame
Dataframe of fundamentals
"""
# Request OVERVIEW data from Alpha Vantage API
s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
result = requests.get(s_req, stream=True)
# If the returned data was successful
if result.status_code == 200:
# Parse json data to dataframe
if "Note" in result.json():
console.print(result.json()["Note"], "\n")
return pd.DataFrame()
df_fa = pd.json_normalize(result.json())
# Keep json data sorting in dataframe
df_fa = df_fa[list(result.json().keys())].T
df_fa.iloc[5:] = df_fa.iloc[5:].applymap(lambda x: long_number_format(x))
clean_df_index(df_fa)
df_fa = df_fa.rename(
index={
"E b i t d a": "EBITDA",
"P e ratio": "PE ratio",
"P e g ratio": "PEG ratio",
"E p s": "EPS",
"Revenue per share t t m": "Revenue per share TTM",
"Operating margin t t m": "Operating margin TTM",
"Return on assets t t m": "Return on assets TTM",
"Return on equity t t m": "Return on equity TTM",
"Revenue t t m": "Revenue TTM",
"Gross profit t t m": "Gross profit TTM",
"Diluted e p s t t m": "Diluted EPS TTM",
"Quarterly earnings growth y o y": "Quarterly earnings growth YOY",
"Quarterly revenue growth y o y": "Quarterly revenue growth YOY",
"Trailing p e": "Trailing PE",
"Forward p e": "Forward PE",
"Price to sales ratio t t m": "Price to sales ratio TTM",
"E v to revenue": "EV to revenue",
"E v to e b i t d a": "EV to EBITDA",
}
)
return df_fa
return pd.DataFrame() | ddc87f05c8e67f84f2327cf0f06aded0e31e5e8c | 17,632 |
def effective_sample_size(samples):
"""
Calculates ESS for a matrix of samples.
"""
try:
n_samples, n_params = samples.shape
except (ValueError, IndexError):
raise ValueError('Samples must be given as a 2d array.')
if n_samples < 2:
raise ValueError('At least two samples must be given.')
return [ess_single_param(samples[:, i]) for i in range(0, n_params)] | 7a31d4a2c2bee133ab264dc793f16d0d6bd866f2 | 17,633 |
def get_credentials(credentials_path):
"""
Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid it
returns None.
Returns:
Credentials, the obtained credential or None
"""
store = Storage(credentials_path)
credentials = store.get()
if not credentials or credentials.invalid:
return None
return credentials | b6a6fcd20f8def88c554d276e8f07aae3dc1f536 | 17,634 |
def setup(hass, config):
"""Set up this component."""
conf_track = config[DOMAIN][CONF_TRACK]
_LOGGER.info('version %s is starting, if you have ANY issues with this, please report'
' them here: https://github.com/custom-components/custom_updater', __version__)
ha_conf_dir = str(hass.config.path())
card_controller = CustomCards(hass, ha_conf_dir)
components_controller = CustomComponents(hass, ha_conf_dir)
def check_all_service(call):
"""Set up service for manual trigger."""
if not conf_track:
card_controller.cache_versions(call)
components_controller.cache_versions(call)
elif 'cards' in conf_track and 'components' in conf_track:
card_controller.cache_versions(call)
components_controller.cache_versions(call)
elif 'cards' in conf_track:
card_controller.cache_versions(call)
elif 'components' in conf_track:
components_controller.cache_versions(call)
def update_all_service(call):
"""Set up service for manual trigger."""
if not conf_track:
card_controller.update_all()
components_controller.update_all()
elif 'cards' in conf_track and 'components' in conf_track:
card_controller.update_all()
components_controller.update_all()
elif 'cards' in conf_track:
card_controller.update_all()
elif 'components' in conf_track:
components_controller.update_all()
if not conf_track or 'cards' in conf_track:
def upgrade_card_service(call):
"""Set up service for manual trigger."""
card_controller.upgrade_single(call.data.get(ATTR_CARD))
hass.services.register(DOMAIN, 'upgrade_single_card', upgrade_card_service)
if not conf_track or 'components' in conf_track:
def upgrade_component_service(call):
"""Set up service for manual trigger."""
components_controller.upgrade_single(call.data.get(ATTR_COMPONENT))
hass.services.register(DOMAIN, 'upgrade_single_component', upgrade_component_service)
track_time_interval(hass, card_controller.cache_versions, INTERVAL)
track_time_interval(hass, components_controller.cache_versions, INTERVAL)
hass.services.register(DOMAIN, 'check_all', check_all_service)
hass.services.register(DOMAIN, 'update_all', update_all_service)
return True | d0a18b4c2c3e2c94f19afb66e2e9d2a3d18fea07 | 17,635 |
def coset_enumeration_r(fp_grp, Y, max_cosets=None):
"""
    This is the easier of the two implemented methods of coset enumeration,
    and is often called the HLT method, after Hazelgrove, Leech, Trotter.
    The idea is that ``scan_and_fill`` makes new definitions
    whenever the scan is incomplete to enable the scan to complete; this way
    we fill in the gaps in the scan of the relator or subgroup generator,
    which is why it is called the relator-based method.
# TODO: complete the docstring
See Also
========
scan_and_fill,
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
>>> from sympy.combinatorics.free_groups import free_group
>>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
>>> F, x, y = free_group("x, y")
# Example 5.1 from [1]
>>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
>>> C = coset_enumeration_r(f, [x])
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... print(C.table[i])
[0, 0, 1, 2]
[1, 1, 2, 0]
[2, 2, 0, 1]
>>> C.p
[0, 1, 2, 1, 1]
# Example from exercises Q2 [1]
>>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
>>> C = coset_enumeration_r(f, [])
>>> C.compress(); C.standardize()
>>> C.table
[[1, 2, 3, 4],
[5, 0, 6, 7],
[0, 5, 7, 6],
[7, 6, 5, 0],
[6, 7, 0, 5],
[2, 1, 4, 3],
[3, 4, 2, 1],
[4, 3, 1, 2]]
# Example 5.2
>>> f = FpGroup(F, [x**2, y**3, (x*y)**3])
>>> Y = [x*y]
>>> C = coset_enumeration_r(f, Y)
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... print(C.table[i])
[1, 1, 2, 1]
[0, 0, 0, 2]
[3, 3, 1, 0]
[2, 2, 3, 3]
# Example 5.3
>>> f = FpGroup(F, [x**2*y**2, x**3*y**5])
>>> Y = []
>>> C = coset_enumeration_r(f, Y)
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... print(C.table[i])
[1, 3, 1, 3]
[2, 0, 2, 0]
[3, 1, 3, 1]
[0, 2, 0, 2]
# Example 5.4
>>> F, a, b, c, d, e = free_group("a, b, c, d, e")
>>> f = FpGroup(F, [a*b*c**-1, b*c*d**-1, c*d*e**-1, d*e*a**-1, e*a*b**-1])
>>> Y = [a]
>>> C = coset_enumeration_r(f, Y)
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... print(C.table[i])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# example of "compress" method
>>> C.compress()
>>> C.table
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# Exercises Pg. 161, Q2.
>>> F, x, y = free_group("x, y")
>>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
>>> Y = []
>>> C = coset_enumeration_r(f, Y)
>>> C.compress()
>>> C.standardize()
>>> C.table
[[1, 2, 3, 4],
[5, 0, 6, 7],
[0, 5, 7, 6],
[7, 6, 5, 0],
[6, 7, 0, 5],
[2, 1, 4, 3],
[3, 4, 2, 1],
[4, 3, 1, 2]]
# John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson
# Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490
# from 1973chwd.pdf
# Table 1. Ex. 1
>>> F, r, s, t = free_group("r, s, t")
>>> E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2])
>>> C = coset_enumeration_r(E1, [r])
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... print(C.table[i])
[0, 0, 0, 0, 0, 0]
Ex. 2
>>> F, a, b = free_group("a, b")
>>> Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5])
>>> C = coset_enumeration_r(Cox, [a])
>>> index = 0
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... index += 1
>>> index
500
# Ex. 3
>>> F, a, b = free_group("a, b")
>>> B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \
(a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4])
>>> C = coset_enumeration_r(B_2_4, [a])
>>> index = 0
>>> for i in range(len(C.p)):
... if C.p[i] == i:
... index += 1
>>> index
1024
"""
# 1. Initialize a coset table C for < X|R >
C = CosetTable(fp_grp, Y, max_cosets=max_cosets)
R = fp_grp.relators
A_dict = C.A_dict
A_dict_inv = C.A_dict_inv
p = C.p
for w in Y:
C.scan_and_fill(0, w)
alpha = 0
while alpha < C.n:
if p[alpha] == alpha:
for w in R:
C.scan_and_fill(alpha, w)
# if α was eliminated during the scan then break
if p[alpha] < alpha:
break
if p[alpha] == alpha:
for x in A_dict:
if C.table[alpha][A_dict[x]] is None:
C.define(alpha, x)
alpha += 1
return C | ada1991a732c29c3d14aa5b2a74e54ce57036fc0 | 17,636 |
def get_model(**kwargs):
"""
Returns the model.
"""
model = ShuffleNetV2(**kwargs)
return model | 6b226b56fe603a0b703267bc35e2b92f2c6dda7c | 17,637 |
import torch
def absolute_filter_change(baseline_state_dict, target_state_dict):
""" Calculate sum(abs(K2 - K1) / sum(K1))
Args:
baseline_state_dict (dict): state_dict of ori_net
target_state_dict (dict): state_dict of finetune_net
Returns:
sorted_diff (list): sorted values
sorted_index (list): sorted index of kernel
"""
# save all weight to list
baseline_weight_list = []
for key, value in baseline_state_dict.items():
if key.find('weight') != -1:
weight = value.reshape(-1, 3, 3)
baseline_weight_list.append(weight)
# [-1, 3, 3]
baseline_weight_list = torch.cat(baseline_weight_list, dim=0)
target_weight_list = []
for key, value in target_state_dict.items():
if key.find('weight') != -1:
weight = value.reshape(-1, 3, 3)
target_weight_list.append(weight)
# [-1, 3, 3]
target_weight_list = torch.cat(target_weight_list, dim=0)
sum_baseline_weight = torch.sum(torch.sum(abs(baseline_weight_list), dim=1), dim=1)
sum_baseline_weight = sum_baseline_weight.unsqueeze(1).unsqueeze(1)
diff = torch.sum(torch.sum(abs(target_weight_list - baseline_weight_list) / sum_baseline_weight, dim=1), dim=1)
return diff.cpu().numpy() | ad4616a03ef80f5a5430a87fd07d70d6bb10f7b7 | 17,638 |
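A hedged usage sketch, comparing two randomly initialised conv layers standing in for the original and finetuned networks:
import torch.nn as nn
baseline = nn.Conv2d(3, 8, kernel_size=3, bias=False)
finetuned = nn.Conv2d(3, 8, kernel_size=3, bias=False)
diff = absolute_filter_change(baseline.state_dict(), finetuned.state_dict())
print(diff.shape)   # (24,): one change score per 3x3 kernel slice (8 filters x 3 input channels)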
import ctypes
import sys
def run_as_admin(argv=None, debug=False):
"""
Helper function to run Python script with admin privileges
"""
shell32 = ctypes.windll.shell32
if argv is None and shell32.IsUserAnAdmin():
return True
if argv is None:
argv = sys.argv
if hasattr(sys, '_MEIPASS'):
# Support pyinstaller wrapped program.
        arguments = argv[1:]
else:
arguments = argv
argument_line = u' '.join(arguments)
executable = sys.executable
ret = shell32.ShellExecuteW(None, u"runas", executable, argument_line, None, 1)
if int(ret) <= 32:
return False
return None | fe6e029d47ef5c486ee2e1c7850c95f36443acc4 | 17,639 |
import torch
def load_checkpoints(checkpoint_name):
"""
Load a pretrained checkpoint.
:param checkpoint_name: checkpoint filename
    :return: model_state_dict, optimizer_state_dict, epoch, time_elapsed, src_vocabulary, tgt_vocabulary
"""
# Get checkpoint from file
checkpoint = torch.load(checkpoint_name, map_location=torch.device('cpu'))
# The epoch when training has been left
epoch = checkpoint['epoch']
# The time elapsed during training
time_elapsed = checkpoint['time_elapsed']
# Get state_dict of the model
model_state_dict = checkpoint['model_state_dict']
# Get the state_dict of the optimizer
optimizer_state_dict = checkpoint['optimizer_state_dict']
# Get source language vocabulary
src_vocabulary = checkpoint['src_vocabulary']
tgt_vocabulary = checkpoint['tgt_vocabulary']
return model_state_dict, optimizer_state_dict, epoch, time_elapsed, src_vocabulary, tgt_vocabulary | e81f094c811d497504fd1f93a8ee537e6b122bd6 | 17,640 |
def _extract_data(prices, n_markets):
""" Extract the open, close, high and low prices from the price matrix. """
os = prices[:, :, :n_markets]
cs = prices[:, :, n_markets:2*n_markets]
hs = prices[:, :, 2*n_markets:3*n_markets]
ls = prices[:, :, 3*n_markets:4*n_markets]
return os, cs, hs, ls | 154af0c8270fbe664b3dd5d07a724b753ff02040 | 17,641 |
def make_screen():
"""creates the code for a new screen"""
return pygame.display.set_mode((800,600)) | ca0e23f5583e652207f0297e7363dacaa5a7f085 | 17,642 |
import os
def listFiles(sDir,ext="_du.mpxml"):
"""
return 1 list of files
"""
lsFile = sorted([_fn
for _fn in os.listdir(sDir)
                     if _fn.lower().endswith(ext)
])
return lsFile | 873f331db44a96fe9d826dbc926354ce4ded6542 | 17,643 |
def flip_vert(r, c, row, col, reversed):
"""1번 연산"""
if reversed:
row, col = col, row
return row - 1 - r, c, reversed | 053f6a354e5f6387a528af4ce07290cba370830c | 17,644 |
def get_orders(self, **kwargs):
"""
|
| **Current All Open Orders (USER_DATA)**
| *Get all open orders on a symbol. Careful when accessing this with no symbol.*
| *If the symbol is not sent, orders for all symbols will be returned in an array.*
:API endpoint: ``GET /dapi/v1/openOrders``
:API doc: https://binance-docs.github.io/apidocs/delivery/en/#current-all-open-orders-user_data
:parameter symbol: string
:parameter recvWindow: optional int, the value cannot be greater than 60000
|
"""
url_path = "/dapi/v1/openOrders"
params = { **kwargs }
return self.sign_request("GET", url_path, params) | 0561fdeb4863ea08b1644a7695ca7f4ed0622fd9 | 17,645 |
def update_amount(amount_id: int):
"""This function update a data of amount
Args:
amount_id (int): id of amount
Returns:
Response: description of amount
"""
current_app.logger.debug('In PUT /api/amounts/<int:amount_id>')
response = None
try:
# Load data
data = request.get_json()
if 'id_ma' not in data:
data['id_ma'] = amount_id
# Validate fields to update
AmountValidationService.validate(data)
# Checks amount exist
AmountDBService.get_amount_by_id(amount_id)
# Check project not solde
AmountDBService.is_project_solde(receipt_id = data['id_r'])
# Check year unique by receipt
AmountDBService.check_unique_amount_by_year_and_receipt_id(data['annee_ma'], data['id_r'], amount_id)
# Check sum amount value
AmountDBService.check_sum_value(data, amount_id)
response = AmountDBService.update(data)
response = jsonify(response), 200
except ValueError as error:
current_app.logger.error(error)
response = jsonify(error.args[0]), error.args[1]
except Exception as e:
current_app.logger.error(e)
response = jsonify({'message': 'Une erreur est survenue lors de la modification du montant affecté'}), 500
finally:
return response | 7f85609ab152f22fbcc4aff3f4fb9856bf98d2c7 | 17,646 |
def en_13757(data: bytes) -> int:
"""
Compute a CRC-16 checksum of data with the en_13757 algorithm.
:param bytes data: The data to be computed
:return: The checksum
:rtype: int
:raises TypeError: if the data is not a bytes-like object
"""
_ensure_bytes(data)
return _crc_16_en_13757(data) | 85a7793f475f04cca2d7dcf92eeba523fde9b1c2 | 17,647 |
def get_agent_supported_features_list_for_extensions():
"""
List of features that the GuestAgent currently supports (like Extension Telemetry Pipeline, etc) needed by Extensions.
We need to send this list as environment variables when calling extension commands to inform Extensions of all the
features the agent supports.
:return: Dict containing all Extension supported features with the key as their names and the AgentFeature object as
the value if the feature is supported by the Agent.
Eg: {
CRPSupportedFeatureNames.ExtensionTelemetryPipeline: _ETPFeature()
}
"""
return dict((name, feature) for name, feature in __EXTENSION_ADVERTISED_FEATURES.items() if feature.is_supported) | 8a453286c433b3ecaed2fc402c5d557b335f3935 | 17,648 |
def GCMV(image, mask=None):
"""
:param image: input image, color (3 channels) or gray (1 channel);
:param mask: calc gamma value in the mask area, default is the whole image;
:return: gamma, and output
"""
# Step 1. Check the inputs: image
if np.ndim(image) == 3 and image.shape[-1] == 3: # color image
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
img = hsv[:, :, 2]
color_flag = True
elif np.ndim(image) == 2: # gray image
img = image
color_flag = False
else:
print("ERROR:check the input image of AGT function...")
return 1, None
if mask is not None:
mask = mask<255
else:
mask = np.ones_like(img)
# Step 2. Main steps of GCMV
n_img = img/255.0
mean = np.mean(n_img)
gamma_list = np.arange(0.01,1.01,0.01) if mean<=0.5 else np.arange(1.1,10.1,0.1)
score = np.zeros_like(gamma_list)
for k, gamma in enumerate(gamma_list):
t_img = np.power(n_img, gamma)
m1, v1 = np.mean(t_img, axis=0), np.var(t_img, axis=0)
m2, v2 = np.mean(t_img, axis=1), np.var(t_img, axis=1)
score[k] = np.mean(np.power(m1-0.5077,2)) + np.mean(np.power(m2-0.5077,2))+np.mean(np.power(v1-0.0268,2)) + np.mean(np.power(v2-0.0268,2))
# grid search for the optimal gamma
ind = np.argmin(score)
best_gamma =gamma_list[ind]
# print(best_gamma)
# Step 2.4 apply gamma transformation
n_img = (img+0.5)/256
output = np.power(n_img, best_gamma)
# Step 3.0 stretch back and post-process
# if mask is not None:
# output = (output * 256 - 0.5) * mask / 255.0
# else:
output = (output * 256 - 0.5)
output = output.round().astype(np.uint8)
if color_flag:
hsv[:, :, 2] = output
output = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return best_gamma, output | 47070fdda8dcb2507fefd6a5aa922d21481c0896 | 17,649 |
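A minimal sketch on a synthetic grayscale image (np is assumed to be numpy, as in the function body):
import numpy as np
rng = np.random.default_rng(0)
gray = rng.integers(0, 120, size=(64, 64), dtype=np.uint8)   # a dark-ish image
best_gamma, corrected = GCMV(gray)
print(best_gamma, corrected.dtype)   # a gamma below 1 brightens the dark image; uint8 output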
from typing import Union
def downloadStaffFile(request: HttpRequest, filename: str) -> Union[HttpResponse, FileResponse]:
"""Serves the specified 'filename' validating the user is logged in and a staff user"""
return _downloadFileFromStorage(storages.StaffStorage(), filename) | 0e0137f5b5e4140c2d9ff300ed97b7a3e3c37602 | 17,650 |
def get_view_renderer_type(*args):
"""
get_view_renderer_type(v) -> tcc_renderer_type_t
Get the type of renderer currently in use in the given view (
'ui_get_renderer_type' )
@param v (C++: TWidget *)
"""
return _ida_kernwin.get_view_renderer_type(*args) | e35269d7b77196ebd8ea325db3d6301ffdb63908 | 17,651 |
async def create(req):
"""
Add a new label to the labels database.
"""
data = req["data"]
async with AsyncSession(req.app["pg"]) as session:
label = Label(
name=data["name"], color=data["color"], description=data["description"]
)
session.add(label)
try:
await session.flush()
document = label.to_dict()
await session.commit()
except IntegrityError:
raise HTTPBadRequest(text="Label name already exists")
document = await apply_transforms(document, [SampleCountTransform(req.app["db"])])
headers = {"Location": f"/labels/{document['id']}"}
return json_response(document, status=201, headers=headers) | 1d7de257f0a3bc1259168821e1fcd6358d4c31c6 | 17,652 |
def process_radial_velocity(procstatus, dscfg, radar_list=None):
"""
Estimates the radial velocity respect to the radar from the wind velocity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : string. Dataset keyword
The input data type
latitude, longitude : float
arbitrary coordinates [deg] from where to compute the radial
velocity. If any of them is None it will be the radar position
altitude : float
arbitrary altitude [m MSL] from where to compute the radial
velocity. If None it will be the radar altitude
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
v_speed_field = None
h_speed_field = None
h_dir_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'wind_vel_v':
v_speed_field = get_fieldname_pyart(datatype)
if datatype == 'WIND_SPEED':
h_speed_field = get_fieldname_pyart(datatype)
if datatype == 'WIND_DIRECTION':
h_dir_field = get_fieldname_pyart(datatype)
if h_speed_field is None or h_dir_field is None:
warn('Horizontal wind speed and direction fields required'
' to estimate radial velocity')
return None, None
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if h_speed_field not in radar.fields or h_dir_field not in radar.fields:
warn('Unable to estimate radial velocity. '
'Missing horizontal wind')
return None, None
h_speed = radar.fields[h_speed_field]['data']
h_dir = radar.fields[h_dir_field]['data']
if v_speed_field is None or v_speed_field not in radar.fields:
warn('Unknown vertical wind speed. Assumed 0')
if v_speed_field is None:
            v_speed_field = 'vertical_wind_component'
v_speed = np.ma.zeros((radar.nrays, radar.ngates))
else:
v_speed = radar.fields[v_speed_field]['data']
# user defined parameters
lat = dscfg.get('latitude', None)
lon = dscfg.get('longitude', None)
alt = dscfg.get('altitude', None)
# get u and v wind components
h_dir_rad = np.deg2rad(h_dir)
speed_h_u = h_speed*np.sin(h_dir_rad) # eastward component
speed_h_v = h_speed*np.cos(h_dir_rad) # northward component
if lat is not None or lon is not None or alt is not None:
# get antenna coordinates respect to new radar location
if lat is None:
lat = radar.latitude['data'][0]
if lon is None:
lon = radar.longitude['data'][0]
if alt is None:
alt = radar.altitude['data'][0]
x, y = pyart.core.geographic_to_cartesian_aeqd(
radar.gate_longitude['data'], radar.gate_latitude['data'], lon,
lat)
z = radar.gate_altitude['data'] - alt
_, azimuths, elevations = pyart.core.cartesian_to_antenna(
x, y, z)
azi_2D_rad = np.deg2rad(azimuths)
ele_2D_rad = np.deg2rad(elevations)
else:
azi_2D_rad = np.broadcast_to(
np.deg2rad(radar.azimuth['data'])[:, np.newaxis],
(radar.nrays, radar.ngates))
ele_2D_rad = np.broadcast_to(
np.deg2rad(radar.elevation['data'])[:, np.newaxis],
(radar.nrays, radar.ngates))
r_speed = pyart.config.get_metadata('velocity')
# assuming no vertical velocity
# r_speed['data'] = h_speed*np.cos(h_dir_rad-azi_2D_rad)*np.cos(ele_2D_rad)
# with vertical velocity included
r_speed['data'] = (
(speed_h_u*np.sin(azi_2D_rad)+speed_h_v*np.cos(azi_2D_rad)) *
np.cos(ele_2D_rad)+np.sin(ele_2D_rad)*v_speed)
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('velocity', r_speed)
return new_dataset, ind_rad | 2114cf4f5524662f80cac69dab45a00729053192 | 17,653 |
def brute_force_diagonalize(answers, wordlist=WORDS, quiet=False):
"""
Find the most cromulent diagonalization for a set of answers, trying all
possible orders. See README.md for a cool example of this with 10 answers.
As a somewhat artificial example, let's suppose we have these seven
answers from the 2000 metas, but don't remember their order:
>>> metas = ['benjamins', 'billgates', 'donors', 'luxor', 'mansion', 'miserly', 'realty']
>>> brute_force_diagonalize(metas)[0] # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
Cromulence Text Info
9.5 RUN EAST
9.2 MIX LAST
9.1 MAX LAST
9.1 BE NOISY
8.8 LINE TO I
...
(9.5, 'RUN EAST', None)
Of course we were looking for the famous red herring "BE NOISY", but
"RUN EAST" sounds like a good way to find the coin also.
"""
results = []
seen = set()
answers = [parse_cell(word) for word in answers]
for i, permutation in enumerate(permutations(answers)):
if not quiet and i > 0 and i % 10000 == 0:
print("Tried %d permutations" % i)
try:
diag = diagonalize(permutation)
except IndexError:
continue
found = wordlist.search(diag, count=1, use_cromulence=True)
if found:
logprob, text = found[0]
slug = slugify(text)
if slug not in seen:
results.append((logprob, text, None))
seen.add(slug)
return wordlist.show_best_results(results) | 25725e34dc328cc605cc5dc147547c84de803873 | 17,654 |
def train():
"""
MNIST training set creator.
It returns a reader creator, each sample in the reader is image pixels in
[-1, 1] and label in [0, 9].
:return: Training reader creator
:rtype: callable
"""
return reader_creator(
paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist',
TRAIN_IMAGE_MD5),
paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist',
TRAIN_LABEL_MD5), 100) | b7008aa61ce49822838c4b30709537396a93f453 | 17,655 |
import base64
import hmac
import hashlib
def sign_v2(key, msg):
"""
AWS version 2 signing by sha1 hashing and base64 encode.
"""
return base64.b64encode(hmac.new(key, msg.encode("utf-8"), hashlib.sha1).digest()) | 1aa54cc2cd3ce20ad5222a889754efda2f4632c3 | 17,656 |
def graph_apply(fun, *args):
"""Currying wrapper around APP(-,-)."""
result = fun
for arg in args:
arg = as_graph(arg)
result = APP(result, arg)
return result | 709306884b37b41c9a7289ad6a372d2b43ede6a9 | 17,657 |
def find_hcf(a, b) :
""" Finds the Highest Common Factor among two numbers """
#print('HCF : ', a, b)
if b == 0 :
return a
return find_hcf(b, a%b) | 818bbc05ab9262e8fd1e8975daf68ca3e0fa6a8b | 17,658 |
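For example:
print(find_hcf(48, 36))   # 12
print(find_hcf(17, 5))    # 1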
def GAU_pdf(x: np.ndarray, mu: float, var: float) -> np.ndarray:
"""
Probability function of Guassian distribution
:param x: ndarray input parameters
:param mu: float mean of the distribution
:param var: float variance of the distribution
:return: ndarray probability of each sample
"""
k = (1 / (np.sqrt(2 * np.pi * var)))
up = -np.power(x - mu, 2) / (2 * var)
return k * np.exp(up) | 9810da4a05d86ac7895a2947a1890fe111faeae4 | 17,659 |
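For instance, evaluating the standard normal density:
import numpy as np
x = np.array([-1.0, 0.0, 1.0])
print(GAU_pdf(x, mu=0.0, var=1.0))   # approx. [0.242 0.399 0.242]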
def version_compare(a, b): # real signature unknown; restored from __doc__
"""
version_compare(a: str, b: str) -> int
Compare the given versions; return a strictly negative value if 'a' is
smaller than 'b', 0 if they are equal, and a strictly positive value if
'a' is larger than 'b'.
"""
return 0 | 97b3fd3bbd542d776b75327c88f9e80d776ba248 | 17,660 |
def line_status():
"""
设备线路详情
:return:
"""
device_id = request.args.get("device_id")
lines = Line.objects(device_id=device_id).all()
result = Monitor.device_status(device_id, lines)
result.pop(0)
return Success(result) | 47ca3cfef469c346ad85b701339941707e2084ea | 17,661 |
import sys
import inspect
import warnings
def namedPositionals(func, args):
"""Given a function, and a sequence of positional arguments destined
for that function, identifies the name for each positional argument.
Variable positional arguments are given an automatic name.
:arg func: Function which will accept ``args`` as positionals.
:arg args: Tuple of positional arguments to be passed to ``func``.
"""
# Current implementation will
# result in naming collisions
# for something like this:
#
# def func(args0, *args):
# ...
# because of automatic vararg
# naming. But who would write
# a function like that anyway?
# Remove any decorators
# from the function
func = _unwrap(func)
# getargspec is the only way to
# get the names of positional
# arguments in Python 2.x.
if sys.version_info[0] < 3:
spec = inspect.getargspec(func)
argnames = spec.args
varargs = spec.varargs
# But getargspec is deprecated
# in python 3.x
else:
# getfullargspec is deprecated in
# python 3.5, but not in python 3.6.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
spec = inspect.getfullargspec(func)
argnames = spec.args
varargs = spec.varargs
# we only care about the arguments
# that are being passed in
argnames = argnames[:len(args)]
# make up names for varargs
nvarargs = len(args) - len(argnames)
if varargs is not None and nvarargs > 0:
argnames += ['{}{}'.format(varargs, i) for i in range(nvarargs)]
return argnames | 1d36efd6ad98d6c21b2d75ea54f42dc84a9e52b0 | 17,662 |
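A sketch of the expected behaviour (note that namedPositionals relies on a module-level _unwrap helper that is not shown in this snippet):
def f(a, b, *extras):
    pass
print(namedPositionals(f, (1, 2, 3, 4)))   # ['a', 'b', 'extras0', 'extras1']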
def _hist_fig(df, pred, c):
"""
"""
bins = np.linspace(0, 1, 15)
unlabeled = pred[c][pd.isnull(df[c])].values
fig, (ax1, ax2) = plt.subplots(2,1)
# top plot: training data
pos_labeled = pred[c][(df[c] == 1)&(df["validation"] == False)].values
neg_labeled = pred[c][(df[c] == 0)&(df["validation"] == False)].values
train_auc = _auc(pos_labeled, neg_labeled)
if len(pos_labeled) > 0:
ax1.hist(pos_labeled, bins=bins, alpha=0.5,
label="labeled positive (train)", density=True)
if len(neg_labeled) > 0:
ax1.hist(neg_labeled, bins=bins, alpha=0.5,
label="labeled negative (train)", density=True)
if len(unlabeled) > 0:
ax1.hist(unlabeled, bins=bins, alpha=1., label="unlabeled",
density=True, histtype="step", lw=2)
# bottom plot: validation data
pos_labeled = pred[c][(df[c] == 1)&(df["validation"] == True)].values
neg_labeled = pred[c][(df[c] == 0)&(df["validation"] == True)].values
test_auc = _auc(pos_labeled, neg_labeled)
if len(pos_labeled) > 0:
ax2.hist(pos_labeled, bins=bins, alpha=0.5,
label="labeled positive (val)", density=True)
if len(neg_labeled) > 0:
ax2.hist(neg_labeled, bins=bins, alpha=0.5,
label="labeled negative (val)", density=True)
if len(unlabeled) > 0:
ax2.hist(unlabeled, bins=bins, alpha=1., label="unlabeled",
density=True, histtype="step", lw=2)
for a in [ax1, ax2]:
a.legend(loc="upper left")
a.set_xlabel("assessed probability", fontsize=14)
a.set_ylabel("frequency", fontsize=14)
title = "model outputs for '%s'\nAUC train %s, test AUC %s"%(c, train_auc, test_auc)
ax1.set_title(title, fontsize=14)
plt.close(fig)
return fig | 6836c0228f2db705642e5f5fa4da6d318674fd55 | 17,663 |
import requests
def is_responsive(url, code=200):
"""Check if something responds to ``url`` syncronously"""
try:
response = requests.get(url)
if response.status_code == code:
return True
except requests.exceptions.RequestException as _e:
pass
return False | 1ed307d7be468157c880bf7e481f255bac449c34 | 17,664 |
def fit_and_report(model, X, y, X_valid, y_valid):
"""
It fits a model and returns train and validation scores.
Parameters:
model (sklearn classifier model): The sklearn model
X (numpy.ndarray): The X part of the train set
y (numpy.ndarray): The y part of the train set
X_valid (numpy.ndarray): The X part of the validation set
y_valid (numpy.ndarray): The y part of the validation set
Returns:
scores (list): The list of scores of train and validation
"""
model.fit(X, y)
lr_probs = model.predict_proba(X)
lr_probs = lr_probs[:, 1]
lr_probs_val = model.predict_proba(X_valid)
lr_probs_val = lr_probs_val[:, 1]
# calculate scores
lr_auc = roc_auc_score(y, lr_probs)
lr_auc_val = roc_auc_score(y_valid, lr_probs_val)
scores = [lr_auc, lr_auc_val]
return scores | f993a5410248e5303995f37b5464cb4a57928bcf | 17,665 |
def move_all_generation_to_high_voltage(data):
"""Move all generation sources to the high voltage market.
Uses the relative shares in the low voltage market, **ignoring transmission losses**. In theory, using the production volumes would be more correct, but these numbers are no longer updated since ecoinvent 3.2.
Empties out the medium and low voltage mixes."""
MIXES = {low_voltage_mix, medium_voltage_mix, high_voltage_mix}
mix_filter = lambda ds: ds['name'] in MIXES
for group in toolz.groupby("location", filter(mix_filter, data)).values():
assert len(group) == 3
high, low, medium = sorted(group, key=lambda x: x['name'])
medium_in_low = [ex for ex in low['exchanges']
if ex['name'] == medium_voltage_transformation][0]['amount']
high_in_low = [ex for ex in medium['exchanges']
if ex['name'] == high_voltage_transformation][0]['amount'] * \
medium_in_low
for exc in high['exchanges']:
if (exc['name'] in high_voltage_providers or (
"electricity" in exc['name'] and
"import from" in exc['name'])):
rescale_exchange(exc, high_in_low)
high['exchanges'].extend([rescale_exchange(exc, medium_in_low)
for exc in medium['exchanges']
if exc['name'] in medium_voltage_providers])
high['exchanges'].extend([exc
for exc in low['exchanges']
if exc['name'] in low_voltage_providers])
data = empty_medium_voltage_markets(data)
data = empty_low_voltage_markets(data)
return data | ed9b1fcf60bb1b5645dbd6946fe2e98e6e73ccf3 | 17,666 |
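A quick illustration of the toolz.groupby call used above: a non-callable key groups dictionaries by that field. The sample records below are made up.
import toolz

datasets = [
    {"location": "CH", "name": "market for electricity, low voltage"},
    {"location": "CH", "name": "market for electricity, high voltage"},
    {"location": "DE", "name": "market for electricity, low voltage"},
]
grouped = toolz.groupby("location", datasets)
print({loc: len(group) for loc, group in grouped.items()})  # {'CH': 2, 'DE': 1}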
from typing import Union
from lxml import etree
def parser_first_text_or_content_if_could(html: etree._Element,
query_path: str) -> Union[str, None]:
"""
    If the parsed result is a list, take the text of the first node by default.
"""
nodes = html.xpath(query_path)
if not nodes:
return None
if len(nodes) > 0:
desc = nodes[0]
if hasattr(desc, 'text'):
return str(desc.text)
return str(desc)
return None | 8410280ca71083986af0aa89a312d5082ff36d8d | 17,667 |
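Usage sketch with a made-up HTML fragment, showing why only the first matched node's text is returned.
html = etree.HTML("<html><body><p>first</p><p>second</p></body></html>")
print(parser_first_text_or_content_if_could(html, "//p"))   # first
print(parser_first_text_or_content_if_could(html, "//h1"))  # None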
from typing import List, Optional, Text
import os
import logging
def get_file_paths_from(dir: Text) -> Optional[List[Text]]:
    """
    List all file paths inside a directory.
    :param dir: the directory to list.
    :return: a list of file paths, or None if the directory does not exist.
    """
if not os.path.exists(dir):
logging.info('{} does not exist.'.format(dir))
return None
file_paths = ["{}/{}".format(dir, x) for x in os.listdir(dir)]
return file_paths | e9e26d01baf7f20802b63f3ebcbfd0b3aac1191c | 17,668 |
def get_all_quantity(results, q_func=None):
"""
"""
quantities = []
for res_name in results:
if q_func is not None:
# We change the quantity function
results[res_name].q_func = q_func
min_quantity = results[res_name].min_quantity
quantities.append(min_quantity)
return quantities | 56d50cacab2dcd7cb1554798a11bb1937436c73e | 17,669 |
import random
def generate_example_type_2a(problem, one_step_inferences):
"""Generates a type 2a training example.
Args:
problem: a lib.InferenceProblem instance.
    one_step_inferences: the list of one-step inferences that can be reached
      from the premises.
Returns:
An instance of "Example", or None if any issue was found.
"""
premises = problem.premises
example_type = "2a"
name_rule = random.choice([True, False])
inputs = ("What can be inferred from the following premises in a single "
"inference step (ignoring inferences that add new predicates or "
"constants)? ")
if name_rule:
inputs += "Name the inference rule being used: "
inputs += (". ".join([rules.render_logic_clause(x) for x in premises])) + "."
inferences_str = []
for [rule_inference, rule] in one_step_inferences:
rule_name = rule.rule_name
inference_str = rules.render_logic_clause(rule_inference)
if name_rule:
inference_str += f" can be inferred via the {rule_name} rule"
inferences_str.append(inference_str)
targets = (". ".join(inferences_str)) + "."
if not inferences_str:
example_type = "2a-empty"
targets = "Nothing can be inferred from these premises."
elif problem.contains_contradiction:
example_type = "2a-cont"
targets = ("Since the premises are contradictory, we can infer anything "
"from them.")
return lib.Example(inputs, targets, example_type, problem) | fafc05b70c7b2a84a2c1476e51fa783f240f2bd5 | 17,670 |
import types
import sys
def name_has_type_hint(name: str, frame: types.FrameType) -> str:
"""Identifies if a variable name has a type hint associated with it.
    This can be useful if a user writes something like::
name : something
use(name)
instead of::
name = something
use(name)
and sees a NameError.
HOWEVER, when an exception is raised, it seems that the only type hints
that are picked up correctly are those found in the global scope.
"""
type_hint_found_in_scope = _(
"A type hint found for `{name}` in the {scope} scope.\n"
"Perhaps you had used a colon instead of an equal sign and wrote\n\n"
" {name} : {hint}\n\n"
"instead of\n\n"
" {name} = {hint}\n"
)
nonlocals = get_variables_in_frame_by_scope(frame, "nonlocal")
scopes = (
("local", frame.f_locals),
("global", frame.f_globals),
("nonlocal", nonlocals),
)
for scope, scope_dict in scopes:
if "__annotations__" in scope_dict and name in scope_dict["__annotations__"]:
hint = scope_dict["__annotations__"][name]
# For Python 3.10+, all type hints are strings
if (
isinstance(hint, str)
and sys.version_info.major == 3
and sys.version_info.minor < 10
):
hint = repr(hint)
return type_hint_found_in_scope.format(name=name, scope=scope, hint=hint)
return "" | 1f572d6547e6e66cb499e511a24f8eb3c5c9371b | 17,671 |
import os
import yaml
def user_input(address, interface=None, name=None, filename='config.yaml'):
"""
Gather user input for adding an instrument to the YAML configuration file
Parameters
----------
    address : str
        The instrument address (e.g. a VISA resource string)
    interface : str, optional
        Interface key used in the YAML (i.e. 'pyvisa'); defaults to 'pyvisa'
    name : str, optional
        Instrument name (as the top node) used in the YAML
    filename : str, optional
        Name of the YAML configuration file (defaults to 'config.yaml')
Returns
-------
dict
The configuration dictionary that will be used to append the YAML
"""
# read current YAML
yaml_config = open(os.path.join(home, filename), 'r+')
current_configs = yaml.safe_load(yaml_config)
ok = False
if name is None:
while not ok:
name = input('Enter your desired name for the instrument:')
if len(name) == 0 or not isinstance(name, str):
print('Bad input, try again')
else:
ok = True
config = {name: {}}
if interface is None:
interface = 'pyvisa'
config[name] = {'address': {interface: address}}
# determine the class to assign
instrument_classes = find_instrument_classes()
print('What class to assign to this instrument?')
for num, ic in enumerate(instrument_classes):
print('({}) {}'.format(num, ic))
class_num = int(input(' Enter the number associated with the class: '))
    if class_num < 0 or class_num >= len(instrument_classes):
print('Bad selection of class')
return {}
config[name]['python_class'] = instrument_classes[class_num]
# get location of CSV files
print('The instrument command CSV files are within:\n {}/'.format(current_configs['csv_directory']))
print('Enter where (within the directory above) this instruments CSV files are')
csv_loc = input(' An example is keysight/oscilloscope/MSOX3000 : ')
print(current_configs['csv_directory'])
csv_dir = os.path.join(current_configs['csv_directory'], csv_loc)
if not os.path.isdir(csv_dir):
print('Directory {} does not exist. Exiting'.format(csv_dir))
return {}
config[name]['csv_folder'] = csv_loc
return config | 3b518a0dd8bcdb54ece2abbb847e585764158c7e | 17,672 |
def query_title_bar_text(shared_state):
"""return text for title bar, updated when screen changes."""
coll_name = shared_state["active_collection"].name
str_value = f"QUERY SOURCE: {coll_name}"
return str_value | 2ce051cc8d6a87d3c964fba1abb502125b227717 | 17,673 |
def input_handler2():
"""Run the wx event loop by processing pending events only.
This is like inputhook_wx1, but it keeps processing pending events
until stdin is ready. After processing all pending events, a call to
time.sleep is inserted. This is needed, otherwise, CPU usage is at 100%.
This sleep time should be tuned though for best performance.
"""
app = wx.GetApp()
global POLLTIME, ON_INTERRUPT
if app is not None:
if not wx.Thread_IsMain():
raise Exception('wx thread is not the main thread')
evtloop = wx.EventLoop()
activator = wx.EventLoopActivator(evtloop)
while not stdin_ready():
while evtloop.Pending():
evtloop.Dispatch()
app.ProcessIdle()
try:
sleep(POLLTIME)
except KeyboardInterrupt:
if hasattr(ON_INTERRUPT, '__call__'):
ON_INTERRUPT()
activator = None
# del activator
return 0 | d9b3887f82b2a9ef19449d58e40ca18b642a2bf4 | 17,674 |
import requests
def create_upload_record(env, source_id, headers, cookies):
"""Creates an upload resource via the G.h Source API."""
post_api_url = f"{get_source_api_url(env)}/sources/{source_id}/uploads"
print(f"Creating upload via {post_api_url}")
res = requests.post(post_api_url,
json={"status": "IN_PROGRESS", "summary": {}},
cookies=cookies,
headers=headers)
if res and res.status_code == 201:
res_json = res.json()
return res_json["_id"]
e = RuntimeError(
f"Error creating upload record, status={res.status_code}, response={res.text}")
complete_with_error(e) | 7d8bcebec30be7ccba5406f1afc6a1b267e8e398 | 17,675 |
def get_versions(sys):
"""Import stuff and get versions if module
Parameters
----------
sys : module
The sys module object.
Returns
-------
module_versions : dict
The module names and corresponding versions.
"""
module_versions = {}
for name, module in sys.modules.items():
if '.' in name:
continue
if isinstance(name, str) and len(name) and name[0] == '_':
continue
module_version = LooseVersion(getattr(module, '__version__', None))
module_version = getattr(module_version, 'vstring', None)
        if module_version is not None and (
                'git' in module_version or '.dev' in module_version):
git_path = op.dirname(op.realpath(module.__file__))
head = _get_git_head(git_path)
module_version += '-HEAD:{}'.format(head)
module_versions[name] = module_version
return module_versions | 172103da6d6f476080a1c1a33b34ebb4d028df05 | 17,676 |
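A condensed, standalone version of the same idea without the LooseVersion/git handling; purely illustrative.
import sys

versions = {
    name: getattr(module, "__version__", None)
    for name, module in list(sys.modules.items())
    if "." not in name and not name.startswith("_")
}
print({name: version for name, version in versions.items() if version is not None})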
def day05_part1(file: str) -> int:
""" Solves advent of code: day05 part1 """
with open(file) as fid:
seats = [Seat(line.strip()) for line in fid]
highest_seat_num = max(seat.number for seat in seats)
return highest_seat_num | 5ba399053d3a7e855ded402cea60f59c9d79d9a4 | 17,677 |
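The Seat class is defined elsewhere; the decoding it presumably performs is plain binary, with F/L as 0 and B/R as 1. A standalone sketch:
def seat_number(code: str) -> int:
    bits = code.translate(str.maketrans("FBLR", "0101"))
    return int(bits, 2)

print(seat_number("FBFBBFFRLR"))  # 357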
def GetInput():
"""Get player inputs and lower-case the input"""
Input = str(input("{:>20s}".format("")))
print("\n \n \n \n \n")
return Input.lower() | 9d8626a9c9f0615a0453d0804b6b37244ec373c3 | 17,678 |
def ldns_fskipcs_l(*args):
"""LDNS buffer."""
return _ldns.ldns_fskipcs_l(*args) | 44e357adf381e11aaccb78441c543438abe75ba1 | 17,679 |
def specific_parser(parser, log=False, run_folder=None, mode=None, tot_epochs=None, restoring_rep_path=None,
start_from_epoch=None, pretrained_GAN=None, GAN_epoch=None, data_dir_train=None, data_dir_train2=None,
data_dir_test=None, data_dir_test2=None, images_log_freq=None, batch_size=None, batch_size_SN=None,
acc_log_freq=None, loss_log_freq=None, experiment_name=None, run_description=None, prc_train=None,
prc_test=None, prc_val=None, sar_c=None, optical_c=None, N_classes=None, patch_size=None, SN_log_freq=None,
save_model_freq=None, lambda_identity=None, D_training_ratio=None, lambda_A=None, loss_type=None,
lambda_gp=None, res_block_N=None, pool_prc_O=None, pool_prc_S=None, buff_dim=None, th_low=None, th_high=None,
pool=None, conditioned=None, dropping=None, th_b_h_ratio=None, th_b_l_ratio=None, th_b_h_pool=None,
th_b_l_pool=None, drop_prc=None, seed=None):
"""
    This is an intermediate layer between the general parser and the config routine that lets whoever uses this code
    easily access parameters and change them when building an experiment
    :param parser: the general argument parser
    :param log: whether to print each updated value
    :param run_folder: new value for run folder
:param mode: train mode
:param tot_epochs:
:param restoring_rep_path:
:param start_from_epoch:
:param pretrained_GAN:
:param GAN_epoch:
:param data_dir_train:
:param data_dir_train2:
:param data_dir_test:
:param data_dir_test2:
:param images_log_freq:
:param batch_size:
:param batch_size_SN:
:param acc_log_freq:
:param loss_log_freq:
:param experiment_name:
:param run_description:
:param prc_train:
:param prc_test:
:param prc_val:
:param sar_c:
:param optical_c:
:param N_classes:
:param patch_size:
:param SN_log_freq:
:param save_model_freq:
:param lambda_identity:
:param D_training_ratio:
:param lambda_A:
:param loss_type:
:param lambda_gp:
:param res_block_N:
:param pool_prc_O:
:param pool_prc_S:
:param buff_dim:
:param th_low:
:param th_high:
:param pool:
:param conditioned:
:param dropping:
:param th_b_h_ratio:
:param th_b_l_ratio:
:param th_b_h_pool:
:param th_b_l_pool:
:param drop_prc:
:return: args
"""
args = parser.parse_args()
print('SPECIFIC CONFIG')
args.log_dir = update_arg(args.log_dir, run_folder, 'log_dir', log)
args.tot_epochs = update_arg(args.tot_epochs, tot_epochs, 'tot_epochs', log)
args.mode = update_arg(args.mode, mode, 'mode', log)
args.restoring_rep_path = update_arg(args.restoring_rep_path, restoring_rep_path, 'restoring_rep_path', log)
args.start_from_epoch = update_arg(args.start_from_epoch, start_from_epoch, 'start_from_epoch', log)
args.pretrained_GAN = update_arg(args.pretrained_GAN, pretrained_GAN, 'pretrained_GAN', log)
args.GAN_epoch = update_arg(args.GAN_epoch, GAN_epoch, 'GAN_epoch', log)
args.data_dir_train = update_arg(args.data_dir_train, data_dir_train, 'data_dir_train', log)
args.data_dir_train2 = update_arg(args.data_dir_train2, data_dir_train2, 'data_dir_train2', log)
args.data_dir_test = update_arg(args.data_dir_test, data_dir_test, 'data_dir_test', log)
args.data_dir_test2 = update_arg(args.data_dir_test2, data_dir_test2, 'data_dir_test2', log)
args.images_log_freq = update_arg(args.images_log_freq, images_log_freq, 'images_log_freq', log)
args.batch_size = update_arg(args.batch_size, batch_size, 'batch_size', log)
args.batch_size_SN = update_arg(args.batch_size_SN, batch_size_SN, 'batch_size_SN', log)
args.acc_log_freq = update_arg(args.acc_log_freq, acc_log_freq, 'acc_log_freq', log)
args.loss_log_freq = update_arg(args.loss_log_freq, loss_log_freq, 'loss_log_freq', log)
args.experiment_name = update_arg(args.experiment_name, experiment_name, 'experiment_name', log)
args.run_description = update_arg(args.run_description, run_description, 'run_description', log)
args.prc_train = update_arg(args.prc_train, prc_train, 'prc_train', log)
args.prc_test = update_arg(args.prc_test, prc_test, 'prc_test', log)
args.prc_val = update_arg(args.prc_val, prc_val, 'prc_val', log)
args.sar_c = update_arg(args.sar_c, sar_c, 'sar_c', log)
args.optical_c = update_arg(args.optical_c, optical_c, 'optical_c', log)
args.N_classes = update_arg(args.N_classes, N_classes, 'N_classes', log)
args.patch_size = update_arg(args.patch_size, patch_size, 'patch_size', log)
args.SN_log_freq = update_arg(args.SN_log_freq, SN_log_freq, 'SN_log_freq', log)
args.save_model_freq = update_arg(args.save_model_freq, save_model_freq, 'save_model_freq', log)
args.lambda_identity = update_arg(args.lambda_identity, lambda_identity, 'lambda_identity', log)
args.D_training_ratio = update_arg(args.D_training_ratio, D_training_ratio, 'D_training_ratio', log)
args.lambda_A = update_arg(args.lambda_A, lambda_A, 'lambda_A', log)
args.loss_type = update_arg(args.loss_type, loss_type, 'loss_type', log)
args.lambda_gp = update_arg(args.lambda_gp, lambda_gp, 'lambda_gp', log)
args.res_block_N = update_arg(args.res_block_N, res_block_N, 'res_block_N', log)
args.pool_prc_O = update_arg(args.pool_prc_O, pool_prc_O, 'pool_prc_O', log)
args.pool_prc_S = update_arg(args.pool_prc_S, pool_prc_S, 'pool_prc_S', log)
args.buff_dim = update_arg(args.buff_dim, buff_dim, 'buff_dim', log)
args.th_low = update_arg(args.th_low, th_low, 'th_low', log)
args.th_high = update_arg(args.th_high, th_high, 'th_high', log)
args.pool = update_arg(args.pool, pool, 'pool', log)
args.conditioned = update_arg(args.conditioned, conditioned, 'conditioned', log)
args.dropping = update_arg(args.dropping, dropping, 'dropping', log)
args.th_b_h_ratio = update_arg(args.th_b_h_ratio, th_b_h_ratio, 'th_b_h_ratio', log)
args.th_b_l_ratio = update_arg(args.th_b_l_ratio, th_b_l_ratio, 'th_b_l_ratio', log)
args.th_b_h_pool = update_arg(args.th_b_h_pool, th_b_h_pool, 'th_b_h_pool', log)
args.th_b_l_pool = update_arg(args.th_b_l_pool, th_b_l_pool, 'th_b_l_pool', log)
args.drop_prc = update_arg(args.drop_prc, drop_prc, 'drop_prc', log)
args.seed = update_arg(args.seed, seed, 'seed', log)
return args | cbce4c086da986a3232d40ae2d917b921ff64ff2 | 17,680 |
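update_arg is defined elsewhere in the project; the following is only a hypothetical sketch of its apparent contract (keep the parsed value unless an explicit override is supplied), not the original implementation.
def update_arg(current, new, name, log=False):
    # hypothetical: return the override when given, otherwise keep the parsed value
    if new is None:
        return current
    if log:
        print("{}: {!r} -> {!r}".format(name, current, new))
    return new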
import re
def to_identifier(text):
"""Converts text to a valid Python identifier by replacing all
whitespace and punctuation and adding a prefix if starting with a digit"""
if text[:1].isdigit():
text = '_' + text
return re.sub('_+', '_', str(text).translate(TRANS_TABLE)) | 8c8ca0c52c13a7d78aa9ec2288ef86ec7e10f84a | 17,681 |
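TRANS_TABLE is defined elsewhere in the module; this sketch assumes it maps whitespace and punctuation to underscores, which is enough to see the effect.
import re
import string

TRANS_TABLE = str.maketrans({c: "_" for c in string.whitespace + string.punctuation})

def to_identifier_demo(text):
    if text[:1].isdigit():
        text = '_' + text
    return re.sub('_+', '_', str(text).translate(TRANS_TABLE))

print(to_identifier_demo("2021 sales-report (final)"))  # _2021_sales_report_final_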
def estimate_next_pos(measurement, OTHER = None):
"""Estimate the next (x, y) position of the wandering Traxbot
based on noisy (x, y) measurements."""
if OTHER is None:
# Setup Kalman Filter
[u, P, H, R] = setup_kalman_filter()
# OTHER = {'x': x, 'P': P, 'u': u, 'matrices':[H, R]}
x = matrix([[measurement[0]], [measurement[1]], [0], [0], [0]])
OTHER = {'z_list': deque([]), 'x': x,
'P': P, 'u': u, 'matrices': [H, R], 'step': 1
# 'zx': [measurement[0]]
}
OTHER['z_list'].append(np.array(measurement))
# return measurement, OTHER
# elif OTHER['step'] == 1:
# # Use first three measurements to seed the filter
# OTHER['step'] = 2
# OTHER['z_list'].append(np.array(measurement))
# # OTHER['zx'].append(measurement[0])
# # OTHER['x_list'].append(measurement)
# return measurement, OTHER
# elif OTHER['step'] == 2:
# OTHER['step'] = 3
# # Get last 3 measurements
# OTHER['z_list'].append(np.array(measurement))
# # OTHER['zx'].append(measurement[0])
# # Get initial estimate of state from the three measurements
# OTHER['x'] = state_from_measurements(OTHER['z_list'])
#
# # Initialization complete
# OTHER['step'] = -1
#
# # Use last 20 measurements only
# num_z = 1000
# # OTHER['x_list'] = deque(maxlen=num_z)
# # OTHER['z_list'] = deque(maxlen=num_z+1)
#
# # Predict next position of robot using the dynamics and current state
# next_state = robot_x_fn(OTHER['x'])
# # OTHER['x_list'].append(next_state)
# return (next_state.value[0][0], next_state.value[1][0]), OTHER
OTHER['z_list'].append(np.array(measurement))
x, P = extended_kalman_filter(measurement, OTHER['x'], OTHER['u'],
OTHER['P'], robot_F_fn, robot_x_fn, *OTHER['matrices'])
# OTHER['x_list'].append(x)
OTHER['x'] = x
OTHER['P'] = P
# print('Trace of P : '+str(P.trace()))
# Predict next position of robot
next_state = robot_x_fn(x)
est_xy = (next_state.value[0][0], next_state.value[1][0])
# You must return xy_estimate (x, y), and OTHER (even if it is None)
# in this order for grading purposes.
# xy_estimate = (3.2, 9.1)
# return z, OTHER
return est_xy, OTHER | a6b6eba0aa7e71a986bc5bed68cc6fb955c02383 | 17,682 |
def AutoBusList(*args):
"""List of Buses or (File=xxxx) syntax for the AutoAdd solution mode."""
# Getter
if len(args) == 0:
return get_string(lib.Settings_Get_AutoBusList())
# Setter
Value, = args
if type(Value) is not bytes:
Value = Value.encode(codec)
lib.Settings_Set_AutoBusList(Value) | aab4ae15dd7b12c46eb5a75bb780ac78609273ae | 17,683 |
from typing import Tuple
from typing import Optional
def validate_inputs(*, input_data: pd.DataFrame) -> Tuple[pd.DataFrame, Optional[dict]]:
"""Check model inputs for unprocessable values."""
# convert syntax error field names (beginning with numbers)
# input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
input_data["TotalCharges"] = pd.to_numeric(
input_data["TotalCharges"], errors="coerce"
)
relevant_data = input_data[config.model_config.features].copy()
validated_data = drop_na_inputs(input_data=relevant_data)
errors = None
try:
# replace numpy nans so that pydantic can validate
MultipleChurnDataInputs(
inputs=validated_data.replace({np.nan: None}).to_dict(orient="records")
)
except ValidationError as error:
errors = error.json()
return validated_data, errors | b03e616dc10c734af282d71a650da822e961e93f | 17,684 |
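A quick demo of the coercion step above: non-numeric TotalCharges values (blank strings in what is presumably the Telco churn data) become NaN instead of raising.
import pandas as pd

charges = pd.Series(["29.85", " ", "1889.5"])
print(pd.to_numeric(charges, errors="coerce").tolist())  # [29.85, nan, 1889.5]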
def create_env(n_envs, eval_env=False, no_log=False):
"""
Create the environment and wrap it if necessary
:param n_envs: (int)
:param eval_env: (bool) Whether is it an environment used for evaluation or not
:param no_log: (bool) Do not log training when doing hyperparameter optim
(issue with writing the same file)
:return: (Union[gym.Env, VecEnv])
"""
global hyperparams
global env_kwargs
# Do not log eval env (issue with writing the same file)
log_dir = None if eval_env or no_log else save_path
if n_envs == 1:
env = DummyVecEnv([make_env(env_id, 0, args.seed,
wrapper_class=env_wrapper, log_dir=log_dir,
env_kwargs=env_kwargs)])
else:
# env = SubprocVecEnv([make_env(env_id, i, args.seed) for i in range(n_envs)])
# On most env, SubprocVecEnv does not help and is quite memory hungry
env = DummyVecEnv([make_env(env_id, i, args.seed, log_dir=log_dir, env_kwargs=env_kwargs,
wrapper_class=env_wrapper) for i in range(n_envs)])
if normalize:
# Copy to avoid changing default values by reference
local_normalize_kwargs = normalize_kwargs.copy()
# Do not normalize reward for env used for evaluation
if eval_env:
if len(local_normalize_kwargs) > 0:
local_normalize_kwargs['norm_reward'] = False
else:
local_normalize_kwargs = {'norm_reward': False}
if args.verbose > 0:
if len(local_normalize_kwargs) > 0:
print(f"Normalization activated: {local_normalize_kwargs}")
else:
print("Normalizing input and reward")
env = VecNormalize(env, **local_normalize_kwargs)
# Optional Frame-stacking
if hyperparams.get('frame_stack', False):
n_stack = hyperparams['frame_stack']
env = VecFrameStack(env, n_stack)
print(f"Stacking {n_stack} frames")
if is_image_space(env.observation_space):
if args.verbose > 0:
print("Wrapping into a VecTransposeImage")
env = VecTransposeImage(env)
return env | c0d1355cb1ea4446370a71cb49bfb6855799b4b3 | 17,685 |
def ellipse(pts, pc=None, ab=None):
""" Distance function for the ellipse
centered at pc = [xc, yc], with a, b = [a, b]
"""
if pc is None:
pc = [0, 0]
if ab is None:
ab = [1., 2.]
return dist((pts - pc)/ab) - 1.0 | 7ff99b98aa09d86223afe97a987176f4dc0e0f3d | 17,686 |
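A standalone numpy version of the same distance function (the original relies on a `dist` helper defined elsewhere); points on the ellipse give 0, points inside give negative values.
import numpy as np

def ellipse_sd(pts, pc=(0.0, 0.0), ab=(1.0, 2.0)):
    pts = np.atleast_2d(pts).astype(float)
    return np.linalg.norm((pts - np.asarray(pc)) / np.asarray(ab), axis=1) - 1.0

print(ellipse_sd([[1.0, 0.0], [0.0, 2.0], [0.0, 0.0]]))  # [ 0.  0. -1.]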
def _transform(
parsed_date_data: ParsedDate,
parsed_output_format_data: ParsedTargetFormat,
output_format: str,
output_timezone: str,
) -> str:
"""
    This function transforms the parsed result into the target format.
Parameters
----------
parsed_date_data
generated year, month, day, hour, minute, second
parsed_output_format_data
generated year token, month token, day token, hour token,
minute token, second token of target format
output_format
target format string
output_timezone
target timezone string
"""
result = deepcopy(output_format)
if output_timezone != "":
parsed_date_data = _change_timezone(parsed_date_data, output_timezone)
# Handle year
result = _transform_year(
result, parsed_output_format_data.ymd_token["year_token"], parsed_date_data.ymd["year"]
)
# Handle day
result = _transform_day(
result, parsed_output_format_data.ymd_token["day_token"], parsed_date_data.ymd["day"]
)
# Handle hours
result = _transform_hms(
result,
str(parsed_output_format_data.hms_token["hour_token"]),
bool(parsed_output_format_data.hms_token["ispm"]),
parsed_date_data.hms["hour"],
)
# Handle minutes
result = _transform_hms(
result,
str(parsed_output_format_data.hms_token["minute_token"]),
False,
parsed_date_data.hms["minute"],
)
# Handle seconds
result = _transform_hms(
result,
str(parsed_output_format_data.hms_token["second_token"]),
False,
parsed_date_data.hms["second"],
)
# Handle month
result = _transform_month(
result, parsed_output_format_data.ymd_token["month_token"], parsed_date_data.ymd["month"]
)
# Handle weekday
result = _transform_weekday(
result, parsed_output_format_data.weekday_token, parsed_date_data.weekday
)
# Handle timezone
result = _transform_timezone(
result,
parsed_output_format_data.timezone_token,
str(parsed_date_data.tzinfo["timezone"]),
str(parsed_date_data.tzinfo["utc_add"]),
int(parsed_date_data.tzinfo["utc_offset_hours"]),
int(parsed_date_data.tzinfo["utc_offset_minutes"]),
)
return result | cc51f2776165bf05af1d97bcc6eb70bd6f03702f | 17,687 |
import logging
import uuid
def volunteer_dict_from_request(request: flask.Request, actor: str) -> dict:
"""Creates and returns a dict of volunteer info from the request.
`actor` is the ID/email of the person or entity that is triggering this.
"""
logging.debug('gapps.volunteer_dict_from_request: %s', list(request.values.items()))
# Make sure the user/form/request isn't trying to mess with fields that it
# shouldn't be.
for name, field in _S.volunteer.fields._asdict().items():
if not field.form_field and request.values.get(name) is not None:
# This causes the request processing to stop
flask.abort(400, description='invalid field')
if field.values is not None and request.values.get(name) is not None \
and not set(request.values.get(name).split(config.MULTIVALUE_DIVIDER)).issubset(field.values):
# This causes the request processing to stop
flask.abort(400, description='invalid field value')
volunteer = config.validate_volunteer(request.values)
if not volunteer:
logging.warning('gapps.volunteer_dict_from_request: config.validate_volunteer failed')
# This causes the request processing to stop
flask.abort(400, description='invalid input')
# We didn't validate the geoposition above, so do it now
geoposition = request.values.get(_GEOPOSITION_VALUE_KEY, '')
geoposition_required = _S.volunteer.fields.joined_latlong.required
if not utils.latlong_validator(geoposition, geoposition_required):
logging.warning('gapps.volunteer_dict_from_request: utils.latlong_validator failed')
flask.abort(400, description='invalid input')
geoaddress = helpers.address_from_latlong(geoposition)
# Set the GUID field
volunteer[_S.volunteer.fields.id.name] = str(uuid.uuid4())
# Set the timestamps
volunteer[_S.volunteer.fields.joined.name] = utils.current_datetime()
volunteer[_S.volunteer.fields.joined_by.name] = actor
volunteer[_S.volunteer.fields.joined_latlong.name] = geoposition
volunteer[_S.volunteer.fields.joined_address.name] = geoaddress
volunteer[_S.volunteer.fields.address_latlong.name] = \
helpers.latlong_for_record(_S.volunteer.fields, volunteer)
return volunteer | 832a9e01dd4873d3781e82f5fc8d8063c1aefa13 | 17,688 |
def stop_next_turn():
"""
    Dirty way to stop the MCTS in a clean way (without SIGINT or SIGTERM):
    the MCTS finishes the current turn, saves its data and stops (if you are using DFT this can take some time).
    To trigger it, write "stop" in the file MCTS/stop_mcts.
    :return: True if the stop signal was found, False otherwise
"""
with open(p.f_stop) as f:
stop = f.read()
if "stop" in stop:
print("MCTS stopped with signal 'stop' in '%s' file" % p.f_stop)
return True
return False | eb76187f25f49ae674fefe7969277122bd18e5c8 | 17,689 |
from datetime import datetime
def pull_request_average_time_between_responses(self, repo_group_id, repo_id=None, group_by='month', time_unit='hours', begin_date=None, end_date=None):
""" Avegage time between responeses with merged_status and the time frame
:param repo_group_id: The repository's repo_group_id
:param repo_id: The repository's repo_id, defaults to None
:param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month'
:param time_unit: Unit of time for data, options are: 'minutes', or 'hours', defaults to 'hours'
:param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
:param end_date: Specifies the end date, defaults to datetime.now()
    :return: DataFrame of average time between responses
"""
if not begin_date:
begin_date = '1970-1-1'
if not end_date:
end_date = datetime.datetime.now().strftime('%Y-%m-%d')
unit_options = ['year', 'month', 'week', 'day']
time_group_bys = []
for unit in unit_options.copy():
if group_by not in unit_options:
continue
time_group_bys.append('closed_{}'.format(unit))
del unit_options[0]
if not repo_id:
pr_all_SQL = s.sql.text("""
SELECT
repo_id,
repo_name,
repo_group_id,
rg_name AS repo_group_name,
date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
(EXTRACT(epoch FROM average_time_between_responses)/3600) AS average_hours_between_responses,
(EXTRACT(epoch FROM average_time_between_responses)/60) AS average_minutes_between_responses,
CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
count(*) AS num_pull_requests
FROM (
SELECT
repo_name,
repo_groups.repo_group_id,
rg_name,
pull_requests.repo_id,
pull_requests.pull_request_id,
pr_closed_at,
pr_created_at,
pr_merged_at,
(MAX(message.msg_timestamp) - MIN(message.msg_timestamp)) / COUNT(DISTINCT message.msg_timestamp) AS average_time_between_responses
FROM pull_request_message_ref, message, repo_groups,
pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id
WHERE pull_requests.repo_id IN
(SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id)
AND repo.repo_id = pull_requests.repo_id
AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
AND pull_request_message_ref.msg_id = message.msg_id
AND repo_groups.repo_group_id = repo.repo_group_id
AND pr_created_at::DATE >= :begin_date ::DATE
AND pr_closed_at::DATE <= :end_date ::DATE
GROUP BY pull_requests.pull_request_id, repo.repo_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name
) time_between_responses
GROUP BY closed_year, closed_month, merged_status, time_between_responses.pr_closed_at, time_between_responses.average_time_between_responses, time_between_responses.repo_id, time_between_responses.repo_name, time_between_responses.repo_group_id, time_between_responses.rg_name
""")
else:
pr_all_SQL = s.sql.text("""
SELECT
date_part( 'year', pr_closed_at :: DATE ) AS closed_year,
date_part( 'month', pr_closed_at :: DATE ) AS closed_month,
date_part( 'week', pr_closed_at :: DATE ) AS closed_week,
date_part( 'day', pr_closed_at :: DATE ) AS closed_day,
(EXTRACT(epoch FROM average_time_between_responses)/3600) AS average_hours_between_responses,
(EXTRACT(epoch FROM average_time_between_responses)/60) AS average_minutes_between_responses,
CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status,
count(*) AS num_pull_requests
FROM (
SELECT pull_requests.pull_request_id,
pr_closed_at,
pr_created_at,
pr_merged_at,
(MAX(message.msg_timestamp) - MIN(message.msg_timestamp)) / COUNT(DISTINCT message.msg_timestamp) AS average_time_between_responses
FROM pull_requests, repo, pull_request_message_ref, message
WHERE repo.repo_id = :repo_id
AND repo.repo_id = pull_requests.repo_id
AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id
AND pull_request_message_ref.msg_id = message.msg_id
AND pr_created_at::DATE >= :begin_date ::DATE
AND pr_closed_at::DATE <= :end_date ::DATE
GROUP BY pull_requests.pull_request_id
) time_between_responses
GROUP BY closed_year, closed_month, merged_status, time_between_responses.pr_closed_at, time_between_responses.average_time_between_responses
""")
pr_all = pd.read_sql(pr_all_SQL, self.database,
params={'repo_id': repo_id, 'repo_group_id':repo_group_id,
'begin_date': begin_date, 'end_date': end_date})
if not repo_id:
pr_avg_time_between_responses = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + ['average_{}_between_responses'.format(time_unit)]]
else:
pr_avg_time_between_responses = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[time_group_bys + ['merged_status', 'average_{}_between_responses'.format(time_unit)]]
return pr_avg_time_between_responses | 8834586b1e761c8ba6033140f753a3ac99780da7 | 17,690 |
def create_money(request):
"""Create money object."""
if request.method == 'POST':
form = MoneyForm(request.POST, request.FILES)
if form.is_valid():
money = form.save(commit=False)
money.owner = request.user
money.save()
return redirect(money)
else:
return render(request, 'nec_bank/create_money.html', {'money_form': form})
else:
request.GET._mutable = True
request.GET['created_date'] = timezone.now().astimezone().strftime('%Y-%m-%d %H:%M:%S')
request.GET._mutable = False
form = MoneyForm(request.GET)
return render(request, 'nec_bank/create_money.html', {'money_form': form}) | 483eea12a1c2f49dd63fe2a37a529dafe3a4c6c3 | 17,691 |
def stripper(reply: str, prefix=None, suffix=None) -> str:
"""This is a helper function used to strip off reply prefix and
terminator. Standard Python str.strip() doesn't work reliably because
it operates on character-by-character basis, while prefix/terminator
is usually a group of characters.
Args:
reply: String to be stripped.
prefix: Substring to remove from the beginning of the line.
suffix: Substring to remove from the end of the line.
Returns:
(str): Naked reply.
"""
if prefix is not None and reply.startswith(prefix):
reply = reply[len(prefix):]
if suffix is not None and reply.endswith(suffix):
reply = reply[:-len(suffix)]
return reply | b48281a0dedd5d7f3d476943f12ac49720e67476 | 17,692 |
def resnet_50_generator(block_fn,
lst_layers,
num_classes,
pruning_method=None,
data_format='channels_first',
name=None):
"""Generator for ResNet v1 models.
Args:
block_fn: String that defines whether to use a `residual_block` or
`bottleneck_block`.
lst_layers: list of Ints that denotes number of blocks to include in each
block group. Each group consists of blocks that take inputs of the same
resolution.
num_classes: Int number of possible classes for image classification.
pruning_method: String that specifies the pruning method used to identify
which weights to remove.
data_format: String either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
name: String that specifies name for model layer.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
"""
def model(inputs, is_training):
"""Creation of the model graph."""
with tf.variable_scope(name, 'resnet_model'):
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=64,
kernel_size=7,
strides=2,
pruning_method=pruning_method,
data_format=data_format,
name='initial_conv')
inputs = tf.identity(inputs, 'initial_conv')
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
inputs = tf.layers.max_pooling2d(
inputs=inputs,
pool_size=3,
strides=2,
padding='SAME',
data_format=data_format,
name='initial_max_pool')
inputs = tf.identity(inputs, 'initial_max_pool')
inputs = block_group(
inputs=inputs,
filters=64,
block_fn=block_fn,
blocks=lst_layers[0],
strides=1,
is_training=is_training,
name='block_group1',
pruning_method=pruning_method,
data_format=data_format)
inputs = block_group(
inputs=inputs,
filters=128,
block_fn=block_fn,
blocks=lst_layers[1],
strides=2,
is_training=is_training,
name='block_group2',
pruning_method=pruning_method,
data_format=data_format)
inputs = block_group(
inputs=inputs,
filters=256,
block_fn=block_fn,
blocks=lst_layers[2],
strides=2,
is_training=is_training,
name='block_group3',
pruning_method=pruning_method,
data_format=data_format)
inputs = block_group(
inputs=inputs,
filters=512,
block_fn=block_fn,
blocks=lst_layers[3],
strides=2,
is_training=is_training,
name='block_group4',
pruning_method=pruning_method,
data_format=data_format)
pool_size = (inputs.shape[1], inputs.shape[2])
inputs = tf.layers.average_pooling2d(
inputs=inputs,
pool_size=pool_size,
strides=1,
padding='VALID',
data_format=data_format,
name='final_avg_pool')
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(inputs, [-1, 2048])
inputs = tf.layers.dense(
inputs=inputs,
units=num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=.01),
name='final_dense')
inputs = tf.identity(inputs, 'final_dense')
return inputs
model.default_image_size = 224
return model | 5f471b7cc3608c11515d0efb088c3c9bee0e20e6 | 17,693 |
def bracketBalanced(expression):
"""Check if an expression is balanced.
An expression is balanced if all the opening brackets(i.e. '(, {, [') have
a corresponding closing bracket(i.e. '), }, ]').
Args:
expression (str) : The expression to be checked.
Returns:
bool: True if expression is balanced. False if not balanced.
"""
bracket_dict = {'(': ')', '{': '}', '[': ']'}
stack = Stack()
for i in range(len(expression)):
if expression[i] in bracket_dict.keys():
stack.push(expression[i])
elif expression[i] in bracket_dict.values():
if stack.isEmpty() or expression[i] != bracket_dict[stack.peek()]:
return False
else:
stack.pop()
if stack.isEmpty():
return True
else:
return False | bb6ebeb681fb9425c923a4fdcc41c6158ece332a | 17,694 |
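An equivalent standalone check using a plain list as the stack (the Stack class above is defined elsewhere in the original module).
def balanced(expression):
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in expression:
        if ch in '([{':
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack

print(balanced('{[()]}'), balanced('([)]'))  # True False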
def Leq(pressure, reference_pressure=REFERENCE_PRESSURE, axis=-1):
"""
Time-averaged sound pressure level :math:`L_{p,T}` or equivalent-continious sound pressure level :math:`L_{p,eqT}` in dB.
:param pressure: Instantaneous sound pressure :math:`p`.
:param reference_pressure: Reference value :math:`p_0`.
:param axis: Axis.
.. math:: L_{p,T} = L_{p,eqT} = 10.0 \\log_{10}{ \\left( \\frac{\\frac{1}{T} \\int_{t_1}^{t_2} p^2 (t) \\mathrm{d} t }{p_0^2} \\right)}
See section 2.3.
"""
return 10.0 * np.log10((pressure**2.0).mean(axis=axis) / reference_pressure**2.0) | bf7c640a361f3c07aef70310a213f2603a441664 | 17,695 |
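A numerical check with numpy: a 1 Pa RMS tone against the 20 uPa reference gives the textbook value of roughly 94 dB.
import numpy as np

fs = 48000
t = np.arange(fs) / fs
p = np.sqrt(2.0) * np.sin(2.0 * np.pi * 1000.0 * t)          # 1 Pa RMS, 1 kHz tone
leq = 10.0 * np.log10((p ** 2).mean() / (20e-6) ** 2)
print(round(leq, 1))  # 94.0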
import re
def trimBody(body):
""" Quick function for trimming away the fat from emails """
# Cut away "On $date, jane doe wrote: " kind of texts
body = re.sub(
r"(((?:\r?\n|^)((on .+ wrote:[\r\n]+)|(sent from my .+)|(>+[ \t]*[^\r\n]*\r?\n[^\n]*\n*)+)+)+)",
"",
body,
flags=re.I | re.M,
)
# Crop out quotes
lines = body.split("\n")
body = "\n".join([x for x in lines if not x.startswith(">")])
# Remove hyperlinks
body = re.sub(r"[a-z]+://\S+", "", body)
# Remove email addresses
body = re.sub(r"(<[^>]+>\s*\S+@\S+)", "", body)
body = re.sub(r"(\S+@\S+)", "", body)
return body | 19fcb7313e66d7e710781cf195a7550d050b4848 | 17,696 |
def check_host_arp_table_deleted(host, asic, neighs):
"""
Verifies the ARP entry is deleted.
Args:
host: instance of SonicHost to run the arp show.
        asic: ASIC instance whose namespace is used on multi-asic hosts.
        neighs: list of neighbor IP addresses expected to be deleted.
"""
if host.is_multi_asic:
arptable = host.switch_arptable(namespace=asic.namespace)['ansible_facts']
else:
arptable = host.switch_arptable()['ansible_facts']
neighs_present = []
for neighbor_ip in neighs:
if ':' in neighbor_ip:
table = arptable['arptable']['v6']
else:
table = arptable['arptable']['v4']
if neighbor_ip in table:
neighs_present.append(neighbor_ip)
logger.debug("On host {} asic {}, found neighbors {} that were supposed to be deleted".format(host, asic.asic_index, neighs_present))
return len(neighs_present) == 0 | 378ff5983e8cc856748daa3509844d194f18476f | 17,697 |
import cv2
def fit_ellipses(contours):
"""
Fit ellipses to contour(s).
Parameters
----------
contours : ndarray or list
Contour(s) to fit ellipses to.
Returns
-------
ellipses : ndarray or list
An array or list corresponding to dimensions to ellipses fitted.
"""
if isinstance(contours, list):
ret = [cv2.fitEllipse(c) for c in contours]
else:
ret = cv2.fitEllipse(contours)
return ret | 9246182a1f96ca1691bcddf34271586be93dcf41 | 17,698 |
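A runnable usage sketch (assumes OpenCV 4, where findContours returns two values): draw a filled ellipse, recover its contour, and fit an ellipse back to it.
import numpy as np

img = np.zeros((200, 200), dtype=np.uint8)
cv2.ellipse(img, (100, 100), (60, 30), 15, 0, 360, 255, -1)
contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(cx, cy), axes, angle = fit_ellipses(contours[0])
print(round(cx), round(cy))  # roughly 100 100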
def get_argument_parser() -> ArgumentParser:
"""
Get command line arguments.
"""
parser = ArgumentParser(
description="Say Hello")
subparsers = parser.add_subparsers(title="subcommands")
    parser_say_hello = subparsers.add_parser("say-hello")
    parser_say_hello.add_argument('-n', '--name',
                                  help="a name")
    parser_say_hello.set_defaults(func=do_say_hello)
return parser | f799991025283bf4ce2dbcceed845662312cd6d0 | 17,699 |
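A usage sketch with a stand-in handler, since do_say_hello is defined elsewhere; the wiring mirrors how the parser above is meant to be driven.
from argparse import ArgumentParser

parser = ArgumentParser(description="Say Hello")
subparsers = parser.add_subparsers(title="subcommands")
hello = subparsers.add_parser("say-hello")
hello.add_argument('-n', '--name', help="a name")
hello.set_defaults(func=lambda args: print("Hello, {}!".format(args.name)))

args = parser.parse_args(["say-hello", "--name", "Ada"])
args.func(args)  # Hello, Ada!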